author    David S. Miller <davem@davemloft.net>  2009-10-27 09:03:26 +0100
committer David S. Miller <davem@davemloft.net>  2009-10-27 09:03:26 +0100
commit    cfadf853f6cd9689f79a63ca960c6f9d6665314f (patch)
tree      35418e342d9783f0974ea33ef03875aa21d2362a
parent    vlan: allow null VLAN ID to be used (diff)
parent    sh_eth: Add asm/cacheflush.h (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
	drivers/net/sh_eth.c
-rw-r--r--  drivers/isdn/i4l/isdn_net.h | 6
-rw-r--r--  drivers/net/Kconfig | 5
-rw-r--r--  drivers/net/Makefile | 1
-rw-r--r--  drivers/net/au1000_eth.c | 1
-rw-r--r--  drivers/net/benet/be_cmds.c | 33
-rw-r--r--  drivers/net/benet/be_cmds.h | 5
-rw-r--r--  drivers/net/benet/be_main.c | 27
-rw-r--r--  drivers/net/bonding/bond_main.c | 8
-rw-r--r--  drivers/net/can/sja1000/sja1000_of_platform.c | 1
-rw-r--r--  drivers/net/dm9000.h | 2
-rw-r--r--  drivers/net/e1000e/e1000.h | 12
-rw-r--r--  drivers/net/e1000e/hw.h | 2
-rw-r--r--  drivers/net/e1000e/ich8lan.c | 150
-rw-r--r--  drivers/net/e1000e/phy.c | 469
-rw-r--r--  drivers/net/ethoc.c | 21
-rw-r--r--  drivers/net/fec.c | 2
-rw-r--r--  drivers/net/fec_mpc52xx.c | 6
-rw-r--r--  drivers/net/fec_mpc52xx_phy.c | 1
-rw-r--r--  drivers/net/fs_enet/fs_enet-main.c | 1
-rw-r--r--  drivers/net/fs_enet/mii-bitbang.c | 1
-rw-r--r--  drivers/net/fs_enet/mii-fec.c | 1
-rw-r--r--  drivers/net/fsl_pq_mdio.c | 1
-rw-r--r--  drivers/net/gianfar.c | 4
-rw-r--r--  drivers/net/ibm_newemac/core.c | 2
-rw-r--r--  drivers/net/ifb.c | 3
-rw-r--r--  drivers/net/igb/igb_ethtool.c | 35
-rw-r--r--  drivers/net/igbvf/ethtool.c | 30
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c | 22
-rw-r--r--  drivers/net/ks8851.c | 42
-rw-r--r--  drivers/net/ks8851.h | 1
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 17
-rw-r--r--  drivers/net/netxen/netxen_nic_hdr.h | 1
-rw-r--r--  drivers/net/netxen/netxen_nic_hw.c | 14
-rw-r--r--  drivers/net/netxen/netxen_nic_init.c | 8
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 1
-rw-r--r--  drivers/net/niu.c | 2
-rw-r--r--  drivers/net/phy/mdio-gpio.c | 1
-rw-r--r--  drivers/net/pppoe.c | 129
-rw-r--r--  drivers/net/r8169.c | 13
-rw-r--r--  drivers/net/sh_eth.c | 2
-rw-r--r--  drivers/net/stmmac/Kconfig | 53
-rw-r--r--  drivers/net/stmmac/Makefile | 4
-rw-r--r--  drivers/net/stmmac/common.h | 330
-rw-r--r--  drivers/net/stmmac/descs.h | 163
-rw-r--r--  drivers/net/stmmac/gmac.c | 693
-rw-r--r--  drivers/net/stmmac/gmac.h | 204
-rw-r--r--  drivers/net/stmmac/mac100.c | 517
-rw-r--r--  drivers/net/stmmac/mac100.h | 116
-rw-r--r--  drivers/net/stmmac/stmmac.h | 98
-rw-r--r--  drivers/net/stmmac/stmmac_ethtool.c | 395
-rw-r--r--  drivers/net/stmmac/stmmac_main.c | 2204
-rw-r--r--  drivers/net/stmmac/stmmac_mdio.c | 217
-rw-r--r--  drivers/net/stmmac/stmmac_timer.c | 140
-rw-r--r--  drivers/net/stmmac/stmmac_timer.h | 41
-rw-r--r--  drivers/net/virtio_net.c | 2
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 38
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h | 2
-rw-r--r--  include/net/inet_timewait_sock.h | 8
-rw-r--r--  net/bluetooth/hci_sysfs.c | 4
-rw-r--r--  net/bluetooth/l2cap.c | 9
-rw-r--r--  net/core/pktgen.c | 15
-rw-r--r--  net/ipv4/inet_connection_sock.c | 34
-rw-r--r--  net/ipv4/ip_sockglue.c | 7
-rw-r--r--  net/ipv4/tcp.c | 59
-rw-r--r--  net/ipv4/tcp_minisocks.c | 5
-rw-r--r--  net/ipv6/ipv6_sockglue.c | 6
-rw-r--r--  net/unix/af_unix.c | 2
67 files changed, 6046 insertions, 403 deletions
diff --git a/drivers/isdn/i4l/isdn_net.h b/drivers/isdn/i4l/isdn_net.h
index 74032d0881ef..7511f08effa5 100644
--- a/drivers/isdn/i4l/isdn_net.h
+++ b/drivers/isdn/i4l/isdn_net.h
@@ -83,19 +83,19 @@ static __inline__ isdn_net_local * isdn_net_get_locked_lp(isdn_net_dev *nd)
spin_lock_irqsave(&nd->queue_lock, flags);
lp = nd->queue; /* get lp on top of queue */
- spin_lock(&nd->queue->xmit_lock);
while (isdn_net_lp_busy(nd->queue)) {
- spin_unlock(&nd->queue->xmit_lock);
nd->queue = nd->queue->next;
if (nd->queue == lp) { /* not found -- should never happen */
lp = NULL;
goto errout;
}
- spin_lock(&nd->queue->xmit_lock);
}
lp = nd->queue;
nd->queue = nd->queue->next;
+ spin_unlock_irqrestore(&nd->queue_lock, flags);
+ spin_lock(&lp->xmit_lock);
local_bh_disable();
+ return lp;
errout:
spin_unlock_irqrestore(&nd->queue_lock, flags);
return lp;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 04fb8b0ca3e6..e012c2e0825a 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1741,6 +1741,7 @@ config KS8851
config KS8851_MLL
tristate "Micrel KS8851 MLL"
depends on HAS_IOMEM
+ select MII
help
This platform driver is for Micrel KS8851 Address/data bus
multiplexed network chip.
@@ -2482,6 +2483,8 @@ config S6GMAC
To compile this driver as a module, choose M here. The module
will be called s6gmac.
+source "drivers/net/stmmac/Kconfig"
+
endif # NETDEV_1000
#
@@ -3232,7 +3235,7 @@ config VIRTIO_NET
config VMXNET3
tristate "VMware VMXNET3 ethernet driver"
- depends on PCI && X86
+ depends on PCI && X86 && INET
help
This driver supports VMware's vmxnet3 virtual ethernet NIC.
To compile this driver as a module, choose M here: the
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index fc6c8bb92c50..246323d7f161 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -100,6 +100,7 @@ obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
obj-$(CONFIG_RIONET) += rionet.o
obj-$(CONFIG_SH_ETH) += sh_eth.o
+obj-$(CONFIG_STMMAC_ETH) += stmmac/
#
# end link order section
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 04f63c77071d..ce6f1ac25df8 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -34,6 +34,7 @@
*
*
*/
+#include <linux/capability.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 25b6602e464c..827d86b5e70b 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -243,15 +243,26 @@ static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
int be_cmd_POST(struct be_adapter *adapter)
{
- u16 stage, error;
+ u16 stage;
+ int status, timeout = 0;
- error = be_POST_stage_get(adapter, &stage);
- if (error || stage != POST_STAGE_ARMFW_RDY) {
- dev_err(&adapter->pdev->dev, "POST failed.\n");
- return -1;
- }
+ do {
+ status = be_POST_stage_get(adapter, &stage);
+ if (status) {
+ dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
+ stage);
+ return -1;
+ } else if (stage != POST_STAGE_ARMFW_RDY) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(2 * HZ);
+ timeout += 2;
+ } else {
+ return 0;
+ }
+ } while (timeout < 20);
- return 0;
+ dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
+ return -1;
}
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
@@ -729,8 +740,8 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
/* Create an rx filtering policy configuration on an i/f
* Uses mbox
*/
-int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac,
- bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
+int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
+ u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_if_create *req;
@@ -746,8 +757,8 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac,
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
- req->capability_flags = cpu_to_le32(flags);
- req->enable_flags = cpu_to_le32(flags);
+ req->capability_flags = cpu_to_le32(cap_flags);
+ req->enable_flags = cpu_to_le32(en_flags);
req->pmac_invalid = pmac_invalid;
if (!pmac_invalid)
memcpy(req->mac_addr, mac, ETH_ALEN);
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index a1e78cc3e171..fe9f535eff12 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -753,8 +753,9 @@ extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
u32 if_id, u32 *pmac_id);
extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id);
-extern int be_cmd_if_create(struct be_adapter *adapter, u32 if_flags, u8 *mac,
- bool pmac_invalid, u32 *if_handle, u32 *pmac_id);
+extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
+ u32 en_flags, u8 *mac, bool pmac_invalid,
+ u32 *if_handle, u32 *pmac_id);
extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle);
extern int be_cmd_eq_create(struct be_adapter *adapter,
struct be_queue_info *eq, int eq_delay);
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index e0f9d6477184..21b0657de9e8 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -1616,19 +1616,22 @@ static int be_open(struct net_device *netdev)
static int be_setup(struct be_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- u32 if_flags;
+ u32 cap_flags, en_flags;
int status;
- if_flags = BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PROMISCUOUS |
- BE_IF_FLAGS_MCAST_PROMISCUOUS | BE_IF_FLAGS_UNTAGGED |
- BE_IF_FLAGS_PASS_L3L4_ERRORS;
- status = be_cmd_if_create(adapter, if_flags, netdev->dev_addr,
- false/* pmac_invalid */, &adapter->if_handle,
- &adapter->pmac_id);
+ cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MCAST_PROMISCUOUS |
+ BE_IF_FLAGS_PROMISCUOUS |
+ BE_IF_FLAGS_PASS_L3L4_ERRORS;
+ en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_PASS_L3L4_ERRORS;
+
+ status = be_cmd_if_create(adapter, cap_flags, en_flags,
+ netdev->dev_addr, false/* pmac_invalid */,
+ &adapter->if_handle, &adapter->pmac_id);
if (status != 0)
goto do_none;
-
status = be_tx_queues_create(adapter);
if (status != 0)
goto if_destroy;
@@ -2051,6 +2054,10 @@ static int be_hw_up(struct be_adapter *adapter)
if (status)
return status;
+ status = be_cmd_reset_function(adapter);
+ if (status)
+ return status;
+
status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
if (status)
return status;
@@ -2104,10 +2111,6 @@ static int __devinit be_probe(struct pci_dev *pdev,
if (status)
goto free_netdev;
- status = be_cmd_reset_function(adapter);
- if (status)
- goto ctrl_clean;
-
status = be_stats_init(adapter);
if (status)
goto ctrl_clean;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index feb03ad0d803..3adbeed2c057 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3704,10 +3704,10 @@ static int bond_xmit_hash_policy_l23(struct sk_buff *skb,
if (skb->protocol == htons(ETH_P_IP)) {
return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
- (data->h_dest[5] ^ bond_dev->dev_addr[5])) % count;
+ (data->h_dest[5] ^ data->h_source[5])) % count;
}
- return (data->h_dest[5] ^ bond_dev->dev_addr[5]) % count;
+ return (data->h_dest[5] ^ data->h_source[5]) % count;
}
/*
@@ -3734,7 +3734,7 @@ static int bond_xmit_hash_policy_l34(struct sk_buff *skb,
}
- return (data->h_dest[5] ^ bond_dev->dev_addr[5]) % count;
+ return (data->h_dest[5] ^ data->h_source[5]) % count;
}
/*
@@ -3745,7 +3745,7 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb,
{
struct ethhdr *data = (struct ethhdr *)skb->data;
- return (data->h_dest[5] ^ bond_dev->dev_addr[5]) % count;
+ return (data->h_dest[5] ^ data->h_source[5]) % count;
}
/*-------------------------- Device entry points ----------------------------*/
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 3373560405ba..9dd076a626a5 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -213,6 +213,7 @@ static struct of_device_id __devinitdata sja1000_ofp_table[] = {
{.compatible = "nxp,sja1000"},
{},
};
+MODULE_DEVICE_TABLE(of, sja1000_ofp_table);
static struct of_platform_driver sja1000_ofp_driver = {
.owner = THIS_MODULE,
diff --git a/drivers/net/dm9000.h b/drivers/net/dm9000.h
index 80817c2edfb3..fb1c924d79b4 100644
--- a/drivers/net/dm9000.h
+++ b/drivers/net/dm9000.h
@@ -50,7 +50,7 @@
#define DM9000_RCSR 0x32
#define CHIPR_DM9000A 0x19
-#define CHIPR_DM9000B 0x1B
+#define CHIPR_DM9000B 0x1A
#define DM9000_MRCMDX 0xF0
#define DM9000_MRCMD 0xF2
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 1211df9ae883..08a4f9dd20e9 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -518,9 +518,13 @@ extern s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw);
extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw);
extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw);
extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
+ u16 *data);
extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw);
extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active);
extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
+ u16 data);
extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw);
extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw);
extern s32 e1000e_get_cfg_done(struct e1000_hw *hw);
@@ -537,7 +541,11 @@ extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
+ u16 data);
extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
+ u16 *data);
extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
u32 usec_interval, bool *success);
extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
@@ -545,7 +553,11 @@ extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
extern s32 e1000e_check_downshift(struct e1000_hw *hw);
extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
+ u16 *data);
extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
+ u16 data);
extern s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow);
extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index fd44d9f90769..7b05cf47f7f5 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -764,11 +764,13 @@ struct e1000_phy_operations {
s32 (*get_cable_length)(struct e1000_hw *);
s32 (*get_phy_info)(struct e1000_hw *);
s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
+ s32 (*read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
void (*release_phy)(struct e1000_hw *);
s32 (*reset_phy)(struct e1000_hw *);
s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
s32 (*write_phy_reg)(struct e1000_hw *, u32, u16);
+ s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
s32 (*cfg_on_link_up)(struct e1000_hw *);
};
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 99df2abf82a9..b6388b9535fd 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -122,6 +122,13 @@
#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */
+#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */
+
+/* OEM Bits Phy Register */
+#define HV_OEM_BITS PHY_REG(768, 25)
+#define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */
+#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */
+
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
@@ -200,6 +207,7 @@ static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
+static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
{
@@ -242,7 +250,11 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
phy->ops.read_phy_reg = e1000_read_phy_reg_hv;
+ phy->ops.read_phy_reg_locked = e1000_read_phy_reg_hv_locked;
+ phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
+ phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
phy->ops.write_phy_reg = e1000_write_phy_reg_hv;
+ phy->ops.write_phy_reg_locked = e1000_write_phy_reg_hv_locked;
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
phy->id = e1000_phy_unknown;
@@ -303,6 +315,8 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
case IGP03E1000_E_PHY_ID:
phy->type = e1000_phy_igp_3;
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->ops.read_phy_reg_locked = e1000e_read_phy_reg_igp_locked;
+ phy->ops.write_phy_reg_locked = e1000e_write_phy_reg_igp_locked;
break;
case IFE_E_PHY_ID:
case IFE_PLUS_E_PHY_ID:
@@ -568,12 +582,39 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
static DEFINE_MUTEX(nvm_mutex);
/**
+ * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
+ * @hw: pointer to the HW structure
+ *
+ * Acquires the mutex for performing NVM operations.
+ **/
+static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
+{
+ mutex_lock(&nvm_mutex);
+
+ return 0;
+}
+
+/**
+ * e1000_release_nvm_ich8lan - Release NVM mutex
+ * @hw: pointer to the HW structure
+ *
+ * Releases the mutex used while performing NVM operations.
+ **/
+static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
+{
+ mutex_unlock(&nvm_mutex);
+
+ return;
+}
+
+static DEFINE_MUTEX(swflag_mutex);
+
+/**
* e1000_acquire_swflag_ich8lan - Acquire software control flag
* @hw: pointer to the HW structure
*
- * Acquires the software control flag for performing NVM and PHY
- * operations. This is a function pointer entry point only called by
- * read/write routines for the PHY and NVM parts.
+ * Acquires the software control flag for performing PHY and select
+ * MAC CSR accesses.
**/
static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
{
@@ -582,7 +623,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
might_sleep();
- mutex_lock(&nvm_mutex);
+ mutex_lock(&swflag_mutex);
while (timeout) {
extcnf_ctrl = er32(EXTCNF_CTRL);
@@ -599,7 +640,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
goto out;
}
- timeout = PHY_CFG_TIMEOUT * 2;
+ timeout = SW_FLAG_TIMEOUT;
extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
ew32(EXTCNF_CTRL, extcnf_ctrl);
@@ -623,7 +664,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
out:
if (ret_val)
- mutex_unlock(&nvm_mutex);
+ mutex_unlock(&swflag_mutex);
return ret_val;
}
@@ -632,9 +673,8 @@ out:
* e1000_release_swflag_ich8lan - Release software control flag
* @hw: pointer to the HW structure
*
- * Releases the software control flag for performing NVM and PHY operations.
- * This is a function pointer entry point only called by read/write
- * routines for the PHY and NVM parts.
+ * Releases the software control flag for performing PHY and select
+ * MAC CSR accesses.
**/
static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
{
@@ -644,7 +684,9 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
ew32(EXTCNF_CTRL, extcnf_ctrl);
- mutex_unlock(&nvm_mutex);
+ mutex_unlock(&swflag_mutex);
+
+ return;
}
/**
@@ -844,7 +886,7 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
u32 i;
u32 data, cnf_size, cnf_base_addr, sw_cfg_mask;
s32 ret_val;
- u16 word_addr, reg_data, reg_addr, phy_page = 0;
+ u16 reg, word_addr, reg_data, reg_addr, phy_page = 0;
ret_val = e1000e_phy_hw_reset_generic(hw);
if (ret_val)
@@ -859,6 +901,10 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
return ret_val;
}
+ /* Dummy read to clear the phy wakeup bit after lcd reset */
+ if (hw->mac.type == e1000_pchlan)
+ e1e_rphy(hw, BM_WUC, &reg);
+
/*
* Initialize the PHY from the NVM on ICH platforms. This
* is needed due to an issue where the NVM configuration is
@@ -1054,6 +1100,38 @@ static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw)
}
/**
+ * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU state according to the active flag. For PCH, if OEM write
+ * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
+ * the phy speed. This function will manually set the LPLU bit and restart
+ * auto-neg as hw would do. D3 and D0 LPLU will call the same function
+ * since it configures the same bit.
+ **/
+static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
+{
+ s32 ret_val = 0;
+ u16 oem_reg;
+
+ ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg);
+ if (ret_val)
+ goto out;
+
+ if (active)
+ oem_reg |= HV_OEM_BITS_LPLU;
+ else
+ oem_reg &= ~HV_OEM_BITS_LPLU;
+
+ oem_reg |= HV_OEM_BITS_RESTART_AN;
+ ret_val = e1e_wphy(hw, HV_OEM_BITS, oem_reg);
+
+out:
+ return ret_val;
+}
+
+/**
* e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
* @hw: pointer to the HW structure
* @active: TRUE to enable LPLU, FALSE to disable
@@ -1314,12 +1392,11 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
(words == 0)) {
hw_dbg(hw, "nvm parameter(s) out of bounds\n");
- return -E1000_ERR_NVM;
+ ret_val = -E1000_ERR_NVM;
+ goto out;
}
- ret_val = e1000_acquire_swflag_ich8lan(hw);
- if (ret_val)
- goto out;
+ nvm->ops.acquire_nvm(hw);
ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
if (ret_val) {
@@ -1345,7 +1422,7 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
}
}
- e1000_release_swflag_ich8lan(hw);
+ nvm->ops.release_nvm(hw);
out:
if (ret_val)
@@ -1603,11 +1680,15 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
return -E1000_ERR_NVM;
}
+ nvm->ops.acquire_nvm(hw);
+
for (i = 0; i < words; i++) {
dev_spec->shadow_ram[offset+i].modified = 1;
dev_spec->shadow_ram[offset+i].value = data[i];
}
+ nvm->ops.release_nvm(hw);
+
return 0;
}
@@ -1637,9 +1718,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
if (nvm->type != e1000_nvm_flash_sw)
goto out;
- ret_val = e1000_acquire_swflag_ich8lan(hw);
- if (ret_val)
- goto out;
+ nvm->ops.acquire_nvm(hw);
/*
* We're writing to the opposite bank so if we're on bank 1,
@@ -1657,7 +1736,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
old_bank_offset = 0;
ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
if (ret_val) {
- e1000_release_swflag_ich8lan(hw);
+ nvm->ops.release_nvm(hw);
goto out;
}
} else {
@@ -1665,7 +1744,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
new_bank_offset = 0;
ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
if (ret_val) {
- e1000_release_swflag_ich8lan(hw);
+ nvm->ops.release_nvm(hw);
goto out;
}
}
@@ -1723,7 +1802,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
if (ret_val) {
/* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
hw_dbg(hw, "Flash commit failed.\n");
- e1000_release_swflag_ich8lan(hw);
+ nvm->ops.release_nvm(hw);
goto out;
}
@@ -1736,7 +1815,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
if (ret_val) {
- e1000_release_swflag_ich8lan(hw);
+ nvm->ops.release_nvm(hw);
goto out;
}
data &= 0xBFFF;
@@ -1744,7 +1823,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
act_offset * 2 + 1,
(u8)(data >> 8));
if (ret_val) {
- e1000_release_swflag_ich8lan(hw);
+ nvm->ops.release_nvm(hw);
goto out;
}
@@ -1757,7 +1836,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
if (ret_val) {
- e1000_release_swflag_ich8lan(hw);
+ nvm->ops.release_nvm(hw);
goto out;
}
@@ -1767,7 +1846,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
dev_spec->shadow_ram[i].value = 0xFFFF;
}
- e1000_release_swflag_ich8lan(hw);
+ nvm->ops.release_nvm(hw);
/*
* Reload the EEPROM, or else modifications will not appear
@@ -1831,14 +1910,12 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
**/
void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
{
+ struct e1000_nvm_info *nvm = &hw->nvm;
union ich8_flash_protected_range pr0;
union ich8_hws_flash_status hsfsts;
u32 gfpreg;
- s32 ret_val;
- ret_val = e1000_acquire_swflag_ich8lan(hw);
- if (ret_val)
- return;
+ nvm->ops.acquire_nvm(hw);
gfpreg = er32flash(ICH_FLASH_GFPREG);
@@ -1859,7 +1936,7 @@ void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
hsfsts.hsf_status.flockdn = true;
ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);
- e1000_release_swflag_ich8lan(hw);
+ nvm->ops.release_nvm(hw);
}
/**
@@ -2229,6 +2306,7 @@ static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
**/
static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
{
+ u16 reg;
u32 ctrl, icr, kab;
s32 ret_val;
@@ -2304,6 +2382,9 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
hw_dbg(hw, "Auto Read Done did not complete\n");
}
}
+ /* Dummy read to clear the phy wakeup bit after lcd reset */
+ if (hw->mac.type == e1000_pchlan)
+ e1e_rphy(hw, BM_WUC, &reg);
/*
* For PCH, this write will make sure that any noise
@@ -2843,9 +2924,8 @@ void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
E1000_PHY_CTRL_GBE_DISABLE;
ew32(PHY_CTRL, phy_ctrl);
- /* Workaround SWFLAG unexpectedly set during S0->Sx */
if (hw->mac.type == e1000_pchlan)
- udelay(500);
+ e1000_phy_hw_reset_ich8lan(hw);
default:
break;
}
@@ -3113,9 +3193,9 @@ static struct e1000_phy_operations ich8_phy_ops = {
};
static struct e1000_nvm_operations ich8_nvm_ops = {
- .acquire_nvm = e1000_acquire_swflag_ich8lan,
+ .acquire_nvm = e1000_acquire_nvm_ich8lan,
.read_nvm = e1000_read_nvm_ich8lan,
- .release_nvm = e1000_release_swflag_ich8lan,
+ .release_nvm = e1000_release_nvm_ich8lan,
.update_nvm = e1000_update_nvm_checksum_ich8lan,
.valid_led_default = e1000_valid_led_default_ich8lan,
.validate_nvm = e1000_validate_nvm_checksum_ich8lan,
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 994401fd0664..f9d33ab05e97 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -164,16 +164,25 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw)
* MDIC mode. No harm in trying again in this case since
* the PHY ID is unknown at this point anyway
*/
+ ret_val = phy->ops.acquire_phy(hw);
+ if (ret_val)
+ goto out;
ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
if (ret_val)
goto out;
+ phy->ops.release_phy(hw);
retry_count++;
}
out:
/* Revert to MDIO fast mode, if applicable */
- if (retry_count)
+ if (retry_count) {
+ ret_val = phy->ops.acquire_phy(hw);
+ if (ret_val)
+ return ret_val;
ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
+ phy->ops.release_phy(hw);
+ }
return ret_val;
}
@@ -354,94 +363,173 @@ s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
}
/**
- * e1000e_read_phy_reg_igp - Read igp PHY register
+ * __e1000e_read_phy_reg_igp - Read igp PHY register
* @hw: pointer to the HW structure
* @offset: register offset to be read
* @data: pointer to the read data
+ * @locked: semaphore has already been acquired or not
*
* Acquires semaphore, if necessary, then reads the PHY register at offset
- * and storing the retrieved information in data. Release any acquired
+ * and stores the retrieved information in data. Release any acquired
* semaphores before exiting.
**/
-s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
+static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
+ bool locked)
{
- s32 ret_val;
+ s32 ret_val = 0;
- ret_val = hw->phy.ops.acquire_phy(hw);
- if (ret_val)
- return ret_val;
+ if (!locked) {
+ if (!(hw->phy.ops.acquire_phy))
+ goto out;
+
+ ret_val = hw->phy.ops.acquire_phy(hw);
+ if (ret_val)
+ goto out;
+ }
if (offset > MAX_PHY_MULTI_PAGE_REG) {
ret_val = e1000e_write_phy_reg_mdic(hw,
IGP01E1000_PHY_PAGE_SELECT,
(u16)offset);
- if (ret_val) {
- hw->phy.ops.release_phy(hw);
- return ret_val;
- }
+ if (ret_val)
+ goto release;
}
ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
- data);
-
- hw->phy.ops.release_phy(hw);
+ data);
+release:
+ if (!locked)
+ hw->phy.ops.release_phy(hw);
+out:
return ret_val;
}
/**
+ * e1000e_read_phy_reg_igp - Read igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore then reads the PHY register at offset and stores the
+ * retrieved information in data.
+ * Release the acquired semaphore before exiting.
+ **/
+s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000e_read_phy_reg_igp(hw, offset, data, false);
+}
+
+/**
+ * e1000e_read_phy_reg_igp_locked - Read igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset and stores the retrieved information
+ * in data. Assumes semaphore already acquired.
+ **/
+s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000e_read_phy_reg_igp(hw, offset, data, true);
+}
+
+/**
* e1000e_write_phy_reg_igp - Write igp PHY register
* @hw: pointer to the HW structure
* @offset: register offset to write to
* @data: data to write at register offset
+ * @locked: semaphore has already been acquired or not
*
* Acquires semaphore, if necessary, then writes the data to PHY register
* at the offset. Release any acquired semaphores before exiting.
**/
-s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
+static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
+ bool locked)
{
- s32 ret_val;
+ s32 ret_val = 0;
- ret_val = hw->phy.ops.acquire_phy(hw);
- if (ret_val)
- return ret_val;
+ if (!locked) {
+ if (!(hw->phy.ops.acquire_phy))
+ goto out;
+
+ ret_val = hw->phy.ops.acquire_phy(hw);
+ if (ret_val)
+ goto out;
+ }
if (offset > MAX_PHY_MULTI_PAGE_REG) {
ret_val = e1000e_write_phy_reg_mdic(hw,
IGP01E1000_PHY_PAGE_SELECT,
(u16)offset);
- if (ret_val) {
- hw->phy.ops.release_phy(hw);
- return ret_val;
- }
+ if (ret_val)
+ goto release;
}
ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
data);
- hw->phy.ops.release_phy(hw);
+release:
+ if (!locked)
+ hw->phy.ops.release_phy(hw);
+out:
return ret_val;
}
/**
- * e1000e_read_kmrn_reg - Read kumeran register
+ * e1000e_write_phy_reg_igp - Write igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000e_write_phy_reg_igp(hw, offset, data, false);
+}
+
+/**
+ * e1000e_write_phy_reg_igp_locked - Write igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset.
+ * Assumes semaphore already acquired.
+ **/
+s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000e_write_phy_reg_igp(hw, offset, data, true);
+}
+
+/**
+ * __e1000_read_kmrn_reg - Read kumeran register
* @hw: pointer to the HW structure
* @offset: register offset to be read
* @data: pointer to the read data
+ * @locked: semaphore has already been acquired or not
*
* Acquires semaphore, if necessary. Then reads the PHY register at offset
* using the kumeran interface. The information retrieved is stored in data.
* Release any acquired semaphores before exiting.
**/
-s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
+ bool locked)
{
u32 kmrnctrlsta;
- s32 ret_val;
+ s32 ret_val = 0;
- ret_val = hw->phy.ops.acquire_phy(hw);
- if (ret_val)
- return ret_val;
+ if (!locked) {
+ if (!(hw->phy.ops.acquire_phy))
+ goto out;
+
+ ret_val = hw->phy.ops.acquire_phy(hw);
+ if (ret_val)
+ goto out;
+ }
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
@@ -452,41 +540,111 @@ s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
kmrnctrlsta = er32(KMRNCTRLSTA);
*data = (u16)kmrnctrlsta;
- hw->phy.ops.release_phy(hw);
+ if (!locked)
+ hw->phy.ops.release_phy(hw);
+out:
return ret_val;
}
/**
- * e1000e_write_kmrn_reg - Write kumeran register
+ * e1000e_read_kmrn_reg - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore then reads the PHY register at offset using the
+ * kumeran interface. The information retrieved is stored in data.
+ * Release the acquired semaphore before exiting.
+ **/
+s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_kmrn_reg(hw, offset, data, false);
+}
+
+/**
+ * e1000_read_kmrn_reg_locked - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset using the kumeran interface. The
+ * information retrieved is stored in data.
+ * Assumes semaphore already acquired.
+ **/
+s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_kmrn_reg(hw, offset, data, true);
+}
+
+/**
+ * __e1000_write_kmrn_reg - Write kumeran register
* @hw: pointer to the HW structure
* @offset: register offset to write to
* @data: data to write at register offset
+ * @locked: semaphore has already been acquired or not
*
* Acquires semaphore, if necessary. Then write the data to PHY register
* at the offset using the kumeran interface. Release any acquired semaphores
* before exiting.
**/
-s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
+static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
+ bool locked)
{
u32 kmrnctrlsta;
- s32 ret_val;
+ s32 ret_val = 0;
- ret_val = hw->phy.ops.acquire_phy(hw);
- if (ret_val)
- return ret_val;
+ if (!locked) {
+ if (!(hw->phy.ops.acquire_phy))
+ goto out;
+
+ ret_val = hw->phy.ops.acquire_phy(hw);
+ if (ret_val)
+ goto out;
+ }
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
E1000_KMRNCTRLSTA_OFFSET) | data;
ew32(KMRNCTRLSTA, kmrnctrlsta);
udelay(2);
- hw->phy.ops.release_phy(hw);
+ if (!locked)
+ hw->phy.ops.release_phy(hw);
+
+out:
return ret_val;
}
/**
+ * e1000e_write_kmrn_reg - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore then writes the data to the PHY register at the offset
+ * using the kumeran interface. Release the acquired semaphore before exiting.
+ **/
+s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_kmrn_reg(hw, offset, data, false);
+}
+
+/**
+ * e1000_write_kmrn_reg_locked - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Write the data to PHY register at the offset using the kumeran interface.
+ * Assumes semaphore already acquired.
+ **/
+s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_kmrn_reg(hw, offset, data, true);
+}
+
+/**
* e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link
* @hw: pointer to the HW structure
*
@@ -2105,6 +2263,10 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
u32 page = offset >> IGP_PAGE_SHIFT;
u32 page_shift = 0;
+ ret_val = hw->phy.ops.acquire_phy(hw);
+ if (ret_val)
+ return ret_val;
+
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
@@ -2112,10 +2274,6 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
goto out;
}
- ret_val = hw->phy.ops.acquire_phy(hw);
- if (ret_val)
- goto out;
-
hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
if (offset > MAX_PHY_MULTI_PAGE_REG) {
@@ -2135,18 +2293,15 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
/* Page is shifted left, PHY expects (page x 32) */
ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
(page << page_shift));
- if (ret_val) {
- hw->phy.ops.release_phy(hw);
+ if (ret_val)
goto out;
- }
}
ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
data);
- hw->phy.ops.release_phy(hw);
-
out:
+ hw->phy.ops.release_phy(hw);
return ret_val;
}
@@ -2167,6 +2322,10 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
u32 page = offset >> IGP_PAGE_SHIFT;
u32 page_shift = 0;
+ ret_val = hw->phy.ops.acquire_phy(hw);
+ if (ret_val)
+ return ret_val;
+
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
@@ -2174,10 +2333,6 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
goto out;
}
- ret_val = hw->phy.ops.acquire_phy(hw);
- if (ret_val)
- goto out;
-
hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
if (offset > MAX_PHY_MULTI_PAGE_REG) {
@@ -2197,17 +2352,14 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
/* Page is shifted left, PHY expects (page x 32) */
ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
(page << page_shift));
- if (ret_val) {
- hw->phy.ops.release_phy(hw);
+ if (ret_val)
goto out;
- }
}
ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
data);
- hw->phy.ops.release_phy(hw);
-
out:
+ hw->phy.ops.release_phy(hw);
return ret_val;
}
@@ -2226,17 +2378,17 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
s32 ret_val;
u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
+ ret_val = hw->phy.ops.acquire_phy(hw);
+ if (ret_val)
+ return ret_val;
+
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
true);
- return ret_val;
+ goto out;
}
- ret_val = hw->phy.ops.acquire_phy(hw);
- if (ret_val)
- return ret_val;
-
hw->phy.addr = 1;
if (offset > MAX_PHY_MULTI_PAGE_REG) {
@@ -2245,16 +2397,14 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
page);
- if (ret_val) {
- hw->phy.ops.release_phy(hw);
- return ret_val;
- }
+ if (ret_val)
+ goto out;
}
ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
data);
+out:
hw->phy.ops.release_phy(hw);
-
return ret_val;
}
@@ -2272,17 +2422,17 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
s32 ret_val;
u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
+ ret_val = hw->phy.ops.acquire_phy(hw);
+ if (ret_val)
+ return ret_val;
+
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
false);
- return ret_val;
+ goto out;
}
- ret_val = hw->phy.ops.acquire_phy(hw);
- if (ret_val)
- return ret_val;
-
hw->phy.addr = 1;
if (offset > MAX_PHY_MULTI_PAGE_REG) {
@@ -2290,17 +2440,15 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
page);
- if (ret_val) {
- hw->phy.ops.release_phy(hw);
- return ret_val;
- }
+ if (ret_val)
+ goto out;
}
ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
data);
+out:
hw->phy.ops.release_phy(hw);
-
return ret_val;
}
@@ -2320,6 +2468,8 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
* 3) Write the address using the address opcode (0x11)
* 4) Read or write the data using the data opcode (0x12)
* 5) Restore 769_17.2 to its original value
+ *
+ * Assumes semaphore already acquired.
**/
static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
u16 *data, bool read)
@@ -2327,20 +2477,12 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
s32 ret_val;
u16 reg = BM_PHY_REG_NUM(offset);
u16 phy_reg = 0;
- u8 phy_acquired = 1;
-
/* Gig must be disabled for MDIO accesses to page 800 */
if ((hw->mac.type == e1000_pchlan) &&
(!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)))
hw_dbg(hw, "Attempting to access page 800 while gig enabled\n");
- ret_val = hw->phy.ops.acquire_phy(hw);
- if (ret_val) {
- phy_acquired = 0;
- goto out;
- }
-
/* All operations in this function are phy address 1 */
hw->phy.addr = 1;
@@ -2397,8 +2539,6 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
out:
- if (phy_acquired == 1)
- hw->phy.ops.release_phy(hw);
return ret_val;
}
@@ -2439,52 +2579,63 @@ static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
return 0;
}
+/**
+ * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
+ * @hw: pointer to the HW structure
+ * @slow: true for slow mode, false for normal mode
+ *
+ * Assumes semaphore already acquired.
+ **/
s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow)
{
s32 ret_val = 0;
u16 data = 0;
- ret_val = hw->phy.ops.acquire_phy(hw);
- if (ret_val)
- return ret_val;
-
/* Set MDIO mode - page 769, register 16: 0x2580==slow, 0x2180==fast */
hw->phy.addr = 1;
ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
(BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
- if (ret_val) {
- hw->phy.ops.release_phy(hw);
- return ret_val;
- }
+ if (ret_val)
+ goto out;
+
ret_val = e1000e_write_phy_reg_mdic(hw, BM_CS_CTRL1,
(0x2180 | (slow << 10)));
+ if (ret_val)
+ goto out;
/* dummy read when reverting to fast mode - throw away result */
if (!slow)
- e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data);
-
- hw->phy.ops.release_phy(hw);
+ ret_val = e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data);
+out:
return ret_val;
}
/**
- * e1000_read_phy_reg_hv - Read HV PHY register
+ * __e1000_read_phy_reg_hv - Read HV PHY register
* @hw: pointer to the HW structure
* @offset: register offset to be read
* @data: pointer to the read data
+ * @locked: semaphore has already been acquired or not
*
* Acquires semaphore, if necessary, then reads the PHY register at offset
- * and storing the retrieved information in data. Release any acquired
+ * and stores the retrieved information in data. Release any acquired
* semaphore before exiting.
**/
-s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
+static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
+ bool locked)
{
s32 ret_val;
u16 page = BM_PHY_REG_PAGE(offset);
u16 reg = BM_PHY_REG_NUM(offset);
bool in_slow_mode = false;
+ if (!locked) {
+ ret_val = hw->phy.ops.acquire_phy(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
/* Workaround failure in MDIO access while cable is disconnected */
if ((hw->phy.type == e1000_phy_82577) &&
!(er32(STATUS) & E1000_STATUS_LU)) {
@@ -2508,10 +2659,6 @@ s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
goto out;
}
- ret_val = hw->phy.ops.acquire_phy(hw);
- if (ret_val)
- goto out;
-
hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
if (page == HV_INTC_FC_PAGE_START)
@@ -2529,42 +2676,76 @@ s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
ret_val = e1000e_write_phy_reg_mdic(hw,
IGP01E1000_PHY_PAGE_SELECT,
(page << IGP_PAGE_SHIFT));
- if (ret_val) {
- hw->phy.ops.release_phy(hw);
- goto out;
- }
hw->phy.addr = phy_addr;
}
}
ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
data);
- hw->phy.ops.release_phy(hw);
-
out:
/* Revert to MDIO fast mode, if applicable */
if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
+ if (!locked)
+ hw->phy.ops.release_phy(hw);
+
return ret_val;
}
/**
- * e1000_write_phy_reg_hv - Write HV PHY register
+ * e1000_read_phy_reg_hv - Read HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore then reads the PHY register at offset and stores
+ * the retrieved information in data. Release the acquired semaphore
+ * before exiting.
+ **/
+s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_phy_reg_hv(hw, offset, data, false);
+}
+
+/**
+ * e1000_read_phy_reg_hv_locked - Read HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset and stores the retrieved information
+ * in data. Assumes semaphore already acquired.
+ **/
+s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_phy_reg_hv(hw, offset, data, true);
+}
+
+/**
+ * __e1000_write_phy_reg_hv - Write HV PHY register
* @hw: pointer to the HW structure
* @offset: register offset to write to
* @data: data to write at register offset
+ * @locked: semaphore has already been acquired or not
*
* Acquires semaphore, if necessary, then writes the data to PHY register
* at the offset. Release any acquired semaphores before exiting.
**/
-s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
+static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
+ bool locked)
{
s32 ret_val;
u16 page = BM_PHY_REG_PAGE(offset);
u16 reg = BM_PHY_REG_NUM(offset);
bool in_slow_mode = false;
+ if (!locked) {
+ ret_val = hw->phy.ops.acquire_phy(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
/* Workaround failure in MDIO access while cable is disconnected */
if ((hw->phy.type == e1000_phy_82577) &&
!(er32(STATUS) & E1000_STATUS_LU)) {
@@ -2588,10 +2769,6 @@ s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
goto out;
}
- ret_val = hw->phy.ops.acquire_phy(hw);
- if (ret_val)
- goto out;
-
hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
if (page == HV_INTC_FC_PAGE_START)
@@ -2607,15 +2784,10 @@ s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
((MAX_PHY_REG_ADDRESS & reg) == 0) &&
(data & (1 << 11))) {
u16 data2 = 0x7EFF;
- hw->phy.ops.release_phy(hw);
ret_val = e1000_access_phy_debug_regs_hv(hw, (1 << 6) | 0x3,
&data2, false);
if (ret_val)
goto out;
-
- ret_val = hw->phy.ops.acquire_phy(hw);
- if (ret_val)
- goto out;
}
if (reg > MAX_PHY_MULTI_PAGE_REG) {
@@ -2630,27 +2802,53 @@ s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
ret_val = e1000e_write_phy_reg_mdic(hw,
IGP01E1000_PHY_PAGE_SELECT,
(page << IGP_PAGE_SHIFT));
- if (ret_val) {
- hw->phy.ops.release_phy(hw);
- goto out;
- }
hw->phy.addr = phy_addr;
}
}
ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
data);
- hw->phy.ops.release_phy(hw);
out:
/* Revert to MDIO fast mode, if applicable */
if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
+ if (!locked)
+ hw->phy.ops.release_phy(hw);
+
return ret_val;
}
/**
+ * e1000_write_phy_reg_hv - Write HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore then writes the data to PHY register at the offset.
+ * Release the acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_phy_reg_hv(hw, offset, data, false);
+}
+
+/**
+ * e1000_write_phy_reg_hv_locked - Write HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset. Assumes semaphore
+ * already acquired.
+ **/
+s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_phy_reg_hv(hw, offset, data, true);
+}
+
+/**
* e1000_get_phy_addr_for_hv_page - Get PHY adrress based on page
* @page: page to be accessed
**/
@@ -2671,10 +2869,9 @@ static u32 e1000_get_phy_addr_for_hv_page(u32 page)
* @data: pointer to the data to be read or written
* @read: determines if operation is read or written
*
- * Acquires semaphore, if necessary, then reads the PHY register at offset
- * and storing the retreived information in data. Release any acquired
- * semaphores before exiting. Note that the procedure to read these regs
- * uses the address port and data port to read/write.
+ * Reads the PHY register at offset and stores the retreived information
+ * in data. Assumes semaphore already acquired. Note that the procedure
+ * to read these regs uses the address port and data port to read/write.
**/
static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
u16 *data, bool read)
@@ -2682,20 +2879,12 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
s32 ret_val;
u32 addr_reg = 0;
u32 data_reg = 0;
- u8 phy_acquired = 1;
/* This takes care of the difference with desktop vs mobile phy */
addr_reg = (hw->phy.type == e1000_phy_82578) ?
I82578_ADDR_REG : I82577_ADDR_REG;
data_reg = addr_reg + 1;
- ret_val = hw->phy.ops.acquire_phy(hw);
- if (ret_val) {
- hw_dbg(hw, "Could not acquire PHY\n");
- phy_acquired = 0;
- goto out;
- }
-
/* All operations in this function are phy address 2 */
hw->phy.addr = 2;
@@ -2718,8 +2907,6 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
}
out:
- if (phy_acquired == 1)
- hw->phy.ops.release_phy(hw);
return ret_val;
}
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index a25f8ed8109d..f1c565282d58 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -222,24 +222,25 @@ struct ethoc_bd {
u32 addr;
};
-static u32 ethoc_read(struct ethoc *dev, loff_t offset)
+static inline u32 ethoc_read(struct ethoc *dev, loff_t offset)
{
return ioread32(dev->iobase + offset);
}
-static void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
+static inline void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
{
iowrite32(data, dev->iobase + offset);
}
-static void ethoc_read_bd(struct ethoc *dev, int index, struct ethoc_bd *bd)
+static inline void ethoc_read_bd(struct ethoc *dev, int index,
+ struct ethoc_bd *bd)
{
loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
bd->stat = ethoc_read(dev, offset + 0);
bd->addr = ethoc_read(dev, offset + 4);
}
-static void ethoc_write_bd(struct ethoc *dev, int index,
+static inline void ethoc_write_bd(struct ethoc *dev, int index,
const struct ethoc_bd *bd)
{
loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
@@ -247,33 +248,33 @@ static void ethoc_write_bd(struct ethoc *dev, int index,
ethoc_write(dev, offset + 4, bd->addr);
}
-static void ethoc_enable_irq(struct ethoc *dev, u32 mask)
+static inline void ethoc_enable_irq(struct ethoc *dev, u32 mask)
{
u32 imask = ethoc_read(dev, INT_MASK);
imask |= mask;
ethoc_write(dev, INT_MASK, imask);
}
-static void ethoc_disable_irq(struct ethoc *dev, u32 mask)
+static inline void ethoc_disable_irq(struct ethoc *dev, u32 mask)
{
u32 imask = ethoc_read(dev, INT_MASK);
imask &= ~mask;
ethoc_write(dev, INT_MASK, imask);
}
-static void ethoc_ack_irq(struct ethoc *dev, u32 mask)
+static inline void ethoc_ack_irq(struct ethoc *dev, u32 mask)
{
ethoc_write(dev, INT_SOURCE, mask);
}
-static void ethoc_enable_rx_and_tx(struct ethoc *dev)
+static inline void ethoc_enable_rx_and_tx(struct ethoc *dev)
{
u32 mode = ethoc_read(dev, MODER);
mode |= MODER_RXEN | MODER_TXEN;
ethoc_write(dev, MODER, mode);
}
-static void ethoc_disable_rx_and_tx(struct ethoc *dev)
+static inline void ethoc_disable_rx_and_tx(struct ethoc *dev)
{
u32 mode = ethoc_read(dev, MODER);
mode &= ~(MODER_RXEN | MODER_TXEN);
@@ -507,7 +508,7 @@ static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
return IRQ_NONE;
}
- ethoc_ack_irq(priv, INT_MASK_ALL);
+ ethoc_ack_irq(priv, pending);
if (pending & INT_MASK_BUSY) {
dev_err(&dev->dev, "packet dropped\n");
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 29234380e6c6..16a1d58419d9 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -1654,7 +1654,7 @@ static const struct net_device_ops fec_netdev_ops = {
*
* index is only used in legacy code
*/
-int __init fec_enet_init(struct net_device *dev, int index)
+static int fec_enet_init(struct net_device *dev, int index)
{
struct fec_enet_private *fep = netdev_priv(dev);
struct bufdesc *cbd_base;
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index c40113f58963..66dace6d324f 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -759,12 +759,6 @@ static void mpc52xx_fec_reset(struct net_device *dev)
mpc52xx_fec_hw_init(dev);
- if (priv->phydev) {
- phy_stop(priv->phydev);
- phy_write(priv->phydev, MII_BMCR, BMCR_RESET);
- phy_start(priv->phydev);
- }
-
bcom_fec_rx_reset(priv->rx_dmatsk);
bcom_fec_tx_reset(priv->tx_dmatsk);
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
index 31e6d62b785d..ee0f3c6d3f88 100644
--- a/drivers/net/fec_mpc52xx_phy.c
+++ b/drivers/net/fec_mpc52xx_phy.c
@@ -155,6 +155,7 @@ static struct of_device_id mpc52xx_fec_mdio_match[] = {
{ .compatible = "mpc5200b-fec-phy", },
{}
};
+MODULE_DEVICE_TABLE(of, mpc52xx_fec_mdio_match);
struct of_platform_driver mpc52xx_fec_mdio_driver = {
.name = "mpc5200b-fec-phy",
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 2bc2d2b20644..ec2f5034457f 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -1110,6 +1110,7 @@ static struct of_device_id fs_enet_match[] = {
#endif
{}
};
+MODULE_DEVICE_TABLE(of, fs_enet_match);
static struct of_platform_driver fs_enet_driver = {
.name = "fs_enet",
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c
index 93b481b0e3c7..24ff9f43a62b 100644
--- a/drivers/net/fs_enet/mii-bitbang.c
+++ b/drivers/net/fs_enet/mii-bitbang.c
@@ -221,6 +221,7 @@ static struct of_device_id fs_enet_mdio_bb_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, fs_enet_mdio_bb_match);
static struct of_platform_driver fs_enet_bb_mdio_driver = {
.name = "fsl-bb-mdio",
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
index a2d69c1cd07e..96eba4280c5c 100644
--- a/drivers/net/fs_enet/mii-fec.c
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -219,6 +219,7 @@ static struct of_device_id fs_enet_mdio_fec_match[] = {
#endif
{},
};
+MODULE_DEVICE_TABLE(of, fs_enet_mdio_fec_match);
static struct of_platform_driver fs_enet_fec_mdio_driver = {
.name = "fsl-fec-mdio",
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index d167090248e2..6ac464866972 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -407,6 +407,7 @@ static struct of_device_id fsl_pq_mdio_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
static struct of_platform_driver fsl_pq_mdio_driver = {
.name = "fsl-pq_mdio",
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index c6f6d3b7f4df..f7141865869d 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -2397,9 +2397,6 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:fsl-gianfar");
-
static struct of_device_id gfar_match[] =
{
{
@@ -2408,6 +2405,7 @@ static struct of_device_id gfar_match[] =
},
{},
};
+MODULE_DEVICE_TABLE(of, gfar_match);
/* Structure for a device driver */
static struct of_platform_driver gfar_driver = {
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index c5d92ec176d0..af117c626e73 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -24,6 +24,7 @@
*
*/
+#include <linux/module.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
@@ -2990,6 +2991,7 @@ static struct of_device_id emac_match[] =
},
{},
};
+MODULE_DEVICE_TABLE(of, emac_match);
static struct of_platform_driver emac_driver = {
.name = "emac",
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 801f088c134f..030913f8bd26 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -98,12 +98,13 @@ static void ri_tasklet(unsigned long dev)
stats->tx_packets++;
stats->tx_bytes +=skb->len;
- skb->dev = __dev_get_by_index(&init_net, skb->iif);
+ skb->dev = dev_get_by_index(&init_net, skb->iif);
if (!skb->dev) {
dev_kfree_skb(skb);
stats->tx_dropped++;
break;
}
+ dev_put(skb->dev);
skb->iif = _dev->ifindex;
if (from & AT_EGRESS) {
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index a6da32f25a83..dafb25bfd9e1 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -739,7 +739,7 @@ static int igb_set_ringparam(struct net_device *netdev,
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct igb_ring *temp_ring;
- int i, err;
+ int i, err = 0;
u32 new_rx_count, new_tx_count;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
@@ -759,18 +759,30 @@ static int igb_set_ringparam(struct net_device *netdev,
return 0;
}
+ while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
+ msleep(1);
+
+ if (!netif_running(adapter->netdev)) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_ring[i].count = new_tx_count;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i].count = new_rx_count;
+ adapter->tx_ring_count = new_tx_count;
+ adapter->rx_ring_count = new_rx_count;
+ goto clear_reset;
+ }
+
if (adapter->num_tx_queues > adapter->num_rx_queues)
temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring));
else
temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring));
- if (!temp_ring)
- return -ENOMEM;
- while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ if (!temp_ring) {
+ err = -ENOMEM;
+ goto clear_reset;
+ }
- if (netif_running(adapter->netdev))
- igb_down(adapter);
+ igb_down(adapter);
/*
* We can't just free everything and then setup again,
@@ -827,14 +839,11 @@ static int igb_set_ringparam(struct net_device *netdev,
adapter->rx_ring_count = new_rx_count;
}
-
- err = 0;
err_setup:
- if (netif_running(adapter->netdev))
- igb_up(adapter);
-
- clear_bit(__IGB_RESETTING, &adapter->state);
+ igb_up(adapter);
vfree(temp_ring);
+clear_reset:
+ clear_bit(__IGB_RESETTING, &adapter->state);
return err;
}
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index bc606f8b61aa..8afff07ff559 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -279,7 +279,7 @@ static int igbvf_set_ringparam(struct net_device *netdev,
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct igbvf_ring *temp_ring;
- int err;
+ int err = 0;
u32 new_rx_count, new_tx_count;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
@@ -299,15 +299,22 @@ static int igbvf_set_ringparam(struct net_device *netdev,
return 0;
}
- temp_ring = vmalloc(sizeof(struct igbvf_ring));
- if (!temp_ring)
- return -ENOMEM;
-
while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
msleep(1);
- if (netif_running(adapter->netdev))
- igbvf_down(adapter);
+ if (!netif_running(adapter->netdev)) {
+ adapter->tx_ring->count = new_tx_count;
+ adapter->rx_ring->count = new_rx_count;
+ goto clear_reset;
+ }
+
+ temp_ring = vmalloc(sizeof(struct igbvf_ring));
+ if (!temp_ring) {
+ err = -ENOMEM;
+ goto clear_reset;
+ }
+
+ igbvf_down(adapter);
/*
* We can't just free everything and then setup again,
@@ -339,14 +346,11 @@ static int igbvf_set_ringparam(struct net_device *netdev,
memcpy(adapter->rx_ring, temp_ring,sizeof(struct igbvf_ring));
}
-
- err = 0;
err_setup:
- if (netif_running(adapter->netdev))
- igbvf_up(adapter);
-
- clear_bit(__IGBVF_RESETTING, &adapter->state);
+ igbvf_up(adapter);
vfree(temp_ring);
+clear_reset:
+ clear_bit(__IGBVF_RESETTING, &adapter->state);
return err;
}
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 08eccf418c67..9d2cc833691b 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -806,7 +806,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_ring *temp_tx_ring, *temp_rx_ring;
- int i, err;
+ int i, err = 0;
u32 new_rx_count, new_tx_count;
bool need_update = false;
@@ -830,6 +830,16 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
msleep(1);
+ if (!netif_running(adapter->netdev)) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_ring[i].count = new_tx_count;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i].count = new_rx_count;
+ adapter->tx_ring_count = new_tx_count;
+ adapter->rx_ring_count = new_rx_count;
+ goto err_setup;
+ }
+
temp_tx_ring = kcalloc(adapter->num_tx_queues,
sizeof(struct ixgbe_ring), GFP_KERNEL);
if (!temp_tx_ring) {
@@ -887,8 +897,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
/* if rings need to be updated, here's the place to do it in one shot */
if (need_update) {
- if (netif_running(netdev))
- ixgbe_down(adapter);
+ ixgbe_down(adapter);
/* tx */
if (new_tx_count != adapter->tx_ring_count) {
@@ -905,13 +914,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
temp_rx_ring = NULL;
adapter->rx_ring_count = new_rx_count;
}
- }
-
- /* success! */
- err = 0;
- if (netif_running(netdev))
ixgbe_up(adapter);
-
+ }
err_setup:
clear_bit(__IXGBE_RESETTING, &adapter->state);
return err;
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index 237835864357..a23f739d222f 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -171,6 +171,36 @@ static void ks8851_wrreg16(struct ks8851_net *ks, unsigned reg, unsigned val)
}
/**
+ * ks8851_wrreg8 - write 8bit register value to chip
+ * @ks: The chip state
+ * @reg: The register address
+ * @val: The value to write
+ *
+ * Issue a write to put the value @val into the register specified in @reg.
+ */
+static void ks8851_wrreg8(struct ks8851_net *ks, unsigned reg, unsigned val)
+{
+ struct spi_transfer *xfer = &ks->spi_xfer1;
+ struct spi_message *msg = &ks->spi_msg1;
+ __le16 txb[2];
+ int ret;
+ int bit;
+
+ bit = 1 << (reg & 3);
+
+ txb[0] = cpu_to_le16(MK_OP(bit, reg) | KS_SPIOP_WR);
+ txb[1] = val;
+
+ xfer->tx_buf = txb;
+ xfer->rx_buf = NULL;
+ xfer->len = 3;
+
+ ret = spi_sync(ks->spidev, msg);
+ if (ret < 0)
+ ks_err(ks, "spi_sync() failed\n");
+}
+
+/**
* ks8851_rx_1msg - select whether to use one or two messages for spi read
* @ks: The device structure
*
@@ -322,13 +352,12 @@ static void ks8851_soft_reset(struct ks8851_net *ks, unsigned op)
static int ks8851_write_mac_addr(struct net_device *dev)
{
struct ks8851_net *ks = netdev_priv(dev);
- u16 *mcp = (u16 *)dev->dev_addr;
+ int i;
mutex_lock(&ks->lock);
- ks8851_wrreg16(ks, KS_MARL, mcp[0]);
- ks8851_wrreg16(ks, KS_MARM, mcp[1]);
- ks8851_wrreg16(ks, KS_MARH, mcp[2]);
+ for (i = 0; i < ETH_ALEN; i++)
+ ks8851_wrreg8(ks, KS_MAR(i), dev->dev_addr[i]);
mutex_unlock(&ks->lock);
@@ -951,7 +980,7 @@ static void ks8851_set_rx_mode(struct net_device *dev)
mcptr = mcptr->next;
}
- rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXAE | RXCR1_RXPAFMA;
+ rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXPAFMA;
} else {
/* just accept broadcast / unicast */
rxctrl.rxcr1 = RXCR1_RXPAFMA;
@@ -1239,6 +1268,9 @@ static int __devinit ks8851_probe(struct spi_device *spi)
ndev->netdev_ops = &ks8851_netdev_ops;
ndev->irq = spi->irq;
+ /* issue a global soft reset to reset the device. */
+ ks8851_soft_reset(ks, GRR_GSR);
+
/* simple check for a valid chip being connected to the bus */
if ((ks8851_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
diff --git a/drivers/net/ks8851.h b/drivers/net/ks8851.h
index 85abe147afbf..f52c312cc356 100644
--- a/drivers/net/ks8851.h
+++ b/drivers/net/ks8851.h
@@ -16,6 +16,7 @@
#define CCR_32PIN (1 << 0)
/* MAC address registers */
+#define KS_MAR(_m) 0x15 - (_m)
#define KS_MARL 0x10
#define KS_MARM 0x12
#define KS_MARH 0x14
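Editorial note on the ks8851 hunks above (illustrative only, not part of the patch): the three 16-bit MARL/MARM/MARH writes are replaced with per-byte writes through the new KS_MAR(_m) macro, which maps a MAC byte index onto the address registers in descending order. A minimal sketch of the resulting mapping, assuming ETH_ALEN is 6:

    /* Sketch only -- mirrors ks8851_write_mac_addr() and the KS_MAR() definition above. */
    int i;
    for (i = 0; i < 6; i++)   /* ETH_ALEN */
            /* i = 0 writes dev_addr[0] at 0x15; i = 5 writes dev_addr[5] at 0x10 (KS_MARL) */
            ks8851_wrreg8(ks, 0x15 - i, dev->dev_addr[i]);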
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 29c9fe2951e0..5319db9901d8 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -75,7 +75,7 @@
#include "myri10ge_mcp.h"
#include "myri10ge_mcp_gen_header.h"
-#define MYRI10GE_VERSION_STR "1.5.0-1.432"
+#define MYRI10GE_VERSION_STR "1.5.1-1.451"
MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
MODULE_AUTHOR("Maintainer: help@myri.com");
@@ -1623,10 +1623,21 @@ myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
return 0;
}
}
- if (*ptr == 'R' || *ptr == 'Q') {
- /* We've found either an XFP or quad ribbon fiber */
+ if (*ptr == '2')
+ ptr++;
+ if (*ptr == 'R' || *ptr == 'Q' || *ptr == 'S') {
+ /* We've found either an XFP, quad ribbon fiber, or SFP+ */
cmd->port = PORT_FIBRE;
+ cmd->supported |= SUPPORTED_FIBRE;
+ cmd->advertising |= ADVERTISED_FIBRE;
+ } else {
+ cmd->port = PORT_OTHER;
}
+ if (*ptr == 'R' || *ptr == 'S')
+ cmd->transceiver = XCVR_EXTERNAL;
+ else
+ cmd->transceiver = XCVR_INTERNAL;
+
return 0;
}
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index 7386a7cce2ba..a39155d61bad 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -419,6 +419,7 @@ enum {
#define NETXEN_CRB_ROMUSB \
NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_ROMUSB)
#define NETXEN_CRB_I2Q NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2Q)
+#define NETXEN_CRB_I2C0 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2C0)
#define NETXEN_CRB_SMB NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SMB)
#define NETXEN_CRB_MAX NETXEN_PCI_CRB_WINDOW(64)
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index e43cbbd5bec1..69205ace16eb 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -1778,22 +1778,16 @@ netxen_setup_hwops(struct netxen_adapter *adapter)
int netxen_nic_get_board_info(struct netxen_adapter *adapter)
{
- int offset, board_type, magic, header_version;
+ int offset, board_type, magic;
struct pci_dev *pdev = adapter->pdev;
offset = NX_FW_MAGIC_OFFSET;
if (netxen_rom_fast_read(adapter, offset, &magic))
return -EIO;
- offset = NX_HDR_VERSION_OFFSET;
- if (netxen_rom_fast_read(adapter, offset, &header_version))
- return -EIO;
-
- if (magic != NETXEN_BDINFO_MAGIC ||
- header_version != NETXEN_BDINFO_VERSION) {
- dev_err(&pdev->dev,
- "invalid board config, magic=%08x, version=%08x\n",
- magic, header_version);
+ if (magic != NETXEN_BDINFO_MAGIC) {
+ dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
+ magic);
return -EIO;
}
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index d8c4b70e35ba..27d20cbae0aa 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -514,6 +514,8 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter)
continue;
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ if (off == (NETXEN_CRB_I2C0 + 0x1c))
+ continue;
/* do not reset PCI */
if (off == (ROMUSB_GLB + 0xbc))
continue;
@@ -537,12 +539,6 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter)
continue;
}
- if (off == NETXEN_ADDR_ERROR) {
- printk(KERN_ERR "%s: Err: Unknown addr: 0x%08x\n",
- netxen_nic_driver_name, buf[i].addr);
- continue;
- }
-
init_delay = 1;
/* After writing this register, HW needs time for CRB */
/* to quiet down (else crb_window returns 0xffffffff) */
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 1071f090a124..c2bdfd3c7aad 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -1925,6 +1925,7 @@ static void netxen_tx_timeout_task(struct work_struct *work)
request_reset:
adapter->need_fw_reset = 1;
+ clear_bit(__NX_RESETTING, &adapter->state);
}
struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 1d1e657991d2..5506f870037f 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -3545,7 +3545,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
rp->rcr_index = index;
skb_reserve(skb, NET_IP_ALIGN);
- __pskb_pull_tail(skb, min(len, NIU_RXPULL_MAX));
+ __pskb_pull_tail(skb, min(len, VLAN_ETH_HLEN));
rp->rx_packets++;
rp->rx_bytes += skb->len;
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 250e10f2c35b..8659d341e769 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -238,6 +238,7 @@ static struct of_device_id mdio_ofgpio_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, mdio_ofgpio_match);
static struct of_platform_driver mdio_ofgpio_driver = {
.name = "mdio-gpio",
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 7cbf6f9b51de..2559991eea6a 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -111,9 +111,6 @@ struct pppoe_net {
rwlock_t hash_lock;
};
-/* to eliminate a race btw pppoe_flush_dev and pppoe_release */
-static DEFINE_SPINLOCK(flush_lock);
-
/*
* PPPoE could be in the following stages:
* 1) Discovery stage (to obtain remote MAC and Session ID)
@@ -303,45 +300,48 @@ static void pppoe_flush_dev(struct net_device *dev)
write_lock_bh(&pn->hash_lock);
for (i = 0; i < PPPOE_HASH_SIZE; i++) {
struct pppox_sock *po = pn->hash_table[i];
+ struct sock *sk;
- while (po != NULL) {
- struct sock *sk;
- if (po->pppoe_dev != dev) {
+ while (po) {
+ while (po && po->pppoe_dev != dev) {
po = po->next;
- continue;
}
+
+ if (!po)
+ break;
+
sk = sk_pppox(po);
- spin_lock(&flush_lock);
- po->pppoe_dev = NULL;
- spin_unlock(&flush_lock);
- dev_put(dev);
/* We always grab the socket lock, followed by the
- * hash_lock, in that order. Since we should
- * hold the sock lock while doing any unbinding,
- * we need to release the lock we're holding.
- * Hold a reference to the sock so it doesn't disappear
- * as we're jumping between locks.
+ * hash_lock, in that order. Since we should hold the
+ * sock lock while doing any unbinding, we need to
+ * release the lock we're holding. Hold a reference to
+ * the sock so it doesn't disappear as we're jumping
+ * between locks.
*/
sock_hold(sk);
-
write_unlock_bh(&pn->hash_lock);
lock_sock(sk);
- if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
+ if (po->pppoe_dev == dev
+ && sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
pppox_unbind_sock(sk);
sk->sk_state = PPPOX_ZOMBIE;
sk->sk_state_change(sk);
+ po->pppoe_dev = NULL;
+ dev_put(dev);
}
release_sock(sk);
sock_put(sk);
- /* Restart scan at the beginning of this hash chain.
- * While the lock was dropped the chain contents may
- * have changed.
+ /* Restart the process from the start of the current
+ * hash chain. We dropped locks so the world may have
+			 * changed from underneath us.
*/
+
+ BUG_ON(pppoe_pernet(dev_net(dev)) == NULL);
write_lock_bh(&pn->hash_lock);
po = pn->hash_table[i];
}
@@ -388,11 +388,16 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
struct pppox_sock *po = pppox_sk(sk);
struct pppox_sock *relay_po;
+ /* Backlog receive. Semantics of backlog rcv preclude any code from
+ * executing in lock_sock()/release_sock() bounds; meaning sk->sk_state
+ * can't change.
+ */
+
if (sk->sk_state & PPPOX_BOUND) {
ppp_input(&po->chan, skb);
} else if (sk->sk_state & PPPOX_RELAY) {
- relay_po = get_item_by_addr(dev_net(po->pppoe_dev),
- &po->pppoe_relay);
+ relay_po = get_item_by_addr(sock_net(sk),
+ &po->pppoe_relay);
if (relay_po == NULL)
goto abort_kfree;
@@ -447,6 +452,10 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
goto drop;
pn = pppoe_pernet(dev_net(dev));
+
+ /* Note that get_item does a sock_hold(), so sk_pppox(po)
+ * is known to be safe.
+ */
po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
if (!po)
goto drop;
@@ -561,6 +570,7 @@ static int pppoe_release(struct socket *sock)
struct sock *sk = sock->sk;
struct pppox_sock *po;
struct pppoe_net *pn;
+ struct net *net = NULL;
if (!sk)
return 0;
@@ -571,44 +581,28 @@ static int pppoe_release(struct socket *sock)
return -EBADF;
}
+ po = pppox_sk(sk);
+
+ if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
+ dev_put(po->pppoe_dev);
+ po->pppoe_dev = NULL;
+ }
+
pppox_unbind_sock(sk);
/* Signal the death of the socket. */
sk->sk_state = PPPOX_DEAD;
- /*
- * pppoe_flush_dev could lead to a race with
- * this routine so we use flush_lock to eliminate
- * such a case (we only need per-net specific data)
- */
- spin_lock(&flush_lock);
- po = pppox_sk(sk);
- if (!po->pppoe_dev) {
- spin_unlock(&flush_lock);
- goto out;
- }
- pn = pppoe_pernet(dev_net(po->pppoe_dev));
- spin_unlock(&flush_lock);
+ net = sock_net(sk);
+ pn = pppoe_pernet(net);
/*
* protect "po" from concurrent updates
* on pppoe_flush_dev
*/
- write_lock_bh(&pn->hash_lock);
+ delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote,
+ po->pppoe_ifindex);
- po = pppox_sk(sk);
- if (stage_session(po->pppoe_pa.sid))
- __delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote,
- po->pppoe_ifindex);
-
- if (po->pppoe_dev) {
- dev_put(po->pppoe_dev);
- po->pppoe_dev = NULL;
- }
-
- write_unlock_bh(&pn->hash_lock);
-
-out:
sock_orphan(sk);
sock->sk = NULL;
@@ -625,8 +619,9 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
struct sock *sk = sock->sk;
struct sockaddr_pppox *sp = (struct sockaddr_pppox *)uservaddr;
struct pppox_sock *po = pppox_sk(sk);
- struct net_device *dev;
+ struct net_device *dev = NULL;
struct pppoe_net *pn;
+ struct net *net = NULL;
int error;
lock_sock(sk);
@@ -652,12 +647,14 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
/* Delete the old binding */
if (stage_session(po->pppoe_pa.sid)) {
pppox_unbind_sock(sk);
+ pn = pppoe_pernet(sock_net(sk));
+ delete_item(pn, po->pppoe_pa.sid,
+ po->pppoe_pa.remote, po->pppoe_ifindex);
if (po->pppoe_dev) {
- pn = pppoe_pernet(dev_net(po->pppoe_dev));
- delete_item(pn, po->pppoe_pa.sid,
- po->pppoe_pa.remote, po->pppoe_ifindex);
dev_put(po->pppoe_dev);
+ po->pppoe_dev = NULL;
}
+
memset(sk_pppox(po) + 1, 0,
sizeof(struct pppox_sock) - sizeof(struct sock));
sk->sk_state = PPPOX_NONE;
@@ -666,16 +663,15 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
/* Re-bind in session stage only */
if (stage_session(sp->sa_addr.pppoe.sid)) {
error = -ENODEV;
- dev = dev_get_by_name(sock_net(sk), sp->sa_addr.pppoe.dev);
+ net = sock_net(sk);
+ dev = dev_get_by_name(net, sp->sa_addr.pppoe.dev);
if (!dev)
- goto end;
+ goto err_put;
po->pppoe_dev = dev;
po->pppoe_ifindex = dev->ifindex;
- pn = pppoe_pernet(dev_net(dev));
- write_lock_bh(&pn->hash_lock);
+ pn = pppoe_pernet(net);
if (!(dev->flags & IFF_UP)) {
- write_unlock_bh(&pn->hash_lock);
goto err_put;
}
@@ -683,6 +679,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
&sp->sa_addr.pppoe,
sizeof(struct pppoe_addr));
+ write_lock_bh(&pn->hash_lock);
error = __set_item(pn, po);
write_unlock_bh(&pn->hash_lock);
if (error < 0)
@@ -696,8 +693,11 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
po->chan.ops = &pppoe_chan_ops;
error = ppp_register_net_channel(dev_net(dev), &po->chan);
- if (error)
+ if (error) {
+ delete_item(pn, po->pppoe_pa.sid,
+ po->pppoe_pa.remote, po->pppoe_ifindex);
goto err_put;
+ }
sk->sk_state = PPPOX_CONNECTED;
}
@@ -915,6 +915,14 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
struct pppoe_hdr *ph;
int data_len = skb->len;
+ /* The higher-level PPP code (ppp_unregister_channel()) ensures the PPP
+ * xmit operations conclude prior to an unregistration call. Thus
+ * sk->sk_state cannot change, so we don't need to do lock_sock().
+ * But, we also can't do a lock_sock since that introduces a potential
+ * deadlock as we'd reverse the lock ordering used when calling
+ * ppp_unregister_channel().
+ */
+
if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
goto abort;
@@ -944,7 +952,6 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
po->pppoe_pa.remote, NULL, data_len);
dev_queue_xmit(skb);
-
return 1;
abort:
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 7d9fc06ceb98..1f7946c7d4e8 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1029,7 +1029,10 @@ static void rtl8169_vlan_rx_register(struct net_device *dev,
spin_lock_irqsave(&tp->lock, flags);
tp->vlgrp = grp;
- if (tp->vlgrp)
+ /*
+ * Do not disable RxVlan on 8110SCd.
+ */
+ if (tp->vlgrp || (tp->mac_version == RTL_GIGA_MAC_VER_05))
tp->cp_cmd |= RxVlan;
else
tp->cp_cmd &= ~RxVlan;
@@ -3197,6 +3200,14 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
rtl8169_init_phy(dev, tp);
+
+ /*
+	 * Pretend we are using VLANs; this bypasses a nasty bug where
+	 * interrupts stop flowing under high load on 8110SCd controllers.
+ */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_05)
+ RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan);
+
device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
out:
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 161181a4b3d6..5783f50d18e9 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -31,6 +31,8 @@
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
+#include <asm/cacheflush.h>
+
#include "sh_eth.h"
/* There is CPU dependent code */
diff --git a/drivers/net/stmmac/Kconfig b/drivers/net/stmmac/Kconfig
new file mode 100644
index 000000000000..35eaa5251d7f
--- /dev/null
+++ b/drivers/net/stmmac/Kconfig
@@ -0,0 +1,53 @@
+config STMMAC_ETH
+ tristate "STMicroelectronics 10/100/1000 Ethernet driver"
+ select MII
+ select PHYLIB
+ depends on NETDEVICES && CPU_SUBTYPE_ST40
+ help
+ This is the driver for the ST MAC 10/100/1000 on-chip Ethernet
+ controllers. ST Ethernet IPs are built around a Synopsys IP Core.
+
+if STMMAC_ETH
+
+config STMMAC_DA
+ bool "STMMAC DMA arbitration scheme"
+ default n
+ help
+	  If you select this option, Rx has priority over Tx (Gigabit
+	  Ethernet devices only).
+ By default, the DMA arbitration scheme is based on Round-robin
+ (rx:tx priority is 1:1).
+
+config STMMAC_DUAL_MAC
+ bool "STMMAC: dual mac support (EXPERIMENTAL)"
+ default n
+ depends on EXPERIMENTAL && STMMAC_ETH && !STMMAC_TIMER
+ help
+ Some ST SoCs (for example the stx7141 and stx7200c2) have two
+	  Ethernet controllers. This option turns on the second Ethernet
+	  device on such platforms.
+
+config STMMAC_TIMER
+ bool "STMMAC Timer optimisation"
+ default n
+ help
+	  Use an external timer to reduce the number of network
+	  interrupts.
+
+choice
+ prompt "Select Timer device"
+ depends on STMMAC_TIMER
+
+config STMMAC_TMU_TIMER
+ bool "TMU channel 2"
+ depends on CPU_SH4
+ help
+
+config STMMAC_RTC_TIMER
+ bool "Real time clock"
+ depends on RTC_CLASS
+ help
+
+endchoice
+
+endif
diff --git a/drivers/net/stmmac/Makefile b/drivers/net/stmmac/Makefile
new file mode 100644
index 000000000000..b2d7a5564dfa
--- /dev/null
+++ b/drivers/net/stmmac/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_STMMAC_ETH) += stmmac.o
+stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
+stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
+ mac100.o gmac.o $(stmmac-y)
diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h
new file mode 100644
index 000000000000..e49e5188e887
--- /dev/null
+++ b/drivers/net/stmmac/common.h
@@ -0,0 +1,330 @@
+/*******************************************************************************
+ STMMAC Common Header File
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include "descs.h"
+#include <linux/io.h>
+
+/* *********************************************
+ DMA CRS Control and Status Register Mapping
+ * *********************************************/
+#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
+#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
+#define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */
+#define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */
+#define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */
+#define DMA_STATUS 0x00001014 /* Status Register */
+#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
+#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
+#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
+#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
+#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
+
+/* ********************************
+ DMA Control register defines
+ * ********************************/
+#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
+#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
+
+/* **************************************
+ DMA Interrupt Enable register defines
+ * **************************************/
+/**** NORMAL INTERRUPT ****/
+#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
+#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
+#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */
+#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
+#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
+
+#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
+ DMA_INTR_ENA_TIE)
+
+/**** ABNORMAL INTERRUPT ****/
+#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
+#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
+#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
+#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
+#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
+#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
+#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
+#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
+#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
+#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
+
+#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
+ DMA_INTR_ENA_UNE)
+
+/* DMA default interrupt mask */
+#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
+
+/* ****************************
+ * DMA Status register defines
+ * ****************************/
+#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
+#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
+#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int. */
+#define DMA_STATUS_GMI 0x08000000
+#define DMA_STATUS_GLI 0x04000000
+#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
+#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
+#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
+#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
+#define DMA_STATUS_TS_SHIFT 20
+#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
+#define DMA_STATUS_RS_SHIFT 17
+#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
+#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
+#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
+#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
+#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
+#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
+#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
+#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
+#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
+#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
+#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
+#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
+#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
+#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
+#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
+
+/* Other defines */
+#define HASH_TABLE_SIZE 64
+#define PAUSE_TIME 0x200
+
+/* Flow Control defines */
+#define FLOW_OFF 0
+#define FLOW_RX 1
+#define FLOW_TX 2
+#define FLOW_AUTO (FLOW_TX | FLOW_RX)
+
+/* DMA STORE-AND-FORWARD Operation Mode */
+#define SF_DMA_MODE 1
+
+#define HW_CSUM 1
+#define NO_HW_CSUM 0
+
+/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
+#define BUF_SIZE_16KiB 16384
+#define BUF_SIZE_8KiB 8192
+#define BUF_SIZE_4KiB 4096
+#define BUF_SIZE_2KiB 2048
+
+/* Power Down and WOL */
+#define PMT_NOT_SUPPORTED 0
+#define PMT_SUPPORTED 1
+
+/* Common MAC defines */
+#define MAC_CTRL_REG 0x00000000 /* MAC Control */
+#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
+#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
+
+/* MAC Management Counters register */
+#define MMC_CONTROL 0x00000100 /* MMC Control */
+#define MMC_HIGH_INTR 0x00000104 /* MMC High Interrupt */
+#define MMC_LOW_INTR 0x00000108 /* MMC Low Interrupt */
+#define MMC_HIGH_INTR_MASK 0x0000010c /* MMC High Interrupt Mask */
+#define MMC_LOW_INTR_MASK 0x00000110 /* MMC Low Interrupt Mask */
+
+#define MMC_CONTROL_MAX_FRM_MASK 0x0003ff8 /* Maximum Frame Size */
+#define MMC_CONTROL_MAX_FRM_SHIFT 3
+#define MMC_CONTROL_MAX_FRAME 0x7FF
+
+struct stmmac_extra_stats {
+ /* Transmit errors */
+ unsigned long tx_underflow ____cacheline_aligned;
+ unsigned long tx_carrier;
+ unsigned long tx_losscarrier;
+ unsigned long tx_heartbeat;
+ unsigned long tx_deferred;
+ unsigned long tx_vlan;
+ unsigned long tx_jabber;
+ unsigned long tx_frame_flushed;
+ unsigned long tx_payload_error;
+ unsigned long tx_ip_header_error;
+ /* Receive errors */
+ unsigned long rx_desc;
+ unsigned long rx_partial;
+ unsigned long rx_runt;
+ unsigned long rx_toolong;
+ unsigned long rx_collision;
+ unsigned long rx_crc;
+ unsigned long rx_lenght;
+ unsigned long rx_mii;
+ unsigned long rx_multicast;
+ unsigned long rx_gmac_overflow;
+ unsigned long rx_watchdog;
+ unsigned long da_rx_filter_fail;
+ unsigned long sa_rx_filter_fail;
+ unsigned long rx_missed_cntr;
+ unsigned long rx_overflow_cntr;
+ unsigned long rx_vlan;
+ /* Tx/Rx IRQ errors */
+ unsigned long tx_undeflow_irq;
+ unsigned long tx_process_stopped_irq;
+ unsigned long tx_jabber_irq;
+ unsigned long rx_overflow_irq;
+ unsigned long rx_buf_unav_irq;
+ unsigned long rx_process_stopped_irq;
+ unsigned long rx_watchdog_irq;
+ unsigned long tx_early_irq;
+ unsigned long fatal_bus_error_irq;
+ /* Extra info */
+ unsigned long threshold;
+ unsigned long tx_pkt_n;
+ unsigned long rx_pkt_n;
+ unsigned long poll_n;
+ unsigned long sched_timer_n;
+ unsigned long normal_irq_n;
+};
+
+/* GMAC core can compute the checksums in HW. */
+enum rx_frame_status {
+ good_frame = 0,
+ discard_frame = 1,
+ csum_none = 2,
+};
+
+static inline void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
+ unsigned int high, unsigned int low)
+{
+ unsigned long data;
+
+ data = (addr[5] << 8) | addr[4];
+ writel(data, ioaddr + high);
+ data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ writel(data, ioaddr + low);
+
+ return;
+}
+
+static inline void stmmac_get_mac_addr(unsigned long ioaddr,
+ unsigned char *addr, unsigned int high,
+ unsigned int low)
+{
+ unsigned int hi_addr, lo_addr;
+
+ /* Read the MAC address from the hardware */
+ hi_addr = readl(ioaddr + high);
+ lo_addr = readl(ioaddr + low);
+
+ /* Extract the MAC address from the high and low words */
+ addr[0] = lo_addr & 0xff;
+ addr[1] = (lo_addr >> 8) & 0xff;
+ addr[2] = (lo_addr >> 16) & 0xff;
+ addr[3] = (lo_addr >> 24) & 0xff;
+ addr[4] = hi_addr & 0xff;
+ addr[5] = (hi_addr >> 8) & 0xff;
+
+ return;
+}
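For reference, a worked example of the address packing performed by the two helpers above (illustrative values only), assuming a MAC of 00:11:22:33:44:55, so addr[0] is 0x00 and addr[5] is 0x55:

    /* Sketch of the packing done by stmmac_set_mac_addr() above. */
    u32 high = (0x55 << 8) | 0x44;                                /* 0x00005544 */
    u32 low  = (0x33 << 24) | (0x22 << 16) | (0x11 << 8) | 0x00;  /* 0x33221100 */

stmmac_get_mac_addr() simply reverses this unpacking, byte by byte.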
+
+struct stmmac_ops {
+ /* MAC core initialization */
+ void (*core_init) (unsigned long ioaddr) ____cacheline_aligned;
+ /* DMA core initialization */
+ int (*dma_init) (unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
+ /* Dump MAC registers */
+ void (*dump_mac_regs) (unsigned long ioaddr);
+ /* Dump DMA registers */
+ void (*dump_dma_regs) (unsigned long ioaddr);
+ /* Set tx/rx threshold in the csr6 register
+ * An invalid value enables the store-and-forward mode */
+ void (*dma_mode) (unsigned long ioaddr, int txmode, int rxmode);
+ /* To track extra statistic (if supported) */
+ void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
+ unsigned long ioaddr);
+ /* RX descriptor ring initialization */
+ void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size,
+ int disable_rx_ic);
+ /* TX descriptor ring initialization */
+ void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size);
+
+ /* Invoked by the xmit function to prepare the tx descriptor */
+ void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
+ int csum_flag);
+ /* Set/get the owner of the descriptor */
+ void (*set_tx_owner) (struct dma_desc *p);
+ int (*get_tx_owner) (struct dma_desc *p);
+ /* Invoked by the xmit function to close the tx descriptor */
+ void (*close_tx_desc) (struct dma_desc *p);
+ /* Clean the tx descriptor as soon as the tx irq is received */
+ void (*release_tx_desc) (struct dma_desc *p);
+ /* Clear interrupt on tx frame completion. When this bit is
+ * set an interrupt happens as soon as the frame is transmitted */
+ void (*clear_tx_ic) (struct dma_desc *p);
+ /* Last tx segment reports the transmit status */
+ int (*get_tx_ls) (struct dma_desc *p);
+ /* Return the transmit status looking at the TDES1 */
+ int (*tx_status) (void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p, unsigned long ioaddr);
+ /* Get the buffer size from the descriptor */
+ int (*get_tx_len) (struct dma_desc *p);
+ /* Handle extra events on specific interrupts hw dependent */
+ void (*host_irq_status) (unsigned long ioaddr);
+ int (*get_rx_owner) (struct dma_desc *p);
+ void (*set_rx_owner) (struct dma_desc *p);
+ /* Get the receive frame size */
+ int (*get_rx_frame_len) (struct dma_desc *p);
+ /* Return the reception status looking at the RDES1 */
+ int (*rx_status) (void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p);
+ /* Multicast filter setting */
+ void (*set_filter) (struct net_device *dev);
+ /* Flow control setting */
+ void (*flow_ctrl) (unsigned long ioaddr, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time);
+ /* Set power management mode (e.g. magic frame) */
+ void (*pmt) (unsigned long ioaddr, unsigned long mode);
+ /* Set/Get Unicast MAC addresses */
+ void (*set_umac_addr) (unsigned long ioaddr, unsigned char *addr,
+ unsigned int reg_n);
+ void (*get_umac_addr) (unsigned long ioaddr, unsigned char *addr,
+ unsigned int reg_n);
+};
+
+struct mac_link {
+ int port;
+ int duplex;
+ int speed;
+};
+
+struct mii_regs {
+ unsigned int addr; /* MII Address */
+ unsigned int data; /* MII Data */
+};
+
+struct hw_cap {
+ unsigned int version; /* Core Version register (GMAC) */
+ unsigned int pmt; /* Power-Down mode (GMAC) */
+ struct mac_link link;
+ struct mii_regs mii;
+};
+
+struct mac_device_info {
+ struct hw_cap hw;
+ struct stmmac_ops *ops;
+};
+
+struct mac_device_info *gmac_setup(unsigned long addr);
+struct mac_device_info *mac100_setup(unsigned long addr);
diff --git a/drivers/net/stmmac/descs.h b/drivers/net/stmmac/descs.h
new file mode 100644
index 000000000000..6d2a0b2f5e57
--- /dev/null
+++ b/drivers/net/stmmac/descs.h
@@ -0,0 +1,163 @@
+/*******************************************************************************
+ Header File to describe the DMA descriptors
+ Use enhanced descriptors in case of GMAC Cores.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+struct dma_desc {
+ /* Receive descriptor */
+ union {
+ struct {
+ /* RDES0 */
+ u32 reserved1:1;
+ u32 crc_error:1;
+ u32 dribbling:1;
+ u32 mii_error:1;
+ u32 receive_watchdog:1;
+ u32 frame_type:1;
+ u32 collision:1;
+ u32 frame_too_long:1;
+ u32 last_descriptor:1;
+ u32 first_descriptor:1;
+ u32 multicast_frame:1;
+ u32 run_frame:1;
+ u32 length_error:1;
+ u32 partial_frame_error:1;
+ u32 descriptor_error:1;
+ u32 error_summary:1;
+ u32 frame_length:14;
+ u32 filtering_fail:1;
+ u32 own:1;
+ /* RDES1 */
+ u32 buffer1_size:11;
+ u32 buffer2_size:11;
+ u32 reserved2:2;
+ u32 second_address_chained:1;
+ u32 end_ring:1;
+ u32 reserved3:5;
+ u32 disable_ic:1;
+ } rx;
+ struct {
+ /* RDES0 */
+ u32 payload_csum_error:1;
+ u32 crc_error:1;
+ u32 dribbling:1;
+ u32 error_gmii:1;
+ u32 receive_watchdog:1;
+ u32 frame_type:1;
+ u32 late_collision:1;
+ u32 ipc_csum_error:1;
+ u32 last_descriptor:1;
+ u32 first_descriptor:1;
+ u32 vlan_tag:1;
+ u32 overflow_error:1;
+ u32 length_error:1;
+ u32 sa_filter_fail:1;
+ u32 descriptor_error:1;
+ u32 error_summary:1;
+ u32 frame_length:14;
+ u32 da_filter_fail:1;
+ u32 own:1;
+ /* RDES1 */
+ u32 buffer1_size:13;
+ u32 reserved1:1;
+ u32 second_address_chained:1;
+ u32 end_ring:1;
+ u32 buffer2_size:13;
+ u32 reserved2:2;
+ u32 disable_ic:1;
+ } erx; /* -- enhanced -- */
+
+ /* Transmit descriptor */
+ struct {
+ /* TDES0 */
+ u32 deferred:1;
+ u32 underflow_error:1;
+ u32 excessive_deferral:1;
+ u32 collision_count:4;
+ u32 heartbeat_fail:1;
+ u32 excessive_collisions:1;
+ u32 late_collision:1;
+ u32 no_carrier:1;
+ u32 loss_carrier:1;
+ u32 reserved1:3;
+ u32 error_summary:1;
+ u32 reserved2:15;
+ u32 own:1;
+ /* TDES1 */
+ u32 buffer1_size:11;
+ u32 buffer2_size:11;
+ u32 reserved3:1;
+ u32 disable_padding:1;
+ u32 second_address_chained:1;
+ u32 end_ring:1;
+ u32 crc_disable:1;
+ u32 reserved4:2;
+ u32 first_segment:1;
+ u32 last_segment:1;
+ u32 interrupt:1;
+ } tx;
+ struct {
+ /* TDES0 */
+ u32 deferred:1;
+ u32 underflow_error:1;
+ u32 excessive_deferral:1;
+ u32 collision_count:4;
+ u32 vlan_frame:1;
+ u32 excessive_collisions:1;
+ u32 late_collision:1;
+ u32 no_carrier:1;
+ u32 loss_carrier:1;
+ u32 payload_error:1;
+ u32 frame_flushed:1;
+ u32 jabber_timeout:1;
+ u32 error_summary:1;
+ u32 ip_header_error:1;
+ u32 time_stamp_status:1;
+ u32 reserved1:2;
+ u32 second_address_chained:1;
+ u32 end_ring:1;
+ u32 checksum_insertion:2;
+ u32 reserved2:1;
+ u32 time_stamp_enable:1;
+ u32 disable_padding:1;
+ u32 crc_disable:1;
+ u32 first_segment:1;
+ u32 last_segment:1;
+ u32 interrupt:1;
+ u32 own:1;
+ /* TDES1 */
+ u32 buffer1_size:13;
+ u32 reserved3:3;
+ u32 buffer2_size:13;
+ u32 reserved4:3;
+ } etx; /* -- enhanced -- */
+ } des01;
+ unsigned int des2;
+ unsigned int des3;
+};
+
+/* Transmit checksum insertion control */
+enum tdes_csum_insertion {
+ cic_disabled = 0, /* Checksum Insertion Control */
+ cic_only_ip = 1, /* Only IP header */
+ cic_no_pseudoheader = 2, /* IP header but pseudoheader
+ * is not calculated */
+ cic_full = 3, /* IP header and pseudoheader */
+};
diff --git a/drivers/net/stmmac/gmac.c b/drivers/net/stmmac/gmac.c
new file mode 100644
index 000000000000..b624bb5bae0a
--- /dev/null
+++ b/drivers/net/stmmac/gmac.c
@@ -0,0 +1,693 @@
+/*******************************************************************************
+ This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
+ DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
+ developing this code.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/netdevice.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+
+#include "stmmac.h"
+#include "gmac.h"
+
+#undef GMAC_DEBUG
+/*#define GMAC_DEBUG*/
+#undef FRAME_FILTER_DEBUG
+/*#define FRAME_FILTER_DEBUG*/
+#ifdef GMAC_DEBUG
+#define DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define DBG(fmt, args...) do { } while (0)
+#endif
+
+static void gmac_dump_regs(unsigned long ioaddr)
+{
+ int i;
+ pr_info("\t----------------------------------------------\n"
+ "\t GMAC registers (base addr = 0x%8x)\n"
+ "\t----------------------------------------------\n",
+ (unsigned int)ioaddr);
+
+ for (i = 0; i < 55; i++) {
+ int offset = i * 4;
+ pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
+ offset, readl(ioaddr + offset));
+ }
+ return;
+}
+
+static int gmac_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx)
+{
+ u32 value = readl(ioaddr + DMA_BUS_MODE);
+ /* DMA SW reset */
+ value |= DMA_BUS_MODE_SFT_RESET;
+ writel(value, ioaddr + DMA_BUS_MODE);
+ do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));
+
+ value = /* DMA_BUS_MODE_FB | */ DMA_BUS_MODE_4PBL |
+ ((pbl << DMA_BUS_MODE_PBL_SHIFT) |
+ (pbl << DMA_BUS_MODE_RPBL_SHIFT));
+
+#ifdef CONFIG_STMMAC_DA
+ value |= DMA_BUS_MODE_DA; /* Rx has priority over tx */
+#endif
+ writel(value, ioaddr + DMA_BUS_MODE);
+
+ /* Mask interrupts by writing to CSR7 */
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+
+ /* The base address of the RX/TX descriptor lists must be written into
+ * DMA CSR3 and CSR4, respectively. */
+ writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
+ writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
+
+ return 0;
+}
+
+/* Transmit FIFO flush operation */
+static void gmac_flush_tx_fifo(unsigned long ioaddr)
+{
+ u32 csr6 = readl(ioaddr + DMA_CONTROL);
+ writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
+
+ do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
+}
+
+static void gmac_dma_operation_mode(unsigned long ioaddr, int txmode,
+ int rxmode)
+{
+ u32 csr6 = readl(ioaddr + DMA_CONTROL);
+
+ if (txmode == SF_DMA_MODE) {
+ DBG(KERN_DEBUG "GMAC: enabling TX store and forward mode\n");
+ /* Transmit COE type 2 cannot be done in cut-through mode. */
+ csr6 |= DMA_CONTROL_TSF;
+		/* Operating on the second frame increases performance,
+		 * especially when transmit store-and-forward is used. */
+ csr6 |= DMA_CONTROL_OSF;
+ } else {
+ DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode"
+ " (threshold = %d)\n", txmode);
+ csr6 &= ~DMA_CONTROL_TSF;
+ csr6 &= DMA_CONTROL_TC_TX_MASK;
+		/* Set the transmit threshold */
+ if (txmode <= 32)
+ csr6 |= DMA_CONTROL_TTC_32;
+ else if (txmode <= 64)
+ csr6 |= DMA_CONTROL_TTC_64;
+ else if (txmode <= 128)
+ csr6 |= DMA_CONTROL_TTC_128;
+ else if (txmode <= 192)
+ csr6 |= DMA_CONTROL_TTC_192;
+ else
+ csr6 |= DMA_CONTROL_TTC_256;
+ }
+
+ if (rxmode == SF_DMA_MODE) {
+ DBG(KERN_DEBUG "GMAC: enabling RX store and forward mode\n");
+ csr6 |= DMA_CONTROL_RSF;
+ } else {
+ DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode"
+ " (threshold = %d)\n", rxmode);
+ csr6 &= ~DMA_CONTROL_RSF;
+ csr6 &= DMA_CONTROL_TC_RX_MASK;
+ if (rxmode <= 32)
+ csr6 |= DMA_CONTROL_RTC_32;
+ else if (rxmode <= 64)
+ csr6 |= DMA_CONTROL_RTC_64;
+ else if (rxmode <= 96)
+ csr6 |= DMA_CONTROL_RTC_96;
+ else
+ csr6 |= DMA_CONTROL_RTC_128;
+ }
+
+ writel(csr6, ioaddr + DMA_CONTROL);
+ return;
+}
+
+/* Not yet implemented --- no RMON module */
+static void gmac_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
+ unsigned long ioaddr)
+{
+ return;
+}
+
+static void gmac_dump_dma_regs(unsigned long ioaddr)
+{
+ int i;
+ pr_info(" DMA registers\n");
+ for (i = 0; i < 22; i++) {
+ if ((i < 9) || (i > 17)) {
+ int offset = i * 4;
+ pr_err("\t Reg No. %d (offset 0x%x): 0x%08x\n", i,
+ (DMA_BUS_MODE + offset),
+ readl(ioaddr + DMA_BUS_MODE + offset));
+ }
+ }
+ return;
+}
+
+static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p, unsigned long ioaddr)
+{
+ int ret = 0;
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ if (unlikely(p->des01.etx.error_summary)) {
+ DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx);
+ if (unlikely(p->des01.etx.jabber_timeout)) {
+ DBG(KERN_ERR "\tjabber_timeout error\n");
+ x->tx_jabber++;
+ }
+
+ if (unlikely(p->des01.etx.frame_flushed)) {
+ DBG(KERN_ERR "\tframe_flushed error\n");
+ x->tx_frame_flushed++;
+ gmac_flush_tx_fifo(ioaddr);
+ }
+
+ if (unlikely(p->des01.etx.loss_carrier)) {
+ DBG(KERN_ERR "\tloss_carrier error\n");
+ x->tx_losscarrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely(p->des01.etx.no_carrier)) {
+ DBG(KERN_ERR "\tno_carrier error\n");
+ x->tx_carrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely(p->des01.etx.late_collision)) {
+ DBG(KERN_ERR "\tlate_collision error\n");
+ stats->collisions += p->des01.etx.collision_count;
+ }
+ if (unlikely(p->des01.etx.excessive_collisions)) {
+ DBG(KERN_ERR "\texcessive_collisions\n");
+ stats->collisions += p->des01.etx.collision_count;
+ }
+ if (unlikely(p->des01.etx.excessive_deferral)) {
+ DBG(KERN_INFO "\texcessive tx_deferral\n");
+ x->tx_deferred++;
+ }
+
+ if (unlikely(p->des01.etx.underflow_error)) {
+ DBG(KERN_ERR "\tunderflow error\n");
+ gmac_flush_tx_fifo(ioaddr);
+ x->tx_underflow++;
+ }
+
+ if (unlikely(p->des01.etx.ip_header_error)) {
+ DBG(KERN_ERR "\tTX IP header csum error\n");
+ x->tx_ip_header_error++;
+ }
+
+ if (unlikely(p->des01.etx.payload_error)) {
+ DBG(KERN_ERR "\tAddr/Payload csum error\n");
+ x->tx_payload_error++;
+ gmac_flush_tx_fifo(ioaddr);
+ }
+
+ ret = -1;
+ }
+
+ if (unlikely(p->des01.etx.deferred)) {
+ DBG(KERN_INFO "GMAC TX status: tx deferred\n");
+ x->tx_deferred++;
+ }
+#ifdef STMMAC_VLAN_TAG_USED
+ if (p->des01.etx.vlan_frame) {
+ DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
+ x->tx_vlan++;
+ }
+#endif
+
+ return ret;
+}
+
+static int gmac_get_tx_len(struct dma_desc *p)
+{
+ return p->des01.etx.buffer1_size;
+}
+
+static int gmac_coe_rdes0(int ipc_err, int type, int payload_err)
+{
+ int ret = good_frame;
+ u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
+
+ /* bits 5 7 0 | Frame status
+ * ----------------------------------------------------------
+	 *      0   0   0   |  IEEE 802.3 Type frame (length < 1536 octets)
+ * 1 0 0 | IPv4/6 No CSUM errorS.
+ * 1 0 1 | IPv4/6 CSUM PAYLOAD error
+ * 1 1 0 | IPv4/6 CSUM IP HR error
+ * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errorS
+ * 0 0 1 | IPv4/6 unsupported IP PAYLOAD
+ * 0 1 1 | COE bypassed.. no IPv4/6 frame
+ * 0 1 0 | Reserved.
+ */
+ if (status == 0x0) {
+ DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
+ ret = good_frame;
+ } else if (status == 0x4) {
+ DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n");
+ ret = good_frame;
+ } else if (status == 0x5) {
+ DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
+ ret = csum_none;
+ } else if (status == 0x6) {
+ DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
+ ret = csum_none;
+ } else if (status == 0x7) {
+ DBG(KERN_ERR
+ "RX Des0 status: IPv4/6 Header and Payload Error.\n");
+ ret = csum_none;
+ } else if (status == 0x1) {
+ DBG(KERN_ERR
+ "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
+ ret = discard_frame;
+ } else if (status == 0x3) {
+ DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n");
+ ret = discard_frame;
+ }
+ return ret;
+}
+
+static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p)
+{
+ int ret = good_frame;
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ if (unlikely(p->des01.erx.error_summary)) {
+ DBG(KERN_ERR "GMAC RX Error Summary... 0x%08x\n", p->des01.erx);
+ if (unlikely(p->des01.erx.descriptor_error)) {
+ DBG(KERN_ERR "\tdescriptor error\n");
+ x->rx_desc++;
+ stats->rx_length_errors++;
+ }
+ if (unlikely(p->des01.erx.overflow_error)) {
+ DBG(KERN_ERR "\toverflow error\n");
+ x->rx_gmac_overflow++;
+ }
+
+ if (unlikely(p->des01.erx.ipc_csum_error))
+ DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n");
+
+ if (unlikely(p->des01.erx.late_collision)) {
+ DBG(KERN_ERR "\tlate_collision error\n");
+ stats->collisions++;
+ stats->collisions++;
+ }
+ if (unlikely(p->des01.erx.receive_watchdog)) {
+ DBG(KERN_ERR "\treceive_watchdog error\n");
+ x->rx_watchdog++;
+ }
+ if (unlikely(p->des01.erx.error_gmii)) {
+ DBG(KERN_ERR "\tReceive Error\n");
+ x->rx_mii++;
+ }
+ if (unlikely(p->des01.erx.crc_error)) {
+ DBG(KERN_ERR "\tCRC error\n");
+ x->rx_crc++;
+ stats->rx_crc_errors++;
+ }
+ ret = discard_frame;
+ }
+
+ /* After a payload csum error, the ES bit is set.
+	 * This does not match the information reported in the databook.
+	 * In any case, we need to check whether the HW checksum computation
+	 * is OK and report this info to the upper layers. */
+ ret = gmac_coe_rdes0(p->des01.erx.ipc_csum_error,
+ p->des01.erx.frame_type, p->des01.erx.payload_csum_error);
+
+ if (unlikely(p->des01.erx.dribbling)) {
+ DBG(KERN_ERR "GMAC RX: dribbling error\n");
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.erx.sa_filter_fail)) {
+ DBG(KERN_ERR "GMAC RX : Source Address filter fail\n");
+ x->sa_rx_filter_fail++;
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.erx.da_filter_fail)) {
+ DBG(KERN_ERR "GMAC RX : Destination Address filter fail\n");
+ x->da_rx_filter_fail++;
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.erx.length_error)) {
+ DBG(KERN_ERR "GMAC RX: length_error error\n");
+ x->rx_lenght++;
+ ret = discard_frame;
+ }
+#ifdef STMMAC_VLAN_TAG_USED
+ if (p->des01.erx.vlan_tag) {
+ DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
+ x->rx_vlan++;
+ }
+#endif
+ return ret;
+}
+
+static void gmac_irq_status(unsigned long ioaddr)
+{
+ u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
+
+ /* Not used events (e.g. MMC interrupts) are not handled. */
+ if ((intr_status & mmc_tx_irq))
+ DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
+ readl(ioaddr + GMAC_MMC_TX_INTR));
+ if (unlikely(intr_status & mmc_rx_irq))
+ DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
+ readl(ioaddr + GMAC_MMC_RX_INTR));
+ if (unlikely(intr_status & mmc_rx_csum_offload_irq))
+ DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
+ readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
+ if (unlikely(intr_status & pmt_irq)) {
+ DBG(KERN_DEBUG "GMAC: received Magic frame\n");
+ /* clear the PMT bits 5 and 6 by reading the PMT
+ * status register. */
+ readl(ioaddr + GMAC_PMT);
+ }
+
+ return;
+}
+
+static void gmac_core_init(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + GMAC_CONTROL);
+ value |= GMAC_CORE_INIT;
+ writel(value, ioaddr + GMAC_CONTROL);
+
+ /* STBus Bridge Configuration */
+ /*writel(0xc5608, ioaddr + 0x00007000);*/
+
+ /* Freeze MMC counters */
+ writel(0x8, ioaddr + GMAC_MMC_CTRL);
+ /* Mask GMAC interrupts */
+ writel(0x207, ioaddr + GMAC_INT_MASK);
+
+#ifdef STMMAC_VLAN_TAG_USED
+ /* Tag detection without filtering */
+ writel(0x0, ioaddr + GMAC_VLAN_TAG);
+#endif
+ return;
+}
+
+static void gmac_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+ GMAC_ADDR_LOW(reg_n));
+}
+
+static void gmac_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+ GMAC_ADDR_LOW(reg_n));
+}
+
+static void gmac_set_filter(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ unsigned int value = 0;
+
+ DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
+ __func__, dev->mc_count, dev->uc_count);
+
+ if (dev->flags & IFF_PROMISC)
+ value = GMAC_FRAME_FILTER_PR;
+ else if ((dev->mc_count > HASH_TABLE_SIZE)
+ || (dev->flags & IFF_ALLMULTI)) {
+ value = GMAC_FRAME_FILTER_PM; /* pass all multi */
+ writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
+ writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
+ } else if (dev->mc_count > 0) {
+ int i;
+ u32 mc_filter[2];
+ struct dev_mc_list *mclist;
+
+ /* Hash filter for multicast */
+ value = GMAC_FRAME_FILTER_HMC;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ for (i = 0, mclist = dev->mc_list;
+ mclist && i < dev->mc_count; i++, mclist = mclist->next) {
+ /* The upper 6 bits of the calculated CRC are used to
+			   index the contents of the hash table */
+ int bit_nr =
+ bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26;
+ /* The most significant bit determines the register to
+ * use (H/L) while the other 5 bits determine the bit
+ * within the register. */
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ }
+ writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
+ writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
+ }
+
+ /* Handle multiple unicast addresses (perfect filtering)*/
+ if (dev->uc_count > GMAC_MAX_UNICAST_ADDRESSES)
+		/* Switch to promiscuous mode if more than 16 addrs
+		   are required */
+ value |= GMAC_FRAME_FILTER_PR;
+ else {
+ int i;
+ struct dev_addr_list *uc_ptr = dev->uc_list;
+
+ for (i = 0; i < dev->uc_count; i++) {
+ gmac_set_umac_addr(ioaddr, uc_ptr->da_addr,
+ i + 1);
+
+ DBG(KERN_INFO "\t%d "
+ "- Unicast addr %02x:%02x:%02x:%02x:%02x:"
+ "%02x\n", i + 1,
+ uc_ptr->da_addr[0], uc_ptr->da_addr[1],
+ uc_ptr->da_addr[2], uc_ptr->da_addr[3],
+ uc_ptr->da_addr[4], uc_ptr->da_addr[5]);
+ uc_ptr = uc_ptr->next;
+ }
+ }
+
+#ifdef FRAME_FILTER_DEBUG
+ /* Enable Receive all mode (to debug filtering_fail errors) */
+ value |= GMAC_FRAME_FILTER_RA;
+#endif
+ writel(value, ioaddr + GMAC_FRAME_FILTER);
+
+ DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
+ "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
+ readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
+
+ return;
+}
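To make the hash indexing in gmac_set_filter() above concrete (an illustrative sketch, values chosen only as an example): the top six bits of the bit-reversed CRC select one of 64 hash bits split across the two 32-bit hash registers; the most significant bit chooses the register and the low five bits the bit position.

    /* bit_nr = 37: 37 >> 5 = 1, 37 & 31 = 5  -> bit 5 of mc_filter[1] (GMAC_HASH_HIGH)  */
    /* bit_nr = 12: 12 >> 5 = 0, 12 & 31 = 12 -> bit 12 of mc_filter[0] (GMAC_HASH_LOW) */
    mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);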
+
+static void gmac_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time)
+{
+ unsigned int flow = 0;
+
+ DBG(KERN_DEBUG "GMAC Flow-Control:\n");
+ if (fc & FLOW_RX) {
+ DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
+ flow |= GMAC_FLOW_CTRL_RFE;
+ }
+ if (fc & FLOW_TX) {
+ DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
+ flow |= GMAC_FLOW_CTRL_TFE;
+ }
+
+ if (duplex) {
+ DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time);
+ flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
+ }
+
+ writel(flow, ioaddr + GMAC_FLOW_CTRL);
+ return;
+}
+
+static void gmac_pmt(unsigned long ioaddr, unsigned long mode)
+{
+ unsigned int pmt = 0;
+
+ if (mode == WAKE_MAGIC) {
+ DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
+ pmt |= power_down | magic_pkt_en;
+ } else if (mode == WAKE_UCAST) {
+ DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
+ pmt |= global_unicast;
+ }
+
+ writel(pmt, ioaddr + GMAC_PMT);
+ return;
+}
+
+static void gmac_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
+ int disable_rx_ic)
+{
+ int i;
+ for (i = 0; i < ring_size; i++) {
+ p->des01.erx.own = 1;
+ p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
+ /* To support jumbo frames */
+ p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
+ if (i == ring_size - 1)
+ p->des01.erx.end_ring = 1;
+ if (disable_rx_ic)
+ p->des01.erx.disable_ic = 1;
+ p++;
+ }
+ return;
+}
+
+static void gmac_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+{
+ int i;
+
+ for (i = 0; i < ring_size; i++) {
+ p->des01.etx.own = 0;
+ if (i == ring_size - 1)
+ p->des01.etx.end_ring = 1;
+ p++;
+ }
+
+ return;
+}
+
+static int gmac_get_tx_owner(struct dma_desc *p)
+{
+ return p->des01.etx.own;
+}
+
+static int gmac_get_rx_owner(struct dma_desc *p)
+{
+ return p->des01.erx.own;
+}
+
+static void gmac_set_tx_owner(struct dma_desc *p)
+{
+ p->des01.etx.own = 1;
+}
+
+static void gmac_set_rx_owner(struct dma_desc *p)
+{
+ p->des01.erx.own = 1;
+}
+
+static int gmac_get_tx_ls(struct dma_desc *p)
+{
+ return p->des01.etx.last_segment;
+}
+
+static void gmac_release_tx_desc(struct dma_desc *p)
+{
+ int ter = p->des01.etx.end_ring;
+
+ memset(p, 0, sizeof(struct dma_desc));
+ p->des01.etx.end_ring = ter;
+
+ return;
+}
+
+static void gmac_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+ int csum_flag)
+{
+ p->des01.etx.first_segment = is_fs;
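+	/* Illustrative: assuming BUF_SIZE_4KiB = 4096, a 6000-byte payload
+	 * is split below as buffer1 = 4096 and buffer2 = 1904 bytes. */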
+ if (unlikely(len > BUF_SIZE_4KiB)) {
+ p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
+ p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
+ } else {
+ p->des01.etx.buffer1_size = len;
+ }
+ if (likely(csum_flag))
+ p->des01.etx.checksum_insertion = cic_full;
+}
+
+static void gmac_clear_tx_ic(struct dma_desc *p)
+{
+ p->des01.etx.interrupt = 0;
+}
+
+static void gmac_close_tx_desc(struct dma_desc *p)
+{
+ p->des01.etx.last_segment = 1;
+ p->des01.etx.interrupt = 1;
+}
+
+static int gmac_get_rx_frame_len(struct dma_desc *p)
+{
+ return p->des01.erx.frame_length;
+}
+
+struct stmmac_ops gmac_driver = {
+ .core_init = gmac_core_init,
+ .dump_mac_regs = gmac_dump_regs,
+ .dma_init = gmac_dma_init,
+ .dump_dma_regs = gmac_dump_dma_regs,
+ .dma_mode = gmac_dma_operation_mode,
+ .dma_diagnostic_fr = gmac_dma_diagnostic_fr,
+ .tx_status = gmac_get_tx_frame_status,
+ .rx_status = gmac_get_rx_frame_status,
+ .get_tx_len = gmac_get_tx_len,
+ .set_filter = gmac_set_filter,
+ .flow_ctrl = gmac_flow_ctrl,
+ .pmt = gmac_pmt,
+ .init_rx_desc = gmac_init_rx_desc,
+ .init_tx_desc = gmac_init_tx_desc,
+ .get_tx_owner = gmac_get_tx_owner,
+ .get_rx_owner = gmac_get_rx_owner,
+ .release_tx_desc = gmac_release_tx_desc,
+ .prepare_tx_desc = gmac_prepare_tx_desc,
+ .clear_tx_ic = gmac_clear_tx_ic,
+ .close_tx_desc = gmac_close_tx_desc,
+ .get_tx_ls = gmac_get_tx_ls,
+ .set_tx_owner = gmac_set_tx_owner,
+ .set_rx_owner = gmac_set_rx_owner,
+ .get_rx_frame_len = gmac_get_rx_frame_len,
+ .host_irq_status = gmac_irq_status,
+ .set_umac_addr = gmac_set_umac_addr,
+ .get_umac_addr = gmac_get_umac_addr,
+};
+
+struct mac_device_info *gmac_setup(unsigned long ioaddr)
+{
+ struct mac_device_info *mac;
+ u32 uid = readl(ioaddr + GMAC_VERSION);
+
+ pr_info("\tGMAC - user ID: 0x%x, Synopsys ID: 0x%x\n",
+ ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff));
+
+	mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
+	if (!mac)
+		return NULL;
+
+ mac->ops = &gmac_driver;
+ mac->hw.pmt = PMT_SUPPORTED;
+ mac->hw.link.port = GMAC_CONTROL_PS;
+ mac->hw.link.duplex = GMAC_CONTROL_DM;
+ mac->hw.link.speed = GMAC_CONTROL_FES;
+ mac->hw.mii.addr = GMAC_MII_ADDR;
+ mac->hw.mii.data = GMAC_MII_DATA;
+
+ return mac;
+}
diff --git a/drivers/net/stmmac/gmac.h b/drivers/net/stmmac/gmac.h
new file mode 100644
index 000000000000..684a363120a9
--- /dev/null
+++ b/drivers/net/stmmac/gmac.h
@@ -0,0 +1,204 @@
+/*******************************************************************************
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#define GMAC_CONTROL 0x00000000 /* Configuration */
+#define GMAC_FRAME_FILTER 0x00000004 /* Frame Filter */
+#define GMAC_HASH_HIGH 0x00000008 /* Multicast Hash Table High */
+#define GMAC_HASH_LOW 0x0000000c /* Multicast Hash Table Low */
+#define GMAC_MII_ADDR 0x00000010 /* MII Address */
+#define GMAC_MII_DATA 0x00000014 /* MII Data */
+#define GMAC_FLOW_CTRL 0x00000018 /* Flow Control */
+#define GMAC_VLAN_TAG 0x0000001c /* VLAN Tag */
+#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */
+#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */
+
+#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */
+enum gmac_irq_status {
+ time_stamp_irq = 0x0200,
+ mmc_rx_csum_offload_irq = 0x0080,
+ mmc_tx_irq = 0x0040,
+ mmc_rx_irq = 0x0020,
+ mmc_irq = 0x0010,
+ pmt_irq = 0x0008,
+ pcs_ane_irq = 0x0004,
+ pcs_link_irq = 0x0002,
+ rgmii_irq = 0x0001,
+};
+#define GMAC_INT_MASK 0x0000003c /* interrupt mask register */
+
+/* PMT Control and Status */
+#define GMAC_PMT 0x0000002c
+enum power_event {
+ pointer_reset = 0x80000000,
+ global_unicast = 0x00000200,
+ wake_up_rx_frame = 0x00000040,
+ magic_frame = 0x00000020,
+ wake_up_frame_en = 0x00000004,
+ magic_pkt_en = 0x00000002,
+ power_down = 0x00000001,
+};
+
+/* GMAC HW ADDR regs */
+#define GMAC_ADDR_HIGH(reg) (0x00000040+(reg * 8))
+#define GMAC_ADDR_LOW(reg) (0x00000044+(reg * 8))
+#define GMAC_MAX_UNICAST_ADDRESSES 16
+
+#define GMAC_AN_CTRL 0x000000c0 /* AN control */
+#define GMAC_AN_STATUS 0x000000c4 /* AN status */
+#define GMAC_ANE_ADV 0x000000c8 /* Auto-Neg. Advertisement */
+#define GMAC_ANE_LINK	0x000000cc	/* Auto-Neg. link partner ability */
+#define GMAC_ANE_EXP 0x000000d0 /* ANE expansion */
+#define GMAC_TBI 0x000000d4 /* TBI extend status */
+#define GMAC_GMII_STATUS 0x000000d8 /* S/R-GMII status */
+
+/* GMAC Configuration defines */
+#define GMAC_CONTROL_TC 0x01000000 /* Transmit Conf. in RGMII/SGMII */
+#define GMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on receive */
+#define GMAC_CONTROL_JD 0x00400000 /* Jabber disable */
+#define GMAC_CONTROL_BE 0x00200000 /* Frame Burst Enable */
+#define GMAC_CONTROL_JE 0x00100000 /* Jumbo frame */
+enum inter_frame_gap {
+ GMAC_CONTROL_IFG_88 = 0x00040000,
+ GMAC_CONTROL_IFG_80 = 0x00020000,
+ GMAC_CONTROL_IFG_40 = 0x000e0000,
+};
+#define GMAC_CONTROL_DCRS 0x00010000 /* Disable carrier sense during tx */
+#define GMAC_CONTROL_PS		0x00008000	/* Port Select 0:GMII 1:MII */
+#define GMAC_CONTROL_FES 0x00004000 /* Speed 0:10 1:100 */
+#define GMAC_CONTROL_DO 0x00002000 /* Disable Rx Own */
+#define GMAC_CONTROL_LM 0x00001000 /* Loop-back mode */
+#define GMAC_CONTROL_DM 0x00000800 /* Duplex Mode */
+#define GMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */
+#define GMAC_CONTROL_DR 0x00000200 /* Disable Retry */
+#define GMAC_CONTROL_LUD 0x00000100 /* Link up/down */
+#define GMAC_CONTROL_ACS 0x00000080 /* Automatic Pad Stripping */
+#define GMAC_CONTROL_DC 0x00000010 /* Deferral Check */
+#define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
+#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
+
+#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \
+ GMAC_CONTROL_IPC | GMAC_CONTROL_JE | GMAC_CONTROL_BE)
+
+/* GMAC Frame Filter defines */
+#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
+#define GMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */
+#define GMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */
+#define GMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */
+#define GMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */
+#define GMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */
+#define GMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */
+#define GMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */
+#define GMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */
+#define GMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */
+/* GMII ADDR defines */
+#define GMAC_MII_ADDR_WRITE 0x00000002 /* MII Write */
+#define GMAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */
+/* GMAC FLOW CTRL defines */
+#define GMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
+#define GMAC_FLOW_CTRL_PT_SHIFT 16
+#define GMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */
+#define GMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */
+#define GMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */
+
+/*--- DMA BLOCK defines ---*/
+/* DMA Bus Mode register defines */
+#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
+#define DMA_BUS_MODE_DA 0x00000002 /* Arbitration scheme */
+#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
+#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
+/* Programmable burst length (passed through platform) */
+#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
+#define DMA_BUS_MODE_PBL_SHIFT 8
+
+enum rx_tx_priority_ratio {
+ double_ratio = 0x00004000, /*2:1 */
+ triple_ratio = 0x00008000, /*3:1 */
+ quadruple_ratio = 0x0000c000, /*4:1 */
+};
+
+#define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */
+#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */
+#define DMA_BUS_MODE_RPBL_SHIFT 17
+#define DMA_BUS_MODE_USP 0x00800000
+#define DMA_BUS_MODE_4PBL 0x01000000
+#define DMA_BUS_MODE_AAL 0x02000000
+
+/* DMA CRS Control and Status Register Mapping */
+#define DMA_HOST_TX_DESC 0x00001048 /* Current Host Tx descriptor */
+#define DMA_HOST_RX_DESC 0x0000104c /* Current Host Rx descriptor */
+/* DMA Bus Mode register defines */
+#define DMA_BUS_PR_RATIO_MASK 0x0000c000 /* Rx/Tx priority ratio */
+#define DMA_BUS_PR_RATIO_SHIFT 14
+#define DMA_BUS_FB 0x00010000 /* Fixed Burst */
+
+/* DMA operation mode defines (start/stop tx/rx are placed in common header)*/
+#define DMA_CONTROL_DT 0x04000000 /* Disable Drop TCP/IP csum error */
+#define DMA_CONTROL_RSF 0x02000000 /* Receive Store and Forward */
+#define DMA_CONTROL_DFF	0x01000000	/* Disable flushing */
+/* Threshold for Activating the FC */
+enum rfa {
+ act_full_minus_1 = 0x00800000,
+ act_full_minus_2 = 0x00800200,
+ act_full_minus_3 = 0x00800400,
+ act_full_minus_4 = 0x00800600,
+};
+/* Threshold for Deactivating the FC */
+enum rfd {
+ deac_full_minus_1 = 0x00400000,
+ deac_full_minus_2 = 0x00400800,
+ deac_full_minus_3 = 0x00401000,
+ deac_full_minus_4 = 0x00401800,
+};
+#define DMA_CONTROL_TSF 0x00200000 /* Transmit Store and Forward */
+#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
+
+enum ttc_control {
+ DMA_CONTROL_TTC_64 = 0x00000000,
+ DMA_CONTROL_TTC_128 = 0x00004000,
+ DMA_CONTROL_TTC_192 = 0x00008000,
+ DMA_CONTROL_TTC_256 = 0x0000c000,
+ DMA_CONTROL_TTC_40 = 0x00010000,
+ DMA_CONTROL_TTC_32 = 0x00014000,
+ DMA_CONTROL_TTC_24 = 0x00018000,
+ DMA_CONTROL_TTC_16 = 0x0001c000,
+};
+#define DMA_CONTROL_TC_TX_MASK 0xfffe3fff
+
+#define DMA_CONTROL_EFC 0x00000100
+#define DMA_CONTROL_FEF 0x00000080
+#define DMA_CONTROL_FUF 0x00000040
+
+enum rtc_control {
+ DMA_CONTROL_RTC_64 = 0x00000000,
+ DMA_CONTROL_RTC_32 = 0x00000008,
+ DMA_CONTROL_RTC_96 = 0x00000010,
+ DMA_CONTROL_RTC_128 = 0x00000018,
+};
+#define DMA_CONTROL_TC_RX_MASK 0xffffffe7
+
+#define DMA_CONTROL_OSF 0x00000004 /* Operate on second frame */
+
+/* MMC registers offset */
+#define GMAC_MMC_CTRL 0x100
+#define GMAC_MMC_RX_INTR 0x104
+#define GMAC_MMC_TX_INTR 0x108
+#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
diff --git a/drivers/net/stmmac/mac100.c b/drivers/net/stmmac/mac100.c
new file mode 100644
index 000000000000..625171b6062b
--- /dev/null
+++ b/drivers/net/stmmac/mac100.c
@@ -0,0 +1,517 @@
+/*******************************************************************************
+ This is the driver for the MAC 10/100 on-chip Ethernet controller
+ currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
+
+ DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
+ this code.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/netdevice.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+
+#include "common.h"
+#include "mac100.h"
+
+#undef MAC100_DEBUG
+/*#define MAC100_DEBUG*/
+#ifdef MAC100_DEBUG
+#define DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define DBG(fmt, args...) do { } while (0)
+#endif
+
+static void mac100_core_init(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + MAC_CONTROL);
+
+ writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL);
+
+#ifdef STMMAC_VLAN_TAG_USED
+ writel(ETH_P_8021Q, ioaddr + MAC_VLAN1);
+#endif
+ return;
+}
+
+static void mac100_dump_mac_regs(unsigned long ioaddr)
+{
+ pr_info("\t----------------------------------------------\n"
+ "\t MAC100 CSR (base addr = 0x%8x)\n"
+ "\t----------------------------------------------\n",
+ (unsigned int)ioaddr);
+ pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
+ readl(ioaddr + MAC_CONTROL));
+ pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
+ readl(ioaddr + MAC_ADDR_HIGH));
+ pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW,
+ readl(ioaddr + MAC_ADDR_LOW));
+ pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n",
+ MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
+ pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n",
+ MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
+ pr_info("\tflow control (offset 0x%x): 0x%08x\n",
+ MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL));
+ pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1,
+ readl(ioaddr + MAC_VLAN1));
+ pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
+ readl(ioaddr + MAC_VLAN2));
+ pr_info("\n\tMAC management counter registers\n");
+ pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n",
+ MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
+ pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n",
+ MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
+ pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n",
+ MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
+ pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n",
+ MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
+ pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n",
+ MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
+ return;
+}
+
+static int mac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
+ u32 dma_rx)
+{
+ u32 value = readl(ioaddr + DMA_BUS_MODE);
+ /* DMA SW reset */
+ value |= DMA_BUS_MODE_SFT_RESET;
+ writel(value, ioaddr + DMA_BUS_MODE);
+ do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));
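+	/* The busy-wait above returns once the hardware has cleared
+	 * DMA_BUS_MODE_SFT_RESET, i.e. the software reset is complete. */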
+
+ /* Enable Application Access by writing to DMA CSR0 */
+ writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
+ ioaddr + DMA_BUS_MODE);
+
+ /* Mask interrupts by writing to CSR7 */
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+
+ /* The base address of the RX/TX descriptor lists must be written into
+ * DMA CSR3 and CSR4, respectively. */
+ writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
+ writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
+
+ return 0;
+}
+
+/* Store and Forward capability is not used at all.
+ * The transmit threshold can be programmed by
+ * setting the TTC bits in the DMA control register.*/
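+/* Illustrative: the driver default threshold of 64 (the "tc" module
+ * parameter in stmmac_main.c) would map to DMA_CONTROL_TTC_64 here. */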
+static void mac100_dma_operation_mode(unsigned long ioaddr, int txmode,
+ int rxmode)
+{
+ u32 csr6 = readl(ioaddr + DMA_CONTROL);
+
+ if (txmode <= 32)
+ csr6 |= DMA_CONTROL_TTC_32;
+ else if (txmode <= 64)
+ csr6 |= DMA_CONTROL_TTC_64;
+ else
+ csr6 |= DMA_CONTROL_TTC_128;
+
+ writel(csr6, ioaddr + DMA_CONTROL);
+
+ return;
+}
+
+static void mac100_dump_dma_regs(unsigned long ioaddr)
+{
+ int i;
+
+ DBG(KERN_DEBUG "MAC100 DMA CSR \n");
+ for (i = 0; i < 9; i++)
+ pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
+ (DMA_BUS_MODE + i * 4),
+ readl(ioaddr + DMA_BUS_MODE + i * 4));
+ DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n",
+ DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR));
+ DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n",
+ DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR));
+ return;
+}
+
+/* The DMA controller keeps two counters to track the number of
+   receive missed frames. */
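+/* Per the CSR8 masks in mac100.h: bits 27..17 hold the FIFO overflow
+ * counter and bits 15..0 the missed frame counter; the OVE/OVE_M bits
+ * flag that the respective counter has itself overflowed. */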
+static void mac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
+ unsigned long ioaddr)
+{
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+ u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);
+
+ if (unlikely(csr8)) {
+ if (csr8 & DMA_MISSED_FRAME_OVE) {
+ stats->rx_over_errors += 0x800;
+ x->rx_overflow_cntr += 0x800;
+ } else {
+ unsigned int ove_cntr;
+ ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17);
+ stats->rx_over_errors += ove_cntr;
+ x->rx_overflow_cntr += ove_cntr;
+ }
+
+ if (csr8 & DMA_MISSED_FRAME_OVE_M) {
+ stats->rx_missed_errors += 0xffff;
+ x->rx_missed_cntr += 0xffff;
+ } else {
+ unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR);
+ stats->rx_missed_errors += miss_f;
+ x->rx_missed_cntr += miss_f;
+ }
+ }
+ return;
+}
+
+static int mac100_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p, unsigned long ioaddr)
+{
+ int ret = 0;
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ if (unlikely(p->des01.tx.error_summary)) {
+ if (unlikely(p->des01.tx.underflow_error)) {
+ x->tx_underflow++;
+ stats->tx_fifo_errors++;
+ }
+ if (unlikely(p->des01.tx.no_carrier)) {
+ x->tx_carrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely(p->des01.tx.loss_carrier)) {
+ x->tx_losscarrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely((p->des01.tx.excessive_deferral) ||
+ (p->des01.tx.excessive_collisions) ||
+ (p->des01.tx.late_collision)))
+ stats->collisions += p->des01.tx.collision_count;
+ ret = -1;
+ }
+ if (unlikely(p->des01.tx.heartbeat_fail)) {
+ x->tx_heartbeat++;
+ stats->tx_heartbeat_errors++;
+ ret = -1;
+ }
+ if (unlikely(p->des01.tx.deferred))
+ x->tx_deferred++;
+
+ return ret;
+}
+
+static int mac100_get_tx_len(struct dma_desc *p)
+{
+ return p->des01.tx.buffer1_size;
+}
+
+/* This function checks each incoming frame for errors and,
+ * if required, updates the multicast statistics.
+ * In case of success, it returns csum_none because the device
+ * is not able to compute the csum in HW. */
+static int mac100_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p)
+{
+ int ret = csum_none;
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ if (unlikely(p->des01.rx.last_descriptor == 0)) {
+ pr_warning("mac100 Error: Oversized Ethernet "
+ "frame spanned multiple buffers\n");
+ stats->rx_length_errors++;
+ return discard_frame;
+ }
+
+ if (unlikely(p->des01.rx.error_summary)) {
+ if (unlikely(p->des01.rx.descriptor_error))
+ x->rx_desc++;
+ if (unlikely(p->des01.rx.partial_frame_error))
+ x->rx_partial++;
+ if (unlikely(p->des01.rx.run_frame))
+ x->rx_runt++;
+ if (unlikely(p->des01.rx.frame_too_long))
+ x->rx_toolong++;
+ if (unlikely(p->des01.rx.collision)) {
+ x->rx_collision++;
+ stats->collisions++;
+ }
+ if (unlikely(p->des01.rx.crc_error)) {
+ x->rx_crc++;
+ stats->rx_crc_errors++;
+ }
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.rx.dribbling))
+ ret = discard_frame;
+
+ if (unlikely(p->des01.rx.length_error)) {
+ x->rx_lenght++;
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.rx.mii_error)) {
+ x->rx_mii++;
+ ret = discard_frame;
+ }
+ if (p->des01.rx.multicast_frame) {
+ x->rx_multicast++;
+ stats->multicast++;
+ }
+ return ret;
+}
+
+static void mac100_irq_status(unsigned long ioaddr)
+{
+ return;
+}
+
+static void mac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
+}
+
+static void mac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
+}
+
+static void mac100_set_filter(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ u32 value = readl(ioaddr + MAC_CONTROL);
+
+ if (dev->flags & IFF_PROMISC) {
+ value |= MAC_CONTROL_PR;
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO |
+ MAC_CONTROL_HP);
+ } else if ((dev->mc_count > HASH_TABLE_SIZE)
+ || (dev->flags & IFF_ALLMULTI)) {
+ value |= MAC_CONTROL_PM;
+ value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO);
+ writel(0xffffffff, ioaddr + MAC_HASH_HIGH);
+ writel(0xffffffff, ioaddr + MAC_HASH_LOW);
+ } else if (dev->mc_count == 0) { /* no multicast */
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF |
+ MAC_CONTROL_HO | MAC_CONTROL_HP);
+ } else {
+ int i;
+ u32 mc_filter[2];
+ struct dev_mc_list *mclist;
+
+ /* Perfect filter mode for physical address and Hash
+ filter for multicast */
+ value |= MAC_CONTROL_HP;
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF
+ | MAC_CONTROL_HO);
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ for (i = 0, mclist = dev->mc_list;
+ mclist && i < dev->mc_count; i++, mclist = mclist->next) {
+ /* The upper 6 bits of the calculated CRC are used to
+			 * index the contents of the hash table */
+ int bit_nr =
+ ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ /* The most significant bit determines the register to
+ * use (H/L) while the other 5 bits determine the bit
+ * within the register. */
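+			/* Illustrative example: bit_nr = 37 selects
+			 * mc_filter[1] bit 5, i.e. bit 5 of MAC_HASH_HIGH. */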
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ }
+ writel(mc_filter[0], ioaddr + MAC_HASH_LOW);
+ writel(mc_filter[1], ioaddr + MAC_HASH_HIGH);
+ }
+
+ writel(value, ioaddr + MAC_CONTROL);
+
+ DBG(KERN_INFO "%s: CTRL reg: 0x%08x Hash regs: "
+ "HI 0x%08x, LO 0x%08x\n",
+ __func__, readl(ioaddr + MAC_CONTROL),
+ readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW));
+ return;
+}
+
+static void mac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time)
+{
+ unsigned int flow = MAC_FLOW_CTRL_ENABLE;
+
+ if (duplex)
+ flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT);
+ writel(flow, ioaddr + MAC_FLOW_CTRL);
+
+ return;
+}
+
+/* No PMT module supported in our SoC for the Ethernet Controller. */
+static void mac100_pmt(unsigned long ioaddr, unsigned long mode)
+{
+ return;
+}
+
+static void mac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
+ int disable_rx_ic)
+{
+ int i;
+ for (i = 0; i < ring_size; i++) {
+ p->des01.rx.own = 1;
+ p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
+ if (i == ring_size - 1)
+ p->des01.rx.end_ring = 1;
+ if (disable_rx_ic)
+ p->des01.rx.disable_ic = 1;
+ p++;
+ }
+ return;
+}
+
+static void mac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+{
+ int i;
+ for (i = 0; i < ring_size; i++) {
+ p->des01.tx.own = 0;
+ if (i == ring_size - 1)
+ p->des01.tx.end_ring = 1;
+ p++;
+ }
+ return;
+}
+
+static int mac100_get_tx_owner(struct dma_desc *p)
+{
+ return p->des01.tx.own;
+}
+
+static int mac100_get_rx_owner(struct dma_desc *p)
+{
+ return p->des01.rx.own;
+}
+
+static void mac100_set_tx_owner(struct dma_desc *p)
+{
+ p->des01.tx.own = 1;
+}
+
+static void mac100_set_rx_owner(struct dma_desc *p)
+{
+ p->des01.rx.own = 1;
+}
+
+static int mac100_get_tx_ls(struct dma_desc *p)
+{
+ return p->des01.tx.last_segment;
+}
+
+static void mac100_release_tx_desc(struct dma_desc *p)
+{
+ int ter = p->des01.tx.end_ring;
+
+ /* clean field used within the xmit */
+ p->des01.tx.first_segment = 0;
+ p->des01.tx.last_segment = 0;
+ p->des01.tx.buffer1_size = 0;
+
+ /* clean status reported */
+ p->des01.tx.error_summary = 0;
+ p->des01.tx.underflow_error = 0;
+ p->des01.tx.no_carrier = 0;
+ p->des01.tx.loss_carrier = 0;
+ p->des01.tx.excessive_deferral = 0;
+ p->des01.tx.excessive_collisions = 0;
+ p->des01.tx.late_collision = 0;
+ p->des01.tx.heartbeat_fail = 0;
+ p->des01.tx.deferred = 0;
+
+ /* set termination field */
+ p->des01.tx.end_ring = ter;
+
+ return;
+}
+
+static void mac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+ int csum_flag)
+{
+ p->des01.tx.first_segment = is_fs;
+ p->des01.tx.buffer1_size = len;
+}
+
+static void mac100_clear_tx_ic(struct dma_desc *p)
+{
+ p->des01.tx.interrupt = 0;
+}
+
+static void mac100_close_tx_desc(struct dma_desc *p)
+{
+ p->des01.tx.last_segment = 1;
+ p->des01.tx.interrupt = 1;
+}
+
+static int mac100_get_rx_frame_len(struct dma_desc *p)
+{
+ return p->des01.rx.frame_length;
+}
+
+struct stmmac_ops mac100_driver = {
+ .core_init = mac100_core_init,
+ .dump_mac_regs = mac100_dump_mac_regs,
+ .dma_init = mac100_dma_init,
+ .dump_dma_regs = mac100_dump_dma_regs,
+ .dma_mode = mac100_dma_operation_mode,
+ .dma_diagnostic_fr = mac100_dma_diagnostic_fr,
+ .tx_status = mac100_get_tx_frame_status,
+ .rx_status = mac100_get_rx_frame_status,
+ .get_tx_len = mac100_get_tx_len,
+ .set_filter = mac100_set_filter,
+ .flow_ctrl = mac100_flow_ctrl,
+ .pmt = mac100_pmt,
+ .init_rx_desc = mac100_init_rx_desc,
+ .init_tx_desc = mac100_init_tx_desc,
+ .get_tx_owner = mac100_get_tx_owner,
+ .get_rx_owner = mac100_get_rx_owner,
+ .release_tx_desc = mac100_release_tx_desc,
+ .prepare_tx_desc = mac100_prepare_tx_desc,
+ .clear_tx_ic = mac100_clear_tx_ic,
+ .close_tx_desc = mac100_close_tx_desc,
+ .get_tx_ls = mac100_get_tx_ls,
+ .set_tx_owner = mac100_set_tx_owner,
+ .set_rx_owner = mac100_set_rx_owner,
+ .get_rx_frame_len = mac100_get_rx_frame_len,
+ .host_irq_status = mac100_irq_status,
+ .set_umac_addr = mac100_set_umac_addr,
+ .get_umac_addr = mac100_get_umac_addr,
+};
+
+struct mac_device_info *mac100_setup(unsigned long ioaddr)
+{
+ struct mac_device_info *mac;
+
+	mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
+	if (!mac)
+		return NULL;
+
+ pr_info("\tMAC 10/100\n");
+
+ mac->ops = &mac100_driver;
+ mac->hw.pmt = PMT_NOT_SUPPORTED;
+ mac->hw.link.port = MAC_CONTROL_PS;
+ mac->hw.link.duplex = MAC_CONTROL_F;
+ mac->hw.link.speed = 0;
+ mac->hw.mii.addr = MAC_MII_ADDR;
+ mac->hw.mii.data = MAC_MII_DATA;
+
+ return mac;
+}
diff --git a/drivers/net/stmmac/mac100.h b/drivers/net/stmmac/mac100.h
new file mode 100644
index 000000000000..0f8f110d004a
--- /dev/null
+++ b/drivers/net/stmmac/mac100.h
@@ -0,0 +1,116 @@
+/*******************************************************************************
+ MAC 10/100 Header File
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+/*----------------------------------------------------------------------------
+ * MAC BLOCK defines
+ *---------------------------------------------------------------------------*/
+/* MAC CSR offset */
+#define MAC_CONTROL 0x00000000 /* MAC Control */
+#define MAC_ADDR_HIGH 0x00000004 /* MAC Address High */
+#define MAC_ADDR_LOW 0x00000008 /* MAC Address Low */
+#define MAC_HASH_HIGH 0x0000000c /* Multicast Hash Table High */
+#define MAC_HASH_LOW 0x00000010 /* Multicast Hash Table Low */
+#define MAC_MII_ADDR 0x00000014 /* MII Address */
+#define MAC_MII_DATA 0x00000018 /* MII Data */
+#define MAC_FLOW_CTRL 0x0000001c /* Flow Control */
+#define MAC_VLAN1 0x00000020 /* VLAN1 Tag */
+#define MAC_VLAN2 0x00000024 /* VLAN2 Tag */
+
+/* MAC CTRL defines */
+#define MAC_CONTROL_RA 0x80000000 /* Receive All Mode */
+#define MAC_CONTROL_BLE 0x40000000 /* Endian Mode */
+#define MAC_CONTROL_HBD 0x10000000 /* Heartbeat Disable */
+#define MAC_CONTROL_PS 0x08000000 /* Port Select */
+#define MAC_CONTROL_DRO 0x00800000 /* Disable Receive Own */
+#define MAC_CONTROL_EXT_LOOPBACK 0x00400000 /* Reserved (ext loopback?) */
+#define MAC_CONTROL_OM 0x00200000 /* Loopback Operating Mode */
+#define MAC_CONTROL_F 0x00100000 /* Full Duplex Mode */
+#define MAC_CONTROL_PM 0x00080000 /* Pass All Multicast */
+#define MAC_CONTROL_PR 0x00040000 /* Promiscuous Mode */
+#define MAC_CONTROL_IF 0x00020000 /* Inverse Filtering */
+#define MAC_CONTROL_PB 0x00010000 /* Pass Bad Frames */
+#define MAC_CONTROL_HO 0x00008000 /* Hash Only Filtering Mode */
+#define MAC_CONTROL_HP 0x00002000 /* Hash/Perfect Filtering Mode */
+#define MAC_CONTROL_LCC 0x00001000 /* Late Collision Control */
+#define MAC_CONTROL_DBF 0x00000800 /* Disable Broadcast Frames */
+#define MAC_CONTROL_DRTY 0x00000400 /* Disable Retry */
+#define MAC_CONTROL_ASTP 0x00000100 /* Automatic Pad Stripping */
+#define MAC_CONTROL_BOLMT_10 0x00000000 /* Back Off Limit 10 */
+#define MAC_CONTROL_BOLMT_8 0x00000040 /* Back Off Limit 8 */
+#define MAC_CONTROL_BOLMT_4 0x00000080 /* Back Off Limit 4 */
+#define MAC_CONTROL_BOLMT_1 0x000000c0 /* Back Off Limit 1 */
+#define MAC_CONTROL_DC 0x00000020 /* Deferral Check */
+#define MAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
+#define MAC_CONTROL_RE 0x00000004 /* Receiver Enable */
+
+#define MAC_CORE_INIT (MAC_CONTROL_HBD | MAC_CONTROL_ASTP)
+
+/* MAC FLOW CTRL defines */
+#define MAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
+#define MAC_FLOW_CTRL_PT_SHIFT 16
+#define MAC_FLOW_CTRL_PASS 0x00000004 /* Pass Control Frames */
+#define MAC_FLOW_CTRL_ENABLE 0x00000002 /* Flow Control Enable */
+#define MAC_FLOW_CTRL_PAUSE 0x00000001 /* Flow Control Busy ... */
+
+/* MII ADDR defines */
+#define MAC_MII_ADDR_WRITE 0x00000002 /* MII Write */
+#define MAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */
+
+/*----------------------------------------------------------------------------
+ * DMA BLOCK defines
+ *---------------------------------------------------------------------------*/
+
+/* DMA Bus Mode register defines */
+#define DMA_BUS_MODE_DBO 0x00100000 /* Descriptor Byte Ordering */
+#define DMA_BUS_MODE_BLE 0x00000080 /* Big Endian/Little Endian */
+#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
+#define DMA_BUS_MODE_PBL_SHIFT 8
+#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
+#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
+#define DMA_BUS_MODE_BAR_BUS 0x00000002 /* Bar-Bus Arbitration */
+#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
+#define DMA_BUS_MODE_DEFAULT 0x00000000
+
+/* DMA Control register defines */
+#define DMA_CONTROL_SF 0x00200000 /* Store And Forward */
+
+/* Transmit Threshold Control */
+enum ttc_control {
+ DMA_CONTROL_TTC_DEFAULT = 0x00000000, /* Threshold is 32 DWORDS */
+ DMA_CONTROL_TTC_64 = 0x00004000, /* Threshold is 64 DWORDS */
+ DMA_CONTROL_TTC_128 = 0x00008000, /* Threshold is 128 DWORDS */
+ DMA_CONTROL_TTC_256 = 0x0000c000, /* Threshold is 256 DWORDS */
+ DMA_CONTROL_TTC_18 = 0x00400000, /* Threshold is 18 DWORDS */
+ DMA_CONTROL_TTC_24 = 0x00404000, /* Threshold is 24 DWORDS */
+ DMA_CONTROL_TTC_32 = 0x00408000, /* Threshold is 32 DWORDS */
+ DMA_CONTROL_TTC_40 = 0x0040c000, /* Threshold is 40 DWORDS */
+ DMA_CONTROL_SE = 0x00000008, /* Stop On Empty */
+ DMA_CONTROL_OSF = 0x00000004, /* Operate On 2nd Frame */
+};
+
+/* STMAC110 DMA Missed Frame Counter register defines */
+#define DMA_MISSED_FRAME_OVE	0x10000000	/* FIFO Overflow Counter Overflow */
+#define DMA_MISSED_FRAME_OVE_CNTR 0x0ffe0000 /* Overflow Frame Counter */
+#define DMA_MISSED_FRAME_OVE_M 0x00010000 /* Missed Frame Overflow */
+#define DMA_MISSED_FRAME_M_CNTR	0x0000ffff	/* Missed Frame Counter */
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
new file mode 100644
index 000000000000..6d2eae3040e5
--- /dev/null
+++ b/drivers/net/stmmac/stmmac.h
@@ -0,0 +1,98 @@
+/*******************************************************************************
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#define DRV_MODULE_VERSION "Oct_09"
+
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define STMMAC_VLAN_TAG_USED
+#include <linux/if_vlan.h>
+#endif
+
+#include "common.h"
+#ifdef CONFIG_STMMAC_TIMER
+#include "stmmac_timer.h"
+#endif
+
+struct stmmac_priv {
+ /* Frequently used values are kept adjacent for cache effect */
+ struct dma_desc *dma_tx ____cacheline_aligned;
+ dma_addr_t dma_tx_phy;
+ struct sk_buff **tx_skbuff;
+ unsigned int cur_tx;
+ unsigned int dirty_tx;
+ unsigned int dma_tx_size;
+ int tx_coe;
+ int tx_coalesce;
+
+ struct dma_desc *dma_rx ;
+ unsigned int cur_rx;
+ unsigned int dirty_rx;
+ struct sk_buff **rx_skbuff;
+ dma_addr_t *rx_skbuff_dma;
+ struct sk_buff_head rx_recycle;
+
+ struct net_device *dev;
+ int is_gmac;
+ dma_addr_t dma_rx_phy;
+ unsigned int dma_rx_size;
+ int rx_csum;
+ unsigned int dma_buf_sz;
+ struct device *device;
+ struct mac_device_info *mac_type;
+
+ struct stmmac_extra_stats xstats;
+ struct napi_struct napi;
+
+ phy_interface_t phy_interface;
+ int pbl;
+ int bus_id;
+ int phy_addr;
+ int phy_mask;
+ int (*phy_reset) (void *priv);
+ void (*fix_mac_speed) (void *priv, unsigned int speed);
+ void *bsp_priv;
+
+ int phy_irq;
+ struct phy_device *phydev;
+ int oldlink;
+ int speed;
+ int oldduplex;
+ unsigned int flow_ctrl;
+ unsigned int pause;
+ struct mii_bus *mii;
+
+ u32 msg_enable;
+ spinlock_t lock;
+ int wolopts;
+ int wolenabled;
+ int shutdown;
+#ifdef CONFIG_STMMAC_TIMER
+ struct stmmac_timer *tm;
+#endif
+#ifdef STMMAC_VLAN_TAG_USED
+ struct vlan_group *vlgrp;
+#endif
+};
+
+extern int stmmac_mdio_unregister(struct net_device *ndev);
+extern int stmmac_mdio_register(struct net_device *ndev);
+extern void stmmac_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
new file mode 100644
index 000000000000..694ebe6a0758
--- /dev/null
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -0,0 +1,395 @@
+/*******************************************************************************
+ STMMAC Ethtool support
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+
+#include "stmmac.h"
+
+#define REG_SPACE_SIZE 0x1054
+#define MAC100_ETHTOOL_NAME "st_mac100"
+#define GMAC_ETHTOOL_NAME "st_gmac"
+
+struct stmmac_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define STMMAC_STAT(m) \
+ { #m, FIELD_SIZEOF(struct stmmac_extra_stats, m), \
+ offsetof(struct stmmac_priv, xstats.m)}
+
+static const struct stmmac_stats stmmac_gstrings_stats[] = {
+ STMMAC_STAT(tx_underflow),
+ STMMAC_STAT(tx_carrier),
+ STMMAC_STAT(tx_losscarrier),
+ STMMAC_STAT(tx_heartbeat),
+ STMMAC_STAT(tx_deferred),
+ STMMAC_STAT(tx_vlan),
+ STMMAC_STAT(rx_vlan),
+ STMMAC_STAT(tx_jabber),
+ STMMAC_STAT(tx_frame_flushed),
+ STMMAC_STAT(tx_payload_error),
+ STMMAC_STAT(tx_ip_header_error),
+ STMMAC_STAT(rx_desc),
+ STMMAC_STAT(rx_partial),
+ STMMAC_STAT(rx_runt),
+ STMMAC_STAT(rx_toolong),
+ STMMAC_STAT(rx_collision),
+ STMMAC_STAT(rx_crc),
+ STMMAC_STAT(rx_lenght),
+ STMMAC_STAT(rx_mii),
+ STMMAC_STAT(rx_multicast),
+ STMMAC_STAT(rx_gmac_overflow),
+ STMMAC_STAT(rx_watchdog),
+ STMMAC_STAT(da_rx_filter_fail),
+ STMMAC_STAT(sa_rx_filter_fail),
+ STMMAC_STAT(rx_missed_cntr),
+ STMMAC_STAT(rx_overflow_cntr),
+ STMMAC_STAT(tx_undeflow_irq),
+ STMMAC_STAT(tx_process_stopped_irq),
+ STMMAC_STAT(tx_jabber_irq),
+ STMMAC_STAT(rx_overflow_irq),
+ STMMAC_STAT(rx_buf_unav_irq),
+ STMMAC_STAT(rx_process_stopped_irq),
+ STMMAC_STAT(rx_watchdog_irq),
+ STMMAC_STAT(tx_early_irq),
+ STMMAC_STAT(fatal_bus_error_irq),
+ STMMAC_STAT(threshold),
+ STMMAC_STAT(tx_pkt_n),
+ STMMAC_STAT(rx_pkt_n),
+ STMMAC_STAT(poll_n),
+ STMMAC_STAT(sched_timer_n),
+ STMMAC_STAT(normal_irq_n),
+};
+#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
+
+void stmmac_ethtool_getdrvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (!priv->is_gmac)
+ strcpy(info->driver, MAC100_ETHTOOL_NAME);
+ else
+ strcpy(info->driver, GMAC_ETHTOOL_NAME);
+
+ strcpy(info->version, DRV_MODULE_VERSION);
+ info->fw_version[0] = '\0';
+ info->n_stats = STMMAC_STATS_LEN;
+ return;
+}
+
+int stmmac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct phy_device *phy = priv->phydev;
+ int rc;
+ if (phy == NULL) {
+ pr_err("%s: %s: PHY is not registered\n",
+ __func__, dev->name);
+ return -ENODEV;
+ }
+ if (!netif_running(dev)) {
+ pr_err("%s: interface is disabled: we cannot track "
+ "link speed / duplex setting\n", dev->name);
+ return -EBUSY;
+ }
+ cmd->transceiver = XCVR_INTERNAL;
+ spin_lock_irq(&priv->lock);
+ rc = phy_ethtool_gset(phy, cmd);
+ spin_unlock_irq(&priv->lock);
+ return rc;
+}
+
+int stmmac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct phy_device *phy = priv->phydev;
+ int rc;
+
+ spin_lock(&priv->lock);
+ rc = phy_ethtool_sset(phy, cmd);
+ spin_unlock(&priv->lock);
+
+ return rc;
+}
+
+u32 stmmac_ethtool_getmsglevel(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ return priv->msg_enable;
+}
+
+void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ priv->msg_enable = level;
+
+}
+
+int stmmac_check_if_running(struct net_device *dev)
+{
+ if (!netif_running(dev))
+ return -EBUSY;
+ return 0;
+}
+
+int stmmac_ethtool_get_regs_len(struct net_device *dev)
+{
+ return REG_SPACE_SIZE;
+}
+
+void stmmac_ethtool_gregs(struct net_device *dev,
+ struct ethtool_regs *regs, void *space)
+{
+ int i;
+ u32 *reg_space = (u32 *) space;
+
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ memset(reg_space, 0x0, REG_SPACE_SIZE);
+
+ if (!priv->is_gmac) {
+ /* MAC registers */
+ for (i = 0; i < 12; i++)
+ reg_space[i] = readl(dev->base_addr + (i * 4));
+ /* DMA registers */
+ for (i = 0; i < 9; i++)
+ reg_space[i + 12] =
+ readl(dev->base_addr + (DMA_BUS_MODE + (i * 4)));
+ reg_space[22] = readl(dev->base_addr + DMA_CUR_TX_BUF_ADDR);
+ reg_space[23] = readl(dev->base_addr + DMA_CUR_RX_BUF_ADDR);
+ } else {
+ /* MAC registers */
+ for (i = 0; i < 55; i++)
+ reg_space[i] = readl(dev->base_addr + (i * 4));
+ /* DMA registers */
+ for (i = 0; i < 22; i++)
+ reg_space[i + 55] =
+ readl(dev->base_addr + (DMA_BUS_MODE + (i * 4)));
+ }
+
+ return;
+}
+
+int stmmac_ethtool_set_tx_csum(struct net_device *netdev, u32 data)
+{
+ if (data)
+ netdev->features |= NETIF_F_HW_CSUM;
+ else
+ netdev->features &= ~NETIF_F_HW_CSUM;
+
+ return 0;
+}
+
+u32 stmmac_ethtool_get_rx_csum(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ return priv->rx_csum;
+}
+
+static void
+stmmac_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct stmmac_priv *priv = netdev_priv(netdev);
+
+ spin_lock(&priv->lock);
+
+ pause->rx_pause = 0;
+ pause->tx_pause = 0;
+ pause->autoneg = priv->phydev->autoneg;
+
+ if (priv->flow_ctrl & FLOW_RX)
+ pause->rx_pause = 1;
+ if (priv->flow_ctrl & FLOW_TX)
+ pause->tx_pause = 1;
+
+ spin_unlock(&priv->lock);
+ return;
+}
+
+static int
+stmmac_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct stmmac_priv *priv = netdev_priv(netdev);
+ struct phy_device *phy = priv->phydev;
+ int new_pause = FLOW_OFF;
+ int ret = 0;
+
+ spin_lock(&priv->lock);
+
+ if (pause->rx_pause)
+ new_pause |= FLOW_RX;
+ if (pause->tx_pause)
+ new_pause |= FLOW_TX;
+
+ priv->flow_ctrl = new_pause;
+
+ if (phy->autoneg) {
+ if (netif_running(netdev)) {
+ struct ethtool_cmd cmd;
+ /* auto-negotiation automatically restarted */
+ cmd.cmd = ETHTOOL_NWAY_RST;
+ cmd.supported = phy->supported;
+ cmd.advertising = phy->advertising;
+ cmd.autoneg = phy->autoneg;
+ cmd.speed = phy->speed;
+ cmd.duplex = phy->duplex;
+ cmd.phy_address = phy->addr;
+ ret = phy_ethtool_sset(phy, &cmd);
+ }
+ } else {
+ unsigned long ioaddr = netdev->base_addr;
+ priv->mac_type->ops->flow_ctrl(ioaddr, phy->duplex,
+ priv->flow_ctrl, priv->pause);
+ }
+ spin_unlock(&priv->lock);
+ return ret;
+}
+
+static void stmmac_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *dummy, u64 *data)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ int i;
+
+ /* Update HW stats if supported */
+ priv->mac_type->ops->dma_diagnostic_fr(&dev->stats, &priv->xstats,
+ ioaddr);
+
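+	/* Each entry in the stat table is fetched from stmmac_priv via its
+	 * precomputed byte offset and widened to u64 for ethtool. */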
+ for (i = 0; i < STMMAC_STATS_LEN; i++) {
+ char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
+ data[i] = (stmmac_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
+ }
+
+ return;
+}
+
+static int stmmac_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return STMMAC_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ int i;
+ u8 *p = data;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < STMMAC_STATS_LEN; i++) {
+ memcpy(p, stmmac_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+ return;
+}
+
+/* Currently only support WOL through Magic packet. */
+static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ spin_lock_irq(&priv->lock);
+ if (priv->wolenabled == PMT_SUPPORTED) {
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = priv->wolopts;
+ }
+ spin_unlock_irq(&priv->lock);
+}
+
+static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 support = WAKE_MAGIC;
+
+ if (priv->wolenabled == PMT_NOT_SUPPORTED)
+ return -EINVAL;
+
+ if (wol->wolopts & ~support)
+ return -EINVAL;
+
+ if (wol->wolopts == 0)
+ device_set_wakeup_enable(priv->device, 0);
+ else
+ device_set_wakeup_enable(priv->device, 1);
+
+ spin_lock_irq(&priv->lock);
+ priv->wolopts = wol->wolopts;
+ spin_unlock_irq(&priv->lock);
+
+ return 0;
+}
+
+static struct ethtool_ops stmmac_ethtool_ops = {
+ .begin = stmmac_check_if_running,
+ .get_drvinfo = stmmac_ethtool_getdrvinfo,
+ .get_settings = stmmac_ethtool_getsettings,
+ .set_settings = stmmac_ethtool_setsettings,
+ .get_msglevel = stmmac_ethtool_getmsglevel,
+ .set_msglevel = stmmac_ethtool_setmsglevel,
+ .get_regs = stmmac_ethtool_gregs,
+ .get_regs_len = stmmac_ethtool_get_regs_len,
+ .get_link = ethtool_op_get_link,
+ .get_rx_csum = stmmac_ethtool_get_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = stmmac_ethtool_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+ .get_pauseparam = stmmac_get_pauseparam,
+ .set_pauseparam = stmmac_set_pauseparam,
+ .get_ethtool_stats = stmmac_get_ethtool_stats,
+ .get_strings = stmmac_get_strings,
+ .get_wol = stmmac_get_wol,
+ .set_wol = stmmac_set_wol,
+ .get_sset_count = stmmac_get_sset_count,
+#ifdef NETIF_F_TSO
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = ethtool_op_set_tso,
+#endif
+};
+
+void stmmac_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &stmmac_ethtool_ops);
+}
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
new file mode 100644
index 000000000000..c2f14dc9ba28
--- /dev/null
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -0,0 +1,2204 @@
+/*******************************************************************************
+ This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
+ ST Ethernet IPs are built around a Synopsys IP Core.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+
+ Documentation available at:
+ http://www.stlinux.com
+ Support available at:
+ https://bugzilla.stlinux.com/
+*******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/if_vlan.h>
+#include <linux/dma-mapping.h>
+#include <linux/stm/soc.h>
+#include "stmmac.h"
+
+#define STMMAC_RESOURCE_NAME "stmmaceth"
+#define PHY_RESOURCE_NAME "stmmacphy"
+
+#undef STMMAC_DEBUG
+/*#define STMMAC_DEBUG*/
+#ifdef STMMAC_DEBUG
+#define DBG(nlevel, klevel, fmt, args...) \
+ ((void)(netif_msg_##nlevel(priv) && \
+ printk(KERN_##klevel fmt, ## args)))
+#else
+#define DBG(nlevel, klevel, fmt, args...) do { } while (0)
+#endif
+
+#undef STMMAC_RX_DEBUG
+/*#define STMMAC_RX_DEBUG*/
+#ifdef STMMAC_RX_DEBUG
+#define RX_DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define RX_DBG(fmt, args...) do { } while (0)
+#endif
+
+#undef STMMAC_XMIT_DEBUG
+/*#define STMMAC_XMIT_DEBUG*/
+#ifdef STMMAC_XMIT_DEBUG
+#define TX_DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define TX_DBG(fmt, args...) do { } while (0)
+#endif
+
+#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
+#define JUMBO_LEN 9000
+
+/* Module parameters */
+#define TX_TIMEO 5000 /* default 5 seconds */
+static int watchdog = TX_TIMEO;
+module_param(watchdog, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds");
+
+static int debug = -1; /* -1: default, 0: no output, 16: all */
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Message Level (0: no output, 16: all)");
+
+static int phyaddr = -1;
+module_param(phyaddr, int, S_IRUGO);
+MODULE_PARM_DESC(phyaddr, "Physical device address");
+
+#define DMA_TX_SIZE 256
+static int dma_txsize = DMA_TX_SIZE;
+module_param(dma_txsize, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dma_txsize, "Number of descriptors in the TX list");
+
+#define DMA_RX_SIZE 256
+static int dma_rxsize = DMA_RX_SIZE;
+module_param(dma_rxsize, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dma_rxsize, "Number of descriptors in the RX list");
+
+static int flow_ctrl = FLOW_OFF;
+module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
+
+static int pause = PAUSE_TIME;
+module_param(pause, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(pause, "Flow Control Pause Time");
+
+#define TC_DEFAULT 64
+static int tc = TC_DEFAULT;
+module_param(tc, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tc, "DMA threshold control value");
+
+#define RX_NO_COALESCE 1 /* Always interrupt on completion */
+#define TX_NO_COALESCE -1 /* No moderation by default */
+
+/* Take care when tuning this parameter: consider both the
+ * hardware capability and the network stability/performance impact.
+ * Many tests showed that ~4ms latency seems to be good enough. */
+#ifdef CONFIG_STMMAC_TIMER
+#define DEFAULT_PERIODIC_RATE 256
+static int tmrate = DEFAULT_PERIODIC_RATE;
+module_param(tmrate, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tmrate, "External timer freq. (default: 256Hz)");
+#endif
+
+#define DMA_BUFFER_SIZE BUF_SIZE_2KiB
+static int buf_sz = DMA_BUFFER_SIZE;
+module_param(buf_sz, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(buf_sz, "DMA buffer size");
+
+/* In case of Giga ETH, we can enable/disable the COE for the
+ * transmit HW checksum computation.
+ * Note that, if tx csum is off in HW, SG will still be supported. */
+static int tx_coe = HW_CSUM;
+module_param(tx_coe, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tx_coe, "GMAC COE type 2 [on/off]");
+
+static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFUP |
+ NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
+
+static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
+static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev);
+
+/**
+ * stmmac_verify_args - verify the driver parameters.
+ * Description: it verifies if some wrong parameter is passed to the driver.
+ * Note that wrong parameters are replaced with the default values.
+ */
+static void stmmac_verify_args(void)
+{
+ if (unlikely(watchdog < 0))
+ watchdog = TX_TIMEO;
+ if (unlikely(dma_rxsize < 0))
+ dma_rxsize = DMA_RX_SIZE;
+ if (unlikely(dma_txsize < 0))
+ dma_txsize = DMA_TX_SIZE;
+ if (unlikely((buf_sz < DMA_BUFFER_SIZE) || (buf_sz > BUF_SIZE_16KiB)))
+ buf_sz = DMA_BUFFER_SIZE;
+ if (unlikely(flow_ctrl > 1))
+ flow_ctrl = FLOW_AUTO;
+ else if (likely(flow_ctrl < 0))
+ flow_ctrl = FLOW_OFF;
+ if (unlikely((pause < 0) || (pause > 0xffff)))
+ pause = PAUSE_TIME;
+
+ return;
+}
+
+#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG)
+static void print_pkt(unsigned char *buf, int len)
+{
+ int j;
+ pr_info("len = %d byte, buf addr: 0x%p", len, buf);
+ for (j = 0; j < len; j++) {
+ if ((j % 16) == 0)
+ pr_info("\n %03x:", j);
+ pr_info(" %02x", buf[j]);
+ }
+ pr_info("\n");
+ return;
+}
+#endif
+
+/* minimum number of free TX descriptors required to wake up TX process */
+#define STMMAC_TX_THRESH(x) (x->dma_tx_size/4)
+
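+/* Free TX descriptors left in the ring: dirty_tx trails cur_tx, so e.g.
+ * with dma_tx_size = 256, dirty_tx = 4 and cur_tx = 10 there are
+ * 249 free slots (illustrative). */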
+static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
+{
+ return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
+}
+
+/**
+ * stmmac_adjust_link
+ * @dev: net device structure
+ * Description: it adjusts the link parameters.
+ */
+static void stmmac_adjust_link(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ unsigned long ioaddr = dev->base_addr;
+ unsigned long flags;
+ int new_state = 0;
+ unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
+
+ if (phydev == NULL)
+ return;
+
+ DBG(probe, DEBUG, "stmmac_adjust_link: called. address %d link %d\n",
+ phydev->addr, phydev->link);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (phydev->link) {
+ u32 ctrl = readl(ioaddr + MAC_CTRL_REG);
+
+ /* Now we make sure that we can be in full duplex mode.
+ * If not, we operate in half-duplex mode. */
+ if (phydev->duplex != priv->oldduplex) {
+ new_state = 1;
+ if (!(phydev->duplex))
+ ctrl &= ~priv->mac_type->hw.link.duplex;
+ else
+ ctrl |= priv->mac_type->hw.link.duplex;
+ priv->oldduplex = phydev->duplex;
+ }
+ /* Flow Control operation */
+ if (phydev->pause)
+ priv->mac_type->ops->flow_ctrl(ioaddr, phydev->duplex,
+ fc, pause_time);
+
+ if (phydev->speed != priv->speed) {
+ new_state = 1;
+ switch (phydev->speed) {
+ case 1000:
+ if (likely(priv->is_gmac))
+ ctrl &= ~priv->mac_type->hw.link.port;
+ break;
+ case 100:
+ case 10:
+				if (priv->is_gmac) {
+					ctrl |= priv->mac_type->hw.link.port;
+					if (phydev->speed == SPEED_100)
+						ctrl |= priv->mac_type->hw.link.speed;
+					else
+						ctrl &= ~priv->mac_type->hw.link.speed;
+				} else {
+					ctrl &= ~priv->mac_type->hw.link.port;
+				}
+ priv->fix_mac_speed(priv->bsp_priv,
+ phydev->speed);
+ break;
+ default:
+ if (netif_msg_link(priv))
+ pr_warning("%s: Speed (%d) is not 10"
+ " or 100!\n", dev->name, phydev->speed);
+ break;
+ }
+
+ priv->speed = phydev->speed;
+ }
+
+ writel(ctrl, ioaddr + MAC_CTRL_REG);
+
+ if (!priv->oldlink) {
+ new_state = 1;
+ priv->oldlink = 1;
+ }
+ } else if (priv->oldlink) {
+ new_state = 1;
+ priv->oldlink = 0;
+ priv->speed = 0;
+ priv->oldduplex = -1;
+ }
+
+ if (new_state && netif_msg_link(priv))
+ phy_print_status(phydev);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n");
+}
+
+/**
+ * stmmac_init_phy - PHY initialization
+ * @dev: net device structure
+ * Description: it initializes the driver's PHY state, and attaches the PHY
+ * to the mac driver.
+ * Return value:
+ * 0 on success
+ */
+static int stmmac_init_phy(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev;
+ char phy_id[BUS_ID_SIZE]; /* PHY to connect */
+ char bus_id[BUS_ID_SIZE];
+
+ priv->oldlink = 0;
+ priv->speed = 0;
+ priv->oldduplex = -1;
+
+ if (priv->phy_addr == -1) {
+ /* We don't have a PHY, so do nothing */
+ return 0;
+ }
+
+ snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->bus_id);
+ snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, bus_id, priv->phy_addr);
+ pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
+
+ phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0,
+ priv->phy_interface);
+
+ if (IS_ERR(phydev)) {
+ pr_err("%s: Could not attach to PHY\n", dev->name);
+ return PTR_ERR(phydev);
+ }
+
+ /*
+ * Broken HW is sometimes missing the pull-up resistor on the
+ * MDIO line, which results in reads to non-existent devices returning
+ * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
+ * device as well.
+ * Note: phydev->phy_id is the result of reading the UID PHY registers.
+ */
+ if (phydev->phy_id == 0) {
+ phy_disconnect(phydev);
+ return -ENODEV;
+ }
+ pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)"
+ " Link = %d\n", dev->name, phydev->phy_id, phydev->link);
+
+ priv->phydev = phydev;
+
+ return 0;
+}
+
+static inline void stmmac_mac_enable_rx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + MAC_CTRL_REG);
+ value |= MAC_RNABLE_RX;
+	/* Set the RE (receive enable) bit in the MAC CTRL register. */
+ writel(value, ioaddr + MAC_CTRL_REG);
+}
+
+static inline void stmmac_mac_enable_tx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + MAC_CTRL_REG);
+ value |= MAC_ENABLE_TX;
+	/* Set the TE (transmit enable) bit in the MAC CTRL register. */
+ writel(value, ioaddr + MAC_CTRL_REG);
+}
+
+static inline void stmmac_mac_disable_rx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + MAC_CTRL_REG);
+ value &= ~MAC_RNABLE_RX;
+ writel(value, ioaddr + MAC_CTRL_REG);
+}
+
+static inline void stmmac_mac_disable_tx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + MAC_CTRL_REG);
+ value &= ~MAC_ENABLE_TX;
+ writel(value, ioaddr + MAC_CTRL_REG);
+}
+
+/**
+ * display_ring
+ * @p: pointer to the ring.
+ * @size: size of the ring.
+ * Description: display all the descriptors within the ring.
+ */
+static void display_ring(struct dma_desc *p, int size)
+{
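+	/* Debug overlay: 'a' packs the first two 32-bit descriptor words
+	 * (printed below as DES0/DES1), while b/c are the two buffer
+	 * pointers (printed as BUF1/BUF2). */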
+ struct tmp_s {
+ u64 a;
+ unsigned int b;
+ unsigned int c;
+ };
+ int i;
+ for (i = 0; i < size; i++) {
+ struct tmp_s *x = (struct tmp_s *)(p + i);
+ pr_info("\t%d [0x%x]: DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x",
+ i, (unsigned int)virt_to_phys(&p[i]),
+ (unsigned int)(x->a), (unsigned int)((x->a) >> 32),
+ x->b, x->c);
+ pr_info("\n");
+ }
+}
+
+/**
+ * init_dma_desc_rings - init the RX/TX descriptor rings
+ * @dev: net device structure
+ * Description: this function initializes the DMA RX/TX descriptors
+ * and allocates the socket buffers.
+ */
+static void init_dma_desc_rings(struct net_device *dev)
+{
+ int i;
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct sk_buff *skb;
+ unsigned int txsize = priv->dma_tx_size;
+ unsigned int rxsize = priv->dma_rx_size;
+ unsigned int bfsize = priv->dma_buf_sz;
+ int buff2_needed = 0;
+ int dis_ic = 0;
+
+#ifdef CONFIG_STMMAC_TIMER
+	/* When the external timer is used, disable RX interrupt on completion */
+ dis_ic = 1;
+#endif
+	/* Set the buffer size according to the MTU:
+	 * jumbo frames require bigger buffers.
+	 */
+ if (unlikely(dev->mtu >= BUF_SIZE_8KiB))
+ bfsize = BUF_SIZE_16KiB;
+ else if (unlikely(dev->mtu >= BUF_SIZE_4KiB))
+ bfsize = BUF_SIZE_8KiB;
+ else if (unlikely(dev->mtu >= BUF_SIZE_2KiB))
+ bfsize = BUF_SIZE_4KiB;
+ else if (unlikely(dev->mtu >= DMA_BUFFER_SIZE))
+ bfsize = BUF_SIZE_2KiB;
+ else
+ bfsize = DMA_BUFFER_SIZE;
+
+	/* If the MTU is above 8KiB, the second buffer pointer (des3) is used too */
+ if (bfsize >= BUF_SIZE_8KiB)
+ buff2_needed = 1;
+
+ DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n",
+ txsize, rxsize, bfsize);
+
+ priv->rx_skbuff_dma = kmalloc(rxsize * sizeof(dma_addr_t), GFP_KERNEL);
+ priv->rx_skbuff =
+ kmalloc(sizeof(struct sk_buff *) * rxsize, GFP_KERNEL);
+ priv->dma_rx =
+ (struct dma_desc *)dma_alloc_coherent(priv->device,
+ rxsize *
+ sizeof(struct dma_desc),
+ &priv->dma_rx_phy,
+ GFP_KERNEL);
+ priv->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * txsize,
+ GFP_KERNEL);
+ priv->dma_tx =
+ (struct dma_desc *)dma_alloc_coherent(priv->device,
+ txsize *
+ sizeof(struct dma_desc),
+ &priv->dma_tx_phy,
+ GFP_KERNEL);
+
+ if ((priv->dma_rx == NULL) || (priv->dma_tx == NULL)) {
+		pr_err("%s: ERROR allocating the DMA Tx/Rx desc\n", __func__);
+ return;
+ }
+
+ DBG(probe, INFO, "stmmac (%s) DMA desc rings: virt addr (Rx %p, "
+ "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
+ dev->name, priv->dma_rx, priv->dma_tx,
+ (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);
+
+ /* RX INITIALIZATION */
+ DBG(probe, INFO, "stmmac: SKB addresses:\n"
+ "skb\t\tskb data\tdma data\n");
+
+ for (i = 0; i < rxsize; i++) {
+ struct dma_desc *p = priv->dma_rx + i;
+
+ skb = netdev_alloc_skb_ip_align(dev, bfsize);
+ if (unlikely(skb == NULL)) {
+ pr_err("%s: Rx init fails; skb is NULL\n", __func__);
+ break;
+ }
+ priv->rx_skbuff[i] = skb;
+ priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
+ bfsize, DMA_FROM_DEVICE);
+
+ p->des2 = priv->rx_skbuff_dma[i];
+ if (unlikely(buff2_needed))
+ p->des3 = p->des2 + BUF_SIZE_8KiB;
+ DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
+ priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]);
+ }
+ priv->cur_rx = 0;
+ priv->dirty_rx = (unsigned int)(i - rxsize);
+ priv->dma_buf_sz = bfsize;
+ buf_sz = bfsize;
+
+ /* TX INITIALIZATION */
+ for (i = 0; i < txsize; i++) {
+ priv->tx_skbuff[i] = NULL;
+ priv->dma_tx[i].des2 = 0;
+ }
+ priv->dirty_tx = 0;
+ priv->cur_tx = 0;
+
+ /* Clear the Rx/Tx descriptors */
+ priv->mac_type->ops->init_rx_desc(priv->dma_rx, rxsize, dis_ic);
+ priv->mac_type->ops->init_tx_desc(priv->dma_tx, txsize);
+
+ if (netif_msg_hw(priv)) {
+ pr_info("RX descriptor ring:\n");
+ display_ring(priv->dma_rx, rxsize);
+ pr_info("TX descriptor ring:\n");
+ display_ring(priv->dma_tx, txsize);
+ }
+ return;
+}
+
+static void dma_free_rx_skbufs(struct stmmac_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->dma_rx_size; i++) {
+ if (priv->rx_skbuff[i]) {
+ dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
+ priv->dma_buf_sz, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(priv->rx_skbuff[i]);
+ }
+ priv->rx_skbuff[i] = NULL;
+ }
+ return;
+}
+
+static void dma_free_tx_skbufs(struct stmmac_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->dma_tx_size; i++) {
+ if (priv->tx_skbuff[i] != NULL) {
+ struct dma_desc *p = priv->dma_tx + i;
+ if (p->des2)
+ dma_unmap_single(priv->device, p->des2,
+ priv->mac_type->ops->get_tx_len(p),
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(priv->tx_skbuff[i]);
+ priv->tx_skbuff[i] = NULL;
+ }
+ }
+ return;
+}
+
+static void free_dma_desc_resources(struct stmmac_priv *priv)
+{
+ /* Release the DMA TX/RX socket buffers */
+ dma_free_rx_skbufs(priv);
+ dma_free_tx_skbufs(priv);
+
+ /* Free the region of consistent memory previously allocated for
+ * the DMA */
+ dma_free_coherent(priv->device,
+ priv->dma_tx_size * sizeof(struct dma_desc),
+ priv->dma_tx, priv->dma_tx_phy);
+ dma_free_coherent(priv->device,
+ priv->dma_rx_size * sizeof(struct dma_desc),
+ priv->dma_rx, priv->dma_rx_phy);
+ kfree(priv->rx_skbuff_dma);
+ kfree(priv->rx_skbuff);
+ kfree(priv->tx_skbuff);
+
+ return;
+}
+
+/**
+ * stmmac_dma_start_tx
+ * @ioaddr: device I/O address
+ * Description: this function starts the DMA tx process.
+ */
+static void stmmac_dma_start_tx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value |= DMA_CONTROL_ST;
+ writel(value, ioaddr + DMA_CONTROL);
+ return;
+}
+
+static void stmmac_dma_stop_tx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value &= ~DMA_CONTROL_ST;
+ writel(value, ioaddr + DMA_CONTROL);
+ return;
+}
+
+/**
+ * stmmac_dma_start_rx
+ * @ioaddr: device I/O address
+ * Description: this function starts the DMA rx process.
+ */
+static void stmmac_dma_start_rx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value |= DMA_CONTROL_SR;
+ writel(value, ioaddr + DMA_CONTROL);
+
+ return;
+}
+
+static void stmmac_dma_stop_rx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value &= ~DMA_CONTROL_SR;
+ writel(value, ioaddr + DMA_CONTROL);
+
+ return;
+}
+
+/**
+ * stmmac_dma_operation_mode - HW DMA operation mode
+ * @priv : pointer to the private device structure.
+ * Description: it sets the DMA operation mode: tx/rx DMA thresholds
+ * or Store-And-Forward capability. On GMAC it also decides whether the
+ * checksum offload engine (COE) is used for transmission.
+ */
+static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
+{
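+	/* Summary of the choices below: the 10/100 MAC always runs in
+	 * threshold mode with 'tc' and no HW checksum; the GMAC uses
+	 * Store-And-Forward plus HW checksum (COE) when the MTU is
+	 * standard and tx_coe is set, otherwise it keeps the 'tc'
+	 * threshold for TX and falls back to software checksumming. */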
+ if (!priv->is_gmac) {
+ /* MAC 10/100 */
+ priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc, 0);
+ priv->tx_coe = NO_HW_CSUM;
+ } else {
+ if ((priv->dev->mtu <= ETH_DATA_LEN) && (tx_coe)) {
+ priv->mac_type->ops->dma_mode(priv->dev->base_addr,
+ SF_DMA_MODE, SF_DMA_MODE);
+ tc = SF_DMA_MODE;
+ priv->tx_coe = HW_CSUM;
+ } else {
+ /* Checksum computation is performed in software. */
+ priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc,
+ SF_DMA_MODE);
+ priv->tx_coe = NO_HW_CSUM;
+ }
+ }
+ tx_coe = priv->tx_coe;
+
+ return;
+}
+
+#ifdef STMMAC_DEBUG
+/**
+ * show_tx_process_state
+ * @status: tx descriptor status field
+ * Description: it shows the Transmit Process State for CSR5[22:20]
+ */
+static void show_tx_process_state(unsigned int status)
+{
+ unsigned int state;
+ state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT;
+
+ switch (state) {
+ case 0:
+ pr_info("- TX (Stopped): Reset or Stop command\n");
+ break;
+ case 1:
+		pr_info("- TX (Running): Fetching the Tx desc\n");
+ break;
+ case 2:
+ pr_info("- TX (Running): Waiting for end of tx\n");
+ break;
+ case 3:
+ pr_info("- TX (Running): Reading the data "
+ "and queuing the data into the Tx buf\n");
+ break;
+ case 6:
+ pr_info("- TX (Suspended): Tx Buff Underflow "
+ "or an unavailable Transmit descriptor\n");
+ break;
+ case 7:
+ pr_info("- TX (Running): Closing Tx descriptor\n");
+ break;
+ default:
+ break;
+ }
+ return;
+}
+
+/**
+ * show_rx_process_state
+ * @status: rx descriptor status field
+ * Description: it shows the Receive Process State for CSR5[19:17]
+ */
+static void show_rx_process_state(unsigned int status)
+{
+ unsigned int state;
+ state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT;
+
+ switch (state) {
+ case 0:
+ pr_info("- RX (Stopped): Reset or Stop command\n");
+ break;
+ case 1:
+ pr_info("- RX (Running): Fetching the Rx desc\n");
+ break;
+ case 2:
+		pr_info("- RX (Running): Checking for end of pkt\n");
+ break;
+ case 3:
+ pr_info("- RX (Running): Waiting for Rx pkt\n");
+ break;
+ case 4:
+ pr_info("- RX (Suspended): Unavailable Rx buf\n");
+ break;
+ case 5:
+ pr_info("- RX (Running): Closing Rx descriptor\n");
+ break;
+ case 6:
+		pr_info("- RX (Running): Flushing the current frame"
+ " from the Rx buf\n");
+ break;
+ case 7:
+ pr_info("- RX (Running): Queuing the Rx frame"
+ " from the Rx buf into memory\n");
+ break;
+ default:
+ break;
+ }
+ return;
+}
+#endif
+
+/**
+ * stmmac_tx:
+ * @priv: private driver structure
+ * Description: it reclaims resources after transmission completes.
+ */
+static void stmmac_tx(struct stmmac_priv *priv)
+{
+ unsigned int txsize = priv->dma_tx_size;
+ unsigned long ioaddr = priv->dev->base_addr;
+
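+	/* Walk the ring from the last cleaned descriptor (dirty_tx) up to
+	 * the last queued one (cur_tx), stopping at the first descriptor
+	 * still owned by the DMA. */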
+ while (priv->dirty_tx != priv->cur_tx) {
+ int last;
+ unsigned int entry = priv->dirty_tx % txsize;
+ struct sk_buff *skb = priv->tx_skbuff[entry];
+ struct dma_desc *p = priv->dma_tx + entry;
+
+ /* Check if the descriptor is owned by the DMA. */
+ if (priv->mac_type->ops->get_tx_owner(p))
+ break;
+
+ /* Verify tx error by looking at the last segment */
+ last = priv->mac_type->ops->get_tx_ls(p);
+ if (likely(last)) {
+ int tx_error =
+ priv->mac_type->ops->tx_status(&priv->dev->stats,
+ &priv->xstats,
+ p, ioaddr);
+ if (likely(tx_error == 0)) {
+ priv->dev->stats.tx_packets++;
+ priv->xstats.tx_pkt_n++;
+ } else
+ priv->dev->stats.tx_errors++;
+ }
+ TX_DBG("%s: curr %d, dirty %d\n", __func__,
+ priv->cur_tx, priv->dirty_tx);
+
+ if (likely(p->des2))
+ dma_unmap_single(priv->device, p->des2,
+ priv->mac_type->ops->get_tx_len(p),
+ DMA_TO_DEVICE);
+ if (unlikely(p->des3))
+ p->des3 = 0;
+
+ if (likely(skb != NULL)) {
+ /*
+			 * If there is room in the recycle queue and the skb
+			 * is of a suitable size, put it back into the pool
+			 * for reuse; otherwise free it.
+ */
+ if ((skb_queue_len(&priv->rx_recycle) <
+ priv->dma_rx_size) &&
+ skb_recycle_check(skb, priv->dma_buf_sz))
+ __skb_queue_head(&priv->rx_recycle, skb);
+ else
+ dev_kfree_skb(skb);
+
+ priv->tx_skbuff[entry] = NULL;
+ }
+
+ priv->mac_type->ops->release_tx_desc(p);
+
+ entry = (++priv->dirty_tx) % txsize;
+ }
+ if (unlikely(netif_queue_stopped(priv->dev) &&
+ stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
+ netif_tx_lock(priv->dev);
+ if (netif_queue_stopped(priv->dev) &&
+ stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) {
+ TX_DBG("%s: restart transmit\n", __func__);
+ netif_wake_queue(priv->dev);
+ }
+ netif_tx_unlock(priv->dev);
+ }
+ return;
+}
+
+static inline void stmmac_enable_irq(struct stmmac_priv *priv)
+{
+#ifndef CONFIG_STMMAC_TIMER
+ writel(DMA_INTR_DEFAULT_MASK, priv->dev->base_addr + DMA_INTR_ENA);
+#else
+ priv->tm->timer_start(tmrate);
+#endif
+}
+
+static inline void stmmac_disable_irq(struct stmmac_priv *priv)
+{
+#ifndef CONFIG_STMMAC_TIMER
+ writel(0, priv->dev->base_addr + DMA_INTR_ENA);
+#else
+ priv->tm->timer_stop();
+#endif
+}
+
+static int stmmac_has_work(struct stmmac_priv *priv)
+{
+ unsigned int has_work = 0;
+ int rxret, tx_work = 0;
+
+ rxret = priv->mac_type->ops->get_rx_owner(priv->dma_rx +
+ (priv->cur_rx % priv->dma_rx_size));
+
+ if (priv->dirty_tx != priv->cur_tx)
+ tx_work = 1;
+
+ if (likely(!rxret || tx_work))
+ has_work = 1;
+
+ return has_work;
+}
+
+static inline void _stmmac_schedule(struct stmmac_priv *priv)
+{
+ if (likely(stmmac_has_work(priv))) {
+ stmmac_disable_irq(priv);
+ napi_schedule(&priv->napi);
+ }
+}
+
+#ifdef CONFIG_STMMAC_TIMER
+void stmmac_schedule(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ priv->xstats.sched_timer_n++;
+
+ _stmmac_schedule(priv);
+
+ return;
+}
+
+static void stmmac_no_timer_started(unsigned int x)
+{
+}
+
+static void stmmac_no_timer_stopped(void)
+{
+}
+#endif
+
+/**
+ * stmmac_tx_err:
+ * @priv: pointer to the private device structure
+ * Description: it cleans the descriptors and restarts the transmission
+ * in case of errors.
+ */
+static void stmmac_tx_err(struct stmmac_priv *priv)
+{
+ netif_stop_queue(priv->dev);
+
+ stmmac_dma_stop_tx(priv->dev->base_addr);
+ dma_free_tx_skbufs(priv);
+ priv->mac_type->ops->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
+ priv->dirty_tx = 0;
+ priv->cur_tx = 0;
+ stmmac_dma_start_tx(priv->dev->base_addr);
+
+ priv->dev->stats.tx_errors++;
+ netif_wake_queue(priv->dev);
+
+ return;
+}
+
+/**
+ * stmmac_dma_interrupt - Interrupt handler for the driver
+ * @dev: net device structure
+ * Description: Interrupt handler for the driver (DMA).
+ */
+static void stmmac_dma_interrupt(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ struct stmmac_priv *priv = netdev_priv(dev);
+ /* read the status register (CSR5) */
+ u32 intr_status = readl(ioaddr + DMA_STATUS);
+
+ DBG(intr, INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
+
+#ifdef STMMAC_DEBUG
+ /* It displays the DMA transmit process state (CSR5 register) */
+ if (netif_msg_tx_done(priv))
+ show_tx_process_state(intr_status);
+ if (netif_msg_rx_status(priv))
+ show_rx_process_state(intr_status);
+#endif
+ /* ABNORMAL interrupts */
+ if (unlikely(intr_status & DMA_STATUS_AIS)) {
+ DBG(intr, INFO, "CSR5[15] DMA ABNORMAL IRQ: ");
+ if (unlikely(intr_status & DMA_STATUS_UNF)) {
+ DBG(intr, INFO, "transmit underflow\n");
+ if (unlikely(tc != SF_DMA_MODE)
+ && (tc <= 256)) {
+ /* Try to bump up the threshold */
+ tc += 64;
+ priv->mac_type->ops->dma_mode(ioaddr, tc,
+ SF_DMA_MODE);
+ priv->xstats.threshold = tc;
+ }
+ stmmac_tx_err(priv);
+ priv->xstats.tx_undeflow_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_TJT)) {
+ DBG(intr, INFO, "transmit jabber\n");
+ priv->xstats.tx_jabber_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_OVF)) {
+ DBG(intr, INFO, "recv overflow\n");
+ priv->xstats.rx_overflow_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_RU)) {
+ DBG(intr, INFO, "receive buffer unavailable\n");
+ priv->xstats.rx_buf_unav_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_RPS)) {
+ DBG(intr, INFO, "receive process stopped\n");
+ priv->xstats.rx_process_stopped_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_RWT)) {
+ DBG(intr, INFO, "receive watchdog\n");
+ priv->xstats.rx_watchdog_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_ETI)) {
+ DBG(intr, INFO, "transmit early interrupt\n");
+ priv->xstats.tx_early_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_TPS)) {
+ DBG(intr, INFO, "transmit process stopped\n");
+ priv->xstats.tx_process_stopped_irq++;
+ stmmac_tx_err(priv);
+ }
+ if (unlikely(intr_status & DMA_STATUS_FBI)) {
+ DBG(intr, INFO, "fatal bus error\n");
+ priv->xstats.fatal_bus_error_irq++;
+ stmmac_tx_err(priv);
+ }
+ }
+
+ /* TX/RX NORMAL interrupts */
+ if (intr_status & DMA_STATUS_NIS) {
+ priv->xstats.normal_irq_n++;
+ if (likely((intr_status & DMA_STATUS_RI) ||
+ (intr_status & (DMA_STATUS_TI))))
+ _stmmac_schedule(priv);
+ }
+
+	/* Interrupts from the optional hardware blocks (GPI/GMI/GLI) should be disabled */
+ if (unlikely(intr_status &
+ (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
+ pr_info("%s: unexpected status %08x\n", __func__, intr_status);
+
+	/* Clear the interrupt by writing a logic 1 to CSR5[16:0] */
+ writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
+
+ DBG(intr, INFO, "\n\n");
+
+ return;
+}
+
+/**
+ * stmmac_open - open entry point of the driver
+ * @dev : pointer to the device structure.
+ * Description:
+ * This function is the open entry point of the driver.
+ * Return value:
+ * 0 on success; a negative errno value (as defined in errno.h) on failure.
+ */
+static int stmmac_open(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ int ret;
+
+	/* Check that the MAC address is valid. If it is not, use a
+	 * randomly generated one so the interface can still come up;
+	 * the user can set a real address later, e.g. with:
+	 * ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ random_ether_addr(dev->dev_addr);
+ pr_warning("%s: generated random MAC address %pM\n", dev->name,
+ dev->dev_addr);
+ }
+
+ stmmac_verify_args();
+
+ ret = stmmac_init_phy(dev);
+ if (unlikely(ret)) {
+ pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
+ return ret;
+ }
+
+ /* Request the IRQ lines */
+ ret = request_irq(dev->irq, &stmmac_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (unlikely(ret < 0)) {
+ pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
+ __func__, dev->irq, ret);
+ return ret;
+ }
+
+#ifdef CONFIG_STMMAC_TIMER
+	priv->tm = kmalloc(sizeof(struct stmmac_timer), GFP_KERNEL);
+ if (unlikely(priv->tm == NULL)) {
+		pr_err("%s: ERROR: timer memory alloc failed\n", __func__);
+ return -ENOMEM;
+ }
+ priv->tm->freq = tmrate;
+
+ /* Test if the HW timer can be actually used.
+ * In case of failure continue with no timer. */
+ if (unlikely((stmmac_open_ext_timer(dev, priv->tm)) < 0)) {
+ pr_warning("stmmaceth: cannot attach the HW timer\n");
+ tmrate = 0;
+ priv->tm->freq = 0;
+ priv->tm->timer_start = stmmac_no_timer_started;
+ priv->tm->timer_stop = stmmac_no_timer_stopped;
+ }
+#endif
+
+ /* Create and initialize the TX/RX descriptors chains. */
+ priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
+ priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
+ priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
+ init_dma_desc_rings(dev);
+
+ /* DMA initialization and SW reset */
+ if (unlikely(priv->mac_type->ops->dma_init(ioaddr,
+ priv->pbl, priv->dma_tx_phy, priv->dma_rx_phy) < 0)) {
+
+ pr_err("%s: DMA initialization failed\n", __func__);
+ return -1;
+ }
+
+ /* Copy the MAC addr into the HW */
+ priv->mac_type->ops->set_umac_addr(ioaddr, dev->dev_addr, 0);
+ /* Initialize the MAC Core */
+ priv->mac_type->ops->core_init(ioaddr);
+
+ priv->shutdown = 0;
+
+ /* Initialise the MMC (if present) to disable all interrupts. */
+ writel(0xffffffff, ioaddr + MMC_HIGH_INTR_MASK);
+ writel(0xffffffff, ioaddr + MMC_LOW_INTR_MASK);
+
+ /* Enable the MAC Rx/Tx */
+ stmmac_mac_enable_rx(ioaddr);
+ stmmac_mac_enable_tx(ioaddr);
+
+ /* Set the HW DMA mode and the COE */
+ stmmac_dma_operation_mode(priv);
+
+ /* Extra statistics */
+ memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
+ priv->xstats.threshold = tc;
+
+ /* Start the ball rolling... */
+ DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
+ stmmac_dma_start_tx(ioaddr);
+ stmmac_dma_start_rx(ioaddr);
+
+#ifdef CONFIG_STMMAC_TIMER
+ priv->tm->timer_start(tmrate);
+#endif
+ /* Dump DMA/MAC registers */
+ if (netif_msg_hw(priv)) {
+ priv->mac_type->ops->dump_mac_regs(ioaddr);
+ priv->mac_type->ops->dump_dma_regs(ioaddr);
+ }
+
+ if (priv->phydev)
+ phy_start(priv->phydev);
+
+ napi_enable(&priv->napi);
+ skb_queue_head_init(&priv->rx_recycle);
+ netif_start_queue(dev);
+ return 0;
+}
+
+/**
+ * stmmac_release - close entry point of the driver
+ * @dev : device pointer.
+ * Description:
+ * This is the stop entry point of the driver.
+ */
+static int stmmac_release(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ /* Stop and disconnect the PHY */
+ if (priv->phydev) {
+ phy_stop(priv->phydev);
+ phy_disconnect(priv->phydev);
+ priv->phydev = NULL;
+ }
+
+ netif_stop_queue(dev);
+
+#ifdef CONFIG_STMMAC_TIMER
+ /* Stop and release the timer */
+ stmmac_close_ext_timer();
+ if (priv->tm != NULL)
+ kfree(priv->tm);
+#endif
+ napi_disable(&priv->napi);
+ skb_queue_purge(&priv->rx_recycle);
+
+ /* Free the IRQ lines */
+ free_irq(dev->irq, dev);
+
+ /* Stop TX/RX DMA and clear the descriptors */
+ stmmac_dma_stop_tx(dev->base_addr);
+ stmmac_dma_stop_rx(dev->base_addr);
+
+ /* Release and free the Rx/Tx resources */
+ free_dma_desc_resources(priv);
+
+ /* Disable the MAC core */
+ stmmac_mac_disable_tx(dev->base_addr);
+ stmmac_mac_disable_rx(dev->base_addr);
+
+ netif_carrier_off(dev);
+
+ return 0;
+}
+
+/*
+ * Software fallback for TSO: segment the skb via GSO and transmit each
+ * resulting segment through stmmac_xmit().
+ */
+static int stmmac_sw_tso(struct stmmac_priv *priv, struct sk_buff *skb)
+{
+ struct sk_buff *segs, *curr_skb;
+ int gso_segs = skb_shinfo(skb)->gso_segs;
+
+ /* Estimate the number of fragments in the worst case */
+ if (unlikely(stmmac_tx_avail(priv) < gso_segs)) {
+ netif_stop_queue(priv->dev);
+ TX_DBG(KERN_ERR "%s: TSO BUG! Tx Ring full when queue awake\n",
+ __func__);
+ if (stmmac_tx_avail(priv) < gso_segs)
+ return NETDEV_TX_BUSY;
+
+ netif_wake_queue(priv->dev);
+ }
+ TX_DBG("\tstmmac_sw_tso: segmenting: skb %p (len %d)\n",
+ skb, skb->len);
+
+ segs = skb_gso_segment(skb, priv->dev->features & ~NETIF_F_TSO);
+ if (unlikely(IS_ERR(segs)))
+ goto sw_tso_end;
+
+ do {
+ curr_skb = segs;
+ segs = segs->next;
+ TX_DBG("\t\tcurrent skb->len: %d, *curr %p,"
+ "*next %p\n", curr_skb->len, curr_skb, segs);
+ curr_skb->next = NULL;
+ stmmac_xmit(curr_skb, priv->dev);
+ } while (segs);
+
+sw_tso_end:
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
+ struct net_device *dev,
+ int csum_insertion)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ unsigned int nopaged_len = skb_headlen(skb);
+ unsigned int txsize = priv->dma_tx_size;
+ unsigned int entry = priv->cur_tx % txsize;
+ struct dma_desc *desc = priv->dma_tx + entry;
+
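+	/* The linear data is addressed through the des2/des3 buffer
+	 * pointers; if it exceeds 8KiB, a second descriptor carries the
+	 * remainder. */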
+ if (nopaged_len > BUF_SIZE_8KiB) {
+
+ int buf2_size = nopaged_len - BUF_SIZE_8KiB;
+
+ desc->des2 = dma_map_single(priv->device, skb->data,
+ BUF_SIZE_8KiB, DMA_TO_DEVICE);
+ desc->des3 = desc->des2 + BUF_SIZE_4KiB;
+ priv->mac_type->ops->prepare_tx_desc(desc, 1, BUF_SIZE_8KiB,
+ csum_insertion);
+
+ entry = (++priv->cur_tx) % txsize;
+ desc = priv->dma_tx + entry;
+
+ desc->des2 = dma_map_single(priv->device,
+ skb->data + BUF_SIZE_8KiB,
+ buf2_size, DMA_TO_DEVICE);
+ desc->des3 = desc->des2 + BUF_SIZE_4KiB;
+ priv->mac_type->ops->prepare_tx_desc(desc, 0,
+ buf2_size, csum_insertion);
+ priv->mac_type->ops->set_tx_owner(desc);
+ priv->tx_skbuff[entry] = NULL;
+ } else {
+ desc->des2 = dma_map_single(priv->device, skb->data,
+ nopaged_len, DMA_TO_DEVICE);
+ desc->des3 = desc->des2 + BUF_SIZE_4KiB;
+ priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len,
+ csum_insertion);
+ }
+ return entry;
+}
+
+/**
+ * stmmac_xmit:
+ * @skb : the socket buffer
+ * @dev : device pointer
+ * Description : Tx entry point of the driver.
+ */
+static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ unsigned int txsize = priv->dma_tx_size;
+ unsigned int entry;
+ int i, csum_insertion = 0;
+ int nfrags = skb_shinfo(skb)->nr_frags;
+ struct dma_desc *desc, *first;
+
+ if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
+ if (!netif_queue_stopped(dev)) {
+ netif_stop_queue(dev);
+ /* This is a hard error, log it. */
+ pr_err("%s: BUG! Tx Ring full when queue awake\n",
+ __func__);
+ }
+ return NETDEV_TX_BUSY;
+ }
+
+ entry = priv->cur_tx % txsize;
+
+#ifdef STMMAC_XMIT_DEBUG
+ if ((skb->len > ETH_FRAME_LEN) || nfrags)
+ pr_info("stmmac xmit:\n"
+ "\tskb addr %p - len: %d - nopaged_len: %d\n"
+ "\tn_frags: %d - ip_summed: %d - %s gso\n",
+ skb, skb->len, skb_headlen(skb), nfrags, skb->ip_summed,
+ !skb_is_gso(skb) ? "isn't" : "is");
+#endif
+
+ if (unlikely(skb_is_gso(skb)))
+ return stmmac_sw_tso(priv, skb);
+
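+	/* Insert the checksum in hardware only when the COE is available
+	 * (tx_coe); otherwise compute it in software via
+	 * skb_checksum_help(). */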
+ if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) {
+ if (likely(priv->tx_coe == NO_HW_CSUM))
+ skb_checksum_help(skb);
+ else
+ csum_insertion = 1;
+ }
+
+ desc = priv->dma_tx + entry;
+ first = desc;
+
+#ifdef STMMAC_XMIT_DEBUG
+ if ((nfrags > 0) || (skb->len > ETH_FRAME_LEN))
+ pr_debug("stmmac xmit: skb len: %d, nopaged_len: %d,\n"
+ "\t\tn_frags: %d, ip_summed: %d\n",
+ skb->len, skb_headlen(skb), nfrags, skb->ip_summed);
+#endif
+ priv->tx_skbuff[entry] = skb;
+ if (unlikely(skb->len >= BUF_SIZE_4KiB)) {
+ entry = stmmac_handle_jumbo_frames(skb, dev, csum_insertion);
+ desc = priv->dma_tx + entry;
+ } else {
+ unsigned int nopaged_len = skb_headlen(skb);
+ desc->des2 = dma_map_single(priv->device, skb->data,
+ nopaged_len, DMA_TO_DEVICE);
+ priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len,
+ csum_insertion);
+ }
+
+ for (i = 0; i < nfrags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ int len = frag->size;
+
+ entry = (++priv->cur_tx) % txsize;
+ desc = priv->dma_tx + entry;
+
+ TX_DBG("\t[entry %d] segment len: %d\n", entry, len);
+ desc->des2 = dma_map_page(priv->device, frag->page,
+ frag->page_offset,
+ len, DMA_TO_DEVICE);
+ priv->tx_skbuff[entry] = NULL;
+ priv->mac_type->ops->prepare_tx_desc(desc, 0, len,
+ csum_insertion);
+ priv->mac_type->ops->set_tx_owner(desc);
+ }
+
+	/* Interrupt on completion only for the latest segment */
+ priv->mac_type->ops->close_tx_desc(desc);
+#ifdef CONFIG_STMMAC_TIMER
+ /* Clean IC while using timers */
+ priv->mac_type->ops->clear_tx_ic(desc);
+#endif
+	/* To avoid a race condition, the owner bit of the first descriptor
+	 * is set only after all the following segments have been prepared. */
+ priv->mac_type->ops->set_tx_owner(first);
+
+ priv->cur_tx++;
+
+#ifdef STMMAC_XMIT_DEBUG
+ if (netif_msg_pktdata(priv)) {
+ pr_info("stmmac xmit: current=%d, dirty=%d, entry=%d, "
+ "first=%p, nfrags=%d\n",
+ (priv->cur_tx % txsize), (priv->dirty_tx % txsize),
+ entry, first, nfrags);
+ display_ring(priv->dma_tx, txsize);
+ pr_info(">>> frame to be transmitted: ");
+ print_pkt(skb->data, skb->len);
+ }
+#endif
+ if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
+ TX_DBG("%s: stop transmitted packets\n", __func__);
+ netif_stop_queue(dev);
+ }
+
+ dev->stats.tx_bytes += skb->len;
+
+ /* CSR1 enables the transmit DMA to check for new descriptor */
+ writel(1, dev->base_addr + DMA_XMT_POLL_DEMAND);
+
+ return NETDEV_TX_OK;
+}
+
+static inline void stmmac_rx_refill(struct stmmac_priv *priv)
+{
+ unsigned int rxsize = priv->dma_rx_size;
+ int bfsize = priv->dma_buf_sz;
+ struct dma_desc *p = priv->dma_rx;
+
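+	/* Re-arm every descriptor consumed by stmmac_rx(): reuse an skb
+	 * from the recycle pool when possible, otherwise allocate a new
+	 * one, then give the descriptor back to the DMA. */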
+ for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
+ unsigned int entry = priv->dirty_rx % rxsize;
+ if (likely(priv->rx_skbuff[entry] == NULL)) {
+ struct sk_buff *skb;
+
+ skb = __skb_dequeue(&priv->rx_recycle);
+ if (skb == NULL)
+ skb = netdev_alloc_skb_ip_align(priv->dev,
+ bfsize);
+
+ if (unlikely(skb == NULL))
+ break;
+
+ priv->rx_skbuff[entry] = skb;
+ priv->rx_skbuff_dma[entry] =
+ dma_map_single(priv->device, skb->data, bfsize,
+ DMA_FROM_DEVICE);
+
+ (p + entry)->des2 = priv->rx_skbuff_dma[entry];
+ if (unlikely(priv->is_gmac)) {
+ if (bfsize >= BUF_SIZE_8KiB)
+ (p + entry)->des3 =
+ (p + entry)->des2 + BUF_SIZE_8KiB;
+ }
+ RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
+ }
+ priv->mac_type->ops->set_rx_owner(p + entry);
+ }
+ return;
+}
+
+static int stmmac_rx(struct stmmac_priv *priv, int limit)
+{
+ unsigned int rxsize = priv->dma_rx_size;
+ unsigned int entry = priv->cur_rx % rxsize;
+ unsigned int next_entry;
+ unsigned int count = 0;
+ struct dma_desc *p = priv->dma_rx + entry;
+ struct dma_desc *p_next;
+
+#ifdef STMMAC_RX_DEBUG
+ if (netif_msg_hw(priv)) {
+ pr_debug(">>> stmmac_rx: descriptor ring:\n");
+ display_ring(priv->dma_rx, rxsize);
+ }
+#endif
+ count = 0;
+ while (!priv->mac_type->ops->get_rx_owner(p)) {
+ int status;
+
+ if (count >= limit)
+ break;
+
+ count++;
+
+ next_entry = (++priv->cur_rx) % rxsize;
+ p_next = priv->dma_rx + next_entry;
+ prefetch(p_next);
+
+ /* read the status of the incoming frame */
+ status = (priv->mac_type->ops->rx_status(&priv->dev->stats,
+ &priv->xstats, p));
+ if (unlikely(status == discard_frame))
+ priv->dev->stats.rx_errors++;
+ else {
+ struct sk_buff *skb;
+ /* Length should omit the CRC */
+ int frame_len =
+ priv->mac_type->ops->get_rx_frame_len(p) - 4;
+
+#ifdef STMMAC_RX_DEBUG
+ if (frame_len > ETH_FRAME_LEN)
+ pr_debug("\tRX frame size %d, COE status: %d\n",
+ frame_len, status);
+
+ if (netif_msg_hw(priv))
+ pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
+ p, entry, p->des2);
+#endif
+ skb = priv->rx_skbuff[entry];
+ if (unlikely(!skb)) {
+ pr_err("%s: Inconsistent Rx descriptor chain\n",
+ priv->dev->name);
+ priv->dev->stats.rx_dropped++;
+ break;
+ }
+ prefetch(skb->data - NET_IP_ALIGN);
+ priv->rx_skbuff[entry] = NULL;
+
+ skb_put(skb, frame_len);
+ dma_unmap_single(priv->device,
+ priv->rx_skbuff_dma[entry],
+ priv->dma_buf_sz, DMA_FROM_DEVICE);
+#ifdef STMMAC_RX_DEBUG
+ if (netif_msg_pktdata(priv)) {
+ pr_info(" frame received (%dbytes)", frame_len);
+ print_pkt(skb->data, frame_len);
+ }
+#endif
+ skb->protocol = eth_type_trans(skb, priv->dev);
+
+ if (unlikely(status == csum_none)) {
+ /* always for the old mac 10/100 */
+ skb->ip_summed = CHECKSUM_NONE;
+ netif_receive_skb(skb);
+ } else {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ napi_gro_receive(&priv->napi, skb);
+ }
+
+ priv->dev->stats.rx_packets++;
+ priv->dev->stats.rx_bytes += frame_len;
+ priv->dev->last_rx = jiffies;
+ }
+ entry = next_entry;
+ p = p_next; /* use prefetched values */
+ }
+
+ stmmac_rx_refill(priv);
+
+ priv->xstats.rx_pkt_n += count;
+
+ return count;
+}
+
+/**
+ * stmmac_poll - stmmac poll method (NAPI)
+ * @napi : pointer to the napi structure.
+ * @budget : maximum number of packets that the current CPU can receive from
+ * all interfaces.
+ * Description :
+ * This function implements the reception process.
+ * It also runs the TX completion task.
+ */
+static int stmmac_poll(struct napi_struct *napi, int budget)
+{
+ struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
+ int work_done = 0;
+
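+	/* NAPI flow: reclaim completed TX descriptors first, then process
+	 * up to 'budget' received frames; if less than the budget was
+	 * consumed, complete NAPI and re-enable the DMA interrupts (or
+	 * restart the external timer when CONFIG_STMMAC_TIMER is set). */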
+ priv->xstats.poll_n++;
+ stmmac_tx(priv);
+ work_done = stmmac_rx(priv, budget);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ stmmac_enable_irq(priv);
+ }
+ return work_done;
+}
+
+/**
+ * stmmac_tx_timeout
+ * @dev : Pointer to net device structure
+ * Description: this function is called when a packet transmission fails to
+ * complete within a reasonable time. The driver will mark the error in the
+ * netdev structure and arrange for the device to be reset to a sane state
+ * in order to transmit a new packet.
+ */
+static void stmmac_tx_timeout(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ /* Clear Tx resources and restart transmitting again */
+ stmmac_tx_err(priv);
+ return;
+}
+
+/* Configuration changes (passed on by ifconfig) */
+static int stmmac_config(struct net_device *dev, struct ifmap *map)
+{
+ if (dev->flags & IFF_UP) /* can't act on a running interface */
+ return -EBUSY;
+
+ /* Don't allow changing the I/O address */
+ if (map->base_addr != dev->base_addr) {
+ pr_warning("%s: can't change I/O address\n", dev->name);
+ return -EOPNOTSUPP;
+ }
+
+ /* Don't allow changing the IRQ */
+ if (map->irq != dev->irq) {
+ pr_warning("%s: can't change IRQ number %d\n",
+ dev->name, dev->irq);
+ return -EOPNOTSUPP;
+ }
+
+ /* ignore other fields */
+ return 0;
+}
+
+/**
+ * stmmac_multicast_list - entry point for multicast addressing
+ * @dev : pointer to the device structure
+ * Description:
+ * This function is a driver entry point which gets called by the kernel
+ * whenever multicast addresses must be enabled/disabled.
+ * Return value:
+ * void.
+ */
+static void stmmac_multicast_list(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ spin_lock(&priv->lock);
+ priv->mac_type->ops->set_filter(dev);
+ spin_unlock(&priv->lock);
+ return;
+}
+
+/**
+ * stmmac_change_mtu - entry point to change MTU size for the device.
+ * @dev : device pointer.
+ * @new_mtu : the new MTU size for the device.
+ * Description: the Maximum Transfer Unit (MTU) is used by the network layer
+ * to drive packet transmission. Ethernet has an MTU of 1500 octets
+ * (ETH_DATA_LEN). This value can be changed with ifconfig.
+ * Return value:
+ * 0 on success; a negative errno value (as defined in errno.h) on failure.
+ */
+static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int max_mtu;
+
+ if (netif_running(dev)) {
+ pr_err("%s: must be stopped to change its MTU\n", dev->name);
+ return -EBUSY;
+ }
+
+ if (priv->is_gmac)
+ max_mtu = JUMBO_LEN;
+ else
+ max_mtu = ETH_DATA_LEN;
+
+ if ((new_mtu < 46) || (new_mtu > max_mtu)) {
+ pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu);
+ return -EINVAL;
+ }
+
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
+static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (unlikely(!dev)) {
+ pr_err("%s: invalid dev pointer\n", __func__);
+ return IRQ_NONE;
+ }
+
+ if (priv->is_gmac) {
+ unsigned long ioaddr = dev->base_addr;
+ /* To handle GMAC own interrupts */
+ priv->mac_type->ops->host_irq_status(ioaddr);
+ }
+ stmmac_dma_interrupt(dev);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Polling receive - used by NETCONSOLE and other diagnostic tools
+ * to allow network I/O with interrupts disabled. */
+static void stmmac_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ stmmac_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+/**
+ * stmmac_ioctl - Entry point for the Ioctl
+ * @dev: Device pointer.
+ * @rq: An IOCTL-specific structure that can contain a pointer to
+ * a proprietary structure used to pass information to the driver.
+ * @cmd: IOCTL command
+ * Description:
+ * Currently no special functionality is supported in IOCTL; only
+ * phy_mii_ioctl(...) can be invoked.
+ */
+static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ if (!priv->phydev)
+ return -EINVAL;
+
+ spin_lock(&priv->lock);
+ ret = phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
+ spin_unlock(&priv->lock);
+ default:
+ break;
+ }
+ return ret;
+}
+
+#ifdef STMMAC_VLAN_TAG_USED
+static void stmmac_vlan_rx_register(struct net_device *dev,
+ struct vlan_group *grp)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ DBG(probe, INFO, "%s: Setting vlgrp to %p\n", dev->name, grp);
+
+ spin_lock(&priv->lock);
+ priv->vlgrp = grp;
+ spin_unlock(&priv->lock);
+
+ return;
+}
+#endif
+
+static const struct net_device_ops stmmac_netdev_ops = {
+ .ndo_open = stmmac_open,
+ .ndo_start_xmit = stmmac_xmit,
+ .ndo_stop = stmmac_release,
+ .ndo_change_mtu = stmmac_change_mtu,
+ .ndo_set_multicast_list = stmmac_multicast_list,
+ .ndo_tx_timeout = stmmac_tx_timeout,
+ .ndo_do_ioctl = stmmac_ioctl,
+ .ndo_set_config = stmmac_config,
+#ifdef STMMAC_VLAN_TAG_USED
+ .ndo_vlan_rx_register = stmmac_vlan_rx_register,
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = stmmac_poll_controller,
+#endif
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+/**
+ * stmmac_probe - Initialization of the adapter.
+ * @dev : device pointer
+ * Description: The function initializes the network device structure for
+ * the STMMAC driver. It also calls the low level routines
+ * in order to init the HW (i.e. the DMA engine)
+ */
+static int stmmac_probe(struct net_device *dev)
+{
+ int ret = 0;
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ ether_setup(dev);
+
+ dev->netdev_ops = &stmmac_netdev_ops;
+ stmmac_set_ethtool_ops(dev);
+
+ dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA);
+ dev->watchdog_timeo = msecs_to_jiffies(watchdog);
+#ifdef STMMAC_VLAN_TAG_USED
+ /* Both mac100 and gmac support receive VLAN tag detection */
+ dev->features |= NETIF_F_HW_VLAN_RX;
+#endif
+ priv->msg_enable = netif_msg_init(debug, default_msg_level);
+
+ if (priv->is_gmac)
+ priv->rx_csum = 1;
+
+ if (flow_ctrl)
+ priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
+
+ priv->pause = pause;
+ netif_napi_add(dev, &priv->napi, stmmac_poll, 64);
+
+ /* Get the MAC address */
+ priv->mac_type->ops->get_umac_addr(dev->base_addr, dev->dev_addr, 0);
+
+ if (!is_valid_ether_addr(dev->dev_addr))
+		pr_warning("\tno valid MAC address; "
+			   "please, use ifconfig or nwhwconfig!\n");
+
+ ret = register_netdev(dev);
+ if (ret) {
+ pr_err("%s: ERROR %i registering the device\n",
+ __func__, ret);
+ return -ENODEV;
+ }
+
+ DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n",
+ dev->name, (dev->features & NETIF_F_SG) ? "on" : "off",
+ (dev->features & NETIF_F_HW_CSUM) ? "on" : "off");
+
+ spin_lock_init(&priv->lock);
+
+ return ret;
+}
+
+/**
+ * stmmac_mac_device_setup
+ * @dev : device pointer
+ * Description: select and initialise the mac device (mac100 or Gmac).
+ */
+static int stmmac_mac_device_setup(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+
+ struct mac_device_info *device;
+
+ if (priv->is_gmac)
+ device = gmac_setup(ioaddr);
+ else
+ device = mac100_setup(ioaddr);
+
+ if (!device)
+ return -ENOMEM;
+
+ priv->mac_type = device;
+
+ priv->wolenabled = priv->mac_type->hw.pmt; /* PMT supported */
+ if (priv->wolenabled == PMT_SUPPORTED)
+ priv->wolopts = WAKE_MAGIC; /* Magic Frame */
+
+ return 0;
+}
+
+static int stmmacphy_dvr_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacphy_data *plat_dat;
+ plat_dat = (struct plat_stmmacphy_data *)((pdev->dev).platform_data);
+
+ pr_debug("stmmacphy_dvr_probe: added phy for bus %d\n",
+ plat_dat->bus_id);
+
+ return 0;
+}
+
+static int stmmacphy_dvr_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver stmmacphy_driver = {
+ .driver = {
+ .name = PHY_RESOURCE_NAME,
+ },
+ .probe = stmmacphy_dvr_probe,
+ .remove = stmmacphy_dvr_remove,
+};
+
+/**
+ * stmmac_associate_phy
+ * @dev: pointer to device structure
+ * @data: points to the private structure.
+ * Description: Scans through all the PHYs we have registered and checks if
+ * any are associated with our MAC. If so, then just fill in
+ * the blanks in our local context structure
+ */
+static int stmmac_associate_phy(struct device *dev, void *data)
+{
+ struct stmmac_priv *priv = (struct stmmac_priv *)data;
+ struct plat_stmmacphy_data *plat_dat;
+
+ plat_dat = (struct plat_stmmacphy_data *)(dev->platform_data);
+
+ DBG(probe, DEBUG, "%s: checking phy for bus %d\n", __func__,
+ plat_dat->bus_id);
+
+ /* Check that this phy is for the MAC being initialised */
+ if (priv->bus_id != plat_dat->bus_id)
+ return 0;
+
+ /* OK, this PHY is connected to the MAC.
+ Go ahead and get the parameters */
+ DBG(probe, DEBUG, "%s: OK. Found PHY config\n", __func__);
+ priv->phy_irq =
+ platform_get_irq_byname(to_platform_device(dev), "phyirq");
+ DBG(probe, DEBUG, "%s: PHY irq on bus %d is %d\n", __func__,
+ plat_dat->bus_id, priv->phy_irq);
+
+ /* Override with kernel parameters if supplied XXX CRS XXX
+ * this needs to have multiple instances */
+ if ((phyaddr >= 0) && (phyaddr <= 31))
+ plat_dat->phy_addr = phyaddr;
+
+ priv->phy_addr = plat_dat->phy_addr;
+ priv->phy_mask = plat_dat->phy_mask;
+ priv->phy_interface = plat_dat->interface;
+ priv->phy_reset = plat_dat->phy_reset;
+
+ DBG(probe, DEBUG, "%s: exiting\n", __func__);
+ return 1; /* forces exit of driver_for_each_device() */
+}
+
+/**
+ * stmmac_dvr_probe
+ * @pdev: platform device pointer
+ * Description: the driver is initialized through platform_device.
+ */
+static int stmmac_dvr_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct resource *res;
+ unsigned int *addr = NULL;
+ struct net_device *ndev = NULL;
+ struct stmmac_priv *priv;
+ struct plat_stmmacenet_data *plat_dat;
+
+ pr_info("STMMAC driver:\n\tplatform registration... ");
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENODEV;
+ goto out;
+ }
+ pr_info("done!\n");
+
+ if (!request_mem_region(res->start, (res->end - res->start),
+ pdev->name)) {
+		pr_err("%s: ERROR: memory allocation failed; "
+		       "cannot get the I/O addr 0x%x\n",
+ __func__, (unsigned int)res->start);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ addr = ioremap(res->start, (res->end - res->start));
+ if (!addr) {
+		pr_err("%s: ERROR: memory mapping failed\n", __func__);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ndev = alloc_etherdev(sizeof(struct stmmac_priv));
+ if (!ndev) {
+ pr_err("%s: ERROR: allocating the device\n", __func__);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ /* Get the MAC information */
+ ndev->irq = platform_get_irq_byname(pdev, "macirq");
+ if (ndev->irq == -ENXIO) {
+ pr_err("%s: ERROR: MAC IRQ configuration "
+ "information not found\n", __func__);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ priv = netdev_priv(ndev);
+ priv->device = &(pdev->dev);
+ priv->dev = ndev;
+ plat_dat = (struct plat_stmmacenet_data *)((pdev->dev).platform_data);
+ priv->bus_id = plat_dat->bus_id;
+ priv->pbl = plat_dat->pbl; /* TLI */
+ priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */
+
+ platform_set_drvdata(pdev, ndev);
+
+ /* Set the I/O base addr */
+ ndev->base_addr = (unsigned long)addr;
+
+	/* MAC HW device detection */
+ ret = stmmac_mac_device_setup(ndev);
+ if (ret < 0)
+ goto out;
+
+ /* Network Device Registration */
+ ret = stmmac_probe(ndev);
+ if (ret < 0)
+ goto out;
+
+ /* associate a PHY - it is provided by another platform bus */
+ if (!driver_for_each_device
+ (&(stmmacphy_driver.driver), NULL, (void *)priv,
+ stmmac_associate_phy)) {
+ pr_err("No PHY device is associated with this MAC!\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ priv->fix_mac_speed = plat_dat->fix_mac_speed;
+ priv->bsp_priv = plat_dat->bsp_priv;
+
+ pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
+ "\tIO base addr: 0x%08x)\n", ndev->name, pdev->name,
+ pdev->id, ndev->irq, (unsigned int)addr);
+
+ /* MDIO bus Registration */
+ pr_debug("\tMDIO bus (id: %d)...", priv->bus_id);
+ ret = stmmac_mdio_register(ndev);
+ if (ret < 0)
+ goto out;
+ pr_debug("registered!\n");
+
+out:
+ if (ret < 0) {
+ platform_set_drvdata(pdev, NULL);
+ release_mem_region(res->start, (res->end - res->start));
+ if (addr != NULL)
+ iounmap(addr);
+ }
+
+ return ret;
+}
+
+/**
+ * stmmac_dvr_remove
+ * @pdev: platform device pointer
+ * Description: this function resets the TX/RX processes, disables the MAC
+ * RX/TX, changes the link status, releases the DMA descriptor rings,
+ * unregisters the MDIO bus and unmaps the allocated memory.
+ */
+static int stmmac_dvr_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct resource *res;
+
+	pr_info("%s:\n\tremoving driver\n", __func__);
+
+ stmmac_dma_stop_rx(ndev->base_addr);
+ stmmac_dma_stop_tx(ndev->base_addr);
+
+ stmmac_mac_disable_rx(ndev->base_addr);
+ stmmac_mac_disable_tx(ndev->base_addr);
+
+ netif_carrier_off(ndev);
+
+ stmmac_mdio_unregister(ndev);
+
+ platform_set_drvdata(pdev, NULL);
+ unregister_netdev(ndev);
+
+ iounmap((void *)ndev->base_addr);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, (res->end - res->start));
+
+ free_netdev(ndev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int dis_ic = 0;
+
+ if (!dev || !netif_running(dev))
+ return 0;
+
+ spin_lock(&priv->lock);
+
+ if (state.event == PM_EVENT_SUSPEND) {
+ netif_device_detach(dev);
+ netif_stop_queue(dev);
+ if (priv->phydev)
+ phy_stop(priv->phydev);
+
+#ifdef CONFIG_STMMAC_TIMER
+ priv->tm->timer_stop();
+ dis_ic = 1;
+#endif
+ napi_disable(&priv->napi);
+
+ /* Stop TX/RX DMA */
+ stmmac_dma_stop_tx(dev->base_addr);
+ stmmac_dma_stop_rx(dev->base_addr);
+ /* Clear the Rx/Tx descriptors */
+ priv->mac_type->ops->init_rx_desc(priv->dma_rx,
+ priv->dma_rx_size, dis_ic);
+ priv->mac_type->ops->init_tx_desc(priv->dma_tx,
+ priv->dma_tx_size);
+
+ stmmac_mac_disable_tx(dev->base_addr);
+
+ if (device_may_wakeup(&(pdev->dev))) {
+ /* Enable Power down mode by programming the PMT regs */
+ if (priv->wolenabled == PMT_SUPPORTED)
+ priv->mac_type->ops->pmt(dev->base_addr,
+ priv->wolopts);
+ } else {
+ stmmac_mac_disable_rx(dev->base_addr);
+ }
+ } else {
+ priv->shutdown = 1;
+		/* Although this can appear slightly redundant, it actually
+		 * speeds up the standby operation and guarantees that the
+		 * driver keeps working when hibernating to disk. */
+ stmmac_release(dev);
+ }
+
+ spin_unlock(&priv->lock);
+ return 0;
+}
+
+static int stmmac_resume(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+
+ if (!netif_running(dev))
+ return 0;
+
+ spin_lock(&priv->lock);
+
+ if (priv->shutdown) {
+ /* Re-open the interface and re-init the MAC/DMA
+ and the rings. */
+ stmmac_open(dev);
+ goto out_resume;
+ }
+
+	/* The Power Down bit in the PMT register is cleared automatically
+	 * as soon as a magic packet or a Wake-up frame is received.
+	 * Nevertheless, it's better to clear it manually because it can
+	 * cause problems when resuming from other devices (e.g. a serial
+	 * console). */
+ if (device_may_wakeup(&(pdev->dev)))
+ if (priv->wolenabled == PMT_SUPPORTED)
+ priv->mac_type->ops->pmt(dev->base_addr, 0);
+
+ netif_device_attach(dev);
+
+ /* Enable the MAC and DMA */
+ stmmac_mac_enable_rx(ioaddr);
+ stmmac_mac_enable_tx(ioaddr);
+ stmmac_dma_start_tx(ioaddr);
+ stmmac_dma_start_rx(ioaddr);
+
+#ifdef CONFIG_STMMAC_TIMER
+ priv->tm->timer_start(tmrate);
+#endif
+ napi_enable(&priv->napi);
+
+ if (priv->phydev)
+ phy_start(priv->phydev);
+
+ netif_start_queue(dev);
+
+out_resume:
+ spin_unlock(&priv->lock);
+ return 0;
+}
+#endif
+
+static struct platform_driver stmmac_driver = {
+ .driver = {
+ .name = STMMAC_RESOURCE_NAME,
+ },
+ .probe = stmmac_dvr_probe,
+ .remove = stmmac_dvr_remove,
+#ifdef CONFIG_PM
+ .suspend = stmmac_suspend,
+ .resume = stmmac_resume,
+#endif
+
+};
+
+/**
+ * stmmac_init_module - Entry point for the driver
+ * Description: This function is the entry point for the driver.
+ */
+static int __init stmmac_init_module(void)
+{
+ int ret;
+
+ if (platform_driver_register(&stmmacphy_driver)) {
+ pr_err("No PHY devices registered!\n");
+ return -ENODEV;
+ }
+
+ ret = platform_driver_register(&stmmac_driver);
+ return ret;
+}
+
+/**
+ * stmmac_cleanup_module - Cleanup routine for the driver
+ * Description: This function is the cleanup routine for the driver.
+ */
+static void __exit stmmac_cleanup_module(void)
+{
+ platform_driver_unregister(&stmmacphy_driver);
+ platform_driver_unregister(&stmmac_driver);
+}
+
+#ifndef MODULE
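+/* Built-in only: parse the "stmmaceth=" boot parameter, a comma-separated
+ * list of opt:value pairs, e.g. (illustrative values only):
+ *   stmmaceth=debug:16,buf_sz:2048,tc:256
+ */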
+static int __init stmmac_cmdline_opt(char *str)
+{
+ char *opt;
+
+ if (!str || !*str)
+ return -EINVAL;
+ while ((opt = strsep(&str, ",")) != NULL) {
+ if (!strncmp(opt, "debug:", 6))
+ strict_strtoul(opt + 6, 0, (unsigned long *)&debug);
+ else if (!strncmp(opt, "phyaddr:", 8))
+ strict_strtoul(opt + 8, 0, (unsigned long *)&phyaddr);
+ else if (!strncmp(opt, "dma_txsize:", 11))
+ strict_strtoul(opt + 11, 0,
+ (unsigned long *)&dma_txsize);
+ else if (!strncmp(opt, "dma_rxsize:", 11))
+ strict_strtoul(opt + 11, 0,
+ (unsigned long *)&dma_rxsize);
+ else if (!strncmp(opt, "buf_sz:", 7))
+ strict_strtoul(opt + 7, 0, (unsigned long *)&buf_sz);
+ else if (!strncmp(opt, "tc:", 3))
+ strict_strtoul(opt + 3, 0, (unsigned long *)&tc);
+ else if (!strncmp(opt, "tx_coe:", 7))
+ strict_strtoul(opt + 7, 0, (unsigned long *)&tx_coe);
+ else if (!strncmp(opt, "watchdog:", 9))
+ strict_strtoul(opt + 9, 0, (unsigned long *)&watchdog);
+ else if (!strncmp(opt, "flow_ctrl:", 10))
+ strict_strtoul(opt + 10, 0,
+ (unsigned long *)&flow_ctrl);
+ else if (!strncmp(opt, "pause:", 6))
+ strict_strtoul(opt + 6, 0, (unsigned long *)&pause);
+#ifdef CONFIG_STMMAC_TIMER
+ else if (!strncmp(opt, "tmrate:", 7))
+ strict_strtoul(opt + 7, 0, (unsigned long *)&tmrate);
+#endif
+ }
+ return 0;
+}
+
+__setup("stmmaceth=", stmmac_cmdline_opt);
+#endif
+
+module_init(stmmac_init_module);
+module_exit(stmmac_cleanup_module);
+
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet driver");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/stmmac/stmmac_mdio.c b/drivers/net/stmmac/stmmac_mdio.c
new file mode 100644
index 000000000000..8498552a22fc
--- /dev/null
+++ b/drivers/net/stmmac/stmmac_mdio.c
@@ -0,0 +1,217 @@
+/*******************************************************************************
+ STMMAC Ethernet Driver -- MDIO bus implementation
+ Provides Bus interface for MII registers
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Carl Shaw <carl.shaw@st.com>
+ Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/netdevice.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+
+#include "stmmac.h"
+
+#define MII_BUSY 0x00000001
+#define MII_WRITE 0x00000002
+
+/**
+ * stmmac_mdio_read
+ * @bus: points to the mii_bus structure
+ * @phyaddr: MII addr reg bits 15-11
+ * @phyreg: MII addr reg bits 10-6
+ * Description: it reads data from the MII register from within the phy device.
+ * For the 7111 GMAC, we must set the bit 0 in the MII address register while
+ * accessing the PHY registers.
+ * Fortunately, it seems this has no drawback for the 7109 MAC.
+ */
+static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned long ioaddr = ndev->base_addr;
+ unsigned int mii_address = priv->mac_type->hw.mii.addr;
+ unsigned int mii_data = priv->mac_type->hw.mii.data;
+
+ int data;
+ u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
+ ((phyreg << 6) & (0x000007C0)));
+ regValue |= MII_BUSY; /* in case of GMAC */
+
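+	/* Wait for any pending access, start the read, then wait for it to
+	 * complete before fetching the data register. */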
+ do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
+ writel(regValue, ioaddr + mii_address);
+ do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
+
+ /* Read the data from the MII data register */
+ data = (int)readl(ioaddr + mii_data);
+
+ return data;
+}
+
+/**
+ * stmmac_mdio_write
+ * @bus: points to the mii_bus structure
+ * @phyaddr: MII addr reg bits 15-11
+ * @phyreg: MII addr reg bits 10-6
+ * @phydata: phy data
+ * Description: it writes the data into the MII register from within the device.
+ */
+static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
+ u16 phydata)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned long ioaddr = ndev->base_addr;
+ unsigned int mii_address = priv->mac_type->hw.mii.addr;
+ unsigned int mii_data = priv->mac_type->hw.mii.data;
+
+ u16 value =
+ (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
+ | MII_WRITE;
+
+ value |= MII_BUSY;
+
+ /* Wait until any existing MII operation is complete */
+ do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
+
+ /* Set the MII address register to write */
+ writel(phydata, ioaddr + mii_data);
+ writel(value, ioaddr + mii_address);
+
+ /* Wait until any existing MII operation is complete */
+ do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
+
+ return 0;
+}
+
+/**
+ * stmmac_mdio_reset
+ * @bus: points to the mii_bus structure
+ * Description: reset the MII bus
+ */
+static int stmmac_mdio_reset(struct mii_bus *bus)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned long ioaddr = ndev->base_addr;
+ unsigned int mii_address = priv->mac_type->hw.mii.addr;
+
+ if (priv->phy_reset) {
+ pr_debug("stmmac_mdio_reset: calling phy_reset\n");
+ priv->phy_reset(priv->bsp_priv);
+ }
+
+ /* This is a workaround for problems with the STE101P PHY.
+ * It doesn't complete its reset until at least one clock cycle
+ * on MDC, so perform a dummy mdio read.
+ */
+ writel(0, ioaddr + mii_address);
+
+ return 0;
+}
+
+/**
+ * stmmac_mdio_register
+ * @ndev: net device structure
+ * Description: it registers the MII bus
+ */
+int stmmac_mdio_register(struct net_device *ndev)
+{
+ int err = 0;
+ struct mii_bus *new_bus;
+ int *irqlist;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int addr, found;
+
+ new_bus = mdiobus_alloc();
+ if (new_bus == NULL)
+ return -ENOMEM;
+
+ irqlist = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (irqlist == NULL) {
+ err = -ENOMEM;
+ goto irqlist_alloc_fail;
+ }
+
+ /* Assign IRQ to phy at address phy_addr */
+ if (priv->phy_addr != -1)
+ irqlist[priv->phy_addr] = priv->phy_irq;
+
+ new_bus->name = "STMMAC MII Bus";
+ new_bus->read = &stmmac_mdio_read;
+ new_bus->write = &stmmac_mdio_write;
+ new_bus->reset = &stmmac_mdio_reset;
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", priv->bus_id);
+ new_bus->priv = ndev;
+ new_bus->irq = irqlist;
+ new_bus->phy_mask = priv->phy_mask;
+ new_bus->parent = priv->device;
+ err = mdiobus_register(new_bus);
+ if (err != 0) {
+ pr_err("%s: Cannot register as MDIO bus\n", new_bus->name);
+ goto bus_register_fail;
+ }
+
+ priv->mii = new_bus;
+
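+	/* Scan the bus: if no PHY address was configured (phy_addr == -1),
+	 * adopt the first PHY found and assign it the platform IRQ. */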
+ found = 0;
+ for (addr = 0; addr < 32; addr++) {
+ struct phy_device *phydev = new_bus->phy_map[addr];
+ if (phydev) {
+ if (priv->phy_addr == -1) {
+ priv->phy_addr = addr;
+ phydev->irq = priv->phy_irq;
+ irqlist[addr] = priv->phy_irq;
+ }
+ pr_info("%s: PHY ID %08x at %d IRQ %d (%s)%s\n",
+ ndev->name, phydev->phy_id, addr,
+ phydev->irq, dev_name(&phydev->dev),
+ (addr == priv->phy_addr) ? " active" : "");
+ found = 1;
+ }
+ }
+
+ if (!found)
+ pr_warning("%s: No PHY found\n", ndev->name);
+
+ return 0;
+bus_register_fail:
+ kfree(irqlist);
+irqlist_alloc_fail:
+ kfree(new_bus);
+ return err;
+}
+
+/**
+ * stmmac_mdio_unregister
+ * @ndev: net device structure
+ * Description: it unregisters the MII bus
+ */
+int stmmac_mdio_unregister(struct net_device *ndev)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ mdiobus_unregister(priv->mii);
+ priv->mii->priv = NULL;
+ kfree(priv->mii);
+
+ return 0;
+}
diff --git a/drivers/net/stmmac/stmmac_timer.c b/drivers/net/stmmac/stmmac_timer.c
new file mode 100644
index 000000000000..b838c6582077
--- /dev/null
+++ b/drivers/net/stmmac/stmmac_timer.c
@@ -0,0 +1,140 @@
+/*******************************************************************************
+ STMMAC external timer support.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/etherdevice.h>
+#include "stmmac_timer.h"
+
+static void stmmac_timer_handler(void *data)
+{
+ struct net_device *dev = (struct net_device *)data;
+
+ stmmac_schedule(dev);
+
+ return;
+}
+
+#define STMMAC_TIMER_MSG(timer, freq) \
+printk(KERN_INFO "stmmac_timer: %s Timer ON (freq %dHz)\n", timer, freq)
+
+#if defined(CONFIG_STMMAC_RTC_TIMER)
+#include <linux/rtc.h>
+static struct rtc_device *stmmac_rtc;
+static rtc_task_t stmmac_task;
+
+static void stmmac_rtc_start(unsigned int new_freq)
+{
+ rtc_irq_set_freq(stmmac_rtc, &stmmac_task, new_freq);
+ rtc_irq_set_state(stmmac_rtc, &stmmac_task, 1);
+ return;
+}
+
+static void stmmac_rtc_stop(void)
+{
+ rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0);
+ return;
+}
+
+int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
+{
+ stmmac_task.private_data = dev;
+ stmmac_task.func = stmmac_timer_handler;
+
+ stmmac_rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
+ if (stmmac_rtc == NULL) {
+ pr_err("open rtc device failed\n");
+ return -ENODEV;
+ }
+
+ rtc_irq_register(stmmac_rtc, &stmmac_task);
+
+ /* Bail out if the RTC cannot generate periodic interrupts at tm->freq */
+ if ((rtc_irq_set_freq(stmmac_rtc, &stmmac_task, tm->freq) < 0)) {
+ pr_err("set periodic failed\n");
+ rtc_irq_unregister(stmmac_rtc, &stmmac_task);
+ rtc_class_close(stmmac_rtc);
+ return -1;
+ }
+
+ STMMAC_TIMER_MSG(CONFIG_RTC_HCTOSYS_DEVICE, tm->freq);
+
+ tm->timer_start = stmmac_rtc_start;
+ tm->timer_stop = stmmac_rtc_stop;
+
+ return 0;
+}
+
+int stmmac_close_ext_timer(void)
+{
+ rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0);
+ rtc_irq_unregister(stmmac_rtc, &stmmac_task);
+ rtc_class_close(stmmac_rtc);
+ return 0;
+}
+
+#elif defined(CONFIG_STMMAC_TMU_TIMER)
+#include <linux/clk.h>
+#include <linux/err.h>
+#define TMU_CHANNEL "tmu2_clk"
+static struct clk *timer_clock;
+
+static void stmmac_tmu_start(unsigned int new_freq)
+{
+ clk_set_rate(timer_clock, new_freq);
+ clk_enable(timer_clock);
+ return;
+}
+
+static void stmmac_tmu_stop(void)
+{
+ clk_disable(timer_clock);
+ return;
+}
+
+int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
+{
+ timer_clock = clk_get(NULL, TMU_CHANNEL);
+
+ if (IS_ERR(timer_clock))
+ return -1;
+
+ if (tmu2_register_user(stmmac_timer_handler, (void *)dev) < 0) {
+ clk_put(timer_clock);
+ timer_clock = NULL;
+ return -1;
+ }
+
+ STMMAC_TIMER_MSG("TMU2", tm->freq);
+ tm->timer_start = stmmac_tmu_start;
+ tm->timer_stop = stmmac_tmu_stop;
+
+ return 0;
+}
+
+int stmmac_close_ext_timer(void)
+{
+ clk_disable(timer_clock);
+ tmu2_unregister_user();
+ clk_put(timer_clock);
+ return 0;
+}
+#endif
diff --git a/drivers/net/stmmac/stmmac_timer.h b/drivers/net/stmmac/stmmac_timer.h
new file mode 100644
index 000000000000..f795cae33725
--- /dev/null
+++ b/drivers/net/stmmac/stmmac_timer.h
@@ -0,0 +1,41 @@
+/*******************************************************************************
+ STMMAC external timer Header File.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+struct stmmac_timer {
+ void (*timer_start) (unsigned int new_freq);
+ void (*timer_stop) (void);
+ unsigned int freq;
+};
+
+/* Open the HW timer device and return 0 in case of success */
+int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm);
+/* Stop the timer and release it */
+int stmmac_close_ext_timer(void);
+/* Function used for scheduling task within the stmmac */
+void stmmac_schedule(struct net_device *dev);
+
+#if defined(CONFIG_STMMAC_TMU_TIMER)
+extern int tmu2_register_user(void *fnt, void *data);
+extern void tmu2_unregister_user(void);
+#endif
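A short, hypothetical usage sketch of the interface declared above (the tick rate and function names are illustrative; the driver's real caller picks the frequency):

/* Illustrative only: open the external timer, run it, tear it down. */
static struct stmmac_timer ex_timer;

static int example_timer_setup(struct net_device *dev)
{
        ex_timer.freq = 256;                    /* desired tick rate in Hz (example value) */
        if (stmmac_open_ext_timer(dev, &ex_timer) < 0)
                return -ENODEV;                 /* fall back to per-packet interrupts */

        ex_timer.timer_start(ex_timer.freq);    /* RTC or TMU backend, as configured */
        return 0;
}

static void example_timer_teardown(void)
{
        ex_timer.timer_stop();
        stmmac_close_ext_timer();
}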
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 556512dc6072..e78486504d3a 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -451,7 +451,7 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
vi->dev->stats.tx_bytes += skb->len;
vi->dev->stats.tx_packets++;
tot_sgs += skb_vnet_hdr(skb)->num_sg;
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
}
return tot_sgs;
}
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 44fb0c5a2800..004353a46af0 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -481,7 +481,8 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
}
rq->uncommitted[ring_idx] += num_allocated;
- dprintk(KERN_ERR "alloc_rx_buf: %d allocated, next2fill %u, next2comp "
+ dev_dbg(&adapter->netdev->dev,
+ "alloc_rx_buf: %d allocated, next2fill %u, next2comp "
"%u, uncommited %u\n", num_allocated, ring->next2fill,
ring->next2comp, rq->uncommitted[ring_idx]);
@@ -539,7 +540,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
tbi = tq->buf_info + tq->tx_ring.next2fill;
tbi->map_type = VMXNET3_MAP_NONE;
- dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n",
+ dev_dbg(&adapter->netdev->dev,
+ "txd[%u]: 0x%Lx 0x%x 0x%x\n",
tq->tx_ring.next2fill, ctx->sop_txd->txd.addr,
ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
@@ -572,7 +574,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
gdesc->dword[2] = dw2 | buf_size;
gdesc->dword[3] = 0;
- dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n",
+ dev_dbg(&adapter->netdev->dev,
+ "txd[%u]: 0x%Lx 0x%x 0x%x\n",
tq->tx_ring.next2fill, gdesc->txd.addr,
gdesc->dword[2], gdesc->dword[3]);
vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
@@ -600,7 +603,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
gdesc->dword[2] = dw2 | frag->size;
gdesc->dword[3] = 0;
- dprintk(KERN_ERR "txd[%u]: 0x%llu %u %u\n",
+ dev_dbg(&adapter->netdev->dev,
+ "txd[%u]: 0x%llu %u %u\n",
tq->tx_ring.next2fill, gdesc->txd.addr,
gdesc->dword[2], gdesc->dword[3]);
vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
@@ -697,7 +701,8 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
tdd = tq->data_ring.base + tq->tx_ring.next2fill;
memcpy(tdd->data, skb->data, ctx->copy_size);
- dprintk(KERN_ERR "copy %u bytes to dataRing[%u]\n",
+ dev_dbg(&adapter->netdev->dev,
+ "copy %u bytes to dataRing[%u]\n",
ctx->copy_size, tq->tx_ring.next2fill);
return 1;
@@ -808,7 +813,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
tq->stats.tx_ring_full++;
- dprintk(KERN_ERR "tx queue stopped on %s, next2comp %u"
+ dev_dbg(&adapter->netdev->dev,
+ "tx queue stopped on %s, next2comp %u"
" next2fill %u\n", adapter->netdev->name,
tq->tx_ring.next2comp, tq->tx_ring.next2fill);
@@ -853,7 +859,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
/* finally flips the GEN bit of the SOP desc */
gdesc->dword[2] ^= VMXNET3_TXD_GEN;
- dprintk(KERN_ERR "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
+ dev_dbg(&adapter->netdev->dev,
+ "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
(u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
tq->tx_ring.base), gdesc->txd.addr, gdesc->dword[2],
gdesc->dword[3]);
@@ -990,7 +997,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
if (unlikely(rcd->len == 0)) {
/* Pretend the rx buffer is skipped. */
BUG_ON(!(rcd->sop && rcd->eop));
- dprintk(KERN_ERR "rxRing[%u][%u] 0 length\n",
+ dev_dbg(&adapter->netdev->dev,
+ "rxRing[%u][%u] 0 length\n",
ring_idx, idx);
goto rcd_done;
}
@@ -1314,9 +1322,11 @@ vmxnet3_netpoll(struct net_device *netdev)
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
int irq;
+#ifdef CONFIG_PCI_MSI
if (adapter->intr.type == VMXNET3_IT_MSIX)
irq = adapter->intr.msix_entries[0].vector;
else
+#endif
irq = adapter->pdev->irq;
disable_irq(irq);
@@ -1330,12 +1340,15 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
{
int err;
+#ifdef CONFIG_PCI_MSI
if (adapter->intr.type == VMXNET3_IT_MSIX) {
/* we only use 1 MSI-X vector */
err = request_irq(adapter->intr.msix_entries[0].vector,
vmxnet3_intr, 0, adapter->netdev->name,
adapter->netdev);
- } else if (adapter->intr.type == VMXNET3_IT_MSI) {
+ } else
+#endif
+ if (adapter->intr.type == VMXNET3_IT_MSI) {
err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
adapter->netdev->name, adapter->netdev);
} else {
@@ -1376,6 +1389,7 @@ vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
adapter->intr.num_intrs <= 0);
switch (adapter->intr.type) {
+#ifdef CONFIG_PCI_MSI
case VMXNET3_IT_MSIX:
{
int i;
@@ -1385,6 +1399,7 @@ vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
adapter->netdev);
break;
}
+#endif
case VMXNET3_IT_MSI:
free_irq(adapter->pdev->irq, adapter->netdev);
break;
@@ -1676,7 +1691,8 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
int err;
u32 ret;
- dprintk(KERN_ERR "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes"
+ dev_dbg(&adapter->netdev->dev,
+ "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes"
" %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size,
adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size,
adapter->rx_queue.rx_ring[0].size,
@@ -2134,6 +2150,7 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
if (adapter->intr.type == VMXNET3_IT_AUTO) {
int err;
+#ifdef CONFIG_PCI_MSI
adapter->intr.msix_entries[0].entry = 0;
err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
VMXNET3_LINUX_MAX_MSIX_VECT);
@@ -2142,6 +2159,7 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
adapter->intr.type = VMXNET3_IT_MSIX;
return;
}
+#endif
err = pci_enable_msi(adapter->pdev);
if (!err) {
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 6bb91576e999..3c0d70d58111 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -30,6 +30,7 @@
#include <linux/types.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
@@ -59,7 +60,6 @@
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
-#include <linux/dst.h>
#include "vmxnet3_defs.h"
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 37f3aea074a5..773b10fa38e4 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -130,11 +130,11 @@ struct inet_timewait_sock {
__u16 tw_num;
kmemcheck_bitfield_begin(flags);
/* And these are ours. */
- __u8 tw_ipv6only:1,
- tw_transparent:1;
- /* 14 bits hole, try to pack */
+ unsigned int tw_ipv6only : 1,
+ tw_transparent : 1,
+ tw_pad : 14, /* 14 bits hole */
+ tw_ipv6_offset : 16;
kmemcheck_bitfield_end(flags);
- __u16 tw_ipv6_offset;
unsigned long tw_ttd;
struct inet_bind_bucket *tw_tb;
struct hlist_node tw_death_node;
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 7f939ce29801..2bc6f6a8de68 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -92,6 +92,8 @@ static void add_conn(struct work_struct *work)
dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
+ dev_set_drvdata(&conn->dev, conn);
+
if (device_add(&conn->dev) < 0) {
BT_ERR("Failed to register connection device");
return;
@@ -144,8 +146,6 @@ void hci_conn_init_sysfs(struct hci_conn *conn)
conn->dev.class = bt_class;
conn->dev.parent = &hdev->dev;
- dev_set_drvdata(&conn->dev, conn);
-
device_initialize(&conn->dev);
INIT_WORK(&conn->work_add, add_conn);
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 4b66bd579f4a..d65101d92ee5 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -555,12 +555,12 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
conn->feat_mask = 0;
- setup_timer(&conn->info_timer, l2cap_info_timeout,
- (unsigned long) conn);
-
spin_lock_init(&conn->lock);
rwlock_init(&conn->chan_list.lock);
+ setup_timer(&conn->info_timer, l2cap_info_timeout,
+ (unsigned long) conn);
+
conn->disc_reason = 0x13;
return conn;
@@ -783,6 +783,9 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
/* Default config options */
pi->conf_len = 0;
pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
+ skb_queue_head_init(TX_QUEUE(sk));
+ skb_queue_head_init(SREJ_QUEUE(sk));
+ INIT_LIST_HEAD(SREJ_LIST(sk));
}
static struct proto l2cap_proto = {
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 1da0e038df78..5ce017bf4afa 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -335,6 +335,7 @@ struct pktgen_dev {
__u32 cur_src_mac_offset;
__be32 cur_saddr;
__be32 cur_daddr;
+ __u16 ip_id;
__u16 cur_udp_dst;
__u16 cur_udp_src;
__u16 cur_queue_map;
@@ -2630,6 +2631,8 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
iph->protocol = IPPROTO_UDP; /* UDP */
iph->saddr = pkt_dev->cur_saddr;
iph->daddr = pkt_dev->cur_daddr;
+ iph->id = htons(pkt_dev->ip_id);
+ pkt_dev->ip_id++;
iph->frag_off = 0;
iplen = 20 + 8 + datalen;
iph->tot_len = htons(iplen);
@@ -2641,24 +2644,26 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
skb->dev = odev;
skb->pkt_type = PACKET_HOST;
- if (pkt_dev->nfrags <= 0)
+ if (pkt_dev->nfrags <= 0) {
pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
- else {
+ memset(pgh + 1, 0, datalen - sizeof(struct pktgen_hdr));
+ } else {
int frags = pkt_dev->nfrags;
- int i;
+ int i, len;
pgh = (struct pktgen_hdr *)(((char *)(udph)) + 8);
if (frags > MAX_SKB_FRAGS)
frags = MAX_SKB_FRAGS;
if (datalen > frags * PAGE_SIZE) {
- skb_put(skb, datalen - frags * PAGE_SIZE);
+ len = datalen - frags * PAGE_SIZE;
+ memset(skb_put(skb, len), 0, len);
datalen = frags * PAGE_SIZE;
}
i = 0;
while (datalen > 0) {
- struct page *page = alloc_pages(GFP_KERNEL, 0);
+ struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
skb_shinfo(skb)->frags[i].page = page;
skb_shinfo(skb)->frags[i].page_offset = 0;
skb_shinfo(skb)->frags[i].size =
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index f6a0af759932..26fb50e91311 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -447,6 +447,28 @@ extern int sysctl_tcp_synack_retries;
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
+/* Decide when to expire the request and when to resend SYN-ACK */
+static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
+ const int max_retries,
+ const u8 rskq_defer_accept,
+ int *expire, int *resend)
+{
+ if (!rskq_defer_accept) {
+ *expire = req->retrans >= thresh;
+ *resend = 1;
+ return;
+ }
+ *expire = req->retrans >= thresh &&
+ (!inet_rsk(req)->acked || req->retrans >= max_retries);
+ /*
+ * Do not resend while waiting for data after ACK,
+ * start to resend on end of deferring period to give
+ * last chance for data or ACK to create established socket.
+ */
+ *resend = !inet_rsk(req)->acked ||
+ req->retrans >= rskq_defer_accept - 1;
+}
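A brief worked example of the decision above, assuming rskq_defer_accept = 3 with thresh = max_retries = 5 (illustrative numbers only):

/* Illustrative walk-through (defer_accept = 3, thresh = max_retries = 5):
 *
 *   bare ACK already received (acked = 1):
 *     retrans 0-1  -> resend = 0  (stay silent, keep waiting for data)
 *     retrans >= 2 -> resend = 1  (defer window closing, last chance)
 *     expire only once retrans >= 5
 *
 *   no ACK yet (acked = 0):
 *     resend = 1 on every pass, expire once retrans >= 5
 */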
+
void inet_csk_reqsk_queue_prune(struct sock *parent,
const unsigned long interval,
const unsigned long timeout,
@@ -502,9 +524,15 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
reqp=&lopt->syn_table[i];
while ((req = *reqp) != NULL) {
if (time_after_eq(now, req->expires)) {
- if ((req->retrans < thresh ||
- (inet_rsk(req)->acked && req->retrans < max_retries))
- && !req->rsk_ops->rtx_syn_ack(parent, req)) {
+ int expire = 0, resend = 0;
+
+ syn_ack_recalc(req, thresh, max_retries,
+ queue->rskq_defer_accept,
+ &expire, &resend);
+ if (!expire &&
+ (!resend ||
+ !req->rsk_ops->rtx_syn_ack(parent, req) ||
+ inet_rsk(req)->acked)) {
unsigned long timeo;
if (req->retrans++ == 0)
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 2445fedec0b8..a72f43ce33be 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -634,17 +634,16 @@ static int do_ip_setsockopt(struct sock *sk, int level,
break;
}
dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
- if (dev) {
+ if (dev)
mreq.imr_ifindex = dev->ifindex;
- dev_put(dev);
- }
} else
- dev = __dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
+ dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
err = -EADDRNOTAVAIL;
if (!dev)
break;
+ dev_put(dev);
err = -EINVAL;
if (sk->sk_bound_dev_if &&
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 206a291dff03..e0cfa633680a 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -326,6 +326,43 @@ void tcp_enter_memory_pressure(struct sock *sk)
EXPORT_SYMBOL(tcp_enter_memory_pressure);
+/* Convert seconds to retransmits based on initial and max timeout */
+static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
+{
+ u8 res = 0;
+
+ if (seconds > 0) {
+ int period = timeout;
+
+ res = 1;
+ while (seconds > period && res < 255) {
+ res++;
+ timeout <<= 1;
+ if (timeout > rto_max)
+ timeout = rto_max;
+ period += timeout;
+ }
+ }
+ return res;
+}
+
+/* Convert retransmits to seconds based on initial and max timeout */
+static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
+{
+ int period = 0;
+
+ if (retrans > 0) {
+ period = timeout;
+ while (--retrans) {
+ timeout <<= 1;
+ if (timeout > rto_max)
+ timeout = rto_max;
+ period += timeout;
+ }
+ }
+ return period;
+}
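A worked example of the two conversions, assuming this tree's TCP_TIMEOUT_INIT of 3 s and TCP_RTO_MAX of 120 s (check include/net/tcp.h for the exact values):

/* Worked example (assumed initial timeout 3 s, max 120 s):
 *
 *   secs_to_retrans(30, 3, 120)
 *     retrans 1 covers  3 s  (period = 3)
 *     retrans 2 covers  9 s  (period = 3 + 6)
 *     retrans 3 covers 21 s  (period = 9 + 12)
 *     retrans 4 covers 45 s  (period = 21 + 24)   -> returns 4
 *
 *   retrans_to_secs(4, 3, 120) walks the same doubling series back and
 *   returns 45, so the value read back may be larger than the one set.
 */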
+
/*
* Wait for a TCP event.
*
@@ -1405,7 +1442,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
goto found_ok_skb;
if (tcp_hdr(skb)->fin)
goto found_fin_ok;
- WARN_ON(!(flags & MSG_PEEK));
+ WARN(!(flags & MSG_PEEK), KERN_INFO "recvmsg bug 2: "
+ "copied %X seq %X\n", *seq,
+ TCP_SKB_CB(skb)->seq);
}
/* Well, if we have backlog, try to process it now yet. */
@@ -2163,16 +2202,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
break;
case TCP_DEFER_ACCEPT:
- icsk->icsk_accept_queue.rskq_defer_accept = 0;
- if (val > 0) {
- /* Translate value in seconds to number of
- * retransmits */
- while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
- val > ((TCP_TIMEOUT_INIT / HZ) <<
- icsk->icsk_accept_queue.rskq_defer_accept))
- icsk->icsk_accept_queue.rskq_defer_accept++;
- icsk->icsk_accept_queue.rskq_defer_accept++;
- }
+ /* Translate value in seconds to number of retransmits */
+ icsk->icsk_accept_queue.rskq_defer_accept =
+ secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
+ TCP_RTO_MAX / HZ);
break;
case TCP_WINDOW_CLAMP:
@@ -2353,8 +2386,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
val = (val ? : sysctl_tcp_fin_timeout) / HZ;
break;
case TCP_DEFER_ACCEPT:
- val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
- ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
+ val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
+ TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
break;
case TCP_WINDOW_CLAMP:
val = tp->window_clamp;
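For reference, the socket option these helpers back is set from user space in whole seconds; a minimal sketch using the standard sockets API (error handling elided):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

/* Defer accept() wake-ups until data arrives, for roughly 30 seconds;
 * the kernel rounds this up to a retransmit count via secs_to_retrans(). */
static int example_set_defer_accept(int listen_fd)
{
        int secs = 30;

        return setsockopt(listen_fd, IPPROTO_TCP, TCP_DEFER_ACCEPT,
                          &secs, sizeof(secs));
}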
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index e320afea07fc..4c03598ed924 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -641,10 +641,9 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
if (!(flg & TCP_FLAG_ACK))
return NULL;
- /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
- if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
+ /* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
+ if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
- inet_csk(sk)->icsk_accept_queue.rskq_defer_accept--;
inet_rsk(req)->acked = 1;
return NULL;
}
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 68566de4bcc5..430454ee5ead 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -497,13 +497,17 @@ done:
goto e_inval;
if (val) {
+ struct net_device *dev;
+
if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val)
goto e_inval;
- if (__dev_get_by_index(net, val) == NULL) {
+ dev = dev_get_by_index(net, val);
+ if (!dev) {
retv = -ENODEV;
break;
}
+ dev_put(dev);
}
np->mcast_oif = val;
retv = 0;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 0f133c5a8d3c..3291902f0b88 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1074,6 +1074,8 @@ restart:
err = -ECONNREFUSED;
if (other->sk_state != TCP_LISTEN)
goto out_unlock;
+ if (other->sk_shutdown & RCV_SHUTDOWN)
+ goto out_unlock;
if (unix_recvq_full(other)) {
err = -EAGAIN;