160 files changed, 16426 insertions(+), 1857 deletions(-)
diff --git a/Documentation/DocBook/80211.tmpl b/Documentation/DocBook/80211.tmpl
index aac9357d4866..f9b9ad7894f5 100644
--- a/Documentation/DocBook/80211.tmpl
+++ b/Documentation/DocBook/80211.tmpl
@@ -154,8 +154,9 @@
 !Finclude/net/cfg80211.h cfg80211_scan_request
 !Finclude/net/cfg80211.h cfg80211_scan_done
 !Finclude/net/cfg80211.h cfg80211_bss
-!Finclude/net/cfg80211.h cfg80211_inform_bss_width_frame
-!Finclude/net/cfg80211.h cfg80211_inform_bss_width
+!Finclude/net/cfg80211.h cfg80211_inform_bss
+!Finclude/net/cfg80211.h cfg80211_inform_bss_frame_data
+!Finclude/net/cfg80211.h cfg80211_inform_bss_data
 !Finclude/net/cfg80211.h cfg80211_unlink_bss
 !Finclude/net/cfg80211.h cfg80211_find_ie
 !Finclude/net/cfg80211.h ieee80211_bss_get_ie
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index ebe94f2cab98..85752c81c5ec 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -384,6 +384,14 @@ tcp_mem - vector of 3 INTEGERs: min, pressure, max
 	Defaults are calculated at boot time from amount of available memory.
 
+tcp_min_rtt_wlen - INTEGER
+	The window length of the windowed min filter to track the minimum RTT.
+	A shorter window lets a flow more quickly pick up new (higher)
+	minimum RTT when it is moved to a longer path (e.g., due to traffic
+	engineering). A longer window makes the filter more resistant to RTT
+	inflations such as transient congestion. The unit is seconds.
+	Default: 300
+
 tcp_moderate_rcvbuf - BOOLEAN
 	If set, TCP performs receive buffer auto-tuning, attempting to
 	automatically size the buffer (no greater than tcp_rmem[2]) to
@@ -425,6 +433,15 @@ tcp_orphan_retries - INTEGER
 	you should think about lowering this value, such sockets may
 	consume significant resources. Cf. tcp_max_orphans.
 
+tcp_recovery - INTEGER
+	This value is a bitmap to enable various experimental loss recovery
+	features.
+
+	RACK: 0x1 enables the RACK loss detection for fast detection of lost
+	retransmissions and tail drops.
+
+	Default: 0x1
+
 tcp_reordering - INTEGER
 	Initial reordering level of packets in a TCP stream.
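The tcp_min_rtt_wlen entry above is the only description of the windowed min filter in this patch set, so a small illustration may help. Below is a minimal, self-contained C sketch of a time-windowed minimum under the stated semantics: samples older than the window are discarded, so a short window forgets a stale (lower) minimum quickly, while a long window rides out transient RTT inflation. The ring-of-timestamped-samples layout and every name here are illustrative assumptions, not the kernel's code; the kernel keeps only a few timestamped minima in a more compact structure.

    #include <stdint.h>

    #define WLEN_SEC  300   /* mirrors the tcp_min_rtt_wlen default */
    #define NSAMPLES  8     /* bounded history; enough for a sketch */

    struct min_rtt_filter {
            struct { uint32_t rtt_us; uint32_t when_sec; } s[NSAMPLES];
            int n;
    };

    /* Feed one RTT sample taken at 'now_sec'; return the minimum over the
     * last WLEN_SEC seconds. Expired samples are dropped, so a flow moved
     * onto a longer path forgets its old (lower) minimum once the window
     * slides past it. A full ring simply drops the new sample, which is
     * acceptable for a sketch. */
    static uint32_t min_rtt_update(struct min_rtt_filter *f,
                                   uint32_t rtt_us, uint32_t now_sec)
    {
            uint32_t min = rtt_us;
            int i, kept = 0;

            for (i = 0; i < f->n; i++) {
                    if (now_sec - f->s[i].when_sec >= WLEN_SEC)
                            continue;       /* outside the window */
                    f->s[kept++] = f->s[i];
                    if (f->s[i].rtt_us < min)
                            min = f->s[i].rtt_us;
            }
            if (kept < NSAMPLES) {
                    f->s[kept].rtt_us = rtt_us;
                    f->s[kept].when_sec = now_sec;
                    kept++;
            }
            f->n = kept;
            return min;
    }

Calling min_rtt_update() once per RTT measurement yields the filtered minimum; shrinking WLEN_SEC makes the result track a new, higher path RTT sooner, at the cost of treating a long congestion spike as the new floor.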
TCP stack can then dynamically adjust flow reordering level diff --git a/arch/arm/mach-gemini/board-nas4220b.c b/arch/arm/mach-gemini/board-nas4220b.c index ca8a25bb3521..18b12796acf9 100644 --- a/arch/arm/mach-gemini/board-nas4220b.c +++ b/arch/arm/mach-gemini/board-nas4220b.c @@ -18,7 +18,6 @@ #include <linux/leds.h> #include <linux/input.h> #include <linux/gpio_keys.h> -#include <linux/mdio-gpio.h> #include <linux/io.h> #include <asm/setup.h> diff --git a/arch/arm/mach-gemini/board-wbd111.c b/arch/arm/mach-gemini/board-wbd111.c index 418188cd1712..14c56f3f0ec2 100644 --- a/arch/arm/mach-gemini/board-wbd111.c +++ b/arch/arm/mach-gemini/board-wbd111.c @@ -15,7 +15,6 @@ #include <linux/input.h> #include <linux/skbuff.h> #include <linux/gpio_keys.h> -#include <linux/mdio-gpio.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <asm/mach-types.h> diff --git a/arch/arm/mach-gemini/board-wbd222.c b/arch/arm/mach-gemini/board-wbd222.c index 266b265090cd..6070282ce243 100644 --- a/arch/arm/mach-gemini/board-wbd222.c +++ b/arch/arm/mach-gemini/board-wbd222.c @@ -15,7 +15,6 @@ #include <linux/input.h> #include <linux/skbuff.h> #include <linux/gpio_keys.h> -#include <linux/mdio-gpio.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <asm/mach-types.h> diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c index c29aebe1e62b..9093577755f6 100644 --- a/drivers/net/dsa/mv88e6060.c +++ b/drivers/net/dsa/mv88e6060.c @@ -26,7 +26,7 @@ static int reg_read(struct dsa_switch *ds, int addr, int reg) if (bus == NULL) return -EINVAL; - return mdiobus_read(bus, ds->pd->sw_addr + addr, reg); + return mdiobus_read_nested(bus, ds->pd->sw_addr + addr, reg); } #define REG_READ(addr, reg) \ @@ -47,7 +47,7 @@ static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val) if (bus == NULL) return -EINVAL; - return mdiobus_write(bus, ds->pd->sw_addr + addr, reg, val); + return mdiobus_write_nested(bus, ds->pd->sw_addr + addr, reg, val); } #define REG_WRITE(addr, reg, val) \ diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index dfca352e78e3..2c8eb6f76ebe 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -122,7 +122,7 @@ struct dsa_switch_driver mv88e6171_switch_driver = { .port_fdb_prepare = mv88e6xxx_port_fdb_prepare, .port_fdb_add = mv88e6xxx_port_fdb_add, .port_fdb_del = mv88e6xxx_port_fdb_del, - .port_fdb_getnext = mv88e6xxx_port_fdb_getnext, + .port_fdb_dump = mv88e6xxx_port_fdb_dump, }; MODULE_ALIAS("platform:mv88e6171"); diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index 796fdcbe3c6e..cbf4dd8721a6 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -349,7 +349,7 @@ struct dsa_switch_driver mv88e6352_switch_driver = { .port_fdb_prepare = mv88e6xxx_port_fdb_prepare, .port_fdb_add = mv88e6xxx_port_fdb_add, .port_fdb_del = mv88e6xxx_port_fdb_del, - .port_fdb_getnext = mv88e6xxx_port_fdb_getnext, + .port_fdb_dump = mv88e6xxx_port_fdb_dump, }; MODULE_ALIAS("platform:mv88e6172"); diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 4591240eb795..b1b14f519d8b 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -11,7 +11,6 @@ * (at your option) any later version. 
*/ -#include <linux/debugfs.h> #include <linux/delay.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> @@ -21,39 +20,10 @@ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/phy.h> -#include <linux/seq_file.h> #include <net/dsa.h> #include <net/switchdev.h> #include "mv88e6xxx.h" -/* MDIO bus access can be nested in the case of PHYs connected to the - * internal MDIO bus of the switch, which is accessed via MDIO bus of - * the Ethernet interface. Avoid lockdep false positives by using - * mutex_lock_nested(). - */ -static int mv88e6xxx_mdiobus_read(struct mii_bus *bus, int addr, u32 regnum) -{ - int ret; - - mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING); - ret = bus->read(bus, addr, regnum); - mutex_unlock(&bus->mdio_lock); - - return ret; -} - -static int mv88e6xxx_mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, - u16 val) -{ - int ret; - - mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING); - ret = bus->write(bus, addr, regnum, val); - mutex_unlock(&bus->mdio_lock); - - return ret; -} - /* If the switch's ADDR[4:0] strap pins are strapped to zero, it will * use all 32 SMI bus addresses on its SMI bus, and all switch registers * will be directly accessible on some {device address,register address} @@ -68,7 +38,7 @@ static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr) int i; for (i = 0; i < 16; i++) { - ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_CMD); + ret = mdiobus_read_nested(bus, sw_addr, SMI_CMD); if (ret < 0) return ret; @@ -84,7 +54,7 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg) int ret; if (sw_addr == 0) - return mv88e6xxx_mdiobus_read(bus, addr, reg); + return mdiobus_read_nested(bus, addr, reg); /* Wait for the bus to become free. */ ret = mv88e6xxx_reg_wait_ready(bus, sw_addr); @@ -92,8 +62,8 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg) return ret; /* Transmit the read command. */ - ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD, - SMI_CMD_OP_22_READ | (addr << 5) | reg); + ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD, + SMI_CMD_OP_22_READ | (addr << 5) | reg); if (ret < 0) return ret; @@ -103,7 +73,7 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg) return ret; /* Read the data. */ - ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_DATA); + ret = mdiobus_read_nested(bus, sw_addr, SMI_DATA); if (ret < 0) return ret; @@ -147,7 +117,7 @@ int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr, int ret; if (sw_addr == 0) - return mv88e6xxx_mdiobus_write(bus, addr, reg, val); + return mdiobus_write_nested(bus, addr, reg, val); /* Wait for the bus to become free. */ ret = mv88e6xxx_reg_wait_ready(bus, sw_addr); @@ -155,13 +125,13 @@ int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr, return ret; /* Transmit the data to write. */ - ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_DATA, val); + ret = mdiobus_write_nested(bus, sw_addr, SMI_DATA, val); if (ret < 0) return ret; /* Transmit the write command. 
*/ - ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD, - SMI_CMD_OP_22_WRITE | (addr << 5) | reg); + ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD, + SMI_CMD_OP_22_WRITE | (addr << 5) | reg); if (ret < 0) return ret; @@ -876,13 +846,6 @@ static int _mv88e6xxx_atu_wait(struct dsa_switch *ds) GLOBAL_ATU_OP_BUSY); } -/* Must be called with SMI lock held */ -static int _mv88e6xxx_scratch_wait(struct dsa_switch *ds) -{ - return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SCRATCH_MISC, - GLOBAL2_SCRATCH_BUSY); -} - /* Must be called with SMI mutex held */ static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr, int regnum) @@ -1259,7 +1222,13 @@ static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds, return 0; } -static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds, u16 vid, +static int _mv88e6xxx_vtu_vid_write(struct dsa_switch *ds, u16 vid) +{ + return _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, + vid & GLOBAL_VTU_VID_MASK); +} + +static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds, struct mv88e6xxx_vtu_stu_entry *entry) { struct mv88e6xxx_vtu_stu_entry next = { 0 }; @@ -1269,11 +1238,6 @@ static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds, u16 vid, if (ret < 0) return ret; - ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, - vid & GLOBAL_VTU_VID_MASK); - if (ret < 0) - return ret; - ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_GET_NEXT); if (ret < 0) return ret; @@ -1485,7 +1449,12 @@ int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid, int err; mutex_lock(&ps->smi_mutex); - err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan); + + err = _mv88e6xxx_vtu_vid_write(ds, vid - 1); + if (err) + goto unlock; + + err = _mv88e6xxx_vtu_getnext(ds, &vlan); if (err) goto unlock; @@ -1514,7 +1483,11 @@ int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid) mutex_lock(&ps->smi_mutex); - err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan); + err = _mv88e6xxx_vtu_vid_write(ds, vid - 1); + if (err) + goto unlock; + + err = _mv88e6xxx_vtu_getnext(ds, &vlan); if (err) goto unlock; @@ -1549,29 +1522,6 @@ unlock: return err; } -static int _mv88e6xxx_port_vtu_getnext(struct dsa_switch *ds, int port, u16 vid, - struct mv88e6xxx_vtu_stu_entry *entry) -{ - int err; - - do { - if (vid == 4095) - return -ENOENT; - - err = _mv88e6xxx_vtu_getnext(ds, vid, entry); - if (err) - return err; - - if (!entry->valid) - return -ENOENT; - - vid = entry->vid; - } while (entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED && - entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED); - - return 0; -} - int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid, unsigned long *ports, unsigned long *untagged) { @@ -1584,7 +1534,12 @@ int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid, return -ENOENT; mutex_lock(&ps->smi_mutex); - err = _mv88e6xxx_vtu_getnext(ds, *vid, &next); + err = _mv88e6xxx_vtu_vid_write(ds, *vid); + if (err) + goto unlock; + + err = _mv88e6xxx_vtu_getnext(ds, &next); +unlock: mutex_unlock(&ps->smi_mutex); if (err) @@ -1732,7 +1687,6 @@ int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port, } static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid, - const unsigned char *addr, struct mv88e6xxx_atu_entry *entry) { struct mv88e6xxx_atu_entry next = { 0 }; @@ -1744,10 +1698,6 @@ static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid, if (ret < 0) return ret; - ret = _mv88e6xxx_atu_mac_write(ds, addr); - if (ret < 0) - return ret; - ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid); 
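A note on the mdiobus_read_nested()/mdiobus_write_nested() calls this file now uses: the driver-local wrapper deleted earlier in this mv88e6xxx.c diff shows exactly what those helpers centralize. Reconstructed from that removed code, the read side boils down to the kernel-style sketch below; the authoritative version lives in the MDIO core, so treat this as illustration only.

    /* Take the MDIO bus lock with lockdep's nesting annotation, so that
     * reaching a PHY on the switch's internal MDIO bus, which is itself
     * accessed through the Ethernet controller's MDIO bus, does not
     * raise a false-positive deadlock warning. */
    int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum)
    {
            int ret;

            mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
            ret = bus->read(bus, addr, regnum);
            mutex_unlock(&bus->mdio_lock);

            return ret;
    }

The SINGLE_DEPTH_NESTING annotation is what keeps lockdep quiet when the inner bus's lock is taken while the outer bus's lock of the same lock class is already held.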
if (ret < 0) return ret; @@ -1785,46 +1735,69 @@ static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid, return 0; } -/* get next entry for port */ -int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port, - unsigned char *addr, u16 *vid, bool *is_static) +int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port, + struct switchdev_obj_port_fdb *fdb, + int (*cb)(struct switchdev_obj *obj)) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - struct mv88e6xxx_atu_entry next; - u16 fid = *vid; /* We use one FID per VLAN */ - int ret; + struct mv88e6xxx_vtu_stu_entry vlan = { + .vid = GLOBAL_VTU_VID_MASK, /* all ones */ + }; + int err; mutex_lock(&ps->smi_mutex); + err = _mv88e6xxx_vtu_vid_write(ds, vlan.vid); + if (err) + goto unlock; + do { - if (is_broadcast_ether_addr(addr)) { - struct mv88e6xxx_vtu_stu_entry vtu; + struct mv88e6xxx_atu_entry addr = { + .mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + }; - ret = _mv88e6xxx_port_vtu_getnext(ds, port, *vid, &vtu); - if (ret < 0) - goto unlock; + err = _mv88e6xxx_vtu_getnext(ds, &vlan); + if (err) + goto unlock; - *vid = vtu.vid; - fid = vtu.fid; - } + if (!vlan.valid) + break; - ret = _mv88e6xxx_atu_getnext(ds, fid, addr, &next); - if (ret < 0) + err = _mv88e6xxx_atu_mac_write(ds, addr.mac); + if (err) goto unlock; - ether_addr_copy(addr, next.mac); + do { + err = _mv88e6xxx_atu_getnext(ds, vlan.fid, &addr); + if (err) + goto unlock; - if (next.state == GLOBAL_ATU_DATA_STATE_UNUSED) - continue; - } while (next.trunk || (next.portv_trunkid & BIT(port)) == 0); + if (addr.state == GLOBAL_ATU_DATA_STATE_UNUSED) + break; + + if (!addr.trunk && addr.portv_trunkid & BIT(port)) { + bool is_static = addr.state == + (is_multicast_ether_addr(addr.mac) ? + GLOBAL_ATU_DATA_STATE_MC_STATIC : + GLOBAL_ATU_DATA_STATE_UC_STATIC); + + fdb->vid = vlan.vid; + ether_addr_copy(fdb->addr, addr.mac); + fdb->ndm_state = is_static ? NUD_NOARP : + NUD_REACHABLE; + + err = cb(&fdb->obj); + if (err) + goto unlock; + } + } while (!is_broadcast_ether_addr(addr.mac)); + + } while (vlan.vid < GLOBAL_VTU_VID_MASK); - *is_static = next.state == (is_multicast_ether_addr(addr) ? 
- GLOBAL_ATU_DATA_STATE_MC_STATIC : - GLOBAL_ATU_DATA_STATE_UC_STATIC); unlock: mutex_unlock(&ps->smi_mutex); - return ret; + return err; } static void mv88e6xxx_bridge_work(struct work_struct *work) @@ -2097,273 +2070,9 @@ int mv88e6xxx_setup_ports(struct dsa_switch *ds) return 0; } -static int mv88e6xxx_regs_show(struct seq_file *s, void *p) -{ - struct dsa_switch *ds = s->private; - - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int reg, port; - - seq_puts(s, " GLOBAL GLOBAL2 "); - for (port = 0 ; port < ps->num_ports; port++) - seq_printf(s, " %2d ", port); - seq_puts(s, "\n"); - - for (reg = 0; reg < 32; reg++) { - seq_printf(s, "%2x: ", reg); - seq_printf(s, " %4x %4x ", - mv88e6xxx_reg_read(ds, REG_GLOBAL, reg), - mv88e6xxx_reg_read(ds, REG_GLOBAL2, reg)); - - for (port = 0 ; port < ps->num_ports; port++) - seq_printf(s, "%4x ", - mv88e6xxx_reg_read(ds, REG_PORT(port), reg)); - seq_puts(s, "\n"); - } - - return 0; -} - -static int mv88e6xxx_regs_open(struct inode *inode, struct file *file) -{ - return single_open(file, mv88e6xxx_regs_show, inode->i_private); -} - -static const struct file_operations mv88e6xxx_regs_fops = { - .open = mv88e6xxx_regs_open, - .read = seq_read, - .llseek = no_llseek, - .release = single_release, - .owner = THIS_MODULE, -}; - -static void mv88e6xxx_atu_show_header(struct seq_file *s) -{ - seq_puts(s, "DB T/P Vec State Addr\n"); -} - -static void mv88e6xxx_atu_show_entry(struct seq_file *s, int dbnum, - unsigned char *addr, int data) -{ - bool trunk = !!(data & GLOBAL_ATU_DATA_TRUNK); - int portvec = ((data & GLOBAL_ATU_DATA_PORT_VECTOR_MASK) >> - GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT); - int state = data & GLOBAL_ATU_DATA_STATE_MASK; - - seq_printf(s, "%03x %5s %10pb %x %pM\n", - dbnum, (trunk ? "Trunk" : "Port"), &portvec, state, addr); -} - -static int mv88e6xxx_atu_show_db(struct seq_file *s, struct dsa_switch *ds, - int dbnum) -{ - unsigned char bcast[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; - unsigned char addr[6]; - int ret, data, state; - - ret = _mv88e6xxx_atu_mac_write(ds, bcast); - if (ret < 0) - return ret; - - do { - ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, - dbnum); - if (ret < 0) - return ret; - - ret = _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_GET_NEXT_DB); - if (ret < 0) - return ret; - - data = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA); - if (data < 0) - return data; - - state = data & GLOBAL_ATU_DATA_STATE_MASK; - if (state == GLOBAL_ATU_DATA_STATE_UNUSED) - break; - ret = _mv88e6xxx_atu_mac_read(ds, addr); - if (ret < 0) - return ret; - mv88e6xxx_atu_show_entry(s, dbnum, addr, data); - } while (state != GLOBAL_ATU_DATA_STATE_UNUSED); - - return 0; -} - -static int mv88e6xxx_atu_show(struct seq_file *s, void *p) -{ - struct dsa_switch *ds = s->private; - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int dbnum; - - mv88e6xxx_atu_show_header(s); - - for (dbnum = 0; dbnum < 255; dbnum++) { - mutex_lock(&ps->smi_mutex); - mv88e6xxx_atu_show_db(s, ds, dbnum); - mutex_unlock(&ps->smi_mutex); - } - - return 0; -} - -static int mv88e6xxx_atu_open(struct inode *inode, struct file *file) -{ - return single_open(file, mv88e6xxx_atu_show, inode->i_private); -} - -static const struct file_operations mv88e6xxx_atu_fops = { - .open = mv88e6xxx_atu_open, - .read = seq_read, - .llseek = no_llseek, - .release = single_release, - .owner = THIS_MODULE, -}; - -static void mv88e6xxx_stats_show_header(struct seq_file *s, - struct mv88e6xxx_priv_state *ps) -{ - int port; - - seq_puts(s, " Statistic "); - for (port = 0 ; port < 
ps->num_ports; port++) - seq_printf(s, "Port %2d ", port); - seq_puts(s, "\n"); -} - -static int mv88e6xxx_stats_show(struct seq_file *s, void *p) -{ - struct dsa_switch *ds = s->private; - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - struct mv88e6xxx_hw_stat *stats = mv88e6xxx_hw_stats; - int port, stat, max_stats; - uint64_t value; - - if (have_sw_in_discards(ds)) - max_stats = ARRAY_SIZE(mv88e6xxx_hw_stats); - else - max_stats = ARRAY_SIZE(mv88e6xxx_hw_stats) - 3; - - mv88e6xxx_stats_show_header(s, ps); - - mutex_lock(&ps->smi_mutex); - - for (stat = 0; stat < max_stats; stat++) { - seq_printf(s, "%19s: ", stats[stat].string); - for (port = 0 ; port < ps->num_ports; port++) { - _mv88e6xxx_stats_snapshot(ds, port); - value = _mv88e6xxx_get_ethtool_stat(ds, stat, stats, - port); - seq_printf(s, "%8llu ", value); - } - seq_puts(s, "\n"); - } - mutex_unlock(&ps->smi_mutex); - - return 0; -} - -static int mv88e6xxx_stats_open(struct inode *inode, struct file *file) -{ - return single_open(file, mv88e6xxx_stats_show, inode->i_private); -} - -static const struct file_operations mv88e6xxx_stats_fops = { - .open = mv88e6xxx_stats_open, - .read = seq_read, - .llseek = no_llseek, - .release = single_release, - .owner = THIS_MODULE, -}; - -static int mv88e6xxx_device_map_show(struct seq_file *s, void *p) -{ - struct dsa_switch *ds = s->private; - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int target, ret; - - seq_puts(s, "Target Port\n"); - - mutex_lock(&ps->smi_mutex); - for (target = 0; target < 32; target++) { - ret = _mv88e6xxx_reg_write( - ds, REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING, - target << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT); - if (ret < 0) - goto out; - ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL2, - GLOBAL2_DEVICE_MAPPING); - seq_printf(s, " %2d %2d\n", target, - ret & GLOBAL2_DEVICE_MAPPING_PORT_MASK); - } -out: - mutex_unlock(&ps->smi_mutex); - - return 0; -} - -static int mv88e6xxx_device_map_open(struct inode *inode, struct file *file) -{ - return single_open(file, mv88e6xxx_device_map_show, inode->i_private); -} - -static const struct file_operations mv88e6xxx_device_map_fops = { - .open = mv88e6xxx_device_map_open, - .read = seq_read, - .llseek = no_llseek, - .release = single_release, - .owner = THIS_MODULE, -}; - -static int mv88e6xxx_scratch_show(struct seq_file *s, void *p) -{ - struct dsa_switch *ds = s->private; - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int reg, ret; - - seq_puts(s, "Register Value\n"); - - mutex_lock(&ps->smi_mutex); - for (reg = 0; reg < 0x80; reg++) { - ret = _mv88e6xxx_reg_write( - ds, REG_GLOBAL2, GLOBAL2_SCRATCH_MISC, - reg << GLOBAL2_SCRATCH_REGISTER_SHIFT); - if (ret < 0) - goto out; - - ret = _mv88e6xxx_scratch_wait(ds); - if (ret < 0) - goto out; - - ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL2, - GLOBAL2_SCRATCH_MISC); - seq_printf(s, " %2x %2x\n", reg, - ret & GLOBAL2_SCRATCH_VALUE_MASK); - } -out: - mutex_unlock(&ps->smi_mutex); - - return 0; -} - -static int mv88e6xxx_scratch_open(struct inode *inode, struct file *file) -{ - return single_open(file, mv88e6xxx_scratch_show, inode->i_private); -} - -static const struct file_operations mv88e6xxx_scratch_fops = { - .open = mv88e6xxx_scratch_open, - .read = seq_read, - .llseek = no_llseek, - .release = single_release, - .owner = THIS_MODULE, -}; - int mv88e6xxx_setup_common(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - char *name; mutex_init(&ps->smi_mutex); @@ -2371,24 +2080,6 @@ int mv88e6xxx_setup_common(struct dsa_switch *ds) 
INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work); - name = kasprintf(GFP_KERNEL, "dsa%d", ds->index); - ps->dbgfs = debugfs_create_dir(name, NULL); - kfree(name); - - debugfs_create_file("regs", S_IRUGO, ps->dbgfs, ds, - &mv88e6xxx_regs_fops); - - debugfs_create_file("atu", S_IRUGO, ps->dbgfs, ds, - &mv88e6xxx_atu_fops); - - debugfs_create_file("stats", S_IRUGO, ps->dbgfs, ds, - &mv88e6xxx_stats_fops); - - debugfs_create_file("device_map", S_IRUGO, ps->dbgfs, ds, - &mv88e6xxx_device_map_fops); - - debugfs_create_file("scratch", S_IRUGO, ps->dbgfs, ds, - &mv88e6xxx_scratch_fops); return 0; } diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 4ba69f339bfe..6f9ed5d45012 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -406,8 +406,6 @@ struct mv88e6xxx_priv_state { u8 port_state[DSA_MAX_PORTS]; struct work_struct bridge_work; - - struct dentry *dbgfs; }; struct mv88e6xxx_hw_stat { @@ -474,8 +472,9 @@ int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, struct switchdev_trans *trans); int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port, const struct switchdev_obj_port_fdb *fdb); -int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port, - unsigned char *addr, u16 *vid, bool *is_static); +int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port, + struct switchdev_obj_port_fdb *fdb, + int (*cb)(struct switchdev_obj *obj)); int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg); int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page, int reg, int val); diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index 815eb94990f5..69fc8409a973 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -147,8 +147,12 @@ static void dummy_setup(struct net_device *dev) dev->flags |= IFF_NOARP; dev->flags &= ~IFF_MULTICAST; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; - dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO; + dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST; + dev->features |= NETIF_F_ALL_TSO | NETIF_F_UFO; dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX; + dev->features |= NETIF_F_GSO_ENCAP_ALL; + dev->hw_features |= dev->features; + dev->hw_enc_features |= dev->features; eth_hw_addr_random(dev); } diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index e930aa9a3cfb..67a7d520d9f5 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -170,4 +170,23 @@ config SYSTEMPORT Broadcom BCM7xxx Set Top Box family chipset using an internal Ethernet switch. +config BNXT + tristate "Broadcom NetXtreme-C/E support" + depends on PCI + select FW_LOADER + select LIBCRC32C + ---help--- + This driver supports Broadcom NetXtreme-C/E 10/25/40/50 gigabit + Ethernet cards. To compile this driver as a module, choose M here: + the module will be called bnxt_en. This is recommended. + +config BNXT_SRIOV + bool "Broadcom NetXtreme-C/E SR-IOV support" + depends on BNXT && PCI_IOV + default y + ---help--- + This configuration parameter enables Single Root Input Output + Virtualization support in the NetXtreme-C/E products. This + allows for virtual function acceleration in virtual environments. 
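Because CONFIG_BNXT_SRIOV above is a bool rather than a tristate, it gates code at compile time; the new driver's PCI ID table below wraps its two VF device IDs in #ifdef CONFIG_BNXT_SRIOV in exactly this way. A hedged sketch of the pattern follows: bnxt_sriov_enable() is a made-up name for illustration, while pci_enable_sriov() is the real PCI core call.

    #ifdef CONFIG_BNXT_SRIOV
    static int bnxt_sriov_enable(struct pci_dev *pdev, int num_vfs)
    {
            /* Ask the PCI core to instantiate num_vfs virtual functions. */
            return pci_enable_sriov(pdev, num_vfs);
    }
    #else
    static inline int bnxt_sriov_enable(struct pci_dev *pdev, int num_vfs)
    {
            return -EOPNOTSUPP;     /* SR-IOV support compiled out */
    }
    #endif

With the stub in the #else branch, callers need no conditional compilation of their own.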
+ endif # NET_VENDOR_BROADCOM diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile index e2a958a657e0..00584d78b3e0 100644 --- a/drivers/net/ethernet/broadcom/Makefile +++ b/drivers/net/ethernet/broadcom/Makefile @@ -12,3 +12,4 @@ obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o obj-$(CONFIG_TIGON3) += tg3.o obj-$(CONFIG_BGMAC) += bgmac.o obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o +obj-$(CONFIG_BNXT) += bnxt/ diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile new file mode 100644 index 000000000000..97e78e217928 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_BNXT) += bnxt_en.o + +bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c new file mode 100644 index 000000000000..9e36f83e68f0 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -0,0 +1,5728 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2014-2015 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#include <linux/module.h> + +#include <linux/stringify.h> +#include <linux/kernel.h> +#include <linux/timer.h> +#include <linux/errno.h> +#include <linux/ioport.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/dma-mapping.h> +#include <linux/bitops.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/delay.h> +#include <asm/byteorder.h> +#include <asm/page.h> +#include <linux/time.h> +#include <linux/mii.h> +#include <linux/if.h> +#include <linux/if_vlan.h> +#include <net/ip.h> +#include <net/tcp.h> +#include <net/udp.h> +#include <net/checksum.h> +#include <net/ip6_checksum.h> +#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE) +#include <net/vxlan.h> +#endif +#ifdef CONFIG_NET_RX_BUSY_POLL +#include <net/busy_poll.h> +#endif +#include <linux/workqueue.h> +#include <linux/prefetch.h> +#include <linux/cache.h> +#include <linux/log2.h> +#include <linux/aer.h> +#include <linux/bitmap.h> +#include <linux/cpu_rmap.h> + +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_sriov.h" +#include "bnxt_ethtool.h" + +#define BNXT_TX_TIMEOUT (5 * HZ) + +static const char version[] = + "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n"; + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Broadcom BCM573xx network driver"); +MODULE_VERSION(DRV_MODULE_VERSION); + +#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN) +#define BNXT_RX_DMA_OFFSET NET_SKB_PAD +#define BNXT_RX_COPY_THRESH 256 + +#define BNXT_TX_PUSH_THRESH 92 + +enum board_idx { + BCM57302, + BCM57304, + BCM57404, + BCM57406, + BCM57304_VF, + BCM57404_VF, +}; + +/* indexed by enum above */ +static const struct { + char *name; +} board_info[] = { + { "Broadcom BCM57302 NetXtreme-C Single-port 10Gb/25Gb/40Gb/50Gb Ethernet" }, + { "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" }, + { "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" }, + { "Broadcom BCM57406 NetXtreme-E Dual-port 10Gb Ethernet" }, + { "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" }, + { "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" }, +}; + +static 
const struct pci_device_id bnxt_pci_tbl[] = { + { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 }, + { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 }, + { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 }, + { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 }, +#ifdef CONFIG_BNXT_SRIOV + { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF }, + { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF }, +#endif + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl); + +static const u16 bnxt_vf_req_snif[] = { + HWRM_FUNC_CFG, + HWRM_PORT_PHY_QCFG, + HWRM_CFA_L2_FILTER_ALLOC, +}; + +static bool bnxt_vf_pciid(enum board_idx idx) +{ + return (idx == BCM57304_VF || idx == BCM57404_VF); +} + +#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID) +#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS) +#define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS) + +#define BNXT_CP_DB_REARM(db, raw_cons) \ + writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db) + +#define BNXT_CP_DB(db, raw_cons) \ + writel(DB_CP_FLAGS | RING_CMP(raw_cons), db) + +#define BNXT_CP_DB_IRQ_DIS(db) \ + writel(DB_CP_IRQ_DIS_FLAGS, db) + +static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr) +{ + /* Tell compiler to fetch tx indices from memory. */ + barrier(); + + return bp->tx_ring_size - + ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask); +} + +static const u16 bnxt_lhint_arr[] = { + TX_BD_FLAGS_LHINT_512_AND_SMALLER, + TX_BD_FLAGS_LHINT_512_TO_1023, + TX_BD_FLAGS_LHINT_1024_TO_2047, + TX_BD_FLAGS_LHINT_1024_TO_2047, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, +}; + +static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + struct tx_bd *txbd; + struct tx_bd_ext *txbd1; + struct netdev_queue *txq; + int i; + dma_addr_t mapping; + unsigned int length, pad = 0; + u32 len, free_size, vlan_tag_flags, cfa_action, flags; + u16 prod, last_frag; + struct pci_dev *pdev = bp->pdev; + struct bnxt_napi *bnapi; + struct bnxt_tx_ring_info *txr; + struct bnxt_sw_tx_bd *tx_buf; + + i = skb_get_queue_mapping(skb); + if (unlikely(i >= bp->tx_nr_rings)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + bnapi = bp->bnapi[i]; + txr = &bnapi->tx_ring; + txq = netdev_get_tx_queue(dev, i); + prod = txr->tx_prod; + + free_size = bnxt_tx_avail(bp, txr); + if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) { + netif_tx_stop_queue(txq); + return NETDEV_TX_BUSY; + } + + length = skb->len; + len = skb_headlen(skb); + last_frag = skb_shinfo(skb)->nr_frags; + + txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; + + txbd->tx_bd_opaque = prod; + + tx_buf = &txr->tx_buf_ring[prod]; + tx_buf->skb = skb; + tx_buf->nr_frags = last_frag; + + vlan_tag_flags = 0; + cfa_action = 0; + if (skb_vlan_tag_present(skb)) { + vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN | + skb_vlan_tag_get(skb); + /* Currently supports 8021Q, 8021AD vlan offloads + * QINQ1, QINQ2, QINQ3 vlan headers are deprecated + */ + if 
(skb->vlan_proto == htons(ETH_P_8021Q)) + vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT; + } + + if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) { + struct tx_push_bd *push = txr->tx_push; + struct tx_bd *tx_push = &push->txbd1; + struct tx_bd_ext *tx_push1 = &push->txbd2; + void *pdata = tx_push1 + 1; + int j; + + /* Set COAL_NOW to be ready quickly for the next push */ + tx_push->tx_bd_len_flags_type = + cpu_to_le32((length << TX_BD_LEN_SHIFT) | + TX_BD_TYPE_LONG_TX_BD | + TX_BD_FLAGS_LHINT_512_AND_SMALLER | + TX_BD_FLAGS_COAL_NOW | + TX_BD_FLAGS_PACKET_END | + (2 << TX_BD_FLAGS_BD_CNT_SHIFT)); + + if (skb->ip_summed == CHECKSUM_PARTIAL) + tx_push1->tx_bd_hsize_lflags = + cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); + else + tx_push1->tx_bd_hsize_lflags = 0; + + tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); + tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action); + + skb_copy_from_linear_data(skb, pdata, len); + pdata += len; + for (j = 0; j < last_frag; j++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; + void *fptr; + + fptr = skb_frag_address_safe(frag); + if (!fptr) + goto normal_tx; + + memcpy(pdata, fptr, skb_frag_size(frag)); + pdata += skb_frag_size(frag); + } + + memcpy(txbd, tx_push, sizeof(*txbd)); + prod = NEXT_TX(prod); + txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; + memcpy(txbd, tx_push1, sizeof(*txbd)); + prod = NEXT_TX(prod); + push->doorbell = + cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod); + txr->tx_prod = prod; + + netdev_tx_sent_queue(txq, skb->len); + + __iowrite64_copy(txr->tx_doorbell, push, + (length + sizeof(*push) + 8) / 8); + + tx_buf->is_push = 1; + + goto tx_done; + } + +normal_tx: + if (length < BNXT_MIN_PKT_SIZE) { + pad = BNXT_MIN_PKT_SIZE - length; + if (skb_pad(skb, pad)) { + /* SKB already freed. 
*/ + tx_buf->skb = NULL; + return NETDEV_TX_OK; + } + length = BNXT_MIN_PKT_SIZE; + } + + mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(&pdev->dev, mapping))) { + dev_kfree_skb_any(skb); + tx_buf->skb = NULL; + return NETDEV_TX_OK; + } + + dma_unmap_addr_set(tx_buf, mapping, mapping); + flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD | + ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT); + + txbd->tx_bd_haddr = cpu_to_le64(mapping); + + prod = NEXT_TX(prod); + txbd1 = (struct tx_bd_ext *) + &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; + + txbd1->tx_bd_hsize_lflags = 0; + if (skb_is_gso(skb)) { + u32 hdr_len; + + if (skb->encapsulation) + hdr_len = skb_inner_network_offset(skb) + + skb_inner_network_header_len(skb) + + inner_tcp_hdrlen(skb); + else + hdr_len = skb_transport_offset(skb) + + tcp_hdrlen(skb); + + txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO | + TX_BD_FLAGS_T_IPID | + (hdr_len << (TX_BD_HSIZE_SHIFT - 1))); + length = skb_shinfo(skb)->gso_size; + txbd1->tx_bd_mss = cpu_to_le32(length); + length += hdr_len; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + txbd1->tx_bd_hsize_lflags = + cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); + txbd1->tx_bd_mss = 0; + } + + length >>= 9; + flags |= bnxt_lhint_arr[length]; + txbd->tx_bd_len_flags_type = cpu_to_le32(flags); + + txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); + txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action); + for (i = 0; i < last_frag; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + prod = NEXT_TX(prod); + txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; + + len = skb_frag_size(frag); + mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len, + DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(&pdev->dev, mapping))) + goto tx_dma_error; + + tx_buf = &txr->tx_buf_ring[prod]; + dma_unmap_addr_set(tx_buf, mapping, mapping); + + txbd->tx_bd_haddr = cpu_to_le64(mapping); + + flags = len << TX_BD_LEN_SHIFT; + txbd->tx_bd_len_flags_type = cpu_to_le32(flags); + } + + flags &= ~TX_BD_LEN; + txbd->tx_bd_len_flags_type = + cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags | + TX_BD_FLAGS_PACKET_END); + + netdev_tx_sent_queue(txq, skb->len); + + /* Sync BD data before updating doorbell */ + wmb(); + + prod = NEXT_TX(prod); + txr->tx_prod = prod; + + writel(DB_KEY_TX | prod, txr->tx_doorbell); + writel(DB_KEY_TX | prod, txr->tx_doorbell); + +tx_done: + + mmiowb(); + + if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) { + netif_tx_stop_queue(txq); + + /* netif_tx_stop_queue() must be done before checking + * tx index in bnxt_tx_avail() below, because in + * bnxt_tx_int(), we update tx index before checking for + * netif_tx_queue_stopped(). 
+ */ + smp_mb(); + if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) + netif_tx_wake_queue(txq); + } + return NETDEV_TX_OK; + +tx_dma_error: + last_frag = i; + + /* start back at beginning and unmap skb */ + prod = txr->tx_prod; + tx_buf = &txr->tx_buf_ring[prod]; + tx_buf->skb = NULL; + dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), + skb_headlen(skb), PCI_DMA_TODEVICE); + prod = NEXT_TX(prod); + + /* unmap remaining mapped pages */ + for (i = 0; i < last_frag; i++) { + prod = NEXT_TX(prod); + tx_buf = &txr->tx_buf_ring[prod]; + dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping), + skb_frag_size(&skb_shinfo(skb)->frags[i]), + PCI_DMA_TODEVICE); + } + + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) +{ + struct bnxt_tx_ring_info *txr = &bnapi->tx_ring; + int index = bnapi->index; + struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index); + u16 cons = txr->tx_cons; + struct pci_dev *pdev = bp->pdev; + int i; + unsigned int tx_bytes = 0; + + for (i = 0; i < nr_pkts; i++) { + struct bnxt_sw_tx_bd *tx_buf; + struct sk_buff *skb; + int j, last; + + tx_buf = &txr->tx_buf_ring[cons]; + cons = NEXT_TX(cons); + skb = tx_buf->skb; + tx_buf->skb = NULL; + + if (tx_buf->is_push) { + tx_buf->is_push = 0; + goto next_tx_int; + } + + dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), + skb_headlen(skb), PCI_DMA_TODEVICE); + last = tx_buf->nr_frags; + + for (j = 0; j < last; j++) { + cons = NEXT_TX(cons); + tx_buf = &txr->tx_buf_ring[cons]; + dma_unmap_page( + &pdev->dev, + dma_unmap_addr(tx_buf, mapping), + skb_frag_size(&skb_shinfo(skb)->frags[j]), + PCI_DMA_TODEVICE); + } + +next_tx_int: + cons = NEXT_TX(cons); + + tx_bytes += skb->len; + dev_kfree_skb_any(skb); + } + + netdev_tx_completed_queue(txq, nr_pkts, tx_bytes); + txr->tx_cons = cons; + + /* Need to make the tx_cons update visible to bnxt_start_xmit() + * before checking for netif_tx_queue_stopped(). Without the + * memory barrier, there is a small possibility that bnxt_start_xmit() + * will miss it and cause the queue to be stopped forever. 
+ */ + smp_mb(); + + if (unlikely(netif_tx_queue_stopped(txq)) && + (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) { + __netif_tx_lock(txq, smp_processor_id()); + if (netif_tx_queue_stopped(txq) && + bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh && + txr->dev_state != BNXT_DEV_STATE_CLOSING) + netif_tx_wake_queue(txq); + __netif_tx_unlock(txq); + } +} + +static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping, + gfp_t gfp) +{ + u8 *data; + struct pci_dev *pdev = bp->pdev; + + data = kmalloc(bp->rx_buf_size, gfp); + if (!data) + return NULL; + + *mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET, + bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); + + if (dma_mapping_error(&pdev->dev, *mapping)) { + kfree(data); + data = NULL; + } + return data; +} + +static inline int bnxt_alloc_rx_data(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr, + u16 prod, gfp_t gfp) +{ + struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; + struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod]; + u8 *data; + dma_addr_t mapping; + + data = __bnxt_alloc_rx_data(bp, &mapping, gfp); + if (!data) + return -ENOMEM; + + rx_buf->data = data; + dma_unmap_addr_set(rx_buf, mapping, mapping); + + rxbd->rx_bd_haddr = cpu_to_le64(mapping); + + return 0; +} + +static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, + u8 *data) +{ + u16 prod = rxr->rx_prod; + struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; + struct rx_bd *cons_bd, *prod_bd; + + prod_rx_buf = &rxr->rx_buf_ring[prod]; + cons_rx_buf = &rxr->rx_buf_ring[cons]; + + prod_rx_buf->data = data; + + dma_unmap_addr_set(prod_rx_buf, mapping, + dma_unmap_addr(cons_rx_buf, mapping)); + + prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; + cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)]; + + prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr; +} + +static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) +{ + u16 next, max = rxr->rx_agg_bmap_size; + + next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx); + if (next >= max) + next = find_first_zero_bit(rxr->rx_agg_bmap, max); + return next; +} + +static inline int bnxt_alloc_rx_page(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr, + u16 prod, gfp_t gfp) +{ + struct rx_bd *rxbd = + &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)]; + struct bnxt_sw_rx_agg_bd *rx_agg_buf; + struct pci_dev *pdev = bp->pdev; + struct page *page; + dma_addr_t mapping; + u16 sw_prod = rxr->rx_sw_agg_prod; + + page = alloc_page(gfp); + if (!page) + return -ENOMEM; + + mapping = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + if (dma_mapping_error(&pdev->dev, mapping)) { + __free_page(page); + return -EIO; + } + + if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) + sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); + + __set_bit(sw_prod, rxr->rx_agg_bmap); + rx_agg_buf = &rxr->rx_agg_ring[sw_prod]; + rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod); + + rx_agg_buf->page = page; + rx_agg_buf->mapping = mapping; + rxbd->rx_bd_haddr = cpu_to_le64(mapping); + rxbd->rx_bd_opaque = sw_prod; + return 0; +} + +static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons, + u32 agg_bufs) +{ + struct bnxt *bp = bnapi->bp; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; + u16 prod = rxr->rx_agg_prod; + u16 sw_prod = rxr->rx_sw_agg_prod; + u32 i; + + for (i = 0; i < agg_bufs; i++) { + u16 cons; + struct rx_agg_cmp *agg; + struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf; + struct rx_bd 
*prod_bd; + struct page *page; + + agg = (struct rx_agg_cmp *) + &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + cons = agg->rx_agg_cmp_opaque; + __clear_bit(cons, rxr->rx_agg_bmap); + + if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) + sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); + + __set_bit(sw_prod, rxr->rx_agg_bmap); + prod_rx_buf = &rxr->rx_agg_ring[sw_prod]; + cons_rx_buf = &rxr->rx_agg_ring[cons]; + + /* It is possible for sw_prod to be equal to cons, so + * set cons_rx_buf->page to NULL first. + */ + page = cons_rx_buf->page; + cons_rx_buf->page = NULL; + prod_rx_buf->page = page; + + prod_rx_buf->mapping = cons_rx_buf->mapping; + + prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)]; + + prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping); + prod_bd->rx_bd_opaque = sw_prod; + + prod = NEXT_RX_AGG(prod); + sw_prod = NEXT_RX_AGG(sw_prod); + cp_cons = NEXT_CMP(cp_cons); + } + rxr->rx_agg_prod = prod; + rxr->rx_sw_agg_prod = sw_prod; +} + +static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr, u16 cons, + u16 prod, u8 *data, dma_addr_t dma_addr, + unsigned int len) +{ + int err; + struct sk_buff *skb; + + err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); + if (unlikely(err)) { + bnxt_reuse_rx_data(rxr, cons, data); + return NULL; + } + + skb = build_skb(data, 0); + dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, + PCI_DMA_FROMDEVICE); + if (!skb) { + kfree(data); + return NULL; + } + + skb_reserve(skb, BNXT_RX_OFFSET); + skb_put(skb, len); + return skb; +} + +static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi, + struct sk_buff *skb, u16 cp_cons, + u32 agg_bufs) +{ + struct pci_dev *pdev = bp->pdev; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; + u16 prod = rxr->rx_agg_prod; + u32 i; + + for (i = 0; i < agg_bufs; i++) { + u16 cons, frag_len; + struct rx_agg_cmp *agg; + struct bnxt_sw_rx_agg_bd *cons_rx_buf; + struct page *page; + dma_addr_t mapping; + + agg = (struct rx_agg_cmp *) + &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + cons = agg->rx_agg_cmp_opaque; + frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) & + RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT; + + cons_rx_buf = &rxr->rx_agg_ring[cons]; + skb_fill_page_desc(skb, i, cons_rx_buf->page, 0, frag_len); + __clear_bit(cons, rxr->rx_agg_bmap); + + /* It is possible for bnxt_alloc_rx_page() to allocate + * a sw_prod index that equals the cons index, so we + * need to clear the cons entry now. + */ + mapping = dma_unmap_addr(cons_rx_buf, mapping); + page = cons_rx_buf->page; + cons_rx_buf->page = NULL; + + if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) { + struct skb_shared_info *shinfo; + unsigned int nr_frags; + + shinfo = skb_shinfo(skb); + nr_frags = --shinfo->nr_frags; + __skb_frag_set_page(&shinfo->frags[nr_frags], NULL); + + dev_kfree_skb(skb); + + cons_rx_buf->page = page; + + /* Update prod since possibly some pages have been + * allocated already. 
+ */ + rxr->rx_agg_prod = prod; + bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i); + return NULL; + } + + dma_unmap_page(&pdev->dev, mapping, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + + skb->data_len += frag_len; + skb->len += frag_len; + skb->truesize += PAGE_SIZE; + + prod = NEXT_RX_AGG(prod); + cp_cons = NEXT_CMP(cp_cons); + } + rxr->rx_agg_prod = prod; + return skb; +} + +static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, + u8 agg_bufs, u32 *raw_cons) +{ + u16 last; + struct rx_agg_cmp *agg; + + *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs); + last = RING_CMP(*raw_cons); + agg = (struct rx_agg_cmp *) + &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)]; + return RX_AGG_CMP_VALID(agg, *raw_cons); +} + +static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data, + unsigned int len, + dma_addr_t mapping) +{ + struct bnxt *bp = bnapi->bp; + struct pci_dev *pdev = bp->pdev; + struct sk_buff *skb; + + skb = napi_alloc_skb(&bnapi->napi, len); + if (!skb) + return NULL; + + dma_sync_single_for_cpu(&pdev->dev, mapping, + bp->rx_copy_thresh, PCI_DMA_FROMDEVICE); + + memcpy(skb->data - BNXT_RX_OFFSET, data, len + BNXT_RX_OFFSET); + + dma_sync_single_for_device(&pdev->dev, mapping, + bp->rx_copy_thresh, + PCI_DMA_FROMDEVICE); + + skb_put(skb, len); + return skb; +} + +static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, + struct rx_tpa_start_cmp *tpa_start, + struct rx_tpa_start_cmp_ext *tpa_start1) +{ + u8 agg_id = TPA_START_AGG_ID(tpa_start); + u16 cons, prod; + struct bnxt_tpa_info *tpa_info; + struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; + struct rx_bd *prod_bd; + dma_addr_t mapping; + + cons = tpa_start->rx_tpa_start_cmp_opaque; + prod = rxr->rx_prod; + cons_rx_buf = &rxr->rx_buf_ring[cons]; + prod_rx_buf = &rxr->rx_buf_ring[prod]; + tpa_info = &rxr->rx_tpa[agg_id]; + + prod_rx_buf->data = tpa_info->data; + + mapping = tpa_info->mapping; + dma_unmap_addr_set(prod_rx_buf, mapping, mapping); + + prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; + + prod_bd->rx_bd_haddr = cpu_to_le64(mapping); + + tpa_info->data = cons_rx_buf->data; + cons_rx_buf->data = NULL; + tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping); + + tpa_info->len = + le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >> + RX_TPA_START_CMP_LEN_SHIFT; + if (likely(TPA_START_HASH_VALID(tpa_start))) { + u32 hash_type = TPA_START_HASH_TYPE(tpa_start); + + tpa_info->hash_type = PKT_HASH_TYPE_L4; + tpa_info->gso_type = SKB_GSO_TCPV4; + /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ + if (hash_type == 3) + tpa_info->gso_type = SKB_GSO_TCPV6; + tpa_info->rss_hash = + le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash); + } else { + tpa_info->hash_type = PKT_HASH_TYPE_NONE; + tpa_info->gso_type = 0; + if (netif_msg_rx_err(bp)) + netdev_warn(bp->dev, "TPA packet without valid hash\n"); + } + tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2); + tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata); + + rxr->rx_prod = NEXT_RX(prod); + cons = NEXT_RX(cons); + cons_rx_buf = &rxr->rx_buf_ring[cons]; + + bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data); + rxr->rx_prod = NEXT_RX(rxr->rx_prod); + cons_rx_buf->data = NULL; +} + +static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi, + u16 cp_cons, u32 agg_bufs) +{ + if (agg_bufs) + bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs); +} + +#define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr)) +#define BNXT_IPV6_HDR_SIZE 
(sizeof(struct ipv6hdr) + sizeof(struct tcphdr)) + +static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info, + struct rx_tpa_end_cmp *tpa_end, + struct rx_tpa_end_cmp_ext *tpa_end1, + struct sk_buff *skb) +{ + struct tcphdr *th; + int payload_off, tcp_opt_len = 0; + int len, nw_off; + + NAPI_GRO_CB(skb)->count = TPA_END_TPA_SEGS(tpa_end); + skb_shinfo(skb)->gso_size = + le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len); + skb_shinfo(skb)->gso_type = tpa_info->gso_type; + payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) & + RX_TPA_END_CMP_PAYLOAD_OFFSET) >> + RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT; + if (TPA_END_GRO_TS(tpa_end)) + tcp_opt_len = 12; + +#ifdef CONFIG_INET + if (tpa_info->gso_type == SKB_GSO_TCPV4) { + struct iphdr *iph; + + nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len - + ETH_HLEN; + skb_set_network_header(skb, nw_off); + iph = ip_hdr(skb); + skb_set_transport_header(skb, nw_off + sizeof(struct iphdr)); + len = skb->len - skb_transport_offset(skb); + th = tcp_hdr(skb); + th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0); + } else if (tpa_info->gso_type == SKB_GSO_TCPV6) { + struct ipv6hdr *iph; + + nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len - + ETH_HLEN; + skb_set_network_header(skb, nw_off); + iph = ipv6_hdr(skb); + skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr)); + len = skb->len - skb_transport_offset(skb); + th = tcp_hdr(skb); + th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0); + } else { + dev_kfree_skb_any(skb); + return NULL; + } + tcp_gro_complete(skb); + + if (nw_off) { /* tunnel */ + struct udphdr *uh = NULL; + + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = (struct iphdr *)skb->data; + + if (iph->protocol == IPPROTO_UDP) + uh = (struct udphdr *)(iph + 1); + } else { + struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; + + if (iph->nexthdr == IPPROTO_UDP) + uh = (struct udphdr *)(iph + 1); + } + if (uh) { + if (uh->check) + skb_shinfo(skb)->gso_type |= + SKB_GSO_UDP_TUNNEL_CSUM; + else + skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; + } + } +#endif + return skb; +} + +static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, + struct bnxt_napi *bnapi, + u32 *raw_cons, + struct rx_tpa_end_cmp *tpa_end, + struct rx_tpa_end_cmp_ext *tpa_end1, + bool *agg_event) +{ + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; + u8 agg_id = TPA_END_AGG_ID(tpa_end); + u8 *data, agg_bufs; + u16 cp_cons = RING_CMP(*raw_cons); + unsigned int len; + struct bnxt_tpa_info *tpa_info; + dma_addr_t mapping; + struct sk_buff *skb; + + tpa_info = &rxr->rx_tpa[agg_id]; + data = tpa_info->data; + prefetch(data); + len = tpa_info->len; + mapping = tpa_info->mapping; + + agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) & + RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT; + + if (agg_bufs) { + if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) + return ERR_PTR(-EBUSY); + + *agg_event = true; + cp_cons = NEXT_CMP(cp_cons); + } + + if (unlikely(agg_bufs > MAX_SKB_FRAGS)) { + bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); + netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", + agg_bufs, (int)MAX_SKB_FRAGS); + return NULL; + } + + if (len <= bp->rx_copy_thresh) { + skb = bnxt_copy_skb(bnapi, data, len, mapping); + if (!skb) { + bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); + return NULL; + } + } else { + u8 *new_data; + dma_addr_t new_mapping; + + new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC); + if 
(!new_data) { + bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); + return NULL; + } + + tpa_info->data = new_data; + tpa_info->mapping = new_mapping; + + skb = build_skb(data, 0); + dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size, + PCI_DMA_FROMDEVICE); + + if (!skb) { + kfree(data); + bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); + return NULL; + } + skb_reserve(skb, BNXT_RX_OFFSET); + skb_put(skb, len); + } + + if (agg_bufs) { + skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs); + if (!skb) { + /* Page reuse already handled by bnxt_rx_pages(). */ + return NULL; + } + } + skb->protocol = eth_type_trans(skb, bp->dev); + + if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) + skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); + + if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) { + netdev_features_t features = skb->dev->features; + u16 vlan_proto = tpa_info->metadata >> + RX_CMP_FLAGS2_METADATA_TPID_SFT; + + if (((features & NETIF_F_HW_VLAN_CTAG_RX) && + vlan_proto == ETH_P_8021Q) || + ((features & NETIF_F_HW_VLAN_STAG_RX) && + vlan_proto == ETH_P_8021AD)) { + __vlan_hwaccel_put_tag(skb, htons(vlan_proto), + tpa_info->metadata & + RX_CMP_FLAGS2_METADATA_VID_MASK); + } + } + + skb_checksum_none_assert(skb); + if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = + (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3; + } + + if (TPA_END_GRO(tpa_end)) + skb = bnxt_gro_skb(tpa_info, tpa_end, tpa_end1, skb); + + return skb; +} + +/* returns the following: + * 1 - 1 packet successfully received + * 0 - successful TPA_START, packet not completed yet + * -EBUSY - completion ring does not have all the agg buffers yet + * -ENOMEM - packet aborted due to out of memory + * -EIO - packet aborted due to hw error indicated in BD + */ +static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, + bool *agg_event) +{ + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; + struct net_device *dev = bp->dev; + struct rx_cmp *rxcmp; + struct rx_cmp_ext *rxcmp1; + u32 tmp_raw_cons = *raw_cons; + u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons); + struct bnxt_sw_rx_bd *rx_buf; + unsigned int len; + u8 *data, agg_bufs, cmp_type; + dma_addr_t dma_addr; + struct sk_buff *skb; + int rc = 0; + + rxcmp = (struct rx_cmp *) + &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + + tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); + cp_cons = RING_CMP(tmp_raw_cons); + rxcmp1 = (struct rx_cmp_ext *) + &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + + if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) + return -EBUSY; + + cmp_type = RX_CMP_TYPE(rxcmp); + + prod = rxr->rx_prod; + + if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) { + bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp, + (struct rx_tpa_start_cmp_ext *)rxcmp1); + + goto next_rx_no_prod; + + } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { + skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons, + (struct rx_tpa_end_cmp *)rxcmp, + (struct rx_tpa_end_cmp_ext *)rxcmp1, + agg_event); + + if (unlikely(IS_ERR(skb))) + return -EBUSY; + + rc = -ENOMEM; + if (likely(skb)) { + skb_record_rx_queue(skb, bnapi->index); + skb_mark_napi_id(skb, &bnapi->napi); + if (bnxt_busy_polling(bnapi)) + netif_receive_skb(skb); + else + napi_gro_receive(&bnapi->napi, skb); + rc = 1; + } + goto next_rx_no_prod; + } + + cons = rxcmp->rx_cmp_opaque; + rx_buf = &rxr->rx_buf_ring[cons]; + data = rx_buf->data; + prefetch(data); + + 
agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >> + RX_CMP_AGG_BUFS_SHIFT; + + if (agg_bufs) { + if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) + return -EBUSY; + + cp_cons = NEXT_CMP(cp_cons); + *agg_event = true; + } + + rx_buf->data = NULL; + if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { + bnxt_reuse_rx_data(rxr, cons, data); + if (agg_bufs) + bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs); + + rc = -EIO; + goto next_rx; + } + + len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT; + dma_addr = dma_unmap_addr(rx_buf, mapping); + + if (len <= bp->rx_copy_thresh) { + skb = bnxt_copy_skb(bnapi, data, len, dma_addr); + bnxt_reuse_rx_data(rxr, cons, data); + if (!skb) { + rc = -ENOMEM; + goto next_rx; + } + } else { + skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len); + if (!skb) { + rc = -ENOMEM; + goto next_rx; + } + } + + if (agg_bufs) { + skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs); + if (!skb) { + rc = -ENOMEM; + goto next_rx; + } + } + + if (RX_CMP_HASH_VALID(rxcmp)) { + u32 hash_type = RX_CMP_HASH_TYPE(rxcmp); + enum pkt_hash_types type = PKT_HASH_TYPE_L4; + + /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ + if (hash_type != 1 && hash_type != 3) + type = PKT_HASH_TYPE_L3; + skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type); + } + + skb->protocol = eth_type_trans(skb, dev); + + if (rxcmp1->rx_cmp_flags2 & + cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) { + netdev_features_t features = skb->dev->features; + u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); + u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; + + if (((features & NETIF_F_HW_VLAN_CTAG_RX) && + vlan_proto == ETH_P_8021Q) || + ((features & NETIF_F_HW_VLAN_STAG_RX) && + vlan_proto == ETH_P_8021AD)) + __vlan_hwaccel_put_tag(skb, htons(vlan_proto), + meta_data & + RX_CMP_FLAGS2_METADATA_VID_MASK); + } + + skb_checksum_none_assert(skb); + if (RX_CMP_L4_CS_OK(rxcmp1)) { + if (dev->features & NETIF_F_RXCSUM) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = RX_CMP_ENCAP(rxcmp1); + } + } else { + if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) + cpr->rx_l4_csum_errors++; + } + + skb_record_rx_queue(skb, bnapi->index); + skb_mark_napi_id(skb, &bnapi->napi); + if (bnxt_busy_polling(bnapi)) + netif_receive_skb(skb); + else + napi_gro_receive(&bnapi->napi, skb); + rc = 1; + +next_rx: + rxr->rx_prod = NEXT_RX(prod); + +next_rx_no_prod: + *raw_cons = tmp_raw_cons; + + return rc; +} + +static int bnxt_async_event_process(struct bnxt *bp, + struct hwrm_async_event_cmpl *cmpl) +{ + u16 event_id = le16_to_cpu(cmpl->event_id); + + /* TODO CHIMP_FW: Define event id's for link change, error etc */ + switch (event_id) { + case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: + set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); + schedule_work(&bp->sp_task); + break; + default: + netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n", + event_id); + break; + } + return 0; +} + +static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) +{ + u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id; + struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp; + struct hwrm_fwd_req_cmpl *fwd_req_cmpl = + (struct hwrm_fwd_req_cmpl *)txcmp; + + switch (cmpl_type) { + case CMPL_BASE_TYPE_HWRM_DONE: + seq_id = le16_to_cpu(h_cmpl->sequence_id); + if (seq_id == bp->hwrm_intr_seq_id) + bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID; + else + netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id); + break; 
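The async-event path just above shows a standard kernel deferral pattern: interrupt-time code only records which event fired (set_bit() on bp->sp_event) and schedules bp->sp_task, and the work handler later drains the bitmap in process context, where sleeping is allowed. A minimal sketch of that split, using the driver's real field and event names but hypothetical function names:

    /* IRQ time: cheap, atomic, never sleeps (hypothetical helper). */
    static void bnxt_note_event(struct bnxt *bp, unsigned int event_bit)
    {
            set_bit(event_bit, &bp->sp_event);
            schedule_work(&bp->sp_task);
    }

    /* Process context: the work handler drains the event bitmap. */
    static void bnxt_sp_task_sketch(struct work_struct *work)
    {
            struct bnxt *bp = container_of(work, struct bnxt, sp_task);

            if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
                    /* Handle the link change; sleeping is fine here. */
            }
    }

Since set_bit() and test_and_clear_bit() are atomic, no extra lock is needed between the interrupt handler and the work function.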
+ + case CMPL_BASE_TYPE_HWRM_FWD_REQ: + vf_id = le16_to_cpu(fwd_req_cmpl->source_id); + + if ((vf_id < bp->pf.first_vf_id) || + (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) { + netdev_err(bp->dev, "Msg contains invalid VF id %x\n", + vf_id); + return -EINVAL; + } + + set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap); + set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event); + schedule_work(&bp->sp_task); + break; + + case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: + bnxt_async_event_process(bp, + (struct hwrm_async_event_cmpl *)txcmp); + + default: + break; + } + + return 0; +} + +static irqreturn_t bnxt_msix(int irq, void *dev_instance) +{ + struct bnxt_napi *bnapi = dev_instance; + struct bnxt *bp = bnapi->bp; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + u32 cons = RING_CMP(cpr->cp_raw_cons); + + prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); + napi_schedule(&bnapi->napi); + return IRQ_HANDLED; +} + +static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) +{ + u32 raw_cons = cpr->cp_raw_cons; + u16 cons = RING_CMP(raw_cons); + struct tx_cmp *txcmp; + + txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; + + return TX_CMP_VALID(txcmp, raw_cons); +} + +#define CAG_LEGACY_INT_STATUS 0x2014 + +static irqreturn_t bnxt_inta(int irq, void *dev_instance) +{ + struct bnxt_napi *bnapi = dev_instance; + struct bnxt *bp = bnapi->bp; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + u32 cons = RING_CMP(cpr->cp_raw_cons); + u32 int_status; + + prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); + + if (!bnxt_has_work(bp, cpr)) { + int_status = readl(bp->bar0 + CAG_LEGACY_INT_STATUS); + /* return if erroneous interrupt */ + if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id))) + return IRQ_NONE; + } + + /* disable ring IRQ */ + BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell); + + /* Return here if interrupt is shared and is disabled. */ + if (unlikely(atomic_read(&bp->intr_sem) != 0)) + return IRQ_HANDLED; + + napi_schedule(&bnapi->napi); + return IRQ_HANDLED; +} + +static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) +{ + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + u32 raw_cons = cpr->cp_raw_cons; + u32 cons; + int tx_pkts = 0; + int rx_pkts = 0; + bool rx_event = false; + bool agg_event = false; + struct tx_cmp *txcmp; + + while (1) { + int rc; + + cons = RING_CMP(raw_cons); + txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; + + if (!TX_CMP_VALID(txcmp, raw_cons)) + break; + + if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) { + tx_pkts++; + /* return full budget so NAPI will complete. */ + if (unlikely(tx_pkts > bp->tx_wake_thresh)) + rx_pkts = budget; + } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { + rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event); + if (likely(rc >= 0)) + rx_pkts += rc; + else if (rc == -EBUSY) /* partial completion */ + break; + rx_event = true; + } else if (unlikely((TX_CMP_TYPE(txcmp) == + CMPL_BASE_TYPE_HWRM_DONE) || + (TX_CMP_TYPE(txcmp) == + CMPL_BASE_TYPE_HWRM_FWD_REQ) || + (TX_CMP_TYPE(txcmp) == + CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) { + bnxt_hwrm_handler(bp, txcmp); + } + raw_cons = NEXT_RAW_CMP(raw_cons); + + if (rx_pkts == budget) + break; + } + + cpr->cp_raw_cons = raw_cons; + /* ACK completion ring before freeing tx ring and producing new + * buffers in rx/agg rings to prevent overflowing the completion + * ring. 
+ */ + BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); + + if (tx_pkts) + bnxt_tx_int(bp, bnapi, tx_pkts); + + if (rx_event) { + struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; + + writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); + writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); + if (agg_event) { + writel(DB_KEY_RX | rxr->rx_agg_prod, + rxr->rx_agg_doorbell); + writel(DB_KEY_RX | rxr->rx_agg_prod, + rxr->rx_agg_doorbell); + } + } + return rx_pkts; +} + +static int bnxt_poll(struct napi_struct *napi, int budget) +{ + struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); + struct bnxt *bp = bnapi->bp; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + int work_done = 0; + + if (!bnxt_lock_napi(bnapi)) + return budget; + + while (1) { + work_done += bnxt_poll_work(bp, bnapi, budget - work_done); + + if (work_done >= budget) + break; + + if (!bnxt_has_work(bp, cpr)) { + napi_complete(napi); + BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); + break; + } + } + mmiowb(); + bnxt_unlock_napi(bnapi); + return work_done; +} + +#ifdef CONFIG_NET_RX_BUSY_POLL +static int bnxt_busy_poll(struct napi_struct *napi) +{ + struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); + struct bnxt *bp = bnapi->bp; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + int rx_work, budget = 4; + + if (atomic_read(&bp->intr_sem) != 0) + return LL_FLUSH_FAILED; + + if (!bnxt_lock_poll(bnapi)) + return LL_FLUSH_BUSY; + + rx_work = bnxt_poll_work(bp, bnapi, budget); + + BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); + + bnxt_unlock_poll(bnapi); + return rx_work; +} +#endif + +static void bnxt_free_tx_skbs(struct bnxt *bp) +{ + int i, max_idx; + struct pci_dev *pdev = bp->pdev; + + if (!bp->bnapi) + return; + + max_idx = bp->tx_nr_pages * TX_DESC_CNT; + for (i = 0; i < bp->tx_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_tx_ring_info *txr; + int j; + + if (!bnapi) + continue; + + txr = &bnapi->tx_ring; + for (j = 0; j < max_idx;) { + struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; + struct sk_buff *skb = tx_buf->skb; + int k, last; + + if (!skb) { + j++; + continue; + } + + tx_buf->skb = NULL; + + if (tx_buf->is_push) { + dev_kfree_skb(skb); + j += 2; + continue; + } + + dma_unmap_single(&pdev->dev, + dma_unmap_addr(tx_buf, mapping), + skb_headlen(skb), + PCI_DMA_TODEVICE); + + last = tx_buf->nr_frags; + j += 2; + for (k = 0; k < last; k++, j = NEXT_TX(j)) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; + + tx_buf = &txr->tx_buf_ring[j]; + dma_unmap_page( + &pdev->dev, + dma_unmap_addr(tx_buf, mapping), + skb_frag_size(frag), PCI_DMA_TODEVICE); + } + dev_kfree_skb(skb); + } + netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i)); + } +} + +static void bnxt_free_rx_skbs(struct bnxt *bp) +{ + int i, max_idx, max_agg_idx; + struct pci_dev *pdev = bp->pdev; + + if (!bp->bnapi) + return; + + max_idx = bp->rx_nr_pages * RX_DESC_CNT; + max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_rx_ring_info *rxr; + int j; + + if (!bnapi) + continue; + + rxr = &bnapi->rx_ring; + + if (rxr->rx_tpa) { + for (j = 0; j < MAX_TPA; j++) { + struct bnxt_tpa_info *tpa_info = + &rxr->rx_tpa[j]; + u8 *data = tpa_info->data; + + if (!data) + continue; + + dma_unmap_single( + &pdev->dev, + dma_unmap_addr(tpa_info, mapping), + bp->rx_buf_use_size, + PCI_DMA_FROMDEVICE); + + tpa_info->data = NULL; + + kfree(data); + } + } + + for (j = 0; j < max_idx; j++) { + struct 
bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j]; + u8 *data = rx_buf->data; + + if (!data) + continue; + + dma_unmap_single(&pdev->dev, + dma_unmap_addr(rx_buf, mapping), + bp->rx_buf_use_size, + PCI_DMA_FROMDEVICE); + + rx_buf->data = NULL; + + kfree(data); + } + + for (j = 0; j < max_agg_idx; j++) { + struct bnxt_sw_rx_agg_bd *rx_agg_buf = + &rxr->rx_agg_ring[j]; + struct page *page = rx_agg_buf->page; + + if (!page) + continue; + + dma_unmap_page(&pdev->dev, + dma_unmap_addr(rx_agg_buf, mapping), + PAGE_SIZE, PCI_DMA_FROMDEVICE); + + rx_agg_buf->page = NULL; + __clear_bit(j, rxr->rx_agg_bmap); + + __free_page(page); + } + } +} + +static void bnxt_free_skbs(struct bnxt *bp) +{ + bnxt_free_tx_skbs(bp); + bnxt_free_rx_skbs(bp); +} + +static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring) +{ + struct pci_dev *pdev = bp->pdev; + int i; + + for (i = 0; i < ring->nr_pages; i++) { + if (!ring->pg_arr[i]) + continue; + + dma_free_coherent(&pdev->dev, ring->page_size, + ring->pg_arr[i], ring->dma_arr[i]); + + ring->pg_arr[i] = NULL; + } + if (ring->pg_tbl) { + dma_free_coherent(&pdev->dev, ring->nr_pages * 8, + ring->pg_tbl, ring->pg_tbl_map); + ring->pg_tbl = NULL; + } + if (ring->vmem_size && *ring->vmem) { + vfree(*ring->vmem); + *ring->vmem = NULL; + } +} + +static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring) +{ + int i; + struct pci_dev *pdev = bp->pdev; + + if (ring->nr_pages > 1) { + ring->pg_tbl = dma_alloc_coherent(&pdev->dev, + ring->nr_pages * 8, + &ring->pg_tbl_map, + GFP_KERNEL); + if (!ring->pg_tbl) + return -ENOMEM; + } + + for (i = 0; i < ring->nr_pages; i++) { + ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev, + ring->page_size, + &ring->dma_arr[i], + GFP_KERNEL); + if (!ring->pg_arr[i]) + return -ENOMEM; + + if (ring->nr_pages > 1) + ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]); + } + + if (ring->vmem_size) { + *ring->vmem = vzalloc(ring->vmem_size); + if (!(*ring->vmem)) + return -ENOMEM; + } + return 0; +} + +static void bnxt_free_rx_rings(struct bnxt *bp) +{ + int i; + + if (!bp->bnapi) + return; + + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_rx_ring_info *rxr; + struct bnxt_ring_struct *ring; + + if (!bnapi) + continue; + + rxr = &bnapi->rx_ring; + + kfree(rxr->rx_tpa); + rxr->rx_tpa = NULL; + + kfree(rxr->rx_agg_bmap); + rxr->rx_agg_bmap = NULL; + + ring = &rxr->rx_ring_struct; + bnxt_free_ring(bp, ring); + + ring = &rxr->rx_agg_ring_struct; + bnxt_free_ring(bp, ring); + } +} + +static int bnxt_alloc_rx_rings(struct bnxt *bp) +{ + int i, rc, agg_rings = 0, tpa_rings = 0; + + if (bp->flags & BNXT_FLAG_AGG_RINGS) + agg_rings = 1; + + if (bp->flags & BNXT_FLAG_TPA) + tpa_rings = 1; + + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_rx_ring_info *rxr; + struct bnxt_ring_struct *ring; + + if (!bnapi) + continue; + + rxr = &bnapi->rx_ring; + ring = &rxr->rx_ring_struct; + + rc = bnxt_alloc_ring(bp, ring); + if (rc) + return rc; + + if (agg_rings) { + u16 mem_size; + + ring = &rxr->rx_agg_ring_struct; + rc = bnxt_alloc_ring(bp, ring); + if (rc) + return rc; + + rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1; + mem_size = rxr->rx_agg_bmap_size / 8; + rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL); + if (!rxr->rx_agg_bmap) + return -ENOMEM; + + if (tpa_rings) { + rxr->rx_tpa = kcalloc(MAX_TPA, + sizeof(struct bnxt_tpa_info), + GFP_KERNEL); + if (!rxr->rx_tpa) + return -ENOMEM; + } + } + } + return 0; +} + +static void 
bnxt_free_tx_rings(struct bnxt *bp) +{ + int i; + struct pci_dev *pdev = bp->pdev; + + if (!bp->bnapi) + return; + + for (i = 0; i < bp->tx_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_tx_ring_info *txr; + struct bnxt_ring_struct *ring; + + if (!bnapi) + continue; + + txr = &bnapi->tx_ring; + + if (txr->tx_push) { + dma_free_coherent(&pdev->dev, bp->tx_push_size, + txr->tx_push, txr->tx_push_mapping); + txr->tx_push = NULL; + } + + ring = &txr->tx_ring_struct; + + bnxt_free_ring(bp, ring); + } +} + +static int bnxt_alloc_tx_rings(struct bnxt *bp) +{ + int i, j, rc; + struct pci_dev *pdev = bp->pdev; + + bp->tx_push_size = 0; + if (bp->tx_push_thresh) { + int push_size; + + push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + + bp->tx_push_thresh); + + if (push_size > 128) { + push_size = 0; + bp->tx_push_thresh = 0; + } + + bp->tx_push_size = push_size; + } + + for (i = 0, j = 0; i < bp->tx_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_tx_ring_info *txr; + struct bnxt_ring_struct *ring; + + if (!bnapi) + continue; + + txr = &bnapi->tx_ring; + ring = &txr->tx_ring_struct; + + rc = bnxt_alloc_ring(bp, ring); + if (rc) + return rc; + + if (bp->tx_push_size) { + struct tx_bd *txbd; + dma_addr_t mapping; + + /* One pre-allocated DMA buffer to backup + * TX push operation + */ + txr->tx_push = dma_alloc_coherent(&pdev->dev, + bp->tx_push_size, + &txr->tx_push_mapping, + GFP_KERNEL); + + if (!txr->tx_push) + return -ENOMEM; + + txbd = &txr->tx_push->txbd1; + + mapping = txr->tx_push_mapping + + sizeof(struct tx_push_bd); + txbd->tx_bd_haddr = cpu_to_le64(mapping); + + memset(txbd + 1, 0, sizeof(struct tx_bd_ext)); + } + ring->queue_id = bp->q_info[j].queue_id; + if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1)) + j++; + } + return 0; +} + +static void bnxt_free_cp_rings(struct bnxt *bp) +{ + int i; + + if (!bp->bnapi) + return; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr; + struct bnxt_ring_struct *ring; + + if (!bnapi) + continue; + + cpr = &bnapi->cp_ring; + ring = &cpr->cp_ring_struct; + + bnxt_free_ring(bp, ring); + } +} + +static int bnxt_alloc_cp_rings(struct bnxt *bp) +{ + int i, rc; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr; + struct bnxt_ring_struct *ring; + + if (!bnapi) + continue; + + cpr = &bnapi->cp_ring; + ring = &cpr->cp_ring_struct; + + rc = bnxt_alloc_ring(bp, ring); + if (rc) + return rc; + } + return 0; +} + +static void bnxt_init_ring_struct(struct bnxt *bp) +{ + int i; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr; + struct bnxt_rx_ring_info *rxr; + struct bnxt_tx_ring_info *txr; + struct bnxt_ring_struct *ring; + + if (!bnapi) + continue; + + cpr = &bnapi->cp_ring; + ring = &cpr->cp_ring_struct; + ring->nr_pages = bp->cp_nr_pages; + ring->page_size = HW_CMPD_RING_SIZE; + ring->pg_arr = (void **)cpr->cp_desc_ring; + ring->dma_arr = cpr->cp_desc_mapping; + ring->vmem_size = 0; + + rxr = &bnapi->rx_ring; + ring = &rxr->rx_ring_struct; + ring->nr_pages = bp->rx_nr_pages; + ring->page_size = HW_RXBD_RING_SIZE; + ring->pg_arr = (void **)rxr->rx_desc_ring; + ring->dma_arr = rxr->rx_desc_mapping; + ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; + ring->vmem = (void **)&rxr->rx_buf_ring; + + ring = &rxr->rx_agg_ring_struct; + ring->nr_pages = bp->rx_agg_nr_pages; + ring->page_size = 
HW_RXBD_RING_SIZE;
+		ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
+		ring->dma_arr = rxr->rx_agg_desc_mapping;
+		ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
+		ring->vmem = (void **)&rxr->rx_agg_ring;
+
+		txr = &bnapi->tx_ring;
+		ring = &txr->tx_ring_struct;
+		ring->nr_pages = bp->tx_nr_pages;
+		ring->page_size = HW_RXBD_RING_SIZE;
+		ring->pg_arr = (void **)txr->tx_desc_ring;
+		ring->dma_arr = txr->tx_desc_mapping;
+		ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
+		ring->vmem = (void **)&txr->tx_buf_ring;
+	}
+}
+
+static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
+{
+	int i;
+	u32 prod;
+	struct rx_bd **rx_buf_ring;
+
+	rx_buf_ring = (struct rx_bd **)ring->pg_arr;
+	for (i = 0, prod = 0; i < ring->nr_pages; i++) {
+		int j;
+		struct rx_bd *rxbd;
+
+		rxbd = rx_buf_ring[i];
+		if (!rxbd)
+			continue;
+
+		for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
+			rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
+			rxbd->rx_bd_opaque = prod;
+		}
+	}
+}
+
+static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+{
+	struct net_device *dev = bp->dev;
+	struct bnxt_napi *bnapi = bp->bnapi[ring_nr];
+	struct bnxt_rx_ring_info *rxr;
+	struct bnxt_ring_struct *ring;
+	u32 prod, type;
+	int i;
+
+	if (!bnapi)
+		return -EINVAL;
+
+	type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
+		RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
+
+	if (NET_IP_ALIGN == 2)
+		type |= RX_BD_FLAGS_SOP;
+
+	rxr = &bnapi->rx_ring;
+	ring = &rxr->rx_ring_struct;
+	bnxt_init_rxbd_pages(ring, type);
+
+	prod = rxr->rx_prod;
+	for (i = 0; i < bp->rx_ring_size; i++) {
+		if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
+			netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
+				    ring_nr, i, bp->rx_ring_size);
+			break;
+		}
+		prod = NEXT_RX(prod);
+	}
+	rxr->rx_prod = prod;
+	ring->fw_ring_id = INVALID_HW_RING_ID;
+
+	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
+		return 0;
+
+	ring = &rxr->rx_agg_ring_struct;
+
+	type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) |
+		RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
+
+	bnxt_init_rxbd_pages(ring, type);
+
+	prod = rxr->rx_agg_prod;
+	for (i = 0; i < bp->rx_agg_ring_size; i++) {
+		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
+			netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
+				    ring_nr, i, bp->rx_agg_ring_size);
+			break;
+		}
+		prod = NEXT_RX_AGG(prod);
+	}
+	rxr->rx_agg_prod = prod;
+	ring->fw_ring_id = INVALID_HW_RING_ID;
+
+	if (bp->flags & BNXT_FLAG_TPA) {
+		if (rxr->rx_tpa) {
+			u8 *data;
+			dma_addr_t mapping;
+
+			for (i = 0; i < MAX_TPA; i++) {
+				data = __bnxt_alloc_rx_data(bp, &mapping,
+							    GFP_KERNEL);
+				if (!data)
+					return -ENOMEM;
+
+				rxr->rx_tpa[i].data = data;
+				rxr->rx_tpa[i].mapping = mapping;
+			}
+		} else {
+			netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+static int bnxt_init_rx_rings(struct bnxt *bp)
+{
+	int i, rc = 0;
+
+	for (i = 0; i < bp->rx_nr_rings; i++) {
+		rc = bnxt_init_one_rx_ring(bp, i);
+		if (rc)
+			break;
+	}
+
+	return rc;
+}
+
+static int bnxt_init_tx_rings(struct bnxt *bp)
+{
+	u16 i;
+
+	bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
+				   MAX_SKB_FRAGS + 1);
+
+	for (i = 0; i < bp->tx_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
+		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+
+		ring->fw_ring_id = INVALID_HW_RING_ID;
+	}
+
+	return 0;
+}
+
+static void bnxt_free_ring_grps(struct bnxt *bp)
+{
+	kfree(bp->grp_info);
+	bp->grp_info = NULL;
+}
+
+static int
bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init) +{ + int i; + + if (irq_re_init) { + bp->grp_info = kcalloc(bp->cp_nr_rings, + sizeof(struct bnxt_ring_grp_info), + GFP_KERNEL); + if (!bp->grp_info) + return -ENOMEM; + } + for (i = 0; i < bp->cp_nr_rings; i++) { + if (irq_re_init) + bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID; + bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; + bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID; + bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID; + bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; + } + return 0; +} + +static void bnxt_free_vnics(struct bnxt *bp) +{ + kfree(bp->vnic_info); + bp->vnic_info = NULL; + bp->nr_vnics = 0; +} + +static int bnxt_alloc_vnics(struct bnxt *bp) +{ + int num_vnics = 1; + +#ifdef CONFIG_RFS_ACCEL + if (bp->flags & BNXT_FLAG_RFS) + num_vnics += bp->rx_nr_rings; +#endif + + bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info), + GFP_KERNEL); + if (!bp->vnic_info) + return -ENOMEM; + + bp->nr_vnics = num_vnics; + return 0; +} + +static void bnxt_init_vnics(struct bnxt *bp) +{ + int i; + + for (i = 0; i < bp->nr_vnics; i++) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + + vnic->fw_vnic_id = INVALID_HW_RING_ID; + vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID; + vnic->fw_l2_ctx_id = INVALID_HW_RING_ID; + + if (bp->vnic_info[i].rss_hash_key) { + if (i == 0) + prandom_bytes(vnic->rss_hash_key, + HW_HASH_KEY_SIZE); + else + memcpy(vnic->rss_hash_key, + bp->vnic_info[0].rss_hash_key, + HW_HASH_KEY_SIZE); + } + } +} + +static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg) +{ + int pages; + + pages = ring_size / desc_per_pg; + + if (!pages) + return 1; + + pages++; + + while (pages & (pages - 1)) + pages++; + + return pages; +} + +static void bnxt_set_tpa_flags(struct bnxt *bp) +{ + bp->flags &= ~BNXT_FLAG_TPA; + if (bp->dev->features & NETIF_F_LRO) + bp->flags |= BNXT_FLAG_LRO; + if ((bp->dev->features & NETIF_F_GRO) && (bp->pdev->revision > 0)) + bp->flags |= BNXT_FLAG_GRO; +} + +/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must + * be set on entry. 
+ */ +void bnxt_set_ring_params(struct bnxt *bp) +{ + u32 ring_size, rx_size, rx_space; + u32 agg_factor = 0, agg_ring_size = 0; + + /* 8 for CRC and VLAN */ + rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8); + + rx_space = rx_size + NET_SKB_PAD + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + + bp->rx_copy_thresh = BNXT_RX_COPY_THRESH; + ring_size = bp->rx_ring_size; + bp->rx_agg_ring_size = 0; + bp->rx_agg_nr_pages = 0; + + if (bp->flags & BNXT_FLAG_TPA) + agg_factor = 4; + + bp->flags &= ~BNXT_FLAG_JUMBO; + if (rx_space > PAGE_SIZE) { + u32 jumbo_factor; + + bp->flags |= BNXT_FLAG_JUMBO; + jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; + if (jumbo_factor > agg_factor) + agg_factor = jumbo_factor; + } + agg_ring_size = ring_size * agg_factor; + + if (agg_ring_size) { + bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size, + RX_DESC_CNT); + if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) { + u32 tmp = agg_ring_size; + + bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES; + agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1; + netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n", + tmp, agg_ring_size); + } + bp->rx_agg_ring_size = agg_ring_size; + bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1; + rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN); + rx_space = rx_size + NET_SKB_PAD + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + } + + bp->rx_buf_use_size = rx_size; + bp->rx_buf_size = rx_space; + + bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT); + bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1; + + ring_size = bp->tx_ring_size; + bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT); + bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; + + ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size; + bp->cp_ring_size = ring_size; + + bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); + if (bp->cp_nr_pages > MAX_CP_PAGES) { + bp->cp_nr_pages = MAX_CP_PAGES; + bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1; + netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n", + ring_size, bp->cp_ring_size); + } + bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT; + bp->cp_ring_mask = bp->cp_bit - 1; +} + +static void bnxt_free_vnic_attributes(struct bnxt *bp) +{ + int i; + struct bnxt_vnic_info *vnic; + struct pci_dev *pdev = bp->pdev; + + if (!bp->vnic_info) + return; + + for (i = 0; i < bp->nr_vnics; i++) { + vnic = &bp->vnic_info[i]; + + kfree(vnic->fw_grp_ids); + vnic->fw_grp_ids = NULL; + + kfree(vnic->uc_list); + vnic->uc_list = NULL; + + if (vnic->mc_list) { + dma_free_coherent(&pdev->dev, vnic->mc_list_size, + vnic->mc_list, vnic->mc_list_mapping); + vnic->mc_list = NULL; + } + + if (vnic->rss_table) { + dma_free_coherent(&pdev->dev, PAGE_SIZE, + vnic->rss_table, + vnic->rss_table_dma_addr); + vnic->rss_table = NULL; + } + + vnic->rss_hash_key = NULL; + vnic->flags = 0; + } +} + +static int bnxt_alloc_vnic_attributes(struct bnxt *bp) +{ + int i, rc = 0, size; + struct bnxt_vnic_info *vnic; + struct pci_dev *pdev = bp->pdev; + int max_rings; + + for (i = 0; i < bp->nr_vnics; i++) { + vnic = &bp->vnic_info[i]; + + if (vnic->flags & BNXT_VNIC_UCAST_FLAG) { + int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN; + + if (mem_size > 0) { + vnic->uc_list = kmalloc(mem_size, GFP_KERNEL); + if (!vnic->uc_list) { + rc = -ENOMEM; + goto out; + } + } + } + + if (vnic->flags & BNXT_VNIC_MCAST_FLAG) { + vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN; + vnic->mc_list = + 
dma_alloc_coherent(&pdev->dev, + vnic->mc_list_size, + &vnic->mc_list_mapping, + GFP_KERNEL); + if (!vnic->mc_list) { + rc = -ENOMEM; + goto out; + } + } + + if (vnic->flags & BNXT_VNIC_RSS_FLAG) + max_rings = bp->rx_nr_rings; + else + max_rings = 1; + + vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL); + if (!vnic->fw_grp_ids) { + rc = -ENOMEM; + goto out; + } + + /* Allocate rss table and hash key */ + vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, + &vnic->rss_table_dma_addr, + GFP_KERNEL); + if (!vnic->rss_table) { + rc = -ENOMEM; + goto out; + } + + size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); + + vnic->rss_hash_key = ((void *)vnic->rss_table) + size; + vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; + } + return 0; + +out: + return rc; +} + +static void bnxt_free_hwrm_resources(struct bnxt *bp) +{ + struct pci_dev *pdev = bp->pdev; + + dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr, + bp->hwrm_cmd_resp_dma_addr); + + bp->hwrm_cmd_resp_addr = NULL; + if (bp->hwrm_dbg_resp_addr) { + dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE, + bp->hwrm_dbg_resp_addr, + bp->hwrm_dbg_resp_dma_addr); + + bp->hwrm_dbg_resp_addr = NULL; + } +} + +static int bnxt_alloc_hwrm_resources(struct bnxt *bp) +{ + struct pci_dev *pdev = bp->pdev; + + bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, + &bp->hwrm_cmd_resp_dma_addr, + GFP_KERNEL); + if (!bp->hwrm_cmd_resp_addr) + return -ENOMEM; + bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev, + HWRM_DBG_REG_BUF_SIZE, + &bp->hwrm_dbg_resp_dma_addr, + GFP_KERNEL); + if (!bp->hwrm_dbg_resp_addr) + netdev_warn(bp->dev, "fail to alloc debug register dma mem\n"); + + return 0; +} + +static void bnxt_free_stats(struct bnxt *bp) +{ + u32 size, i; + struct pci_dev *pdev = bp->pdev; + + if (!bp->bnapi) + return; + + size = sizeof(struct ctx_hw_stats); + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + + if (cpr->hw_stats) { + dma_free_coherent(&pdev->dev, size, cpr->hw_stats, + cpr->hw_stats_map); + cpr->hw_stats = NULL; + } + } +} + +static int bnxt_alloc_stats(struct bnxt *bp) +{ + u32 size, i; + struct pci_dev *pdev = bp->pdev; + + size = sizeof(struct ctx_hw_stats); + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + + cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size, + &cpr->hw_stats_map, + GFP_KERNEL); + if (!cpr->hw_stats) + return -ENOMEM; + + cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; + } + return 0; +} + +static void bnxt_clear_ring_indices(struct bnxt *bp) +{ + int i; + + if (!bp->bnapi) + return; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr; + struct bnxt_rx_ring_info *rxr; + struct bnxt_tx_ring_info *txr; + + if (!bnapi) + continue; + + cpr = &bnapi->cp_ring; + cpr->cp_raw_cons = 0; + + txr = &bnapi->tx_ring; + txr->tx_prod = 0; + txr->tx_cons = 0; + + rxr = &bnapi->rx_ring; + rxr->rx_prod = 0; + rxr->rx_agg_prod = 0; + rxr->rx_sw_agg_prod = 0; + } +} + +static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit) +{ +#ifdef CONFIG_RFS_ACCEL + int i; + + /* Under rtnl_lock and all our NAPIs have been disabled. It's + * safe to delete the hash table. 
+	 */
+	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+		struct hlist_head *head;
+		struct hlist_node *tmp;
+		struct bnxt_ntuple_filter *fltr;
+
+		head = &bp->ntp_fltr_hash_tbl[i];
+		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
+			hlist_del(&fltr->hash);
+			kfree(fltr);
+		}
+	}
+	if (irq_reinit) {
+		kfree(bp->ntp_fltr_bmap);
+		bp->ntp_fltr_bmap = NULL;
+	}
+	bp->ntp_fltr_count = 0;
+#endif
+}
+
+static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
+{
+#ifdef CONFIG_RFS_ACCEL
+	int i, rc = 0;
+
+	if (!(bp->flags & BNXT_FLAG_RFS))
+		return 0;
+
+	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
+		INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
+
+	bp->ntp_fltr_count = 0;
+	/* BITS_TO_LONGS() returns a count of longs, not bytes, so size
+	 * the bitmap with kcalloc()
+	 */
+	bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
+				    sizeof(long), GFP_KERNEL);
+
+	if (!bp->ntp_fltr_bmap)
+		rc = -ENOMEM;
+
+	return rc;
+#else
+	return 0;
+#endif
+}
+
+static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
+{
+	bnxt_free_vnic_attributes(bp);
+	bnxt_free_tx_rings(bp);
+	bnxt_free_rx_rings(bp);
+	bnxt_free_cp_rings(bp);
+	bnxt_free_ntp_fltrs(bp, irq_re_init);
+	if (irq_re_init) {
+		bnxt_free_stats(bp);
+		bnxt_free_ring_grps(bp);
+		bnxt_free_vnics(bp);
+		kfree(bp->bnapi);
+		bp->bnapi = NULL;
+	} else {
+		bnxt_clear_ring_indices(bp);
+	}
+}
+
+static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
+{
+	int i, rc, size, arr_size;
+	void *bnapi;
+
+	if (irq_re_init) {
+		/* Allocate bnapi mem pointer array and mem block for
+		 * all queues
+		 */
+		arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
+				bp->cp_nr_rings);
+		size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
+		bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
+		if (!bnapi)
+			return -ENOMEM;
+
+		bp->bnapi = bnapi;
+		bnapi += arr_size;
+		for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
+			bp->bnapi[i] = bnapi;
+			bp->bnapi[i]->index = i;
+			bp->bnapi[i]->bp = bp;
+		}
+
+		rc = bnxt_alloc_stats(bp);
+		if (rc)
+			goto alloc_mem_err;
+
+		rc = bnxt_alloc_ntp_fltrs(bp);
+		if (rc)
+			goto alloc_mem_err;
+
+		rc = bnxt_alloc_vnics(bp);
+		if (rc)
+			goto alloc_mem_err;
+	}
+
+	bnxt_init_ring_struct(bp);
+
+	rc = bnxt_alloc_rx_rings(bp);
+	if (rc)
+		goto alloc_mem_err;
+
+	rc = bnxt_alloc_tx_rings(bp);
+	if (rc)
+		goto alloc_mem_err;
+
+	rc = bnxt_alloc_cp_rings(bp);
+	if (rc)
+		goto alloc_mem_err;
+
+	bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
+				  BNXT_VNIC_UCAST_FLAG;
+	rc = bnxt_alloc_vnic_attributes(bp);
+	if (rc)
+		goto alloc_mem_err;
+	return 0;
+
+alloc_mem_err:
+	bnxt_free_mem(bp, true);
+	return rc;
+}
+
+void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
+			    u16 cmpl_ring, u16 target_id)
+{
+	struct hwrm_cmd_req_hdr *req = request;
+
+	req->cmpl_ring_req_type =
+		cpu_to_le32(req_type | (cmpl_ring << HWRM_CMPL_RING_SFT));
+	req->target_id_seq_id = cpu_to_le32(target_id << HWRM_TARGET_FID_SFT);
+	req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
+}
+
+int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
+{
+	int i, intr_process, rc;
+	struct hwrm_cmd_req_hdr *req = msg;
+	u32 *data = msg;
+	__le32 *resp_len, *valid;
+	u16 cp_ring_id, len = 0;
+	struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
+
+	req->target_id_seq_id |= cpu_to_le32(bp->hwrm_cmd_seq++);
+	memset(resp, 0, PAGE_SIZE);
+	cp_ring_id = (le32_to_cpu(req->cmpl_ring_req_type) &
+		      HWRM_CMPL_RING_MASK) >>
+		     HWRM_CMPL_RING_SFT;
+	intr_process = (cp_ring_id == INVALID_HW_RING_ID) ?
0 : 1; + + /* Write request msg to hwrm channel */ + __iowrite32_copy(bp->bar0, data, msg_len / 4); + + /* currently supports only one outstanding message */ + if (intr_process) + bp->hwrm_intr_seq_id = le32_to_cpu(req->target_id_seq_id) & + HWRM_SEQ_ID_MASK; + + /* Ring channel doorbell */ + writel(1, bp->bar0 + 0x100); + + i = 0; + if (intr_process) { + /* Wait until hwrm response cmpl interrupt is processed */ + while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID && + i++ < timeout) { + usleep_range(600, 800); + } + + if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) { + netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", + req->cmpl_ring_req_type); + return -1; + } + } else { + /* Check if response len is updated */ + resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET; + for (i = 0; i < timeout; i++) { + len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> + HWRM_RESP_LEN_SFT; + if (len) + break; + usleep_range(600, 800); + } + + if (i >= timeout) { + netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", + timeout, req->cmpl_ring_req_type, + req->target_id_seq_id, *resp_len); + return -1; + } + + /* Last word of resp contains valid bit */ + valid = bp->hwrm_cmd_resp_addr + len - 4; + for (i = 0; i < timeout; i++) { + if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK) + break; + usleep_range(600, 800); + } + + if (i >= timeout) { + netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n", + timeout, req->cmpl_ring_req_type, + req->target_id_seq_id, len, *valid); + return -1; + } + } + + rc = le16_to_cpu(resp->error_code); + if (rc) { + netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", + le16_to_cpu(resp->req_type), + le16_to_cpu(resp->seq_id), rc); + return rc; + } + return 0; +} + +int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) +{ + int rc; + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, msg, msg_len, timeout); + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) +{ + struct hwrm_func_drv_rgtr_input req = {0}; + int i; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); + + req.enables = + cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | + FUNC_DRV_RGTR_REQ_ENABLES_VER | + FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); + + /* TODO: current async event fwd bits are not defined and the firmware + * only checks if it is non-zero to enable async event forwarding + */ + req.async_event_fwd[0] |= cpu_to_le32(1); + req.os_type = cpu_to_le16(1); + req.ver_maj = DRV_VER_MAJ; + req.ver_min = DRV_VER_MIN; + req.ver_upd = DRV_VER_UPD; + + if (BNXT_PF(bp)) { + unsigned long vf_req_snif_bmap[4]; + u32 *data = (u32 *)vf_req_snif_bmap; + + memset(vf_req_snif_bmap, 0, 32); + for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) + __set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap); + + for (i = 0; i < 8; i++) { + req.vf_req_fwd[i] = cpu_to_le32(*data); + data++; + } + req.enables |= + cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); + } + + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + +static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) +{ + u32 rc = 0; + struct hwrm_tunnel_dst_port_free_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1); + req.tunnel_type = tunnel_type; + + switch (tunnel_type) { + case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN: + req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id; + break; + case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE: + 
req.tunnel_dst_port_id = bp->nge_fw_dst_port_id; + break; + default: + break; + } + + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n", + rc); + return rc; +} + +static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, + u8 tunnel_type) +{ + u32 rc = 0; + struct hwrm_tunnel_dst_port_alloc_input req = {0}; + struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1); + + req.tunnel_type = tunnel_type; + req.tunnel_dst_port_val = port; + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) { + netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n", + rc); + goto err_out; + } + + if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN) + bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id; + + else if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE) + bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id; +err_out: + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) +{ + struct hwrm_cfa_l2_set_rx_mask_input req = {0}; + struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1); + req.dflt_vnic_id = cpu_to_le32(vnic->fw_vnic_id); + + req.num_mc_entries = cpu_to_le32(vnic->mc_list_count); + req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); + req.mask = cpu_to_le32(vnic->rx_mask); + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + +#ifdef CONFIG_RFS_ACCEL +static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, + struct bnxt_ntuple_filter *fltr) +{ + struct hwrm_cfa_ntuple_filter_free_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1); + req.ntuple_filter_id = fltr->filter_id; + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + +#define BNXT_NTP_FLTR_FLAGS \ + (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID) + +static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, + struct bnxt_ntuple_filter *fltr) +{ + int rc = 0; + struct hwrm_cfa_ntuple_filter_alloc_input req = {0}; + struct hwrm_cfa_ntuple_filter_alloc_output *resp = + bp->hwrm_cmd_resp_addr; + struct flow_keys *keys = &fltr->fkeys; + struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1]; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1); + req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[0]; + + req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); + + req.ethertype = htons(ETH_P_IP); + memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN); + req.ipaddr_type = 4; + req.ip_protocol = keys->basic.ip_proto; + + 
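+	/* Everything below describes an exact-match IPv4 flow: the address
+	 * and port masks are all-ones, so only packets matching the sampled
+	 * flow's 5-tuple hit this filter.  The destination is the per-ring
+	 * vnic chosen above (bp->vnic_info[fltr->rxq + 1]), which is how
+	 * aRFS steers a flow to the ring serving the consuming CPU.
+	 */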
req.src_ipaddr[0] = keys->addrs.v4addrs.src; + req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); + req.dst_ipaddr[0] = keys->addrs.v4addrs.dst; + req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff); + + req.src_port = keys->ports.src; + req.src_port_mask = cpu_to_be16(0xffff); + req.dst_port = keys->ports.dst; + req.dst_port_mask = cpu_to_be16(0xffff); + + req.dst_vnic_id = cpu_to_le16(vnic->fw_vnic_id); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) + fltr->filter_id = resp->ntuple_filter_id; + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} +#endif + +static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, + u8 *mac_addr) +{ + u32 rc = 0; + struct hwrm_cfa_l2_filter_alloc_input req = {0}; + struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1); + req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX | + CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); + req.dst_vnic_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id); + req.enables = + cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | + CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID | + CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK); + memcpy(req.l2_addr, mac_addr, ETH_ALEN); + req.l2_addr_mask[0] = 0xff; + req.l2_addr_mask[1] = 0xff; + req.l2_addr_mask[2] = 0xff; + req.l2_addr_mask[3] = 0xff; + req.l2_addr_mask[4] = 0xff; + req.l2_addr_mask[5] = 0xff; + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) + bp->vnic_info[vnic_id].fw_l2_filter_id[idx] = + resp->l2_filter_id; + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp) +{ + u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */ + int rc = 0; + + /* Any associated ntuple filters will also be cleared by firmware. */ + mutex_lock(&bp->hwrm_cmd_lock); + for (i = 0; i < num_of_vnics; i++) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + + for (j = 0; j < vnic->uc_filter_count; j++) { + struct hwrm_cfa_l2_filter_free_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, + HWRM_CFA_L2_FILTER_FREE, -1, -1); + + req.l2_filter_id = vnic->fw_l2_filter_id[j]; + + rc = _hwrm_send_message(bp, &req, sizeof(req), + HWRM_CMD_TIMEOUT); + } + vnic->uc_filter_count = 0; + } + mutex_unlock(&bp->hwrm_cmd_lock); + + return rc; +} + +static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) +{ + struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; + struct hwrm_vnic_tpa_cfg_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1); + + if (tpa_flags) { + u16 mss = bp->dev->mtu - 40; + u32 nsegs, n, segs = 0, flags; + + flags = VNIC_TPA_CFG_REQ_FLAGS_TPA | + VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA | + VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE | + VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN | + VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ; + if (tpa_flags & BNXT_FLAG_GRO) + flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO; + + req.flags = cpu_to_le32(flags); + + req.enables = + cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS | + VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS); + + /* Number of segs are log2 units, and first packet is not + * included as part of this units. 
+ */ + if (mss <= PAGE_SIZE) { + n = PAGE_SIZE / mss; + nsegs = (MAX_SKB_FRAGS - 1) * n; + } else { + n = mss / PAGE_SIZE; + if (mss & (PAGE_SIZE - 1)) + n++; + nsegs = (MAX_SKB_FRAGS - n) / n; + } + + segs = ilog2(nsegs); + req.max_agg_segs = cpu_to_le16(segs); + req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX); + } + req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); + + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + +static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) +{ + u32 i, j, max_rings; + struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; + struct hwrm_vnic_rss_cfg_input req = {0}; + + if (vnic->fw_rss_cos_lb_ctx == INVALID_HW_RING_ID) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); + if (set_rss) { + vnic->hash_type = BNXT_RSS_HASH_TYPE_FLAG_IPV4 | + BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 | + BNXT_RSS_HASH_TYPE_FLAG_IPV6 | + BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6; + + req.hash_type = cpu_to_le32(vnic->hash_type); + + if (vnic->flags & BNXT_VNIC_RSS_FLAG) + max_rings = bp->rx_nr_rings; + else + max_rings = 1; + + /* Fill the RSS indirection table with ring group ids */ + for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) { + if (j == max_rings) + j = 0; + vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); + } + + req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); + req.hash_key_tbl_addr = + cpu_to_le64(vnic->rss_hash_key_dma_addr); + } + req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx); + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + +static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) +{ + struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; + struct hwrm_vnic_plcmodes_cfg_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1); + req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT | + VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | + VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); + req.enables = + cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID | + VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); + /* thresholds not implemented in firmware yet */ + req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); + req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh); + req.vnic_id = cpu_to_le32(vnic->fw_vnic_id); + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + +static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id) +{ + struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1); + req.rss_cos_lb_ctx_id = + cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx); + + hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID; +} + +static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) +{ + int i; + + for (i = 0; i < bp->nr_vnics; i++) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + + if (vnic->fw_rss_cos_lb_ctx != INVALID_HW_RING_ID) + bnxt_hwrm_vnic_ctx_free_one(bp, i); + } + bp->rsscos_nr_ctxs = 0; +} + +static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id) +{ + int rc; + struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0}; + struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp = + bp->hwrm_cmd_resp_addr; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1, + -1); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) + 
bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = + le16_to_cpu(resp->rss_cos_lb_ctx_id); + mutex_unlock(&bp->hwrm_cmd_lock); + + return rc; +} + +static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) +{ + int grp_idx = 0; + struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; + struct hwrm_vnic_cfg_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1); + /* Only RSS support for now TBD: COS & LB */ + req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP | + VNIC_CFG_REQ_ENABLES_RSS_RULE); + req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx); + req.cos_rule = cpu_to_le16(0xffff); + if (vnic->flags & BNXT_VNIC_RSS_FLAG) + grp_idx = 0; + else if (vnic->flags & BNXT_VNIC_RFS_FLAG) + grp_idx = vnic_id - 1; + + req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); + req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); + + req.lb_rule = cpu_to_le16(0xffff); + req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + + VLAN_HLEN); + + if (bp->flags & BNXT_FLAG_STRIP_VLAN) + req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); + + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + +static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) +{ + u32 rc = 0; + + if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) { + struct hwrm_vnic_free_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1); + req.vnic_id = + cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id); + + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + return rc; + bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID; + } + return rc; +} + +static void bnxt_hwrm_vnic_free(struct bnxt *bp) +{ + u16 i; + + for (i = 0; i < bp->nr_vnics; i++) + bnxt_hwrm_vnic_free_one(bp, i); +} + +static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, u16 start_grp_id, + u16 end_grp_id) +{ + u32 rc = 0, i, j; + struct hwrm_vnic_alloc_input req = {0}; + struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr; + + /* map ring groups to this vnic */ + for (i = start_grp_id, j = 0; i < end_grp_id; i++, j++) { + if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) { + netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n", + j, (end_grp_id - start_grp_id)); + break; + } + bp->vnic_info[vnic_id].fw_grp_ids[j] = + bp->grp_info[i].fw_grp_id; + } + + bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID; + if (vnic_id == 0) + req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) + bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id); + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) +{ + u16 i; + u32 rc = 0; + + mutex_lock(&bp->hwrm_cmd_lock); + for (i = 0; i < bp->rx_nr_rings; i++) { + struct hwrm_ring_grp_alloc_input req = {0}; + struct hwrm_ring_grp_alloc_output *resp = + bp->hwrm_cmd_resp_addr; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1); + + req.cr = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id); + req.rr = cpu_to_le16(bp->grp_info[i].rx_fw_ring_id); + req.ar = cpu_to_le16(bp->grp_info[i].agg_fw_ring_id); + req.sc = cpu_to_le16(bp->grp_info[i].fw_stats_ctx); + + rc = _hwrm_send_message(bp, &req, sizeof(req), + HWRM_CMD_TIMEOUT); + if (rc) + break; + + bp->grp_info[i].fw_grp_id = le32_to_cpu(resp->ring_group_id); + } + 
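+	/* A ring group bundles the completion ring (cr), rx ring (rr), agg
+	 * ring (ar) and stats context (sc) of one rx queue.  The firmware
+	 * group id returned here is what bnxt_hwrm_vnic_set_rss() later
+	 * writes into the vnic's RSS indirection table, so RSS hash slots
+	 * resolve to ring groups rather than to individual rings.
+	 */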
mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int bnxt_hwrm_ring_grp_free(struct bnxt *bp) +{ + u16 i; + u32 rc = 0; + struct hwrm_ring_grp_free_input req = {0}; + + if (!bp->grp_info) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1); + + mutex_lock(&bp->hwrm_cmd_lock); + for (i = 0; i < bp->cp_nr_rings; i++) { + if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) + continue; + req.ring_group_id = + cpu_to_le32(bp->grp_info[i].fw_grp_id); + + rc = _hwrm_send_message(bp, &req, sizeof(req), + HWRM_CMD_TIMEOUT); + if (rc) + break; + bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; + } + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int hwrm_ring_alloc_send_msg(struct bnxt *bp, + struct bnxt_ring_struct *ring, + u32 ring_type, u32 map_index, + u32 stats_ctx_id) +{ + int rc = 0, err = 0; + struct hwrm_ring_alloc_input req = {0}; + struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr; + u16 ring_id; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1); + + req.enables = 0; + if (ring->nr_pages > 1) { + req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map); + /* Page size is in log2 units */ + req.page_size = BNXT_PAGE_SHIFT; + req.page_tbl_depth = 1; + } else { + req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]); + } + req.fbo = 0; + /* Association of ring index with doorbell index and MSIX number */ + req.logical_id = cpu_to_le16(map_index); + + switch (ring_type) { + case HWRM_RING_ALLOC_TX: + req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX; + /* Association of transmit ring with completion ring */ + req.cmpl_ring_id = + cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id); + req.length = cpu_to_le32(bp->tx_ring_mask + 1); + req.stat_ctx_id = cpu_to_le32(stats_ctx_id); + req.queue_id = cpu_to_le16(ring->queue_id); + break; + case HWRM_RING_ALLOC_RX: + req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; + req.length = cpu_to_le32(bp->rx_ring_mask + 1); + break; + case HWRM_RING_ALLOC_AGG: + req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; + req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1); + break; + case HWRM_RING_ALLOC_CMPL: + req.ring_type = RING_ALLOC_REQ_RING_TYPE_CMPL; + req.length = cpu_to_le32(bp->cp_ring_mask + 1); + if (bp->flags & BNXT_FLAG_USING_MSIX) + req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; + break; + default: + netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n", + ring_type); + return -1; + } + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + err = le16_to_cpu(resp->error_code); + ring_id = le16_to_cpu(resp->ring_id); + mutex_unlock(&bp->hwrm_cmd_lock); + + if (rc || err) { + switch (ring_type) { + case RING_FREE_REQ_RING_TYPE_CMPL: + netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n", + rc, err); + return -1; + + case RING_FREE_REQ_RING_TYPE_RX: + netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n", + rc, err); + return -1; + + case RING_FREE_REQ_RING_TYPE_TX: + netdev_err(bp->dev, "hwrm_ring_alloc tx failed. 
rc:%x err:%x\n", + rc, err); + return -1; + + default: + netdev_err(bp->dev, "Invalid ring\n"); + return -1; + } + } + ring->fw_ring_id = ring_id; + return rc; +} + +static int bnxt_hwrm_ring_alloc(struct bnxt *bp) +{ + int i, rc = 0; + + if (bp->cp_nr_rings) { + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; + + rc = hwrm_ring_alloc_send_msg(bp, ring, + HWRM_RING_ALLOC_CMPL, i, + INVALID_STATS_CTX_ID); + if (rc) + goto err_out; + cpr->cp_doorbell = bp->bar1 + i * 0x80; + BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); + bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; + } + } + + if (bp->tx_nr_rings) { + for (i = 0; i < bp->tx_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_tx_ring_info *txr = &bnapi->tx_ring; + struct bnxt_ring_struct *ring = &txr->tx_ring_struct; + u16 fw_stats_ctx = bp->grp_info[i].fw_stats_ctx; + + rc = hwrm_ring_alloc_send_msg(bp, ring, + HWRM_RING_ALLOC_TX, i, + fw_stats_ctx); + if (rc) + goto err_out; + txr->tx_doorbell = bp->bar1 + i * 0x80; + } + } + + if (bp->rx_nr_rings) { + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; + struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; + + rc = hwrm_ring_alloc_send_msg(bp, ring, + HWRM_RING_ALLOC_RX, i, + INVALID_STATS_CTX_ID); + if (rc) + goto err_out; + rxr->rx_doorbell = bp->bar1 + i * 0x80; + writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); + bp->grp_info[i].rx_fw_ring_id = ring->fw_ring_id; + } + } + + if (bp->flags & BNXT_FLAG_AGG_RINGS) { + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; + struct bnxt_ring_struct *ring = + &rxr->rx_agg_ring_struct; + + rc = hwrm_ring_alloc_send_msg(bp, ring, + HWRM_RING_ALLOC_AGG, + bp->rx_nr_rings + i, + INVALID_STATS_CTX_ID); + if (rc) + goto err_out; + + rxr->rx_agg_doorbell = + bp->bar1 + (bp->rx_nr_rings + i) * 0x80; + writel(DB_KEY_RX | rxr->rx_agg_prod, + rxr->rx_agg_doorbell); + bp->grp_info[i].agg_fw_ring_id = ring->fw_ring_id; + } + } +err_out: + return rc; +} + +static int hwrm_ring_free_send_msg(struct bnxt *bp, + struct bnxt_ring_struct *ring, + u32 ring_type, int cmpl_ring_id) +{ + int rc; + struct hwrm_ring_free_input req = {0}; + struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; + u16 error_code; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, -1, -1); + req.ring_type = ring_type; + req.ring_id = cpu_to_le16(ring->fw_ring_id); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + error_code = le16_to_cpu(resp->error_code); + mutex_unlock(&bp->hwrm_cmd_lock); + + if (rc || error_code) { + switch (ring_type) { + case RING_FREE_REQ_RING_TYPE_CMPL: + netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n", + rc); + return rc; + case RING_FREE_REQ_RING_TYPE_RX: + netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n", + rc); + return rc; + case RING_FREE_REQ_RING_TYPE_TX: + netdev_err(bp->dev, "hwrm_ring_free tx failed. 
rc:%d\n", + rc); + return rc; + default: + netdev_err(bp->dev, "Invalid ring\n"); + return -1; + } + } + return 0; +} + +static int bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) +{ + int i, rc = 0; + + if (!bp->bnapi) + return 0; + + if (bp->tx_nr_rings) { + for (i = 0; i < bp->tx_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_tx_ring_info *txr = &bnapi->tx_ring; + struct bnxt_ring_struct *ring = &txr->tx_ring_struct; + u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id; + + if (ring->fw_ring_id != INVALID_HW_RING_ID) { + hwrm_ring_free_send_msg( + bp, ring, + RING_FREE_REQ_RING_TYPE_TX, + close_path ? cmpl_ring_id : + INVALID_HW_RING_ID); + ring->fw_ring_id = INVALID_HW_RING_ID; + } + } + } + + if (bp->rx_nr_rings) { + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; + struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; + u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id; + + if (ring->fw_ring_id != INVALID_HW_RING_ID) { + hwrm_ring_free_send_msg( + bp, ring, + RING_FREE_REQ_RING_TYPE_RX, + close_path ? cmpl_ring_id : + INVALID_HW_RING_ID); + ring->fw_ring_id = INVALID_HW_RING_ID; + bp->grp_info[i].rx_fw_ring_id = + INVALID_HW_RING_ID; + } + } + } + + if (bp->rx_agg_nr_pages) { + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; + struct bnxt_ring_struct *ring = + &rxr->rx_agg_ring_struct; + u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id; + + if (ring->fw_ring_id != INVALID_HW_RING_ID) { + hwrm_ring_free_send_msg( + bp, ring, + RING_FREE_REQ_RING_TYPE_RX, + close_path ? cmpl_ring_id : + INVALID_HW_RING_ID); + ring->fw_ring_id = INVALID_HW_RING_ID; + bp->grp_info[i].agg_fw_ring_id = + INVALID_HW_RING_ID; + } + } + } + + if (bp->cp_nr_rings) { + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; + + if (ring->fw_ring_id != INVALID_HW_RING_ID) { + hwrm_ring_free_send_msg( + bp, ring, + RING_FREE_REQ_RING_TYPE_CMPL, + INVALID_HW_RING_ID); + ring->fw_ring_id = INVALID_HW_RING_ID; + bp->grp_info[i].cp_fw_ring_id = + INVALID_HW_RING_ID; + } + } + } + + return rc; +} + +int bnxt_hwrm_set_coal(struct bnxt *bp) +{ + int i, rc = 0; + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0}; + u16 max_buf, max_buf_irq; + u16 buf_tmr, buf_tmr_irq; + u32 flags; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, + -1, -1); + + /* Each rx completion (2 records) should be DMAed immediately */ + max_buf = min_t(u16, bp->coal_bufs / 4, 2); + /* max_buf must not be zero */ + max_buf = clamp_t(u16, max_buf, 1, 63); + max_buf_irq = clamp_t(u16, bp->coal_bufs_irq, 1, 63); + buf_tmr = max_t(u16, bp->coal_ticks / 4, 1); + buf_tmr_irq = max_t(u16, bp->coal_ticks_irq, 1); + + flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; + + /* RING_IDLE generates more IRQs for lower latency. Enable it only + * if coal_ticks is less than 25 us. 
+	 */
+	if (BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks) < 25)
+		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
+
+	req.flags = cpu_to_le16(flags);
+	req.num_cmpl_dma_aggr = cpu_to_le16(max_buf);
+	req.num_cmpl_dma_aggr_during_int = cpu_to_le16(max_buf_irq);
+	req.cmpl_aggr_dma_tmr = cpu_to_le16(buf_tmr);
+	req.cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmr_irq);
+	req.int_lat_tmr_min = cpu_to_le16(buf_tmr);
+	req.int_lat_tmr_max = cpu_to_le16(bp->coal_ticks);
+	req.num_cmpl_aggr_int = cpu_to_le16(bp->coal_bufs);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		req.ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
+
+		rc = _hwrm_send_message(bp, &req, sizeof(req),
+					HWRM_CMD_TIMEOUT);
+		if (rc)
+			break;
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
+{
+	int rc = 0, i;
+	struct hwrm_stat_ctx_free_input req = {0};
+
+	if (!bp->bnapi)
+		return 0;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+		if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
+			req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
+
+			rc = _hwrm_send_message(bp, &req, sizeof(req),
+						HWRM_CMD_TIMEOUT);
+			if (rc)
+				break;
+
+			cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
+		}
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
+{
+	int rc = 0, i;
+	struct hwrm_stat_ctx_alloc_input req = {0};
+	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
+
+	req.update_period_ms = cpu_to_le32(1000);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+		req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
+
+		rc = _hwrm_send_message(bp, &req, sizeof(req),
+					HWRM_CMD_TIMEOUT);
+		if (rc)
+			break;
+
+		cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
+
+		bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+{
+	int rc = 0;
+	struct hwrm_func_qcaps_input req = {0};
+	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
+	req.fid = cpu_to_le16(0xffff);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc)
+		goto hwrm_func_qcaps_exit;
+
+	if (BNXT_PF(bp)) {
+		struct bnxt_pf_info *pf = &bp->pf;
+
+		pf->fw_fid = le16_to_cpu(resp->fid);
+		pf->port_id = le16_to_cpu(resp->port_id);
+		memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN);
+		pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+		pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+		pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+		pf->max_pf_tx_rings = pf->max_tx_rings;
+		pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+		pf->max_pf_rx_rings = pf->max_rx_rings;
+		pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+		pf->max_vnics = le16_to_cpu(resp->max_vnics);
+		pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+		pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
+		pf->max_vfs = le16_to_cpu(resp->max_vfs);
+		pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
+
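+		/* All HWRM response fields are little-endian; the
+		 * le16_to_cpu()/le32_to_cpu() conversions keep these
+		 * capability reads correct on big-endian hosts as well.
+		 */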
pf->max_decap_records = le32_to_cpu(resp->max_decap_records); + pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows); + pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows); + pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); + pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows); + } else { +#ifdef CONFIG_BNXT_SRIOV + struct bnxt_vf_info *vf = &bp->vf; + + vf->fw_fid = le16_to_cpu(resp->fid); + memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN); + if (!is_valid_ether_addr(vf->mac_addr)) + random_ether_addr(vf->mac_addr); + + vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); + vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); + vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings); + vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings); + vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); + vf->max_vnics = le16_to_cpu(resp->max_vnics); + vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); +#endif + } + + bp->tx_push_thresh = 0; + if (resp->flags & + cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)) + bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; + +hwrm_func_qcaps_exit: + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int bnxt_hwrm_func_reset(struct bnxt *bp) +{ + struct hwrm_func_reset_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1); + req.enables = 0; + + return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT); +} + +static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) +{ + int rc = 0; + struct hwrm_queue_qportcfg_input req = {0}; + struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr; + u8 i, *qptr; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + goto qportcfg_exit; + + if (!resp->max_configurable_queues) { + rc = -EINVAL; + goto qportcfg_exit; + } + bp->max_tc = resp->max_configurable_queues; + if (bp->max_tc > BNXT_MAX_QUEUE) + bp->max_tc = BNXT_MAX_QUEUE; + + qptr = &resp->queue_id0; + for (i = 0; i < bp->max_tc; i++) { + bp->q_info[i].queue_id = *qptr++; + bp->q_info[i].queue_profile = *qptr++; + } + +qportcfg_exit: + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int bnxt_hwrm_ver_get(struct bnxt *bp) +{ + int rc; + struct hwrm_ver_get_input req = {0}; + struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1); + req.hwrm_intf_maj = HWRM_VERSION_MAJOR; + req.hwrm_intf_min = HWRM_VERSION_MINOR; + req.hwrm_intf_upd = HWRM_VERSION_UPDATE; + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + goto hwrm_ver_get_exit; + + memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); + + if (req.hwrm_intf_maj != resp->hwrm_intf_maj || + req.hwrm_intf_min != resp->hwrm_intf_min || + req.hwrm_intf_upd != resp->hwrm_intf_upd) { + netdev_warn(bp->dev, "HWRM interface %d.%d.%d does not match driver interface %d.%d.%d.\n", + resp->hwrm_intf_maj, resp->hwrm_intf_min, + resp->hwrm_intf_upd, req.hwrm_intf_maj, + req.hwrm_intf_min, req.hwrm_intf_upd); + netdev_warn(bp->dev, "Please update driver or firmware with matching interface versions.\n"); + } + snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "bc %d.%d.%d rm %d.%d.%d", + resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld, + resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd); + +hwrm_ver_get_exit: + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static void 
bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
+{
+	if (bp->vxlan_port_cnt) {
+		bnxt_hwrm_tunnel_dst_port_free(
+			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
+	}
+	bp->vxlan_port_cnt = 0;
+	if (bp->nge_port_cnt) {
+		bnxt_hwrm_tunnel_dst_port_free(
+			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
+	}
+	bp->nge_port_cnt = 0;
+}
+
+static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
+{
+	int rc, i;
+	u32 tpa_flags = 0;
+
+	if (set_tpa)
+		tpa_flags = bp->flags & BNXT_FLAG_TPA;
+	for (i = 0; i < bp->nr_vnics; i++) {
+		rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
+		if (rc) {
+			netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
+				   i, rc);
+			return rc;
+		}
+	}
+	return 0;
+}
+
+static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->nr_vnics; i++)
+		bnxt_hwrm_vnic_set_rss(bp, i, false);
+}
+
+static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
+				    bool irq_re_init)
+{
+	if (bp->vnic_info) {
+		bnxt_hwrm_clear_vnic_filter(bp);
+		/* clear all RSS settings before freeing the vnic ctx */
+		bnxt_hwrm_clear_vnic_rss(bp);
+		bnxt_hwrm_vnic_ctx_free(bp);
+		/* before freeing the vnic, undo the vnic tpa settings */
+		if (bp->flags & BNXT_FLAG_TPA)
+			bnxt_set_tpa(bp, false);
+		bnxt_hwrm_vnic_free(bp);
+	}
+	bnxt_hwrm_ring_free(bp, close_path);
+	bnxt_hwrm_ring_grp_free(bp);
+	if (irq_re_init) {
+		bnxt_hwrm_stat_ctx_free(bp);
+		bnxt_hwrm_free_tunnel_ports(bp);
+	}
+}
+
+static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
+{
+	int rc;
+
+	/* allocate context for vnic */
+	rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id);
+	if (rc) {
+		netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
+			   vnic_id, rc);
+		goto vnic_setup_err;
+	}
+	bp->rsscos_nr_ctxs++;
+
+	/* configure default vnic, ring grp */
+	rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
+	if (rc) {
+		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
+			   vnic_id, rc);
+		goto vnic_setup_err;
+	}
+
+	/* Enable RSS hashing on vnic */
+	rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
+	if (rc) {
+		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
+			   vnic_id, rc);
+		goto vnic_setup_err;
+	}
+
+	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+		rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
+		if (rc) {
+			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
+				   vnic_id, rc);
+		}
+	}
+
+vnic_setup_err:
+	return rc;
+}
+
+static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
+{
+#ifdef CONFIG_RFS_ACCEL
+	int i, rc = 0;
+
+	for (i = 0; i < bp->rx_nr_rings; i++) {
+		u16 vnic_id = i + 1;
+		u16 ring_id = i;
+
+		if (vnic_id >= bp->nr_vnics)
+			break;
+
+		bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG;
+		rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, ring_id + 1);
+		if (rc) {
+			netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
+				   vnic_id, rc);
+			break;
+		}
+		rc = bnxt_setup_vnic(bp, vnic_id);
+		if (rc)
+			break;
+	}
+	return rc;
+#else
+	return 0;
+#endif
+}
+
+static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
+{
+	int rc = 0;
+
+	if (irq_re_init) {
+		rc = bnxt_hwrm_stat_ctx_alloc(bp);
+		if (rc) {
+			netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
+				   rc);
+			goto err_out;
+		}
+	}
+
+	rc = bnxt_hwrm_ring_alloc(bp);
+	if (rc) {
+		netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
+		goto err_out;
+	}
+
+	rc = bnxt_hwrm_ring_grp_alloc(bp);
+	if (rc) {
+		netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
+		goto err_out;
+	}
+
+	/* default vnic 0 */
+	rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, bp->rx_nr_rings);
+	if (rc) {
+		netdev_err(bp->dev, "hwrm vnic alloc failure
rc: %x\n", rc); + goto err_out; + } + + rc = bnxt_setup_vnic(bp, 0); + if (rc) + goto err_out; + + if (bp->flags & BNXT_FLAG_RFS) { + rc = bnxt_alloc_rfs_vnics(bp); + if (rc) + goto err_out; + } + + if (bp->flags & BNXT_FLAG_TPA) { + rc = bnxt_set_tpa(bp, true); + if (rc) + goto err_out; + } + + if (BNXT_VF(bp)) + bnxt_update_vf_mac(bp); + + /* Filter for default vnic 0 */ + rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr); + if (rc) { + netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); + goto err_out; + } + bp->vnic_info[0].uc_filter_count = 1; + + bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_UNICAST | + CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; + + if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp)) + bp->vnic_info[0].rx_mask |= + CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; + + rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); + if (rc) { + netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", rc); + goto err_out; + } + + rc = bnxt_hwrm_set_coal(bp); + if (rc) + netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", + rc); + + return 0; + +err_out: + bnxt_hwrm_resource_free(bp, 0, true); + + return rc; +} + +static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init) +{ + bnxt_hwrm_resource_free(bp, 1, irq_re_init); + return 0; +} + +static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) +{ + bnxt_init_rx_rings(bp); + bnxt_init_tx_rings(bp); + bnxt_init_ring_grps(bp, irq_re_init); + bnxt_init_vnics(bp); + + return bnxt_init_chip(bp, irq_re_init); +} + +static void bnxt_disable_int(struct bnxt *bp) +{ + int i; + + if (!bp->bnapi) + return; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + + BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); + } +} + +static void bnxt_enable_int(struct bnxt *bp) +{ + int i; + + atomic_set(&bp->intr_sem, 0); + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + + BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); + } +} + +static int bnxt_set_real_num_queues(struct bnxt *bp) +{ + int rc; + struct net_device *dev = bp->dev; + + rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings); + if (rc) + return rc; + + rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings); + if (rc) + return rc; + +#ifdef CONFIG_RFS_ACCEL + if (bp->rx_nr_rings) + dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings); + if (!dev->rx_cpu_rmap) + rc = -ENOMEM; +#endif + + return rc; +} + +static int bnxt_setup_msix(struct bnxt *bp) +{ + struct msix_entry *msix_ent; + struct net_device *dev = bp->dev; + int i, total_vecs, rc = 0; + const int len = sizeof(bp->irq_tbl[0].name); + + bp->flags &= ~BNXT_FLAG_USING_MSIX; + total_vecs = bp->cp_nr_rings; + + msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL); + if (!msix_ent) + return -ENOMEM; + + for (i = 0; i < total_vecs; i++) { + msix_ent[i].entry = i; + msix_ent[i].vector = 0; + } + + total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, 1, total_vecs); + if (total_vecs < 0) { + rc = -ENODEV; + goto msix_setup_exit; + } + + bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL); + if (bp->irq_tbl) { + int tcs; + + /* Trim rings based upon num of vectors allocated */ + bp->rx_nr_rings = min_t(int, total_vecs, bp->rx_nr_rings); + bp->tx_nr_rings = min_t(int, total_vecs, bp->tx_nr_rings); + bp->tx_nr_rings_per_tc = bp->tx_nr_rings; + tcs = netdev_get_num_tc(dev); + if (tcs > 1) { + 
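+			/* Split the trimmed TX rings evenly across the
+			 * traffic classes; if there are not enough rings
+			 * for at least one per TC, drop the TC config.
+			 */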
bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs; + if (bp->tx_nr_rings_per_tc == 0) { + netdev_reset_tc(dev); + bp->tx_nr_rings_per_tc = bp->tx_nr_rings; + } else { + int i, off, count; + + bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs; + for (i = 0; i < tcs; i++) { + count = bp->tx_nr_rings_per_tc; + off = i * count; + netdev_set_tc_queue(dev, i, count, off); + } + } + } + bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings); + + for (i = 0; i < bp->cp_nr_rings; i++) { + bp->irq_tbl[i].vector = msix_ent[i].vector; + snprintf(bp->irq_tbl[i].name, len, + "%s-%s-%d", dev->name, "TxRx", i); + bp->irq_tbl[i].handler = bnxt_msix; + } + rc = bnxt_set_real_num_queues(bp); + if (rc) + goto msix_setup_exit; + } else { + rc = -ENOMEM; + goto msix_setup_exit; + } + bp->flags |= BNXT_FLAG_USING_MSIX; + kfree(msix_ent); + return 0; + +msix_setup_exit: + netdev_err(bp->dev, "bnxt_setup_msix err: %x\n", rc); + pci_disable_msix(bp->pdev); + kfree(msix_ent); + return rc; +} + +static int bnxt_setup_inta(struct bnxt *bp) +{ + int rc; + const int len = sizeof(bp->irq_tbl[0].name); + + if (netdev_get_num_tc(bp->dev)) + netdev_reset_tc(bp->dev); + + bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL); + if (!bp->irq_tbl) { + rc = -ENOMEM; + return rc; + } + bp->rx_nr_rings = 1; + bp->tx_nr_rings = 1; + bp->cp_nr_rings = 1; + bp->tx_nr_rings_per_tc = bp->tx_nr_rings; + bp->irq_tbl[0].vector = bp->pdev->irq; + snprintf(bp->irq_tbl[0].name, len, + "%s-%s-%d", bp->dev->name, "TxRx", 0); + bp->irq_tbl[0].handler = bnxt_inta; + rc = bnxt_set_real_num_queues(bp); + return rc; +} + +static int bnxt_setup_int_mode(struct bnxt *bp) +{ + int rc = 0; + + if (bp->flags & BNXT_FLAG_MSIX_CAP) + rc = bnxt_setup_msix(bp); + + if (!(bp->flags & BNXT_FLAG_USING_MSIX)) { + /* fallback to INTA */ + rc = bnxt_setup_inta(bp); + } + return rc; +} + +static void bnxt_free_irq(struct bnxt *bp) +{ + struct bnxt_irq *irq; + int i; + +#ifdef CONFIG_RFS_ACCEL + free_irq_cpu_rmap(bp->dev->rx_cpu_rmap); + bp->dev->rx_cpu_rmap = NULL; +#endif + if (!bp->irq_tbl) + return; + + for (i = 0; i < bp->cp_nr_rings; i++) { + irq = &bp->irq_tbl[i]; + if (irq->requested) + free_irq(irq->vector, bp->bnapi[i]); + irq->requested = 0; + } + if (bp->flags & BNXT_FLAG_USING_MSIX) + pci_disable_msix(bp->pdev); + kfree(bp->irq_tbl); + bp->irq_tbl = NULL; +} + +static int bnxt_request_irq(struct bnxt *bp) +{ + int i, rc = 0; + unsigned long flags = 0; +#ifdef CONFIG_RFS_ACCEL + struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap; +#endif + + if (!(bp->flags & BNXT_FLAG_USING_MSIX)) + flags = IRQF_SHARED; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_irq *irq = &bp->irq_tbl[i]; +#ifdef CONFIG_RFS_ACCEL + if (rmap && (i < bp->rx_nr_rings)) { + rc = irq_cpu_rmap_add(rmap, irq->vector); + if (rc) + netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n", + i); + } +#endif + rc = request_irq(irq->vector, irq->handler, flags, irq->name, + bp->bnapi[i]); + if (rc) + break; + + irq->requested = 1; + } + return rc; +} + +static void bnxt_del_napi(struct bnxt *bp) +{ + int i; + + if (!bp->bnapi) + return; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + + napi_hash_del(&bnapi->napi); + netif_napi_del(&bnapi->napi); + } +} + +static void bnxt_init_napi(struct bnxt *bp) +{ + int i; + struct bnxt_napi *bnapi; + + if (bp->flags & BNXT_FLAG_USING_MSIX) { + for (i = 0; i < bp->cp_nr_rings; i++) { + bnapi = bp->bnapi[i]; + netif_napi_add(bp->dev, &bnapi->napi, + bnxt_poll, 64); + napi_hash_add(&bnapi->napi); + 
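+			/* 64 above is the standard NAPI poll weight;
+			 * napi_hash_add() registers the instance for
+			 * busy polling.
+			 */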
} + } else { + bnapi = bp->bnapi[0]; + netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64); + napi_hash_add(&bnapi->napi); + } +} + +static void bnxt_disable_napi(struct bnxt *bp) +{ + int i; + + if (!bp->bnapi) + return; + + for (i = 0; i < bp->cp_nr_rings; i++) { + napi_disable(&bp->bnapi[i]->napi); + bnxt_disable_poll(bp->bnapi[i]); + } +} + +static void bnxt_enable_napi(struct bnxt *bp) +{ + int i; + + for (i = 0; i < bp->cp_nr_rings; i++) { + bnxt_enable_poll(bp->bnapi[i]); + napi_enable(&bp->bnapi[i]->napi); + } +} + +static void bnxt_tx_disable(struct bnxt *bp) +{ + int i; + struct bnxt_napi *bnapi; + struct bnxt_tx_ring_info *txr; + struct netdev_queue *txq; + + if (bp->bnapi) { + for (i = 0; i < bp->tx_nr_rings; i++) { + bnapi = bp->bnapi[i]; + txr = &bnapi->tx_ring; + txq = netdev_get_tx_queue(bp->dev, i); + __netif_tx_lock(txq, smp_processor_id()); + txr->dev_state = BNXT_DEV_STATE_CLOSING; + __netif_tx_unlock(txq); + } + } + /* Stop all TX queues */ + netif_tx_disable(bp->dev); + netif_carrier_off(bp->dev); +} + +static void bnxt_tx_enable(struct bnxt *bp) +{ + int i; + struct bnxt_napi *bnapi; + struct bnxt_tx_ring_info *txr; + struct netdev_queue *txq; + + for (i = 0; i < bp->tx_nr_rings; i++) { + bnapi = bp->bnapi[i]; + txr = &bnapi->tx_ring; + txq = netdev_get_tx_queue(bp->dev, i); + txr->dev_state = 0; + } + netif_tx_wake_all_queues(bp->dev); + if (bp->link_info.link_up) + netif_carrier_on(bp->dev); +} + +static void bnxt_report_link(struct bnxt *bp) +{ + if (bp->link_info.link_up) { + const char *duplex; + const char *flow_ctrl; + u16 speed; + + netif_carrier_on(bp->dev); + if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL) + duplex = "full"; + else + duplex = "half"; + if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH) + flow_ctrl = "ON - receive & transmit"; + else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX) + flow_ctrl = "ON - transmit"; + else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX) + flow_ctrl = "ON - receive"; + else + flow_ctrl = "none"; + speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); + netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", + speed, duplex, flow_ctrl); + } else { + netif_carrier_off(bp->dev); + netdev_err(bp->dev, "NIC Link is Down\n"); + } +} + +static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) +{ + int rc = 0; + struct bnxt_link_info *link_info = &bp->link_info; + struct hwrm_port_phy_qcfg_input req = {0}; + struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + u8 link_up = link_info->link_up; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) { + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; + } + + memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp)); + link_info->phy_link_status = resp->link; + link_info->duplex = resp->duplex; + link_info->pause = resp->pause; + link_info->auto_mode = resp->auto_mode; + link_info->auto_pause_setting = resp->auto_pause; + link_info->force_pause_setting = resp->force_pause; + link_info->duplex_setting = resp->duplex_setting; + if (link_info->phy_link_status == BNXT_LINK_LINK) + link_info->link_speed = le16_to_cpu(resp->link_speed); + else + link_info->link_speed = 0; + link_info->force_link_speed = le16_to_cpu(resp->force_link_speed); + link_info->auto_link_speed = le16_to_cpu(resp->auto_link_speed); + link_info->support_speeds = le16_to_cpu(resp->support_speeds); + link_info->auto_link_speeds = 
le16_to_cpu(resp->auto_link_speed_mask);
+	link_info->preemphasis = le32_to_cpu(resp->preemphasis);
+	link_info->phy_ver[0] = resp->phy_maj;
+	link_info->phy_ver[1] = resp->phy_min;
+	link_info->phy_ver[2] = resp->phy_bld;
+	link_info->media_type = resp->media_type;
+	link_info->transceiver = resp->transceiver_type;
+	link_info->phy_addr = resp->phy_addr;
+
+	/* TODO: need to add more logic to report VF link */
+	if (chng_link_state) {
+		if (link_info->phy_link_status == BNXT_LINK_LINK)
+			link_info->link_up = 1;
+		else
+			link_info->link_up = 0;
+		if (link_up != link_info->link_up)
+			bnxt_report_link(bp);
+	} else {
+		/* always link down if not required to update link state */
+		link_info->link_up = 0;
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return 0;
+}
+
+static void
+bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
+{
+	if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
+		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
+			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
+		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
+			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
+		req->enables |=
+			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
+	} else {
+		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
+			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
+		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
+			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
+		req->enables |=
+			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
+	}
+}
+
+static void bnxt_hwrm_set_link_common(struct bnxt *bp,
+				      struct hwrm_port_phy_cfg_input *req)
+{
+	u8 autoneg = bp->link_info.autoneg;
+	u16 fw_link_speed = bp->link_info.req_link_speed;
+	u32 advertising = bp->link_info.advertising;
+
+	if (autoneg & BNXT_AUTONEG_SPEED) {
+		req->auto_mode |=
+			PORT_PHY_CFG_REQ_AUTO_MODE_MASK;
+
+		req->enables |= cpu_to_le32(
+			PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
+		req->auto_link_speed_mask = cpu_to_le16(advertising);
+
+		req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
+		req->flags |=
+			cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
+	} else {
+		req->force_link_speed = cpu_to_le16(fw_link_speed);
+		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
+	}
+
+	/* currently don't support half duplex */
+	req->auto_duplex = PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL;
+	req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX);
+	/* tell chimp that the setting takes effect immediately */
+	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
+}
+
+int bnxt_hwrm_set_pause(struct bnxt *bp)
+{
+	struct hwrm_port_phy_cfg_input req = {0};
+	int rc;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
+	bnxt_hwrm_set_pause_common(bp, &req);
+
+	if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
+	    bp->link_info.force_link_chng)
+		bnxt_hwrm_set_link_common(bp, &req);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
+		/* since changing of pause setting doesn't trigger any link
+		 * change event, the driver needs to update the current pause
+		 * result upon successful return of the phy_cfg command
+		 */
+		bp->link_info.pause =
+		bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
+		bp->link_info.auto_pause_setting = 0;
+		if (!bp->link_info.force_link_chng)
+			bnxt_report_link(bp);
+	}
+	bp->link_info.force_link_chng = false;
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+int
bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause) +{ + struct hwrm_port_phy_cfg_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); + if (set_pause) + bnxt_hwrm_set_pause_common(bp, &req); + + bnxt_hwrm_set_link_common(bp, &req); + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + +static int bnxt_update_phy_setting(struct bnxt *bp) +{ + int rc; + bool update_link = false; + bool update_pause = false; + struct bnxt_link_info *link_info = &bp->link_info; + + rc = bnxt_update_link(bp, true); + if (rc) { + netdev_err(bp->dev, "failed to update link (rc: %x)\n", + rc); + return rc; + } + if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && + link_info->auto_pause_setting != link_info->req_flow_ctrl) + update_pause = true; + if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && + link_info->force_pause_setting != link_info->req_flow_ctrl) + update_pause = true; + if (link_info->req_duplex != link_info->duplex_setting) + update_link = true; + if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { + if (BNXT_AUTO_MODE(link_info->auto_mode)) + update_link = true; + if (link_info->req_link_speed != link_info->force_link_speed) + update_link = true; + } else { + if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) + update_link = true; + if (link_info->advertising != link_info->auto_link_speeds) + update_link = true; + if (link_info->req_link_speed != link_info->auto_link_speed) + update_link = true; + } + + if (update_link) + rc = bnxt_hwrm_set_link_setting(bp, update_pause); + else if (update_pause) + rc = bnxt_hwrm_set_pause(bp); + if (rc) { + netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n", + rc); + return rc; + } + + return rc; +} + +static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) +{ + int rc = 0; + + netif_carrier_off(bp->dev); + if (irq_re_init) { + rc = bnxt_setup_int_mode(bp); + if (rc) { + netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n", + rc); + return rc; + } + } + if ((bp->flags & BNXT_FLAG_RFS) && + !(bp->flags & BNXT_FLAG_USING_MSIX)) { + /* disable RFS if falling back to INTA */ + bp->dev->hw_features &= ~NETIF_F_NTUPLE; + bp->flags &= ~BNXT_FLAG_RFS; + } + + rc = bnxt_alloc_mem(bp, irq_re_init); + if (rc) { + netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); + goto open_err_free_mem; + } + + if (irq_re_init) { + bnxt_init_napi(bp); + rc = bnxt_request_irq(bp); + if (rc) { + netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); + goto open_err; + } + } + + bnxt_enable_napi(bp); + + rc = bnxt_init_nic(bp, irq_re_init); + if (rc) { + netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); + goto open_err; + } + + if (link_re_init) { + rc = bnxt_update_phy_setting(bp); + if (rc) + goto open_err; + } + + if (irq_re_init) { +#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE) + vxlan_get_rx_port(bp->dev); +#endif + if (!bnxt_hwrm_tunnel_dst_port_alloc( + bp, htons(0x17c1), + TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE)) + bp->nge_port_cnt = 1; + } + + bp->state = BNXT_STATE_OPEN; + bnxt_enable_int(bp); + /* Enable TX queues */ + bnxt_tx_enable(bp); + mod_timer(&bp->timer, jiffies + bp->current_interval); + + return 0; + +open_err: + bnxt_disable_napi(bp); + bnxt_del_napi(bp); + +open_err_free_mem: + bnxt_free_skbs(bp); + bnxt_free_irq(bp); + bnxt_free_mem(bp, true); + return rc; +} + +/* rtnl_lock held */ +int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) +{ + int rc = 0; + + rc = __bnxt_open_nic(bp, irq_re_init, link_re_init); + if (rc) { + 
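+		/* __bnxt_open_nic() has already unwound its own
+		 * allocations on failure; close the device so the
+		 * stack's view of its state matches.
+		 */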
netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc); + dev_close(bp->dev); + } + return rc; +} + +static int bnxt_open(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + int rc = 0; + + rc = bnxt_hwrm_func_reset(bp); + if (rc) { + netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n", + rc); + rc = -1; + return rc; + } + return __bnxt_open_nic(bp, true, true); +} + +static void bnxt_disable_int_sync(struct bnxt *bp) +{ + int i; + + atomic_inc(&bp->intr_sem); + if (!netif_running(bp->dev)) + return; + + bnxt_disable_int(bp); + for (i = 0; i < bp->cp_nr_rings; i++) + synchronize_irq(bp->irq_tbl[i].vector); +} + +int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) +{ + int rc = 0; + +#ifdef CONFIG_BNXT_SRIOV + if (bp->sriov_cfg) { + rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, + !bp->sriov_cfg, + BNXT_SRIOV_CFG_WAIT_TMO); + if (rc) + netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n"); + } +#endif + /* Change device state to avoid TX queue wake up's */ + bnxt_tx_disable(bp); + + bp->state = BNXT_STATE_CLOSED; + cancel_work_sync(&bp->sp_task); + + /* Flush rings before disabling interrupts */ + bnxt_shutdown_nic(bp, irq_re_init); + + /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */ + + bnxt_disable_napi(bp); + bnxt_disable_int_sync(bp); + del_timer_sync(&bp->timer); + bnxt_free_skbs(bp); + + if (irq_re_init) { + bnxt_free_irq(bp); + bnxt_del_napi(bp); + } + bnxt_free_mem(bp, irq_re_init); + return rc; +} + +static int bnxt_close(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + + bnxt_close_nic(bp, true, true); + return 0; +} + +/* rtnl_lock held */ +static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + /* fallthru */ + case SIOCGMIIREG: { + if (!netif_running(dev)) + return -EAGAIN; + + return 0; + } + + case SIOCSMIIREG: + if (!netif_running(dev)) + return -EAGAIN; + + return 0; + + default: + /* do nothing */ + break; + } + return -EOPNOTSUPP; +} + +static struct rtnl_link_stats64 * +bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + u32 i; + struct bnxt *bp = netdev_priv(dev); + + memset(stats, 0, sizeof(struct rtnl_link_stats64)); + + if (!bp->bnapi) + return stats; + + /* TODO check if we need to synchronize with bnxt_close path */ + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct ctx_hw_stats *hw_stats = cpr->hw_stats; + + stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts); + stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts); + stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts); + + stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts); + stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts); + stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts); + + stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes); + stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes); + stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes); + + stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes); + stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes); + stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes); + + stats->rx_missed_errors += + le64_to_cpu(hw_stats->rx_discard_pkts); + + stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts); + + stats->rx_dropped += le64_to_cpu(hw_stats->rx_drop_pkts); + + stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts); + 
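+		/* These counters are DMAed into hw_stats by the firmware
+		 * (every 1 s per bnxt_hwrm_stat_ctx_alloc()).
+		 */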
} + + return stats; +} + +static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) +{ + struct net_device *dev = bp->dev; + struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; + struct netdev_hw_addr *ha; + u8 *haddr; + int mc_count = 0; + bool update = false; + int off = 0; + + netdev_for_each_mc_addr(ha, dev) { + if (mc_count >= BNXT_MAX_MC_ADDRS) { + *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; + vnic->mc_list_count = 0; + return false; + } + haddr = ha->addr; + if (!ether_addr_equal(haddr, vnic->mc_list + off)) { + memcpy(vnic->mc_list + off, haddr, ETH_ALEN); + update = true; + } + off += ETH_ALEN; + mc_count++; + } + if (mc_count) + *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; + + if (mc_count != vnic->mc_list_count) { + vnic->mc_list_count = mc_count; + update = true; + } + return update; +} + +static bool bnxt_uc_list_updated(struct bnxt *bp) +{ + struct net_device *dev = bp->dev; + struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; + struct netdev_hw_addr *ha; + int off = 0; + + if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1)) + return true; + + netdev_for_each_uc_addr(ha, dev) { + if (!ether_addr_equal(ha->addr, vnic->uc_list + off)) + return true; + + off += ETH_ALEN; + } + return false; +} + +static void bnxt_set_rx_mode(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; + u32 mask = vnic->rx_mask; + bool mc_update = false; + bool uc_update; + + if (!netif_running(dev)) + return; + + mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | + CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | + CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST); + + /* Only allow PF to be in promiscuous mode */ + if ((dev->flags & IFF_PROMISC) && BNXT_PF(bp)) + mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; + + uc_update = bnxt_uc_list_updated(bp); + + if (dev->flags & IFF_ALLMULTI) { + mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; + vnic->mc_list_count = 0; + } else { + mc_update = bnxt_mc_list_updated(bp, &mask); + } + + if (mask != vnic->rx_mask || uc_update || mc_update) { + vnic->rx_mask = mask; + + set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event); + schedule_work(&bp->sp_task); + } +} + +static void bnxt_cfg_rx_mode(struct bnxt *bp) +{ + struct net_device *dev = bp->dev; + struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; + struct netdev_hw_addr *ha; + int i, off = 0, rc; + bool uc_update; + + netif_addr_lock_bh(dev); + uc_update = bnxt_uc_list_updated(bp); + netif_addr_unlock_bh(dev); + + if (!uc_update) + goto skip_uc; + + mutex_lock(&bp->hwrm_cmd_lock); + for (i = 1; i < vnic->uc_filter_count; i++) { + struct hwrm_cfa_l2_filter_free_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1, + -1); + + req.l2_filter_id = vnic->fw_l2_filter_id[i]; + + rc = _hwrm_send_message(bp, &req, sizeof(req), + HWRM_CMD_TIMEOUT); + } + mutex_unlock(&bp->hwrm_cmd_lock); + + vnic->uc_filter_count = 1; + + netif_addr_lock_bh(dev); + if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) { + vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; + } else { + netdev_for_each_uc_addr(ha, dev) { + memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN); + off += ETH_ALEN; + vnic->uc_filter_count++; + } + } + netif_addr_unlock_bh(dev); + + for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) { + rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off); + if (rc) { + netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", + rc); + vnic->uc_filter_count = i; + } + } + +skip_uc: + rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); + 
if (rc)
+		netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
+			   rc);
+}
+
+static netdev_features_t bnxt_fix_features(struct net_device *dev,
+					   netdev_features_t features)
+{
+	return features;
+}
+
+static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	u32 flags = bp->flags;
+	u32 changes;
+	int rc = 0;
+	bool re_init = false;
+	bool update_tpa = false;
+
+	flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
+	if ((features & NETIF_F_GRO) && (bp->pdev->revision > 0))
+		flags |= BNXT_FLAG_GRO;
+	if (features & NETIF_F_LRO)
+		flags |= BNXT_FLAG_LRO;
+
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+		flags |= BNXT_FLAG_STRIP_VLAN;
+
+	if (features & NETIF_F_NTUPLE)
+		flags |= BNXT_FLAG_RFS;
+
+	changes = flags ^ bp->flags;
+	if (changes & BNXT_FLAG_TPA) {
+		update_tpa = true;
+		if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
+		    (flags & BNXT_FLAG_TPA) == 0)
+			re_init = true;
+	}
+
+	if (changes & ~BNXT_FLAG_TPA)
+		re_init = true;
+
+	if (flags != bp->flags) {
+		u32 old_flags = bp->flags;
+
+		bp->flags = flags;
+
+		if (!netif_running(dev)) {
+			if (update_tpa)
+				bnxt_set_ring_params(bp);
+			return rc;
+		}
+
+		if (re_init) {
+			bnxt_close_nic(bp, false, false);
+			if (update_tpa)
+				bnxt_set_ring_params(bp);
+
+			return bnxt_open_nic(bp, false, false);
+		}
+		if (update_tpa) {
+			rc = bnxt_set_tpa(bp,
+					  (flags & BNXT_FLAG_TPA) ?
+					  true : false);
+			if (rc)
+				bp->flags = old_flags;
+		}
+	}
+	return rc;
+}
+
+static void bnxt_dbg_dump_states(struct bnxt *bp)
+{
+	int i;
+	struct bnxt_napi *bnapi;
+	struct bnxt_tx_ring_info *txr;
+	struct bnxt_rx_ring_info *rxr;
+	struct bnxt_cp_ring_info *cpr;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		bnapi = bp->bnapi[i];
+		txr = &bnapi->tx_ring;
+		rxr = &bnapi->rx_ring;
+		cpr = &bnapi->cp_ring;
+		if (netif_msg_drv(bp)) {
+			netdev_info(bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
+				    i, txr->tx_ring_struct.fw_ring_id,
+				    txr->tx_prod, txr->tx_cons);
+			netdev_info(bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
+				    i, rxr->rx_ring_struct.fw_ring_id,
+				    rxr->rx_prod,
+				    rxr->rx_agg_ring_struct.fw_ring_id,
+				    rxr->rx_agg_prod, rxr->rx_sw_agg_prod);
+			netdev_info(bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
+				    i, cpr->cp_ring_struct.fw_ring_id,
+				    cpr->cp_raw_cons);
+		}
+	}
+}
+
+static void bnxt_reset_task(struct bnxt *bp)
+{
+	bnxt_dbg_dump_states(bp);
+	if (netif_running(bp->dev))
+		bnxt_tx_disable(bp); /* prevent tx timeout again */
+}
+
+static void bnxt_tx_timeout(struct net_device *dev)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
+	set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
+	schedule_work(&bp->sp_task);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bnxt_poll_controller(struct net_device *dev)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	int i;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_irq *irq = &bp->irq_tbl[i];
+
+		disable_irq(irq->vector);
+		irq->handler(irq->vector, bp->bnapi[i]);
+		enable_irq(irq->vector);
+	}
+}
+#endif
+
+static void bnxt_timer(unsigned long data)
+{
+	struct bnxt *bp = (struct bnxt *)data;
+	struct net_device *dev = bp->dev;
+
+	if (!netif_running(dev))
+		return;
+
+	if (atomic_read(&bp->intr_sem) != 0)
+		goto bnxt_restart_timer;
+
+bnxt_restart_timer:
+	mod_timer(&bp->timer, jiffies + bp->current_interval);
+}
+
+static void bnxt_cfg_ntp_filters(struct bnxt *);
+
+static void bnxt_sp_task(struct work_struct *work)
+{
+	struct bnxt *bp =
container_of(work, struct bnxt, sp_task);
+	int rc;
+
+	if (bp->state != BNXT_STATE_OPEN)
+		return;
+
+	if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
+		bnxt_cfg_rx_mode(bp);
+
+	if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
+		bnxt_cfg_ntp_filters(bp);
+	if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
+		rc = bnxt_update_link(bp, true);
+		if (rc)
+			netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
+				   rc);
+	}
+	if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
+		bnxt_hwrm_exec_fwd_req(bp);
+	if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
+		bnxt_hwrm_tunnel_dst_port_alloc(
+			bp, bp->vxlan_port,
+			TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
+	}
+	if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
+		bnxt_hwrm_tunnel_dst_port_free(
+			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
+	}
+	if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
+		bnxt_reset_task(bp);
+}
+
+static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
+{
+	int rc;
+	struct bnxt *bp = netdev_priv(dev);
+
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	/* enable device (incl. PCI PM wakeup), and bus-mastering */
+	rc = pci_enable_device(pdev);
+	if (rc) {
+		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
+		goto init_err;
+	}
+
+	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+		dev_err(&pdev->dev,
+			"Cannot find PCI device base address, aborting\n");
+		rc = -ENODEV;
+		goto init_err_disable;
+	}
+
+	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
+	if (rc) {
+		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
+		goto init_err_disable;
+	}
+
+	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
+	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
+		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
+		rc = -EIO;
+		goto init_err_disable;
+	}
+
+	pci_set_master(pdev);
+
+	bp->dev = dev;
+	bp->pdev = pdev;
+
+	bp->bar0 = pci_ioremap_bar(pdev, 0);
+	if (!bp->bar0) {
+		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
+		rc = -ENOMEM;
+		goto init_err_release;
+	}
+
+	bp->bar1 = pci_ioremap_bar(pdev, 2);
+	if (!bp->bar1) {
+		dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
+		rc = -ENOMEM;
+		goto init_err_release;
+	}
+
+	bp->bar2 = pci_ioremap_bar(pdev, 4);
+	if (!bp->bar2) {
+		dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
+		rc = -ENOMEM;
+		goto init_err_release;
+	}
+
+	INIT_WORK(&bp->sp_task, bnxt_sp_task);
+
+	spin_lock_init(&bp->ntp_fltr_lock);
+
+	bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
+	bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
+
+	bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(4);
+	bp->coal_bufs = 20;
+	bp->coal_ticks_irq = BNXT_USEC_TO_COAL_TIMER(1);
+	bp->coal_bufs_irq = 2;
+
+	init_timer(&bp->timer);
+	bp->timer.data = (unsigned long)bp;
+	bp->timer.function = bnxt_timer;
+	bp->current_interval = BNXT_TIMER_INTERVAL;
+
+	bp->state = BNXT_STATE_CLOSED;
+
+	return 0;
+
+init_err_release:
+	if (bp->bar2) {
+		pci_iounmap(pdev, bp->bar2);
+		bp->bar2 = NULL;
+	}
+
+	if (bp->bar1) {
+		pci_iounmap(pdev, bp->bar1);
+		bp->bar1 = NULL;
+	}
+
+	if (bp->bar0) {
+		pci_iounmap(pdev, bp->bar0);
+		bp->bar0 = NULL;
+	}
+
+	pci_release_regions(pdev);
+
+init_err_disable:
+	pci_disable_device(pdev);
+
+init_err:
+	return rc;
+}
+
+/* rtnl_lock held */
+static int bnxt_change_mac_addr(struct net_device *dev, void *p)
+{
+	struct sockaddr *addr = p;
+
+	if
(!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + return 0; +} + +/* rtnl_lock held */ +static int bnxt_change_mtu(struct net_device *dev, int new_mtu) +{ + struct bnxt *bp = netdev_priv(dev); + + if (new_mtu < 60 || new_mtu > 9000) + return -EINVAL; + + if (netif_running(dev)) + bnxt_close_nic(bp, false, false); + + dev->mtu = new_mtu; + bnxt_set_ring_params(bp); + + if (netif_running(dev)) + return bnxt_open_nic(bp, false, false); + + return 0; +} + +static int bnxt_setup_tc(struct net_device *dev, u8 tc) +{ + struct bnxt *bp = netdev_priv(dev); + + if (tc > bp->max_tc) { + netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n", + tc, bp->max_tc); + return -EINVAL; + } + + if (netdev_get_num_tc(dev) == tc) + return 0; + + if (tc) { + int max_rx_rings, max_tx_rings; + + bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings); + if (bp->tx_nr_rings_per_tc * tc > max_tx_rings) + return -ENOMEM; + } + + /* Needs to close the device and do hw resource re-allocations */ + if (netif_running(bp->dev)) + bnxt_close_nic(bp, true, false); + + if (tc) { + bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc; + netdev_set_num_tc(dev, tc); + } else { + bp->tx_nr_rings = bp->tx_nr_rings_per_tc; + netdev_reset_tc(dev); + } + bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings); + bp->num_stat_ctxs = bp->cp_nr_rings; + + if (netif_running(bp->dev)) + return bnxt_open_nic(bp, true, false); + + return 0; +} + +#ifdef CONFIG_RFS_ACCEL +static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, + struct bnxt_ntuple_filter *f2) +{ + struct flow_keys *keys1 = &f1->fkeys; + struct flow_keys *keys2 = &f2->fkeys; + + if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src && + keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst && + keys1->ports.ports == keys2->ports.ports && + keys1->basic.ip_proto == keys2->basic.ip_proto && + keys1->basic.n_proto == keys2->basic.n_proto && + ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr)) + return true; + + return false; +} + +static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, + u16 rxq_index, u32 flow_id) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_ntuple_filter *fltr, *new_fltr; + struct flow_keys *fkeys; + struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); + int rc = 0, idx; + struct hlist_head *head; + + if (skb->encapsulation) + return -EPROTONOSUPPORT; + + new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC); + if (!new_fltr) + return -ENOMEM; + + fkeys = &new_fltr->fkeys; + if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) { + rc = -EPROTONOSUPPORT; + goto err_free; + } + + if ((fkeys->basic.n_proto != htons(ETH_P_IP)) || + ((fkeys->basic.ip_proto != IPPROTO_TCP) && + (fkeys->basic.ip_proto != IPPROTO_UDP))) { + rc = -EPROTONOSUPPORT; + goto err_free; + } + + memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN); + + idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK; + head = &bp->ntp_fltr_hash_tbl[idx]; + rcu_read_lock(); + hlist_for_each_entry_rcu(fltr, head, hash) { + if (bnxt_fltr_match(fltr, new_fltr)) { + rcu_read_unlock(); + rc = 0; + goto err_free; + } + } + rcu_read_unlock(); + + spin_lock_bh(&bp->ntp_fltr_lock); + new_fltr->sw_id = bitmap_find_free_region(bp->ntp_fltr_bmap, + BNXT_NTP_FLTR_MAX_FLTR, 0); + if (new_fltr->sw_id < 0) { + spin_unlock_bh(&bp->ntp_fltr_lock); + rc = -ENOMEM; + goto err_free; + } + + new_fltr->flow_id = flow_id; + new_fltr->rxq = rxq_index; + hlist_add_head_rcu(&new_fltr->hash, 
head); + bp->ntp_fltr_count++; + spin_unlock_bh(&bp->ntp_fltr_lock); + + set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); + schedule_work(&bp->sp_task); + + return new_fltr->sw_id; + +err_free: + kfree(new_fltr); + return rc; +} + +static void bnxt_cfg_ntp_filters(struct bnxt *bp) +{ + int i; + + for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { + struct hlist_head *head; + struct hlist_node *tmp; + struct bnxt_ntuple_filter *fltr; + int rc; + + head = &bp->ntp_fltr_hash_tbl[i]; + hlist_for_each_entry_safe(fltr, tmp, head, hash) { + bool del = false; + + if (test_bit(BNXT_FLTR_VALID, &fltr->state)) { + if (rps_may_expire_flow(bp->dev, fltr->rxq, + fltr->flow_id, + fltr->sw_id)) { + bnxt_hwrm_cfa_ntuple_filter_free(bp, + fltr); + del = true; + } + } else { + rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, + fltr); + if (rc) + del = true; + else + set_bit(BNXT_FLTR_VALID, &fltr->state); + } + + if (del) { + spin_lock_bh(&bp->ntp_fltr_lock); + hlist_del_rcu(&fltr->hash); + bp->ntp_fltr_count--; + spin_unlock_bh(&bp->ntp_fltr_lock); + synchronize_rcu(); + clear_bit(fltr->sw_id, bp->ntp_fltr_bmap); + kfree(fltr); + } + } + } +} + +#else + +static void bnxt_cfg_ntp_filters(struct bnxt *bp) +{ +} + +#endif /* CONFIG_RFS_ACCEL */ + +static void bnxt_add_vxlan_port(struct net_device *dev, sa_family_t sa_family, + __be16 port) +{ + struct bnxt *bp = netdev_priv(dev); + + if (!netif_running(dev)) + return; + + if (sa_family != AF_INET6 && sa_family != AF_INET) + return; + + if (bp->vxlan_port_cnt && bp->vxlan_port != port) + return; + + bp->vxlan_port_cnt++; + if (bp->vxlan_port_cnt == 1) { + bp->vxlan_port = port; + set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); + schedule_work(&bp->sp_task); + } +} + +static void bnxt_del_vxlan_port(struct net_device *dev, sa_family_t sa_family, + __be16 port) +{ + struct bnxt *bp = netdev_priv(dev); + + if (!netif_running(dev)) + return; + + if (sa_family != AF_INET6 && sa_family != AF_INET) + return; + + if (bp->vxlan_port_cnt && bp->vxlan_port == port) { + bp->vxlan_port_cnt--; + + if (bp->vxlan_port_cnt == 0) { + set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event); + schedule_work(&bp->sp_task); + } + } +} + +static const struct net_device_ops bnxt_netdev_ops = { + .ndo_open = bnxt_open, + .ndo_start_xmit = bnxt_start_xmit, + .ndo_stop = bnxt_close, + .ndo_get_stats64 = bnxt_get_stats64, + .ndo_set_rx_mode = bnxt_set_rx_mode, + .ndo_do_ioctl = bnxt_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = bnxt_change_mac_addr, + .ndo_change_mtu = bnxt_change_mtu, + .ndo_fix_features = bnxt_fix_features, + .ndo_set_features = bnxt_set_features, + .ndo_tx_timeout = bnxt_tx_timeout, +#ifdef CONFIG_BNXT_SRIOV + .ndo_get_vf_config = bnxt_get_vf_config, + .ndo_set_vf_mac = bnxt_set_vf_mac, + .ndo_set_vf_vlan = bnxt_set_vf_vlan, + .ndo_set_vf_rate = bnxt_set_vf_bw, + .ndo_set_vf_link_state = bnxt_set_vf_link_state, + .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, +#endif +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = bnxt_poll_controller, +#endif + .ndo_setup_tc = bnxt_setup_tc, +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = bnxt_rx_flow_steer, +#endif + .ndo_add_vxlan_port = bnxt_add_vxlan_port, + .ndo_del_vxlan_port = bnxt_del_vxlan_port, +#ifdef CONFIG_NET_RX_BUSY_POLL + .ndo_busy_poll = bnxt_busy_poll, +#endif +}; + +static void bnxt_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnxt *bp = netdev_priv(dev); + + if (BNXT_PF(bp)) + bnxt_sriov_disable(bp); + + unregister_netdev(dev); + 
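+	/* With the netdev unregistered, no new sp_task events can be
+	 * queued; flush any still in flight before freeing resources.
+	 */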
cancel_work_sync(&bp->sp_task);
+	bp->sp_event = 0;
+
+	bnxt_free_hwrm_resources(bp);
+	pci_iounmap(pdev, bp->bar2);
+	pci_iounmap(pdev, bp->bar1);
+	pci_iounmap(pdev, bp->bar0);
+	free_netdev(dev);
+
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+static int bnxt_probe_phy(struct bnxt *bp)
+{
+	int rc = 0;
+	struct bnxt_link_info *link_info = &bp->link_info;
+	char phy_ver[PHY_VER_STR_LEN];
+
+	rc = bnxt_update_link(bp, false);
+	if (rc) {
+		netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
+			   rc);
+		return rc;
+	}
+
+	/* initialize the ethtool settings copy with NVM settings */
+	if (BNXT_AUTO_MODE(link_info->auto_mode))
+		link_info->autoneg |= BNXT_AUTONEG_SPEED;
+
+	if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) {
+		if (link_info->auto_pause_setting == BNXT_LINK_PAUSE_BOTH)
+			link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+		link_info->req_flow_ctrl = link_info->auto_pause_setting;
+	} else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) {
+		link_info->req_flow_ctrl = link_info->force_pause_setting;
+	}
+	link_info->req_duplex = link_info->duplex_setting;
+	if (link_info->autoneg & BNXT_AUTONEG_SPEED)
+		link_info->req_link_speed = link_info->auto_link_speed;
+	else
+		link_info->req_link_speed = link_info->force_link_speed;
+	link_info->advertising = link_info->auto_link_speeds;
+	snprintf(phy_ver, PHY_VER_STR_LEN, " ph %d.%d.%d",
+		 link_info->phy_ver[0],
+		 link_info->phy_ver[1],
+		 link_info->phy_ver[2]);
+	strcat(bp->fw_ver_str, phy_ver);
+	return rc;
+}
+
+static int bnxt_get_max_irq(struct pci_dev *pdev)
+{
+	u16 ctrl;
+
+	if (!pdev->msix_cap)
+		return 1;
+
+	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+	return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
+}
+
+void bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx)
+{
+	int max_rings = 0;
+
+	if (BNXT_PF(bp)) {
+		*max_tx = bp->pf.max_pf_tx_rings;
+		*max_rx = bp->pf.max_pf_rx_rings;
+		max_rings = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
+		max_rings = min_t(int, max_rings, bp->pf.max_stat_ctxs);
+	} else {
+#ifdef CONFIG_BNXT_SRIOV
+		*max_tx = bp->vf.max_tx_rings;
+		*max_rx = bp->vf.max_rx_rings;
+		max_rings = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
+		max_rings = min_t(int, max_rings, bp->vf.max_stat_ctxs);
+#endif
+	}
+	if (bp->flags & BNXT_FLAG_AGG_RINGS)
+		*max_rx >>= 1;
+
+	*max_rx = min_t(int, *max_rx, max_rings);
+	*max_tx = min_t(int, *max_tx, max_rings);
+}
+
+static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static int version_printed;
+	struct net_device *dev;
+	struct bnxt *bp;
+	int rc, max_rx_rings, max_tx_rings, max_irqs, dflt_rings;
+
+	if (version_printed++ == 0)
+		pr_info("%s", version);
+
+	max_irqs = bnxt_get_max_irq(pdev);
+	dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
+	if (!dev)
+		return -ENOMEM;
+
+	bp = netdev_priv(dev);
+
+	if (bnxt_vf_pciid(ent->driver_data))
+		bp->flags |= BNXT_FLAG_VF;
+
+	if (pdev->msix_cap) {
+		bp->flags |= BNXT_FLAG_MSIX_CAP;
+		if (BNXT_PF(bp))
+			bp->flags |= BNXT_FLAG_RFS;
+	}
+
+	rc = bnxt_init_board(pdev, dev);
+	if (rc < 0)
+		goto init_err_free;
+
+	dev->netdev_ops = &bnxt_netdev_ops;
+	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
+	dev->ethtool_ops = &bnxt_ethtool_ops;
+
+	pci_set_drvdata(pdev, dev);
+
+	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
+			   NETIF_F_TSO | NETIF_F_TSO6 |
+			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
+			   NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT |
+			   NETIF_F_RXHASH |
+			   NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO;
+
+	if
(bp->flags & BNXT_FLAG_RFS) + dev->hw_features |= NETIF_F_NTUPLE; + + dev->hw_enc_features = + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | + NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | + NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT; + dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA; + dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX; + dev->features |= dev->hw_features | NETIF_F_HIGHDMA; + dev->priv_flags |= IFF_UNICAST_FLT; + +#ifdef CONFIG_BNXT_SRIOV + init_waitqueue_head(&bp->sriov_cfg_wait); +#endif + rc = bnxt_alloc_hwrm_resources(bp); + if (rc) + goto init_err; + + mutex_init(&bp->hwrm_cmd_lock); + bnxt_hwrm_ver_get(bp); + + rc = bnxt_hwrm_func_drv_rgtr(bp); + if (rc) + goto init_err; + + /* Get the MAX capabilities for this function */ + rc = bnxt_hwrm_func_qcaps(bp); + if (rc) { + netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", + rc); + rc = -1; + goto init_err; + } + + rc = bnxt_hwrm_queue_qportcfg(bp); + if (rc) { + netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n", + rc); + rc = -1; + goto init_err; + } + + bnxt_set_tpa_flags(bp); + bnxt_set_ring_params(bp); + dflt_rings = netif_get_num_default_rss_queues(); + if (BNXT_PF(bp)) { + memcpy(dev->dev_addr, bp->pf.mac_addr, ETH_ALEN); + bp->pf.max_irqs = max_irqs; + } else { +#if defined(CONFIG_BNXT_SRIOV) + memcpy(dev->dev_addr, bp->vf.mac_addr, ETH_ALEN); + bp->vf.max_irqs = max_irqs; +#endif + } + bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings); + bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); + bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); + bp->tx_nr_rings = bp->tx_nr_rings_per_tc; + bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings); + bp->num_stat_ctxs = bp->cp_nr_rings; + + if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX) + bp->flags |= BNXT_FLAG_STRIP_VLAN; + + rc = bnxt_probe_phy(bp); + if (rc) + goto init_err; + + rc = register_netdev(dev); + if (rc) + goto init_err; + + netdev_info(dev, "%s found at mem %lx, node addr %pM\n", + board_info[ent->driver_data].name, + (long)pci_resource_start(pdev, 0), dev->dev_addr); + + return 0; + +init_err: + pci_iounmap(pdev, bp->bar0); + pci_release_regions(pdev); + pci_disable_device(pdev); + +init_err_free: + free_netdev(dev); + return rc; +} + +static struct pci_driver bnxt_pci_driver = { + .name = DRV_MODULE_NAME, + .id_table = bnxt_pci_tbl, + .probe = bnxt_init_one, + .remove = bnxt_remove_one, +#if defined(CONFIG_BNXT_SRIOV) + .sriov_configure = bnxt_sriov_configure, +#endif +}; + +module_pci_driver(bnxt_pci_driver); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h new file mode 100644 index 000000000000..5ee3cc28a421 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -0,0 +1,1081 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2014-2015 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#ifndef BNXT_H +#define BNXT_H + +#define DRV_MODULE_NAME "bnxt_en" +#define DRV_MODULE_VERSION "0.1.24" + +#define DRV_VER_MAJ 0 +#define DRV_VER_MIN 1 +#define DRV_VER_UPD 24 + +struct tx_bd { + __le32 tx_bd_len_flags_type; + #define TX_BD_TYPE (0x3f << 0) + #define TX_BD_TYPE_SHORT_TX_BD (0x00 << 0) + #define TX_BD_TYPE_LONG_TX_BD (0x10 << 0) + #define TX_BD_FLAGS_PACKET_END (1 << 6) + #define TX_BD_FLAGS_NO_CMPL (1 << 7) + #define TX_BD_FLAGS_BD_CNT (0x1f << 8) + #define TX_BD_FLAGS_BD_CNT_SHIFT 8 + #define TX_BD_FLAGS_LHINT (3 << 13) + #define TX_BD_FLAGS_LHINT_SHIFT 13 + #define TX_BD_FLAGS_LHINT_512_AND_SMALLER (0 << 13) + #define TX_BD_FLAGS_LHINT_512_TO_1023 (1 << 13) + #define TX_BD_FLAGS_LHINT_1024_TO_2047 (2 << 13) + #define TX_BD_FLAGS_LHINT_2048_AND_LARGER (3 << 13) + #define TX_BD_FLAGS_COAL_NOW (1 << 15) + #define TX_BD_LEN (0xffff << 16) + #define TX_BD_LEN_SHIFT 16 + + u32 tx_bd_opaque; + __le64 tx_bd_haddr; +} __packed; + +struct tx_bd_ext { + __le32 tx_bd_hsize_lflags; + #define TX_BD_FLAGS_TCP_UDP_CHKSUM (1 << 0) + #define TX_BD_FLAGS_IP_CKSUM (1 << 1) + #define TX_BD_FLAGS_NO_CRC (1 << 2) + #define TX_BD_FLAGS_STAMP (1 << 3) + #define TX_BD_FLAGS_T_IP_CHKSUM (1 << 4) + #define TX_BD_FLAGS_LSO (1 << 5) + #define TX_BD_FLAGS_IPID_FMT (1 << 6) + #define TX_BD_FLAGS_T_IPID (1 << 7) + #define TX_BD_HSIZE (0xff << 16) + #define TX_BD_HSIZE_SHIFT 16 + + __le32 tx_bd_mss; + __le32 tx_bd_cfa_action; + #define TX_BD_CFA_ACTION (0xffff << 16) + #define TX_BD_CFA_ACTION_SHIFT 16 + + __le32 tx_bd_cfa_meta; + #define TX_BD_CFA_META_MASK 0xfffffff + #define TX_BD_CFA_META_VID_MASK 0xfff + #define TX_BD_CFA_META_PRI_MASK (0xf << 12) + #define TX_BD_CFA_META_PRI_SHIFT 12 + #define TX_BD_CFA_META_TPID_MASK (3 << 16) + #define TX_BD_CFA_META_TPID_SHIFT 16 + #define TX_BD_CFA_META_KEY (0xf << 28) + #define TX_BD_CFA_META_KEY_SHIFT 28 + #define TX_BD_CFA_META_KEY_VLAN (1 << 28) +}; + +struct rx_bd { + __le32 rx_bd_len_flags_type; + #define RX_BD_TYPE (0x3f << 0) + #define RX_BD_TYPE_RX_PACKET_BD 0x4 + #define RX_BD_TYPE_RX_BUFFER_BD 0x5 + #define RX_BD_TYPE_RX_AGG_BD 0x6 + #define RX_BD_TYPE_16B_BD_SIZE (0 << 4) + #define RX_BD_TYPE_32B_BD_SIZE (1 << 4) + #define RX_BD_TYPE_48B_BD_SIZE (2 << 4) + #define RX_BD_TYPE_64B_BD_SIZE (3 << 4) + #define RX_BD_FLAGS_SOP (1 << 6) + #define RX_BD_FLAGS_EOP (1 << 7) + #define RX_BD_FLAGS_BUFFERS (3 << 8) + #define RX_BD_FLAGS_1_BUFFER_PACKET (0 << 8) + #define RX_BD_FLAGS_2_BUFFER_PACKET (1 << 8) + #define RX_BD_FLAGS_3_BUFFER_PACKET (2 << 8) + #define RX_BD_FLAGS_4_BUFFER_PACKET (3 << 8) + #define RX_BD_LEN (0xffff << 16) + #define RX_BD_LEN_SHIFT 16 + + u32 rx_bd_opaque; + __le64 rx_bd_haddr; +}; + +struct tx_cmp { + __le32 tx_cmp_flags_type; + #define CMP_TYPE (0x3f << 0) + #define CMP_TYPE_TX_L2_CMP 0 + #define CMP_TYPE_RX_L2_CMP 17 + #define CMP_TYPE_RX_AGG_CMP 18 + #define CMP_TYPE_RX_L2_TPA_START_CMP 19 + #define CMP_TYPE_RX_L2_TPA_END_CMP 21 + #define CMP_TYPE_STATUS_CMP 32 + #define CMP_TYPE_REMOTE_DRIVER_REQ 34 + #define CMP_TYPE_REMOTE_DRIVER_RESP 36 + #define CMP_TYPE_ERROR_STATUS 48 + #define CMPL_BASE_TYPE_STAT_EJECT (0x1aUL << 0) + #define CMPL_BASE_TYPE_HWRM_DONE (0x20UL << 0) + #define CMPL_BASE_TYPE_HWRM_FWD_REQ (0x22UL << 0) + #define CMPL_BASE_TYPE_HWRM_FWD_RESP (0x24UL << 0) + #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) + + #define TX_CMP_FLAGS_ERROR (1 << 6) + #define TX_CMP_FLAGS_PUSH (1 << 7) + + u32 tx_cmp_opaque; + __le32 tx_cmp_errors_v; + #define TX_CMP_V (1 << 0) + #define TX_CMP_ERRORS_BUFFER_ERROR (7 
<< 1) + #define TX_CMP_ERRORS_BUFFER_ERROR_NO_ERROR 0 + #define TX_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT 2 + #define TX_CMP_ERRORS_BUFFER_ERROR_INVALID_STAG 4 + #define TX_CMP_ERRORS_BUFFER_ERROR_STAG_BOUNDS 5 + #define TX_CMP_ERRORS_ZERO_LENGTH_PKT (1 << 4) + #define TX_CMP_ERRORS_EXCESSIVE_BD_LEN (1 << 5) + #define TX_CMP_ERRORS_DMA_ERROR (1 << 6) + #define TX_CMP_ERRORS_HINT_TOO_SHORT (1 << 7) + + __le32 tx_cmp_unused_3; +}; + +struct rx_cmp { + __le32 rx_cmp_len_flags_type; + #define RX_CMP_CMP_TYPE (0x3f << 0) + #define RX_CMP_FLAGS_ERROR (1 << 6) + #define RX_CMP_FLAGS_PLACEMENT (7 << 7) + #define RX_CMP_FLAGS_RSS_VALID (1 << 10) + #define RX_CMP_FLAGS_UNUSED (1 << 11) + #define RX_CMP_FLAGS_ITYPES_SHIFT 12 + #define RX_CMP_FLAGS_ITYPE_UNKNOWN (0 << 12) + #define RX_CMP_FLAGS_ITYPE_IP (1 << 12) + #define RX_CMP_FLAGS_ITYPE_TCP (2 << 12) + #define RX_CMP_FLAGS_ITYPE_UDP (3 << 12) + #define RX_CMP_FLAGS_ITYPE_FCOE (4 << 12) + #define RX_CMP_FLAGS_ITYPE_ROCE (5 << 12) + #define RX_CMP_FLAGS_ITYPE_PTP_WO_TS (8 << 12) + #define RX_CMP_FLAGS_ITYPE_PTP_W_TS (9 << 12) + #define RX_CMP_LEN (0xffff << 16) + #define RX_CMP_LEN_SHIFT 16 + + u32 rx_cmp_opaque; + __le32 rx_cmp_misc_v1; + #define RX_CMP_V1 (1 << 0) + #define RX_CMP_AGG_BUFS (0x1f << 1) + #define RX_CMP_AGG_BUFS_SHIFT 1 + #define RX_CMP_RSS_HASH_TYPE (0x7f << 9) + #define RX_CMP_RSS_HASH_TYPE_SHIFT 9 + #define RX_CMP_PAYLOAD_OFFSET (0xff << 16) + #define RX_CMP_PAYLOAD_OFFSET_SHIFT 16 + + __le32 rx_cmp_rss_hash; +}; + +#define RX_CMP_HASH_VALID(rxcmp) \ + ((rxcmp)->rx_cmp_len_flags_type & cpu_to_le32(RX_CMP_FLAGS_RSS_VALID)) + +#define RX_CMP_HASH_TYPE(rxcmp) \ + ((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\ + RX_CMP_RSS_HASH_TYPE_SHIFT) + +struct rx_cmp_ext { + __le32 rx_cmp_flags2; + #define RX_CMP_FLAGS2_IP_CS_CALC 0x1 + #define RX_CMP_FLAGS2_L4_CS_CALC (0x1 << 1) + #define RX_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2) + #define RX_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3) + #define RX_CMP_FLAGS2_META_FORMAT_VLAN (0x1 << 4) + __le32 rx_cmp_meta_data; + #define RX_CMP_FLAGS2_METADATA_VID_MASK 0xfff + #define RX_CMP_FLAGS2_METADATA_TPID_MASK 0xffff0000 + #define RX_CMP_FLAGS2_METADATA_TPID_SFT 16 + __le32 rx_cmp_cfa_code_errors_v2; + #define RX_CMP_V (1 << 0) + #define RX_CMPL_ERRORS_MASK (0x7fff << 1) + #define RX_CMPL_ERRORS_SFT 1 + #define RX_CMPL_ERRORS_BUFFER_ERROR_MASK (0x7 << 1) + #define RX_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1) + #define RX_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (0x1 << 1) + #define RX_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP (0x2 << 1) + #define RX_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1) + #define RX_CMPL_ERRORS_IP_CS_ERROR (0x1 << 4) + #define RX_CMPL_ERRORS_L4_CS_ERROR (0x1 << 5) + #define RX_CMPL_ERRORS_T_IP_CS_ERROR (0x1 << 6) + #define RX_CMPL_ERRORS_T_L4_CS_ERROR (0x1 << 7) + #define RX_CMPL_ERRORS_CRC_ERROR (0x1 << 8) + #define RX_CMPL_ERRORS_T_PKT_ERROR_MASK (0x7 << 9) + #define RX_CMPL_ERRORS_T_PKT_ERROR_NO_ERROR (0x0 << 9) + #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION (0x1 << 9) + #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN (0x2 << 9) + #define RX_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR (0x3 << 9) + #define RX_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR (0x4 << 9) + #define RX_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR (0x5 << 9) + #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL (0x6 << 9) + #define RX_CMPL_ERRORS_PKT_ERROR_MASK (0xf << 12) + #define RX_CMPL_ERRORS_PKT_ERROR_NO_ERROR (0x0 << 12) + #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION (0x1 << 12) + #define
RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN (0x2 << 12) + #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL (0x3 << 12) + #define RX_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR (0x4 << 12) + #define RX_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR (0x5 << 12) + #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN (0x6 << 12) + #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL (0x7 << 12) + #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN (0x8 << 12) + + #define RX_CMPL_CFA_CODE_MASK (0xffff << 16) + #define RX_CMPL_CFA_CODE_SFT 16 + + __le32 rx_cmp_unused3; +}; + +#define RX_CMP_L2_ERRORS \ + cpu_to_le32(RX_CMPL_ERRORS_BUFFER_ERROR_MASK | RX_CMPL_ERRORS_CRC_ERROR) + +#define RX_CMP_L4_CS_BITS \ + (cpu_to_le32(RX_CMP_FLAGS2_L4_CS_CALC | RX_CMP_FLAGS2_T_L4_CS_CALC)) + +#define RX_CMP_L4_CS_ERR_BITS \ + (cpu_to_le32(RX_CMPL_ERRORS_L4_CS_ERROR | RX_CMPL_ERRORS_T_L4_CS_ERROR)) + +#define RX_CMP_L4_CS_OK(rxcmp1) \ + (((rxcmp1)->rx_cmp_flags2 & RX_CMP_L4_CS_BITS) && \ + !((rxcmp1)->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS)) + +#define RX_CMP_ENCAP(rxcmp1) \ + ((le32_to_cpu((rxcmp1)->rx_cmp_flags2) & \ + RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3) + +struct rx_agg_cmp { + __le32 rx_agg_cmp_len_flags_type; + #define RX_AGG_CMP_TYPE (0x3f << 0) + #define RX_AGG_CMP_LEN (0xffff << 16) + #define RX_AGG_CMP_LEN_SHIFT 16 + u32 rx_agg_cmp_opaque; + __le32 rx_agg_cmp_v; + #define RX_AGG_CMP_V (1 << 0) + __le32 rx_agg_cmp_unused; +}; + +struct rx_tpa_start_cmp { + __le32 rx_tpa_start_cmp_len_flags_type; + #define RX_TPA_START_CMP_TYPE (0x3f << 0) + #define RX_TPA_START_CMP_FLAGS (0x3ff << 6) + #define RX_TPA_START_CMP_FLAGS_SHIFT 6 + #define RX_TPA_START_CMP_FLAGS_PLACEMENT (0x7 << 7) + #define RX_TPA_START_CMP_FLAGS_PLACEMENT_SHIFT 7 + #define RX_TPA_START_CMP_FLAGS_PLACEMENT_JUMBO (0x1 << 7) + #define RX_TPA_START_CMP_FLAGS_PLACEMENT_HDS (0x2 << 7) + #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_JUMBO (0x5 << 7) + #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_HDS (0x6 << 7) + #define RX_TPA_START_CMP_FLAGS_RSS_VALID (0x1 << 10) + #define RX_TPA_START_CMP_FLAGS_ITYPES (0xf << 12) + #define RX_TPA_START_CMP_FLAGS_ITYPES_SHIFT 12 + #define RX_TPA_START_CMP_FLAGS_ITYPE_TCP (0x2 << 12) + #define RX_TPA_START_CMP_LEN (0xffff << 16) + #define RX_TPA_START_CMP_LEN_SHIFT 16 + + u32 rx_tpa_start_cmp_opaque; + __le32 rx_tpa_start_cmp_misc_v1; + #define RX_TPA_START_CMP_V1 (0x1 << 0) + #define RX_TPA_START_CMP_RSS_HASH_TYPE (0x7f << 9) + #define RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT 9 + #define RX_TPA_START_CMP_AGG_ID (0x7f << 25) + #define RX_TPA_START_CMP_AGG_ID_SHIFT 25 + + __le32 rx_tpa_start_cmp_rss_hash; +}; + +#define TPA_START_HASH_VALID(rx_tpa_start) \ + ((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type & \ + cpu_to_le32(RX_TPA_START_CMP_FLAGS_RSS_VALID)) + +#define TPA_START_HASH_TYPE(rx_tpa_start) \ + ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \ + RX_TPA_START_CMP_RSS_HASH_TYPE) >> \ + RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT) + +#define TPA_START_AGG_ID(rx_tpa_start) \ + ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \ + RX_TPA_START_CMP_AGG_ID) >> RX_TPA_START_CMP_AGG_ID_SHIFT) + +struct rx_tpa_start_cmp_ext { + __le32 rx_tpa_start_cmp_flags2; + #define RX_TPA_START_CMP_FLAGS2_IP_CS_CALC (0x1 << 0) + #define RX_TPA_START_CMP_FLAGS2_L4_CS_CALC (0x1 << 1) + #define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2) + #define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3) + + __le32 rx_tpa_start_cmp_metadata; + __le32 rx_tpa_start_cmp_cfa_code_v2; + #define RX_TPA_START_CMP_V2 (0x1 << 0) + #define 
RX_TPA_START_CMP_CFA_CODE (0xffff << 16) + #define RX_TPA_START_CMPL_CFA_CODE_SHIFT 16 + __le32 rx_tpa_start_cmp_unused5; +}; + +struct rx_tpa_end_cmp { + __le32 rx_tpa_end_cmp_len_flags_type; + #define RX_TPA_END_CMP_TYPE (0x3f << 0) + #define RX_TPA_END_CMP_FLAGS (0x3ff << 6) + #define RX_TPA_END_CMP_FLAGS_SHIFT 6 + #define RX_TPA_END_CMP_FLAGS_PLACEMENT (0x7 << 7) + #define RX_TPA_END_CMP_FLAGS_PLACEMENT_SHIFT 7 + #define RX_TPA_END_CMP_FLAGS_PLACEMENT_JUMBO (0x1 << 7) + #define RX_TPA_END_CMP_FLAGS_PLACEMENT_HDS (0x2 << 7) + #define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO (0x5 << 7) + #define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS (0x6 << 7) + #define RX_TPA_END_CMP_FLAGS_RSS_VALID (0x1 << 10) + #define RX_TPA_END_CMP_FLAGS_ITYPES (0xf << 12) + #define RX_TPA_END_CMP_FLAGS_ITYPES_SHIFT 12 + #define RX_TPA_END_CMP_FLAGS_ITYPE_TCP (0x2 << 12) + #define RX_TPA_END_CMP_LEN (0xffff << 16) + #define RX_TPA_END_CMP_LEN_SHIFT 16 + + u32 rx_tpa_end_cmp_opaque; + __le32 rx_tpa_end_cmp_misc_v1; + #define RX_TPA_END_CMP_V1 (0x1 << 0) + #define RX_TPA_END_CMP_AGG_BUFS (0x3f << 1) + #define RX_TPA_END_CMP_AGG_BUFS_SHIFT 1 + #define RX_TPA_END_CMP_TPA_SEGS (0xff << 8) + #define RX_TPA_END_CMP_TPA_SEGS_SHIFT 8 + #define RX_TPA_END_CMP_PAYLOAD_OFFSET (0xff << 16) + #define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT 16 + #define RX_TPA_END_CMP_AGG_ID (0x7f << 25) + #define RX_TPA_END_CMP_AGG_ID_SHIFT 25 + + __le32 rx_tpa_end_cmp_tsdelta; + #define RX_TPA_END_GRO_TS (0x1 << 31) +}; + +#define TPA_END_AGG_ID(rx_tpa_end) \ + ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \ + RX_TPA_END_CMP_AGG_ID) >> RX_TPA_END_CMP_AGG_ID_SHIFT) + +#define TPA_END_TPA_SEGS(rx_tpa_end) \ + ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \ + RX_TPA_END_CMP_TPA_SEGS) >> RX_TPA_END_CMP_TPA_SEGS_SHIFT) + +#define RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO \ + cpu_to_le32(RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO & \ + RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS) + +#define TPA_END_GRO(rx_tpa_end) \ + ((rx_tpa_end)->rx_tpa_end_cmp_len_flags_type & \ + RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO) + +#define TPA_END_GRO_TS(rx_tpa_end) \ + ((rx_tpa_end)->rx_tpa_end_cmp_tsdelta & cpu_to_le32(RX_TPA_END_GRO_TS)) + +struct rx_tpa_end_cmp_ext { + __le32 rx_tpa_end_cmp_dup_acks; + #define RX_TPA_END_CMP_TPA_DUP_ACKS (0xf << 0) + + __le32 rx_tpa_end_cmp_seg_len; + #define RX_TPA_END_CMP_TPA_SEG_LEN (0xffff << 0) + + __le32 rx_tpa_end_cmp_errors_v2; + #define RX_TPA_END_CMP_V2 (0x1 << 0) + #define RX_TPA_END_CMP_ERRORS (0x7fff << 1) + #define RX_TPA_END_CMPL_ERRORS_SHIFT 1 + + u32 rx_tpa_end_cmp_start_opaque; +}; + +#define DB_IDX_MASK 0xffffff +#define DB_IDX_VALID (0x1 << 26) +#define DB_IRQ_DIS (0x1 << 27) +#define DB_KEY_TX (0x0 << 28) +#define DB_KEY_RX (0x1 << 28) +#define DB_KEY_CP (0x2 << 28) +#define DB_KEY_ST (0x3 << 28) +#define DB_KEY_TX_PUSH (0x4 << 28) +#define DB_LONG_TX_PUSH (0x2 << 24) + +#define INVALID_HW_RING_ID ((u16)-1) + +#define BNXT_RSS_HASH_TYPE_FLAG_IPV4 0x01 +#define BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 0x02 +#define BNXT_RSS_HASH_TYPE_FLAG_IPV6 0x04 +#define BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6 0x08 + +/* The hardware supports certain page sizes. Use the supported page sizes + * to allocate the rings. 
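+ *
+ * For example (editor's illustration, derived from the #if ladder below):
+ * 4K kernel pages (PAGE_SHIFT == 12) yield BNXT_PAGE_SHIFT == 12, 8K pages
+ * use PAGE_SHIFT itself, 16K and 32K pages fall back to 13, and 64K pages
+ * cap at 16, so BNXT_PAGE_SIZE is always a ring page size the chip supports.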
+ */ +#if (PAGE_SHIFT < 12) +#define BNXT_PAGE_SHIFT 12 +#elif (PAGE_SHIFT <= 13) +#define BNXT_PAGE_SHIFT PAGE_SHIFT +#elif (PAGE_SHIFT < 16) +#define BNXT_PAGE_SHIFT 13 +#else +#define BNXT_PAGE_SHIFT 16 +#endif + +#define BNXT_PAGE_SIZE (1 << BNXT_PAGE_SHIFT) + +#define BNXT_MIN_PKT_SIZE 45 + +#define BNXT_NUM_TESTS(bp) 0 + +#define BNXT_DEFAULT_RX_RING_SIZE 1023 +#define BNXT_DEFAULT_TX_RING_SIZE 512 + +#define MAX_TPA 64 + +#define MAX_RX_PAGES 8 +#define MAX_RX_AGG_PAGES 32 +#define MAX_TX_PAGES 8 +#define MAX_CP_PAGES 64 + +#define RX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct rx_bd)) +#define TX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_bd)) +#define CP_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_cmp)) + +#define SW_RXBD_RING_SIZE (sizeof(struct bnxt_sw_rx_bd) * RX_DESC_CNT) +#define HW_RXBD_RING_SIZE (sizeof(struct rx_bd) * RX_DESC_CNT) + +#define SW_RXBD_AGG_RING_SIZE (sizeof(struct bnxt_sw_rx_agg_bd) * RX_DESC_CNT) + +#define SW_TXBD_RING_SIZE (sizeof(struct bnxt_sw_tx_bd) * TX_DESC_CNT) +#define HW_TXBD_RING_SIZE (sizeof(struct tx_bd) * TX_DESC_CNT) + +#define HW_CMPD_RING_SIZE (sizeof(struct tx_cmp) * CP_DESC_CNT) + +#define BNXT_MAX_RX_DESC_CNT (RX_DESC_CNT * MAX_RX_PAGES - 1) +#define BNXT_MAX_RX_JUM_DESC_CNT (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1) +#define BNXT_MAX_TX_DESC_CNT (TX_DESC_CNT * MAX_TX_PAGES - 1) + +#define RX_RING(x) (((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4)) +#define RX_IDX(x) ((x) & (RX_DESC_CNT - 1)) + +#define TX_RING(x) (((x) & ~(TX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4)) +#define TX_IDX(x) ((x) & (TX_DESC_CNT - 1)) + +#define CP_RING(x) (((x) & ~(CP_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4)) +#define CP_IDX(x) ((x) & (CP_DESC_CNT - 1)) + +#define TX_CMP_VALID(txcmp, raw_cons) \ + (!!((txcmp)->tx_cmp_errors_v & cpu_to_le32(TX_CMP_V)) == \ + !((raw_cons) & bp->cp_bit)) + +#define RX_CMP_VALID(rxcmp1, raw_cons) \ + (!!((rxcmp1)->rx_cmp_cfa_code_errors_v2 & cpu_to_le32(RX_CMP_V)) ==\ + !((raw_cons) & bp->cp_bit)) + +#define RX_AGG_CMP_VALID(agg, raw_cons) \ + (!!((agg)->rx_agg_cmp_v & cpu_to_le32(RX_AGG_CMP_V)) == \ + !((raw_cons) & bp->cp_bit)) + +#define TX_CMP_TYPE(txcmp) \ + (le32_to_cpu((txcmp)->tx_cmp_flags_type) & CMP_TYPE) + +#define RX_CMP_TYPE(rxcmp) \ + (le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_CMP_TYPE) + +#define NEXT_RX(idx) (((idx) + 1) & bp->rx_ring_mask) + +#define NEXT_RX_AGG(idx) (((idx) + 1) & bp->rx_agg_ring_mask) + +#define NEXT_TX(idx) (((idx) + 1) & bp->tx_ring_mask) + +#define ADV_RAW_CMP(idx, n) ((idx) + (n)) +#define NEXT_RAW_CMP(idx) ADV_RAW_CMP(idx, 1) +#define RING_CMP(idx) ((idx) & bp->cp_ring_mask) +#define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1)) + +#define HWRM_CMD_TIMEOUT 500 +#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4) +#define HWRM_RESP_ERR_CODE_MASK 0xffff +#define HWRM_RESP_LEN_MASK 0xffff0000 +#define HWRM_RESP_LEN_SFT 16 +#define HWRM_RESP_VALID_MASK 0xff000000 +#define BNXT_HWRM_REQ_MAX_SIZE 128 +#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \ + BNXT_HWRM_REQ_MAX_SIZE) + +struct bnxt_sw_tx_bd { + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(mapping); + u8 is_gso; + u8 is_push; + unsigned short nr_frags; +}; + +struct bnxt_sw_rx_bd { + u8 *data; + DEFINE_DMA_UNMAP_ADDR(mapping); +}; + +struct bnxt_sw_rx_agg_bd { + struct page *page; + dma_addr_t mapping; +}; + +struct bnxt_ring_struct { + int nr_pages; + int page_size; + void **pg_arr; + dma_addr_t *dma_arr; + + __le64 *pg_tbl; + dma_addr_t pg_tbl_map; + + int vmem_size; + void **vmem; + + u16 fw_ring_id; /* Ring id filled by Chimp FW 
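+ * (presumably parked at INVALID_HW_RING_ID,
+ * defined above, until firmware allocates the ring)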
*/ + u8 queue_id; +}; + +struct tx_push_bd { + __le32 doorbell; + struct tx_bd txbd1; + struct tx_bd_ext txbd2; +}; + +struct bnxt_tx_ring_info { + u16 tx_prod; + u16 tx_cons; + void __iomem *tx_doorbell; + + struct tx_bd *tx_desc_ring[MAX_TX_PAGES]; + struct bnxt_sw_tx_bd *tx_buf_ring; + + dma_addr_t tx_desc_mapping[MAX_TX_PAGES]; + + struct tx_push_bd *tx_push; + dma_addr_t tx_push_mapping; + +#define BNXT_DEV_STATE_CLOSING 0x1 + u32 dev_state; + + struct bnxt_ring_struct tx_ring_struct; +}; + +struct bnxt_tpa_info { + u8 *data; + dma_addr_t mapping; + u16 len; + unsigned short gso_type; + u32 flags2; + u32 metadata; + enum pkt_hash_types hash_type; + u32 rss_hash; +}; + +struct bnxt_rx_ring_info { + u16 rx_prod; + u16 rx_agg_prod; + u16 rx_sw_agg_prod; + void __iomem *rx_doorbell; + void __iomem *rx_agg_doorbell; + + struct rx_bd *rx_desc_ring[MAX_RX_PAGES]; + struct bnxt_sw_rx_bd *rx_buf_ring; + + struct rx_bd *rx_agg_desc_ring[MAX_RX_AGG_PAGES]; + struct bnxt_sw_rx_agg_bd *rx_agg_ring; + + unsigned long *rx_agg_bmap; + u16 rx_agg_bmap_size; + + dma_addr_t rx_desc_mapping[MAX_RX_PAGES]; + dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES]; + + struct bnxt_tpa_info *rx_tpa; + + struct bnxt_ring_struct rx_ring_struct; + struct bnxt_ring_struct rx_agg_ring_struct; +}; + +struct bnxt_cp_ring_info { + u32 cp_raw_cons; + void __iomem *cp_doorbell; + + struct tx_cmp *cp_desc_ring[MAX_CP_PAGES]; + + dma_addr_t cp_desc_mapping[MAX_CP_PAGES]; + + struct ctx_hw_stats *hw_stats; + dma_addr_t hw_stats_map; + u32 hw_stats_ctx_id; + u64 rx_l4_csum_errors; + + struct bnxt_ring_struct cp_ring_struct; +}; + +struct bnxt_napi { + struct napi_struct napi; + struct bnxt *bp; + + int index; + struct bnxt_cp_ring_info cp_ring; + struct bnxt_rx_ring_info rx_ring; + struct bnxt_tx_ring_info tx_ring; + +#ifdef CONFIG_NET_RX_BUSY_POLL + atomic_t poll_state; +#endif +}; + +#ifdef CONFIG_NET_RX_BUSY_POLL +enum bnxt_poll_state_t { + BNXT_STATE_IDLE = 0, + BNXT_STATE_NAPI, + BNXT_STATE_POLL, + BNXT_STATE_DISABLE, +}; +#endif + +struct bnxt_irq { + irq_handler_t handler; + unsigned int vector; + u8 requested; + char name[IFNAMSIZ + 2]; +}; + +#define HWRM_RING_ALLOC_TX 0x1 +#define HWRM_RING_ALLOC_RX 0x2 +#define HWRM_RING_ALLOC_AGG 0x4 +#define HWRM_RING_ALLOC_CMPL 0x8 + +#define INVALID_STATS_CTX_ID -1 + +struct hwrm_cmd_req_hdr { +#define HWRM_CMPL_RING_MASK 0xffff0000 +#define HWRM_CMPL_RING_SFT 16 + __le32 cmpl_ring_req_type; +#define HWRM_SEQ_ID_MASK 0xffff +#define HWRM_SEQ_ID_INVALID -1 +#define HWRM_RESP_LEN_OFFSET 4 +#define HWRM_TARGET_FID_MASK 0xffff0000 +#define HWRM_TARGET_FID_SFT 16 + __le32 target_id_seq_id; + __le64 resp_addr; +}; + +struct bnxt_ring_grp_info { + u16 fw_stats_ctx; + u16 fw_grp_id; + u16 rx_fw_ring_id; + u16 agg_fw_ring_id; + u16 cp_fw_ring_id; +}; + +struct bnxt_vnic_info { + u16 fw_vnic_id; /* returned by Chimp during alloc */ + u16 fw_rss_cos_lb_ctx; + u16 fw_l2_ctx_id; +#define BNXT_MAX_UC_ADDRS 4 + __le64 fw_l2_filter_id[BNXT_MAX_UC_ADDRS]; + /* index 0 always dev_addr */ + u16 uc_filter_count; + u8 *uc_list; + + u16 *fw_grp_ids; + u16 hash_type; + dma_addr_t rss_table_dma_addr; + __le16 *rss_table; + dma_addr_t rss_hash_key_dma_addr; + u64 *rss_hash_key; + u32 rx_mask; + + u8 *mc_list; + int mc_list_size; + int mc_list_count; + dma_addr_t mc_list_mapping; +#define BNXT_MAX_MC_ADDRS 16 + + u32 flags; +#define BNXT_VNIC_RSS_FLAG 1 +#define BNXT_VNIC_RFS_FLAG 2 +#define BNXT_VNIC_MCAST_FLAG 4 +#define BNXT_VNIC_UCAST_FLAG 8 +}; + +#if defined(CONFIG_BNXT_SRIOV) +struct bnxt_vf_info { 
+ u16 fw_fid; + u8 mac_addr[ETH_ALEN]; + u16 max_rsscos_ctxs; + u16 max_cp_rings; + u16 max_tx_rings; + u16 max_rx_rings; + u16 max_l2_ctxs; + u16 max_irqs; + u16 max_vnics; + u16 max_stat_ctxs; + u16 vlan; + u32 flags; +#define BNXT_VF_QOS 0x1 +#define BNXT_VF_SPOOFCHK 0x2 +#define BNXT_VF_LINK_FORCED 0x4 +#define BNXT_VF_LINK_UP 0x8 + u32 func_flags; /* func cfg flags */ + u32 min_tx_rate; + u32 max_tx_rate; + void *hwrm_cmd_req_addr; + dma_addr_t hwrm_cmd_req_dma_addr; +}; +#endif + +struct bnxt_pf_info { +#define BNXT_FIRST_PF_FID 1 +#define BNXT_FIRST_VF_FID 128 + u32 fw_fid; + u8 port_id; + u8 mac_addr[ETH_ALEN]; + u16 max_rsscos_ctxs; + u16 max_cp_rings; + u16 max_tx_rings; /* HW assigned max tx rings for this PF */ + u16 max_pf_tx_rings; /* runtime max tx rings owned by PF */ + u16 max_rx_rings; /* HW assigned max rx rings for this PF */ + u16 max_pf_rx_rings; /* runtime max rx rings owned by PF */ + u16 max_irqs; + u16 max_l2_ctxs; + u16 max_vnics; + u16 max_stat_ctxs; + u32 first_vf_id; + u16 active_vfs; + u16 max_vfs; + u32 max_encap_records; + u32 max_decap_records; + u32 max_tx_em_flows; + u32 max_tx_wm_flows; + u32 max_rx_em_flows; + u32 max_rx_wm_flows; + unsigned long *vf_event_bmap; + u16 hwrm_cmd_req_pages; + void *hwrm_cmd_req_addr[4]; + dma_addr_t hwrm_cmd_req_dma_addr[4]; + struct bnxt_vf_info *vf; +}; + +struct bnxt_ntuple_filter { + struct hlist_node hash; + u8 src_mac_addr[ETH_ALEN]; + struct flow_keys fkeys; + __le64 filter_id; + u16 sw_id; + u16 rxq; + u32 flow_id; + unsigned long state; +#define BNXT_FLTR_VALID 0 +#define BNXT_FLTR_UPDATE 1 +}; + +#define BNXT_ALL_COPPER_ETHTOOL_SPEED \ + (ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full | \ + ADVERTISED_10000baseT_Full) + +struct bnxt_link_info { + u8 media_type; + u8 transceiver; + u8 phy_addr; + u8 phy_link_status; +#define BNXT_LINK_NO_LINK PORT_PHY_QCFG_RESP_LINK_NO_LINK +#define BNXT_LINK_SIGNAL PORT_PHY_QCFG_RESP_LINK_SIGNAL +#define BNXT_LINK_LINK PORT_PHY_QCFG_RESP_LINK_LINK + u8 wire_speed; + u8 loop_back; + u8 link_up; + u8 duplex; +#define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_HALF +#define BNXT_LINK_DUPLEX_FULL PORT_PHY_QCFG_RESP_DUPLEX_FULL + u8 pause; +#define BNXT_LINK_PAUSE_TX PORT_PHY_QCFG_RESP_PAUSE_TX +#define BNXT_LINK_PAUSE_RX PORT_PHY_QCFG_RESP_PAUSE_RX +#define BNXT_LINK_PAUSE_BOTH (PORT_PHY_QCFG_RESP_PAUSE_RX | \ + PORT_PHY_QCFG_RESP_PAUSE_TX) + u8 auto_pause_setting; + u8 force_pause_setting; + u8 duplex_setting; + u8 auto_mode; +#define BNXT_AUTO_MODE(mode) ((mode) > BNXT_LINK_AUTO_NONE && \ + (mode) <= BNXT_LINK_AUTO_MSK) +#define BNXT_LINK_AUTO_NONE PORT_PHY_QCFG_RESP_AUTO_MODE_NONE +#define BNXT_LINK_AUTO_ALLSPDS PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS +#define BNXT_LINK_AUTO_ONESPD PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED +#define BNXT_LINK_AUTO_ONEORBELOW PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW +#define BNXT_LINK_AUTO_MSK PORT_PHY_QCFG_RESP_AUTO_MODE_MASK +#define PHY_VER_LEN 3 + u8 phy_ver[PHY_VER_LEN]; + u16 link_speed; +#define BNXT_LINK_SPEED_100MB PORT_PHY_QCFG_RESP_LINK_SPEED_100MB +#define BNXT_LINK_SPEED_1GB PORT_PHY_QCFG_RESP_LINK_SPEED_1GB +#define BNXT_LINK_SPEED_2GB PORT_PHY_QCFG_RESP_LINK_SPEED_2GB +#define BNXT_LINK_SPEED_2_5GB PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB +#define BNXT_LINK_SPEED_10GB PORT_PHY_QCFG_RESP_LINK_SPEED_10GB +#define BNXT_LINK_SPEED_20GB PORT_PHY_QCFG_RESP_LINK_SPEED_20GB +#define BNXT_LINK_SPEED_25GB PORT_PHY_QCFG_RESP_LINK_SPEED_25GB +#define BNXT_LINK_SPEED_40GB PORT_PHY_QCFG_RESP_LINK_SPEED_40GB +#define BNXT_LINK_SPEED_50GB 
PORT_PHY_QCFG_RESP_LINK_SPEED_50GB + u16 support_speeds; + u16 auto_link_speeds; +#define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB +#define BNXT_LINK_SPEED_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB +#define BNXT_LINK_SPEED_MSK_2GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB +#define BNXT_LINK_SPEED_MSK_10GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB +#define BNXT_LINK_SPEED_MSK_2_5GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB +#define BNXT_LINK_SPEED_MSK_20GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB +#define BNXT_LINK_SPEED_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB +#define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB +#define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB + u16 auto_link_speed; + u16 force_link_speed; + u32 preemphasis; + + /* copy of requested setting from ethtool cmd */ + u8 autoneg; +#define BNXT_AUTONEG_SPEED 1 +#define BNXT_AUTONEG_FLOW_CTRL 2 + u8 req_duplex; + u8 req_flow_ctrl; + u16 req_link_speed; + u32 advertising; + bool force_link_chng; + /* a copy of phy_qcfg output used to report link + * info to VF + */ + struct hwrm_port_phy_qcfg_output phy_qcfg_resp; +}; + +#define BNXT_MAX_QUEUE 8 + +struct bnxt_queue_info { + u8 queue_id; + u8 queue_profile; +}; + +struct bnxt { + void __iomem *bar0; + void __iomem *bar1; + void __iomem *bar2; + + u32 reg_base; + + struct net_device *dev; + struct pci_dev *pdev; + + atomic_t intr_sem; + + u32 flags; + #define BNXT_FLAG_DCB_ENABLED 0x1 + #define BNXT_FLAG_VF 0x2 + #define BNXT_FLAG_LRO 0x4 + #define BNXT_FLAG_GRO 0x8 + #define BNXT_FLAG_TPA (BNXT_FLAG_LRO | BNXT_FLAG_GRO) + #define BNXT_FLAG_JUMBO 0x10 + #define BNXT_FLAG_STRIP_VLAN 0x20 + #define BNXT_FLAG_AGG_RINGS (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \ + BNXT_FLAG_LRO) + #define BNXT_FLAG_USING_MSIX 0x40 + #define BNXT_FLAG_MSIX_CAP 0x80 + #define BNXT_FLAG_RFS 0x100 + #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ + BNXT_FLAG_RFS | \ + BNXT_FLAG_STRIP_VLAN) + +#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF)) +#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF) + + struct bnxt_napi **bnapi; + + u32 rx_buf_size; + u32 rx_buf_use_size; /* useable size */ + u32 rx_ring_size; + u32 rx_agg_ring_size; + u32 rx_copy_thresh; + u32 rx_ring_mask; + u32 rx_agg_ring_mask; + int rx_nr_pages; + int rx_agg_nr_pages; + int rx_nr_rings; + int rsscos_nr_ctxs; + + u32 tx_ring_size; + u32 tx_ring_mask; + int tx_nr_pages; + int tx_nr_rings; + int tx_nr_rings_per_tc; + + int tx_wake_thresh; + int tx_push_thresh; + int tx_push_size; + + u32 cp_ring_size; + u32 cp_ring_mask; + u32 cp_bit; + int cp_nr_pages; + int cp_nr_rings; + + int num_stat_ctxs; + struct bnxt_ring_grp_info *grp_info; + struct bnxt_vnic_info *vnic_info; + int nr_vnics; + + u8 max_tc; + struct bnxt_queue_info q_info[BNXT_MAX_QUEUE]; + + unsigned int current_interval; +#define BNXT_TIMER_INTERVAL (HZ / 2) + + struct timer_list timer; + + int state; +#define BNXT_STATE_CLOSED 0 +#define BNXT_STATE_OPEN 1 + + struct bnxt_irq *irq_tbl; + u8 mac_addr[ETH_ALEN]; + + u32 msg_enable; + + u16 hwrm_cmd_seq; + u32 hwrm_intr_seq_id; + void *hwrm_cmd_resp_addr; + dma_addr_t hwrm_cmd_resp_dma_addr; + void *hwrm_dbg_resp_addr; + dma_addr_t hwrm_dbg_resp_dma_addr; +#define HWRM_DBG_REG_BUF_SIZE 128 + struct mutex hwrm_cmd_lock; /* serialize hwrm messages */ + struct hwrm_ver_get_output ver_resp; +#define FW_VER_STR_LEN 32 +#define BC_HWRM_STR_LEN 21 +#define PHY_VER_STR_LEN (FW_VER_STR_LEN - BC_HWRM_STR_LEN) + char fw_ver_str[FW_VER_STR_LEN]; + __be16 vxlan_port; + u8 
vxlan_port_cnt; + __le16 vxlan_fw_dst_port_id; + u8 nge_port_cnt; + __le16 nge_fw_dst_port_id; + u16 coal_ticks; + u16 coal_ticks_irq; + u16 coal_bufs; + u16 coal_bufs_irq; + +#define BNXT_USEC_TO_COAL_TIMER(x) ((x) * 25 / 2) +#define BNXT_COAL_TIMER_TO_USEC(x) ((x) * 2 / 25) + + struct work_struct sp_task; + unsigned long sp_event; +#define BNXT_RX_MASK_SP_EVENT 0 +#define BNXT_RX_NTP_FLTR_SP_EVENT 1 +#define BNXT_LINK_CHNG_SP_EVENT 2 +#define BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT 4 +#define BNXT_VXLAN_ADD_PORT_SP_EVENT 8 +#define BNXT_VXLAN_DEL_PORT_SP_EVENT 16 +#define BNXT_RESET_TASK_SP_EVENT 32 +#define BNXT_RST_RING_SP_EVENT 64 + + struct bnxt_pf_info pf; +#ifdef CONFIG_BNXT_SRIOV + int nr_vfs; + struct bnxt_vf_info vf; + wait_queue_head_t sriov_cfg_wait; + bool sriov_cfg; +#define BNXT_SRIOV_CFG_WAIT_TMO msecs_to_jiffies(10000) +#endif + +#define BNXT_NTP_FLTR_MAX_FLTR 4096 +#define BNXT_NTP_FLTR_HASH_SIZE 512 +#define BNXT_NTP_FLTR_HASH_MASK (BNXT_NTP_FLTR_HASH_SIZE - 1) + struct hlist_head ntp_fltr_hash_tbl[BNXT_NTP_FLTR_HASH_SIZE]; + spinlock_t ntp_fltr_lock; /* for hash table add, del */ + + unsigned long *ntp_fltr_bmap; + int ntp_fltr_count; + + struct bnxt_link_info link_info; +}; + +#ifdef CONFIG_NET_RX_BUSY_POLL +static inline void bnxt_enable_poll(struct bnxt_napi *bnapi) +{ + atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE); +} + +/* called from the NAPI poll routine to get ownership of a bnapi */ +static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi) +{ + int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE, + BNXT_STATE_NAPI); + + return rc == BNXT_STATE_IDLE; +} + +static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi) +{ + atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE); +} + +/* called from the busy poll routine to get ownership of a bnapi */ +static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi) +{ + int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE, + BNXT_STATE_POLL); + + return rc == BNXT_STATE_IDLE; +} + +static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi) +{ + atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE); +} + +static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi) +{ + return atomic_read(&bnapi->poll_state) == BNXT_STATE_POLL; +} + +static inline void bnxt_disable_poll(struct bnxt_napi *bnapi) +{ + int old; + + while (1) { + old = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE, + BNXT_STATE_DISABLE); + if (old == BNXT_STATE_IDLE) + break; + usleep_range(500, 5000); + } +} + +#else + +static inline void bnxt_enable_poll(struct bnxt_napi *bnapi) +{ +} + +static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi) +{ + return true; +} + +static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi) +{ +} + +static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi) +{ + return false; +} + +static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi) +{ +} + +static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi) +{ + return false; +} + +static inline void bnxt_disable_poll(struct bnxt_napi *bnapi) +{ +} + +#endif + +void bnxt_set_ring_params(struct bnxt *); +void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16); +int _hwrm_send_message(struct bnxt *, void *, u32, int); +int hwrm_send_message(struct bnxt *, void *, u32, int); +int bnxt_hwrm_set_coal(struct bnxt *); +int bnxt_hwrm_set_pause(struct bnxt *); +int bnxt_hwrm_set_link_setting(struct bnxt *, bool); +int bnxt_open_nic(struct bnxt *, bool, bool); +int bnxt_close_nic(struct bnxt *, bool, bool); +void bnxt_get_max_rings(struct bnxt *, int *, 
int *); +#endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c new file mode 100644 index 000000000000..45bd628eaf3a --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -0,0 +1,1149 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2014-2015 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#include <linux/ethtool.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/etherdevice.h> +#include <linux/crc32.h> +#include <linux/firmware.h> +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_ethtool.h" +#include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */ +#include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */ +#define FLASH_NVRAM_TIMEOUT ((HWRM_CMD_TIMEOUT) * 100) + +static u32 bnxt_get_msglevel(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + + return bp->msg_enable; +} + +static void bnxt_set_msglevel(struct net_device *dev, u32 value) +{ + struct bnxt *bp = netdev_priv(dev); + + bp->msg_enable = value; +} + +static int bnxt_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal) +{ + struct bnxt *bp = netdev_priv(dev); + + memset(coal, 0, sizeof(*coal)); + + coal->rx_coalesce_usecs = + max_t(u16, BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks), 1); + coal->rx_max_coalesced_frames = bp->coal_bufs / 2; + coal->rx_coalesce_usecs_irq = + max_t(u16, BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks_irq), 1); + coal->rx_max_coalesced_frames_irq = bp->coal_bufs_irq / 2; + + return 0; +} + +static int bnxt_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal) +{ + struct bnxt *bp = netdev_priv(dev); + int rc = 0; + + bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(coal->rx_coalesce_usecs); + bp->coal_bufs = coal->rx_max_coalesced_frames * 2; + bp->coal_ticks_irq = + BNXT_USEC_TO_COAL_TIMER(coal->rx_coalesce_usecs_irq); + bp->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * 2; + + if (netif_running(dev)) + rc = bnxt_hwrm_set_coal(bp); + + return rc; +} + +#define BNXT_NUM_STATS 21 + +static int bnxt_get_sset_count(struct net_device *dev, int sset) +{ + struct bnxt *bp = netdev_priv(dev); + + switch (sset) { + case ETH_SS_STATS: + return BNXT_NUM_STATS * bp->cp_nr_rings; + default: + return -EOPNOTSUPP; + } +} + +static void bnxt_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *buf) +{ + u32 i, j = 0; + struct bnxt *bp = netdev_priv(dev); + u32 buf_size = sizeof(struct ctx_hw_stats) * bp->cp_nr_rings; + u32 stat_fields = sizeof(struct ctx_hw_stats) / 8; + + memset(buf, 0, buf_size); + + if (!bp->bnapi) + return; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + __le64 *hw_stats = (__le64 *)cpr->hw_stats; + int k; + + for (k = 0; k < stat_fields; j++, k++) + buf[j] = le64_to_cpu(hw_stats[k]); + buf[j++] = cpr->rx_l4_csum_errors; + } +} + +static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf) +{ + struct bnxt *bp = netdev_priv(dev); + u32 i; + + switch (stringset) { + /* The number of strings must match BNXT_NUM_STATS defined above. 
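+ * That works out to 21 per completion ring here: the twenty per-ring
+ * counters named in the list below plus the driver-kept rx_l4_csum_errors.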
*/ + case ETH_SS_STATS: + for (i = 0; i < bp->cp_nr_rings; i++) { + sprintf(buf, "[%d]: rx_ucast_packets", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: rx_mcast_packets", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: rx_bcast_packets", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: rx_discards", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: rx_drops", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: rx_ucast_bytes", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: rx_mcast_bytes", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: rx_bcast_bytes", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: tx_ucast_packets", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: tx_mcast_packets", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: tx_bcast_packets", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: tx_discards", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: tx_drops", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: tx_ucast_bytes", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: tx_mcast_bytes", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: tx_bcast_bytes", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: tpa_packets", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: tpa_bytes", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: tpa_events", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: tpa_aborts", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: rx_l4_csum_errors", i); + buf += ETH_GSTRING_LEN; + } + break; + default: + netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n", + stringset); + break; + } +} + +static void bnxt_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct bnxt *bp = netdev_priv(dev); + + ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT; + ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT; + ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT; + + ering->rx_pending = bp->rx_ring_size; + ering->rx_jumbo_pending = bp->rx_agg_ring_size; + ering->tx_pending = bp->tx_ring_size; +} + +static int bnxt_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct bnxt *bp = netdev_priv(dev); + + if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) || + (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) || + (ering->tx_pending <= MAX_SKB_FRAGS)) + return -EINVAL; + + if (netif_running(dev)) + bnxt_close_nic(bp, false, false); + + bp->rx_ring_size = ering->rx_pending; + bp->tx_ring_size = ering->tx_pending; + bnxt_set_ring_params(bp); + + if (netif_running(dev)) + return bnxt_open_nic(bp, false, false); + + return 0; +} + +static void bnxt_get_channels(struct net_device *dev, + struct ethtool_channels *channel) +{ + struct bnxt *bp = netdev_priv(dev); + int max_rx_rings, max_tx_rings, tcs; + + bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings); + tcs = netdev_get_num_tc(dev); + if (tcs > 1) + max_tx_rings /= tcs; + + channel->max_rx = max_rx_rings; + channel->max_tx = max_tx_rings; + channel->max_other = 0; + channel->max_combined = 0; + channel->rx_count = bp->rx_nr_rings; + channel->tx_count = bp->tx_nr_rings_per_tc; +} + +static int bnxt_set_channels(struct net_device *dev, + struct ethtool_channels *channel) +{ + struct bnxt *bp = netdev_priv(dev); + int max_rx_rings, max_tx_rings, tcs; + u32 rc = 0; + + if (channel->other_count || channel->combined_count || + !channel->rx_count || !channel->tx_count) + return -EINVAL; + + bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings); + tcs = netdev_get_num_tc(dev); + if (tcs > 1) + max_tx_rings /= tcs; + + if 
(channel->rx_count > max_rx_rings || + channel->tx_count > max_tx_rings) + return -EINVAL; + + if (netif_running(dev)) { + if (BNXT_PF(bp)) { + /* TODO CHIMP_FW: Send message to all VF's + * before PF unload + */ + } + rc = bnxt_close_nic(bp, true, false); + if (rc) { + netdev_err(bp->dev, "Set channel failure rc :%x\n", + rc); + return rc; + } + } + + bp->rx_nr_rings = channel->rx_count; + bp->tx_nr_rings_per_tc = channel->tx_count; + bp->tx_nr_rings = bp->tx_nr_rings_per_tc; + if (tcs > 1) + bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs; + bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings); + bp->num_stat_ctxs = bp->cp_nr_rings; + + if (netif_running(dev)) { + rc = bnxt_open_nic(bp, true, false); + if ((!rc) && BNXT_PF(bp)) { + /* TODO CHIMP_FW: Send message to all VF's + * to renable + */ + } + } + + return rc; +} + +#ifdef CONFIG_RFS_ACCEL +static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + int i, j = 0; + + cmd->data = bp->ntp_fltr_count; + for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { + struct hlist_head *head; + struct bnxt_ntuple_filter *fltr; + + head = &bp->ntp_fltr_hash_tbl[i]; + rcu_read_lock(); + hlist_for_each_entry_rcu(fltr, head, hash) { + if (j == cmd->rule_cnt) + break; + rule_locs[j++] = fltr->sw_id; + } + rcu_read_unlock(); + if (j == cmd->rule_cnt) + break; + } + cmd->rule_cnt = j; + return 0; +} + +static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fs = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct bnxt_ntuple_filter *fltr; + struct flow_keys *fkeys; + int i, rc = -EINVAL; + + if (fs->location < 0 || fs->location >= BNXT_NTP_FLTR_MAX_FLTR) + return rc; + + for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { + struct hlist_head *head; + + head = &bp->ntp_fltr_hash_tbl[i]; + rcu_read_lock(); + hlist_for_each_entry_rcu(fltr, head, hash) { + if (fltr->sw_id == fs->location) + goto fltr_found; + } + rcu_read_unlock(); + } + return rc; + +fltr_found: + fkeys = &fltr->fkeys; + if (fkeys->basic.ip_proto == IPPROTO_TCP) + fs->flow_type = TCP_V4_FLOW; + else if (fkeys->basic.ip_proto == IPPROTO_UDP) + fs->flow_type = UDP_V4_FLOW; + else + goto fltr_err; + + fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src; + fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0); + + fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst; + fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0); + + fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src; + fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0); + + fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst; + fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0); + + fs->ring_cookie = fltr->rxq; + rc = 0; + +fltr_err: + rcu_read_unlock(); + + return rc; +} + +static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct bnxt *bp = netdev_priv(dev); + int rc = 0; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = bp->rx_nr_rings; + break; + + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = bp->ntp_fltr_count; + cmd->data = BNXT_NTP_FLTR_MAX_FLTR; + break; + + case ETHTOOL_GRXCLSRLALL: + rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs); + break; + + case ETHTOOL_GRXCLSRULE: + rc = bnxt_grxclsrule(bp, cmd); + break; + + default: + rc = -EOPNOTSUPP; + break; + } + + return rc; +} +#endif + +static u32 bnxt_get_rxfh_indir_size(struct net_device *dev) +{ + return HW_HASH_INDEX_SIZE; +} + +static u32 bnxt_get_rxfh_key_size(struct net_device *dev) +{ + return HW_HASH_KEY_SIZE; +} + +static int bnxt_get_rxfh(struct net_device 
*dev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; + int i = 0; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + if (indir) + for (i = 0; i < HW_HASH_INDEX_SIZE; i++) + indir[i] = le16_to_cpu(vnic->rss_table[i]); + + if (key) + memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE); + + return 0; +} + +static void bnxt_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct bnxt *bp = netdev_priv(dev); + + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); + strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version)); + strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); + info->n_stats = BNXT_NUM_STATS * bp->cp_nr_rings; + info->testinfo_len = BNXT_NUM_TESTS(bp); + /* TODO CHIMP_FW: eeprom dump details */ + info->eedump_len = 0; + /* TODO CHIMP FW: reg dump details */ + info->regdump_len = 0; +} + +static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info) +{ + u16 fw_speeds = link_info->support_speeds; + u32 speed_mask = 0; + + if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB) + speed_mask |= SUPPORTED_100baseT_Full; + if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB) + speed_mask |= SUPPORTED_1000baseT_Full; + if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB) + speed_mask |= SUPPORTED_2500baseX_Full; + if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB) + speed_mask |= SUPPORTED_10000baseT_Full; + /* TODO: support 25GB, 50GB with different cable type */ + if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB) + speed_mask |= SUPPORTED_20000baseMLD2_Full | + SUPPORTED_20000baseKR2_Full; + if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB) + speed_mask |= SUPPORTED_40000baseKR4_Full | + SUPPORTED_40000baseCR4_Full | + SUPPORTED_40000baseSR4_Full | + SUPPORTED_40000baseLR4_Full; + + return speed_mask; +} + +static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info) +{ + u16 fw_speeds = link_info->auto_link_speeds; + u32 speed_mask = 0; + + /* TODO: support 25GB, 40GB, 50GB with different cable type */ + /* set the advertised speeds */ + if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB) + speed_mask |= ADVERTISED_100baseT_Full; + if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB) + speed_mask |= ADVERTISED_1000baseT_Full; + if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB) + speed_mask |= ADVERTISED_2500baseX_Full; + if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB) + speed_mask |= ADVERTISED_10000baseT_Full; + /* TODO: how to advertise 20, 25, 40, 50GB with different cable type ?*/ + if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB) + speed_mask |= ADVERTISED_20000baseMLD2_Full | + ADVERTISED_20000baseKR2_Full; + if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB) + speed_mask |= ADVERTISED_40000baseKR4_Full | + ADVERTISED_40000baseCR4_Full | + ADVERTISED_40000baseSR4_Full | + ADVERTISED_40000baseLR4_Full; + return speed_mask; +} + +u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed) +{ + switch (fw_link_speed) { + case BNXT_LINK_SPEED_100MB: + return SPEED_100; + case BNXT_LINK_SPEED_1GB: + return SPEED_1000; + case BNXT_LINK_SPEED_2_5GB: + return SPEED_2500; + case BNXT_LINK_SPEED_10GB: + return SPEED_10000; + case BNXT_LINK_SPEED_20GB: + return SPEED_20000; + case BNXT_LINK_SPEED_25GB: + return SPEED_25000; + case BNXT_LINK_SPEED_40GB: + return SPEED_40000; + case BNXT_LINK_SPEED_50GB: + return SPEED_50000; + default: + return SPEED_UNKNOWN; + } +} + +static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct bnxt *bp = 
netdev_priv(dev); + struct bnxt_link_info *link_info = &bp->link_info; + u16 ethtool_speed; + + cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info); + + if (link_info->auto_link_speeds) + cmd->supported |= SUPPORTED_Autoneg; + + if (BNXT_AUTO_MODE(link_info->auto_mode)) { + cmd->advertising = + bnxt_fw_to_ethtool_advertised_spds(link_info); + cmd->advertising |= ADVERTISED_Autoneg; + cmd->autoneg = AUTONEG_ENABLE; + } else { + cmd->autoneg = AUTONEG_DISABLE; + cmd->advertising = 0; + } + if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) { + if ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) == + BNXT_LINK_PAUSE_BOTH) { + cmd->advertising |= ADVERTISED_Pause; + cmd->supported |= SUPPORTED_Pause; + } else { + cmd->advertising |= ADVERTISED_Asym_Pause; + cmd->supported |= SUPPORTED_Asym_Pause; + if (link_info->auto_pause_setting & + BNXT_LINK_PAUSE_RX) + cmd->advertising |= ADVERTISED_Pause; + } + } else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) { + if ((link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) == + BNXT_LINK_PAUSE_BOTH) { + cmd->supported |= SUPPORTED_Pause; + } else { + cmd->supported |= SUPPORTED_Asym_Pause; + if (link_info->force_pause_setting & + BNXT_LINK_PAUSE_RX) + cmd->supported |= SUPPORTED_Pause; + } + } + + cmd->port = PORT_NONE; + if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { + cmd->port = PORT_TP; + cmd->supported |= SUPPORTED_TP; + cmd->advertising |= ADVERTISED_TP; + } else { + cmd->supported |= SUPPORTED_FIBRE; + cmd->advertising |= ADVERTISED_FIBRE; + + if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC) + cmd->port = PORT_DA; + else if (link_info->media_type == + PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE) + cmd->port = PORT_FIBRE; + } + + if (link_info->phy_link_status == BNXT_LINK_LINK) { + if (link_info->duplex & BNXT_LINK_DUPLEX_FULL) + cmd->duplex = DUPLEX_FULL; + } else { + cmd->duplex = DUPLEX_UNKNOWN; + } + ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed); + ethtool_cmd_speed_set(cmd, ethtool_speed); + if (link_info->transceiver == + PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_INTERNAL) + cmd->transceiver = XCVR_INTERNAL; + else + cmd->transceiver = XCVR_EXTERNAL; + cmd->phy_address = link_info->phy_addr; + + return 0; +} + +static u32 bnxt_get_fw_speed(struct net_device *dev, u16 ethtool_speed) +{ + switch (ethtool_speed) { + case SPEED_100: + return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB; + case SPEED_1000: + return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB; + case SPEED_2500: + return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB; + case SPEED_10000: + return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB; + case SPEED_20000: + return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB; + case SPEED_25000: + return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB; + case SPEED_40000: + return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB; + case SPEED_50000: + return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB; + default: + netdev_err(dev, "unsupported speed!\n"); + break; + } + return 0; +} + +static u16 bnxt_get_fw_auto_link_speeds(u32 advertising) +{ + u16 fw_speed_mask = 0; + + /* only support autoneg at speed 100, 1000, and 10000 */ + if (advertising & (ADVERTISED_100baseT_Full | + ADVERTISED_100baseT_Half)) { + fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB; + } + if (advertising & (ADVERTISED_1000baseT_Full | + ADVERTISED_1000baseT_Half)) { + fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB; + } + if (advertising & ADVERTISED_10000baseT_Full) + fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB; + + return fw_speed_mask; +} + +static int 
bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + int rc = 0; + struct bnxt *bp = netdev_priv(dev); + struct bnxt_link_info *link_info = &bp->link_info; + u32 speed, fw_advertising = 0; + bool set_pause = false; + + if (BNXT_VF(bp)) + return rc; + + if (cmd->autoneg == AUTONEG_ENABLE) { + if (link_info->media_type != PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { + netdev_err(dev, "Media type doesn't support autoneg\n"); + rc = -EINVAL; + goto set_setting_exit; + } + if (cmd->advertising & ~(BNXT_ALL_COPPER_ETHTOOL_SPEED | + ADVERTISED_Autoneg | + ADVERTISED_TP | + ADVERTISED_Pause | + ADVERTISED_Asym_Pause)) { + netdev_err(dev, "Unsupported advertising mask (adv: 0x%x)\n", + cmd->advertising); + rc = -EINVAL; + goto set_setting_exit; + } + fw_advertising = bnxt_get_fw_auto_link_speeds(cmd->advertising); + if (fw_advertising & ~link_info->support_speeds) { + netdev_err(dev, "Advertising parameters are not supported! (adv: 0x%x)\n", + cmd->advertising); + rc = -EINVAL; + goto set_setting_exit; + } + link_info->autoneg |= BNXT_AUTONEG_SPEED; + if (!fw_advertising) + link_info->advertising = link_info->support_speeds; + else + link_info->advertising = fw_advertising; + /* any change to autoneg will cause link change, therefore the + * driver should put back the original pause setting in autoneg + */ + set_pause = true; + } else { + /* TODO: currently don't support half duplex */ + if (cmd->duplex == DUPLEX_HALF) { + netdev_err(dev, "HALF DUPLEX is not supported!\n"); + rc = -EINVAL; + goto set_setting_exit; + } + /* If received a request for an unknown duplex, assume full*/ + if (cmd->duplex == DUPLEX_UNKNOWN) + cmd->duplex = DUPLEX_FULL; + speed = ethtool_cmd_speed(cmd); + link_info->req_link_speed = bnxt_get_fw_speed(dev, speed); + link_info->req_duplex = BNXT_LINK_DUPLEX_FULL; + link_info->autoneg &= ~BNXT_AUTONEG_SPEED; + link_info->advertising = 0; + } + + if (netif_running(dev)) + rc = bnxt_hwrm_set_link_setting(bp, set_pause); + +set_setting_exit: + return rc; +} + +static void bnxt_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_link_info *link_info = &bp->link_info; + + if (BNXT_VF(bp)) + return; + epause->autoneg = !!(link_info->auto_pause_setting & + BNXT_LINK_PAUSE_BOTH); + epause->rx_pause = ((link_info->pause & BNXT_LINK_PAUSE_RX) != 0); + epause->tx_pause = ((link_info->pause & BNXT_LINK_PAUSE_TX) != 0); +} + +static int bnxt_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + int rc = 0; + struct bnxt *bp = netdev_priv(dev); + struct bnxt_link_info *link_info = &bp->link_info; + + if (BNXT_VF(bp)) + return rc; + + if (epause->autoneg) { + link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; + link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_BOTH; + } else { + /* when transition from auto pause to force pause, + * force a link change + */ + if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) + link_info->force_link_chng = true; + link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL; + link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_BOTH; + } + if (epause->rx_pause) + link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX; + else + link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_RX; + + if (epause->tx_pause) + link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX; + else + link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_TX; + + if (netif_running(dev)) + rc = bnxt_hwrm_set_pause(bp); + return rc; +} + +static u32 bnxt_get_link(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + + /* TODO: 
handle MF, VF, driver close case */ + return bp->link_info.link_up; +} + +static int bnxt_flash_nvram(struct net_device *dev, + u16 dir_type, + u16 dir_ordinal, + u16 dir_ext, + u16 dir_attr, + const u8 *data, + size_t data_len) +{ + struct bnxt *bp = netdev_priv(dev); + int rc; + struct hwrm_nvm_write_input req = {0}; + dma_addr_t dma_handle; + u8 *kmem; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1); + + req.dir_type = cpu_to_le16(dir_type); + req.dir_ordinal = cpu_to_le16(dir_ordinal); + req.dir_ext = cpu_to_le16(dir_ext); + req.dir_attr = cpu_to_le16(dir_attr); + req.dir_data_length = cpu_to_le32(data_len); + + kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle, + GFP_KERNEL); + if (!kmem) { + netdev_err(dev, "dma_alloc_coherent failure, length = %u\n", + (unsigned)data_len); + return -ENOMEM; + } + memcpy(kmem, data, data_len); + req.host_src_addr = cpu_to_le64(dma_handle); + + rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT); + dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle); + + return rc; +} + +static int bnxt_flash_firmware(struct net_device *dev, + u16 dir_type, + const u8 *fw_data, + size_t fw_size) +{ + int rc = 0; + u16 code_type; + u32 stored_crc; + u32 calculated_crc; + struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data; + + switch (dir_type) { + case BNX_DIR_TYPE_BOOTCODE: + case BNX_DIR_TYPE_BOOTCODE_2: + code_type = CODE_BOOT; + break; + default: + netdev_err(dev, "Unsupported directory entry type: %u\n", + dir_type); + return -EINVAL; + } + if (fw_size < sizeof(struct bnxt_fw_header)) { + netdev_err(dev, "Invalid firmware file size: %u\n", + (unsigned int)fw_size); + return -EINVAL; + } + if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) { + netdev_err(dev, "Invalid firmware signature: %08X\n", + le32_to_cpu(header->signature)); + return -EINVAL; + } + if (header->code_type != code_type) { + netdev_err(dev, "Expected firmware type: %d, read: %d\n", + code_type, header->code_type); + return -EINVAL; + } + if (header->device != DEVICE_CUMULUS_FAMILY) { + netdev_err(dev, "Expected firmware device family %d, read: %d\n", + DEVICE_CUMULUS_FAMILY, header->device); + return -EINVAL; + } + /* Confirm the CRC32 checksum of the file: */ + stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size - + sizeof(stored_crc))); + calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc)); + if (calculated_crc != stored_crc) { + netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n", + (unsigned long)stored_crc, + (unsigned long)calculated_crc); + return -EINVAL; + } + /* TODO: Validate digital signature (RSA-encrypted SHA-256 hash) here */ + rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, + 0, 0, fw_data, fw_size); + if (rc == 0) { /* Firmware update successful */ + /* TODO: Notify processor it needs to reset itself + */ + } + return rc; +} + +static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type) +{ + switch (dir_type) { + case BNX_DIR_TYPE_CHIMP_PATCH: + case BNX_DIR_TYPE_BOOTCODE: + case BNX_DIR_TYPE_BOOTCODE_2: + case BNX_DIR_TYPE_APE_FW: + case BNX_DIR_TYPE_APE_PATCH: + case BNX_DIR_TYPE_KONG_FW: + case BNX_DIR_TYPE_KONG_PATCH: + return true; + } + + return false; +} + +static bool bnxt_dir_type_is_unprotected_exec_format(u16 dir_type) +{ + switch (dir_type) { + case BNX_DIR_TYPE_AVS: + case BNX_DIR_TYPE_EXP_ROM_MBA: + case BNX_DIR_TYPE_PCIE: + case BNX_DIR_TYPE_TSCF_UCODE: + case BNX_DIR_TYPE_EXT_PHY: + case BNX_DIR_TYPE_CCM: + 
case BNX_DIR_TYPE_ISCSI_BOOT: + case BNX_DIR_TYPE_ISCSI_BOOT_IPV6: + case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6: + return true; + } + + return false; +} + +static bool bnxt_dir_type_is_executable(u16 dir_type) +{ + return bnxt_dir_type_is_ape_bin_format(dir_type) || + bnxt_dir_type_is_unprotected_exec_format(dir_type); +} + +static int bnxt_flash_firmware_from_file(struct net_device *dev, + u16 dir_type, + const char *filename) +{ + const struct firmware *fw; + int rc; + + if (bnxt_dir_type_is_executable(dir_type) == false) + return -EINVAL; + + rc = request_firmware(&fw, filename, &dev->dev); + if (rc != 0) { + netdev_err(dev, "Error %d requesting firmware file: %s\n", + rc, filename); + return rc; + } + if (bnxt_dir_type_is_ape_bin_format(dir_type) == true) + rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size); + else + rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, + 0, 0, fw->data, fw->size); + release_firmware(fw); + return rc; +} + +static int bnxt_flash_package_from_file(struct net_device *dev, + char *filename) +{ + netdev_err(dev, "packages are not yet supported\n"); + return -EINVAL; +} + +static int bnxt_flash_device(struct net_device *dev, + struct ethtool_flash *flash) +{ + if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) { + netdev_err(dev, "flashdev not supported from a virtual function\n"); + return -EINVAL; + } + + if (flash->region == ETHTOOL_FLASH_ALL_REGIONS) + return bnxt_flash_package_from_file(dev, flash->data); + + return bnxt_flash_firmware_from_file(dev, flash->region, flash->data); +} + +static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length) +{ + struct bnxt *bp = netdev_priv(dev); + int rc; + struct hwrm_nvm_get_dir_info_input req = {0}; + struct hwrm_nvm_get_dir_info_output *output = bp->hwrm_cmd_resp_addr; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) { + *entries = le32_to_cpu(output->entries); + *length = le32_to_cpu(output->entry_length); + } + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int bnxt_get_eeprom_len(struct net_device *dev) +{ + /* The -1 return value allows the entire 32-bit range of offsets to be + * passed via the ethtool command-line utility. + */ + return -1; +} + +static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data) +{ + struct bnxt *bp = netdev_priv(dev); + int rc; + u32 dir_entries; + u32 entry_length; + u8 *buf; + size_t buflen; + dma_addr_t dma_handle; + struct hwrm_nvm_get_dir_entries_input req = {0}; + + rc = nvm_get_dir_info(dev, &dir_entries, &entry_length); + if (rc != 0) + return rc; + + /* Insert 2 bytes of directory info (count and size of entries) */ + if (len < 2) + return -EINVAL; + + *data++ = dir_entries; + *data++ = entry_length; + len -= 2; + memset(data, 0xff, len); + + buflen = dir_entries * entry_length; + buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle, + GFP_KERNEL); + if (!buf) { + netdev_err(dev, "dma_alloc_coherent failure, length = %u\n", + (unsigned)buflen); + return -ENOMEM; + } + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1); + req.host_dest_addr = cpu_to_le64(dma_handle); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc == 0) + memcpy(data, buf, len > buflen ? 
buflen : len); + dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle); + return rc; +} + +static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset, + u32 length, u8 *data) +{ + struct bnxt *bp = netdev_priv(dev); + int rc; + u8 *buf; + dma_addr_t dma_handle; + struct hwrm_nvm_read_input req = {0}; + + buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle, + GFP_KERNEL); + if (!buf) { + netdev_err(dev, "dma_alloc_coherent failure, length = %u\n", + (unsigned)length); + return -ENOMEM; + } + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1); + req.host_dest_addr = cpu_to_le64(dma_handle); + req.dir_idx = cpu_to_le16(index); + req.offset = cpu_to_le32(offset); + req.len = cpu_to_le32(length); + + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc == 0) + memcpy(data, buf, length); + dma_free_coherent(&bp->pdev->dev, length, buf, dma_handle); + return rc; +} + +static int bnxt_get_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, + u8 *data) +{ + u32 index; + u32 offset; + + if (eeprom->offset == 0) /* special offset value to get directory */ + return bnxt_get_nvram_directory(dev, eeprom->len, data); + + index = eeprom->offset >> 24; + offset = eeprom->offset & 0xffffff; + + if (index == 0) { + netdev_err(dev, "unsupported index value: %d\n", index); + return -EINVAL; + } + + return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data); +} + +static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index) +{ + struct bnxt *bp = netdev_priv(dev); + struct hwrm_nvm_erase_dir_entry_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1); + req.dir_idx = cpu_to_le16(index); + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + +static int bnxt_set_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, + u8 *data) +{ + struct bnxt *bp = netdev_priv(dev); + u8 index, dir_op; + u16 type, ext, ordinal, attr; + + if (!BNXT_PF(bp)) { + netdev_err(dev, "NVM write not supported from a virtual function\n"); + return -EINVAL; + } + + type = eeprom->magic >> 16; + + if (type == 0xffff) { /* special value for directory operations */ + index = eeprom->magic & 0xff; + dir_op = eeprom->magic >> 8; + if (index == 0) + return -EINVAL; + switch (dir_op) { + case 0x0e: /* erase */ + if (eeprom->offset != ~eeprom->magic) + return -EINVAL; + return bnxt_erase_nvram_directory(dev, index - 1); + default: + return -EINVAL; + } + } + + /* Create or re-write an NVM item: */ + if (bnxt_dir_type_is_executable(type) == true) + return -EINVAL; + ext = eeprom->magic & 0xffff; + ordinal = eeprom->offset >> 16; + attr = eeprom->offset & 0xffff; + + return bnxt_flash_nvram(dev, type, ordinal, ext, attr, data, + eeprom->len); +} + +const struct ethtool_ops bnxt_ethtool_ops = { + .get_settings = bnxt_get_settings, + .set_settings = bnxt_set_settings, + .get_pauseparam = bnxt_get_pauseparam, + .set_pauseparam = bnxt_set_pauseparam, + .get_drvinfo = bnxt_get_drvinfo, + .get_coalesce = bnxt_get_coalesce, + .set_coalesce = bnxt_set_coalesce, + .get_msglevel = bnxt_get_msglevel, + .set_msglevel = bnxt_set_msglevel, + .get_sset_count = bnxt_get_sset_count, + .get_strings = bnxt_get_strings, + .get_ethtool_stats = bnxt_get_ethtool_stats, + .set_ringparam = bnxt_set_ringparam, + .get_ringparam = bnxt_get_ringparam, + .get_channels = bnxt_get_channels, + .set_channels = bnxt_set_channels, +#ifdef CONFIG_RFS_ACCEL + .get_rxnfc = bnxt_get_rxnfc, +#endif + .get_rxfh_indir_size = 
+const struct ethtool_ops bnxt_ethtool_ops = {
+	.get_settings = bnxt_get_settings,
+	.set_settings = bnxt_set_settings,
+	.get_pauseparam = bnxt_get_pauseparam,
+	.set_pauseparam = bnxt_set_pauseparam,
+	.get_drvinfo = bnxt_get_drvinfo,
+	.get_coalesce = bnxt_get_coalesce,
+	.set_coalesce = bnxt_set_coalesce,
+	.get_msglevel = bnxt_get_msglevel,
+	.set_msglevel = bnxt_set_msglevel,
+	.get_sset_count = bnxt_get_sset_count,
+	.get_strings = bnxt_get_strings,
+	.get_ethtool_stats = bnxt_get_ethtool_stats,
+	.set_ringparam = bnxt_set_ringparam,
+	.get_ringparam = bnxt_get_ringparam,
+	.get_channels = bnxt_get_channels,
+	.set_channels = bnxt_set_channels,
+#ifdef CONFIG_RFS_ACCEL
+	.get_rxnfc = bnxt_get_rxnfc,
+#endif
+	.get_rxfh_indir_size = bnxt_get_rxfh_indir_size,
+	.get_rxfh_key_size = bnxt_get_rxfh_key_size,
+	.get_rxfh = bnxt_get_rxfh,
+	.flash_device = bnxt_flash_device,
+	.get_eeprom_len = bnxt_get_eeprom_len,
+	.get_eeprom = bnxt_get_eeprom,
+	.set_eeprom = bnxt_set_eeprom,
+	.get_link = bnxt_get_link,
+};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
new file mode 100644
index 000000000000..98fa81e08b58
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -0,0 +1,17 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_ETHTOOL_H
+#define BNXT_ETHTOOL_H
+
+extern const struct ethtool_ops bnxt_ethtool_ops;
+
+u32 bnxt_fw_to_ethtool_speed(u16);
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
new file mode 100644
index 000000000000..e0aac65c6d82
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
@@ -0,0 +1,104 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef __BNXT_FW_HDR_H__
+#define __BNXT_FW_HDR_H__
+
+#define BNXT_FIRMWARE_BIN_SIGNATURE	0x1a4d4342	/* "BCM"+0x1a */
+
+enum SUPPORTED_FAMILY {
+	DEVICE_5702_3_4_FAMILY,		/* 0  - Denali, Vinson, K2 */
+	DEVICE_5705_FAMILY,		/* 1  - Bachelor */
+	DEVICE_SHASTA_FAMILY,		/* 2  - 5751 */
+	DEVICE_5706_FAMILY,		/* 3  - Teton */
+	DEVICE_5714_FAMILY,		/* 4  - Hamilton */
+	DEVICE_STANFORD_FAMILY,		/* 5  - 5755 */
+	DEVICE_STANFORD_ME_FAMILY,	/* 6  - 5756 */
+	DEVICE_SOLEDAD_FAMILY,		/* 7  - 5761[E] */
+	DEVICE_CILAI_FAMILY,		/* 8  - 57780/60/90/91 */
+	DEVICE_ASPEN_FAMILY,		/* 9  - 57781/85/61/65/91/95 */
+	DEVICE_ASPEN_PLUS_FAMILY,	/* 10 - 57786 */
+	DEVICE_LOGAN_FAMILY,		/* 11 - Any device in the Logan family */
+	DEVICE_LOGAN_5762,		/* 12 - Logan Enterprise (aka Columbia) */
+	DEVICE_LOGAN_57767,		/* 13 - Logan Client */
+	DEVICE_LOGAN_57787,		/* 14 - Logan Consumer */
+	DEVICE_LOGAN_5725,		/* 15 - Logan Server (TruManage-enabled) */
+	DEVICE_SAWTOOTH_FAMILY,		/* 16 - 5717/18 */
+	DEVICE_COTOPAXI_FAMILY,		/* 17 - 5719 */
+	DEVICE_SNAGGLETOOTH_FAMILY,	/* 18 - 5720 */
+	DEVICE_CUMULUS_FAMILY,		/* 19 - Cumulus/Whitney */
+	MAX_DEVICE_FAMILY
+};
+
+enum SUPPORTED_CODE {
+	CODE_ASF1,		/* 0  - ASF VERSION 1.03 <deprecated> */
+	CODE_ASF2,		/* 1  - ASF VERSION 2.00 <deprecated> */
+	CODE_PASSTHRU,		/* 2  - PassThru <deprecated> */
+	CODE_PT_SEC,		/* 3  - PassThru with security <deprecated> */
+	CODE_UMP,		/* 4  - UMP <deprecated> */
+	CODE_BOOT,		/* 5  - Bootcode */
+	CODE_DASH,		/* 6  - TruManage (DASH + ASF + PMCI)
+				 * Management firmware
+				 */
+	CODE_MCTP_PASSTHRU,	/* 7  - NCSI / MCTP pass-through firmware */
+	CODE_PM_OFFLOAD,	/* 8  - Power-Management Proxy Offload
+				 * firmware
+				 */
+	CODE_MDNS_SD_OFFLOAD,	/* 9  - Multicast DNS Service Discovery Proxy
+				 * Offload firmware
+				 */
+	CODE_DISC_OFFLOAD,	/* 10 - Discovery Offload firmware */
+	CODE_MUSTANG,		/* 11 - I2C Error reporting APE firmware
+				 * <deprecated>
+				 */
+	CODE_ARP_BATCH,		/* 12 - ARP Batch firmware */
+	CODE_SMASH,		/* 13 - TruManage (SMASH + DCMI/IPMI + PMCI)
+				 * Management firmware
+				 */
+	CODE_APE_DIAG,		/* 14 - APE Test Diag firmware */
+	CODE_APE_PATCH,		/* 15 - APE Patch firmware */
+	CODE_TANG_PATCH,	/* 16 - TANG Patch firmware */
+	CODE_KONG_FW,		/* 17 - KONG firmware */
+	CODE_KONG_PATCH,	/* 18 - KONG Patch firmware */
+	CODE_BONO_FW,		/* 19 - BONO firmware */
+	CODE_BONO_PATCH,	/* 20 - BONO Patch firmware */
+
+	MAX_CODE_TYPE,
+};
+
+enum SUPPORTED_MEDIA {
+	MEDIA_COPPER,		/* 0 */
+	MEDIA_FIBER,		/* 1 */
+	MEDIA_NONE,		/* 2 */
+	MEDIA_COPPER_FIBER,	/* 3 */
+	MAX_MEDIA_TYPE,
+};
+
+struct bnxt_fw_header {
+	__le32 signature;	/* contains the constant value of
+				 * BNXT_FIRMWARE_BIN_SIGNATURE
+				 */
+	u8 flags;		/* reserved for ChiMP use */
+	u8 code_type;		/* enum SUPPORTED_CODE */
+	u8 device;		/* enum SUPPORTED_FAMILY */
+	u8 media;		/* enum SUPPORTED_MEDIA */
+	u8 version[16];		/* the null-terminated version string
+				 * identifying the file; it is copied from
+				 * the binary file's version string
+				 */
+	u8 build;
+	u8 revision;
+	u8 minor_ver;
+	u8 major_ver;
+};
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
new file mode 100644
index 000000000000..70fc8253c07f
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -0,0 +1,4046 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_HSI_H
+#define BNXT_HSI_H
+
+/* per-context HW statistics -- chip view */
+struct ctx_hw_stats {
+	__le64 rx_ucast_pkts;
+	__le64 rx_mcast_pkts;
+	__le64 rx_bcast_pkts;
+	__le64 rx_discard_pkts;
+	__le64 rx_drop_pkts;
+	__le64 rx_ucast_bytes;
+	__le64 rx_mcast_bytes;
+	__le64 rx_bcast_bytes;
+	__le64 tx_ucast_pkts;
+	__le64 tx_mcast_pkts;
+	__le64 tx_bcast_pkts;
+	__le64 tx_discard_pkts;
+	__le64 tx_drop_pkts;
+	__le64 tx_ucast_bytes;
+	__le64 tx_mcast_bytes;
+	__le64 tx_bcast_bytes;
+	__le64 tpa_pkts;
+	__le64 tpa_bytes;
+	__le64 tpa_events;
+	__le64 tpa_aborts;
+};
+
+/* Statistics Ejection Buffer Completion Record (16 bytes) */
+struct eject_cmpl {
+	__le16 type;
+	#define EJECT_CMPL_TYPE_MASK	0x3fUL
+	#define EJECT_CMPL_TYPE_SFT	0
+	#define EJECT_CMPL_TYPE_STAT_EJECT	(0x1aUL << 0)
+	__le16 len;
+	__le32 opaque;
+	__le32 v;
+	#define EJECT_CMPL_V	0x1UL
+	__le32 unused_2;
+};
+
+/* HWRM Completion Record (16 bytes) */
+struct hwrm_cmpl {
+	__le16 type;
+	#define HWRM_CMPL_TYPE_MASK	0x3fUL
+	#define HWRM_CMPL_TYPE_SFT	0
+	#define HWRM_CMPL_TYPE_HWRM_DONE	(0x20UL << 0)
+	__le16 sequence_id;
+	__le32 unused_1;
+	__le32 v;
+	#define HWRM_CMPL_V	0x1UL
+	__le32 unused_3;
+};
+
+/* HWRM Forwarded Request (16 bytes) */
+struct hwrm_fwd_req_cmpl {
+	__le16 req_len_type;
+	#define HWRM_FWD_REQ_CMPL_TYPE_MASK	0x3fUL
+	#define HWRM_FWD_REQ_CMPL_TYPE_SFT	0
+	#define HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ	(0x22UL << 0)
+	#define HWRM_FWD_REQ_CMPL_REQ_LEN_MASK	0xffc0UL
+	#define HWRM_FWD_REQ_CMPL_REQ_LEN_SFT	6
+	__le16 source_id;
+	__le32 unused_0;
+	__le32 req_buf_addr_v[2];
+	#define HWRM_FWD_REQ_CMPL_V	0x1UL
+	#define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_MASK	0xfffffffeUL
+	#define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_SFT	1
+};
+
+/* HWRM Forwarded Response (16 bytes) */
+struct hwrm_fwd_resp_cmpl {
+	__le16 type;
+	#define HWRM_FWD_RESP_CMPL_TYPE_MASK	0x3fUL
+	#define HWRM_FWD_RESP_CMPL_TYPE_SFT	0
+	#define HWRM_FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP	(0x24UL << 0)
+	__le16
source_id; + __le16 resp_len; + __le16 unused_1; + __le32 resp_buf_addr_v[2]; + #define HWRM_FWD_RESP_CMPL_V 0x1UL + #define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL + #define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1 +}; + +/* HWRM Asynchronous Event Completion Record (16 bytes) */ +struct hwrm_async_event_cmpl { + __le16 type; + #define HWRM_ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL + #define HWRM_ASYNC_EVENT_CMPL_TYPE_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) + __le16 event_id; + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE (0x0UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE (0x1UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE (0x2UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD (0x20UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR (0x30UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR (0xffUL << 0) + __le32 event_data2; + u8 opaque_v; + #define HWRM_ASYNC_EVENT_CMPL_V 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL + #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_SFT 1 + u8 unused_1[3]; + __le32 event_data1; +}; + +/* HWRM Asynchronous Event Completion Record for link status change (16 bytes) */ +struct hwrm_async_event_cmpl_link_status_change { + __le16 type; + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) + __le16 event_id; + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE (0x0UL << 0) + __le32 event_data2; + u8 opaque_v; + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1 + u8 unused_1[3]; + __le32 event_data1; + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_UP 0x1UL +}; + +/* HWRM Asynchronous Event Completion Record for link MTU change (16 bytes) */ +struct hwrm_async_event_cmpl_link_mtu_change { + __le16 type; + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK 0x3fUL + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) + __le16 event_id; + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE (0x1UL << 0) + __le32 event_data2; + u8 opaque_v; + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK 0xfeUL + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT 1 + u8 unused_1[3]; + __le32 event_data1; + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK 0xffffUL + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0 +}; + +/* HWRM Asynchronous Event Completion Record for link speed change (16 bytes) */ +struct hwrm_async_event_cmpl_link_speed_change { + __le16 type; + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK 0x3fUL + #define 
HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) + __le16 event_id; + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE (0x2UL << 0) + __le32 event_data2; + u8 opaque_v; + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK 0xfeUL + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT 1 + u8 unused_1[3]; + __le32 event_data1; + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK 0xfffeUL + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT 1 + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100MB (0x1UL << 1) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_1GB (0xaUL << 1) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2GB (0x14UL << 1) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2_5GB (0x19UL << 1) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10GB (0x64UL << 1) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_20GB (0xc8UL << 1) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16 +}; + +/* HWRM Asynchronous Event Completion Record for DCB Config change (16 bytes) */ +struct hwrm_async_event_cmpl_dcb_config_change { + __le16 type; + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK 0x3fUL + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) + __le16 event_id; + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0) + __le32 event_data2; + u8 opaque_v; + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK 0xfeUL + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT 1 + u8 unused_1[3]; + __le32 event_data1; + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0 +}; + +/* HWRM Asynchronous Event Completion Record for port connection not allowed (16 bytes) */ +struct hwrm_async_event_cmpl_port_conn_not_allowed { + __le16 type; + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) + __le16 event_id; + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0) + __le32 event_data2; + u8 opaque_v; + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1 + u8 unused_1[3]; + 
__le32 event_data1; + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0 +}; + +/* HWRM Asynchronous Event Completion Record for Function Driver Unload (16 bytes) */ +struct hwrm_async_event_cmpl_func_drvr_unload { + __le16 type; + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK 0x3fUL + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) + __le16 event_id; + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0) + __le32 event_data2; + u8 opaque_v; + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT 1 + u8 unused_1[3]; + __le32 event_data1; + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0 +}; + +/* HWRM Asynchronous Event Completion Record for Function Driver load (16 bytes) */ +struct hwrm_async_event_cmpl_func_drvr_load { + __le16 type; + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK 0x3fUL + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) + __le16 event_id; + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0) + __le32 event_data2; + u8 opaque_v; + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK 0xfeUL + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT 1 + u8 unused_1[3]; + __le32 event_data1; + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0 +}; + +/* HWRM Asynchronous Event Completion Record for PF Driver Unload (16 bytes) */ +struct hwrm_async_event_cmpl_pf_drvr_unload { + __le16 type; + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) + __le16 event_id; + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0) + __le32 event_data2; + u8 opaque_v; + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1 + u8 unused_1[3]; + __le32 event_data1; + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0 +}; + +/* HWRM Asynchronous Event Completion Record for PF Driver load (16 bytes) */ +struct hwrm_async_event_cmpl_pf_drvr_load { + __le16 type; + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK 0x3fUL + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) + __le16 event_id; + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD (0x20UL << 0) + __le32 event_data2; + u8 opaque_v; + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK 0xfeUL + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT 1 + u8 unused_1[3]; + __le32 event_data1; + #define 
HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for VF FLR (16 bytes) */
+struct hwrm_async_event_cmpl_vf_flr {
+	__le16 type;
+	#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK	0x3fUL
+	#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT	0
+	#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+	__le16 event_id;
+	#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR	(0x30UL << 0)
+	__le32 event_data2;
+	u8 opaque_v;
+	#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_V	0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK	0xfeUL
+	#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT	1
+	u8 unused_1[3];
+	__le32 event_data1;
+	#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK 0xffffUL
+	#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for VF MAC Addr change (16 bytes) */
+struct hwrm_async_event_cmpl_vf_mac_addr_change {
+	__le16 type;
+	#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK 0x3fUL
+	#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT 0
+	#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+	__le16 event_id;
+	#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0)
+	__le32 event_data2;
+	u8 opaque_v;
+	#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V	0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK 0xfeUL
+	#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT 1
+	u8 unused_1[3];
+	__le32 event_data1;
+	#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK 0xffffUL
+	#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for HWRM Error (16 bytes) */
+struct hwrm_async_event_cmpl_hwrm_error {
+	__le16 type;
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK	0x3fUL
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT	0
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+	__le16 event_id;
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR (0xffUL << 0)
+	__le32 event_data2;
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING (0x0UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL (0x1UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL (0x2UL << 0)
+	u8 opaque_v;
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V	0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK	0xfeUL
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT	1
+	u8 unused_1[3];
+	__le32 event_data1;
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
+};
+
+/* HW Resource Manager Specification 0.7.8 */
+#define HWRM_VERSION_MAJOR	0
+#define HWRM_VERSION_MINOR	7
+#define HWRM_VERSION_UPDATE	8
+
+#define HWRM_VERSION_STR	"0.7.8"
+/* Following is the signature for HWRM message field that indicates not
+ * applicable (All F's). Need to cast it to the size of the field if needed.
+ */
+#define HWRM_NA_SIGNATURE	((__le32)(-1))
+#define HWRM_MAX_REQ_LEN	(128)	/* hwrm_func_buf_rgtr */
+#define HWRM_MAX_RESP_LEN	(176)	/* hwrm_func_qstats */
+#define HW_HASH_INDEX_SIZE	0x80	/* 7 bit indirection table index */
+#define HW_HASH_KEY_SIZE	40
+#define HWRM_RESP_VALID_KEY	1	/* valid key for HWRM response */
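
Taken together, the pieces above define a simple mailbox protocol: the driver builds a request that leads with struct input, firmware DMAs a reply that leads with struct output into host memory, and the trailing valid byte, written last, flips to HWRM_RESP_VALID_KEY once the rest of the response is complete; bitfields throughout the file are decoded with paired _MASK/_SFT constants. A minimal kernel-style sketch of both conventions follows (the *_example helpers are illustrative, not the driver's actual code):

#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Poll the DMA'd response until firmware sets the valid byte, then
 * report the completion status from the generic error header.
 */
static int hwrm_poll_valid_example(struct hwrm_err_output *resp,
				   int timeout_us)
{
	while (timeout_us--) {
		/* The valid byte is written last, so once it matches
		 * HWRM_RESP_VALID_KEY the rest of the response is stable.
		 */
		if (READ_ONCE(resp->valid) == HWRM_RESP_VALID_KEY)
			return le16_to_cpu(resp->error_code) ? -EIO : 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

/* The usual _MASK/_SFT idiom: extract the function ID carried in a
 * PF driver unload async event.
 */
static u16 pf_unload_func_id_example(struct hwrm_async_event_cmpl_pf_drvr_unload *ev)
{
	return (le32_to_cpu(ev->event_data1) &
		HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK) >>
	       HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT;
}
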
+/* Input (16 bytes) */
+struct input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+};
+
+/* Output (8 bytes) */
+struct output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+};
+
+/* Command numbering (8 bytes) */
+struct cmd_nums {
+	__le16 req_type;
+	#define HWRM_VER_GET	(0x0UL)
+	#define HWRM_FUNC_DISABLE	(0x10UL)
+	#define HWRM_FUNC_RESET	(0x11UL)
+	#define HWRM_FUNC_GETFID	(0x12UL)
+	#define HWRM_FUNC_VF_ALLOC	(0x13UL)
+	#define HWRM_FUNC_VF_FREE	(0x14UL)
+	#define HWRM_FUNC_QCAPS	(0x15UL)
+	#define HWRM_FUNC_QCFG	(0x16UL)
+	#define HWRM_FUNC_CFG	(0x17UL)
+	#define HWRM_FUNC_QSTATS	(0x18UL)
+	#define HWRM_FUNC_CLR_STATS	(0x19UL)
+	#define HWRM_FUNC_DRV_UNRGTR	(0x1aUL)
+	#define HWRM_FUNC_VF_RESC_FREE	(0x1bUL)
+	#define HWRM_FUNC_VF_VNIC_IDS_QUERY	(0x1cUL)
+	#define HWRM_FUNC_DRV_RGTR	(0x1dUL)
+	#define HWRM_FUNC_DRV_QVER	(0x1eUL)
+	#define HWRM_FUNC_BUF_RGTR	(0x1fUL)
+	#define HWRM_FUNC_VF_CFG	(0x20UL)
+	#define HWRM_PORT_PHY_CFG	(0x20UL)
+	#define HWRM_PORT_MAC_CFG	(0x21UL)
+	#define HWRM_PORT_ENABLE	(0x22UL)
+	#define HWRM_PORT_QSTATS	(0x23UL)
+	#define HWRM_PORT_LPBK_QSTATS	(0x24UL)
+	#define HWRM_PORT_CLR_STATS	(0x25UL)
+	#define HWRM_PORT_LPBK_CLR_STATS	(0x26UL)
+	#define HWRM_PORT_PHY_QCFG	(0x27UL)
+	#define HWRM_PORT_MAC_QCFG	(0x28UL)
+	#define HWRM_PORT_BLINK_LED	(0x29UL)
+	#define HWRM_QUEUE_QPORTCFG	(0x30UL)
+	#define HWRM_QUEUE_QCFG	(0x31UL)
+	#define HWRM_QUEUE_CFG	(0x32UL)
+	#define HWRM_QUEUE_BUFFERS_QCFG	(0x33UL)
+	#define HWRM_QUEUE_BUFFERS_CFG	(0x34UL)
+	#define HWRM_QUEUE_PFCENABLE_QCFG	(0x35UL)
+	#define HWRM_QUEUE_PFCENABLE_CFG	(0x36UL)
+	#define HWRM_QUEUE_PRI2COS_QCFG	(0x37UL)
+	#define HWRM_QUEUE_PRI2COS_CFG	(0x38UL)
+	#define HWRM_QUEUE_COS2BW_QCFG	(0x39UL)
+	#define HWRM_QUEUE_COS2BW_CFG	(0x3aUL)
+	#define HWRM_VNIC_ALLOC	(0x40UL)
+	#define HWRM_VNIC_FREE	(0x41UL)
+	#define HWRM_VNIC_CFG	(0x42UL)
+	#define HWRM_VNIC_QCFG	(0x43UL)
+	#define HWRM_VNIC_TPA_CFG	(0x44UL)
+	#define HWRM_VNIC_TPA_QCFG	(0x45UL)
+	#define HWRM_VNIC_RSS_CFG	(0x46UL)
+	#define HWRM_VNIC_RSS_QCFG	(0x47UL)
+	#define HWRM_VNIC_PLCMODES_CFG	(0x48UL)
+	#define HWRM_VNIC_PLCMODES_QCFG	(0x49UL)
+	#define HWRM_RING_ALLOC	(0x50UL)
+	#define HWRM_RING_FREE	(0x51UL)
+	#define HWRM_RING_CMPL_RING_QAGGINT_PARAMS	(0x52UL)
+	#define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS	(0x53UL)
+	#define HWRM_RING_RESET	(0x5eUL)
+	#define HWRM_RING_GRP_ALLOC	(0x60UL)
+	#define HWRM_RING_GRP_FREE	(0x61UL)
+	#define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC	(0x70UL)
+	#define HWRM_VNIC_RSS_COS_LB_CTX_FREE	(0x71UL)
+	#define HWRM_ARB_GRP_ALLOC	(0x80UL)
+	#define HWRM_ARB_GRP_CFG	(0x81UL)
+	#define HWRM_CFA_L2_FILTER_ALLOC	(0x90UL)
+	#define HWRM_CFA_L2_FILTER_FREE	(0x91UL)
+	#define HWRM_CFA_L2_FILTER_CFG	(0x92UL)
+	#define HWRM_CFA_L2_SET_RX_MASK	(0x93UL)
+	#define HWRM_CFA_L2_SET_BCASTMCAST_MIRRORING	(0x94UL)
+	#define HWRM_CFA_TUNNEL_FILTER_ALLOC	(0x95UL)
+	#define HWRM_CFA_TUNNEL_FILTER_FREE	(0x96UL)
+	#define HWRM_CFA_ENCAP_RECORD_ALLOC	(0x97UL)
+	#define HWRM_CFA_ENCAP_RECORD_FREE	(0x98UL)
+	#define HWRM_CFA_NTUPLE_FILTER_ALLOC	(0x99UL)
+	#define HWRM_CFA_NTUPLE_FILTER_FREE	(0x9aUL)
+	#define HWRM_CFA_NTUPLE_FILTER_CFG	(0x9bUL)
+	#define HWRM_TUNNEL_DST_PORT_QUERY	(0xa0UL)
+	#define HWRM_TUNNEL_DST_PORT_ALLOC	(0xa1UL)
+	#define HWRM_TUNNEL_DST_PORT_FREE	(0xa2UL)
+	#define HWRM_STAT_CTX_ALLOC	(0xb0UL)
+	#define HWRM_STAT_CTX_FREE	(0xb1UL)
+
#define HWRM_STAT_CTX_QUERY (0xb2UL) + #define HWRM_STAT_CTX_CLR_STATS (0xb3UL) + #define HWRM_FW_RESET (0xc0UL) + #define HWRM_FW_QSTATUS (0xc1UL) + #define HWRM_EXEC_FWD_RESP (0xd0UL) + #define HWRM_REJECT_FWD_RESP (0xd1UL) + #define HWRM_FWD_RESP (0xd2UL) + #define HWRM_FWD_ASYNC_EVENT_CMPL (0xd3UL) + #define HWRM_TEMP_MONITOR_QUERY (0xe0UL) + #define HWRM_MGMT_L2_FILTER_ALLOC (0x100UL) + #define HWRM_MGMT_L2_FILTER_FREE (0x101UL) + #define HWRM_DBG_READ_DIRECT (0xff10UL) + #define HWRM_DBG_READ_INDIRECT (0xff11UL) + #define HWRM_DBG_WRITE_DIRECT (0xff12UL) + #define HWRM_DBG_WRITE_INDIRECT (0xff13UL) + #define HWRM_DBG_DUMP (0xff14UL) + #define HWRM_NVM_MODIFY (0xfff4UL) + #define HWRM_NVM_VERIFY_UPDATE (0xfff5UL) + #define HWRM_NVM_GET_DEV_INFO (0xfff6UL) + #define HWRM_NVM_ERASE_DIR_ENTRY (0xfff7UL) + #define HWRM_NVM_MOD_DIR_ENTRY (0xfff8UL) + #define HWRM_NVM_FIND_DIR_ENTRY (0xfff9UL) + #define HWRM_NVM_GET_DIR_ENTRIES (0xfffaUL) + #define HWRM_NVM_GET_DIR_INFO (0xfffbUL) + #define HWRM_NVM_RAW_DUMP (0xfffcUL) + #define HWRM_NVM_READ (0xfffdUL) + #define HWRM_NVM_WRITE (0xfffeUL) + #define HWRM_NVM_RAW_WRITE_BLK (0xffffUL) + __le16 unused_0[3]; +}; + +/* Return Codes (8 bytes) */ +struct ret_codes { + __le16 error_code; + #define HWRM_ERR_CODE_SUCCESS (0x0UL) + #define HWRM_ERR_CODE_FAIL (0x1UL) + #define HWRM_ERR_CODE_INVALID_PARAMS (0x2UL) + #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED (0x3UL) + #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR (0x4UL) + #define HWRM_ERR_CODE_INVALID_FLAGS (0x5UL) + #define HWRM_ERR_CODE_INVALID_ENABLES (0x6UL) + #define HWRM_ERR_CODE_HWRM_ERROR (0xfUL) + #define HWRM_ERR_CODE_UNKNOWN_ERR (0xfffeUL) + #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED (0xffffUL) + __le16 unused_0[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_err_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 opaque_0; + __le16 opaque_1; + u8 opaque_2; + u8 valid; +}; + +/* Port Tx Statistics Formats (408 bytes) */ +struct tx_port_stats { + __le64 tx_64b_frames; + __le64 tx_65b_127b_frames; + __le64 tx_128b_255b_frames; + __le64 tx_256b_511b_frames; + __le64 tx_512b_1023b_frames; + __le64 tx_1024b_1518_frames; + __le64 tx_good_vlan_frames; + __le64 tx_1519b_2047_frames; + __le64 tx_2048b_4095b_frames; + __le64 tx_4096b_9216b_frames; + __le64 tx_9217b_16383b_frames; + __le64 tx_good_frames; + __le64 tx_total_frames; + __le64 tx_ucast_frames; + __le64 tx_mcast_frames; + __le64 tx_bcast_frames; + __le64 tx_pause_frames; + __le64 tx_pfc_frames; + __le64 tx_jabber_frames; + __le64 tx_fcs_err_frames; + __le64 tx_control_frames; + __le64 tx_oversz_frames; + __le64 tx_single_dfrl_frames; + __le64 tx_multi_dfrl_frames; + __le64 tx_single_coll_frames; + __le64 tx_multi_coll_frames; + __le64 tx_late_coll_frames; + __le64 tx_excessive_coll_frames; + __le64 tx_frag_frames; + __le64 tx_err; + __le64 tx_tagged_frames; + __le64 tx_dbl_tagged_frames; + __le64 tx_runt_frames; + __le64 tx_fifo_underruns; + __le64 tx_pfc_ena_frames_pri0; + __le64 tx_pfc_ena_frames_pri1; + __le64 tx_pfc_ena_frames_pri2; + __le64 tx_pfc_ena_frames_pri3; + __le64 tx_pfc_ena_frames_pri4; + __le64 tx_pfc_ena_frames_pri5; + __le64 tx_pfc_ena_frames_pri6; + __le64 tx_pfc_ena_frames_pri7; + __le64 tx_eee_lpi_events; + __le64 tx_eee_lpi_duration; + __le64 tx_llfc_logical_msgs; + __le64 tx_hcfc_msgs; + __le64 tx_total_collisions; + __le64 tx_bytes; + __le64 tx_xthol_frames; + __le64 tx_stat_discard; + __le64 tx_stat_error; +}; + +/* Port Rx Statistics Formats (528 bytes) */ +struct rx_port_stats 
{ + __le64 rx_64b_frames; + __le64 rx_65b_127b_frames; + __le64 rx_128b_255b_frames; + __le64 rx_256b_511b_frames; + __le64 rx_512b_1023b_frames; + __le64 rx_1024b_1518_frames; + __le64 rx_good_vlan_frames; + __le64 rx_1519b_2047b_frames; + __le64 rx_2048b_4095b_frames; + __le64 rx_4096b_9216b_frames; + __le64 rx_9217b_16383b_frames; + __le64 rx_total_frames; + __le64 rx_ucast_frames; + __le64 rx_mcast_frames; + __le64 rx_bcast_frames; + __le64 rx_fcs_err_frames; + __le64 rx_ctrl_frames; + __le64 rx_pause_frames; + __le64 rx_pfc_frames; + __le64 rx_unsupported_opcode_frames; + __le64 rx_unsupported_da_pausepfc_frames; + __le64 rx_wrong_sa_frames; + __le64 rx_align_err_frames; + __le64 rx_oor_len_frames; + __le64 rx_code_err_frames; + __le64 rx_false_carrier_frames; + __le64 rx_ovrsz_frames; + __le64 rx_jbr_frames; + __le64 rx_mtu_err_frames; + __le64 rx_match_crc_frames; + __le64 rx_promiscuous_frames; + __le64 rx_tagged_frames; + __le64 rx_double_tagged_frames; + __le64 rx_trunc_frames; + __le64 rx_good_frames; + __le64 rx_pfc_xon2xoff_frames_pri0; + __le64 rx_pfc_xon2xoff_frames_pri1; + __le64 rx_pfc_xon2xoff_frames_pri2; + __le64 rx_pfc_xon2xoff_frames_pri3; + __le64 rx_pfc_xon2xoff_frames_pri4; + __le64 rx_pfc_xon2xoff_frames_pri5; + __le64 rx_pfc_xon2xoff_frames_pri6; + __le64 rx_pfc_xon2xoff_frames_pri7; + __le64 rx_pfc_ena_frames_pri0; + __le64 rx_pfc_ena_frames_pri1; + __le64 rx_pfc_ena_frames_pri2; + __le64 rx_pfc_ena_frames_pri3; + __le64 rx_pfc_ena_frames_pri4; + __le64 rx_pfc_ena_frames_pri5; + __le64 rx_pfc_ena_frames_pri6; + __le64 rx_pfc_ena_frames_pri7; + __le64 rx_sch_crc_err_frames; + __le64 rx_undrsz_frames; + __le64 rx_frag_frames; + __le64 rx_eee_lpi_events; + __le64 rx_eee_lpi_duration; + __le64 rx_llfc_physical_msgs; + __le64 rx_llfc_logical_msgs; + __le64 rx_llfc_msgs_with_crc_err; + __le64 rx_hcfc_msgs; + __le64 rx_hcfc_msgs_with_crc_err; + __le64 rx_bytes; + __le64 rx_runt_bytes; + __le64 rx_runt_frames; + __le64 rx_stat_discard; + __le64 rx_stat_err; +}; + +/* hwrm_ver_get */ +/* Input (24 bytes) */ +struct hwrm_ver_get_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 hwrm_intf_maj; + u8 hwrm_intf_min; + u8 hwrm_intf_upd; + u8 unused_0[5]; +}; + +/* Output (128 bytes) */ +struct hwrm_ver_get_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 hwrm_intf_maj; + u8 hwrm_intf_min; + u8 hwrm_intf_upd; + u8 hwrm_intf_rsvd; + u8 hwrm_fw_maj; + u8 hwrm_fw_min; + u8 hwrm_fw_bld; + u8 hwrm_fw_rsvd; + u8 ape_fw_maj; + u8 ape_fw_min; + u8 ape_fw_bld; + u8 ape_fw_rsvd; + u8 kong_fw_maj; + u8 kong_fw_min; + u8 kong_fw_bld; + u8 kong_fw_rsvd; + u8 tang_fw_maj; + u8 tang_fw_min; + u8 tang_fw_bld; + u8 tang_fw_rsvd; + u8 bono_fw_maj; + u8 bono_fw_min; + u8 bono_fw_bld; + u8 bono_fw_rsvd; + char hwrm_fw_name[16]; + char ape_fw_name[16]; + char kong_fw_name[16]; + char tang_fw_name[16]; + char bono_fw_name[16]; + __le16 chip_num; + u8 chip_rev; + u8 chip_metal; + u8 chip_bond_id; + u8 unused_0; + __le16 max_req_win_len; + __le16 max_resp_len; + __le16 def_req_timeout; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_func_disable */ +/* Input (24 bytes) */ +struct hwrm_func_disable_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_DISABLE_REQ_ENABLES_VF_ID_VALID 0x1UL + __le16 vf_id; + __le16 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_func_disable_output { + __le16 
error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_func_reset */ +/* Input (24 bytes) */ +struct hwrm_func_reset_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL + __le16 vf_id; + __le16 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_func_reset_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_func_getfid */ +/* Input (24 bytes) */ +struct hwrm_func_getfid_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL + __le16 pci_id; + __le16 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_func_getfid_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 fid; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 unused_4; + u8 valid; +}; + +/* hwrm_func_vf_alloc */ +/* Input (24 bytes) */ +struct hwrm_func_vf_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL + __le16 first_vf_id; + __le16 num_vfs; +}; + +/* Output (16 bytes) */ +struct hwrm_func_vf_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 first_vf_id; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 unused_4; + u8 valid; +}; + +/* hwrm_func_vf_free */ +/* Input (24 bytes) */ +struct hwrm_func_vf_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID 0x1UL + __le16 first_vf_id; + __le16 num_vfs; +}; + +/* Output (16 bytes) */ +struct hwrm_func_vf_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_func_vf_cfg */ +/* Input (24 bytes) */ +struct hwrm_func_vf_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL + #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL + __le16 mtu; + __le16 guest_vlan; +}; + +/* Output (16 bytes) */ +struct hwrm_func_vf_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_func_qcaps */ +/* Input (24 bytes) */ +struct hwrm_func_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + __le16 unused_0[3]; +}; + +/* Output (80 bytes) */ +struct hwrm_func_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 fid; + __le16 port_id; + __le32 flags; + #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL + #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL + u8 perm_mac_address[6]; + __le16 max_rsscos_ctx; + __le16 max_cmpl_rings; + __le16 max_tx_rings; + __le16 max_rx_rings; + __le16 max_l2_ctxs; + __le16 max_vnics; + __le16 first_vf_id; + __le16 max_vfs; + __le16 max_stat_ctx; + __le32 
max_encap_records; + __le32 max_decap_records; + __le32 max_tx_em_flows; + __le32 max_tx_wm_flows; + __le32 max_rx_em_flows; + __le32 max_rx_wm_flows; + __le32 max_mcast_filters; + __le32 max_flow_id; + __le32 max_hw_ring_grps; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; +}; + +/* hwrm_func_cfg */ +/* Input (88 bytes) */ +struct hwrm_func_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vf_id; + u8 unused_0; + u8 unused_1; + __le32 flags; + #define FUNC_CFG_REQ_FLAGS_PROM_MODE 0x1UL + #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK 0x2UL + #define FUNC_CFG_REQ_FLAGS_SRC_IP_ADDR_CHECK 0x4UL + #define FUNC_CFG_REQ_FLAGS_VLAN_PRI_MATCH 0x8UL + #define FUNC_CFG_REQ_FLAGS_DFLT_PRI_NOMATCH 0x10UL + #define FUNC_CFG_REQ_FLAGS_DISABLE_PAUSE 0x20UL + #define FUNC_CFG_REQ_FLAGS_DISABLE_STP 0x40UL + #define FUNC_CFG_REQ_FLAGS_DISABLE_LLDP 0x80UL + #define FUNC_CFG_REQ_FLAGS_DISABLE_PTPV2 0x100UL + __le32 enables; + #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL + #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL + #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL + #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL + #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL + #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL + #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL + #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL + #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL + #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL + #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL + #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL + #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL + #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL + #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL + #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL + #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL + #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL + #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL + #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL + __le16 mtu; + __le16 mru; + __le16 num_rsscos_ctxs; + __le16 num_cmpl_rings; + __le16 num_tx_rings; + __le16 num_rx_rings; + __le16 num_l2_ctxs; + __le16 num_vnics; + __le16 num_stat_ctxs; + __le16 num_hw_ring_grps; + u8 dflt_mac_addr[6]; + __le16 dflt_vlan; + __be32 dflt_ip_addr[4]; + __le32 min_bw; + __le32 max_bw; + __le16 async_event_cr; + u8 vlan_antispoof_mode; + #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK (0x0UL << 0) + #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN (0x1UL << 0) + #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE (0x2UL << 0) + #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN (0x3UL << 0) + u8 allowed_vlan_pris; + #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_NOCHECK (0x0UL << 0) + #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_VALIDATE_VLAN (0x1UL << 0) + #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_INSERT_IF_VLANDNE (0x2UL << 0) + #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_INSERT_OR_OVERRIDE_VLAN (0x3UL << 0) + u8 evb_mode; + #define FUNC_CFG_REQ_EVB_MODE_NO_EVB (0x0UL << 0) + #define FUNC_CFG_REQ_EVB_MODE_VEB (0x1UL << 0) + #define FUNC_CFG_REQ_EVB_MODE_VEPA (0x2UL << 0) + u8 unused_2; + __le16 num_mcast_filters; +}; + +/* Output (16 bytes) */ +struct hwrm_func_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_func_qstats */ +/* Input (24 bytes) */ +struct hwrm_func_qstats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; 
+ __le16 target_id; + __le64 resp_addr; + __le16 fid; + __le16 unused_0[3]; +}; + +/* Output (176 bytes) */ +struct hwrm_func_qstats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 tx_ucast_pkts; + __le64 tx_mcast_pkts; + __le64 tx_bcast_pkts; + __le64 tx_err_pkts; + __le64 tx_drop_pkts; + __le64 tx_ucast_bytes; + __le64 tx_mcast_bytes; + __le64 tx_bcast_bytes; + __le64 rx_ucast_pkts; + __le64 rx_mcast_pkts; + __le64 rx_bcast_pkts; + __le64 rx_err_pkts; + __le64 rx_drop_pkts; + __le64 rx_ucast_bytes; + __le64 rx_mcast_bytes; + __le64 rx_bcast_bytes; + __le64 rx_agg_pkts; + __le64 rx_agg_bytes; + __le64 rx_agg_events; + __le64 rx_agg_aborts; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_func_clr_stats */ +/* Input (24 bytes) */ +struct hwrm_func_clr_stats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + __le16 unused_0[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_func_clr_stats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_func_vf_resc_free */ +/* Input (24 bytes) */ +struct hwrm_func_vf_resc_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vf_id; + __le16 unused_0[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_func_vf_resc_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_func_vf_vnic_ids_query */ +/* Input (32 bytes) */ +struct hwrm_func_vf_vnic_ids_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vf_id; + u8 unused_0; + u8 unused_1; + __le32 max_vnic_id_cnt; + __le64 vnic_id_tbl_addr; +}; + +/* Output (16 bytes) */ +struct hwrm_func_vf_vnic_ids_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 vnic_id_cnt; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; +}; + +/* hwrm_func_drv_rgtr */ +/* Input (80 bytes) */ +struct hwrm_func_drv_rgtr_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL + #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL + __le32 enables; + #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL + #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL + #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP 0x4UL + #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL + #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL + __le16 os_type; + u8 ver_maj; + u8 ver_min; + u8 ver_upd; + u8 unused_0; + __le16 unused_1; + __le32 timestamp; + __le32 unused_2; + __le32 vf_req_fwd[8]; + __le32 async_event_fwd[8]; +}; + +/* Output (16 bytes) */ +struct hwrm_func_drv_rgtr_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_func_drv_unrgtr */ +/* Input (24 bytes) */ +struct hwrm_func_drv_unrgtr_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN 0x1UL + __le32 unused_0; +}; + +/* Output (16 bytes) */ +struct 
hwrm_func_drv_unrgtr_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_func_buf_rgtr */ +/* Input (128 bytes) */ +struct hwrm_func_buf_rgtr_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID 0x1UL + #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR 0x2UL + __le16 vf_id; + __le16 req_buf_num_pages; + __le16 req_buf_page_size; + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B (0x4UL << 0) + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K (0xcUL << 0) + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K (0xdUL << 0) + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K (0x10UL << 0) + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M (0x16UL << 0) + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M (0x17UL << 0) + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G (0x1eUL << 0) + __le16 req_buf_len; + __le16 resp_buf_len; + u8 unused_0; + u8 unused_1; + __le64 req_buf_page_addr0; + __le64 req_buf_page_addr1; + __le64 req_buf_page_addr2; + __le64 req_buf_page_addr3; + __le64 req_buf_page_addr4; + __le64 req_buf_page_addr5; + __le64 req_buf_page_addr6; + __le64 req_buf_page_addr7; + __le64 req_buf_page_addr8; + __le64 req_buf_page_addr9; + __le64 error_buf_addr; + __le64 resp_buf_addr; +}; + +/* Output (16 bytes) */ +struct hwrm_func_buf_rgtr_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_func_drv_qver */ +/* Input (24 bytes) */ +struct hwrm_func_drv_qver_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_DRV_QVER_REQ_ENABLES_OS_TYPE_VALID 0x1UL + #define FUNC_DRV_QVER_REQ_ENABLES_VER_VALID 0x2UL + __le16 fid; + __le16 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_func_drv_qver_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 os_type; + u8 ver_maj; + u8 ver_min; + u8 ver_upd; + u8 unused_0; + u8 unused_1; + u8 valid; +}; + +/* hwrm_port_phy_cfg */ +/* Input (48 bytes) */ +struct hwrm_port_phy_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL + #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DOWN 0x2UL + #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL + #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL + __le32 enables; + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL + #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL + #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL + #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL + #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL + __le16 port_id; + __le16 force_link_speed; + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB (0x1UL << 0) + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB (0xaUL << 0) + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB (0x14UL << 0) + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB (0x19UL << 0) + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB (0x64UL << 0) + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB (0xc8UL << 0) 
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB (0xfaUL << 0) + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB (0x190UL << 0) + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB (0x1f4UL << 0) + u8 auto_mode; + #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE (0x0UL << 0) + #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS (0x1UL << 0) + #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED (0x2UL << 0) + #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0) + #define PORT_PHY_CFG_REQ_AUTO_MODE_MASK (0x4UL << 0) + u8 auto_duplex; + #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF (0x0UL << 0) + #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL (0x1UL << 0) + #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH (0x2UL << 0) + u8 auto_pause; + #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL + #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL + u8 unused_0; + __le16 auto_link_speed; + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB (0x1UL << 0) + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB (0xaUL << 0) + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB (0x14UL << 0) + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB (0x19UL << 0) + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB (0x64UL << 0) + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB (0xc8UL << 0) + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB (0xfaUL << 0) + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB (0x190UL << 0) + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB (0x1f4UL << 0) + __le16 auto_link_speed_mask; + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB 0x8UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB 0x10UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB 0x40UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB 0x80UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL + u8 wirespeed; + #define PORT_PHY_CFG_REQ_WIRESPEED_OFF (0x0UL << 0) + #define PORT_PHY_CFG_REQ_WIRESPEED_ON (0x1UL << 0) + u8 lpbk; + #define PORT_PHY_CFG_REQ_LPBK_NONE (0x0UL << 0) + #define PORT_PHY_CFG_REQ_LPBK_LOCAL (0x1UL << 0) + #define PORT_PHY_CFG_REQ_LPBK_REMOTE (0x2UL << 0) + u8 force_pause; + #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL + #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL + u8 unused_1; + __le32 preemphasis; + __le32 unused_2; +}; + +/* Output (16 bytes) */ +struct hwrm_port_phy_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_port_phy_qcfg */ +/* Input (24 bytes) */ +struct hwrm_port_phy_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + __le16 unused_0[3]; +}; + +/* Output (48 bytes) */ +struct hwrm_port_phy_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 link; + #define PORT_PHY_QCFG_RESP_LINK_NO_LINK (0x0UL << 0) + #define PORT_PHY_QCFG_RESP_LINK_SIGNAL (0x1UL << 0) + #define PORT_PHY_QCFG_RESP_LINK_LINK (0x2UL << 0) + u8 unused_0; + __le16 link_speed; + #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB (0x1UL << 0) + #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB (0xaUL << 0) + #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB (0x14UL << 0) + #define 
PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB (0x19UL << 0) + #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB (0x64UL << 0) + #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB (0xc8UL << 0) + #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB (0xfaUL << 0) + #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB (0x190UL << 0) + #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB (0x1f4UL << 0) + u8 duplex; + #define PORT_PHY_QCFG_RESP_DUPLEX_HALF (0x0UL << 0) + #define PORT_PHY_QCFG_RESP_DUPLEX_FULL (0x1UL << 0) + u8 pause; + #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL + #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL + __le16 support_speeds; + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD 0x1UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD 0x4UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB 0x8UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB 0x10UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB 0x20UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB 0x40UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB 0x80UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL + __le16 force_link_speed; + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB (0x1UL << 0) + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB (0xaUL << 0) + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB (0x14UL << 0) + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB (0x19UL << 0) + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB (0x64UL << 0) + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB (0xc8UL << 0) + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB (0xfaUL << 0) + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB (0x190UL << 0) + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB (0x1f4UL << 0) + u8 auto_mode; + #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE (0x0UL << 0) + #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS (0x1UL << 0) + #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED (0x2UL << 0) + #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0) + #define PORT_PHY_QCFG_RESP_AUTO_MODE_MASK (0x4UL << 0) + u8 auto_pause; + #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL + #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL + __le16 auto_link_speed; + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB (0x1UL << 0) + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB (0xaUL << 0) + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB (0x14UL << 0) + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB (0x19UL << 0) + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB (0x64UL << 0) + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB (0xc8UL << 0) + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB (0xfaUL << 0) + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB (0x190UL << 0) + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB (0x1f4UL << 0) + __le16 auto_link_speed_mask; + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB 0x8UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB 0x10UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB 0x40UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB 0x80UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL + #define 
PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL + u8 wirespeed; + #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF (0x0UL << 0) + #define PORT_PHY_QCFG_RESP_WIRESPEED_ON (0x1UL << 0) + u8 lpbk; + #define PORT_PHY_QCFG_RESP_LPBK_NONE (0x0UL << 0) + #define PORT_PHY_QCFG_RESP_LPBK_LOCAL (0x1UL << 0) + #define PORT_PHY_QCFG_RESP_LPBK_REMOTE (0x2UL << 0) + u8 force_pause; + #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL + #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL + u8 duplex_setting; + #define PORT_PHY_QCFG_RESP_DUPLEX_SETTING_HALF (0x0UL << 0) + #define PORT_PHY_QCFG_RESP_DUPLEX_SETTING_FULL (0x1UL << 0) + __le32 preemphasis; + u8 phy_maj; + u8 phy_min; + u8 phy_bld; + u8 phy_type; + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR4 (0x1UL << 0) + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 (0x2UL << 0) + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR4 (0x3UL << 0) + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR4 (0x4UL << 0) + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 (0x5UL << 0) + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX4 (0x6UL << 0) + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR (0x7UL << 0) + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET (0x8UL << 0) + u8 media_type; + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP (0x1UL << 0) + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC (0x2UL << 0) + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE (0x3UL << 0) + u8 transceiver_type; + #define PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_INTERNAL (0x1UL << 0) + #define PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_EXTERNAL (0x2UL << 0) + u8 phy_addr; + #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL + #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0 + u8 unused_2; + __le16 link_partner_adv_speeds; + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD 0x4UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB 0x8UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB 0x10UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB 0x20UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB 0x40UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB 0x80UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL + u8 link_partner_adv_auto_mode; + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE (0x0UL << 0) + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS (0x1UL << 0) + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED (0x2UL << 0) + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0) + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_MASK (0x4UL << 0) + u8 link_partner_adv_pause; + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL + u8 unused_3; + u8 unused_4; + u8 unused_5; + u8 valid; +}; + +/* hwrm_port_mac_cfg */ +/* Input (32 bytes) */ +struct hwrm_port_mac_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL + #define PORT_MAC_CFG_REQ_FLAGS_COS_ASSIGNMENT_ENABLE 0x2UL + #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL + #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL + __le32 enables; + #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL + #define 
PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL + #define PORT_MAC_CFG_REQ_ENABLES_IVLAN_PRI2COS_MAP_PRI 0x4UL + #define PORT_MAC_CFG_REQ_ENABLES_LCOS_MAP_PRI 0x8UL + #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL + #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL + __le16 port_id; + u8 ipg; + u8 lpbk; + #define PORT_MAC_CFG_REQ_LPBK_NONE (0x0UL << 0) + #define PORT_MAC_CFG_REQ_LPBK_LOCAL (0x1UL << 0) + #define PORT_MAC_CFG_REQ_LPBK_REMOTE (0x2UL << 0) + u8 ivlan_pri2cos_map_pri; + u8 lcos_map_pri; + u8 tunnel_pri2cos_map_pri; + u8 dscp2pri_map_pri; +}; + +/* Output (16 bytes) */ +struct hwrm_port_mac_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 mru; + __le16 mtu; + u8 ipg; + u8 lpbk; + #define PORT_MAC_CFG_RESP_LPBK_NONE (0x0UL << 0) + #define PORT_MAC_CFG_RESP_LPBK_LOCAL (0x1UL << 0) + #define PORT_MAC_CFG_RESP_LPBK_REMOTE (0x2UL << 0) + u8 unused_0; + u8 valid; +}; + +/* hwrm_port_enable */ +/* Input (24 bytes) */ +struct hwrm_port_enable_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define PORT_ENABLE_REQ_FLAGS_FORWARD_TRAFFIC 0x1UL + __le16 port_id; + __le16 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_port_enable_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_port_qstats */ +/* Input (40 bytes) */ +struct hwrm_port_qstats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0; + u8 unused_1; + u8 unused_2[3]; + u8 unused_3; + __le64 tx_stat_host_addr; + __le64 rx_stat_host_addr; +}; + +/* Output (16 bytes) */ +struct hwrm_port_qstats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_port_lpbk_qstats */ +/* Input (16 bytes) */ +struct hwrm_port_lpbk_qstats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* Output (64 bytes) */ +struct hwrm_port_lpbk_qstats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 lpbk_ucast_frames; + __le64 lpbk_mcast_frames; + __le64 lpbk_bcast_frames; + __le64 lpbk_ucast_bytes; + __le64 lpbk_mcast_bytes; + __le64 lpbk_bcast_bytes; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_port_clr_stats */ +/* Input (24 bytes) */ +struct hwrm_port_clr_stats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + __le16 unused_0[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_port_clr_stats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_port_lpbk_clr_stats */ +/* Input (16 bytes) */ +struct hwrm_port_lpbk_clr_stats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* Output (16 bytes) */ +struct hwrm_port_lpbk_clr_stats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_port_blink_led */ +/* Input (24 bytes) */ +struct hwrm_port_blink_led_input { + __le16 req_type; + 
__le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 num_blinks; + __le32 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_port_blink_led_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_queue_qportcfg */ +/* Input (24 bytes) */ +struct hwrm_queue_qportcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL + #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX (0x0UL << 0) + #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX (0x1UL << 0) + __le16 port_id; + __le16 unused_0; +}; + +/* Output (32 bytes) */ +struct hwrm_queue_qportcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 max_configurable_queues; + u8 max_configurable_lossless_queues; + u8 queue_cfg_allowed; + u8 queue_buffers_cfg_allowed; + u8 queue_pfcenable_cfg_allowed; + u8 queue_pri2cos_cfg_allowed; + u8 queue_cos2bw_cfg_allowed; + u8 queue_id0; + u8 queue_id0_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY (0x0UL << 0) + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS (0x1UL << 0) + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN (0xffUL << 0) + u8 queue_id1; + u8 queue_id1_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY (0x0UL << 0) + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS (0x1UL << 0) + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN (0xffUL << 0) + u8 queue_id2; + u8 queue_id2_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY (0x0UL << 0) + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS (0x1UL << 0) + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN (0xffUL << 0) + u8 queue_id3; + u8 queue_id3_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY (0x0UL << 0) + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS (0x1UL << 0) + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN (0xffUL << 0) + u8 queue_id4; + u8 queue_id4_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY (0x0UL << 0) + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS (0x1UL << 0) + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN (0xffUL << 0) + u8 queue_id5; + u8 queue_id5_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY (0x0UL << 0) + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS (0x1UL << 0) + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN (0xffUL << 0) + u8 queue_id6; + u8 queue_id6_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY (0x0UL << 0) + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS (0x1UL << 0) + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN (0xffUL << 0) + u8 queue_id7; + u8 queue_id7_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY (0x0UL << 0) + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS (0x1UL << 0) + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN (0xffUL << 0) + u8 valid; +}; + +/* hwrm_queue_cfg */ +/* Input (40 bytes) */ +struct hwrm_queue_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define 
QUEUE_CFG_REQ_FLAGS_PATH 0x1UL + #define QUEUE_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0) + #define QUEUE_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0) + __le32 enables; + #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL + #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL + __le32 queue_id; + __le32 dflt_len; + u8 service_profile; + #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY (0x0UL << 0) + #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS (0x1UL << 0) + #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN (0xffUL << 0) + u8 unused_0[7]; +}; + +/* Output (16 bytes) */ +struct hwrm_queue_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_queue_buffers_cfg */ +/* Input (56 bytes) */ +struct hwrm_queue_buffers_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH 0x1UL + #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0) + #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0) + __le32 enables; + #define QUEUE_BUFFERS_CFG_REQ_ENABLES_RESERVED 0x1UL + #define QUEUE_BUFFERS_CFG_REQ_ENABLES_SHARED 0x2UL + #define QUEUE_BUFFERS_CFG_REQ_ENABLES_GROUP 0x4UL + #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XOFF 0x8UL + #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XON 0x10UL + #define QUEUE_BUFFERS_CFG_REQ_ENABLES_FULL 0x20UL + #define QUEUE_BUFFERS_CFG_REQ_ENABLES_NOTFULL 0x40UL + #define QUEUE_BUFFERS_CFG_REQ_ENABLES_MAX 0x80UL + __le32 queue_id; + __le32 reserved; + __le32 shared; + __le32 xoff; + __le32 xon; + __le32 full; + __le32 notfull; + __le32 max; +}; + +/* Output (16 bytes) */ +struct hwrm_queue_buffers_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_queue_pfcenable_cfg */ +/* Input (24 bytes) */ +struct hwrm_queue_pfcenable_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI0_PFC_ENABLED 0x1UL + #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI1_PFC_ENABLED 0x2UL + #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI2_PFC_ENABLED 0x4UL + #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI3_PFC_ENABLED 0x8UL + #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI4_PFC_ENABLED 0x10UL + #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI5_PFC_ENABLED 0x20UL + #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI6_PFC_ENABLED 0x40UL + #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI7_PFC_ENABLED 0x80UL + __le16 port_id; + __le16 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_queue_pfcenable_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_queue_pri2cos_cfg */ +/* Input (40 bytes) */ +struct hwrm_queue_pri2cos_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH 0x1UL + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0) + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0) + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x2UL + __le32 enables; + u8 port_id; + u8 pri0_cos; + u8 pri1_cos; + u8 pri2_cos; + u8 pri3_cos; + u8 pri4_cos; + u8 pri5_cos; + u8 pri6_cos; + u8 pri7_cos; + u8 unused_0[7]; +}; + +/* Output (16 bytes) */ +struct 
hwrm_queue_pri2cos_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_queue_cos2bw_cfg */ +/* Input (128 bytes) */ +struct hwrm_queue_cos2bw_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + __le32 enables; + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID 0x1UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID 0x2UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID 0x4UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID 0x8UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID 0x10UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID 0x20UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID 0x40UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID 0x80UL + __le16 port_id; + u8 queue_id0; + u8 unused_0; + __le32 queue_id0_min_bw; + __le32 queue_id0_max_bw; + u8 queue_id0_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP (0x0UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS (0x1UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) + u8 queue_id0_pri_lvl; + u8 queue_id0_bw_weight; + u8 queue_id1; + __le32 queue_id1_min_bw; + __le32 queue_id1_max_bw; + u8 queue_id1_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP (0x0UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS (0x1UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) + u8 queue_id1_pri_lvl; + u8 queue_id1_bw_weight; + u8 queue_id2; + __le32 queue_id2_min_bw; + __le32 queue_id2_max_bw; + u8 queue_id2_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP (0x0UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS (0x1UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) + u8 queue_id2_pri_lvl; + u8 queue_id2_bw_weight; + u8 queue_id3; + __le32 queue_id3_min_bw; + __le32 queue_id3_max_bw; + u8 queue_id3_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP (0x0UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS (0x1UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) + u8 queue_id3_pri_lvl; + u8 queue_id3_bw_weight; + u8 queue_id4; + __le32 queue_id4_min_bw; + __le32 queue_id4_max_bw; + u8 queue_id4_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP (0x0UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS (0x1UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) + u8 queue_id4_pri_lvl; + u8 queue_id4_bw_weight; + u8 queue_id5; + __le32 queue_id5_min_bw; + __le32 queue_id5_max_bw; + u8 queue_id5_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP (0x0UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS (0x1UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) + #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) + u8 queue_id5_pri_lvl; + u8 queue_id5_bw_weight; + u8 queue_id6; + __le32 queue_id6_min_bw; + __le32 queue_id6_max_bw; + u8 queue_id6_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP (0x0UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS (0x1UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) + u8 queue_id6_pri_lvl; + u8 queue_id6_bw_weight; + u8 queue_id7; + __le32 queue_id7_min_bw; + __le32 queue_id7_max_bw; + u8 queue_id7_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP (0x0UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS (0x1UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) + u8 queue_id7_pri_lvl; + u8 queue_id7_bw_weight; + u8 unused_1[5]; +}; + +/* Output (16 bytes) */ +struct hwrm_queue_cos2bw_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_vnic_alloc */ +/* Input (24 bytes) */ +struct hwrm_vnic_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL + __le32 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_vnic_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 vnic_id; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; +}; + +/* hwrm_vnic_free */ +/* Input (24 bytes) */ +struct hwrm_vnic_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 vnic_id; + __le32 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_vnic_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_vnic_cfg */ +/* Input (40 bytes) */ +struct hwrm_vnic_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL + #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL + __le32 enables; + #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL + #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL + #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL + #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL + #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL + __le16 vnic_id; + __le16 dflt_ring_grp; + __le16 rss_rule; + __le16 cos_rule; + __le16 lb_rule; + __le16 mru; + __le32 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_vnic_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_vnic_tpa_cfg */ +/* Input (40 bytes) */ +struct hwrm_vnic_tpa_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define VNIC_TPA_CFG_REQ_FLAGS_TPA 0x1UL + #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA 0x2UL + #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE 0x4UL + #define VNIC_TPA_CFG_REQ_FLAGS_GRO 0x8UL + #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN 0x10UL + #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL + 
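/* TPA is the hardware receive-aggregation engine; the GRO flag selects GRO-compatible + * aggregation so coalesced frames can later be resegmented. Illustrative sketch only + * (not part of this patch; 'vnic_fw_id' is a hypothetical local) of enabling it: + * req.vnic_id = cpu_to_le16(vnic_fw_id); + * req.flags = cpu_to_le32(VNIC_TPA_CFG_REQ_FLAGS_TPA | VNIC_TPA_CFG_REQ_FLAGS_GRO); + * req.enables = cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS); + * req.max_agg_segs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8); + */ +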
#define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL + #define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL + __le32 enables; + #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL + #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL + #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL + #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL + __le16 vnic_id; + __le16 max_agg_segs; + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 (0x0UL << 0) + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 (0x1UL << 0) + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 (0x2UL << 0) + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 (0x3UL << 0) + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX (0x1fUL << 0) + __le16 max_aggs; + #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 (0x0UL << 0) + #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 (0x1UL << 0) + #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 (0x2UL << 0) + #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 (0x3UL << 0) + #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 (0x4UL << 0) + #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX (0x7UL << 0) + u8 unused_0; + u8 unused_1; + __le32 max_agg_timer; + __le32 min_agg_len; +}; + +/* Output (16 bytes) */ +struct hwrm_vnic_tpa_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_vnic_rss_cfg */ +/* Input (48 bytes) */ +struct hwrm_vnic_rss_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 hash_type; + #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL + __le32 unused_0; + __le64 ring_grp_tbl_addr; + __le64 hash_key_tbl_addr; + __le16 rss_ctx_idx; + __le16 unused_1[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_vnic_rss_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_vnic_plcmodes_cfg */ +/* Input (40 bytes) */ +struct hwrm_vnic_plcmodes_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT 0x1UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT 0x2UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 0x4UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL + __le32 enables; + #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL + #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL + #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL + __le32 vnic_id; + __le16 jumbo_thresh; + __le16 hds_offset; + __le16 hds_threshold; + __le16 unused_0[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_vnic_plcmodes_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_vnic_rss_cos_lb_ctx_alloc */ +/* Input (16 bytes) */ +struct hwrm_vnic_rss_cos_lb_ctx_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* Output (16 bytes) */ +struct hwrm_vnic_rss_cos_lb_ctx_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 
seq_id; + __le16 resp_len; + __le16 rss_cos_lb_ctx_id; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 unused_4; + u8 valid; +}; + +/* hwrm_vnic_rss_cos_lb_ctx_free */ +/* Input (24 bytes) */ +struct hwrm_vnic_rss_cos_lb_ctx_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 rss_cos_lb_ctx_id; + __le16 unused_0[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_vnic_rss_cos_lb_ctx_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_ring_alloc */ +/* Input (80 bytes) */ +struct hwrm_ring_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define RING_ALLOC_REQ_ENABLES_ARB_GRP_ID_VALID 0x1UL + #define RING_ALLOC_REQ_ENABLES_INPUT_NUM_VALID 0x2UL + #define RING_ALLOC_REQ_ENABLES_WEIGHT_VALID 0x4UL + #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL + #define RING_ALLOC_REQ_ENABLES_MIN_BW_VALID 0x10UL + #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL + u8 ring_type; + #define RING_ALLOC_REQ_RING_TYPE_CMPL (0x0UL << 0) + #define RING_ALLOC_REQ_RING_TYPE_TX (0x1UL << 0) + #define RING_ALLOC_REQ_RING_TYPE_RX (0x2UL << 0) + #define RING_ALLOC_REQ_RING_TYPE_STATUS (0x3UL << 0) + #define RING_ALLOC_REQ_RING_TYPE_CMD (0x4UL << 0) + u8 unused_0; + __le16 unused_1; + __le64 page_tbl_addr; + __le32 fbo; + u8 page_size; + u8 page_tbl_depth; + u8 unused_2; + u8 unused_3; + __le32 length; + __le16 logical_id; + __le16 cmpl_ring_id; + __le16 queue_id; + u8 unused_4; + u8 unused_5; + __le32 arb_grp_id; + __le16 input_number; + u8 unused_6; + u8 unused_7; + __le32 weight; + __le32 stat_ctx_id; + __le32 min_bw; + __le32 max_bw; + u8 int_mode; + #define RING_ALLOC_REQ_INT_MODE_LEGACY (0x0UL << 0) + #define RING_ALLOC_REQ_INT_MODE_MSI (0x1UL << 0) + #define RING_ALLOC_REQ_INT_MODE_MSIX (0x2UL << 0) + #define RING_ALLOC_REQ_INT_MODE_POLL (0x3UL << 0) + u8 unused_8[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_ring_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 ring_id; + __le16 logical_ring_id; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; +}; + +/* hwrm_ring_free */ +/* Input (24 bytes) */ +struct hwrm_ring_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 ring_type; + #define RING_FREE_REQ_RING_TYPE_CMPL (0x0UL << 0) + #define RING_FREE_REQ_RING_TYPE_TX (0x1UL << 0) + #define RING_FREE_REQ_RING_TYPE_RX (0x2UL << 0) + #define RING_FREE_REQ_RING_TYPE_STATUS (0x3UL << 0) + #define RING_FREE_REQ_RING_TYPE_CMD (0x4UL << 0) + u8 unused_0; + __le16 ring_id; + __le32 unused_1; +}; + +/* Output (16 bytes) */ +struct hwrm_ring_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_ring_cmpl_ring_qaggint_params */ +/* Input (24 bytes) */ +struct hwrm_ring_cmpl_ring_qaggint_params_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 ring_id; + __le16 unused_0[3]; +}; + +/* Output (32 bytes) */ +struct hwrm_ring_cmpl_ring_qaggint_params_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 flags; + #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL + #define 
RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL + __le16 num_cmpl_dma_aggr; + __le16 num_cmpl_dma_aggr_during_int; + __le16 cmpl_aggr_dma_tmr; + __le16 cmpl_aggr_dma_tmr_during_int; + __le16 int_lat_tmr_min; + __le16 int_lat_tmr_max; + __le16 num_cmpl_aggr_int; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_ring_cmpl_ring_cfg_aggint_params */ +/* Input (40 bytes) */ +struct hwrm_ring_cmpl_ring_cfg_aggint_params_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 ring_id; + __le16 flags; + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL + __le16 num_cmpl_dma_aggr; + __le16 num_cmpl_dma_aggr_during_int; + __le16 cmpl_aggr_dma_tmr; + __le16 cmpl_aggr_dma_tmr_during_int; + __le16 int_lat_tmr_min; + __le16 int_lat_tmr_max; + __le16 num_cmpl_aggr_int; + __le16 unused_0[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_ring_cmpl_ring_cfg_aggint_params_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_ring_reset */ +/* Input (24 bytes) */ +struct hwrm_ring_reset_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 ring_type; + #define RING_RESET_REQ_RING_TYPE_CMPL (0x0UL << 0) + #define RING_RESET_REQ_RING_TYPE_TX (0x1UL << 0) + #define RING_RESET_REQ_RING_TYPE_RX (0x2UL << 0) + #define RING_RESET_REQ_RING_TYPE_STATUS (0x3UL << 0) + #define RING_RESET_REQ_RING_TYPE_CMD (0x4UL << 0) + u8 unused_0; + __le16 ring_id; + __le32 unused_1; +}; + +/* Output (16 bytes) */ +struct hwrm_ring_reset_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_ring_grp_alloc */ +/* Input (24 bytes) */ +struct hwrm_ring_grp_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 cr; + __le16 rr; + __le16 ar; + __le16 sc; +}; + +/* Output (16 bytes) */ +struct hwrm_ring_grp_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 ring_group_id; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; +}; + +/* hwrm_ring_grp_free */ +/* Input (24 bytes) */ +struct hwrm_ring_grp_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 ring_group_id; + __le32 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_ring_grp_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_arb_grp_alloc */ +/* Input (24 bytes) */ +struct hwrm_arb_grp_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 input_number; + __le16 unused_0[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_arb_grp_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 arb_grp_id; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 unused_4; + u8 valid; +}; + +/* hwrm_arb_grp_cfg */ +/* Input (32 bytes) */ +struct hwrm_arb_grp_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 arb_grp_id; + 
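/* Configures one member of the arbitration group named by arb_grp_id: input_number + * and tx_ring select the member, and weight sets its relative share (the exact + * arbitration policy is firmware-defined). + */ +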
__le16 input_number; + __le16 tx_ring; + __le32 weight; + __le32 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_arb_grp_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_cfa_l2_filter_alloc */ +/* Input (96 bytes) */ +struct hwrm_cfa_l2_filter_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX (0x0UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX (0x1UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL + __le32 enables; + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN 0x4UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK 0x8UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x10UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK 0x20UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR 0x40UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK 0x80UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN 0x100UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK 0x200UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN 0x400UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK 0x800UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x8000UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL + u8 l2_addr[6]; + u8 unused_0; + u8 unused_1; + u8 l2_addr_mask[6]; + __le16 l2_ovlan; + __le16 l2_ovlan_mask; + __le16 l2_ivlan; + __le16 l2_ivlan_mask; + u8 unused_2; + u8 unused_3; + u8 t_l2_addr[6]; + u8 unused_4; + u8 unused_5; + u8 t_l2_addr_mask[6]; + __le16 t_l2_ovlan; + __le16 t_l2_ovlan_mask; + __le16 t_l2_ivlan; + __le16 t_l2_ivlan_mask; + u8 src_type; + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT (0x0UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF (0x1UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF (0x2UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC (0x3UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG (0x4UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE (0x5UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO (0x6UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG (0x7UL << 0) + u8 unused_6; + __le32 src_id; + u8 tunnel_type; + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0) + u8 unused_7; + __le16 dst_vnic_id; + __le16 mirror_vnic_id; + u8 pri_hint; + #define 
CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER (0x0UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER (0x1UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER (0x2UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX (0x3UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN (0x4UL << 0) + u8 unused_8; + __le32 unused_9; + __le64 l2_filter_id_hint; +}; + +/* Output (24 bytes) */ +struct hwrm_cfa_l2_filter_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 l2_filter_id; + __le32 flow_id; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; +}; + +/* hwrm_cfa_l2_filter_free */ +/* Input (24 bytes) */ +struct hwrm_cfa_l2_filter_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 l2_filter_id; +}; + +/* Output (16 bytes) */ +struct hwrm_cfa_l2_filter_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_cfa_l2_filter_cfg */ +/* Input (40 bytes) */ +struct hwrm_cfa_l2_filter_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL + __le32 enables; + #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_VNIC_ID_VALID 0x1UL + __le64 l2_filter_id; + __le32 dst_vnic_id; + __le32 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_cfa_l2_filter_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_cfa_l2_set_rx_mask */ +/* Input (40 bytes) */ +struct hwrm_cfa_l2_set_rx_mask_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 dflt_vnic_id; + __le32 mask; + #define CFA_L2_SET_RX_MASK_REQ_MASK_UNICAST 0x1UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL + __le64 mc_tbl_addr; + __le32 num_mc_entries; + __le32 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_cfa_l2_set_rx_mask_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_cfa_l2_set_bcastmcast_mirroring */ +/* Input (32 bytes) */ +struct hwrm_cfa_l2_set_bcastmcast_mirroring_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 dflt_vnic_id; + __le32 mirroring_flags; + #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_BCAST_MIRRORING 0x1UL + #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_MCAST_MIRRORING 0x2UL + #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_BCAST_SRC_KNOCKOUT 0x4UL + #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_MCAST_SRC_KNOCKOUT 0x8UL + #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_VLAN_ID_VALID 0x10UL + __le16 vlan_id; + u8 bcast_domain; + #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_PFONLY (0x0UL << 0) + #define 
CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_ALLPFS (0x1UL << 0) + #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_ALLPFSVFS (0x2UL << 0) + u8 mcast_domain; + #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_PFONLY (0x0UL << 0) + #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_ALLPFS (0x1UL << 0) + #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_ALLPFSVFS (0x2UL << 0) + __le32 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_cfa_l2_set_bcastmcast_mirroring_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_cfa_tunnel_filter_alloc */ +/* Input (88 bytes) */ +struct hwrm_cfa_tunnel_filter_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL + __le32 enables; + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x2UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x4UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR 0x8UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE 0x10UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR 0x40UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x80UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI 0x100UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x200UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL + __le64 l2_filter_id; + u8 l2_addr[6]; + __le16 l2_ivlan; + __le32 l3_addr[4]; + __le32 t_l3_addr[4]; + u8 l3_addr_type; + u8 t_l3_addr_type; + u8 tunnel_type; + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0) + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0) + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0) + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0) + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0) + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0) + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0) + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0) + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0) + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0) + u8 unused_0; + __le32 vni; + __le32 dst_vnic_id; + __le32 mirror_vnic_id; +}; + +/* Output (24 bytes) */ +struct hwrm_cfa_tunnel_filter_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 tunnel_filter_id; + __le32 flow_id; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; +}; + +/* hwrm_cfa_tunnel_filter_free */ +/* Input (24 bytes) */ +struct hwrm_cfa_tunnel_filter_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 tunnel_filter_id; +}; + +/* Output (16 bytes) */ +struct hwrm_cfa_tunnel_filter_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_cfa_encap_record_alloc */ +/* Input (32 bytes) */ +struct hwrm_cfa_encap_record_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define 
CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL + u8 encap_type; + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN (0x1UL << 0) + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE (0x2UL << 0) + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE (0x3UL << 0) + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP (0x4UL << 0) + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE (0x5UL << 0) + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS (0x6UL << 0) + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN (0x7UL << 0) + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE (0x8UL << 0) + u8 unused_0; + __le16 unused_1; + __le32 encap_data[16]; +}; + +/* Output (24 bytes) */ +struct hwrm_cfa_encap_record_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 encap_record_id; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_cfa_encap_record_free */ +/* Input (24 bytes) */ +struct hwrm_cfa_encap_record_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 encap_record_id; +}; + +/* Output (16 bytes) */ +struct hwrm_cfa_encap_record_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_cfa_ntuple_filter_alloc */ +/* Input (128 bytes) */ +struct hwrm_cfa_ntuple_filter_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL + __le32 enables; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x10UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x20UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x80UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x200UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x400UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK 0x800UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x1000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK 0x2000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT 0x4000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x10000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL + __le64 l2_filter_id; + u8 src_macaddr[6]; + __be16 ethertype; + u8 ipaddr_type; + u8 ip_protocol; + __le16 dst_vnic_id; + __le16 mirror_vnic_id; + u8 tunnel_type; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0) + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0) + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0) + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0) + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0) + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0) + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 
(0x6UL << 0) + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0) + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0) + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0) + u8 pri_hint; + __be32 src_ipaddr[4]; + __be32 src_ipaddr_mask[4]; + __be32 dst_ipaddr[4]; + __be32 dst_ipaddr_mask[4]; + __be16 src_port; + __be16 src_port_mask; + __be16 dst_port; + __be16 dst_port_mask; + __le64 ntuple_filter_id_hint; +}; + +/* Output (24 bytes) */ +struct hwrm_cfa_ntuple_filter_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 ntuple_filter_id; + __le32 flow_id; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; +}; + +/* hwrm_cfa_ntuple_filter_free */ +/* Input (24 bytes) */ +struct hwrm_cfa_ntuple_filter_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 ntuple_filter_id; +}; + +/* Output (16 bytes) */ +struct hwrm_cfa_ntuple_filter_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_cfa_ntuple_filter_cfg */ +/* Input (40 bytes) */ +struct hwrm_cfa_ntuple_filter_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_VNIC_ID_VALID 0x1UL + #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID_VALID 0x2UL + __le32 unused_0; + __le64 ntuple_filter_id; + __le32 new_dst_vnic_id; + __le32 new_mirror_vnic_id; +}; + +/* Output (16 bytes) */ +struct hwrm_cfa_ntuple_filter_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_tunnel_dst_port_query */ +/* Input (24 bytes) */ +struct hwrm_tunnel_dst_port_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 tunnel_type; + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0) + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0) + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0) + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0) + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0) + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0) + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0) + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_STT (0x7UL << 0) + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0) + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0) + u8 unused_0[7]; +}; + +/* Output (16 bytes) */ +struct hwrm_tunnel_dst_port_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 tunnel_dst_port_id; + __be16 tunnel_dst_port_val; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; +}; + +/* hwrm_tunnel_dst_port_alloc */ +/* Input (24 bytes) */ +struct hwrm_tunnel_dst_port_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 tunnel_type; + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0) + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0) + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0) + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0) + 
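/* Registers the UDP destination port the NIC should parse as this tunnel type so that + * inner headers can be recognized and offloaded. Illustrative sketch only (not part + * of this patch) for VXLAN on its IANA-assigned port: + * req.tunnel_type = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN; + * req.tunnel_dst_port_val = cpu_to_be16(4789); + */ +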
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0) + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0) + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0) + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0) + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0) + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0) + u8 unused_0; + __be16 tunnel_dst_port_val; + __le32 unused_1; +}; + +/* Output (16 bytes) */ +struct hwrm_tunnel_dst_port_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 tunnel_dst_port_id; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 unused_4; + u8 valid; +}; + +/* hwrm_tunnel_dst_port_free */ +/* Input (24 bytes) */ +struct hwrm_tunnel_dst_port_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 tunnel_type; + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0) + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0) + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0) + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0) + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0) + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0) + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0) + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_STT (0x7UL << 0) + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0) + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0) + u8 unused_0; + __le16 tunnel_dst_port_id; + __le32 unused_1; +}; + +/* Output (16 bytes) */ +struct hwrm_tunnel_dst_port_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_stat_ctx_alloc */ +/* Input (32 bytes) */ +struct hwrm_stat_ctx_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 stats_dma_addr; + __le32 update_period_ms; + __le32 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_stat_ctx_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 stat_ctx_id; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; +}; + +/* hwrm_stat_ctx_free */ +/* Input (24 bytes) */ +struct hwrm_stat_ctx_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 stat_ctx_id; + __le32 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_stat_ctx_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 stat_ctx_id; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; +}; + +/* hwrm_stat_ctx_query */ +/* Input (24 bytes) */ +struct hwrm_stat_ctx_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 stat_ctx_id; + __le32 unused_0; +}; + +/* Output (176 bytes) */ +struct hwrm_stat_ctx_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 tx_ucast_pkts; + __le64 tx_mcast_pkts; + __le64 tx_bcast_pkts; + __le64 tx_err_pkts; + __le64 tx_drop_pkts; + __le64 tx_ucast_bytes; + __le64 tx_mcast_bytes; + __le64 tx_bcast_bytes; + __le64 rx_ucast_pkts; + __le64 rx_mcast_pkts; + __le64 rx_bcast_pkts; + __le64 rx_err_pkts; + __le64 rx_drop_pkts; + __le64 rx_ucast_bytes; + 
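/* These are the same counters the firmware periodically DMAs into the host buffer + * registered through hwrm_stat_ctx_alloc (stats_dma_addr, refreshed every + * update_period_ms); this query instead returns a point-in-time snapshot in the + * response itself. + */ +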
__le64 rx_mcast_bytes; + __le64 rx_bcast_bytes; + __le64 rx_agg_pkts; + __le64 rx_agg_bytes; + __le64 rx_agg_events; + __le64 rx_agg_aborts; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_stat_ctx_clr_stats */ +/* Input (24 bytes) */ +struct hwrm_stat_ctx_clr_stats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 stat_ctx_id; + __le32 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_stat_ctx_clr_stats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_mgmt_l2_filter_alloc */ +/* Input (56 bytes) */ +struct hwrm_mgmt_l2_filter_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL + #define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX (0x0UL << 0) + #define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX (0x1UL << 0) + __le32 enables; + #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDRESS 0x1UL + #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_OVLAN 0x2UL + #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_IVLAN 0x4UL + #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_ACTION_ID 0x8UL + u8 l2_address[6]; + u8 unused_0; + u8 unused_1; + u8 l2_address_mask[6]; + __le16 ovlan; + __le16 ovlan_mask; + __le16 ivlan; + __le16 ivlan_mask; + u8 unused_2; + u8 unused_3; + __le32 action_id; + u8 action_bypass; + #define MGMT_L2_FILTER_ALLOC_REQ_ACTION_BYPASS 0x1UL + u8 unused_5[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_mgmt_l2_filter_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 mgmt_l2_filter_id; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 unused_4; + u8 valid; +}; + +/* hwrm_mgmt_l2_filter_free */ +/* Input (24 bytes) */ +struct hwrm_mgmt_l2_filter_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 mgmt_l2_filter_id; + __le16 unused_0[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_mgmt_l2_filter_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_nvm_raw_write_blk */ +/* Input (32 bytes) */ +struct hwrm_nvm_raw_write_blk_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_src_addr; + __le32 dest_addr; + __le32 len; +}; + +/* Output (16 bytes) */ +struct hwrm_nvm_raw_write_blk_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_nvm_read */ +/* Input (40 bytes) */ +struct hwrm_nvm_read_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le16 dir_idx; + u8 unused_0; + u8 unused_1; + __le32 offset; + __le32 len; + __le32 unused_2; +}; + +/* Output (16 bytes) */ +struct hwrm_nvm_read_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_nvm_raw_dump */ +/* Input (32 bytes) */ +struct hwrm_nvm_raw_dump_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + 
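/* DMAs 'len' bytes of raw NVRAM content, starting at byte 'offset', into the host + * buffer at host_dest_addr; unlike hwrm_nvm_read above, it addresses the flash + * directly rather than through a directory entry. + */ +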
__le32 offset; + __le32 len; +}; + +/* Output (16 bytes) */ +struct hwrm_nvm_raw_dump_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_nvm_get_dir_entries */ +/* Input (24 bytes) */ +struct hwrm_nvm_get_dir_entries_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; +}; + +/* Output (16 bytes) */ +struct hwrm_nvm_get_dir_entries_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_nvm_get_dir_info */ +/* Input (16 bytes) */ +struct hwrm_nvm_get_dir_info_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* Output (24 bytes) */ +struct hwrm_nvm_get_dir_info_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 entries; + __le32 entry_length; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_nvm_write */ +/* Input (40 bytes) */ +struct hwrm_nvm_write_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_src_addr; + __le16 dir_type; + __le16 dir_ordinal; + __le16 dir_ext; + __le16 dir_attr; + __le32 dir_data_length; + __le16 option; + __le16 flags; + #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL +}; + +/* Output (16 bytes) */ +struct hwrm_nvm_write_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_nvm_modify */ +/* Input (40 bytes) */ +struct hwrm_nvm_modify_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_src_addr; + __le16 dir_idx; + u8 unused_0; + u8 unused_1; + __le32 offset; + __le32 len; + __le32 unused_2; +}; + +/* Output (16 bytes) */ +struct hwrm_nvm_modify_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_nvm_find_dir_entry */ +/* Input (32 bytes) */ +struct hwrm_nvm_find_dir_entry_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID 0x1UL + __le16 dir_idx; + __le16 dir_type; + __le16 dir_ordinal; + __le16 dir_ext; + u8 opt_ordinal; + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0 + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ (0x0UL << 0) + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE (0x1UL << 0) + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT (0x2UL << 0) + u8 unused_1[3]; +}; + +/* Output (32 bytes) */ +struct hwrm_nvm_find_dir_entry_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 dir_item_length; + __le32 dir_data_length; + __le32 fw_ver; + __le16 dir_ordinal; + __le16 dir_idx; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_nvm_erase_dir_entry */ +/* Input (24 bytes) */ +struct hwrm_nvm_erase_dir_entry_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 dir_idx; + __le16 unused_0[3]; +}; + +/* Output (16 
bytes) */ +struct hwrm_nvm_erase_dir_entry_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_nvm_get_dev_info */ +/* Input (16 bytes) */ +struct hwrm_nvm_get_dev_info_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* Output (32 bytes) */ +struct hwrm_nvm_get_dev_info_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 manufacturer_id; + __le16 device_id; + __le32 sector_size; + __le32 nvram_size; + __le32 reserved_size; + __le32 available_size; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; +}; + +/* hwrm_nvm_mod_dir_entry */ +/* Input (32 bytes) */ +struct hwrm_nvm_mod_dir_entry_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM 0x1UL + __le16 dir_idx; + __le16 dir_ordinal; + __le16 dir_ext; + __le16 dir_attr; + __le32 checksum; +}; + +/* Output (16 bytes) */ +struct hwrm_nvm_mod_dir_entry_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_nvm_verify_update */ +/* Input (24 bytes) */ +struct hwrm_nvm_verify_update_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 dir_type; + __le16 dir_ordinal; + __le16 dir_ext; + __le16 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_nvm_verify_update_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_exec_fwd_resp */ +/* Input (120 bytes) */ +struct hwrm_exec_fwd_resp_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 encap_request[24]; + __le16 encap_resp_target_id; + __le16 unused_0[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_exec_fwd_resp_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_reject_fwd_resp */ +/* Input (120 bytes) */ +struct hwrm_reject_fwd_resp_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 encap_request[24]; + __le16 encap_resp_target_id; + __le16 unused_0[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_reject_fwd_resp_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_fwd_resp */ +/* Input (40 bytes) */ +struct hwrm_fwd_resp_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 encap_resp_target_id; + __le16 encap_resp_cmpl_ring; + __le16 encap_resp_len; + u8 unused_0; + u8 unused_1; + __le64 encap_resp_addr; + __le32 encap_resp[24]; +}; + +/* Output (16 bytes) */ +struct hwrm_fwd_resp_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_fwd_async_event_cmpl */ +/* Input (32 bytes) */ +struct hwrm_fwd_async_event_cmpl_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 
resp_addr; + __le16 encap_async_event_target_id; + u8 unused_0; + u8 unused_1; + u8 unused_2[3]; + u8 unused_3; + __le32 encap_async_event_cmpl[4]; +}; + +/* Output (16 bytes) */ +struct hwrm_fwd_async_event_cmpl_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_fw_reset */ +/* Input (24 bytes) */ +struct hwrm_fw_reset_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 embedded_proc_type; + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIMP (0x0UL << 0) + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_APE (0x1UL << 0) + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_KONG (0x2UL << 0) + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BONO (0x3UL << 0) + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_TANG (0x4UL << 0) + u8 selfrst_status; + #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0) + #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0) + #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0) + __le16 unused_0[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_fw_reset_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 selfrst_status; + #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0) + #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0) + #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0) + u8 unused_0; + __le16 unused_1; + u8 unused_2; + u8 unused_3; + u8 unused_4; + u8 valid; +}; + +/* hwrm_fw_qstatus */ +/* Input (24 bytes) */ +struct hwrm_fw_qstatus_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 embedded_proc_type; + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIMP (0x0UL << 0) + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_APE (0x1UL << 0) + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_KONG (0x2UL << 0) + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BONO (0x3UL << 0) + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_TANG (0x4UL << 0) + u8 unused_0[7]; +}; + +/* Output (16 bytes) */ +struct hwrm_fw_qstatus_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 selfrst_status; + #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0) + #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0) + #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0) + u8 unused_0; + __le16 unused_1; + u8 unused_2; + u8 unused_3; + u8 unused_4; + u8 valid; +}; + +/* hwrm_temp_monitor_query */ +/* Input (16 bytes) */ +struct hwrm_temp_monitor_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* Output (16 bytes) */ +struct hwrm_temp_monitor_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 temp; + u8 unused_0; + __le16 unused_1; + u8 unused_2; + u8 unused_3; + u8 unused_4; + u8 valid; +}; + +#endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h new file mode 100644 index 000000000000..3cf3e1b70b64 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h @@ -0,0 +1,59 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2014-2015 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#ifndef _BNXT_NVM_DEFS_H_ +#define _BNXT_NVM_DEFS_H_ + +enum bnxt_nvm_directory_type { + BNX_DIR_TYPE_UNUSED = 0, + BNX_DIR_TYPE_PKG_LOG = 1, + BNX_DIR_TYPE_CHIMP_PATCH = 3, + BNX_DIR_TYPE_BOOTCODE = 4, + BNX_DIR_TYPE_VPD = 5, + BNX_DIR_TYPE_EXP_ROM_MBA = 6, + BNX_DIR_TYPE_AVS = 7, + BNX_DIR_TYPE_PCIE = 8, + BNX_DIR_TYPE_PORT_MACRO = 9, + BNX_DIR_TYPE_APE_FW = 10, + BNX_DIR_TYPE_APE_PATCH = 11, + BNX_DIR_TYPE_KONG_FW = 12, + BNX_DIR_TYPE_KONG_PATCH = 13, + BNX_DIR_TYPE_BONO_FW = 14, + BNX_DIR_TYPE_BONO_PATCH = 15, + BNX_DIR_TYPE_TANG_FW = 16, + BNX_DIR_TYPE_TANG_PATCH = 17, + BNX_DIR_TYPE_BOOTCODE_2 = 18, + BNX_DIR_TYPE_CCM = 19, + BNX_DIR_TYPE_PCI_CFG = 20, + BNX_DIR_TYPE_TSCF_UCODE = 21, + BNX_DIR_TYPE_ISCSI_BOOT = 22, + BNX_DIR_TYPE_ISCSI_BOOT_IPV6 = 24, + BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6 = 25, + BNX_DIR_TYPE_ISCSI_BOOT_CFG6 = 26, + BNX_DIR_TYPE_EXT_PHY = 27, + BNX_DIR_TYPE_SHARED_CFG = 40, + BNX_DIR_TYPE_PORT_CFG = 41, + BNX_DIR_TYPE_FUNC_CFG = 42, + BNX_DIR_TYPE_MGMT_CFG = 48, + BNX_DIR_TYPE_MGMT_DATA = 49, + BNX_DIR_TYPE_MGMT_WEB_DATA = 50, + BNX_DIR_TYPE_MGMT_WEB_META = 51, + BNX_DIR_TYPE_MGMT_EVENT_LOG = 52, + BNX_DIR_TYPE_MGMT_AUDIT_LOG = 53 +}; + +#define BNX_DIR_ORDINAL_FIRST 0 + +#define BNX_DIR_EXT_INACTIVE (1 << 0) +#define BNX_DIR_EXT_UPDATE (1 << 1) + +#define BNX_DIR_ATTR_NO_CHKSUM (1 << 0) +#define BNX_DIR_ATTR_PROP_STREAM (1 << 1) + +#endif /* Don't add anything after this line */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c new file mode 100644 index 000000000000..60989e7e266a --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -0,0 +1,816 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2014-2015 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/if_vlan.h> +#include <linux/interrupt.h> +#include <linux/etherdevice.h> +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_sriov.h" +#include "bnxt_ethtool.h" + +#ifdef CONFIG_BNXT_SRIOV +static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id) +{ + if (bp->state != BNXT_STATE_OPEN) { + netdev_err(bp->dev, "VF ndo called even though PF is down\n"); + return -EINVAL; + } + if (!bp->pf.active_vfs) { + netdev_err(bp->dev, "VF ndo called even though SR-IOV is disabled\n"); + return -EINVAL; + } + if (vf_id >= bp->pf.max_vfs) { + netdev_err(bp->dev, "Invalid VF id %d\n", vf_id); + return -EINVAL; + } + return 0; +} + +int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting) +{ + struct hwrm_func_cfg_input req = {0}; + struct bnxt *bp = netdev_priv(dev); + struct bnxt_vf_info *vf; + bool old_setting = false; + u32 func_flags; + int rc; + + rc = bnxt_vf_ndo_prep(bp, vf_id); + if (rc) + return rc; + + vf = &bp->pf.vf[vf_id]; + if (vf->flags & BNXT_VF_SPOOFCHK) + old_setting = true; + if (old_setting == setting) + return 0; + + func_flags = vf->func_flags; + if (setting) + func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK; + else + func_flags &= ~FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK; + /* TODO: if the driver supports VLAN filter on guest VLAN, + * the spoof check should also include vlan anti-spoofing + */ + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + req.vf_id = cpu_to_le16(vf->fw_fid); + req.flags = cpu_to_le32(func_flags); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) { + vf->func_flags = func_flags; + if (setting) + vf->flags |= BNXT_VF_SPOOFCHK; + else + vf->flags &= ~BNXT_VF_SPOOFCHK; + } + return rc; +} + +int bnxt_get_vf_config(struct net_device *dev, int vf_id, + struct ifla_vf_info *ivi) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_vf_info *vf; + int rc; + + rc = bnxt_vf_ndo_prep(bp, vf_id); + if (rc) + return rc; + + ivi->vf = vf_id; + vf = &bp->pf.vf[vf_id]; + + memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN); + ivi->max_tx_rate = vf->max_tx_rate; + ivi->min_tx_rate = vf->min_tx_rate; + ivi->vlan = vf->vlan; + ivi->qos = vf->flags & BNXT_VF_QOS; + ivi->spoofchk = vf->flags & BNXT_VF_SPOOFCHK; + if (!(vf->flags & BNXT_VF_LINK_FORCED)) + ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; + else if (vf->flags & BNXT_VF_LINK_UP) + ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; + else + ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; + + return 0; +} + +int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac) +{ + struct hwrm_func_cfg_input req = {0}; + struct bnxt *bp = netdev_priv(dev); + struct bnxt_vf_info *vf; + int rc; + + rc = bnxt_vf_ndo_prep(bp, vf_id); + if (rc) + return rc; + /* reject bc or mc mac addr, zero mac addr means allow + * VF to use its own mac addr + */ + if (is_multicast_ether_addr(mac)) { + netdev_err(dev, "Invalid VF ethernet address\n"); + return -EINVAL; + } + vf = &bp->pf.vf[vf_id]; + + memcpy(vf->mac_addr, mac, ETH_ALEN); + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + req.vf_id = cpu_to_le16(vf->fw_fid); + req.flags = cpu_to_le32(vf->func_flags); + req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); + memcpy(req.dflt_mac_addr, mac, ETH_ALEN); + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + +int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos) +{ + struct hwrm_func_cfg_input req = {0}; + struct bnxt *bp = netdev_priv(dev); + 
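/* (Editor's note, not part of the original patch: this ndo backs + * "ip link set <dev> vf <N> vlan <id>"; the VLAN tag is programmed + * below through HWRM_FUNC_CFG as the VF's default VLAN.) + */ + 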
struct bnxt_vf_info *vf; + u16 vlan_tag; + int rc; + + rc = bnxt_vf_ndo_prep(bp, vf_id); + if (rc) + return rc; + + /* TODO: proper handling of user priority is still needed; for now, + * fail the command if a valid priority is specified + */ + if (vlan_id > 4095 || qos) + return -EINVAL; + + vf = &bp->pf.vf[vf_id]; + vlan_tag = vlan_id; + if (vlan_tag == vf->vlan) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + req.vf_id = cpu_to_le16(vf->fw_fid); + req.flags = cpu_to_le32(vf->func_flags); + req.dflt_vlan = cpu_to_le16(vlan_tag); + req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) + vf->vlan = vlan_tag; + return rc; +} + +int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate, + int max_tx_rate) +{ + struct hwrm_func_cfg_input req = {0}; + struct bnxt *bp = netdev_priv(dev); + struct bnxt_vf_info *vf; + u32 pf_link_speed; + int rc; + + rc = bnxt_vf_ndo_prep(bp, vf_id); + if (rc) + return rc; + + vf = &bp->pf.vf[vf_id]; + pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); + if (max_tx_rate > pf_link_speed) { + netdev_info(bp->dev, "max tx rate %d exceeds PF link speed for VF %d\n", + max_tx_rate, vf_id); + return -EINVAL; + } + + if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) { + netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n", + min_tx_rate, vf_id); + return -EINVAL; + } + if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate) + return 0; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + req.vf_id = cpu_to_le16(vf->fw_fid); + req.flags = cpu_to_le32(vf->func_flags); + req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW); + req.max_bw = cpu_to_le32(max_tx_rate); + req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW); + req.min_bw = cpu_to_le32(min_tx_rate); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) { + vf->min_tx_rate = min_tx_rate; + vf->max_tx_rate = max_tx_rate; + } + return rc; +} + +int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_vf_info *vf; + int rc; + + rc = bnxt_vf_ndo_prep(bp, vf_id); + if (rc) + return rc; + + vf = &bp->pf.vf[vf_id]; + + vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED); + switch (link) { + case IFLA_VF_LINK_STATE_AUTO: + vf->flags |= BNXT_VF_LINK_UP; + break; + case IFLA_VF_LINK_STATE_DISABLE: + vf->flags |= BNXT_VF_LINK_FORCED; + break; + case IFLA_VF_LINK_STATE_ENABLE: + vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED; + break; + default: + netdev_err(bp->dev, "Invalid link option\n"); + rc = -EINVAL; + break; + } + /* CHIMP TODO: send msg to VF to update new link state */ + + return rc; +} + +static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs) +{ + int i; + struct bnxt_vf_info *vf; + + for (i = 0; i < num_vfs; i++) { + vf = &bp->pf.vf[i]; + memset(vf, 0, sizeof(*vf)); + vf->flags = BNXT_VF_QOS | BNXT_VF_LINK_UP; + } + return 0; +} + +static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp) +{ + int i, rc = 0; + struct bnxt_pf_info *pf = &bp->pf; + struct hwrm_func_vf_resc_free_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1); + + mutex_lock(&bp->hwrm_cmd_lock); + for (i = pf->first_vf_id; i < pf->first_vf_id + pf->active_vfs; i++) { + req.vf_id = cpu_to_le16(i); + rc = _hwrm_send_message(bp, &req, sizeof(req), + HWRM_CMD_TIMEOUT); + if (rc) + break; + } + mutex_unlock(&bp->hwrm_cmd_lock); + 
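/* (Editor's note: the loop above stops at the first per-VF failure, so + * rc below is either 0 or the first error returned by firmware.) + */ + 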
return rc; +} + +static void bnxt_free_vf_resources(struct bnxt *bp) +{ + struct pci_dev *pdev = bp->pdev; + int i; + + kfree(bp->pf.vf_event_bmap); + bp->pf.vf_event_bmap = NULL; + + for (i = 0; i < 4; i++) { + if (bp->pf.hwrm_cmd_req_addr[i]) { + dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE, + bp->pf.hwrm_cmd_req_addr[i], + bp->pf.hwrm_cmd_req_dma_addr[i]); + bp->pf.hwrm_cmd_req_addr[i] = NULL; + } + } + + kfree(bp->pf.vf); + bp->pf.vf = NULL; +} + +static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs) +{ + struct pci_dev *pdev = bp->pdev; + u32 nr_pages, size, i, j, k = 0; + + bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL); + if (!bp->pf.vf) + return -ENOMEM; + + bnxt_set_vf_attr(bp, num_vfs); + + size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE; + nr_pages = size / BNXT_PAGE_SIZE; + if (size & (BNXT_PAGE_SIZE - 1)) + nr_pages++; + + for (i = 0; i < nr_pages; i++) { + bp->pf.hwrm_cmd_req_addr[i] = + dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE, + &bp->pf.hwrm_cmd_req_dma_addr[i], + GFP_KERNEL); + + if (!bp->pf.hwrm_cmd_req_addr[i]) + return -ENOMEM; + + for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) { + struct bnxt_vf_info *vf = &bp->pf.vf[k]; + + vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] + + j * BNXT_HWRM_REQ_MAX_SIZE; + vf->hwrm_cmd_req_dma_addr = + bp->pf.hwrm_cmd_req_dma_addr[i] + j * + BNXT_HWRM_REQ_MAX_SIZE; + k++; + } + } + + /* Max 128 VFs */ + bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL); + if (!bp->pf.vf_event_bmap) + return -ENOMEM; + + bp->pf.hwrm_cmd_req_pages = nr_pages; + return 0; +} + +static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp) +{ + struct hwrm_func_buf_rgtr_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1); + + req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages); + req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT); + req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE); + req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]); + req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]); + req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]); + req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]); + + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + +/* Only called by the PF to reserve resources for VFs */ +static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs) +{ + u32 rc = 0, mtu, i; + u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics; + struct hwrm_func_cfg_input req = {0}; + struct bnxt_pf_info *pf = &bp->pf; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + + /* Remaining rings are distributed equally among VFs for now */ + /* TODO: the following workaround is needed so that the total number + * of vf_cp_rings does not exceed the number of HW ring groups. This + * WA should be removed once the new HWRM provides HW ring groups + * capability in hwrm_func_qcap. 
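+ * (Editor's note: until then, the min_t() below caps the completion + * rings at the stat-context count as a stand-in for the missing + * ring-group limit.)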
+ */ + vf_cp_rings = min_t(u16, bp->pf.max_cp_rings, bp->pf.max_stat_ctxs); + vf_cp_rings = (vf_cp_rings - bp->cp_nr_rings) / *num_vfs; + /* TODO: restore this logic below once the WA above is removed */ + /* vf_cp_rings = (bp->pf.max_cp_rings - bp->cp_nr_rings) / *num_vfs; */ + vf_stat_ctx = (bp->pf.max_stat_ctxs - bp->num_stat_ctxs) / *num_vfs; + if (bp->flags & BNXT_FLAG_AGG_RINGS) + vf_rx_rings = (bp->pf.max_rx_rings - bp->rx_nr_rings * 2) / + *num_vfs; + else + vf_rx_rings = (bp->pf.max_rx_rings - bp->rx_nr_rings) / + *num_vfs; + vf_tx_rings = (bp->pf.max_tx_rings - bp->tx_nr_rings) / *num_vfs; + + req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU | + FUNC_CFG_REQ_ENABLES_MRU | + FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS | + FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS | + FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | + FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS | + FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS | + FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS | + FUNC_CFG_REQ_ENABLES_NUM_VNICS); + + mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + req.mru = cpu_to_le16(mtu); + req.mtu = cpu_to_le16(mtu); + + req.num_rsscos_ctxs = cpu_to_le16(1); + req.num_cmpl_rings = cpu_to_le16(vf_cp_rings); + req.num_tx_rings = cpu_to_le16(vf_tx_rings); + req.num_rx_rings = cpu_to_le16(vf_rx_rings); + req.num_l2_ctxs = cpu_to_le16(4); + vf_vnics = 1; + + req.num_vnics = cpu_to_le16(vf_vnics); + /* FIXME spec currently uses 1 bit for stats ctx */ + req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx); + + mutex_lock(&bp->hwrm_cmd_lock); + for (i = 0; i < *num_vfs; i++) { + req.vf_id = cpu_to_le16(pf->first_vf_id + i); + rc = _hwrm_send_message(bp, &req, sizeof(req), + HWRM_CMD_TIMEOUT); + if (rc) + break; + bp->pf.active_vfs = i + 1; + bp->pf.vf[i].fw_fid = le16_to_cpu(req.vf_id); + } + mutex_unlock(&bp->hwrm_cmd_lock); + if (!rc) { + bp->pf.max_pf_tx_rings = bp->tx_nr_rings; + if (bp->flags & BNXT_FLAG_AGG_RINGS) + bp->pf.max_pf_rx_rings = bp->rx_nr_rings * 2; + else + bp->pf.max_pf_rx_rings = bp->rx_nr_rings; + } + return rc; +} + +static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) +{ + int rc = 0, vfs_supported; + int min_rx_rings, min_tx_rings, min_rss_ctxs; + int tx_ok = 0, rx_ok = 0, rss_ok = 0; + + /* Check if we can enable the requested number of VFs. At a minimum + * we require 1 RX and 1 TX ring for each VF. In this minimum config, + * features like TPA will not be available. 
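+ * (Editor's note: the loop that follows simply retries with one fewer + * VF until the RX-ring, TX-ring and RSS-context budgets all fit.)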
+ */ + vfs_supported = *num_vfs; + + while (vfs_supported) { + min_rx_rings = vfs_supported; + min_tx_rings = vfs_supported; + min_rss_ctxs = vfs_supported; + + if (bp->flags & BNXT_FLAG_AGG_RINGS) { + if (bp->pf.max_rx_rings - bp->rx_nr_rings * 2 >= + min_rx_rings) + rx_ok = 1; + } else { + if (bp->pf.max_rx_rings - bp->rx_nr_rings >= + min_rx_rings) + rx_ok = 1; + } + + if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings) + tx_ok = 1; + + if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs) + rss_ok = 1; + + if (tx_ok && rx_ok && rss_ok) + break; + + vfs_supported--; + } + + if (!vfs_supported) { + netdev_err(bp->dev, "Cannot enable VFs as all resources are used by the PF\n"); + return -EINVAL; + } + + if (vfs_supported != *num_vfs) { + netdev_info(bp->dev, "Requested VFs %d, can enable %d\n", + *num_vfs, vfs_supported); + *num_vfs = vfs_supported; + } + + rc = bnxt_alloc_vf_resources(bp, *num_vfs); + if (rc) + goto err_out1; + + /* Reserve resources for VFs */ + rc = bnxt_hwrm_func_cfg(bp, num_vfs); + if (rc) + goto err_out2; + + /* Register buffers for VFs */ + rc = bnxt_hwrm_func_buf_rgtr(bp); + if (rc) + goto err_out2; + + rc = pci_enable_sriov(bp->pdev, *num_vfs); + if (rc) + goto err_out2; + + return 0; + +err_out2: + /* Free the resources reserved for various VFs */ + bnxt_hwrm_func_vf_resource_free(bp); + +err_out1: + bnxt_free_vf_resources(bp); + + return rc; +} + +void bnxt_sriov_disable(struct bnxt *bp) +{ + if (!bp->pf.active_vfs) + return; + + pci_disable_sriov(bp->pdev); + + /* Free the resources reserved for various VFs */ + bnxt_hwrm_func_vf_resource_free(bp); + + bnxt_free_vf_resources(bp); + + bp->pf.active_vfs = 0; + bp->pf.max_pf_rx_rings = bp->pf.max_rx_rings; + bp->pf.max_pf_tx_rings = bp->pf.max_tx_rings; +} + +int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnxt *bp = netdev_priv(dev); + + if (!(bp->flags & BNXT_FLAG_USING_MSIX)) { + netdev_warn(dev, "SR-IOV is not allowed when the IRQ mode is not MSI-X\n"); + return 0; + } + + rtnl_lock(); + if (!netif_running(dev)) { + netdev_warn(dev, "Rejecting SR-IOV config request since the interface is down!\n"); + rtnl_unlock(); + return 0; + } + bp->sriov_cfg = true; + rtnl_unlock(); + if (!num_vfs) { + bnxt_sriov_disable(bp); + return 0; + } + + /* Check if the number of enabled VFs is the same as requested */ + if (num_vfs == bp->pf.active_vfs) + return 0; + + bnxt_sriov_enable(bp, &num_vfs); + + bp->sriov_cfg = false; + wake_up(&bp->sriov_cfg_wait); + + return num_vfs; +} + +static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, + void *encap_resp, __le64 encap_resp_addr, + __le16 encap_resp_cpr, u32 msg_size) +{ + int rc = 0; + struct hwrm_fwd_resp_input req = {0}; + struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1); + + /* Set the new target id */ + req.target_id = cpu_to_le16(vf->fw_fid); + req.encap_resp_len = cpu_to_le16(msg_size); + req.encap_resp_addr = encap_resp_addr; + req.encap_resp_cmpl_ring = encap_resp_cpr; + memcpy(req.encap_resp, encap_resp, msg_size); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + + if (rc) { + netdev_err(bp->dev, "hwrm_fwd_resp failed. 
rc:%d\n", rc); + goto fwd_resp_exit; + } + + if (resp->error_code) { + netdev_err(bp->dev, "hwrm_fwd_resp error %d\n", + resp->error_code); + rc = -1; + } + +fwd_resp_exit: + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf, + u32 msg_size) +{ + int rc = 0; + struct hwrm_reject_fwd_resp_input req = {0}; + struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1); + /* Set the new target id */ + req.target_id = cpu_to_le16(vf->fw_fid); + memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + + if (rc) { + netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc); + goto fwd_err_resp_exit; + } + + if (resp->error_code) { + netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n", + resp->error_code); + rc = -1; + } + +fwd_err_resp_exit: + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, + u32 msg_size) +{ + int rc = 0; + struct hwrm_exec_fwd_resp_input req = {0}; + struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1); + /* Set the new target id */ + req.target_id = cpu_to_le16(vf->fw_fid); + memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + + if (rc) { + netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc); + goto exec_fwd_resp_exit; + } + + if (resp->error_code) { + netdev_err(bp->dev, "hwrm_exec_fw_resp error %d\n", + resp->error_code); + rc = -1; + } + +exec_fwd_resp_exit: + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf) +{ + u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input); + struct hwrm_cfa_l2_filter_alloc_input *req = + (struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr; + + if (!is_valid_ether_addr(vf->mac_addr) || + ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr)) + return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size); + else + return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size); +} + +static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) +{ + int rc = 0; + + if (!(vf->flags & BNXT_VF_LINK_FORCED)) { + /* real link */ + rc = bnxt_hwrm_exec_fwd_resp( + bp, vf, sizeof(struct hwrm_port_phy_qcfg_input)); + } else { + struct hwrm_port_phy_qcfg_output phy_qcfg_resp; + struct hwrm_port_phy_qcfg_input *phy_qcfg_req; + + phy_qcfg_req = + (struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr; + mutex_lock(&bp->hwrm_cmd_lock); + memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp, + sizeof(phy_qcfg_resp)); + mutex_unlock(&bp->hwrm_cmd_lock); + phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id; + + if (vf->flags & BNXT_VF_LINK_UP) { + /* if physical link is down, force link up on VF */ + if (phy_qcfg_resp.link == + PORT_PHY_QCFG_RESP_LINK_NO_LINK) { + phy_qcfg_resp.link = + PORT_PHY_QCFG_RESP_LINK_LINK; + if (phy_qcfg_resp.auto_link_speed) + phy_qcfg_resp.link_speed = + phy_qcfg_resp.auto_link_speed; + else + phy_qcfg_resp.link_speed = + phy_qcfg_resp.force_link_speed; + phy_qcfg_resp.duplex = + PORT_PHY_QCFG_RESP_DUPLEX_FULL; + phy_qcfg_resp.pause = + (PORT_PHY_QCFG_RESP_PAUSE_TX | + PORT_PHY_QCFG_RESP_PAUSE_RX); + } + } else { + /* force 
link down */ + phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK; + phy_qcfg_resp.link_speed = 0; + phy_qcfg_resp.duplex = PORT_PHY_QCFG_RESP_DUPLEX_HALF; + phy_qcfg_resp.pause = 0; + } + rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp, + phy_qcfg_req->resp_addr, + phy_qcfg_req->cmpl_ring, + sizeof(phy_qcfg_resp)); + } + return rc; +} + +static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf) +{ + int rc = 0; + struct hwrm_cmd_req_hdr *encap_req = vf->hwrm_cmd_req_addr; + u32 req_type = le32_to_cpu(encap_req->cmpl_ring_req_type) & 0xffff; + + switch (req_type) { + case HWRM_CFA_L2_FILTER_ALLOC: + rc = bnxt_vf_validate_set_mac(bp, vf); + break; + case HWRM_FUNC_CFG: + /* TODO: validate if the VF is allowed to change mac address, + * mtu, number of rings, etc. + */ + rc = bnxt_hwrm_exec_fwd_resp( + bp, vf, sizeof(struct hwrm_func_cfg_input)); + break; + case HWRM_PORT_PHY_QCFG: + rc = bnxt_vf_set_link(bp, vf); + break; + default: + break; + } + return rc; +} + +void bnxt_hwrm_exec_fwd_req(struct bnxt *bp) +{ + u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id; + + /* Scan through VFs and process commands */ + while (1) { + vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i); + if (vf_id >= active_vfs) + break; + + clear_bit(vf_id, bp->pf.vf_event_bmap); + bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]); + i = vf_id + 1; + } +} + +void bnxt_update_vf_mac(struct bnxt *bp) +{ + struct hwrm_func_qcaps_input req = {0}; + struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); + req.fid = cpu_to_le16(0xffff); + + mutex_lock(&bp->hwrm_cmd_lock); + if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT)) + goto update_vf_mac_exit; + + if (!is_valid_ether_addr(resp->perm_mac_address)) + goto update_vf_mac_exit; + + if (ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr)) + goto update_vf_mac_exit; + + memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN); + memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN); +update_vf_mac_exit: + mutex_unlock(&bp->hwrm_cmd_lock); +} + +#else + +void bnxt_sriov_disable(struct bnxt *bp) +{ +} + +void bnxt_hwrm_exec_fwd_req(struct bnxt *bp) +{ + netdev_err(bp->dev, "Invalid VF message received when SR-IOV is not enabled\n"); +} + +void bnxt_update_vf_mac(struct bnxt *bp) +{ +} +#endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h new file mode 100644 index 000000000000..c151280e3980 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h @@ -0,0 +1,23 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2014-2015 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#ifndef BNXT_SRIOV_H +#define BNXT_SRIOV_H + +int bnxt_get_vf_config(struct net_device *, int, struct ifla_vf_info *); +int bnxt_set_vf_mac(struct net_device *, int, u8 *); +int bnxt_set_vf_vlan(struct net_device *, int, u16, u8); +int bnxt_set_vf_bw(struct net_device *, int, int, int); +int bnxt_set_vf_link_state(struct net_device *, int, int); +int bnxt_set_vf_spoofchk(struct net_device *, int, bool); +int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs); +void bnxt_sriov_disable(struct bnxt *); +void bnxt_hwrm_exec_fwd_req(struct bnxt *); +void bnxt_update_vf_mac(struct bnxt *); +#endif diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 02e4e028a647..a077f9476daf 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -35,79 +35,79 @@ static void set_msglevel(struct net_device *dev, u32 val) } static const char stats_strings[][ETH_GSTRING_LEN] = { - "TxOctetsOK ", - "TxFramesOK ", - "TxBroadcastFrames ", - "TxMulticastFrames ", - "TxUnicastFrames ", - "TxErrorFrames ", - - "TxFrames64 ", - "TxFrames65To127 ", - "TxFrames128To255 ", - "TxFrames256To511 ", - "TxFrames512To1023 ", - "TxFrames1024To1518 ", - "TxFrames1519ToMax ", - - "TxFramesDropped ", - "TxPauseFrames ", - "TxPPP0Frames ", - "TxPPP1Frames ", - "TxPPP2Frames ", - "TxPPP3Frames ", - "TxPPP4Frames ", - "TxPPP5Frames ", - "TxPPP6Frames ", - "TxPPP7Frames ", - - "RxOctetsOK ", - "RxFramesOK ", - "RxBroadcastFrames ", - "RxMulticastFrames ", - "RxUnicastFrames ", - - "RxFramesTooLong ", - "RxJabberErrors ", - "RxFCSErrors ", - "RxLengthErrors ", - "RxSymbolErrors ", - "RxRuntFrames ", - - "RxFrames64 ", - "RxFrames65To127 ", - "RxFrames128To255 ", - "RxFrames256To511 ", - "RxFrames512To1023 ", - "RxFrames1024To1518 ", - "RxFrames1519ToMax ", - - "RxPauseFrames ", - "RxPPP0Frames ", - "RxPPP1Frames ", - "RxPPP2Frames ", - "RxPPP3Frames ", - "RxPPP4Frames ", - "RxPPP5Frames ", - "RxPPP6Frames ", - "RxPPP7Frames ", - - "RxBG0FramesDropped ", - "RxBG1FramesDropped ", - "RxBG2FramesDropped ", - "RxBG3FramesDropped ", - "RxBG0FramesTrunc ", - "RxBG1FramesTrunc ", - "RxBG2FramesTrunc ", - "RxBG3FramesTrunc ", - - "TSO ", - "TxCsumOffload ", - "RxCsumGood ", - "VLANextractions ", - "VLANinsertions ", - "GROpackets ", - "GROmerged ", + "tx_octets_ok ", + "tx_frames_ok ", + "tx_broadcast_frames ", + "tx_multicast_frames ", + "tx_unicast_frames ", + "tx_error_frames ", + + "tx_frames_64 ", + "tx_frames_65_to_127 ", + "tx_frames_128_to_255 ", + "tx_frames_256_to_511 ", + "tx_frames_512_to_1023 ", + "tx_frames_1024_to_1518 ", + "tx_frames_1519_to_max ", + + "tx_frames_dropped ", + "tx_pause_frames ", + "tx_ppp0_frames ", + "tx_ppp1_frames ", + "tx_ppp2_frames ", + "tx_ppp3_frames ", + "tx_ppp4_frames ", + "tx_ppp5_frames ", + "tx_ppp6_frames ", + "tx_ppp7_frames ", + + "rx_octets_ok ", + "rx_frames_ok ", + "rx_broadcast_frames ", + "rx_multicast_frames ", + "rx_unicast_frames ", + + "rx_frames_too_long ", + "rx_jabber_errors ", + "rx_fcs_errors ", + "rx_length_errors ", + "rx_symbol_errors ", + "rx_runt_frames ", + + "rx_frames_64 ", + "rx_frames_65_to_127 ", + "rx_frames_128_to_255 ", + "rx_frames_256_to_511 ", + "rx_frames_512_to_1023 ", + "rx_frames_1024_to_1518 ", + "rx_frames_1519_to_max ", + + "rx_pause_frames ", + "rx_ppp0_frames ", + "rx_ppp1_frames ", + "rx_ppp2_frames ", + "rx_ppp3_frames ", + "rx_ppp4_frames ", + "rx_ppp5_frames ", + "rx_ppp6_frames ", + "rx_ppp7_frames ", + + 
"rx_bg0_frames_dropped ", + "rx_bg1_frames_dropped ", + "rx_bg2_frames_dropped ", + "rx_bg3_frames_dropped ", + "rx_bg0_frames_trunc ", + "rx_bg1_frames_trunc ", + "rx_bg2_frames_trunc ", + "rx_bg3_frames_trunc ", + + "tso ", + "tx_csum_offload ", + "rx_csum_good ", + "vlan_extractions ", + "vlan_insertions ", + "gro_packets ", + "gro_merged ", }; static char adapter_stats_strings[][ETH_GSTRING_LEN] = { @@ -211,8 +211,11 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) sizeof(info->version)); strlcpy(info->bus_info, pci_name(adapter->pdev), sizeof(info->bus_info)); + info->regdump_len = get_regs_len(dev); - if (adapter->params.fw_vers) + if (!adapter->params.fw_vers) + strcpy(info->fw_version, "N/A"); + else snprintf(info->fw_version, sizeof(info->fw_version), "%u.%u.%u.%u, TP %u.%u.%u.%u", FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers), @@ -612,6 +615,8 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) struct port_info *p = netdev_priv(dev); struct link_config *lc = &p->link_cfg; u32 speed = ethtool_cmd_speed(cmd); + struct link_config old_lc; + int ret; if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */ return -EINVAL; @@ -626,13 +631,11 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) return -EINVAL; } + old_lc = *lc; if (cmd->autoneg == AUTONEG_DISABLE) { cap = speed_to_caps(speed); - if (!(lc->supported & cap) || - (speed == 1000) || - (speed == 10000) || - (speed == 40000)) + if (!(lc->supported & cap)) return -EINVAL; lc->requested_speed = cap; lc->advertising = 0; @@ -645,10 +648,14 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) } lc->autoneg = cmd->autoneg; - if (netif_running(dev)) - return t4_link_l1cfg(p->adapter, p->adapter->pf, p->tx_chan, - lc); - return 0; + /* If the firmware rejects the Link Configuration request, back out + * the changes and report the error. + */ + ret = t4_link_l1cfg(p->adapter, p->adapter->mbox, p->tx_chan, lc); + if (ret) + *lc = old_lc; + + return ret; } static void get_pauseparam(struct net_device *dev, @@ -847,7 +854,7 @@ static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e, { int i, err = 0; struct adapter *adapter = netdev2adap(dev); - u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL); + u8 *buf = t4_alloc_mem(EEPROMSIZE); if (!buf) return -ENOMEM; @@ -858,7 +865,7 @@ static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e, if (!err) memcpy(data, buf + e->offset, e->len); - kfree(buf); + t4_free_mem(buf); return err; } @@ -887,7 +894,7 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) { /* RMW possibly needed for first or last words. 
*/ - buf = kmalloc(aligned_len, GFP_KERNEL); + buf = t4_alloc_mem(aligned_len); if (!buf) return -ENOMEM; err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf); @@ -915,7 +922,7 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, err = t4_seeprom_wp(adapter, true); out: if (buf != data) - kfree(buf); + t4_free_mem(buf); return err; } @@ -1011,11 +1018,15 @@ static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key, if (!p) return 0; - for (i = 0; i < pi->rss_size; i++) - pi->rss[i] = p[i]; - if (pi->adapter->flags & FULL_INIT_DONE) + /* Interface must be brought up at least once */ + if (pi->adapter->flags & FULL_INIT_DONE) { + for (i = 0; i < pi->rss_size; i++) + pi->rss[i] = p[i]; + return cxgb4_write_rss(pi, pi->rss); - return 0; + } + + return -EPERM; } static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index c29227ee9ee8..2cf81857a297 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -83,7 +83,7 @@ char cxgb4_driver_name[] = KBUILD_MODNAME; #endif #define DRV_VERSION "2.0.0-ko" const char cxgb4_driver_version[] = DRV_VERSION; -#define DRV_DESC "Chelsio T4/T5 Network Driver" +#define DRV_DESC "Chelsio T4/T5/T6 Network Driver" /* Host shadow copy of ingress filter entry. This is in host native format * and doesn't match the ordering or bit order, etc. of the hardware of the @@ -151,6 +151,7 @@ MODULE_VERSION(DRV_VERSION); MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl); MODULE_FIRMWARE(FW4_FNAME); MODULE_FIRMWARE(FW5_FNAME); +MODULE_FIRMWARE(FW6_FNAME); /* * Normally we're willing to become the firmware's Master PF but will be happy @@ -4485,6 +4486,10 @@ static int enable_msix(struct adapter *adap) } for (i = 0; i < allocated; ++i) adap->msix_info[i].vec = entries[i].vector; + dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, " + "nic %d iscsi %d rdma cpl %d rdma ciq %d\n", + allocated, s->max_ethqsets, s->ofldqsets, s->rdmaqs, + s->rdmaciqs); kfree(entries); return 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index b2b5e5bbe04c..0cfa5d72cafd 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -56,7 +56,7 @@ * Generic information about the driver. */ #define DRV_VERSION "2.0.0-ko" -#define DRV_DESC "Chelsio T4/T5 Virtual Function (VF) Network Driver" +#define DRV_DESC "Chelsio T4/T5/T6 Virtual Function (VF) Network Driver" /* * Module Parameters. 
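(Editorial aside, not part of the series: the cxgb4 set_settings() hunk above snapshots the link config into old_lc, applies it with t4_link_l1cfg(), and restores the snapshot if the firmware rejects the request. A minimal C sketch of that save/apply/rollback pattern, using a hypothetical link_cfg type and apply_cfg() callback standing in for the real firmware call:)

struct link_cfg {
	unsigned int requested_speed;
	unsigned char autoneg;
};

/* Apply *want to *live; on firmware rejection, restore the old state. */
static int apply_with_rollback(struct link_cfg *live,
			       const struct link_cfg *want,
			       int (*apply_cfg)(const struct link_cfg *))
{
	struct link_cfg old = *live;	/* snapshot before touching hardware */
	int ret;

	*live = *want;
	ret = apply_cfg(live);		/* t4_link_l1cfg() in the driver */
	if (ret)
		*live = old;		/* back out and report the error */
	return ret;
}

(The benefit of this shape is that the in-memory state never disagrees with the hardware: either both move to the new config, or both stay on the old one.)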
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index 9d3bb8349a73..b3645297477e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c @@ -439,10 +439,7 @@ EXPORT_SYMBOL(hnae_ae_unregister); static int __init hnae_init(void) { hnae_class = class_create(THIS_MODULE, "hnae"); - if (IS_ERR(hnae_class)) - return PTR_ERR(hnae_class); - - return 0; + return PTR_ERR_OR_ZERO(hnae_class); } static void __exit hnae_exit(void) diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c index e4ec52ae61ff..37491c85bc42 100644 --- a/drivers/net/ethernet/hisilicon/hns_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns_mdio.c @@ -11,6 +11,7 @@ #include <linux/etherdevice.h> #include <linux/init.h> #include <linux/kernel.h> +#include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/netdevice.h> @@ -20,6 +21,7 @@ #include <linux/of_platform.h> #include <linux/phy.h> #include <linux/platform_device.h> +#include <linux/regmap.h> #include <linux/spinlock_types.h> #define MDIO_DRV_NAME "Hi-HNS_MDIO" @@ -36,7 +38,7 @@ struct hns_mdio_device { void *vbase; /* mdio reg base address */ - void *sys_vbase; + struct regmap *subctrl_vbase; }; /* mdio reg */ @@ -155,10 +157,10 @@ static int mdio_sc_cfg_reg_write(struct hns_mdio_device *mdio_dev, u32 time_cnt; u32 reg_value; - mdio_write_reg((void *)mdio_dev->sys_vbase, cfg_reg, set_val); + regmap_write(mdio_dev->subctrl_vbase, cfg_reg, set_val); for (time_cnt = MDIO_TIMEOUT; time_cnt; time_cnt--) { - reg_value = mdio_read_reg((void *)mdio_dev->sys_vbase, st_reg); + regmap_read(mdio_dev->subctrl_vbase, st_reg, &reg_value); reg_value &= st_msk; if ((!!check_st) == (!!reg_value)) break; @@ -352,7 +354,7 @@ static int hns_mdio_reset(struct mii_bus *bus) struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv; int ret; - if (!mdio_dev->sys_vbase) { + if (!mdio_dev->subctrl_vbase) { dev_err(&bus->dev, "mdio sys ctl reg has not been mapped\n"); return -ENODEV; } @@ -455,13 +457,12 @@ static int hns_mdio_probe(struct platform_device *pdev) return ret; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - mdio_dev->sys_vbase = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(mdio_dev->sys_vbase)) { - ret = PTR_ERR(mdio_dev->sys_vbase); - return ret; + mdio_dev->subctrl_vbase = + syscon_node_to_regmap(of_parse_phandle(np, "subctrl_vbase", 0)); + if (IS_ERR(mdio_dev->subctrl_vbase)) { + dev_warn(&pdev->dev, "no syscon hisilicon,peri-c-subctrl\n"); + mdio_dev->subctrl_vbase = NULL; } - new_bus->irq = devm_kcalloc(&pdev->dev, PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); if (!new_bus->irq) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 7a58b1f9971f..4dd3e26129b4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -109,8 +109,10 @@ #define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) #define I40E_NVM_VERSION_HI_SHIFT 12 #define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT) -#define I40E_OEM_VER_BUILD_MASK 0xff00 +#define I40E_OEM_VER_BUILD_MASK 0xffff #define I40E_OEM_VER_PATCH_MASK 0xff +#define I40E_OEM_VER_BUILD_SHIFT 8 +#define I40E_OEM_VER_SHIFT 24 /* The values in here are decimal coded as hex as is the case in the NVM map*/ #define I40E_CURRENT_NVM_VERSION_HI 0x2 @@ -468,6 +470,8 @@ struct i40e_vsi { #define I40E_VSI_FLAG_VEB_OWNER BIT(1) unsigned long flags; + /* 
Per VSI lock to protect elements/list (MAC filter) */ + spinlock_t mac_filter_list_lock; struct list_head mac_filter_list; /* VSI stats */ @@ -575,6 +579,8 @@ struct i40e_q_vector { struct rcu_head rcu; /* to avoid race with update stats on free */ char name[I40E_INT_NAME_STR_LEN]; bool arm_wb_state; +#define ITR_COUNTDOWN_START 100 + u8 itr_countdown; /* when 0 should adjust ITR */ } ____cacheline_internodealigned_in_smp; /* lan device */ @@ -590,6 +596,15 @@ struct i40e_device { static inline char *i40e_nvm_version_str(struct i40e_hw *hw) { static char buf[32]; + u32 full_ver; + u8 ver, patch; + u16 build; + + full_ver = hw->nvm.oem_ver; + ver = (u8)(full_ver >> I40E_OEM_VER_SHIFT); + build = (u16)((full_ver >> I40E_OEM_VER_BUILD_SHIFT) + & I40E_OEM_VER_BUILD_MASK); + patch = (u8)(full_ver & I40E_OEM_VER_PATCH_MASK); snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d", @@ -597,9 +612,7 @@ static inline char *i40e_nvm_version_str(struct i40e_hw *hw) I40E_NVM_VERSION_HI_SHIFT, (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >> I40E_NVM_VERSION_LO_SHIFT, - hw->nvm.eetrack, (hw->nvm.oem_ver >> 24), - (hw->nvm.oem_ver & I40E_OEM_VER_BUILD_MASK) >> 8, - hw->nvm.oem_ver & I40E_OEM_VER_PATCH_MASK); + hw->nvm.eetrack, ver, build, patch); return buf; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index daa6b177fdb7..2d74c6e4d7b6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -331,25 +331,11 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, len = buf_len; /* write the full 16-byte chunks */ for (i = 0; i < (len - 16); i += 16) - i40e_debug(hw, mask, - "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", - i, buf[i], buf[i + 1], buf[i + 2], - buf[i + 3], buf[i + 4], buf[i + 5], - buf[i + 6], buf[i + 7], buf[i + 8], - buf[i + 9], buf[i + 10], buf[i + 11], - buf[i + 12], buf[i + 13], buf[i + 14], - buf[i + 15]); + i40e_debug(hw, mask, "\t0x%04X %16ph\n", i, buf + i); /* write whatever's left over without overrunning the buffer */ - if (i < len) { - char d_buf[80]; - int j = 0; - - memset(d_buf, 0, sizeof(d_buf)); - j += sprintf(d_buf, "\t0x%04X ", i); - while (i < len) - j += sprintf(&d_buf[j], " %02X", buf[i++]); - i40e_debug(hw, mask, "%s\n", d_buf); - } + if (i < len) + i40e_debug(hw, mask, "\t0x%04X %*ph\n", + i, len - i, buf + i); } } @@ -958,6 +944,9 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw) else hw->pf_id = (u8)(func_rid & 0x7); + if (hw->mac.type == I40E_MAC_X722) + hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE; + status = i40e_init_nvm(hw); return status; } @@ -2275,13 +2264,15 @@ i40e_status i40e_update_link_info(struct i40e_hw *hw) if (status) return status; - status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, - NULL); - if (status) - return status; + if (hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) { + status = i40e_aq_get_phy_capabilities(hw, false, false, + &abilities, NULL); + if (status) + return status; - memcpy(hw->phy.link_info.module_type, &abilities.module_type, - sizeof(hw->phy.link_info.module_type)); + memcpy(hw->phy.link_info.module_type, &abilities.module_type, + sizeof(hw->phy.link_info.module_type)); + } return status; } @@ -3824,6 +3815,28 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, } /** + * i40e_add_filter_to_drop_tx_flow_control_frames- filter to drop flow control + * @hw: pointer to the hw struct + * 
@seid: VSI seid to add ethertype filter from + **/ +#define I40E_FLOW_CONTROL_ETHTYPE 0x8808 +void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, + u16 seid) +{ + u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC | + I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP | + I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX; + u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE; + i40e_status status; + + status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag, + seid, 0, true, NULL, + NULL); + if (status) + hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n"); +} + +/** * i40e_aq_alternate_read * @hw: pointer to the hardware structure * @reg_addr0: address of first dword to be read diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index d15bd62eb874..d4b7af9a2fc8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -1137,7 +1137,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp, goto command_write_done; } + spin_lock_bh(&vsi->mac_filter_list_lock); f = i40e_add_filter(vsi, ma, vlan, false, false); + spin_unlock_bh(&vsi->mac_filter_list_lock); ret = i40e_sync_vsi_filters(vsi, true); if (f && !ret) dev_info(&pf->pdev->dev, @@ -1174,7 +1176,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp, goto command_write_done; } + spin_lock_bh(&vsi->mac_filter_list_lock); i40e_del_filter(vsi, ma, vlan, false, false); + spin_unlock_bh(&vsi->mac_filter_list_lock); ret = i40e_sync_vsi_filters(vsi, true); if (!ret) dev_info(&pf->pdev->dev, diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 767b1db1005f..8fd26fdd7705 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -661,28 +661,31 @@ static int i40e_set_settings(struct net_device *netdev, /* Check autoneg */ if (autoneg == AUTONEG_ENABLE) { - /* If autoneg is not supported, return error */ - if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) { - netdev_info(netdev, "Autoneg not supported on this phy\n"); - return -EINVAL; - } /* If autoneg was not already enabled */ if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) { + /* If autoneg is not supported, return error */ + if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) { + netdev_info(netdev, "Autoneg not supported on this phy\n"); + return -EINVAL; + } + /* Autoneg is allowed to change */ config.abilities = abilities.abilities | I40E_AQ_PHY_ENABLE_AN; change = true; } } else { - /* If autoneg is supported 10GBASE_T is the only phy that - * can disable it, so otherwise return error - */ - if (safe_ecmd.supported & SUPPORTED_Autoneg && - hw->phy.link_info.phy_type != I40E_PHY_TYPE_10GBASE_T) { - netdev_info(netdev, "Autoneg cannot be disabled on this phy\n"); - return -EINVAL; - } /* If autoneg is currently enabled */ if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) { + /* If autoneg is supported 10GBASE_T is the only PHY + * that can disable it, so otherwise return error + */ + if (safe_ecmd.supported & SUPPORTED_Autoneg && + hw->phy.link_info.phy_type != + I40E_PHY_TYPE_10GBASE_T) { + netdev_info(netdev, "Autoneg cannot be disabled on this phy\n"); + return -EINVAL; + } + /* Autoneg is allowed to change */ config.abilities = abilities.abilities & ~I40E_AQ_PHY_ENABLE_AN; change = true; @@ -748,9 +751,9 @@ static int i40e_set_settings(struct net_device *netdev, status = i40e_update_link_info(hw); if (status) 
- netdev_info(netdev, "Updating link info failed with err %s aq_err %s\n", - i40e_stat_str(hw, status), - i40e_aq_str(hw, hw->aq.asq_last_status)); + netdev_dbg(netdev, "Updating link info failed with err %s aq_err %s\n", + i40e_stat_str(hw, status), + i40e_aq_str(hw, hw->aq.asq_last_status)); } else { netdev_info(netdev, "Nothing changed, exiting without setting anything.\n"); diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c index 2ec241142e9d..fe5d9bf3ed6d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c @@ -1516,10 +1516,12 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi) * same PCI function. */ netdev->dev_port = 1; + spin_lock_bh(&vsi->mac_filter_list_lock); i40e_add_filter(vsi, hw->mac.san_addr, 0, false, false); i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0, false, false); i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0, false, false); i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0, false, false); + spin_unlock_bh(&vsi->mac_filter_list_lock); /* use san mac */ ether_addr_copy(netdev->dev_addr, hw->mac.san_addr); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index f7ed3131d037..3e595adfb0bf 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -39,7 +39,7 @@ static const char i40e_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 3 -#define DRV_VERSION_BUILD 34 +#define DRV_VERSION_BUILD 46 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -1355,6 +1355,9 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) * @is_netdev: make sure its a netdev filter, else doesn't matter * * Returns ptr to the filter object or NULL when no memory available. + * + * NOTE: This function is expected to be called with mac_filter_list_lock + * being held. **/ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan, @@ -1413,6 +1416,9 @@ add_filter_out: * @vlan: the vlan * @is_vf: make sure it's a VF filter, else doesn't matter * @is_netdev: make sure it's a netdev filter, else doesn't matter + * + * NOTE: This function is expected to be called with mac_filter_list_lock + * being held. 
**/ void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan, @@ -1519,8 +1525,10 @@ static int i40e_set_mac(struct net_device *netdev, void *p) element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); } else { + spin_lock_bh(&vsi->mac_filter_list_lock); i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false); + spin_unlock_bh(&vsi->mac_filter_list_lock); } if (ether_addr_equal(addr->sa_data, hw->mac.addr)) { @@ -1531,10 +1539,12 @@ static int i40e_set_mac(struct net_device *netdev, void *p) element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH); i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); } else { + spin_lock_bh(&vsi->mac_filter_list_lock); f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, false, false); if (f) f->is_laa = true; + spin_unlock_bh(&vsi->mac_filter_list_lock); } i40e_sync_vsi_filters(vsi, false); @@ -1707,6 +1717,8 @@ static void i40e_set_rx_mode(struct net_device *netdev) struct netdev_hw_addr *mca; struct netdev_hw_addr *ha; + spin_lock_bh(&vsi->mac_filter_list_lock); + /* add addr if not already in the filter list */ netdev_for_each_uc_addr(uca, netdev) { if (!i40e_find_mac(vsi, uca->addr, false, true)) { @@ -1754,6 +1766,7 @@ static void i40e_set_rx_mode(struct net_device *netdev) bottom_of_search_loop: continue; } + spin_unlock_bh(&vsi->mac_filter_list_lock); /* check for other flag changes */ if (vsi->current_netdev_flags != vsi->netdev->flags) { @@ -1763,6 +1776,79 @@ bottom_of_search_loop: } /** + * i40e_mac_filter_entry_clone - Clones a MAC filter entry + * @src: source MAC filter entry to be cloned + * + * Returns the pointer to the newly cloned MAC filter entry or NULL + * in case of error + **/ +static struct i40e_mac_filter *i40e_mac_filter_entry_clone( + struct i40e_mac_filter *src) +{ + struct i40e_mac_filter *f; + + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + return NULL; + *f = *src; + + INIT_LIST_HEAD(&f->list); + + return f; +} + +/** + * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries + * @vsi: pointer to vsi struct + * @from: Pointer to list which contains MAC filter entries - changes to + * those entries need to be undone. + * + * MAC filter entries from the list were slated to be removed from the device. + **/ +static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi, + struct list_head *from) +{ + struct i40e_mac_filter *f, *ftmp; + + list_for_each_entry_safe(f, ftmp, from, list) { + f->changed = true; + /* Move the element back into the MAC filter list */ + list_move_tail(&f->list, &vsi->mac_filter_list); + } +} + +/** + * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries + * @vsi: pointer to vsi struct + * + * MAC filter entries from the list were slated to be added to the device. 
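+ * + * (Editor's note: "undo" only re-marks the entries as changed so that a + * later sync pass retries them; no memory is freed here.)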
+ **/ +static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi) +{ + struct i40e_mac_filter *f, *ftmp; + + list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { + if (!f->changed && f->counter) + f->changed = true; + } +} + +/** + * i40e_cleanup_add_list - Deletes the elements from the add list and + * releases their memory + * @add_list: Pointer to list which contains MAC filter entries + **/ +static void i40e_cleanup_add_list(struct list_head *add_list) +{ + struct i40e_mac_filter *f, *ftmp; + + list_for_each_entry_safe(f, ftmp, add_list, list) { + list_del(&f->list); + kfree(f); + } +} + +/** * i40e_sync_vsi_filters - Update the VSI filter list to the HW * @vsi: ptr to the VSI * @grab_rtnl: whether RTNL needs to be grabbed @@ -1773,11 +1859,13 @@ bottom_of_search_loop: **/ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) { - struct i40e_mac_filter *f, *ftmp; + struct list_head tmp_del_list, tmp_add_list; + struct i40e_mac_filter *f, *ftmp, *fclone; bool promisc_forced_on = false; bool add_happened = false; int filter_list_len = 0; u32 changed_flags = 0; + bool err_cond = false; i40e_status ret = 0; struct i40e_pf *pf; int num_add = 0; @@ -1798,17 +1886,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) vsi->current_netdev_flags = vsi->netdev->flags; } + INIT_LIST_HEAD(&tmp_del_list); + INIT_LIST_HEAD(&tmp_add_list); + if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) { vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED; - filter_list_len = pf->hw.aq.asq_buf_size / - sizeof(struct i40e_aqc_remove_macvlan_element_data); - del_list = kcalloc(filter_list_len, - sizeof(struct i40e_aqc_remove_macvlan_element_data), - GFP_KERNEL); - if (!del_list) - return -ENOMEM; - + spin_lock_bh(&vsi->mac_filter_list_lock); list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { if (!f->changed) continue; @@ -1816,6 +1900,58 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) if (f->counter != 0) continue; f->changed = false; + + /* Move the element into temporary del_list */ + list_move_tail(&f->list, &tmp_del_list); + } + + list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { + if (!f->changed) + continue; + + if (f->counter == 0) + continue; + f->changed = false; + + /* Clone MAC filter entry and add into temporary list */ + fclone = i40e_mac_filter_entry_clone(f); + if (!fclone) { + err_cond = true; + break; + } + list_add_tail(&fclone->list, &tmp_add_list); + } + + /* if failed to clone MAC filter entry - undo */ + if (err_cond) { + i40e_undo_del_filter_entries(vsi, &tmp_del_list); + i40e_undo_add_filter_entries(vsi); + } + spin_unlock_bh(&vsi->mac_filter_list_lock); + + if (err_cond) + i40e_cleanup_add_list(&tmp_add_list); + } + + /* Now process 'del_list' outside the lock */ + if (!list_empty(&tmp_del_list)) { + filter_list_len = pf->hw.aq.asq_buf_size / + sizeof(struct i40e_aqc_remove_macvlan_element_data); + del_list = kcalloc(filter_list_len, + sizeof(struct i40e_aqc_remove_macvlan_element_data), + GFP_KERNEL); + if (!del_list) { + i40e_cleanup_add_list(&tmp_add_list); + + /* Undo VSI's MAC filter entry element updates */ + spin_lock_bh(&vsi->mac_filter_list_lock); + i40e_undo_del_filter_entries(vsi, &tmp_del_list); + i40e_undo_add_filter_entries(vsi); + spin_unlock_bh(&vsi->mac_filter_list_lock); + return -ENOMEM; + } + + list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) { cmd_flags = 0; /* add to delete list */ ether_addr_copy(del_list[num_del].mac_addr, f->macaddr); del_list[num_del].vlan_tag = cpu_to_le16((u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan)); cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; del_list[num_del].flags = 
cmd_flags; num_del++; - /* unlink from filter list */ - list_del(&f->list); - kfree(f); - /* flush a full buffer */ if (num_del == filter_list_len) { ret = i40e_aq_remove_macvlan(&pf->hw, @@ -1842,12 +1974,18 @@ memset(del_list, 0, sizeof(*del_list)); if (ret && aq_err != I40E_AQ_RC_ENOENT) - dev_info(&pf->pdev->dev, - "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, aq_err)); + dev_err(&pf->pdev->dev, + "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, aq_err)); } + /* Release memory for MAC filter entries which were + * synced up with HW. + */ + list_del(&f->list); + kfree(f); } + if (num_del) { ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, del_list, num_del, NULL); @@ -1863,6 +2001,9 @@ kfree(del_list); del_list = NULL; + } + + if (!list_empty(&tmp_add_list)) { /* do all the adds now */ filter_list_len = pf->hw.aq.asq_buf_size / @@ -1870,16 +2011,19 @@ add_list = kcalloc(filter_list_len, sizeof(struct i40e_aqc_add_macvlan_element_data), GFP_KERNEL); - if (!add_list) + if (!add_list) { + /* Purge element from temporary lists */ + i40e_cleanup_add_list(&tmp_add_list); + + /* Undo add filter entries from VSI MAC filter list */ + spin_lock_bh(&vsi->mac_filter_list_lock); + i40e_undo_add_filter_entries(vsi); + spin_unlock_bh(&vsi->mac_filter_list_lock); return -ENOMEM; + } - list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { - if (!f->changed) - continue; + list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) { - if (f->counter == 0) - continue; - f->changed = false; add_happened = true; cmd_flags = 0; @@ -1906,7 +2050,13 @@ break; memset(add_list, 0, sizeof(*add_list)); } + /* Entries from tmp_add_list were cloned from MAC + * filter list, hence clean those cloned entries + */ + list_del(&f->list); + kfree(f); } + if (num_add) { ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, add_list, num_add, NULL); @@ -2158,6 +2308,9 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) is_vf = (vsi->type == I40E_VSI_SRIOV); is_netdev = !!(vsi->netdev); + /* Locked once because all functions invoked below iterate the list */ + spin_lock_bh(&vsi->mac_filter_list_lock); + if (is_netdev) { add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid, is_vf, is_netdev); @@ -2165,6 +2318,7 @@ dev_info(&vsi->back->pdev->dev, "Could not add vlan filter %d for %pM\n", vid, vsi->netdev->dev_addr); + spin_unlock_bh(&vsi->mac_filter_list_lock); return -ENOMEM; } } @@ -2175,6 +2329,7 @@ dev_info(&vsi->back->pdev->dev, "Could not add vlan filter %d for %pM\n", vid, f->macaddr); + spin_unlock_bh(&vsi->mac_filter_list_lock); return -ENOMEM; } } @@ -2196,6 +2351,7 @@ dev_info(&vsi->back->pdev->dev, "Could not add filter 0 for %pM\n", vsi->netdev->dev_addr); + spin_unlock_bh(&vsi->mac_filter_list_lock); return -ENOMEM; } } @@ -2204,22 +2360,28 @@ /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */ if (vid > 0 && !vsi->info.pvid) { list_for_each_entry(f, 
&vsi->mac_filter_list, list) { - if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY, - is_vf, is_netdev)) { - i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, - is_vf, is_netdev); - add_f = i40e_add_filter(vsi, f->macaddr, - 0, is_vf, is_netdev); - if (!add_f) { - dev_info(&vsi->back->pdev->dev, - "Could not add filter 0 for %pM\n", - f->macaddr); - return -ENOMEM; - } + if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY, + is_vf, is_netdev)) + continue; + i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, + is_vf, is_netdev); + add_f = i40e_add_filter(vsi, f->macaddr, + 0, is_vf, is_netdev); + if (!add_f) { + dev_info(&vsi->back->pdev->dev, + "Could not add filter 0 for %pM\n", + f->macaddr); + spin_unlock_bh(&vsi->mac_filter_list_lock); + return -ENOMEM; } } } + /* Make sure to release before sync_vsi_filters because that + * function will lock/unlock as necessary + */ + spin_unlock_bh(&vsi->mac_filter_list_lock); + if (test_bit(__I40E_DOWN, &vsi->back->state) || test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) return 0; @@ -2244,6 +2406,9 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) is_vf = (vsi->type == I40E_VSI_SRIOV); is_netdev = !!(netdev); + /* Locked once because all functions invoked below iterate the list */ + spin_lock_bh(&vsi->mac_filter_list_lock); + if (is_netdev) i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev); @@ -2274,6 +2439,7 @@ dev_info(&vsi->back->pdev->dev, "Could not add filter %d for %pM\n", I40E_VLAN_ANY, netdev->dev_addr); + spin_unlock_bh(&vsi->mac_filter_list_lock); return -ENOMEM; } } @@ -2282,16 +2448,22 @@ list_for_each_entry(f, &vsi->mac_filter_list, list) { i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev); add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY, - is_vf, is_netdev); + is_vf, is_netdev); if (!add_f) { dev_info(&vsi->back->pdev->dev, "Could not add filter %d for %pM\n", I40E_VLAN_ANY, f->macaddr); + spin_unlock_bh(&vsi->mac_filter_list_lock); return -ENOMEM; } } } + /* Make sure to release before sync_vsi_filters because that + * function will lock/unlock as necessary + */ + spin_unlock_bh(&vsi->mac_filter_list_lock); + if (test_bit(__I40E_DOWN, &vsi->back->state) || test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) return 0; @@ -2915,6 +3087,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) for (i = 0; i < vsi->num_q_vectors; i++, vector++) { struct i40e_q_vector *q_vector = vsi->q_vectors[i]; + q_vector->itr_countdown = ITR_COUNTDOWN_START; q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); q_vector->rx.latency_range = I40E_LOW_LATENCY; wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), @@ -3010,6 +3183,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) u32 val; /* set the ITR configuration */ + q_vector->itr_countdown = ITR_COUNTDOWN_START; q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); q_vector->rx.latency_range = I40E_LOW_LATENCY; wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr); @@ -3095,7 +3269,7 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data) if (!q_vector->tx.ring && !q_vector->rx.ring) return IRQ_HANDLED; - napi_schedule(&q_vector->napi); + napi_schedule_irqoff(&q_vector->napi); return IRQ_HANDLED; } @@ -3264,6 +3438,8 @@ static irqreturn_t i40e_intr(int irq, void *data) /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */ if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) { + struct i40e_vsi *vsi = 
pf->vsi[pf->lan_vsi]; + struct i40e_q_vector *q_vector = vsi->q_vectors[0]; /* temporarily disable queue cause for NAPI processing */ u32 qval = rd32(hw, I40E_QINT_RQCTL(0)); @@ -3276,7 +3452,7 @@ static irqreturn_t i40e_intr(int irq, void *data) wr32(hw, I40E_QINT_TQCTL(0), qval); if (!test_bit(__I40E_DOWN, &pf->state)) - napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi); + napi_schedule_irqoff(&q_vector->napi); } if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { @@ -6661,6 +6837,15 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) if (pf->flags & I40E_FLAG_MSIX_ENABLED) ret = i40e_setup_misc_vector(pf); + /* Add a filter to drop all Flow control frames from any VSI from being + * transmitted. By doing so we stop a malicious VF from sending out + * PAUSE or PFC frames and potentially controlling traffic for other + * PF/VF VSIs. + * The FW can still send Flow control frames if enabled. + */ + i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, + pf->main_vsi_seid); + /* restart the VSIs that were rebuilt and running before the reset */ i40e_pf_unquiesce_all_vsi(pf); @@ -7062,6 +7247,8 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) /* Setup default MSIX irq handler for VSI */ i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings); + /* Initialize VSI lock */ + spin_lock_init(&vsi->mac_filter_list_lock); pf->vsi[vsi_idx] = vsi; ret = vsi_idx; goto unlock_pf; @@ -8364,7 +8551,7 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, /** * i40e_features_check - Validate encapsulated packet conforms to limits * @skb: skb buff - * @netdev: This physical port's netdev + * @dev: This physical port's netdev * @features: Offload features that the stack believes apply **/ static netdev_features_t i40e_features_check(struct sk_buff *skb, @@ -8479,17 +8666,26 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) * default a MAC-VLAN filter that accepts any tagged packet * which must be replaced by a normal filter. 
*/ - if (!i40e_rm_default_mac_filter(vsi, mac_addr)) + if (!i40e_rm_default_mac_filter(vsi, mac_addr)) { + spin_lock_bh(&vsi->mac_filter_list_lock); i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true); + spin_unlock_bh(&vsi->mac_filter_list_lock); + } } else { /* relate the VSI_VMDQ name to the VSI_MAIN name */ snprintf(netdev->name, IFNAMSIZ, "%sv%%d", pf->vsi[pf->lan_vsi]->netdev->name); random_ether_addr(mac_addr); + + spin_lock_bh(&vsi->mac_filter_list_lock); i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false); + spin_unlock_bh(&vsi->mac_filter_list_lock); } + + spin_lock_bh(&vsi->mac_filter_list_lock); i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false); + spin_unlock_bh(&vsi->mac_filter_list_lock); ether_addr_copy(netdev->dev_addr, mac_addr); ether_addr_copy(netdev->perm_addr, mac_addr); @@ -8545,12 +8741,22 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi) return 1; veb = pf->veb[vsi->veb_idx]; + if (!veb) { + dev_info(&pf->pdev->dev, + "There is no veb associated with the bridge\n"); + return -ENOENT; + } + /* Uplink is a bridge in VEPA mode */ - if (veb && (veb->bridge_mode & BRIDGE_MODE_VEPA)) + if (veb->bridge_mode & BRIDGE_MODE_VEPA) { return 0; + } else { + /* Uplink is a bridge in VEB mode */ + return 1; + } - /* Uplink is a bridge in VEB mode */ - return 1; + /* VEPA is now default bridge, so return 0 */ + return 0; } /** @@ -8563,10 +8769,13 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi) static int i40e_add_vsi(struct i40e_vsi *vsi) { int ret = -ENODEV; - struct i40e_mac_filter *f, *ftmp; + u8 laa_macaddr[ETH_ALEN]; + bool found_laa_mac_filter = false; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_vsi_context ctxt; + struct i40e_mac_filter *f, *ftmp; + u8 enabled_tc = 0x1; /* TC0 enabled */ int f_count = 0; @@ -8738,32 +8947,41 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) vsi->id = ctxt.vsi_number; } + spin_lock_bh(&vsi->mac_filter_list_lock); /* If macvlan filters already exist, force them to get loaded */ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { f->changed = true; f_count++; + /* Expected to have only one MAC filter entry for LAA in list */ if (f->is_laa && vsi->type == I40E_VSI_MAIN) { - struct i40e_aqc_remove_macvlan_element_data element; + ether_addr_copy(laa_macaddr, f->macaddr); + found_laa_mac_filter = true; + } + } + spin_unlock_bh(&vsi->mac_filter_list_lock); - memset(&element, 0, sizeof(element)); - ether_addr_copy(element.mac_addr, f->macaddr); - element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; - ret = i40e_aq_remove_macvlan(hw, vsi->seid, - &element, 1, NULL); - if (ret) { - /* some older FW has a different default */ - element.flags |= - I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; - i40e_aq_remove_macvlan(hw, vsi->seid, - &element, 1, NULL); - } + if (found_laa_mac_filter) { + struct i40e_aqc_remove_macvlan_element_data element; - i40e_aq_mac_address_write(hw, - I40E_AQC_WRITE_TYPE_LAA_WOL, - f->macaddr, NULL); + memset(&element, 0, sizeof(element)); + ether_addr_copy(element.mac_addr, laa_macaddr); + element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; + ret = i40e_aq_remove_macvlan(hw, vsi->seid, + &element, 1, NULL); + if (ret) { + /* some older FW has a different default */ + element.flags |= + I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; + i40e_aq_remove_macvlan(hw, vsi->seid, + &element, 1, NULL); } + + i40e_aq_mac_address_write(hw, + I40E_AQC_WRITE_TYPE_LAA_WOL, + laa_macaddr, NULL); } + if (f_count) { vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; pf->flags |= 
I40E_FLAG_FILTER_SYNC; @@ -8826,9 +9044,12 @@ int i40e_vsi_release(struct i40e_vsi *vsi) i40e_vsi_disable_irq(vsi); } + spin_lock_bh(&vsi->mac_filter_list_lock); list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) i40e_del_filter(vsi, f->macaddr, f->vlan, f->is_vf, f->is_netdev); + spin_unlock_bh(&vsi->mac_filter_list_lock); + i40e_sync_vsi_filters(vsi, false); i40e_vsi_delete(vsi); @@ -9959,9 +10180,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) static u16 pfs_found; u16 wol_nvm_bits; u16 link_status; - int err = 0; + int err; u32 len; u32 i; + u8 set_fc_aq_fail; err = pci_enable_device_mem(pdev); if (err) @@ -10226,6 +10448,25 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); goto err_vsis; } + + /* Make sure flow control is set according to current settings */ + err = i40e_set_fc(hw, &set_fc_aq_fail, true); + if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET) + dev_dbg(&pf->pdev->dev, + "Set fc with err %s aq_err %s on get_phy_cap\n", + i40e_stat_str(hw, err), + i40e_aq_str(hw, hw->aq.asq_last_status)); + if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET) + dev_dbg(&pf->pdev->dev, + "Set fc with err %s aq_err %s on set_phy_config\n", + i40e_stat_str(hw, err), + i40e_aq_str(hw, hw->aq.asq_last_status)); + if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE) + dev_dbg(&pf->pdev->dev, + "Set fc with err %s aq_err %s on get_link_info\n", + i40e_stat_str(hw, err), + i40e_aq_str(hw, hw->aq.asq_last_status)); + /* if FDIR VSI was set up, start it now */ for (i = 0; i < pf->num_alloc_vsi; i++) { if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { @@ -10383,6 +10624,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); pf->hw.phy.phy_types = le32_to_cpu(abilities.phy_type); + /* Add a filter to drop all Flow control frames from any VSI from being + * transmitted. By doing so we stop a malicious VF from sending out + * PAUSE or PFC frames and potentially controlling traffic for other + * PF/VF VSIs. + * The FW can still send Flow control frames if enabled. 
+ */ + i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, + pf->main_vsi_seid); + /* print a string summarizing features */ i40e_print_features(pf); @@ -10430,6 +10680,7 @@ err_dma: static void i40e_remove(struct pci_dev *pdev) { struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_hw *hw = &pf->hw; i40e_status ret_code; int i; @@ -10437,6 +10688,10 @@ static void i40e_remove(struct pci_dev *pdev) i40e_ptp_stop(pf); + /* Disable RSS in hw */ + wr32(hw, I40E_PFQF_HENA(0), 0); + wr32(hw, I40E_PFQF_HENA(1), 0); + /* no more scheduling of any task */ set_bit(__I40E_DOWN, &pf->state); del_timer_sync(&pf->service_timer); diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 875a8ccc2611..6100cdd9ad13 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -290,9 +290,18 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data) { - if (hw->mac.type == I40E_MAC_X722) - return i40e_read_nvm_word_aq(hw, offset, data); - return i40e_read_nvm_word_srctl(hw, offset, data); + enum i40e_status_code ret_code = 0; + + if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { + ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (!ret_code) { + ret_code = i40e_read_nvm_word_aq(hw, offset, data); + i40e_release_nvm(hw); + } + } else { + ret_code = i40e_read_nvm_word_srctl(hw, offset, data); + } + return ret_code; } /** @@ -397,9 +406,19 @@ read_nvm_buffer_aq_exit: i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, u16 *words, u16 *data) { - if (hw->mac.type == I40E_MAC_X722) - return i40e_read_nvm_buffer_aq(hw, offset, words, data); - return i40e_read_nvm_buffer_srctl(hw, offset, words, data); + enum i40e_status_code ret_code = 0; + + if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { + ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (!ret_code) { + ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, + data); + i40e_release_nvm(hw); + } + } else { + ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data); + } + return ret_code; } /** @@ -465,7 +484,7 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer, static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum) { - i40e_status ret_code = 0; + i40e_status ret_code; struct i40e_virt_mem vmem; u16 pcie_alt_module = 0; u16 checksum_local = 0; @@ -545,15 +564,16 @@ i40e_calc_nvm_checksum_exit: **/ i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw) { - i40e_status ret_code = 0; + i40e_status ret_code; u16 checksum; __le16 le_sum; ret_code = i40e_calc_nvm_checksum(hw, &checksum); - le_sum = cpu_to_le16(checksum); - if (!ret_code) + if (!ret_code) { + le_sum = cpu_to_le16(checksum); ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD, 1, &le_sum, true); + } return ret_code; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 10bf2ba8bc15..bb9d583e5416 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -322,4 +322,6 @@ i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, void *buff, u16 *ret_buff_size, u8 *ret_next_table, u32 *ret_next_index, struct i40e_asq_cmd_details *cmd_details); +void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, + u16 vsi_seid); #endif /* _I40E_PROTOTYPE_H_ */ diff --git 
a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 512707c2898e..635b3ac17877 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -815,6 +815,8 @@ void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) * i40e_set_new_dynamic_itr - Find new ITR level * @rc: structure containing ring performance data * + * Returns true if ITR changed, false if not + * * Stores a new ITR value based on packets and byte counts during * the last interrupt. The advantage of per interrupt computation * is faster updates and more accurate ITR for the current traffic @@ -823,21 +825,32 @@ void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) * testing data as well as attempting to minimize response time * while increasing bulk throughput. **/ -static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) +static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) { enum i40e_latency_range new_latency_range = rc->latency_range; + struct i40e_q_vector *qv = rc->ring->q_vector; u32 new_itr = rc->itr; int bytes_per_int; + int usecs; if (rc->total_packets == 0 || !rc->itr) - return; + return false; /* simple throttlerate management - * 0-10MB/s lowest (100000 ints/s) + * 0-10MB/s lowest (50000 ints/s) * 10-20MB/s low (20000 ints/s) - * 20-1249MB/s bulk (8000 ints/s) + * 20-1249MB/s bulk (18000 ints/s) + * > 40000 Rx packets per second (8000 ints/s) + * + * The math works out because the divisor is in 10^(-6) which + * turns the bytes/us input value into MB/s values, but + * make sure to use usecs, as the register values written + * are in 2 usec increments in the ITR registers, and make sure + * to use the smoothed values that the countdown timer gives us. */ - bytes_per_int = rc->total_bytes / rc->itr; + usecs = (rc->itr << 1) * ITR_COUNTDOWN_START; + bytes_per_int = rc->total_bytes / usecs; + switch (new_latency_range) { case I40E_LOWEST_LATENCY: if (bytes_per_int > 10) @@ -850,35 +863,52 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) new_latency_range = I40E_LOWEST_LATENCY; break; case I40E_BULK_LATENCY: - if (bytes_per_int <= 20) - new_latency_range = I40E_LOW_LATENCY; - break; + case I40E_ULTRA_LATENCY: default: if (bytes_per_int <= 20) new_latency_range = I40E_LOW_LATENCY; break; } + + /* this is to adjust RX more aggressively when streaming small + * packets. The value of 40000 was picked as it is just beyond + * what the hardware can receive per second if in low latency + * mode. 
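+ * + * A worked example with illustrative numbers (a sketch, not measured + * data): with rc->itr = I40E_ITR_20K (0x19) the smoothed window is + * usecs = (0x19 << 1) * ITR_COUNTDOWN_START = 50 * 100 = 5000, so 250 + * packets in that window give (250 * 1000000) / 5000 = 50000 packets + * per second, which exceeds 40000 and selects I40E_ULTRA_LATENCY.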
+ */ +#define RX_ULTRA_PACKET_RATE 40000 + + if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) && + (&qv->rx == rc)) + new_latency_range = I40E_ULTRA_LATENCY; + rc->latency_range = new_latency_range; switch (new_latency_range) { case I40E_LOWEST_LATENCY: - new_itr = I40E_ITR_100K; + new_itr = I40E_ITR_50K; break; case I40E_LOW_LATENCY: new_itr = I40E_ITR_20K; break; case I40E_BULK_LATENCY: + new_itr = I40E_ITR_18K; + break; + case I40E_ULTRA_LATENCY: new_itr = I40E_ITR_8K; break; default: break; } - if (new_itr != rc->itr) - rc->itr = new_itr; - rc->total_bytes = 0; rc->total_packets = 0; + + if (new_itr != rc->itr) { + rc->itr = new_itr; + return true; + } + + return false; } /** @@ -1747,6 +1777,21 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) return total_rx_packets; } +static u32 i40e_buildreg_itr(const int type, const u16 itr) +{ + u32 val; + + val = I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | + (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | + (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT); + + return val; +} + +/* a small macro to shorten up some long lines */ +#define INTREG I40E_PFINT_DYN_CTLN + /** * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt * @vsi: the VSI we care about @@ -1757,54 +1802,69 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) { struct i40e_hw *hw = &vsi->back->hw; - u16 old_itr; + bool rx = false, tx = false; + u32 rxval, txval; int vector; - u32 val; vector = (q_vector->v_idx + vsi->base_vector); + + /* avoid dynamic calculation if in countdown mode OR if + * all dynamic is disabled + */ + rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0); + + if (q_vector->itr_countdown > 0 || + (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) && + !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) { + goto enable_int; + } + if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) { - old_itr = q_vector->rx.itr; - i40e_set_new_dynamic_itr(&q_vector->rx); - if (old_itr != q_vector->rx.itr) { - val = I40E_PFINT_DYN_CTLN_INTENA_MASK | - I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | - (I40E_RX_ITR << - I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | - (q_vector->rx.itr << - I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT); - } else { - val = I40E_PFINT_DYN_CTLN_INTENA_MASK | - I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | - (I40E_ITR_NONE << - I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); - } - if (!test_bit(__I40E_DOWN, &vsi->state)) - wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val); - } else { - i40e_irq_dynamic_enable(vsi, q_vector->v_idx); + rx = i40e_set_new_dynamic_itr(&q_vector->rx); + rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr); } + if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) { - old_itr = q_vector->tx.itr; - i40e_set_new_dynamic_itr(&q_vector->tx); - if (old_itr != q_vector->tx.itr) { - val = I40E_PFINT_DYN_CTLN_INTENA_MASK | - I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | - (I40E_TX_ITR << - I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | - (q_vector->tx.itr << - I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT); - } else { - val = I40E_PFINT_DYN_CTLN_INTENA_MASK | - I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | - (I40E_ITR_NONE << - I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); - } - if (!test_bit(__I40E_DOWN, &vsi->state)) - wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->v_idx + - vsi->base_vector - 1), val); - } else { - i40e_irq_dynamic_enable(vsi, q_vector->v_idx); + tx = i40e_set_new_dynamic_itr(&q_vector->tx); + txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr); } + + if (rx || tx) { + /* get the higher of the two ITR adjustments and + * use the same value 
for both ITR registers + * when in adaptive mode (Rx and/or Tx) + */ + u16 itr = max(q_vector->tx.itr, q_vector->rx.itr); + + q_vector->tx.itr = q_vector->rx.itr = itr; + txval = i40e_buildreg_itr(I40E_TX_ITR, itr); + tx = true; + rxval = i40e_buildreg_itr(I40E_RX_ITR, itr); + rx = true; + } + + /* only need to enable the interrupt once, but need + * to possibly update both ITR values + */ + if (rx) { + /* set the INTENA_MSK_MASK so that this first write + * won't actually enable the interrupt, instead just + * updating the ITR (it's bit 31 PF and VF) + */ + rxval |= BIT(31); + /* don't check _DOWN because interrupt isn't being enabled */ + wr32(hw, INTREG(vector - 1), rxval); + } + +enable_int: + if (!test_bit(__I40E_DOWN, &vsi->state)) + wr32(hw, INTREG(vector - 1), txval); + + if (q_vector->itr_countdown) + q_vector->itr_countdown--; + else + q_vector->itr_countdown = ITR_COUNTDOWN_START; + } /** @@ -2127,6 +2187,7 @@ out: * @tx_ring: ptr to the ring to send * @skb: ptr to the skb we're sending * @hdr_len: ptr to the size of the packet header + * @cd_type_cmd_tso_mss: ptr to u64 object * @cd_tunneling: ptr to context descriptor bits * * Returns 0 if no TSO can happen, 1 if tso is going, or error @@ -2186,6 +2247,7 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, * @tx_ring: ptr to the ring to send * @skb: ptr to the skb we're sending * @tx_flags: the collected send information + * @cd_type_cmd_tso_mss: ptr to u64 object * * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen **/ @@ -2228,6 +2290,7 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, * @tx_flags: pointer to Tx flags currently set * @td_cmd: Tx descriptor command bits to set * @td_offset: Tx descriptor header offsets to set + * @tx_ring: Tx descriptor ring * @cd_tunneling: ptr to context desc bits **/ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 7c0ed84b296d..6779fb771d6a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -32,12 +32,14 @@ #define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */ #define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */ #define I40E_ITR_100K 0x0005 +#define I40E_ITR_50K 0x000A #define I40E_ITR_20K 0x0019 +#define I40E_ITR_18K 0x001B #define I40E_ITR_8K 0x003E #define I40E_ITR_4K 0x007A #define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */ -#define I40E_ITR_RX_DEF I40E_ITR_8K -#define I40E_ITR_TX_DEF I40E_ITR_4K +#define I40E_ITR_RX_DEF I40E_ITR_20K +#define I40E_ITR_TX_DEF I40E_ITR_20K #define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ #define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */ #define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */ @@ -296,6 +298,7 @@ enum i40e_latency_range { I40E_LOWEST_LATENCY = 0, I40E_LOW_LATENCY = 1, I40E_BULK_LATENCY = 2, + I40E_ULTRA_LATENCY = 3, }; struct i40e_ring_container { diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 9c4a4573c28f..dd2da356d9a1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -544,6 +544,9 @@ struct i40e_hw { struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */ struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */ +#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0) + u64 flags; + /* 
debug mask */ u32 debug_mask; char err_str[16]; @@ -1065,8 +1068,8 @@ enum i40e_filter_program_desc_fd_status { }; #define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23 -#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \ - BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) +#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \ + I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) #define I40E_TXD_FLTR_QW1_CMD_SHIFT 4 #define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \ diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index a35a204b4eeb..44462b40f2d7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -547,6 +547,8 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) */ if (vf->port_vlan_id) i40e_vsi_add_pvid(vsi, vf->port_vlan_id); + + spin_lock_bh(&vsi->mac_filter_list_lock); f = i40e_add_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id ? vf->port_vlan_id : -1, true, false); @@ -559,6 +561,7 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) if (!f) dev_info(&pf->pdev->dev, "Could not allocate VF broadcast filter\n"); + spin_unlock_bh(&vsi->mac_filter_list_lock); } /* program mac filter */ @@ -941,6 +944,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) if (pci_num_vf(pf->pdev) != num_alloc_vfs) { ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); if (ret) { + pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; pf->num_alloc_vfs = 0; goto err_iov; } @@ -1598,6 +1602,11 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) } vsi = pf->vsi[vf->lan_vsi_idx]; + /* Lock once, because all functions inside the for loop access the + * VSI's MAC filter list, which needs to be protected using the same + * lock. + */ + spin_lock_bh(&vsi->mac_filter_list_lock); + /* add new addresses to the list */ for (i = 0; i < al->num_elements; i++) { struct i40e_mac_filter *f; @@ -1616,9 +1625,11 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) dev_err(&pf->pdev->dev, "Unable to add VF MAC filter\n"); ret = I40E_ERR_PARAM; + spin_unlock_bh(&vsi->mac_filter_list_lock); goto error_param; } } + spin_unlock_bh(&vsi->mac_filter_list_lock); /* program the updated filter list */ if (i40e_sync_vsi_filters(vsi, false)) @@ -1666,10 +1677,12 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) } vsi = pf->vsi[vf->lan_vsi_idx]; + spin_lock_bh(&vsi->mac_filter_list_lock); /* delete addresses from the list */ for (i = 0; i < al->num_elements; i++) i40e_del_filter(vsi, al->list[i].addr, I40E_VLAN_ANY, true, false); + spin_unlock_bh(&vsi->mac_filter_list_lock); /* program the updated filter list */ if (i40e_sync_vsi_filters(vsi, false)) @@ -2066,6 +2079,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) goto error_param; } + /* Lock once because the add/del_filter functions invoked below + * require mac_filter_list_lock to be held + */ + spin_lock_bh(&vsi->mac_filter_list_lock); + /* delete the temporary mac address */ i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id ?
vf->port_vlan_id : -1, @@ -2077,6 +2095,8 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) list_for_each_entry(f, &vsi->mac_filter_list, list) i40e_del_filter(vsi, f->macaddr, f->vlan, true, false); + spin_unlock_bh(&vsi->mac_filter_list_lock); + dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id); /* program mac filter */ if (i40e_sync_vsi_filters(vsi, false)) { @@ -2109,6 +2129,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT); struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; + bool is_vsi_in_vlan = false; struct i40e_vsi *vsi; struct i40e_vf *vf; int ret = 0; @@ -2138,7 +2159,11 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, /* duplicate request, so just return success */ goto error_pvid; - if (le16_to_cpu(vsi->info.pvid) == 0 && i40e_is_vsi_in_vlan(vsi)) { + spin_lock_bh(&vsi->mac_filter_list_lock); + is_vsi_in_vlan = i40e_is_vsi_in_vlan(vsi); + spin_unlock_bh(&vsi->mac_filter_list_lock); + + if (le16_to_cpu(vsi->info.pvid) == 0 && is_vsi_in_vlan) { dev_err(&pf->pdev->dev, "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n", vf_id); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 7435fb396dcd..72b1942a94aa 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -331,25 +331,11 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, len = buf_len; /* write the full 16-byte chunks */ for (i = 0; i < (len - 16); i += 16) - i40e_debug(hw, mask, - "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", - i, buf[i], buf[i + 1], buf[i + 2], - buf[i + 3], buf[i + 4], buf[i + 5], - buf[i + 6], buf[i + 7], buf[i + 8], - buf[i + 9], buf[i + 10], buf[i + 11], - buf[i + 12], buf[i + 13], buf[i + 14], - buf[i + 15]); + i40e_debug(hw, mask, "\t0x%04X %16ph\n", i, buf + i); /* write whatever's left over without overrunning the buffer */ - if (i < len) { - char d_buf[80]; - int j = 0; - - memset(d_buf, 0, sizeof(d_buf)); - j += sprintf(d_buf, "\t0x%04X ", i); - while (i < len) - j += sprintf(&d_buf[j], " %02X", buf[i++]); - i40e_debug(hw, mask, "%s\n", d_buf); - } + if (i < len) + i40e_debug(hw, mask, "\t0x%04X %*ph\n", + i, len - i, buf + i); } } @@ -443,9 +429,6 @@ static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw, I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); - cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)lut)); - cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)lut)); - status = i40evf_asq_send_command(hw, &desc, lut, lut_size, NULL); return status; @@ -520,8 +503,6 @@ static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw, I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) & I40E_AQC_SET_RSS_KEY_VSI_ID_MASK)); cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID); - cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)key)); - cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)key)); status = i40evf_asq_send_command(hw, &desc, key, key_size, NULL); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h index 8ed0edfbc125..cbd9a1b078ab 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h +++ 
b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h @@ -101,4 +101,6 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, u16 vsi_seid, u16 queue, bool is_add, struct i40e_control_filter_stats *stats, struct i40e_asq_cmd_details *cmd_details); +void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, + u16 vsi_seid); #endif /* _I40E_PROTOTYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 97493a4164eb..47e9a90d6b10 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -318,6 +318,8 @@ static void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector * i40e_set_new_dynamic_itr - Find new ITR level * @rc: structure containing ring performance data * + * Returns true if ITR changed, false if not + * * Stores a new ITR value based on packets and byte counts during * the last interrupt. The advantage of per interrupt computation * is faster updates and more accurate ITR for the current traffic @@ -326,21 +328,32 @@ static void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector * testing data as well as attempting to minimize response time * while increasing bulk throughput. **/ -static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) +static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) { enum i40e_latency_range new_latency_range = rc->latency_range; + struct i40e_q_vector *qv = rc->ring->q_vector; u32 new_itr = rc->itr; int bytes_per_int; + int usecs; if (rc->total_packets == 0 || !rc->itr) - return; + return false; /* simple throttlerate management - * 0-10MB/s lowest (100000 ints/s) + * 0-10MB/s lowest (50000 ints/s) * 10-20MB/s low (20000 ints/s) - * 20-1249MB/s bulk (8000 ints/s) + * 20-1249MB/s bulk (18000 ints/s) + * > 40000 Rx packets per second (8000 ints/s) + * + * The math works out because the divisor is in 10^(-6) which + * turns the bytes/us input value into MB/s values, but + * make sure to use usecs, as the register values written + * are in 2 usec increments in the ITR registers, and make sure + * to use the smoothed values that the countdown timer gives us. */ - bytes_per_int = rc->total_bytes / rc->itr; + usecs = (rc->itr << 1) * ITR_COUNTDOWN_START; + bytes_per_int = rc->total_bytes / usecs; + switch (new_latency_range) { case I40E_LOWEST_LATENCY: if (bytes_per_int > 10) @@ -353,35 +366,52 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) new_latency_range = I40E_LOWEST_LATENCY; break; case I40E_BULK_LATENCY: - if (bytes_per_int <= 20) - new_latency_range = I40E_LOW_LATENCY; - break; + case I40E_ULTRA_LATENCY: default: if (bytes_per_int <= 20) new_latency_range = I40E_LOW_LATENCY; break; } + + /* this is to adjust RX more aggressively when streaming small + * packets. The value of 40000 was picked as it is just beyond + * what the hardware can receive per second if in low latency + * mode. 
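+ * + * (Same illustrative math as the PF version: at I40E_ITR_20K the + * smoothed window below is 5000 usecs, so anything above 200 packets + * per window crosses the 40000 pps threshold.)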
+ */ +#define RX_ULTRA_PACKET_RATE 40000 + + if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) && + (&qv->rx == rc)) + new_latency_range = I40E_ULTRA_LATENCY; + rc->latency_range = new_latency_range; switch (new_latency_range) { case I40E_LOWEST_LATENCY: - new_itr = I40E_ITR_100K; + new_itr = I40E_ITR_50K; break; case I40E_LOW_LATENCY: new_itr = I40E_ITR_20K; break; case I40E_BULK_LATENCY: + new_itr = I40E_ITR_18K; + break; + case I40E_ULTRA_LATENCY: new_itr = I40E_ITR_8K; break; default: break; } - if (new_itr != rc->itr) - rc->itr = new_itr; - rc->total_bytes = 0; rc->total_packets = 0; + + if (new_itr != rc->itr) { + rc->itr = new_itr; + return true; + } + + return false; } /* @@ -1187,6 +1217,21 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) return total_rx_packets; } +static u32 i40e_buildreg_itr(const int type, const u16 itr) +{ + u32 val; + + val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | + I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK | + (type << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) | + (itr << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT); + + return val; +} + +/* a small macro to shorten up some long lines */ +#define INTREG I40E_VFINT_DYN_CTLN1 + /** * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt * @vsi: the VSI we care about @@ -1197,55 +1242,67 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) { struct i40e_hw *hw = &vsi->back->hw; - u16 old_itr; + bool rx = false, tx = false; + u32 rxval, txval; int vector; - u32 val; vector = (q_vector->v_idx + vsi->base_vector); + + /* avoid dynamic calculation if in countdown mode OR if + * all dynamic is disabled + */ + rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0); + + if (q_vector->itr_countdown > 0 || + (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) && + !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) { + goto enable_int; + } + if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) { - old_itr = q_vector->rx.itr; - i40e_set_new_dynamic_itr(&q_vector->rx); - if (old_itr != q_vector->rx.itr) { - val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | - I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK | - (I40E_RX_ITR << - I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) | - (q_vector->rx.itr << - I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT); - } else { - val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | - I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK | - (I40E_ITR_NONE << - I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT); - } - if (!test_bit(__I40E_DOWN, &vsi->state)) - wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val); - } else { - i40evf_irq_enable_queues(vsi->back, 1 - << q_vector->v_idx); + rx = i40e_set_new_dynamic_itr(&q_vector->rx); + rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr); } if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) { - old_itr = q_vector->tx.itr; - i40e_set_new_dynamic_itr(&q_vector->tx); - if (old_itr != q_vector->tx.itr) { - val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | - I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK | - (I40E_TX_ITR << - I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) | - (q_vector->tx.itr << - I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT); + tx = i40e_set_new_dynamic_itr(&q_vector->tx); + txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr); + } + if (rx || tx) { + /* get the higher of the two ITR adjustments and + * use the same value for both ITR registers + * when in adaptive mode (Rx and/or Tx) + */ + u16 itr = max(q_vector->tx.itr, q_vector->rx.itr); - } else { - val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | - I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK | - (I40E_ITR_NONE << - I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT); - } - if (!test_bit(__I40E_DOWN, 
&vsi->state)) - wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val); - } else { - i40evf_irq_enable_queues(vsi->back, BIT(q_vector->v_idx)); + q_vector->tx.itr = q_vector->rx.itr = itr; + txval = i40e_buildreg_itr(I40E_TX_ITR, itr); + tx = true; + rxval = i40e_buildreg_itr(I40E_RX_ITR, itr); + rx = true; } + + /* only need to enable the interrupt once, but need + * to possibly update both ITR values + */ + if (rx) { + /* set the INTENA_MSK_MASK so that this first write + * won't actually enable the interrupt, instead just + * updating the ITR (it's bit 31 PF and VF) + */ + rxval |= BIT(31); + /* don't check _DOWN because interrupt isn't being enabled */ + wr32(hw, INTREG(vector - 1), rxval); + } + +enable_int: + if (!test_bit(__I40E_DOWN, &vsi->state)) + wr32(hw, INTREG(vector - 1), txval); + + if (q_vector->itr_countdown) + q_vector->itr_countdown--; + else + q_vector->itr_countdown = ITR_COUNTDOWN_START; + } /** diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index c4f5a4eb2907..ebc1bf77f036 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -32,12 +32,14 @@ #define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */ #define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */ #define I40E_ITR_100K 0x0005 +#define I40E_ITR_50K 0x000A #define I40E_ITR_20K 0x0019 +#define I40E_ITR_18K 0x001B #define I40E_ITR_8K 0x003E #define I40E_ITR_4K 0x007A #define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */ -#define I40E_ITR_RX_DEF I40E_ITR_8K -#define I40E_ITR_TX_DEF I40E_ITR_4K +#define I40E_ITR_RX_DEF I40E_ITR_20K +#define I40E_ITR_TX_DEF I40E_ITR_20K #define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ #define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */ #define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */ @@ -89,16 +91,16 @@ enum i40e_dyn_idx_t { BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD)) #define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \ - BIT(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ - BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ - BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ - BIT(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \ - BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ - BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) #define i40e_pf_get_default_rss_hena(pf) \ (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? 
\ - I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA) + I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA) /* Supported Rx Buffer Sizes */ #define I40E_RXBUFFER_512 512 /* Used for packet split */ @@ -291,6 +293,7 @@ enum i40e_latency_range { I40E_LOWEST_LATENCY = 0, I40E_LOW_LATENCY = 1, I40E_BULK_LATENCY = 2, + I40E_ULTRA_LATENCY = 3, }; struct i40e_ring_container { diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index 85af3b48effc..301fe2b6dd03 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -1055,8 +1055,8 @@ enum i40e_filter_program_desc_fd_status { }; #define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23 -#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \ - BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) +#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \ + I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) #define I40E_TXD_FLTR_QW1_CMD_SHIFT 4 #define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index 22841c619f37..22fc3d49c4b9 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -112,6 +112,8 @@ struct i40e_q_vector { struct i40e_ring_container tx; u32 ring_mask; u8 num_ringpairs; /* total number of ring pairs in vector */ +#define ITR_COUNTDOWN_START 100 + u8 itr_countdown; /* when 0 or 1 update ITR */ int v_idx; /* vector index in list */ char name[IFNAMSIZ + 9]; bool arm_wb_state; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 4c4340cc4f45..d962164dfb0f 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -34,7 +34,7 @@ char i40evf_driver_name[] = "i40evf"; static const char i40evf_driver_string[] = "Intel(R) XL710/X710 Virtual Function Network Driver"; -#define DRV_VERSION "1.3.21" +#define DRV_VERSION "1.3.33" const char i40evf_driver_version[] = DRV_VERSION; static const char i40evf_copyright[] = "Copyright (c) 2013 - 2015 Intel Corporation."; @@ -282,6 +282,7 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask) /** * i40evf_irq_enable - Enable default interrupt generation settings * @adapter: board private structure + * @flush: boolean value whether to run rd32() **/ void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush) { @@ -305,15 +306,14 @@ static irqreturn_t i40evf_msix_aq(int irq, void *data) struct i40evf_adapter *adapter = netdev_priv(netdev); struct i40e_hw *hw = &adapter->hw; u32 val; - u32 ena_mask; /* handle non-queue interrupts */ - val = rd32(hw, I40E_VFINT_ICR01); - ena_mask = rd32(hw, I40E_VFINT_ICR0_ENA1); + rd32(hw, I40E_VFINT_ICR01); + rd32(hw, I40E_VFINT_ICR0_ENA1); - val = rd32(hw, I40E_VFINT_DYN_CTL01); - val = val | I40E_VFINT_DYN_CTL01_CLEARPBA_MASK; + val = rd32(hw, I40E_VFINT_DYN_CTL01) | + I40E_VFINT_DYN_CTL01_CLEARPBA_MASK; wr32(hw, I40E_VFINT_DYN_CTL01, val); /* schedule work on the private workqueue */ @@ -334,7 +334,7 @@ static irqreturn_t i40evf_msix_clean_rings(int irq, void *data) if (!q_vector->tx.ring && !q_vector->rx.ring) return IRQ_HANDLED; - napi_schedule(&q_vector->napi); + napi_schedule_irqoff(&q_vector->napi); return IRQ_HANDLED; } @@ -357,6 +357,7 @@ i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx) q_vector->rx.ring = rx_ring; q_vector->rx.count++; q_vector->rx.latency_range = 
I40E_LOW_LATENCY; + q_vector->itr_countdown = ITR_COUNTDOWN_START; } /** @@ -377,6 +378,7 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx) q_vector->tx.ring = tx_ring; q_vector->tx.count++; q_vector->tx.latency_range = I40E_LOW_LATENCY; + q_vector->itr_countdown = ITR_COUNTDOWN_START; q_vector->num_ringpairs++; q_vector->ring_mask |= BIT(t_idx); } @@ -1607,6 +1609,7 @@ static void i40evf_reset_task(struct work_struct *work) reset_task); struct net_device *netdev = adapter->netdev; struct i40e_hw *hw = &adapter->hw; + struct i40evf_vlan_filter *vlf; struct i40evf_mac_filter *f; u32 reg_val; int i = 0, err; @@ -1730,8 +1733,8 @@ continue_reset: f->add = true; } /* re-add all VLAN filters */ - list_for_each_entry(f, &adapter->vlan_filter_list, list) { - f->add = true; + list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { + vlf->add = true; } adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; @@ -2357,9 +2360,12 @@ err_alloc: err: /* Things went into the weeds, so try again later */ if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) { - dev_err(&pdev->dev, "Failed to communicate with PF; giving up\n"); + dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n"); adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; - return; /* do not reschedule */ + i40evf_shutdown_adminq(hw); + adapter->state = __I40EVF_STARTUP; + schedule_delayed_work(&adapter->init_task, HZ * 5); + return; } schedule_delayed_work(&adapter->init_task, HZ); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index dda0f678339a..1d2174526a4c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -152,9 +152,17 @@ struct vf_data_storage { u16 vlan_count; u8 spoofchk_enabled; bool rss_query_enabled; + u8 trusted; + int xcast_mode; unsigned int vf_api; }; +enum ixgbevf_xcast_modes { + IXGBEVF_XCAST_MODE_NONE = 0, + IXGBEVF_XCAST_MODE_MULTI, + IXGBEVF_XCAST_MODE_ALLMULTI, +}; + struct vf_macvlans { struct list_head l; int vf; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 9f8a7fd7a195..47395ff5d908 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -8407,6 +8407,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw, .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk, .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en, + .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust, .ndo_get_vf_config = ixgbe_ndo_get_vf_config, .ndo_get_stats64 = ixgbe_get_stats64, #ifdef CONFIG_IXGBE_DCB diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h index b1e4703ff2a5..8daa95f74548 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h @@ -102,6 +102,8 @@ enum ixgbe_pfvf_api_rev { #define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */ #define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */ +#define IXGBE_VF_UPDATE_XCAST_MODE 0x0c + /* length of permanent address message returned from PF */ #define IXGBE_VF_PERMADDR_MSG_LEN 4 /* word in permanent address message with the current multicast type */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 1d17b5872dd1..fcd8b27a0ccb 100644 --- 
a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -116,6 +116,12 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter) * we want to disable the querying by default. */ adapter->vfinfo[i].rss_query_enabled = 0; + + /* Untrust all VFs */ + adapter->vfinfo[i].trusted = false; + + /* set the default xcast mode */ + adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE; } return 0; @@ -1001,6 +1007,59 @@ static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter, return 0; } +static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + int xcast_mode = msgbuf[1]; + u32 vmolr, disable, enable; + + /* verify the PF is supporting the correct APIs */ + switch (adapter->vfinfo[vf].vf_api) { + case ixgbe_mbox_api_12: + break; + default: + return -EOPNOTSUPP; + } + + if (xcast_mode > IXGBEVF_XCAST_MODE_MULTI && + !adapter->vfinfo[vf].trusted) { + xcast_mode = IXGBEVF_XCAST_MODE_MULTI; + } + + if (adapter->vfinfo[vf].xcast_mode == xcast_mode) + goto out; + + switch (xcast_mode) { + case IXGBEVF_XCAST_MODE_NONE: + disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE; + enable = 0; + break; + case IXGBEVF_XCAST_MODE_MULTI: + disable = IXGBE_VMOLR_MPE; + enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE; + break; + case IXGBEVF_XCAST_MODE_ALLMULTI: + disable = 0; + enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE; + break; + default: + return -EOPNOTSUPP; + } + + vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); + vmolr &= ~disable; + vmolr |= enable; + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); + + adapter->vfinfo[vf].xcast_mode = xcast_mode; + +out: + msgbuf[1] = xcast_mode; + + return 0; +} + static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) { u32 mbx_size = IXGBE_VFMAILBOX_SIZE; @@ -1063,6 +1122,9 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) case IXGBE_VF_GET_RSS_KEY: retval = ixgbe_get_vf_rss_key(adapter, msgbuf, vf); break; + case IXGBE_VF_UPDATE_XCAST_MODE: + retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf); + break; default: e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); retval = IXGBE_ERR_MBX; @@ -1124,6 +1186,17 @@ void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0); } +static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 ping; + + ping = IXGBE_PF_CONTROL_MSG; + if (adapter->vfinfo[vf].clear_to_send) + ping |= IXGBE_VT_MSGTYPE_CTS; + ixgbe_write_mbx(hw, &ping, 1, vf); +} + void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; @@ -1416,6 +1489,28 @@ int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, return 0; } +int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (vf >= adapter->num_vfs) + return -EINVAL; + + /* nothing to do */ + if (adapter->vfinfo[vf].trusted == setting) + return 0; + + adapter->vfinfo[vf].trusted = setting; + + /* reset VF to reconfigure features */ + adapter->vfinfo[vf].clear_to_send = false; + ixgbe_ping_vf(adapter, vf); + + e_info(drv, "VF %u is %strusted\n", vf, setting ? 
"" : "not "); + + return 0; +} + int ixgbe_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi) { @@ -1430,5 +1525,6 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev, ivi->qos = adapter->vfinfo[vf].pf_qos; ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled; ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled; + ivi->trusted = adapter->vfinfo[vf].trusted; return 0; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h index 2c197e6d1fe7..dad925706f4c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h @@ -49,6 +49,7 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, bool setting); +int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting); int ixgbe_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi); void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 04c7ec8446e0..ec3147279621 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -471,6 +471,12 @@ enum ixgbevf_boards { board_X550EM_x_vf, }; +enum ixgbevf_xcast_modes { + IXGBEVF_XCAST_MODE_NONE = 0, + IXGBEVF_XCAST_MODE_MULTI, + IXGBEVF_XCAST_MODE_ALLMULTI, +}; + extern const struct ixgbevf_info ixgbevf_82599_vf_info; extern const struct ixgbevf_info ixgbevf_X540_vf_info; extern const struct ixgbevf_info ixgbevf_X550_vf_info; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 7570b5c7ccd8..592ff237d692 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1894,9 +1894,17 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; + unsigned int flags = netdev->flags; + int xcast_mode; + + xcast_mode = (flags & IFF_ALLMULTI) ? IXGBEVF_XCAST_MODE_ALLMULTI : + (flags & (IFF_BROADCAST | IFF_MULTICAST)) ? 
+ IXGBEVF_XCAST_MODE_MULTI : IXGBEVF_XCAST_MODE_NONE; spin_lock_bh(&adapter->mbx_lock); + hw->mac.ops.update_xcast_mode(hw, netdev, xcast_mode); + /* reprogram multicast list */ hw->mac.ops.update_mc_addr_list(hw, netdev); diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h index 82f44e06e5fc..340cdd469455 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.h +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h @@ -112,6 +112,8 @@ enum ixgbe_pfvf_api_rev { #define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */ #define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS hash key */ +#define IXGBE_VF_UPDATE_XCAST_MODE 0x0c + /* length of permanent address message returned from PF */ #define IXGBE_VF_PERMADDR_MSG_LEN 4 /* word in permanent address message with the current multicast type */ diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index d1339b050627..427f3605cbfc 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -469,6 +469,46 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, } /** + * ixgbevf_update_xcast_mode - Update Multicast mode + * @hw: pointer to the HW structure + * @netdev: pointer to net device structure + * @xcast_mode: new multicast mode + * + * Updates the Multicast Mode of VF. + **/ +static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, + struct net_device *netdev, int xcast_mode) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + u32 msgbuf[2]; + s32 err; + + switch (hw->api_version) { + case ixgbe_mbox_api_12: + break; + default: + return -EOPNOTSUPP; + } + + msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE; + msgbuf[1] = xcast_mode; + + err = mbx->ops.write_posted(hw, msgbuf, 2); + if (err) + return err; + + err = mbx->ops.read_posted(hw, msgbuf, 2); + if (err) + return err; + + msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; + if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK)) + return -EPERM; + + return 0; +} + +/** * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address * @hw: pointer to the HW structure * @vlan: 12 bit VLAN ID @@ -727,6 +767,7 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = { .check_link = ixgbevf_check_mac_link_vf, .set_rar = ixgbevf_set_rar_vf, .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf, + .update_xcast_mode = ixgbevf_update_xcast_mode, .set_uc_addr = ixgbevf_set_uc_addr_vf, .set_vfta = ixgbevf_set_vfta_vf, }; diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index d40f036b6df0..ef9f7736b4dc 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -63,6 +63,7 @@ struct ixgbe_mac_operations { s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *); s32 (*init_rx_addrs)(struct ixgbe_hw *); s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *); + s32 (*update_xcast_mode)(struct ixgbe_hw *, struct net_device *, int); s32 (*enable_mc)(struct ixgbe_hw *); s32 (*disable_mc)(struct ixgbe_hw *); s32 (*clear_vfta)(struct ixgbe_hw *); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 3b75adfb3f37..55d2d8577d07 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -2583,17 +2583,7 @@ static struct platform_driver cpsw_driver = { .remove = cpsw_remove, }; -static int __init cpsw_init(void) -{ - return platform_driver_register(&cpsw_driver); -} -late_initcall(cpsw_init); - -static void __exit cpsw_exit(void) -{ - 
platform_driver_unregister(&cpsw_driver); -} -module_exit(cpsw_exit); +module_platform_driver(cpsw_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Cyril Chemparathy <cyril@ti.com>"); diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 24f8dbcf854f..d50887e3df6d 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -348,7 +348,7 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb) struct rtable *rt; int err, ret = NET_XMIT_DROP; struct flowi4 fl4 = { - .flowi4_oif = dev_get_iflink(dev), + .flowi4_oif = dev->ifindex, .flowi4_tos = RT_TOS(ip4h->tos), .flowi4_flags = FLOWI_FLAG_ANYSRC, .daddr = ip4h->daddr, @@ -386,7 +386,7 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb) struct dst_entry *dst; int err, ret = NET_XMIT_DROP; struct flowi6 fl6 = { - .flowi6_iif = skb->dev->ifindex, + .flowi6_iif = dev->ifindex, .daddr = ip6h->daddr, .saddr = ip6h->saddr, .flowi6_flags = FLOWI_FLAG_ANYSRC, diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 3bc9f03349f3..95f51d7267b3 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -25,7 +25,7 @@ #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/gpio.h> -#include <linux/mdio-gpio.h> +#include <linux/platform_data/mdio-gpio.h> #include <linux/of_gpio.h> #include <linux/of_mdio.h> diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 12f44c53cc8e..88cb4592b6fb 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -372,6 +372,33 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr) EXPORT_SYMBOL(mdiobus_scan); /** + * mdiobus_read_nested - Nested version of the mdiobus_read function + * @bus: the mii_bus struct + * @addr: the phy address + * @regnum: register number to read + * + * In case of nested MDIO bus access avoid lockdep false positives by + * using mutex_lock_nested(). + * + * NOTE: MUST NOT be called from interrupt context, + * because the bus read/write functions may wait for an interrupt + * to conclude the operation. + */ +int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum) +{ + int retval; + + BUG_ON(in_interrupt()); + + mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING); + retval = bus->read(bus, addr, regnum); + mutex_unlock(&bus->mdio_lock); + + return retval; +} +EXPORT_SYMBOL(mdiobus_read_nested); + +/** * mdiobus_read - Convenience function for reading a given MII mgmt register * @bus: the mii_bus struct * @addr: the phy address @@ -396,6 +423,34 @@ int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum) EXPORT_SYMBOL(mdiobus_read); /** + * mdiobus_write_nested - Nested version of the mdiobus_write function + * @bus: the mii_bus struct + * @addr: the phy address + * @regnum: register number to write + * @val: value to write to @regnum + * + * In case of nested MDIO bus access avoid lockdep false positives by + * using mutex_lock_nested(). + * + * NOTE: MUST NOT be called from interrupt context, + * because the bus read/write functions may wait for an interrupt + * to conclude the operation. 
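+ * + * Illustrative use only (a sketch; MII_BMCR and BMCR_RESET come from + * linux/mii.h and are not part of this change): + * + *	err = mdiobus_write_nested(bus, addr, MII_BMCR, BMCR_RESET); + *	if (err < 0) + *		return err;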
+ */ +int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val) +{ + int err; + + BUG_ON(in_interrupt()); + + mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING); + err = bus->write(bus, addr, regnum, val); + mutex_unlock(&bus->mdio_lock); + + return err; +} +EXPORT_SYMBOL(mdiobus_write_nested); + +/** * mdiobus_write - Convenience function for writing a given MII mgmt register * @bus: the mii_bus struct * @addr: the phy address diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index a511ef3614b9..3fda750db2a9 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -3312,7 +3312,7 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy, } /* fw uses seconds, also make sure that it's >0 */ - interval = max_t(u16, 1, request->interval / 1000); + interval = max_t(u16, 1, request->scan_plans[0].interval); ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, interval, interval, diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index 7fcd2c24a0a4..13c97f665ba8 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -630,6 +630,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) kfree(mvm->d3_resume_sram); if (mvm->nd_config) { kfree(mvm->nd_config->match_sets); + kfree(mvm->nd_config->scan_plans); kfree(mvm->nd_config); mvm->nd_config = NULL; } diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 4a1f9af63bf0..cee4f267ca66 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -1271,12 +1271,12 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, params.type = iwl_mvm_get_scan_type(mvm, vif, ¶ms); - if (req->interval > U16_MAX) { + if (req->scan_plans[0].interval > U16_MAX) { IWL_DEBUG_SCAN(mvm, "interval value is > 16-bits, set to max possible\n"); params.interval = U16_MAX; } else { - params.interval = req->interval / MSEC_PER_SEC; + params.interval = req->scan_plans[0].interval; } /* In theory, LMAC scans can handle a 32-bit delay, but since diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c index 48a2cad29477..7e8bb1198ae9 100644 --- a/drivers/net/wireless/rt2x00/rt2x00config.c +++ b/drivers/net/wireless/rt2x00/rt2x00config.c @@ -266,7 +266,7 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev, if (beacon_diff > beacon_int) beacon_diff = 0; - autowake_timeout = (conf->max_sleep_period * beacon_int) - beacon_diff; + autowake_timeout = (conf->ps_dtim_period * beacon_int) - beacon_diff; queue_delayed_work(rt2x00dev->workqueue, &rt2x00dev->autowakeup_work, autowake_timeout - 15); diff --git a/drivers/net/wireless/ti/wl12xx/scan.c b/drivers/net/wireless/ti/wl12xx/scan.c index 7c355fff2c5e..ebed13af9852 100644 --- a/drivers/net/wireless/ti/wl12xx/scan.c +++ b/drivers/net/wireless/ti/wl12xx/scan.c @@ -350,7 +350,8 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl, cfg->bss_type = SCAN_BSS_TYPE_ANY; /* currently NL80211 supports only a single interval */ for (i = 0; i < SCAN_MAX_CYCLE_INTERVALS; i++) - cfg->intervals[i] = cpu_to_le32(req->interval); + cfg->intervals[i] = cpu_to_le32(req->scan_plans[0].interval * + MSEC_PER_SEC); cfg->ssid_len = 0; ret = wlcore_scan_sched_scan_ssid_list(wl, wlvif, req); diff --git a/drivers/net/wireless/ti/wl18xx/scan.c b/drivers/net/wireless/ti/wl18xx/scan.c index 
c938c494c785..bc15aa2c3efa 100644 --- a/drivers/net/wireless/ti/wl18xx/scan.c +++ b/drivers/net/wireless/ti/wl18xx/scan.c @@ -228,13 +228,15 @@ int wl18xx_scan_sched_scan_config(struct wl1271 *wl, wl18xx_adjust_channels(cmd, cmd_channels); if (c->num_short_intervals && c->long_interval && - c->long_interval > req->interval) { - cmd->short_cycles_msec = cpu_to_le16(req->interval); + c->long_interval > req->scan_plans[0].interval * MSEC_PER_SEC) { + cmd->short_cycles_msec = + cpu_to_le16(req->scan_plans[0].interval * MSEC_PER_SEC); cmd->long_cycles_msec = cpu_to_le16(c->long_interval); cmd->short_cycles_count = c->num_short_intervals; } else { cmd->short_cycles_msec = 0; - cmd->long_cycles_msec = cpu_to_le16(req->interval); + cmd->long_cycles_msec = + cpu_to_le16(req->scan_plans[0].interval * MSEC_PER_SEC); cmd->short_cycles_count = 0; } wl1271_debug(DEBUG_SCAN, "short_interval: %d, long_interval: %d, num_short: %d", diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index dcfb2f43d316..452c0b0d2f32 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1932,6 +1932,8 @@ enum ieee80211_category { WLAN_CATEGORY_HT = 7, WLAN_CATEGORY_SA_QUERY = 8, WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9, + WLAN_CATEGORY_WNM = 10, + WLAN_CATEGORY_WNM_UNPROTECTED = 11, WLAN_CATEGORY_TDLS = 12, WLAN_CATEGORY_MESH_ACTION = 13, WLAN_CATEGORY_MULTIHOP_ACTION = 14, @@ -2396,7 +2398,10 @@ static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr) category = ((u8 *) hdr) + 24; return *category != WLAN_CATEGORY_PUBLIC && *category != WLAN_CATEGORY_HT && + *category != WLAN_CATEGORY_WNM_UNPROTECTED && *category != WLAN_CATEGORY_SELF_PROTECTED && + *category != WLAN_CATEGORY_UNPROT_DMG && + *category != WLAN_CATEGORY_VHT && *category != WLAN_CATEGORY_VENDOR_SPECIFIC; } diff --git a/include/linux/if_link.h b/include/linux/if_link.h index ae5d0d22955d..f923d15b432c 100644 --- a/include/linux/if_link.h +++ b/include/linux/if_link.h @@ -24,5 +24,6 @@ struct ifla_vf_info { __u32 min_tx_rate; __u32 max_tx_rate; __u32 rss_query_en; + __u32 trusted; }; #endif /* _LINUX_IF_LINK_H */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 69fdd427c8cb..773383859bd9 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -881,6 +881,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate, * int max_tx_rate); * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); + * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting); * int (*ndo_get_vf_config)(struct net_device *dev, * int vf, struct ifla_vf_info *ivf); * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state); @@ -1109,6 +1110,8 @@ struct net_device_ops { int max_tx_rate); int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); + int (*ndo_set_vf_trust)(struct net_device *dev, + int vf, bool setting); int (*ndo_get_vf_config)(struct net_device *dev, int vf, struct ifla_vf_info *ivf); diff --git a/include/linux/phy.h b/include/linux/phy.h index 4c477e6ece33..05fde31b6dc6 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -213,7 +213,9 @@ static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev) void devm_mdiobus_free(struct device *dev, struct mii_bus *bus); struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); +int 
mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum); int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); +int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val); #define PHY_INTERRUPT_DISABLED 0x0 diff --git a/include/linux/mdio-gpio.h b/include/linux/platform_data/mdio-gpio.h index 11f00cdabe3d..11f00cdabe3d 100644 --- a/include/linux/mdio-gpio.h +++ b/include/linux/platform_data/mdio-gpio.h diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 4398411236f1..24f4dfd94c51 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -463,6 +463,15 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, return delta_us; } +static inline bool skb_mstamp_after(const struct skb_mstamp *t1, + const struct skb_mstamp *t0) +{ + s32 diff = t1->stamp_jiffies - t0->stamp_jiffies; + + if (!diff) + diff = t1->stamp_us - t0->stamp_us; + return diff > 0; +} /** * struct sk_buff - socket buffer diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 86a7edaa6797..c906f4534581 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -194,6 +194,12 @@ struct tcp_sock { u32 window_clamp; /* Maximal window to advertise */ u32 rcv_ssthresh; /* Current window clamp */ + /* Information of the most recently (s)acked skb */ + struct tcp_rack { + struct skb_mstamp mstamp; /* (Re)sent time of the skb */ + u8 advanced; /* mstamp advanced since last lost marking */ + u8 reord; /* reordering detected */ + } rack; u16 advmss; /* Advertised MSS */ u8 unused; u8 nonagle : 4,/* Disable Nagle algorithm? */ @@ -217,6 +223,9 @@ struct tcp_sock { u32 mdev_max_us; /* maximal mdev for the last rtt period */ u32 rttvar_us; /* smoothed mdev_max */ u32 rtt_seq; /* sequence number to update rttvar */ + struct rtt_meas { + u32 rtt, ts; /* RTT in usec and sampling time in jiffies. */ + } rtt_min[3]; u32 packets_out; /* Packets which are "in flight" */ u32 retrans_out; /* Retransmitted packets out */ @@ -280,8 +289,6 @@ struct tcp_sock { int lost_cnt_hint; u32 retransmit_high; /* L-bits may be on up to this seqno */ - u32 lost_retrans_low; /* Sent seq after any rxmit (lowest) */ - u32 prior_ssthresh; /* ssthresh saved at recovery start */ u32 high_seq; /* snd_nxt at onset of congestion */ @@ -385,8 +392,9 @@ static inline bool tcp_passive_fastopen(const struct sock *sk) static inline void fastopen_queue_tune(struct sock *sk, int backlog) { struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; + int somaxconn = READ_ONCE(sock_net(sk)->core.sysctl_somaxconn); - queue->fastopenq.max_qlen = backlog; + queue->fastopenq.max_qlen = min_t(unsigned int, backlog, somaxconn); } static inline void tcp_saved_syn_free(struct tcp_sock *tp) diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 90332a1838cc..48155be5db7f 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -5,6 +5,7 @@ * * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright 2015 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -1501,13 +1502,26 @@ struct cfg80211_match_set { }; /** + * struct cfg80211_sched_scan_plan - scan plan for scheduled scan + * + * @interval: interval between scheduled scan iterations. In seconds. + * @iterations: number of scan iterations in this scan plan. Zero means + * infinite loop. 
+ * The last scan plan will always have this parameter set to zero; + * all other scan plans will have a finite number of iterations. + */ +struct cfg80211_sched_scan_plan { + u32 interval; + u32 iterations; +}; + +/** * struct cfg80211_sched_scan_request - scheduled scan request description * * @ssids: SSIDs to scan for (passed in the probe_reqs in active scans) * @n_ssids: number of SSIDs * @n_channels: total number of channels to scan * @scan_width: channel width for scanning - * @interval: interval between each scheduled scan cycle * @ie: optional information element(s) to add into Probe Request or %NULL * @ie_len: length of ie in octets * @flags: bit field of flags controlling operation @@ -1526,6 +1540,9 @@ struct cfg80211_match_set { * @mac_addr_mask: MAC address mask used with randomisation, bits that * are 0 in the mask should be randomised, bits that are 1 should * be taken from the @mac_addr + * @scan_plans: scan plans to be executed in this scheduled scan. Lowest + * index must be executed first. + * @n_scan_plans: number of scan plans, at least 1. * @rcu_head: RCU callback used to free the struct * @owner_nlportid: netlink portid of owner (if this is a request * owned by a particular socket) @@ -1539,7 +1556,6 @@ struct cfg80211_sched_scan_request { int n_ssids; u32 n_channels; enum nl80211_bss_scan_width scan_width; - u32 interval; const u8 *ie; size_t ie_len; u32 flags; @@ -1547,6 +1563,8 @@ struct cfg80211_sched_scan_request { int n_match_sets; s32 min_rssi_thold; u32 delay; + struct cfg80211_sched_scan_plan *scan_plans; + int n_scan_plans; u8 mac_addr[ETH_ALEN] __aligned(2); u8 mac_addr_mask[ETH_ALEN] __aligned(2); @@ -1576,6 +1594,26 @@ enum cfg80211_signal_type { }; /** + * struct cfg80211_inform_bss - BSS inform data + * @chan: channel the frame was received on + * @scan_width: scan width that was used + * @signal: signal strength value, according to the wiphy's + * signal type + * @boottime_ns: timestamp (CLOCK_BOOTTIME) when the information was + * received; should match the time when the frame was actually + * received by the device (not just by the host, in case it was + * buffered on the device) and be accurate to about 10ms. + * If the frame isn't buffered, just passing the return value of + * ktime_get_boot_ns() is likely appropriate. + */ +struct cfg80211_inform_bss { + struct ieee80211_channel *chan; + enum nl80211_bss_scan_width scan_width; + s32 signal; + u64 boottime_ns; +}; + +/** * struct cfg80211_bss_ie_data - BSS entry IE data * @tsf: TSF contained in the frame that carried these IEs * @rcu_head: internal use, for freeing @@ -3056,6 +3094,12 @@ struct wiphy_vendor_command { * include fixed IEs like supported rates * @max_sched_scan_ie_len: same as max_scan_ie_len, but for scheduled * scans + * @max_sched_scan_plans: maximum number of scan plans (scan interval and number + * of iterations) for scheduled scan supported by the device. + * @max_sched_scan_plan_interval: maximum interval (in seconds) for a + * single scan plan supported by the device. + * @max_sched_scan_plan_iterations: maximum number of iterations for a single + * scan plan supported by the device.
* @coverage_class: current coverage class * @fw_version: firmware version for ethtool reporting * @hw_version: hardware version for ethtool reporting @@ -3163,6 +3207,9 @@ struct wiphy { u8 max_match_sets; u16 max_scan_ie_len; u16 max_sched_scan_ie_len; + u32 max_sched_scan_plans; + u32 max_sched_scan_plan_interval; + u32 max_sched_scan_plan_iterations; int n_cipher_suites; const u32 *cipher_suites; @@ -3958,14 +4005,11 @@ void cfg80211_sched_scan_stopped(struct wiphy *wiphy); void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy); /** - * cfg80211_inform_bss_width_frame - inform cfg80211 of a received BSS frame - * + * cfg80211_inform_bss_frame_data - inform cfg80211 of a received BSS frame * @wiphy: the wiphy reporting the BSS - * @rx_channel: The channel the frame was received on - * @scan_width: width of the control channel + * @data: the BSS metadata * @mgmt: the management frame (probe response or beacon) * @len: length of the management frame - * @signal: the signal strength, type depends on the wiphy's signal_type * @gfp: context flags * * This informs cfg80211 that BSS information was found and @@ -3975,11 +4019,26 @@ void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy); * Or %NULL on error. */ struct cfg80211_bss * __must_check +cfg80211_inform_bss_frame_data(struct wiphy *wiphy, + struct cfg80211_inform_bss *data, + struct ieee80211_mgmt *mgmt, size_t len, + gfp_t gfp); + +static inline struct cfg80211_bss * __must_check cfg80211_inform_bss_width_frame(struct wiphy *wiphy, struct ieee80211_channel *rx_channel, enum nl80211_bss_scan_width scan_width, struct ieee80211_mgmt *mgmt, size_t len, - s32 signal, gfp_t gfp); + s32 signal, gfp_t gfp) +{ + struct cfg80211_inform_bss data = { + .chan = rx_channel, + .scan_width = scan_width, + .signal = signal, + }; + + return cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len, gfp); +} static inline struct cfg80211_bss * __must_check cfg80211_inform_bss_frame(struct wiphy *wiphy, @@ -3987,9 +4046,13 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy, struct ieee80211_mgmt *mgmt, size_t len, s32 signal, gfp_t gfp) { - return cfg80211_inform_bss_width_frame(wiphy, rx_channel, - NL80211_BSS_CHAN_WIDTH_20, - mgmt, len, signal, gfp); + struct cfg80211_inform_bss data = { + .chan = rx_channel, + .scan_width = NL80211_BSS_CHAN_WIDTH_20, + .signal = signal, + }; + + return cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len, gfp); } /** @@ -4006,11 +4069,10 @@ enum cfg80211_bss_frame_type { }; /** - * cfg80211_inform_bss_width - inform cfg80211 of a new BSS + * cfg80211_inform_bss_data - inform cfg80211 of a new BSS * * @wiphy: the wiphy reporting the BSS - * @rx_channel: The channel the frame was received on - * @scan_width: width of the control channel + * @data: the BSS metadata * @ftype: frame type (if known) * @bssid: the BSSID of the BSS * @tsf: the TSF sent by the peer in the beacon/probe response (or 0) @@ -4018,7 +4080,6 @@ enum cfg80211_bss_frame_type { * @beacon_interval: the beacon interval announced by the peer * @ie: additional IEs sent by the peer * @ielen: length of the additional IEs - * @signal: the signal strength, type depends on the wiphy's signal_type * @gfp: context flags * * This informs cfg80211 that BSS information was found and @@ -4028,13 +4089,32 @@ enum cfg80211_bss_frame_type { * Or %NULL on error. 
*/ struct cfg80211_bss * __must_check +cfg80211_inform_bss_data(struct wiphy *wiphy, + struct cfg80211_inform_bss *data, + enum cfg80211_bss_frame_type ftype, + const u8 *bssid, u64 tsf, u16 capability, + u16 beacon_interval, const u8 *ie, size_t ielen, + gfp_t gfp); + +static inline struct cfg80211_bss * __must_check cfg80211_inform_bss_width(struct wiphy *wiphy, struct ieee80211_channel *rx_channel, enum nl80211_bss_scan_width scan_width, enum cfg80211_bss_frame_type ftype, const u8 *bssid, u64 tsf, u16 capability, u16 beacon_interval, const u8 *ie, size_t ielen, - s32 signal, gfp_t gfp); + s32 signal, gfp_t gfp) +{ + struct cfg80211_inform_bss data = { + .chan = rx_channel, + .scan_width = scan_width, + .signal = signal, + }; + + return cfg80211_inform_bss_data(wiphy, &data, ftype, bssid, tsf, + capability, beacon_interval, ie, ielen, + gfp); +} static inline struct cfg80211_bss * __must_check cfg80211_inform_bss(struct wiphy *wiphy, @@ -4044,11 +4124,15 @@ cfg80211_inform_bss(struct wiphy *wiphy, u16 beacon_interval, const u8 *ie, size_t ielen, s32 signal, gfp_t gfp) { - return cfg80211_inform_bss_width(wiphy, rx_channel, - NL80211_BSS_CHAN_WIDTH_20, ftype, - bssid, tsf, capability, - beacon_interval, ie, ielen, signal, - gfp); + struct cfg80211_inform_bss data = { + .chan = rx_channel, + .scan_width = NL80211_BSS_CHAN_WIDTH_20, + .signal = signal, + }; + + return cfg80211_inform_bss_data(wiphy, &data, ftype, bssid, tsf, + capability, beacon_interval, ie, ielen, + gfp); } struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy, diff --git a/include/net/dsa.h b/include/net/dsa.h index e00588625bc2..98ccbdef646f 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -198,6 +198,7 @@ static inline u8 dsa_upstream_port(struct dsa_switch *ds) } struct switchdev_trans; +struct switchdev_obj; struct switchdev_obj_port_fdb; struct dsa_switch_driver { @@ -327,9 +328,9 @@ struct dsa_switch_driver { struct switchdev_trans *trans); int (*port_fdb_del)(struct dsa_switch *ds, int port, const struct switchdev_obj_port_fdb *fdb); - int (*port_fdb_getnext)(struct dsa_switch *ds, int port, - unsigned char *addr, u16 *vid, - bool *is_static); + int (*port_fdb_dump)(struct dsa_switch *ds, int port, + struct switchdev_obj_port_fdb *fdb, + int (*cb)(struct switchdev_obj *obj)); }; void register_switch_driver(struct dsa_switch_driver *type); diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 63615709839d..481fe1c9044c 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -43,7 +43,9 @@ struct inet_connection_sock_af_ops { int (*conn_request)(struct sock *sk, struct sk_buff *skb); struct sock *(*syn_recv_sock)(const struct sock *sk, struct sk_buff *skb, struct request_sock *req, - struct dst_entry *dst); + struct dst_entry *dst, + struct request_sock *req_unhash, + bool *own_req); u16 net_header_len; u16 net_frag_header_len; u16 sockaddr_len; @@ -272,6 +274,9 @@ void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req, struct sock *child); void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req, unsigned long timeout); +struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child, + struct request_sock *req, + bool own_req); static inline void inet_csk_reqsk_queue_added(struct sock *sk) { diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index 6683ada25fef..de2e3ade6102 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ 
-205,8 +205,8 @@ void inet_put_port(struct sock *sk); void inet_hashinfo_init(struct inet_hashinfo *h); -int inet_ehash_insert(struct sock *sk, struct sock *osk); -void __inet_hash_nolisten(struct sock *sk, struct sock *osk); +bool inet_ehash_insert(struct sock *sk, struct sock *osk); +bool inet_ehash_nolisten(struct sock *sk, struct sock *osk); void __inet_hash(struct sock *sk, struct sock *osk); void inet_hash(struct sock *sk); void inet_unhash(struct sock *sk); diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 4ec6fedeb220..4b9dd070aeb9 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1241,11 +1241,6 @@ enum ieee80211_smps_mode { * @flags: configuration flags defined above * * @listen_interval: listen interval in units of beacon interval - * @max_sleep_period: the maximum number of beacon intervals to sleep for - * before checking the beacon for a TIM bit (managed mode only); this - * value will be only achievable between DTIM frames, the hardware - * needs to check for the multicast traffic bit in DTIM beacons. - * This variable is valid only when the CONF_PS flag is set. * @ps_dtim_period: The DTIM period of the AP we're connected to, for use * in power saving. Power saving will not be enabled until a beacon * has been received and the DTIM period is known. @@ -1275,7 +1270,6 @@ enum ieee80211_smps_mode { struct ieee80211_conf { u32 flags; int power_level, dynamic_ps_timeout; - int max_sleep_period; u16 listen_interval; u8 ps_dtim_period; @@ -1683,6 +1677,7 @@ struct ieee80211_sta_rates { * @tdls: indicates whether the STA is a TDLS peer * @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only * valid if the STA is a TDLS peer in the first place. + * @mfp: indicates whether the STA uses management frame protection or not. 
* @txq: per-TID data TX queues (if driver uses the TXQ abstraction) */ struct ieee80211_sta { @@ -1700,6 +1695,7 @@ struct ieee80211_sta { struct ieee80211_sta_rates __rcu *rates; bool tdls; bool tdls_initiator; + bool mfp; struct ieee80211_txq *txq[IEEE80211_NUM_TIDS]; diff --git a/include/net/mpls_iptunnel.h b/include/net/mpls_iptunnel.h index 4757997f76ed..179253f9dcfd 100644 --- a/include/net/mpls_iptunnel.h +++ b/include/net/mpls_iptunnel.h @@ -18,7 +18,7 @@ struct mpls_iptunnel_encap { u32 label[MAX_NEW_LABELS]; - u32 labels; + u8 labels; }; static inline struct mpls_iptunnel_encap *mpls_lwtunnel_encap(struct lwtunnel_state *lwtstate) diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index aff6ceb891a9..2f87c1ba13de 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -124,7 +124,8 @@ struct rtnl_af_ops { int (*fill_link_af)(struct sk_buff *skb, const struct net_device *dev, u32 ext_filter_mask); - size_t (*get_link_af_size)(const struct net_device *dev); + size_t (*get_link_af_size)(const struct net_device *dev, + u32 ext_filter_mask); int (*validate_link_af)(const struct net_device *dev, const struct nlattr *attr); diff --git a/include/net/tcp.h b/include/net/tcp.h index eed94fc355c1..f80e74c5ad18 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -279,6 +279,7 @@ extern int sysctl_tcp_limit_output_bytes; extern int sysctl_tcp_challenge_ack_limit; extern unsigned int sysctl_tcp_notsent_lowat; extern int sysctl_tcp_min_tso_segs; +extern int sysctl_tcp_min_rtt_wlen; extern int sysctl_tcp_autocorking; extern int sysctl_tcp_invalid_ratelimit; extern int sysctl_tcp_pacing_ss_ratio; @@ -456,7 +457,9 @@ struct sock *tcp_create_openreq_child(const struct sock *sk, void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst); struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, struct request_sock *req, - struct dst_entry *dst); + struct dst_entry *dst, + struct request_sock *req_unhash, + bool *own_req); int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb); int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); int tcp_connect(struct sock *sk); @@ -566,6 +569,7 @@ void tcp_resume_early_retransmit(struct sock *sk); void tcp_rearm_rto(struct sock *sk); void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req); void tcp_reset(struct sock *sk); +void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb); /* tcp_timer.c */ void tcp_init_xmit_timers(struct sock *); @@ -671,6 +675,12 @@ static inline bool tcp_ca_dst_locked(const struct dst_entry *dst) return dst_metric_locked(dst, RTAX_CC_ALGO); } +/* Minimum RTT in usec. ~0 means not available. */ +static inline u32 tcp_min_rtt(const struct tcp_sock *tp) +{ + return tp->rtt_min[0].rtt; +} + /* Compute the actual receive window we are currently advertising. * Rcv_nxt can be after the window if our peer push more data * than the offered window. @@ -1743,6 +1753,19 @@ int tcpv4_offload_init(void); void tcp_v4_init(void); void tcp_init(void); +/* tcp_recovery.c */ + +/* Flags to enable various loss recovery features. 
See below */ +extern int sysctl_tcp_recovery; + +/* Use TCP RACK to detect (some) tail and retransmit losses */ +#define TCP_RACK_LOST_RETRANS 0x1 + +extern int tcp_rack_mark_lost(struct sock *sk); + +extern void tcp_rack_advance(struct tcp_sock *tp, + const struct skb_mstamp *xmit_time, u8 sacked); + /* * Save and compile IPv4 options, return a pointer to it */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 564f1f091991..2e032426cfb7 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -287,6 +287,17 @@ enum bpf_func_id { * Return: realm if != 0 */ BPF_FUNC_get_route_realm, + + /** + * bpf_perf_event_output(ctx, map, index, data, size) - output perf raw sample + * @ctx: struct pt_regs* + * @map: pointer to perf_event_array map + * @index: index of event in the map + * @data: data on stack to be output as raw data + * @size: size of data + * Return: 0 on success + */ + BPF_FUNC_perf_event_output, __BPF_FUNC_MAX_ID, }; diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index e3b6217f34f1..a7aea8418abb 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -550,6 +550,7 @@ enum { * on/off switch */ IFLA_VF_STATS, /* network device statistics */ + IFLA_VF_TRUST, /* Trust VF */ __IFLA_VF_MAX, }; @@ -611,6 +612,11 @@ enum { #define IFLA_VF_STATS_MAX (__IFLA_VF_STATS_MAX - 1) +struct ifla_vf_trust { + __u32 vf; + __u32 setting; +}; + /* VF ports management section * * Nested layout of set/get msg is: diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index c0ab6b0a3919..1f0b4cf5dd03 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -10,6 +10,7 @@ * Copyright 2008, 2009 Luis R. Rodriguez <lrodriguez@atheros.com> * Copyright 2008 Jouni Malinen <jouni.malinen@atheros.com> * Copyright 2008 Colin McCabe <colin@cozybit.com> + * Copyright 2015 Intel Deutschland GmbH * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -328,7 +329,15 @@ * partial scan results may be available * * @NL80211_CMD_START_SCHED_SCAN: start a scheduled scan at certain - * intervals, as specified by %NL80211_ATTR_SCHED_SCAN_INTERVAL. + * intervals and a certain number of cycles, as specified by + * %NL80211_ATTR_SCHED_SCAN_PLANS. If %NL80211_ATTR_SCHED_SCAN_PLANS is + * not specified and only %NL80211_ATTR_SCHED_SCAN_INTERVAL is specified, + * scheduled scan will run in an infinite loop with the specified interval. + * These attributes are mutually exclusive, + * i.e. NL80211_ATTR_SCHED_SCAN_INTERVAL must not be passed if + * NL80211_ATTR_SCHED_SCAN_PLANS is defined. + * If for some reason scheduled scan is aborted by the driver, all scan + * plans are canceled (including scan plans that did not start yet). * Like with normal scans, if SSIDs (%NL80211_ATTR_SCAN_SSIDS) * are passed, they are used in the probe requests. For * broadcast, a broadcast SSID must be passed (ie. an empty @@ -1761,6 +1770,19 @@ enum nl80211_commands { * @NL80211_ATTR_REG_INDOOR: flag attribute, if set indicates that the device * is operating in an indoor environment. * + * @NL80211_ATTR_MAX_NUM_SCHED_SCAN_PLANS: maximum number of scan plans for + * scheduled scan supported by the device (u32), a wiphy attribute. + * @NL80211_ATTR_MAX_SCAN_PLAN_INTERVAL: maximum interval (in seconds) for + * a scan plan (u32), a wiphy attribute.
+ * @NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS: maximum number of iterations in + * a scan plan (u32), a wiphy attribute. + * @NL80211_ATTR_SCHED_SCAN_PLANS: a list of scan plans for scheduled scan. + * Each scan plan defines the number of scan iterations and the interval + * between scans. The last scan plan will always run infinitely, + * thus it must not specify the number of iterations, only the interval + * between scans. The scan plans are executed sequentially. + * Each scan plan is a nested attribute of &enum nl80211_sched_scan_plan. + * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@ -2130,6 +2152,11 @@ enum nl80211_attrs { NL80211_ATTR_REG_INDOOR, + NL80211_ATTR_MAX_NUM_SCHED_SCAN_PLANS, + NL80211_ATTR_MAX_SCAN_PLAN_INTERVAL, + NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS, + NL80211_ATTR_SCHED_SCAN_PLANS, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -3364,6 +3391,9 @@ enum nl80211_bss_scan_width { * (not present if no beacon frame has been received yet) * @NL80211_BSS_PRESP_DATA: the data in @NL80211_BSS_INFORMATION_ELEMENTS and * @NL80211_BSS_TSF is known to be from a probe response (flag attribute) + * @NL80211_BSS_LAST_SEEN_BOOTTIME: CLOCK_BOOTTIME timestamp when this entry + * was last updated by a received frame. The value is expected to be + * accurate to about 10ms. (u64, nanoseconds) * @__NL80211_BSS_AFTER_LAST: internal * @NL80211_BSS_MAX: highest BSS attribute */ @@ -3383,6 +3413,7 @@ enum nl80211_bss { NL80211_BSS_CHAN_WIDTH, NL80211_BSS_BEACON_TSF, NL80211_BSS_PRESP_DATA, + NL80211_BSS_LAST_SEEN_BOOTTIME, /* keep last */ __NL80211_BSS_AFTER_LAST, @@ -4589,4 +4620,28 @@ enum nl80211_tdls_peer_capability { NL80211_TDLS_PEER_WMM = 1<<2, }; +/** + * enum nl80211_sched_scan_plan - scanning plan for scheduled scan + * @__NL80211_SCHED_SCAN_PLAN_INVALID: attribute number 0 is reserved + * @NL80211_SCHED_SCAN_PLAN_INTERVAL: interval between scan iterations. In + * seconds (u32). + * @NL80211_SCHED_SCAN_PLAN_ITERATIONS: number of scan iterations in this + * scan plan (u32). The last scan plan must not specify this attribute + * because it will run infinitely. A value of zero is invalid as it will + * make the scan plan meaningless. 
+ * @NL80211_SCHED_SCAN_PLAN_MAX: highest scheduled scan plan attribute number + * currently defined + * @__NL80211_SCHED_SCAN_PLAN_AFTER_LAST: internal use + */ +enum nl80211_sched_scan_plan { + __NL80211_SCHED_SCAN_PLAN_INVALID, + NL80211_SCHED_SCAN_PLAN_INTERVAL, + NL80211_SCHED_SCAN_PLAN_ITERATIONS, + + /* keep last */ + __NL80211_SCHED_SCAN_PLAN_AFTER_LAST, + NL80211_SCHED_SCAN_PLAN_MAX = + __NL80211_SCHED_SCAN_PLAN_AFTER_LAST - 1 +}; + #endif /* __LINUX_NL80211_H */ diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 2881145cda86..d3c417615361 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -110,6 +110,7 @@ enum perf_sw_ids { PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, PERF_COUNT_SW_EMULATION_FAULTS = 8, PERF_COUNT_SW_DUMMY = 9, + PERF_COUNT_SW_BPF_OUTPUT = 10, PERF_COUNT_SW_MAX, /* non-ABI */ }; diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index f2d9e698c753..e3cfe46b074f 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -295,6 +295,8 @@ static void *perf_event_fd_array_get_ptr(struct bpf_map *map, int fd) return (void *)attr; if (attr->type != PERF_TYPE_RAW && + !(attr->type == PERF_TYPE_SOFTWARE && + attr->config == PERF_COUNT_SW_BPF_OUTPUT) && attr->type != PERF_TYPE_HARDWARE) { perf_event_release_kernel(event); return ERR_PTR(-EINVAL); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 1d6b97be79e1..b56cf51f8d42 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -245,6 +245,7 @@ static const struct { } func_limit[] = { {BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call}, {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read}, + {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_output}, }; static void print_verifier_state(struct verifier_env *env) @@ -910,7 +911,7 @@ static int check_map_func_compatibility(struct bpf_map *map, int func_id) * don't allow any other map type to be passed into * the special func; */ - if (bool_map != bool_func) + if (bool_func && bool_map != bool_func) return -EINVAL; } diff --git a/kernel/events/core.c b/kernel/events/core.c index b11756f9b6dc..64754bfecd70 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -5286,9 +5286,15 @@ void perf_output_sample(struct perf_output_handle *handle, if (sample_type & PERF_SAMPLE_RAW) { if (data->raw) { - perf_output_put(handle, data->raw->size); - __output_copy(handle, data->raw->data, - data->raw->size); + u32 raw_size = data->raw->size; + u32 real_size = round_up(raw_size + sizeof(u32), + sizeof(u64)) - sizeof(u32); + u64 zero = 0; + + perf_output_put(handle, real_size); + __output_copy(handle, data->raw->data, raw_size); + if (real_size - raw_size) + __output_copy(handle, &zero, real_size - raw_size); } else { struct { u32 size; @@ -5420,8 +5426,7 @@ void perf_prepare_sample(struct perf_event_header *header, else size += sizeof(u32); - WARN_ON_ONCE(size & (sizeof(u64)-1)); - header->size += size; + header->size += round_up(size, sizeof(u64)); } if (sample_type & PERF_SAMPLE_BRANCH_STACK) { diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 0fe96c7c8803..47febbe7998e 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -215,6 +215,50 @@ const struct bpf_func_proto bpf_perf_event_read_proto = { .arg2_type = ARG_ANYTHING, }; +static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 index, u64 r4, u64 size) +{ + struct pt_regs *regs = (struct pt_regs *) (long) r1; + struct bpf_map *map = (struct bpf_map *) (long) r2; + struct bpf_array *array = 
container_of(map, struct bpf_array, map); + void *data = (void *) (long) r4; + struct perf_sample_data sample_data; + struct perf_event *event; + struct perf_raw_record raw = { + .size = size, + .data = data, + }; + + if (unlikely(index >= array->map.max_entries)) + return -E2BIG; + + event = (struct perf_event *)array->ptrs[index]; + if (unlikely(!event)) + return -ENOENT; + + if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE || + event->attr.config != PERF_COUNT_SW_BPF_OUTPUT)) + return -EINVAL; + + if (unlikely(event->oncpu != smp_processor_id())) + return -EOPNOTSUPP; + + perf_sample_data_init(&sample_data, 0, 0); + sample_data.raw = &raw; + perf_event_output(event, &sample_data, regs); + return 0; +} + +static const struct bpf_func_proto bpf_perf_event_output_proto = { + .func = bpf_perf_event_output, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_ANYTHING, + .arg4_type = ARG_PTR_TO_STACK, + .arg5_type = ARG_CONST_STACK_SIZE, +}; + static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id) { switch (func_id) { @@ -242,6 +286,8 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func return &bpf_get_smp_processor_id_proto; case BPF_FUNC_perf_event_read: return &bpf_perf_event_read_proto; + case BPF_FUNC_perf_event_output: + return &bpf_perf_event_output_proto; default: return NULL; } diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 94b4de8c4646..40197ff8918a 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -1214,29 +1214,10 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev) return 0; } -static size_t br_get_link_af_size(const struct net_device *dev) -{ - struct net_bridge_port *p; - struct net_bridge *br; - int num_vlans = 0; - - if (br_port_exists(dev)) { - p = br_port_get_rtnl(dev); - num_vlans = br_get_num_vlan_infos(nbp_vlan_group(p), - RTEXT_FILTER_BRVLAN); - } else if (dev->priv_flags & IFF_EBRIDGE) { - br = netdev_priv(dev); - num_vlans = br_get_num_vlan_infos(br_vlan_group(br), - RTEXT_FILTER_BRVLAN); - } - - /* Each VLAN is returned in bridge_vlan_info along with flags */ - return num_vlans * nla_total_size(sizeof(struct bridge_vlan_info)); -} static struct rtnl_af_ops br_af_ops __read_mostly = { .family = AF_BRIDGE, - .get_link_af_size = br_get_link_af_size, + .get_link_af_size = br_get_link_af_size_filtered, }; struct rtnl_link_ops br_link_ops __read_mostly = { diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c index 4ca449a16132..fa53d7a89f48 100644 --- a/net/bridge/br_stp_if.c +++ b/net/bridge/br_stp_if.c @@ -15,6 +15,7 @@ #include <linux/kmod.h> #include <linux/etherdevice.h> #include <linux/rtnetlink.h> +#include <net/switchdev.h> #include "br_private.h" #include "br_private_stp.h" @@ -35,11 +36,22 @@ static inline port_id br_make_port_id(__u8 priority, __u16 port_no) /* called under bridge lock */ void br_init_port(struct net_bridge_port *p) { + struct switchdev_attr attr = { + .id = SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME, + .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP | SWITCHDEV_F_DEFER, + .u.ageing_time = p->br->ageing_time, + }; + int err; + p->port_id = br_make_port_id(p->priority, p->port_no); br_become_designated_port(p); br_set_state(p, BR_STATE_BLOCKING); p->topology_change_ack = 0; p->config_pending = 0; + + err = switchdev_port_attr_set(p->dev, &attr); + if (err) + netdev_err(p->dev, "failed to set HW ageing time\n"); } /* called under bridge lock */ 
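The bpf_perf_event_output() helper wired up above (kernel/trace/bpf_trace.c plus the arraymap and verifier changes) is meant to be paired with a BPF_MAP_TYPE_PERF_EVENT_ARRAY whose slots hold PERF_TYPE_SOFTWARE/PERF_COUNT_SW_BPF_OUTPUT events; note the helper rejects an event whose oncpu is not the current CPU, so userspace is expected to install one event fd per CPU. A minimal kprobe program using the helper could look like the sketch below; it assumes the SEC()/bpf_map_def conventions of samples/bpf/bpf_helpers.h, and the map name, probe point, and record layout are illustrative, not part of this patch.

#include <linux/ptrace.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"	/* SEC(), struct bpf_map_def, helper stubs */

struct bpf_map_def SEC("maps") events = {
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size = sizeof(int),
	.value_size = sizeof(u32),
	.max_entries = 64,	/* >= number of possible CPUs */
};

SEC("kprobe/sys_write")
int trace_write(struct pt_regs *ctx)
{
	struct {
		u64 pid;
		u64 cookie;
	} rec = {
		.pid = bpf_get_current_pid_tgid(),
		.cookie = 0x12345678,	/* arbitrary marker for userspace */
	};

	/* Index by current CPU: per the oncpu check in the helper, only
	 * the event opened on this CPU may be written to.
	 */
	bpf_perf_event_output(ctx, &events, bpf_get_smp_processor_id(),
			      &rec, sizeof(rec));
	return 0;
}

Userspace then reads the records back as PERF_SAMPLE_RAW payloads from the per-CPU mmap'ed ring buffers; the perf_output_sample() change above guarantees the raw data is padded out to 64-bit alignment.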
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 24775953fa68..504bd17b7456 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -497,7 +497,8 @@ void rtnl_af_unregister(struct rtnl_af_ops *ops) } EXPORT_SYMBOL_GPL(rtnl_af_unregister); -static size_t rtnl_link_get_af_size(const struct net_device *dev) +static size_t rtnl_link_get_af_size(const struct net_device *dev, + u32 ext_filter_mask) { struct rtnl_af_ops *af_ops; size_t size; @@ -509,7 +510,7 @@ static size_t rtnl_link_get_af_size(const struct net_device *dev) if (af_ops->get_link_af_size) { /* AF_* + nested data */ size += nla_total_size(sizeof(struct nlattr)) + - af_ops->get_link_af_size(dev); + af_ops->get_link_af_size(dev, ext_filter_mask); } } @@ -837,7 +838,8 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev, /* IFLA_VF_STATS_BROADCAST */ nla_total_size(sizeof(__u64)) + /* IFLA_VF_STATS_MULTICAST */ - nla_total_size(sizeof(__u64))); + nla_total_size(sizeof(__u64)) + + nla_total_size(sizeof(struct ifla_vf_trust))); return size; } else return 0; @@ -900,7 +902,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev, + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */ + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */ + rtnl_link_get_size(dev) /* IFLA_LINKINFO */ - + rtnl_link_get_af_size(dev) /* IFLA_AF_SPEC */ + + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */ + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */ + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */ + nla_total_size(1); /* IFLA_PROTO_DOWN */ @@ -1160,6 +1162,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, struct ifla_vf_link_state vf_linkstate; struct ifla_vf_rss_query_en vf_rss_query_en; struct ifla_vf_stats vf_stats; + struct ifla_vf_trust vf_trust; /* * Not all SR-IOV capable drivers support the @@ -1169,6 +1172,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, */ ivi.spoofchk = -1; ivi.rss_query_en = -1; + ivi.trusted = -1; memset(ivi.mac, 0, sizeof(ivi.mac)); /* The default value for VF link state is "auto" * IFLA_VF_LINK_STATE_AUTO which equals zero @@ -1182,7 +1186,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, vf_tx_rate.vf = vf_spoofchk.vf = vf_linkstate.vf = - vf_rss_query_en.vf = ivi.vf; + vf_rss_query_en.vf = + vf_trust.vf = ivi.vf; memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac)); vf_vlan.vlan = ivi.vlan; @@ -1193,6 +1198,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, vf_spoofchk.setting = ivi.spoofchk; vf_linkstate.link_state = ivi.linkstate; vf_rss_query_en.setting = ivi.rss_query_en; + vf_trust.setting = ivi.trusted; vf = nla_nest_start(skb, IFLA_VF_INFO); if (!vf) { nla_nest_cancel(skb, vfinfo); @@ -1210,7 +1216,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, &vf_linkstate) || nla_put(skb, IFLA_VF_RSS_QUERY_EN, sizeof(vf_rss_query_en), - &vf_rss_query_en)) + &vf_rss_query_en) || + nla_put(skb, IFLA_VF_TRUST, + sizeof(vf_trust), &vf_trust)) goto nla_put_failure; memset(&vf_stats, 0, sizeof(vf_stats)); if (dev->netdev_ops->ndo_get_vf_stats) @@ -1347,6 +1355,7 @@ static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = { [IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) }, [IFLA_VF_RSS_QUERY_EN] = { .len = sizeof(struct ifla_vf_rss_query_en) }, [IFLA_VF_STATS] = { .type = NLA_NESTED }, + [IFLA_VF_TRUST] = { .len = sizeof(struct ifla_vf_trust) }, }; static 
const struct nla_policy ifla_vf_stats_policy[IFLA_VF_STATS_MAX + 1] = { @@ -1586,6 +1595,16 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) return err; } + if (tb[IFLA_VF_TRUST]) { + struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]); + + err = -EOPNOTSUPP; + if (ops->ndo_set_vf_trust) + err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting); + if (err < 0) + return err; + } + return err; } @@ -3443,4 +3462,3 @@ void __init rtnetlink_init(void) rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, NULL); rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, NULL); } - diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index 923f5a180134..b0e28d24e1a7 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -278,7 +278,9 @@ int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb); struct sock *dccp_v4_request_recv_sock(const struct sock *sk, struct sk_buff *skb, struct request_sock *req, - struct dst_entry *dst); + struct dst_entry *dst, + struct request_sock *req_unhash, + bool *own_req); struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, struct request_sock *req); diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 59bc180b02d8..5684e14932bd 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -393,7 +393,9 @@ static inline u64 dccp_v4_init_sequence(const struct sk_buff *skb) struct sock *dccp_v4_request_recv_sock(const struct sock *sk, struct sk_buff *skb, struct request_sock *req, - struct dst_entry *dst) + struct dst_entry *dst, + struct request_sock *req_unhash, + bool *own_req) { struct inet_request_sock *ireq; struct inet_sock *newinet; @@ -426,7 +428,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk, if (__inet_inherit_port(sk, newsk) < 0) goto put_and_exit; - __inet_hash_nolisten(newsk, NULL); + *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash)); return newsk; diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index d9cc731f2619..ef4e48ce9143 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -380,7 +380,9 @@ drop: static struct sock *dccp_v6_request_recv_sock(const struct sock *sk, struct sk_buff *skb, struct request_sock *req, - struct dst_entry *dst) + struct dst_entry *dst, + struct request_sock *req_unhash, + bool *own_req) { struct inet_request_sock *ireq = inet_rsk(req); struct ipv6_pinfo *newnp; @@ -393,7 +395,8 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk, /* * v6 mapped */ - newsk = dccp_v4_request_recv_sock(sk, skb, req, dst); + newsk = dccp_v4_request_recv_sock(sk, skb, req, dst, + req_unhash, own_req); if (newsk == NULL) return NULL; @@ -511,7 +514,7 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk, dccp_done(newsk); goto out; } - __inet_hash(newsk, NULL); + *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash)); return newsk; diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c index d10aace43672..1994f8af646b 100644 --- a/net/dccp/minisocks.c +++ b/net/dccp/minisocks.c @@ -143,6 +143,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, { struct sock *child = NULL; struct dccp_request_sock *dreq = dccp_rsk(req); + bool own_req; /* Check for retransmitted REQUEST */ if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) { @@ -182,14 +183,13 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, if (dccp_parse_options(sk, dreq, skb)) goto drop; - child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL); - if (child == NULL) + child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, 
NULL, + req, &own_req); + if (!child) goto listen_overflow; - inet_csk_reqsk_queue_drop(sk, req); - inet_csk_reqsk_queue_add(sk, req, child); -out: - return child; + return inet_csk_complete_hashdance(sk, child, req, own_req); + listen_overflow: dccp_pr_debug("listen_overflow!\n"); DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY; @@ -198,7 +198,7 @@ drop: req->rsk_ops->send_reset(sk, skb); inet_csk_reqsk_queue_drop(sk, req); - goto out; + return NULL; } EXPORT_SYMBOL_GPL(dccp_check_req); diff --git a/net/dsa/slave.c b/net/dsa/slave.c index b0b8da0f5af8..481754ee062a 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -378,31 +378,11 @@ static int dsa_slave_port_fdb_dump(struct net_device *dev, { struct dsa_slave_priv *p = netdev_priv(dev); struct dsa_switch *ds = p->parent; - unsigned char addr[ETH_ALEN] = { 0 }; - u16 vid = 0; - int ret; - - if (!ds->drv->port_fdb_getnext) - return -EOPNOTSUPP; - for (;;) { - bool is_static; - - ret = ds->drv->port_fdb_getnext(ds, p->port, addr, &vid, - &is_static); - if (ret < 0) - break; + if (ds->drv->port_fdb_dump) + return ds->drv->port_fdb_dump(ds, p->port, fdb, cb); - ether_addr_copy(fdb->addr, addr); - fdb->vid = vid; - fdb->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE; - - ret = cb(&fdb->obj); - if (ret < 0) - break; - } - - return ret == -ENOENT ? 0 : ret; + return -EOPNOTSUPP; } static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile index 89aacb630a53..c29809f765dc 100644 --- a/net/ipv4/Makefile +++ b/net/ipv4/Makefile @@ -8,6 +8,7 @@ obj-y := route.o inetpeer.o protocol.o \ inet_timewait_sock.o inet_connection_sock.o \ tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o \ tcp_minisocks.o tcp_cong.o tcp_metrics.o tcp_fastopen.o \ + tcp_recovery.o \ tcp_offload.o datagram.o raw.o udp.o udplite.o \ udp_offload.o arp.o icmp.o devinet.o af_inet.o igmp.o \ fib_frontend.o fib_semantics.o fib_trie.o \ diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 735008472844..cebd9d31e65a 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -1644,7 +1644,8 @@ errout: rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err); } -static size_t inet_get_link_af_size(const struct net_device *dev) +static size_t inet_get_link_af_size(const struct net_device *dev, + u32 ext_filter_mask) { struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr); @@ -2398,4 +2399,3 @@ void __init devinet_init(void) rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf, inet_netconf_dump_devconf, NULL); } - diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index d7c2bb0c4f65..e786873c89f2 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -867,9 +867,10 @@ void fib_add_ifaddr(struct in_ifaddr *ifa) if (!ipv4_is_zeronet(prefix) && !(ifa->ifa_flags & IFA_F_SECONDARY) && (prefix != addr || ifa->ifa_prefixlen < 32)) { - fib_magic(RTM_NEWROUTE, - dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST, - prefix, ifa->ifa_prefixlen, prim); + if (!(ifa->ifa_flags & IFA_F_NOPREFIXROUTE)) + fib_magic(RTM_NEWROUTE, + dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST, + prefix, ifa->ifa_prefixlen, prim); /* Add network specific broadcasts, when it takes a sense */ if (ifa->ifa_prefixlen < 31) { @@ -914,9 +915,10 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim) } } else if (!ipv4_is_zeronet(any) && (any != ifa->ifa_local || ifa->ifa_prefixlen < 32)) { - fib_magic(RTM_DELROUTE, - dev->flags & IFF_LOOPBACK ? 
RTN_LOCAL : RTN_UNICAST, - any, ifa->ifa_prefixlen, prim); + if (!(ifa->ifa_flags & IFA_F_NOPREFIXROUTE)) + fib_magic(RTM_DELROUTE, + dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST, + any, ifa->ifa_prefixlen, prim); subnet = 1; } diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 8430bc8ccd58..1feb15f23de8 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -523,15 +523,15 @@ static bool reqsk_queue_unlink(struct request_sock_queue *queue, struct request_sock *req) { struct inet_hashinfo *hashinfo = req_to_sk(req)->sk_prot->h.hashinfo; - spinlock_t *lock; - bool found; + bool found = false; - lock = inet_ehash_lockp(hashinfo, req->rsk_hash); - - spin_lock(lock); - found = __sk_nulls_del_node_init_rcu(req_to_sk(req)); - spin_unlock(lock); + if (sk_hashed(req_to_sk(req))) { + spinlock_t *lock = inet_ehash_lockp(hashinfo, req->rsk_hash); + spin_lock(lock); + found = __sk_nulls_del_node_init_rcu(req_to_sk(req)); + spin_unlock(lock); + } if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer)) reqsk_put(req); return found; @@ -811,6 +811,25 @@ void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req, } EXPORT_SYMBOL(inet_csk_reqsk_queue_add); +struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child, + struct request_sock *req, bool own_req) +{ + if (own_req) { + inet_csk_reqsk_queue_drop(sk, req); + reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req); + inet_csk_reqsk_queue_add(sk, req, child); + /* Warning: caller must not call reqsk_put(req); + * child stole last reference on it. + */ + return child; + } + /* Too bad, another child took ownership of the request, undo. */ + bh_unlock_sock(child); + sock_put(child); + return NULL; +} +EXPORT_SYMBOL(inet_csk_complete_hashdance); + /* * This routine closes sockets which have been at least partially * opened, but not yet accepted. 
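The bool own_req out-parameter threaded through syn_recv_sock() in this series exists because the child socket is now hashed into the ehash table before the request socket is fully retired, so two CPUs processing the final ACK concurrently can race to create a child; only the caller whose inet_ehash_nolisten() actually displaced the request wins. Condensed from the dccp_check_req() and TCP call sites in this diff, the calling pattern is (a schematic fragment, not a literal copy of either caller):

	struct sock *child;
	bool own_req;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	/* inet_csk_complete_hashdance() either drops req from the listener
	 * and queues the child for accept (own_req == true), or disposes of
	 * the losing child and returns NULL; either way the caller must not
	 * touch req afterwards.
	 */
	return inet_csk_complete_hashdance(sk, child, req, own_req);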
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 958728a22001..ccc5980797fc 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -407,13 +407,13 @@ static u32 inet_sk_port_offset(const struct sock *sk) /* insert a socket into ehash, and eventually remove another one * (The another one can be a SYN_RECV or TIMEWAIT */ -int inet_ehash_insert(struct sock *sk, struct sock *osk) +bool inet_ehash_insert(struct sock *sk, struct sock *osk) { struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; struct hlist_nulls_head *list; struct inet_ehash_bucket *head; spinlock_t *lock; - int ret = 0; + bool ret = true; WARN_ON_ONCE(!sk_unhashed(sk)); @@ -423,30 +423,41 @@ int inet_ehash_insert(struct sock *sk, struct sock *osk) lock = inet_ehash_lockp(hashinfo, sk->sk_hash); spin_lock(lock); - __sk_nulls_add_node_rcu(sk, list); if (osk) { - WARN_ON(sk->sk_hash != osk->sk_hash); - sk_nulls_del_node_init_rcu(osk); + WARN_ON_ONCE(sk->sk_hash != osk->sk_hash); + ret = sk_nulls_del_node_init_rcu(osk); } + if (ret) + __sk_nulls_add_node_rcu(sk, list); spin_unlock(lock); return ret; } -void __inet_hash_nolisten(struct sock *sk, struct sock *osk) +bool inet_ehash_nolisten(struct sock *sk, struct sock *osk) { - inet_ehash_insert(sk, osk); - sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); + bool ok = inet_ehash_insert(sk, osk); + + if (ok) { + sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); + } else { + percpu_counter_inc(sk->sk_prot->orphan_count); + sk->sk_state = TCP_CLOSE; + sock_set_flag(sk, SOCK_DEAD); + inet_csk_destroy_sock(sk); + } + return ok; } -EXPORT_SYMBOL_GPL(__inet_hash_nolisten); +EXPORT_SYMBOL_GPL(inet_ehash_nolisten); void __inet_hash(struct sock *sk, struct sock *osk) { struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; struct inet_listen_hashbucket *ilb; - if (sk->sk_state != TCP_LISTEN) - return __inet_hash_nolisten(sk, osk); - + if (sk->sk_state != TCP_LISTEN) { + inet_ehash_nolisten(sk, osk); + return; + } WARN_ON(!sk_unhashed(sk)); ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)]; @@ -567,7 +578,7 @@ ok: inet_bind_hash(sk, tb, port); if (sk_unhashed(sk)) { inet_sk(sk)->inet_sport = htons(port); - __inet_hash_nolisten(sk, (struct sock *)tw); + inet_ehash_nolisten(sk, (struct sock *)tw); } if (tw) inet_twsk_bind_unhash(tw, hinfo); @@ -584,7 +595,7 @@ ok: tb = inet_csk(sk)->icsk_bind_hash; spin_lock_bh(&head->lock); if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { - __inet_hash_nolisten(sk, NULL); + inet_ehash_nolisten(sk, NULL); spin_unlock_bh(&head->lock); return 0; } else { diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 4c0892badb8b..4cbe9f0a4281 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -221,8 +221,10 @@ struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb, { struct inet_connection_sock *icsk = inet_csk(sk); struct sock *child; + bool own_req; - child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst); + child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst, + NULL, &own_req); if (child) { atomic_set(&req->rsk_refcnt, 1); sock_rps_save_rxhash(child, skb); diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 894da3a70aff..25300c5e283b 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -496,6 +496,13 @@ static struct ctl_table ipv4_table[] = { .proc_handler = proc_dointvec }, { + .procname = "tcp_recovery", + .data = &sysctl_tcp_recovery, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = 
proc_dointvec, + }, + { .procname = "tcp_reordering", .data = &sysctl_tcp_reordering, .maxlen = sizeof(int), @@ -577,6 +584,13 @@ static struct ctl_table ipv4_table[] = { .proc_handler = proc_dointvec }, { + .procname = "tcp_min_rtt_wlen", + .data = &sysctl_tcp_min_rtt_wlen, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec + }, + { .procname = "tcp_low_latency", .data = &sysctl_tcp_low_latency, .maxlen = sizeof(int), diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index ac1bdbb50352..0cfa7c0c1e80 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -388,6 +388,7 @@ void tcp_init_sock(struct sock *sk) icsk->icsk_rto = TCP_TIMEOUT_INIT; tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); + tp->rtt_min[0].rtt = ~0U; /* So many TCP implementations out there (incorrectly) count the * initial SYN frame in their delayed-ACK and congestion control diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index 93396bf7b475..55be6ac70cff 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -133,12 +133,14 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk, struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; struct sock *child; u32 end_seq; + bool own_req; req->num_retrans = 0; req->num_timeout = 0; req->sk = NULL; - child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL); + child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL, + NULL, &own_req); if (!child) return NULL; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 944eaca69115..fdd88c3803a6 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -95,6 +95,7 @@ int sysctl_tcp_stdurg __read_mostly; int sysctl_tcp_rfc1337 __read_mostly; int sysctl_tcp_max_orphans __read_mostly = NR_FILE; int sysctl_tcp_frto __read_mostly = 2; +int sysctl_tcp_min_rtt_wlen __read_mostly = 300; int sysctl_tcp_thin_dupack __read_mostly; @@ -880,6 +881,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric, if (metric > 0) tcp_disable_early_retrans(tp); + tp->rack.reord = 1; } /* This must be called before lost_out is incremented */ @@ -905,8 +907,7 @@ static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb) } } -static void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, - struct sk_buff *skb) +void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb) { tcp_verify_retransmit_hint(tp, skb); @@ -1047,70 +1048,6 @@ static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack, return !before(start_seq, end_seq - tp->max_window); } -/* Check for lost retransmit. This superb idea is borrowed from "ratehalving". - * Event "B". Later note: FACK people cheated me again 8), we have to account - * for reordering! Ugly, but should help. - * - * Search retransmitted skbs from write_queue that were sent when snd_nxt was - * less than what is now known to be received by the other end (derived from - * highest SACK block). Also calculate the lowest snd_nxt among the remaining - * retransmitted skbs to avoid some costly processing per ACKs. 
- */ -static void tcp_mark_lost_retrans(struct sock *sk, int *flag) -{ - const struct inet_connection_sock *icsk = inet_csk(sk); - struct tcp_sock *tp = tcp_sk(sk); - struct sk_buff *skb; - int cnt = 0; - u32 new_low_seq = tp->snd_nxt; - u32 received_upto = tcp_highest_sack_seq(tp); - - if (!tcp_is_fack(tp) || !tp->retrans_out || - !after(received_upto, tp->lost_retrans_low) || - icsk->icsk_ca_state != TCP_CA_Recovery) - return; - - tcp_for_write_queue(skb, sk) { - u32 ack_seq = TCP_SKB_CB(skb)->ack_seq; - - if (skb == tcp_send_head(sk)) - break; - if (cnt == tp->retrans_out) - break; - if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) - continue; - - if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)) - continue; - - /* TODO: We would like to get rid of tcp_is_fack(tp) only - * constraint here (see above) but figuring out that at - * least tp->reordering SACK blocks reside between ack_seq - * and received_upto is not easy task to do cheaply with - * the available datastructures. - * - * Whether FACK should check here for tp->reordering segs - * in-between one could argue for either way (it would be - * rather simple to implement as we could count fack_count - * during the walk and do tp->fackets_out - fack_count). - */ - if (after(received_upto, ack_seq)) { - TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; - tp->retrans_out -= tcp_skb_pcount(skb); - *flag |= FLAG_LOST_RETRANS; - tcp_skb_mark_lost_uncond_verify(tp, skb); - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT); - } else { - if (before(ack_seq, new_low_seq)) - new_low_seq = ack_seq; - cnt += tcp_skb_pcount(skb); - } - } - - if (tp->retrans_out) - tp->lost_retrans_low = new_low_seq; -} - static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb, struct tcp_sack_block_wire *sp, int num_sacks, u32 prior_snd_una) @@ -1236,6 +1173,8 @@ static u8 tcp_sacktag_one(struct sock *sk, return sacked; if (!(sacked & TCPCB_SACKED_ACKED)) { + tcp_rack_advance(tp, xmit_time, sacked); + if (sacked & TCPCB_SACKED_RETRANS) { /* If the segment is not tagged as lost, * we do not clear RETRANS, believing @@ -1837,7 +1776,6 @@ advance_sp: ((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker)) tcp_update_reordering(sk, tp->fackets_out - state->reord, 0); - tcp_mark_lost_retrans(sk, &state->flag); tcp_verify_left_out(tp); out: @@ -2314,14 +2252,29 @@ static inline void tcp_moderate_cwnd(struct tcp_sock *tp) tp->snd_cwnd_stamp = tcp_time_stamp; } +static bool tcp_tsopt_ecr_before(const struct tcp_sock *tp, u32 when) +{ + return tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && + before(tp->rx_opt.rcv_tsecr, when); +} + +/* skb is spuriously retransmitted if the returned timestamp echo + * reply is prior to the skb transmission time + */ +static bool tcp_skb_spurious_retrans(const struct tcp_sock *tp, + const struct sk_buff *skb) +{ + return (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) && + tcp_tsopt_ecr_before(tp, tcp_skb_timestamp(skb)); +} + /* Nothing was retransmitted or returned timestamp is less * than timestamp of the first retransmission. */ static inline bool tcp_packet_delayed(const struct tcp_sock *tp) { return !tp->retrans_stamp || - (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && - before(tp->rx_opt.rcv_tsecr, tp->retrans_stamp)); + tcp_tsopt_ecr_before(tp, tp->retrans_stamp); } /* Undo procedures.
*/ @@ -2853,6 +2806,11 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked, } } + /* Use RACK to detect loss */ + if (sysctl_tcp_recovery & TCP_RACK_LOST_RETRANS && + tcp_rack_mark_lost(sk)) + flag |= FLAG_LOST_RETRANS; + /* E. Process state. */ switch (icsk->icsk_ca_state) { case TCP_CA_Recovery: @@ -2915,8 +2873,69 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked, tcp_xmit_retransmit_queue(sk); } +/* Kathleen Nichols' algorithm for tracking the minimum value of + * a data stream over some fixed time interval. (E.g., the minimum + * RTT over the past five minutes.) It uses constant space and constant + * time per update yet almost always delivers the same minimum as an + * implementation that has to keep all the data in the window. + * + * The algorithm keeps track of the best, 2nd best & 3rd best min + * values, maintaining an invariant that the measurement time of the + * n'th best >= n-1'th best. It also makes sure that the three values + * are widely separated in the time window since that bounds the worst + * case error when that data is monotonically increasing over the window. + * + * Upon getting a new min, we can forget everything earlier because it + * has no value - the new min is <= everything else in the window by + * definition and it's the most recent. So we restart fresh on every new min + * and overwrite the 2nd & 3rd choices. The same property holds for 2nd & 3rd + * best. + */ +static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us) +{ + const u32 now = tcp_time_stamp, wlen = sysctl_tcp_min_rtt_wlen * HZ; + struct rtt_meas *m = tcp_sk(sk)->rtt_min; + struct rtt_meas rttm = { .rtt = (rtt_us ? : 1), .ts = now }; + u32 elapsed; + + /* Check if the new measurement updates the 1st, 2nd, or 3rd choices */ + if (unlikely(rttm.rtt <= m[0].rtt)) + m[0] = m[1] = m[2] = rttm; + else if (rttm.rtt <= m[1].rtt) + m[1] = m[2] = rttm; + else if (rttm.rtt <= m[2].rtt) + m[2] = rttm; + + elapsed = now - m[0].ts; + if (unlikely(elapsed > wlen)) { + /* Passed entire window without a new min so make 2nd choice + * the new min & 3rd choice the new 2nd. And so forth. + */ + m[0] = m[1]; + m[1] = m[2]; + m[2] = rttm; + if (now - m[0].ts > wlen) { + m[0] = m[1]; + m[1] = rttm; + if (now - m[0].ts > wlen) + m[0] = rttm; + } + } else if (m[1].ts == m[0].ts && elapsed > wlen / 4) { + /* Passed a quarter of the window without a new min so + * take 2nd choice from the 2nd quarter of the window. + */ + m[2] = m[1] = rttm; + } else if (m[2].ts == m[1].ts && elapsed > wlen / 2) { + /* Passed half the window without a new min so take the 3rd + * choice from the last half of the window. + */ + m[2] = rttm; + } +} + static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag, - long seq_rtt_us, long sack_rtt_us) + long seq_rtt_us, long sack_rtt_us, + long ca_rtt_us) { const struct tcp_sock *tp = tcp_sk(sk); @@ -2925,9 +2944,6 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag, * Karn's algorithm forbids taking RTT if some retransmitted data * is acked (RFC6298).
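(Aside: the same filter transcribed as a standalone userspace sketch, for readers who want to experiment with it outside the kernel; u32 deltas handle wrap-around the same way jiffies arithmetic does.)

	#include <stdint.h>

	struct rtt_meas { uint32_t rtt, ts; };	/* mirrors the kernel struct */

	static void rtt_min_update(struct rtt_meas m[3], uint32_t rtt_us,
				   uint32_t now, uint32_t wlen)
	{
		struct rtt_meas cur = { rtt_us ? rtt_us : 1, now };

		/* A new 1st/2nd/3rd best wipes everything ranked after it */
		if (cur.rtt <= m[0].rtt)
			m[0] = m[1] = m[2] = cur;
		else if (cur.rtt <= m[1].rtt)
			m[1] = m[2] = cur;
		else if (cur.rtt <= m[2].rtt)
			m[2] = cur;

		if (now - m[0].ts > wlen) {
			/* Best expired: promote 2nd and 3rd, retire into cur */
			m[0] = m[1];
			m[1] = m[2];
			m[2] = cur;
			if (now - m[0].ts > wlen) {
				m[0] = m[1];
				m[1] = cur;
				if (now - m[0].ts > wlen)
					m[0] = cur;
			}
		} else if (m[1].ts == m[0].ts && now - m[0].ts > wlen / 4) {
			m[2] = m[1] = cur;	/* refresh 2nd/3rd choice */
		} else if (m[2].ts == m[1].ts && now - m[0].ts > wlen / 2) {
			m[2] = cur;		/* refresh 3rd choice */
		}
	}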
*/ - if (flag & FLAG_RETRANS_DATA_ACKED) - seq_rtt_us = -1L; - if (seq_rtt_us < 0) seq_rtt_us = sack_rtt_us; @@ -2939,11 +2955,16 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag, */ if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && flag & FLAG_ACKED) - seq_rtt_us = jiffies_to_usecs(tcp_time_stamp - tp->rx_opt.rcv_tsecr); - + seq_rtt_us = ca_rtt_us = jiffies_to_usecs(tcp_time_stamp - + tp->rx_opt.rcv_tsecr); if (seq_rtt_us < 0) return false; + /* ca_rtt_us >= 0 is counting on the invariant that ca_rtt_us is + * always taken together with ACK, SACK, or TS-opts. Any negative + * values will be skipped with the seq_rtt_us < 0 check above. + */ + tcp_update_rtt_min(sk, ca_rtt_us); tcp_rtt_estimator(sk, seq_rtt_us); tcp_set_rto(sk); @@ -2964,7 +2985,7 @@ void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req) rtt_us = skb_mstamp_us_delta(&now, &tcp_rsk(req)->snt_synack); } - tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, rtt_us, -1L); + tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, rtt_us, -1L, rtt_us); } @@ -3131,6 +3152,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, if (sacked & TCPCB_SACKED_ACKED) tp->sacked_out -= acked_pcount; + else if (tcp_is_sack(tp) && !tcp_skb_spurious_retrans(tp, skb)) + tcp_rack_advance(tp, &skb->skb_mstamp, sacked); if (sacked & TCPCB_LOST) tp->lost_out -= acked_pcount; @@ -3169,7 +3192,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, flag |= FLAG_SACK_RENEGING; skb_mstamp_get(&now); - if (likely(first_ackt.v64)) { + if (likely(first_ackt.v64) && !(flag & FLAG_RETRANS_DATA_ACKED)) { seq_rtt_us = skb_mstamp_us_delta(&now, &first_ackt); ca_rtt_us = skb_mstamp_us_delta(&now, &last_ackt); } @@ -3178,7 +3201,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, ca_rtt_us = skb_mstamp_us_delta(&now, &sack->last_sackt); } - rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us); + rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us, + ca_rtt_us); if (flag & FLAG_ACKED) { tcp_rearm_rto(sk); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 30dd45c1f568..1c2648bbac4b 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1247,7 +1247,9 @@ EXPORT_SYMBOL(tcp_v4_conn_request); */ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, struct request_sock *req, - struct dst_entry *dst) + struct dst_entry *dst, + struct request_sock *req_unhash, + bool *own_req) { struct inet_request_sock *ireq; struct inet_sock *newinet; @@ -1323,7 +1325,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, if (__inet_inherit_port(sk, newsk) < 0) goto put_and_exit; - __inet_hash_nolisten(newsk, NULL); + *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash)); return newsk; diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 41828bdc5d32..3575dd1e5b67 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -470,6 +470,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk, newtp->srtt_us = 0; newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); + newtp->rtt_min[0].rtt = ~0U; newicsk->icsk_rto = TCP_TIMEOUT_INIT; newtp->packets_out = 0; @@ -547,6 +548,8 @@ struct sock *tcp_create_openreq_child(const struct sock *sk, tcp_ecn_openreq_child(newtp, req); newtp->fastopen_rsk = NULL; newtp->syn_data_acked = 0; + newtp->rack.mstamp.v64 = 0; + newtp->rack.advanced = 0; newtp->saved_syn = req->saved_syn; req->saved_syn = NULL; @@ -577,6 +580,7 @@ struct 
sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th = tcp_hdr(skb); __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK); bool paws_reject = false; + bool own_req; tmp_opt.saw_tstamp = 0; if (th->doff > (sizeof(struct tcphdr)>>2)) { @@ -764,18 +768,14 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, * ESTABLISHED STATE. If it will be dropped after * socket is created, wait for troubles. */ - child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL); + child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL, + req, &own_req); if (!child) goto listen_overflow; sock_rps_save_rxhash(child, skb); tcp_synack_rtt_meas(child, req); - inet_csk_reqsk_queue_drop(sk, req); - inet_csk_reqsk_queue_add(sk, req, child); - /* Warning: caller must not call reqsk_put(req); * child stole last reference on it. */ - return child; + return inet_csk_complete_hashdance(sk, child, req, own_req); listen_overflow: if (!sysctl_tcp_abort_on_overflow) { diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 19adedb8c5cc..f6f7f9b4901b 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2655,8 +2655,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) net_dbg_ratelimited("retrans_out leaked\n"); } #endif - if (!tp->retrans_out) - tp->lost_retrans_low = tp->snd_nxt; TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS; tp->retrans_out += tcp_skb_pcount(skb); @@ -2664,10 +2662,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) if (!tp->retrans_stamp) tp->retrans_stamp = tcp_skb_timestamp(skb); - /* snd_nxt is stored to detect loss of retransmitted segment, * see tcp_input.c tcp_sacktag_write_queue(). */ - TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt; } else if (err != -EBUSY) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL); } diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c new file mode 100644 index 000000000000..5353085fd0b2 --- /dev/null +++ b/net/ipv4/tcp_recovery.c @@ -0,0 +1,109 @@ +#include <linux/tcp.h> +#include <net/tcp.h> + +int sysctl_tcp_recovery __read_mostly = TCP_RACK_LOST_RETRANS; + +/* Marks a packet lost if some packet sent later has been (s)acked. + * The underlying idea is similar to the traditional dupthresh and FACK + * but they look at different metrics: + * + * dupthresh: 3 OOO packets delivered (packet count) + * FACK: sequence delta to highest sacked sequence (sequence space) + * RACK: sent time delta to the latest delivered packet (time domain) + * + * The advantage of RACK is that it applies to both original and retransmitted + * packets and therefore is robust against tail losses. Another advantage + * is being more resilient to reordering by simply allowing some + * "settling delay", instead of tweaking the dupthresh. + * + * The current version is only used after recovery starts but can be + * easily extended to detect the first loss. + */ +int tcp_rack_mark_lost(struct sock *sk) +{ + struct tcp_sock *tp = tcp_sk(sk); + struct sk_buff *skb; + u32 reo_wnd, prior_retrans = tp->retrans_out; + + if (inet_csk(sk)->icsk_ca_state < TCP_CA_Recovery || !tp->rack.advanced) + return 0; + + /* Reset the advanced flag to avoid unnecessary queue scanning */ + tp->rack.advanced = 0; + + /* To be more reordering resilient, allow min_rtt/4 settling delay + * (lower-bounded to 1000us). We use min_rtt instead of the smoothed + * RTT because reordering is often a path property and less related + * to queuing or delayed ACKs.
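(Aside: plugging numbers into the settling delay just described.)

	/* With reordering previously seen (tp->rack.reord) and a 20 ms
	 * minimum RTT:
	 *
	 *	reo_wnd = max(20000 >> 2, 1000) = 5000 us
	 *
	 * On a 2 ms path the floor wins: max(2000 >> 2, 1000) = 1000 us.
	 * A packet is then marked lost only if some packet sent more than
	 * reo_wnd later has already been (s)acked.
	 */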
+ * + * TODO: measure and adapt to the observed reordering delay, and + * use a timer to retransmit like the delayed early retransmit. + */ + reo_wnd = 1000; + if (tp->rack.reord && tcp_min_rtt(tp) != ~0U) + reo_wnd = max(tcp_min_rtt(tp) >> 2, reo_wnd); + + tcp_for_write_queue(skb, sk) { + struct tcp_skb_cb *scb = TCP_SKB_CB(skb); + + if (skb == tcp_send_head(sk)) + break; + + /* Skip ones already (s)acked */ + if (!after(scb->end_seq, tp->snd_una) || + scb->sacked & TCPCB_SACKED_ACKED) + continue; + + if (skb_mstamp_after(&tp->rack.mstamp, &skb->skb_mstamp)) { + + if (skb_mstamp_us_delta(&tp->rack.mstamp, + &skb->skb_mstamp) <= reo_wnd) + continue; + + /* skb is lost if packet sent later is sacked */ + tcp_skb_mark_lost_uncond_verify(tp, skb); + if (scb->sacked & TCPCB_SACKED_RETRANS) { + scb->sacked &= ~TCPCB_SACKED_RETRANS; + tp->retrans_out -= tcp_skb_pcount(skb); + NET_INC_STATS_BH(sock_net(sk), + LINUX_MIB_TCPLOSTRETRANSMIT); + } + } else if (!(scb->sacked & TCPCB_RETRANS)) { + /* Original data are sent sequentially so stop early + * because the rest are all sent after rack_sent + */ + break; + } + } + return prior_retrans - tp->retrans_out; +} + +/* Record the most recently (re)sent time among the (s)acked packets */ +void tcp_rack_advance(struct tcp_sock *tp, + const struct skb_mstamp *xmit_time, u8 sacked) +{ + if (tp->rack.mstamp.v64 && + !skb_mstamp_after(xmit_time, &tp->rack.mstamp)) + return; + + if (sacked & TCPCB_RETRANS) { + struct skb_mstamp now; + + /* If the sacked packet was retransmitted, it's ambiguous + * whether the retransmission or the original (or the prior + * retransmission) was sacked. + * + * If the original is lost, there is no ambiguity. Otherwise + * we assume the original can be delayed up to aRTT + min_rtt. + * The aRTT term is bounded by the fast recovery or timeout, + * so it's at least one RTT (i.e., retransmission is at least + * an RTT later).
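(Aside: the per-skb test in the loop above boils down to one predicate on send times; a hypothetical standalone restatement, assuming microsecond timestamps.)

	static bool rack_skb_lost(u64 skb_sent_us, u64 rack_sent_us,
				  u32 reo_wnd_us)
	{
		/* rack_sent_us is the send time of the most recently sent
		 * packet known to be delivered (tp->rack.mstamp): this skb
		 * is lost iff that packet left more than reo_wnd later.
		 */
		return rack_sent_us > skb_sent_us &&
		       rack_sent_us - skb_sent_us > reo_wnd_us;
	}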
+ */ + skb_mstamp_get(&now); + if (skb_mstamp_us_delta(&now, xmit_time) < tcp_min_rtt(tp)) + return; + } + + tp->rack.mstamp = *xmit_time; + tp->rack.advanced = 1; +} diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index d135350495e8..d0c685cdc345 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -4788,7 +4788,8 @@ nla_put_failure: return -EMSGSIZE; } -static size_t inet6_get_link_af_size(const struct net_device *dev) +static size_t inet6_get_link_af_size(const struct net_device *dev, + u32 ext_filter_mask) { if (!__in6_dev_get(dev)) return 0; diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 08b62047c67f..eeca943f12dc 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -264,6 +264,9 @@ static int ipv6_gro_complete(struct sk_buff *skb, int nhoff) struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff); int err = -ENOSYS; + if (skb->encapsulation) + skb_set_inner_network_header(skb, nhoff); + iph->payload_len = htons(skb->len - nhoff - sizeof(*iph)); rcu_read_lock(); @@ -280,6 +283,13 @@ out_unlock: return err; } +static int sit_gro_complete(struct sk_buff *skb, int nhoff) +{ + skb->encapsulation = 1; + skb_shinfo(skb)->gso_type |= SKB_GSO_SIT; + return ipv6_gro_complete(skb, nhoff); +} + static struct packet_offload ipv6_packet_offload __read_mostly = { .type = cpu_to_be16(ETH_P_IPV6), .callbacks = { @@ -292,6 +302,8 @@ static struct packet_offload ipv6_packet_offload __read_mostly = { static const struct net_offload sit_offload = { .callbacks = { .gso_segment = ipv6_gso_segment, + .gro_receive = ipv6_gro_receive, + .gro_complete = sit_gro_complete, }, }; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index f495d189f5e0..714bc5ad096e 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -965,7 +965,9 @@ drop: static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, struct request_sock *req, - struct dst_entry *dst) + struct dst_entry *dst, + struct request_sock *req_unhash, + bool *own_req) { struct inet_request_sock *ireq; struct ipv6_pinfo *newnp; @@ -984,7 +986,8 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * * v6 mapped */ - newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst); + newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst, + req_unhash, own_req); if (!newsk) return NULL; @@ -1145,7 +1148,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * tcp_done(newsk); goto out; } - __inet_hash(newsk, NULL); + *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash)); return newsk; diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile index 783e891b7525..f9137a8341f4 100644 --- a/net/mac80211/Makefile +++ b/net/mac80211/Makefile @@ -27,7 +27,6 @@ mac80211-y := \ key.o \ util.o \ wme.o \ - event.o \ chan.o \ trace.o mlme.o \ tdls.o \ diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 68e551e263c6..713cdbf6fb3c 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -17,7 +17,6 @@ #include <net/cfg80211.h> #include "ieee80211_i.h" #include "driver-ops.h" -#include "cfg.h" #include "rate.h" #include "mesh.h" #include "wme.h" @@ -469,45 +468,6 @@ void sta_set_rate_info_tx(struct sta_info *sta, rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; } -void sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo) -{ - rinfo->flags = 0; - - if (sta->last_rx_rate_flag & RX_FLAG_HT) { - rinfo->flags |= RATE_INFO_FLAGS_MCS; - rinfo->mcs = sta->last_rx_rate_idx; - } else if (sta->last_rx_rate_flag & RX_FLAG_VHT) { - 
rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS; - rinfo->nss = sta->last_rx_rate_vht_nss; - rinfo->mcs = sta->last_rx_rate_idx; - } else { - struct ieee80211_supported_band *sband; - int shift = ieee80211_vif_get_shift(&sta->sdata->vif); - u16 brate; - - sband = sta->local->hw.wiphy->bands[ - ieee80211_get_sdata_band(sta->sdata)]; - brate = sband->bitrates[sta->last_rx_rate_idx].bitrate; - rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift); - } - - if (sta->last_rx_rate_flag & RX_FLAG_SHORT_GI) - rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; - - if (sta->last_rx_rate_flag & RX_FLAG_5MHZ) - rinfo->bw = RATE_INFO_BW_5; - else if (sta->last_rx_rate_flag & RX_FLAG_10MHZ) - rinfo->bw = RATE_INFO_BW_10; - else if (sta->last_rx_rate_flag & RX_FLAG_40MHZ) - rinfo->bw = RATE_INFO_BW_40; - else if (sta->last_rx_rate_vht_flag & RX_VHT_FLAG_80MHZ) - rinfo->bw = RATE_INFO_BW_80; - else if (sta->last_rx_rate_vht_flag & RX_VHT_FLAG_160MHZ) - rinfo->bw = RATE_INFO_BW_160; - else - rinfo->bw = RATE_INFO_BW_20; -} - static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev, int idx, u8 *mac, struct station_info *sinfo) { @@ -1138,6 +1098,7 @@ static int sta_apply_parameters(struct ieee80211_local *local, } if (mask & BIT(NL80211_STA_FLAG_MFP)) { + sta->sta.mfp = !!(set & BIT(NL80211_STA_FLAG_MFP)); if (set & BIT(NL80211_STA_FLAG_MFP)) set_sta_flag(sta, WLAN_STA_MFP); else @@ -1427,7 +1388,7 @@ static int ieee80211_change_station(struct wiphy *wiphy, if (sdata->vif.type == NL80211_IFTYPE_STATION && params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) { - ieee80211_recalc_ps(local, -1); + ieee80211_recalc_ps(local); ieee80211_recalc_ps_vif(sdata); } @@ -2462,7 +2423,7 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, if (ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS)) ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); - ieee80211_recalc_ps(local, -1); + ieee80211_recalc_ps(local); ieee80211_recalc_ps_vif(sdata); return 0; diff --git a/net/mac80211/cfg.h b/net/mac80211/cfg.h deleted file mode 100644 index 2d51f62dc76c..000000000000 --- a/net/mac80211/cfg.h +++ /dev/null @@ -1,9 +0,0 @@ -/* - * mac80211 configuration hooks for cfg80211 - */ -#ifndef __CFG_H -#define __CFG_H - -extern const struct cfg80211_ops mac80211_config_ops; - -#endif /* __CFG_H */ diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index 06d52935036d..a39512f09f9e 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c @@ -50,7 +50,6 @@ static const struct file_operations sta_ ##name## _ops = { \ STA_OPS(name) STA_FILE(aid, sta.aid, D); -STA_FILE(last_ack_signal, last_ack_signal, D); static ssize_t sta_flags_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) @@ -366,11 +365,10 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta) DEBUGFS_ADD(agg_status); DEBUGFS_ADD(ht_capa); DEBUGFS_ADD(vht_capa); - DEBUGFS_ADD(last_ack_signal); - DEBUGFS_ADD_COUNTER(rx_duplicates, num_duplicates); - DEBUGFS_ADD_COUNTER(rx_fragments, rx_fragments); - DEBUGFS_ADD_COUNTER(tx_filtered, tx_filtered_count); + DEBUGFS_ADD_COUNTER(rx_duplicates, rx_stats.num_duplicates); + DEBUGFS_ADD_COUNTER(rx_fragments, rx_stats.fragments); + DEBUGFS_ADD_COUNTER(tx_filtered, status_stats.filtered); if (sizeof(sta->driver_buffered_tids) == sizeof(u32)) debugfs_create_x32("driver_buffered_tids", 0400, diff --git a/net/mac80211/ethtool.c b/net/mac80211/ethtool.c index 188faab11c24..9cc986deda61 100644 --- a/net/mac80211/ethtool.c +++ b/net/mac80211/ethtool.c @@ -40,7 
+40,7 @@ static const char ieee80211_gstrings_sta_stats[][ETH_GSTRING_LEN] = { "rx_duplicates", "rx_fragments", "rx_dropped", "tx_packets", "tx_bytes", "tx_filtered", "tx_retry_failed", "tx_retries", - "beacon_loss", "sta_state", "txrate", "rxrate", "signal", + "sta_state", "txrate", "rxrate", "signal", "channel", "noise", "ch_time", "ch_time_busy", "ch_time_ext_busy", "ch_time_rx", "ch_time_tx" }; @@ -77,20 +77,19 @@ static void ieee80211_get_stats(struct net_device *dev, memset(data, 0, sizeof(u64) * STA_STATS_LEN); -#define ADD_STA_STATS(sta) \ - do { \ - data[i++] += sta->rx_packets; \ - data[i++] += sta->rx_bytes; \ - data[i++] += sta->num_duplicates; \ - data[i++] += sta->rx_fragments; \ - data[i++] += sta->rx_dropped; \ - \ - data[i++] += sinfo.tx_packets; \ - data[i++] += sinfo.tx_bytes; \ - data[i++] += sta->tx_filtered_count; \ - data[i++] += sta->tx_retry_failed; \ - data[i++] += sta->tx_retry_count; \ - data[i++] += sta->beacon_loss_count; \ +#define ADD_STA_STATS(sta) \ + do { \ + data[i++] += sta->rx_stats.packets; \ + data[i++] += sta->rx_stats.bytes; \ + data[i++] += sta->rx_stats.num_duplicates; \ + data[i++] += sta->rx_stats.fragments; \ + data[i++] += sta->rx_stats.dropped; \ + \ + data[i++] += sinfo.tx_packets; \ + data[i++] += sinfo.tx_bytes; \ + data[i++] += sta->status_stats.filtered; \ + data[i++] += sta->status_stats.retry_failed; \ + data[i++] += sta->status_stats.retry_count; \ } while (0) /* For Managed stations, find the single station based on BSSID diff --git a/net/mac80211/event.c b/net/mac80211/event.c deleted file mode 100644 index 01ae759518f6..000000000000 --- a/net/mac80211/event.c +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * mac80211 - events - */ -#include <net/cfg80211.h> -#include "ieee80211_i.h" - -/* - * Indicate a failed Michael MIC to userspace. If the caller knows the TSC of - * the frame that generated the MIC failure (i.e., if it was provided by the - * driver or is still in the frame), it should provide that information. - */ -void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx, - struct ieee80211_hdr *hdr, const u8 *tsc, - gfp_t gfp) -{ - cfg80211_michael_mic_failure(sdata->dev, hdr->addr2, - (hdr->addr1[0] & 0x01) ? 
- NL80211_KEYTYPE_GROUP : - NL80211_KEYTYPE_PAIRWISE, - keyidx, tsc, gfp); -} diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 7f72bc9bae2e..2001555d49cb 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -229,7 +229,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, struct cfg80211_chan_def chandef; struct ieee80211_channel *chan; struct beacon_data *presp; - enum nl80211_bss_scan_width scan_width; + struct cfg80211_inform_bss bss_meta = {}; bool have_higher_than_11mbit; bool radar_required; int err; @@ -383,10 +383,11 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, mod_timer(&ifibss->timer, round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL)); - scan_width = cfg80211_chandef_to_scan_width(&chandef); - bss = cfg80211_inform_bss_width_frame(local->hw.wiphy, chan, - scan_width, mgmt, - presp->head_len, 0, GFP_KERNEL); + bss_meta.chan = chan; + bss_meta.scan_width = cfg80211_chandef_to_scan_width(&chandef); + bss = cfg80211_inform_bss_frame_data(local->hw.wiphy, &bss_meta, mgmt, + presp->head_len, GFP_KERNEL); + cfg80211_put_bss(local->hw.wiphy, bss); netif_carrier_on(sdata->dev); cfg80211_ibss_joined(sdata->dev, ifibss->bssid, chan, GFP_KERNEL); @@ -646,7 +647,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid, return NULL; } - sta->last_rx = jiffies; + sta->rx_stats.last_rx = jiffies; /* make sure mandatory rates are always added */ sband = local->hw.wiphy->bands[band]; @@ -668,7 +669,8 @@ static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata) list_for_each_entry_rcu(sta, &local->sta_list, list) { if (sta->sdata == sdata && - time_after(sta->last_rx + IEEE80211_IBSS_MERGE_INTERVAL, + time_after(sta->rx_stats.last_rx + + IEEE80211_IBSS_MERGE_INTERVAL, jiffies)) { active++; break; @@ -1234,7 +1236,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata, if (!sta) return; - sta->last_rx = jiffies; + sta->rx_stats.last_rx = jiffies; /* make sure mandatory rates are always added */ sband = local->hw.wiphy->bands[band]; @@ -1252,7 +1254,7 @@ static void ieee80211_ibss_sta_expire(struct ieee80211_sub_if_data *sdata) struct ieee80211_local *local = sdata->local; struct sta_info *sta, *tmp; unsigned long exp_time = IEEE80211_IBSS_INACTIVITY_LIMIT; - unsigned long exp_rsn_time = IEEE80211_IBSS_RSN_INACTIVITY_LIMIT; + unsigned long exp_rsn = IEEE80211_IBSS_RSN_INACTIVITY_LIMIT; mutex_lock(&local->sta_mtx); @@ -1260,8 +1262,8 @@ static void ieee80211_ibss_sta_expire(struct ieee80211_sub_if_data *sdata) if (sdata != sta->sdata) continue; - if (time_after(jiffies, sta->last_rx + exp_time) || - (time_after(jiffies, sta->last_rx + exp_rsn_time) && + if (time_after(jiffies, sta->rx_stats.last_rx + exp_time) || + (time_after(jiffies, sta->rx_stats.last_rx + exp_rsn) && sta->sta_state != IEEE80211_STA_AUTHORIZED)) { sta_dbg(sta->sdata, "expiring inactive %sSTA %pM\n", sta->sta_state != IEEE80211_STA_AUTHORIZED ? diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index f9605f13def9..62f2a97cd2a6 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -34,6 +34,8 @@ #include "sta_info.h" #include "debug.h" +extern const struct cfg80211_ops mac80211_config_ops; + struct ieee80211_local; /* Maximum number of broadcast/multicast frames to buffer when some of the @@ -501,6 +503,9 @@ struct ieee80211_if_managed { */ unsigned int count_beacon_signal; + /* Number of times beacon loss was invoked. 
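(Aside: the new reporting pattern used in the ibss.c hunk above, shown in isolation; variables as in that hunk, with a NULL check added here for clarity.)

	struct cfg80211_inform_bss bss_meta = {
		.chan = chan,
		.scan_width = cfg80211_chandef_to_scan_width(&chandef),
		/* .signal stays 0 here; the scan.c hunk below fills it in */
	};
	struct cfg80211_bss *bss;

	bss = cfg80211_inform_bss_frame_data(local->hw.wiphy, &bss_meta,
					     mgmt, presp->head_len,
					     GFP_KERNEL);
	if (bss)
		cfg80211_put_bss(local->hw.wiphy, bss);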
*/ + unsigned int beacon_loss_count; + /* * Last Beacon frame signal strength average (ave_beacon_signal / 16) * that triggered a cqm event. 0 indicates that no event has been @@ -1305,7 +1310,6 @@ struct ieee80211_local { struct work_struct dynamic_ps_enable_work; struct work_struct dynamic_ps_disable_work; struct timer_list dynamic_ps_timer; - struct notifier_block network_latency_notifier; struct notifier_block ifa_notifier; struct notifier_block ifa6_notifier; @@ -1491,10 +1495,8 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, struct cfg80211_disassoc_request *req); void ieee80211_send_pspoll(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata); -void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency); +void ieee80211_recalc_ps(struct ieee80211_local *local); void ieee80211_recalc_ps_vif(struct ieee80211_sub_if_data *sdata); -int ieee80211_max_network_latency(struct notifier_block *nb, - unsigned long data, void *dummy); int ieee80211_set_arp_filter(struct ieee80211_sub_if_data *sdata); void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata); void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, @@ -1766,9 +1768,6 @@ extern const void *const mac80211_wiphy_privid; /* for wiphy privid */ int ieee80211_frame_duration(enum ieee80211_band band, size_t len, int rate, int erp, int short_preamble, int shift); -void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx, - struct ieee80211_hdr *hdr, const u8 *tsc, - gfp_t gfp); void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata, bool bss_notify); void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 42d7f0f65bd6..f848c75518a2 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -709,7 +709,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) if (hw_reconf_flags) ieee80211_hw_config(local, hw_reconf_flags); - ieee80211_recalc_ps(local, -1); + ieee80211_recalc_ps(local); if (sdata->vif.type == NL80211_IFTYPE_MONITOR || sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { @@ -1016,7 +1016,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, drv_remove_interface(local, sdata); } - ieee80211_recalc_ps(local, -1); + ieee80211_recalc_ps(local); if (cancel_scan) flush_delayed_work(&local->scan_work); diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 9b813a2f3a75..273c96de4910 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -20,7 +20,6 @@ #include <linux/if_arp.h> #include <linux/rtnetlink.h> #include <linux/bitmap.h> -#include <linux/pm_qos.h> #include <linux/inetdevice.h> #include <net/net_namespace.h> #include <net/cfg80211.h> @@ -32,7 +31,6 @@ #include "mesh.h" #include "wep.h" #include "led.h" -#include "cfg.h" #include "debugfs.h" void ieee80211_configure_filter(struct ieee80211_local *local) @@ -1083,13 +1081,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) rtnl_unlock(); - local->network_latency_notifier.notifier_call = - ieee80211_max_network_latency; - result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY, - &local->network_latency_notifier); - if (result) - goto fail_pm_qos; - #ifdef CONFIG_INET local->ifa_notifier.notifier_call = ieee80211_ifa_changed; result = register_inetaddr_notifier(&local->ifa_notifier); @@ -1114,10 +1105,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) #endif #if defined(CONFIG_INET) || defined(CONFIG_IPV6) fail_ifa: - pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY, - 
&local->network_latency_notifier); #endif - fail_pm_qos: rtnl_lock(); rate_control_deinitialize(local); ieee80211_remove_interfaces(local); @@ -1143,8 +1131,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) tasklet_kill(&local->tx_pending_tasklet); tasklet_kill(&local->tasklet); - pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY, - &local->network_latency_notifier); #ifdef CONFIG_INET unregister_inetaddr_notifier(&local->ifa_notifier); #endif diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index d80e0a4c16cf..c6be0b4f4058 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -329,7 +329,7 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local, if (sta->mesh->fail_avg >= 100) return MAX_METRIC; - sta_set_rate_info_tx(sta, &sta->last_tx_rate, &rinfo); + sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate, &rinfo); rate = cfg80211_calculate_bitrate(&rinfo); if (WARN_ON(!rate)) return MAX_METRIC; diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index a360b24b7df8..c1f889270484 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -60,7 +60,9 @@ static bool rssi_threshold_check(struct ieee80211_sub_if_data *sdata, { s32 rssi_threshold = sdata->u.mesh.mshcfg.rssi_threshold; return rssi_threshold == 0 || - (sta && (s8) -ewma_signal_read(&sta->avg_signal) > rssi_threshold); + (sta && + (s8)-ewma_signal_read(&sta->rx_stats.avg_signal) > + rssi_threshold); } /** @@ -390,7 +392,7 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata, rates = ieee80211_sta_get_rates(sdata, elems, band, &basic_rates); spin_lock_bh(&sta->mesh->plink_lock); - sta->last_rx = jiffies; + sta->rx_stats.last_rx = jiffies; /* rates and capabilities don't change during peering */ if (sta->mesh->plink_state == NL80211_PLINK_ESTAB && diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 56ef9a8e151c..ded4b976bb48 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -20,7 +20,6 @@ #include <linux/etherdevice.h> #include <linux/moduleparam.h> #include <linux/rtnetlink.h> -#include <linux/pm_qos.h> #include <linux/crc32.h> #include <linux/slab.h> #include <linux/export.h> @@ -1476,7 +1475,7 @@ static bool ieee80211_powersave_allowed(struct ieee80211_sub_if_data *sdata) } /* need to hold RTNL or interface lock */ -void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency) +void ieee80211_recalc_ps(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata, *found = NULL; int count = 0; @@ -1505,48 +1504,23 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency) } if (count == 1 && ieee80211_powersave_allowed(found)) { + u8 dtimper = found->u.mgd.dtim_period; s32 beaconint_us; - if (latency < 0) - latency = pm_qos_request(PM_QOS_NETWORK_LATENCY); - beaconint_us = ieee80211_tu_to_usec( found->vif.bss_conf.beacon_int); timeout = local->dynamic_ps_forced_timeout; - if (timeout < 0) { - /* - * Go to full PSM if the user configures a very low - * latency requirement. - * The 2000 second value is there for compatibility - * until the PM_QOS_NETWORK_LATENCY is configured - * with real values. 
- */ - if (latency > (1900 * USEC_PER_MSEC) && - latency != (2000 * USEC_PER_SEC)) - timeout = 0; - else - timeout = 100; - } + if (timeout < 0) + timeout = 100; local->hw.conf.dynamic_ps_timeout = timeout; - if (beaconint_us > latency) { - local->ps_sdata = NULL; - } else { - int maxslp = 1; - u8 dtimper = found->u.mgd.dtim_period; - - /* If the TIM IE is invalid, pretend the value is 1 */ - if (!dtimper) - dtimper = 1; - else if (dtimper > 1) - maxslp = min_t(int, dtimper, - latency / beaconint_us); - - local->hw.conf.max_sleep_period = maxslp; - local->hw.conf.ps_dtim_period = dtimper; - local->ps_sdata = found; - } + /* If the TIM IE is invalid, pretend the value is 1 */ + if (!dtimper) + dtimper = 1; + + local->hw.conf.ps_dtim_period = dtimper; + local->ps_sdata = found; } else { local->ps_sdata = NULL; } @@ -1997,7 +1971,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, ieee80211_bss_info_change_notify(sdata, bss_info_changed); mutex_lock(&local->iflist_mtx); - ieee80211_recalc_ps(local, -1); + ieee80211_recalc_ps(local); mutex_unlock(&local->iflist_mtx); ieee80211_recalc_smps(sdata); @@ -2165,7 +2139,7 @@ static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata) __ieee80211_stop_poll(sdata); mutex_lock(&local->iflist_mtx); - ieee80211_recalc_ps(local, -1); + ieee80211_recalc_ps(local); mutex_unlock(&local->iflist_mtx); if (ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR)) @@ -2341,7 +2315,7 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata, goto out; mutex_lock(&sdata->local->iflist_mtx); - ieee80211_recalc_ps(sdata->local, -1); + ieee80211_recalc_ps(sdata->local); mutex_unlock(&sdata->local->iflist_mtx); ifmgd->probe_send_count = 0; @@ -2446,15 +2420,9 @@ static void ieee80211_beacon_connection_loss_work(struct work_struct *work) container_of(work, struct ieee80211_sub_if_data, u.mgd.beacon_connection_loss_work); struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; - struct sta_info *sta; - if (ifmgd->associated) { - rcu_read_lock(); - sta = sta_info_get(sdata, ifmgd->bssid); - if (sta) - sta->beacon_loss_count++; - rcu_read_unlock(); - } + if (ifmgd->associated) + ifmgd->beacon_loss_count++; if (ifmgd->connection_loss) { sdata_info(sdata, "Connection to AP %pM lost\n", @@ -3044,8 +3012,12 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, rate_control_rate_init(sta); - if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED) + if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED) { set_sta_flag(sta, WLAN_STA_MFP); + sta->sta.mfp = true; + } else { + sta->sta.mfp = false; + } sta->sta.wme = elems.wmm_param && local->hw.queues >= IEEE80211_NUM_ACS; @@ -3544,7 +3516,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, ifmgd->have_beacon = true; mutex_lock(&local->iflist_mtx); - ieee80211_recalc_ps(local, -1); + ieee80211_recalc_ps(local); mutex_unlock(&local->iflist_mtx); ieee80211_recalc_ps_vif(sdata); @@ -4148,21 +4120,6 @@ void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local) rcu_read_unlock(); } -int ieee80211_max_network_latency(struct notifier_block *nb, - unsigned long data, void *dummy) -{ - s32 latency_usec = (s32) data; - struct ieee80211_local *local = - container_of(nb, struct ieee80211_local, - network_latency_notifier); - - mutex_lock(&local->iflist_mtx); - ieee80211_recalc_ps(local, latency_usec); - mutex_unlock(&local->iflist_mtx); - - return NOTIFY_OK; -} - static u8 ieee80211_ht_vht_rx_chains(struct ieee80211_sub_if_data *sdata, struct 
cfg80211_bss *cbss) { diff --git a/net/mac80211/ocb.c b/net/mac80211/ocb.c index 573b81a1fb2d..0be0aadfc559 100644 --- a/net/mac80211/ocb.c +++ b/net/mac80211/ocb.c @@ -75,7 +75,7 @@ void ieee80211_ocb_rx_no_sta(struct ieee80211_sub_if_data *sdata, if (!sta) return; - sta->last_rx = jiffies; + sta->rx_stats.last_rx = jiffies; /* Add only mandatory rates for now */ sband = local->hw.wiphy->bands[band]; diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 5bc0b88d9eb1..8bae5de0dc44 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1113,16 +1113,16 @@ ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx) is_multicast_ether_addr(hdr->addr1)) return RX_CONTINUE; - if (rx->sta) { - if (unlikely(ieee80211_has_retry(hdr->frame_control) && - rx->sta->last_seq_ctrl[rx->seqno_idx] == - hdr->seq_ctrl)) { - I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount); - rx->sta->num_duplicates++; - return RX_DROP_UNUSABLE; - } else if (!(status->flag & RX_FLAG_AMSDU_MORE)) { - rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl; - } + if (!rx->sta) + return RX_CONTINUE; + + if (unlikely(ieee80211_has_retry(hdr->frame_control) && + rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) { + I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount); + rx->sta->rx_stats.num_duplicates++; + return RX_DROP_UNUSABLE; + } else if (!(status->flag & RX_FLAG_AMSDU_MORE)) { + rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl; } return RX_CONTINUE; @@ -1396,51 +1396,56 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) NL80211_IFTYPE_ADHOC); if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) && test_sta_flag(sta, WLAN_STA_AUTHORIZED)) { - sta->last_rx = jiffies; + sta->rx_stats.last_rx = jiffies; if (ieee80211_is_data(hdr->frame_control) && !is_multicast_ether_addr(hdr->addr1)) { - sta->last_rx_rate_idx = status->rate_idx; - sta->last_rx_rate_flag = status->flag; - sta->last_rx_rate_vht_flag = status->vht_flag; - sta->last_rx_rate_vht_nss = status->vht_nss; + sta->rx_stats.last_rate_idx = + status->rate_idx; + sta->rx_stats.last_rate_flag = + status->flag; + sta->rx_stats.last_rate_vht_flag = + status->vht_flag; + sta->rx_stats.last_rate_vht_nss = + status->vht_nss; } } } else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) { - sta->last_rx = jiffies; + sta->rx_stats.last_rx = jiffies; } else if (!is_multicast_ether_addr(hdr->addr1)) { /* * Mesh beacons will update last_rx when if they are found to * match the current local configuration when processed. 
*/ - sta->last_rx = jiffies; + sta->rx_stats.last_rx = jiffies; if (ieee80211_is_data(hdr->frame_control)) { - sta->last_rx_rate_idx = status->rate_idx; - sta->last_rx_rate_flag = status->flag; - sta->last_rx_rate_vht_flag = status->vht_flag; - sta->last_rx_rate_vht_nss = status->vht_nss; + sta->rx_stats.last_rate_idx = status->rate_idx; + sta->rx_stats.last_rate_flag = status->flag; + sta->rx_stats.last_rate_vht_flag = status->vht_flag; + sta->rx_stats.last_rate_vht_nss = status->vht_nss; } } if (rx->sdata->vif.type == NL80211_IFTYPE_STATION) ieee80211_sta_rx_notify(rx->sdata, hdr); - sta->rx_fragments++; - sta->rx_bytes += rx->skb->len; + sta->rx_stats.fragments++; + sta->rx_stats.bytes += rx->skb->len; if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { - sta->last_signal = status->signal; - ewma_signal_add(&sta->avg_signal, -status->signal); + sta->rx_stats.last_signal = status->signal; + ewma_signal_add(&sta->rx_stats.avg_signal, -status->signal); } if (status->chains) { - sta->chains = status->chains; + sta->rx_stats.chains = status->chains; for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) { int signal = status->chain_signal[i]; if (!(status->chains & BIT(i))) continue; - sta->chain_signal_last[i] = signal; - ewma_signal_add(&sta->chain_signal_avg[i], -signal); + sta->rx_stats.chain_signal_last[i] = signal; + ewma_signal_add(&sta->rx_stats.chain_signal_avg[i], + -signal); } } @@ -1500,7 +1505,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) * Update counter and free packet here to avoid * counting this as a dropped packed. */ - sta->rx_packets++; + sta->rx_stats.packets++; dev_kfree_skb(rx->skb); return RX_QUEUED; } @@ -1922,7 +1927,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) ieee80211_led_rx(rx->local); out_no_led: if (rx->sta) - rx->sta->rx_packets++; + rx->sta->rx_stats.packets++; return RX_CONTINUE; } @@ -2376,7 +2381,7 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx) * for non-QoS-data frames. Here we know it's a data * frame, so count MSDUs. 
*/ - rx->sta->rx_msdu[rx->seqno_idx]++; + rx->sta->rx_stats.msdu[rx->seqno_idx]++; } /* @@ -2413,7 +2418,7 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx) skb_queue_tail(&local->skb_queue_tdls_chsw, rx->skb); schedule_work(&local->tdls_chsw_work); if (rx->sta) - rx->sta->rx_packets++; + rx->sta->rx_stats.packets++; return RX_QUEUED; } @@ -2875,7 +2880,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) handled: if (rx->sta) - rx->sta->rx_packets++; + rx->sta->rx_stats.packets++; dev_kfree_skb(rx->skb); return RX_QUEUED; @@ -2884,7 +2889,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) skb_queue_tail(&sdata->skb_queue, rx->skb); ieee80211_queue_work(&local->hw, &sdata->work); if (rx->sta) - rx->sta->rx_packets++; + rx->sta->rx_stats.packets++; return RX_QUEUED; } @@ -2911,7 +2916,7 @@ ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx) if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig, rx->skb->data, rx->skb->len, 0)) { if (rx->sta) - rx->sta->rx_packets++; + rx->sta->rx_stats.packets++; dev_kfree_skb(rx->skb); return RX_QUEUED; } @@ -3030,7 +3035,7 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) skb_queue_tail(&sdata->skb_queue, rx->skb); ieee80211_queue_work(&rx->local->hw, &sdata->work); if (rx->sta) - rx->sta->rx_packets++; + rx->sta->rx_stats.packets++; return RX_QUEUED; } @@ -3112,7 +3117,7 @@ static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx, case RX_DROP_MONITOR: I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); if (rx->sta) - rx->sta->rx_dropped++; + rx->sta->rx_stats.dropped++; /* fall through */ case RX_CONTINUE: { struct ieee80211_rate *rate = NULL; @@ -3132,7 +3137,7 @@ static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx, case RX_DROP_UNUSABLE: I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); if (rx->sta) - rx->sta->rx_dropped++; + rx->sta->rx_stats.dropped++; dev_kfree_skb(rx->skb); break; case RX_QUEUED: diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 11d0901ebb7b..b64fd2b2d95a 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -16,7 +16,6 @@ #include <linux/if_arp.h> #include <linux/etherdevice.h> #include <linux/rtnetlink.h> -#include <linux/pm_qos.h> #include <net/sch_generic.h> #include <linux/slab.h> #include <linux/export.h> @@ -67,24 +66,23 @@ ieee80211_bss_info_update(struct ieee80211_local *local, struct cfg80211_bss *cbss; struct ieee80211_bss *bss; int clen, srlen; - enum nl80211_bss_scan_width scan_width; - s32 signal = 0; + struct cfg80211_inform_bss bss_meta = {}; bool signal_valid; if (ieee80211_hw_check(&local->hw, SIGNAL_DBM)) - signal = rx_status->signal * 100; + bss_meta.signal = rx_status->signal * 100; else if (ieee80211_hw_check(&local->hw, SIGNAL_UNSPEC)) - signal = (rx_status->signal * 100) / local->hw.max_signal; + bss_meta.signal = (rx_status->signal * 100) / local->hw.max_signal; - scan_width = NL80211_BSS_CHAN_WIDTH_20; + bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_20; if (rx_status->flag & RX_FLAG_5MHZ) - scan_width = NL80211_BSS_CHAN_WIDTH_5; + bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_5; if (rx_status->flag & RX_FLAG_10MHZ) - scan_width = NL80211_BSS_CHAN_WIDTH_10; + bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_10; - cbss = cfg80211_inform_bss_width_frame(local->hw.wiphy, channel, - scan_width, mgmt, len, signal, - GFP_ATOMIC); + bss_meta.chan = channel; + cbss = cfg80211_inform_bss_frame_data(local->hw.wiphy, &bss_meta, + mgmt, len, GFP_ATOMIC); if (!cbss) return NULL; /* In case the signal is invalid update the status */ diff --git 
a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index c3644458e2ee..f91d1873218c 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -331,7 +331,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, memcpy(sta->sta.addr, addr, ETH_ALEN); sta->local = local; sta->sdata = sdata; - sta->last_rx = jiffies; + sta->rx_stats.last_rx = jiffies; sta->sta_state = IEEE80211_STA_NONE; @@ -339,9 +339,9 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, sta->reserved_tid = IEEE80211_TID_UNRESERVED; sta->last_connected = ktime_get_seconds(); - ewma_signal_init(&sta->avg_signal); - for (i = 0; i < ARRAY_SIZE(sta->chain_signal_avg); i++) - ewma_signal_init(&sta->chain_signal_avg[i]); + ewma_signal_init(&sta->rx_stats.avg_signal); + for (i = 0; i < ARRAY_SIZE(sta->rx_stats.chain_signal_avg); i++) + ewma_signal_init(&sta->rx_stats.chain_signal_avg[i]); if (local->ops->wake_tx_queue) { void *txq_data; @@ -1066,7 +1066,7 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, if (sdata != sta->sdata) continue; - if (time_after(jiffies, sta->last_rx + exp_time)) { + if (time_after(jiffies, sta->rx_stats.last_rx + exp_time)) { sta_dbg(sta->sdata, "expiring inactive STA %pM\n", sta->sta.addr); @@ -1806,6 +1806,45 @@ u8 sta_info_tx_streams(struct sta_info *sta) >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT) + 1; } +static void sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo) +{ + rinfo->flags = 0; + + if (sta->rx_stats.last_rate_flag & RX_FLAG_HT) { + rinfo->flags |= RATE_INFO_FLAGS_MCS; + rinfo->mcs = sta->rx_stats.last_rate_idx; + } else if (sta->rx_stats.last_rate_flag & RX_FLAG_VHT) { + rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS; + rinfo->nss = sta->rx_stats.last_rate_vht_nss; + rinfo->mcs = sta->rx_stats.last_rate_idx; + } else { + struct ieee80211_supported_band *sband; + int shift = ieee80211_vif_get_shift(&sta->sdata->vif); + u16 brate; + + sband = sta->local->hw.wiphy->bands[ + ieee80211_get_sdata_band(sta->sdata)]; + brate = sband->bitrates[sta->rx_stats.last_rate_idx].bitrate; + rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift); + } + + if (sta->rx_stats.last_rate_flag & RX_FLAG_SHORT_GI) + rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; + + if (sta->rx_stats.last_rate_flag & RX_FLAG_5MHZ) + rinfo->bw = RATE_INFO_BW_5; + else if (sta->rx_stats.last_rate_flag & RX_FLAG_10MHZ) + rinfo->bw = RATE_INFO_BW_10; + else if (sta->rx_stats.last_rate_flag & RX_FLAG_40MHZ) + rinfo->bw = RATE_INFO_BW_40; + else if (sta->rx_stats.last_rate_vht_flag & RX_VHT_FLAG_80MHZ) + rinfo->bw = RATE_INFO_BW_80; + else if (sta->rx_stats.last_rate_vht_flag & RX_VHT_FLAG_160MHZ) + rinfo->bw = RATE_INFO_BW_160; + else + rinfo->bw = RATE_INFO_BW_20; +} + void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) { struct ieee80211_sub_if_data *sdata = sta->sdata; @@ -1832,50 +1871,54 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) BIT(NL80211_STA_INFO_STA_FLAGS) | BIT(NL80211_STA_INFO_BSS_PARAM) | BIT(NL80211_STA_INFO_CONNECTED_TIME) | - BIT(NL80211_STA_INFO_RX_DROP_MISC) | - BIT(NL80211_STA_INFO_BEACON_LOSS); + BIT(NL80211_STA_INFO_RX_DROP_MISC); + + if (sdata->vif.type == NL80211_IFTYPE_STATION) { + sinfo->beacon_loss_count = sdata->u.mgd.beacon_loss_count; + sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_LOSS); + } sinfo->connected_time = ktime_get_seconds() - sta->last_connected; - sinfo->inactive_time = jiffies_to_msecs(jiffies - sta->last_rx); + sinfo->inactive_time = + jiffies_to_msecs(jiffies - sta->rx_stats.last_rx); 
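(Aside: a quick map of the field relocations performed in this and the following hunks, all taken from this patch.)

	/* sta->last_rx                  -> sta->rx_stats.last_rx
	 * sta->rx_packets, rx_bytes    -> sta->rx_stats.packets, .bytes
	 * sta->rx_dropped              -> sta->rx_stats.dropped
	 * sta->tx_packets[ac], tx_bytes[ac]
	 *                              -> sta->tx_stats.packets[ac], .bytes[ac]
	 * sta->tx_retry_count, tx_retry_failed
	 *                              -> sta->status_stats.retry_count, .retry_failed
	 * sta->tx_msdu_retries[tid], tx_msdu_failed[tid]
	 *                              -> sta->status_stats.msdu_retries[tid], .msdu_failed[tid]
	 */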
if (!(sinfo->filled & (BIT(NL80211_STA_INFO_TX_BYTES64) | BIT(NL80211_STA_INFO_TX_BYTES)))) { sinfo->tx_bytes = 0; for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) - sinfo->tx_bytes += sta->tx_bytes[ac]; + sinfo->tx_bytes += sta->tx_stats.bytes[ac]; sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES64); } if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_PACKETS))) { sinfo->tx_packets = 0; for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) - sinfo->tx_packets += sta->tx_packets[ac]; + sinfo->tx_packets += sta->tx_stats.packets[ac]; sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS); } if (!(sinfo->filled & (BIT(NL80211_STA_INFO_RX_BYTES64) | BIT(NL80211_STA_INFO_RX_BYTES)))) { - sinfo->rx_bytes = sta->rx_bytes; + sinfo->rx_bytes = sta->rx_stats.bytes; sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES64); } if (!(sinfo->filled & BIT(NL80211_STA_INFO_RX_PACKETS))) { - sinfo->rx_packets = sta->rx_packets; + sinfo->rx_packets = sta->rx_stats.packets; sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS); } if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_RETRIES))) { - sinfo->tx_retries = sta->tx_retry_count; + sinfo->tx_retries = sta->status_stats.retry_count; sinfo->filled |= BIT(NL80211_STA_INFO_TX_RETRIES); } if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_FAILED))) { - sinfo->tx_failed = sta->tx_retry_failed; + sinfo->tx_failed = sta->status_stats.retry_failed; sinfo->filled |= BIT(NL80211_STA_INFO_TX_FAILED); } - sinfo->rx_dropped_misc = sta->rx_dropped; - sinfo->beacon_loss_count = sta->beacon_loss_count; + sinfo->rx_dropped_misc = sta->rx_stats.dropped; if (sdata->vif.type == NL80211_IFTYPE_STATION && !(sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)) { @@ -1887,33 +1930,35 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) if (ieee80211_hw_check(&sta->local->hw, SIGNAL_DBM) || ieee80211_hw_check(&sta->local->hw, SIGNAL_UNSPEC)) { if (!(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL))) { - sinfo->signal = (s8)sta->last_signal; + sinfo->signal = (s8)sta->rx_stats.last_signal; sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); } if (!(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL_AVG))) { sinfo->signal_avg = - (s8) -ewma_signal_read(&sta->avg_signal); + -ewma_signal_read(&sta->rx_stats.avg_signal); sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG); } } - if (sta->chains && + if (sta->rx_stats.chains && !(sinfo->filled & (BIT(NL80211_STA_INFO_CHAIN_SIGNAL) | BIT(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)))) { sinfo->filled |= BIT(NL80211_STA_INFO_CHAIN_SIGNAL) | BIT(NL80211_STA_INFO_CHAIN_SIGNAL_AVG); - sinfo->chains = sta->chains; + sinfo->chains = sta->rx_stats.chains; for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) { - sinfo->chain_signal[i] = sta->chain_signal_last[i]; + sinfo->chain_signal[i] = + sta->rx_stats.chain_signal_last[i]; sinfo->chain_signal_avg[i] = - (s8) -ewma_signal_read(&sta->chain_signal_avg[i]); + -ewma_signal_read(&sta->rx_stats.chain_signal_avg[i]); } } if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_BITRATE))) { - sta_set_rate_info_tx(sta, &sta->last_tx_rate, &sinfo->txrate); + sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate, + &sinfo->txrate); sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE); } @@ -1928,12 +1973,12 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) if (!(tidstats->filled & BIT(NL80211_TID_STATS_RX_MSDU))) { tidstats->filled |= BIT(NL80211_TID_STATS_RX_MSDU); - tidstats->rx_msdu = sta->rx_msdu[i]; + tidstats->rx_msdu = sta->rx_stats.msdu[i]; } if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU))) { tidstats->filled |= 
BIT(NL80211_TID_STATS_TX_MSDU); - tidstats->tx_msdu = sta->tx_msdu[i]; + tidstats->tx_msdu = sta->tx_stats.msdu[i]; } if (!(tidstats->filled & @@ -1941,7 +1986,8 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_RETRIES); - tidstats->tx_msdu_retries = sta->tx_msdu_retries[i]; + tidstats->tx_msdu_retries = + sta->status_stats.msdu_retries[i]; } if (!(tidstats->filled & @@ -1949,7 +1995,8 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_FAILED); - tidstats->tx_msdu_failed = sta->tx_msdu_failed[i]; + tidstats->tx_msdu_failed = + sta->status_stats.msdu_failed[i]; } } diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index d5ded8749ac4..2cafb21b422f 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -344,12 +344,6 @@ DECLARE_EWMA(signal, 1024, 8) * @rate_ctrl_lock: spinlock used to protect rate control data * (data inside the algorithm, so serializes calls there) * @rate_ctrl_priv: rate control private per-STA pointer - * @last_tx_rate: rate used for last transmit, to report to userspace as - * "the" transmit rate - * @last_rx_rate_idx: rx status rate index of the last data packet - * @last_rx_rate_flag: rx status flag of the last data packet - * @last_rx_rate_vht_flag: rx status vht flag of the last data packet - * @last_rx_rate_vht_nss: rx status nss of last data packet * @lock: used for locking all fields that require locking, see comments * in the header file. * @drv_deliver_wk: used for delivering frames after driver PS unblocking @@ -364,23 +358,9 @@ DECLARE_EWMA(signal, 1024, 8) * the station when it leaves powersave or polls for frames * @driver_buffered_tids: bitmap of TIDs the driver has data buffered on * @txq_buffered_tids: bitmap of TIDs that mac80211 has txq data buffered on - * @rx_packets: Number of MSDUs received from this STA - * @rx_bytes: Number of bytes received from this STA - * @last_rx: time (in jiffies) when last frame was received from this STA * @last_connected: time (in seconds) when a station got connected - * @num_duplicates: number of duplicate frames received from this STA - * @rx_fragments: number of received MPDUs - * @rx_dropped: number of dropped MPDUs from this STA - * @last_signal: signal of last received frame from this STA - * @avg_signal: moving average of signal of received frames from this STA - * @last_ack_signal: signal of last received Ack frame from this STA * @last_seq_ctrl: last received seq/frag number from this STA (per TID * plus one for non-QoS frames) - * @tx_filtered_count: number of frames the hardware filtered for this STA - * @tx_retry_failed: number of frames that failed retry - * @tx_retry_count: total number of retries for frames to this STA - * @tx_packets: number of RX/TX MSDUs - * @tx_bytes: number of bytes transmitted to this STA * @tid_seq: per-TID sequence numbers for sending to this STA * @ampdu_mlme: A-MPDU state machine state * @timer_to_tid: identity mapping to ID timers @@ -388,32 +368,22 @@ DECLARE_EWMA(signal, 1024, 8) * @debugfs: debug filesystem info * @dead: set to true when sta is unlinked * @uploaded: set to true when sta is uploaded to the driver - * @lost_packets: number of consecutive lost packets * @sta: station information we share with the driver * @sta_state: duplicates information about station state (for debug) * 
@beacon_loss_count: number of times beacon loss has triggered * @rcu_head: RCU head used for freeing this station struct * @cur_max_bandwidth: maximum bandwidth to use for TX to the station, * taken from HT/VHT capabilities or VHT operating mode notification - * @chains: chains ever used for RX from this station - * @chain_signal_last: last signal (per chain) - * @chain_signal_avg: signal average (per chain) * @known_smps_mode: the smps_mode the client thinks we are in. Relevant for * AP only. * @cipher_scheme: optional cipher scheme for this station - * @last_tdls_pkt_time: holds the time in jiffies of last TDLS pkt ACKed * @reserved_tid: reserved TID (if any, otherwise IEEE80211_TID_UNRESERVED) - * @tx_msdu: MSDUs transmitted to this station, using IEEE80211_NUM_TID - * entry for non-QoS frames - * @tx_msdu_retries: MSDU retries for transmissions to to this station, - * using IEEE80211_NUM_TID entry for non-QoS frames - * @tx_msdu_failed: MSDU failures for transmissions to to this station, - * using IEEE80211_NUM_TID entry for non-QoS frames - * @rx_msdu: MSDUs received from this station, using IEEE80211_NUM_TID - * entry for non-QoS frames * @fast_tx: TX fastpath information * @tdls_chandef: a TDLS peer can have a wider chandef that is compatible to * the BSS one. + * @tx_stats: TX statistics + * @rx_stats: RX statistics + * @status_stats: TX status statistics */ struct sta_info { /* General information, mostly static */ @@ -457,42 +427,49 @@ struct sta_info { unsigned long driver_buffered_tids; unsigned long txq_buffered_tids; - /* Updated from RX path only, no locking requirements */ - unsigned long rx_packets; - u64 rx_bytes; - unsigned long last_rx; long last_connected; - unsigned long num_duplicates; - unsigned long rx_fragments; - unsigned long rx_dropped; - int last_signal; - struct ewma_signal avg_signal; - int last_ack_signal; - u8 chains; - s8 chain_signal_last[IEEE80211_MAX_CHAINS]; - struct ewma_signal chain_signal_avg[IEEE80211_MAX_CHAINS]; + /* Updated from RX path only, no locking requirements */ + struct { + unsigned long packets; + u64 bytes; + unsigned long last_rx; + unsigned long num_duplicates; + unsigned long fragments; + unsigned long dropped; + int last_signal; + struct ewma_signal avg_signal; + u8 chains; + s8 chain_signal_last[IEEE80211_MAX_CHAINS]; + struct ewma_signal chain_signal_avg[IEEE80211_MAX_CHAINS]; + int last_rate_idx; + u32 last_rate_flag; + u32 last_rate_vht_flag; + u8 last_rate_vht_nss; + u64 msdu[IEEE80211_NUM_TIDS + 1]; + } rx_stats; /* Plus 1 for non-QoS frames */ __le16 last_seq_ctrl[IEEE80211_NUM_TIDS + 1]; /* Updated from TX status path only, no locking requirements */ - unsigned long tx_filtered_count; - unsigned long tx_retry_failed, tx_retry_count; + struct { + unsigned long filtered; + unsigned long retry_failed, retry_count; + unsigned int lost_packets; + unsigned long last_tdls_pkt_time; + u64 msdu_retries[IEEE80211_NUM_TIDS + 1]; + u64 msdu_failed[IEEE80211_NUM_TIDS + 1]; + } status_stats; /* Updated from TX path only, no locking requirements */ - u64 tx_packets[IEEE80211_NUM_ACS]; - u64 tx_bytes[IEEE80211_NUM_ACS]; - struct ieee80211_tx_rate last_tx_rate; - int last_rx_rate_idx; - u32 last_rx_rate_flag; - u32 last_rx_rate_vht_flag; - u8 last_rx_rate_vht_nss; + struct { + u64 packets[IEEE80211_NUM_ACS]; + u64 bytes[IEEE80211_NUM_ACS]; + struct ieee80211_tx_rate last_rate; + u64 msdu[IEEE80211_NUM_TIDS + 1]; + } tx_stats; u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1]; - u64 tx_msdu[IEEE80211_NUM_TIDS + 1]; - u64 
tx_msdu_retries[IEEE80211_NUM_TIDS + 1]; - u64 tx_msdu_failed[IEEE80211_NUM_TIDS + 1]; - u64 rx_msdu[IEEE80211_NUM_TIDS + 1]; /* * Aggregation information, locked with lock. @@ -509,15 +486,9 @@ struct sta_info { enum ieee80211_sta_rx_bandwidth cur_max_bandwidth; - unsigned int lost_packets; - unsigned int beacon_loss_count; - enum ieee80211_smps_mode known_smps_mode; const struct ieee80211_cipher_scheme *cipher_scheme; - /* TDLS timeout data */ - unsigned long last_tdls_pkt_time; - u8 reserved_tid; struct cfg80211_chan_def tdls_chandef; @@ -688,8 +659,6 @@ static inline int sta_info_flush(struct ieee80211_sub_if_data *sdata) void sta_set_rate_info_tx(struct sta_info *sta, const struct ieee80211_tx_rate *rate, struct rate_info *rinfo); -void sta_set_rate_info_rx(struct sta_info *sta, - struct rate_info *rinfo); void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo); void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 9169ccc36534..5bad05e9af90 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -67,7 +67,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, IEEE80211_TX_INTFL_RETRANSMISSION; info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS; - sta->tx_filtered_count++; + sta->status_stats.filtered++; /* * Clear more-data bit on filtered frames, it might be set @@ -183,7 +183,7 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb) struct ieee80211_sub_if_data *sdata = sta->sdata; if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) - sta->last_rx = jiffies; + sta->rx_stats.last_rx = jiffies; if (ieee80211_is_data_qos(mgmt->frame_control)) { struct ieee80211_hdr *hdr = (void *) skb->data; @@ -557,8 +557,9 @@ static void ieee80211_lost_packet(struct sta_info *sta, !(info->flags & IEEE80211_TX_STAT_AMPDU)) return; - sta->lost_packets++; - if (!sta->sta.tdls && sta->lost_packets < STA_LOST_PKT_THRESHOLD) + sta->status_stats.lost_packets++; + if (!sta->sta.tdls && + sta->status_stats.lost_packets < STA_LOST_PKT_THRESHOLD) return; /* @@ -568,14 +569,15 @@ static void ieee80211_lost_packet(struct sta_info *sta, * mechanism. 
*/ if (sta->sta.tdls && - (sta->lost_packets < STA_LOST_TDLS_PKT_THRESHOLD || + (sta->status_stats.lost_packets < STA_LOST_TDLS_PKT_THRESHOLD || time_before(jiffies, - sta->last_tdls_pkt_time + STA_LOST_TDLS_PKT_TIME))) + sta->status_stats.last_tdls_pkt_time + + STA_LOST_TDLS_PKT_TIME))) return; cfg80211_cqm_pktloss_notify(sta->sdata->dev, sta->sta.addr, - sta->lost_packets, GFP_ATOMIC); - sta->lost_packets = 0; + sta->status_stats.lost_packets, GFP_ATOMIC); + sta->status_stats.lost_packets = 0; } static int ieee80211_tx_get_rates(struct ieee80211_hw *hw, @@ -636,18 +638,18 @@ void ieee80211_tx_status_noskb(struct ieee80211_hw *hw, sta = container_of(pubsta, struct sta_info, sta); if (!acked) - sta->tx_retry_failed++; - sta->tx_retry_count += retry_count; + sta->status_stats.retry_failed++; + sta->status_stats.retry_count += retry_count; if (acked) { - sta->last_rx = jiffies; + sta->rx_stats.last_rx = jiffies; - if (sta->lost_packets) - sta->lost_packets = 0; + if (sta->status_stats.lost_packets) + sta->status_stats.lost_packets = 0; /* Track when last TDLS packet was ACKed */ if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) - sta->last_tdls_pkt_time = jiffies; + sta->status_stats.last_tdls_pkt_time = jiffies; } else { ieee80211_lost_packet(sta, info); } @@ -784,7 +786,8 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL) && (ieee80211_is_data(hdr->frame_control)) && (rates_idx != -1)) - sta->last_tx_rate = info->status.rates[rates_idx]; + sta->tx_stats.last_rate = + info->status.rates[rates_idx]; if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) && (ieee80211_is_data_qos(fc))) { @@ -830,13 +833,15 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) return; } else { if (!acked) - sta->tx_retry_failed++; - sta->tx_retry_count += retry_count; + sta->status_stats.retry_failed++; + sta->status_stats.retry_count += retry_count; if (ieee80211_is_data_present(fc)) { if (!acked) - sta->tx_msdu_failed[tid]++; - sta->tx_msdu_retries[tid] += retry_count; + sta->status_stats.msdu_failed[tid]++; + + sta->status_stats.msdu_retries[tid] += + retry_count; } } @@ -854,19 +859,17 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { if (info->flags & IEEE80211_TX_STAT_ACK) { - if (sta->lost_packets) - sta->lost_packets = 0; + if (sta->status_stats.lost_packets) + sta->status_stats.lost_packets = 0; /* Track when last TDLS packet was ACKed */ if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) - sta->last_tdls_pkt_time = jiffies; + sta->status_stats.last_tdls_pkt_time = + jiffies; } else { ieee80211_lost_packet(sta, info); } } - - if (acked) - sta->last_ack_signal = info->status.ack_signal; } rcu_read_unlock(); diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h index 314e3bd7fbdb..5cf8f4ea077f 100644 --- a/net/mac80211/trace.h +++ b/net/mac80211/trace.h @@ -325,7 +325,6 @@ TRACE_EVENT(drv_config, __field(u32, flags) __field(int, power_level) __field(int, dynamic_ps_timeout) - __field(int, max_sleep_period) __field(u16, listen_interval) __field(u8, long_frame_max_tx_count) __field(u8, short_frame_max_tx_count) @@ -339,7 +338,6 @@ TRACE_EVENT(drv_config, __entry->flags = local->hw.conf.flags; __entry->power_level = local->hw.conf.power_level; __entry->dynamic_ps_timeout = local->hw.conf.dynamic_ps_timeout; - __entry->max_sleep_period = local->hw.conf.max_sleep_period; __entry->listen_interval = local->hw.conf.listen_interval; 
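/* A minimal illustrative helper (hypothetical, not part of this series):
 * with the per-STA counters regrouped into sta->tx_stats, sta->rx_stats
 * and sta->status_stats in sta_info.h above, aggregate views are now
 * read from the nested structs, e.g. summing the per-AC transmit bytes:
 */
static u64 sta_total_tx_bytes(struct sta_info *sta)
{
	u64 total = 0;
	int ac;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
		total += sta->tx_stats.bytes[ac];

	return total;
}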
__entry->long_frame_max_tx_count = local->hw.conf.long_frame_max_tx_count; diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 3478a83187e5..bdc224d5053a 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -757,9 +757,9 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx) if (txrc.reported_rate.idx < 0) { txrc.reported_rate = tx->rate; if (tx->sta && ieee80211_is_data(hdr->frame_control)) - tx->sta->last_tx_rate = txrc.reported_rate; + tx->sta->tx_stats.last_rate = txrc.reported_rate; } else if (tx->sta) - tx->sta->last_tx_rate = txrc.reported_rate; + tx->sta->tx_stats.last_rate = txrc.reported_rate; if (ratetbl) return TX_CONTINUE; @@ -824,7 +824,7 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx) hdr->seq_ctrl = cpu_to_le16(tx->sdata->sequence_number); tx->sdata->sequence_number += 0x10; if (tx->sta) - tx->sta->tx_msdu[IEEE80211_NUM_TIDS]++; + tx->sta->tx_stats.msdu[IEEE80211_NUM_TIDS]++; return TX_CONTINUE; } @@ -840,7 +840,7 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx) qc = ieee80211_get_qos_ctl(hdr); tid = *qc & IEEE80211_QOS_CTL_TID_MASK; - tx->sta->tx_msdu[tid]++; + tx->sta->tx_stats.msdu[tid]++; if (!tx->sta->sta.txq[0]) hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid); @@ -994,10 +994,10 @@ ieee80211_tx_h_stats(struct ieee80211_tx_data *tx) skb_queue_walk(&tx->skbs, skb) { ac = skb_get_queue_mapping(skb); - tx->sta->tx_bytes[ac] += skb->len; + tx->sta->tx_stats.bytes[ac] += skb->len; } if (ac >= 0) - tx->sta->tx_packets[ac]++; + tx->sta->tx_stats.packets[ac]++; return TX_CONTINUE; } @@ -2779,10 +2779,10 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata, } if (skb_shinfo(skb)->gso_size) - sta->tx_msdu[tid] += + sta->tx_stats.msdu[tid] += DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size); else - sta->tx_msdu[tid]++; + sta->tx_stats.msdu[tid]++; info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)]; @@ -2813,8 +2813,8 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata, /* statistics normally done by ieee80211_tx_h_stats (but that * has to consider fragmentation, so is more complex) */ - sta->tx_bytes[skb_get_queue_mapping(skb)] += skb->len; - sta->tx_packets[skb_get_queue_mapping(skb)]++; + sta->tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len; + sta->tx_stats.packets[skb_get_queue_mapping(skb)]++; if (fast_tx->pn_offs) { u64 pn; diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 60c4dbf92625..8274c86296f9 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1951,7 +1951,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) } } - ieee80211_recalc_ps(local, -1); + ieee80211_recalc_ps(local); /* * The sta might be in psm against the ap (e.g. because @@ -2042,9 +2042,13 @@ int ieee80211_reconfig(struct ieee80211_local *local) if (sched_scan_sdata && sched_scan_req) /* * Sched scan stopped, but we don't want to report it. Instead, - * we're trying to reschedule. + * we're trying to reschedule. However, if more than one scan + * plan was set, we cannot reschedule since we don't know which + * scan plan was currently running (and some scan plans may have + * already finished). 
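+ * In that case the stop is reported to userspace after all.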
*/ - if (__ieee80211_request_sched_scan_start(sched_scan_sdata, + if (sched_scan_req->n_scan_plans > 1 || + __ieee80211_request_sched_scan_start(sched_scan_sdata, sched_scan_req)) sched_scan_stopped = true; mutex_unlock(&local->mtx); @@ -3301,9 +3305,11 @@ void ieee80211_init_tx_queue(struct ieee80211_sub_if_data *sdata, if (sta) { txqi->txq.sta = &sta->sta; sta->sta.txq[tid] = &txqi->txq; + txqi->txq.tid = tid; txqi->txq.ac = ieee802_1d_to_ac[tid & 7]; } else { sdata->vif.txq = &txqi->txq; + txqi->txq.tid = 0; txqi->txq.ac = IEEE80211_AC_BE; } } diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index feb547dc8643..d824c38971ed 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c @@ -174,9 +174,12 @@ mic_fail_no_key: * a driver that supports HW encryption. Send up the key idx only if * the key is set. */ - mac80211_ev_michael_mic_failure(rx->sdata, - rx->key ? rx->key->conf.keyidx : -1, - (void *) skb->data, NULL, GFP_ATOMIC); + cfg80211_michael_mic_failure(rx->sdata->dev, hdr->addr2, + is_multicast_ether_addr(hdr->addr1) ? + NL80211_KEYTYPE_GROUP : + NL80211_KEYTYPE_PAIRWISE, + rx->key ? rx->key->conf.keyidx : -1, + NULL, GFP_ATOMIC); return RX_DROP_UNUSABLE; } diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index bb185a28de98..cc972e30355b 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -19,36 +19,13 @@ #include <net/ipv6.h> #include <net/addrconf.h> #endif +#include <net/nexthop.h> #include "internal.h" -#define LABEL_NOT_SPECIFIED (1<<20) -#define MAX_NEW_LABELS 2 - -/* This maximum ha length copied from the definition of struct neighbour */ -#define MAX_VIA_ALEN (ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))) - -enum mpls_payload_type { - MPT_UNSPEC, /* IPv4 or IPv6 */ - MPT_IPV4 = 4, - MPT_IPV6 = 6, - - /* Other types not implemented: - * - Pseudo-wire with or without control word (RFC4385) - * - GAL (RFC5586) - */ -}; - -struct mpls_route { /* next hop label forwarding entry */ - struct net_device __rcu *rt_dev; - struct rcu_head rt_rcu; - u32 rt_label[MAX_NEW_LABELS]; - u8 rt_protocol; /* routing protocol that set this entry */ - u8 rt_payload_type; - u8 rt_labels; - u8 rt_via_alen; - u8 rt_via_table; - u8 rt_via[0]; -}; +/* Maximum number of labels to look ahead at when selecting a path of + * a multipath route + */ +#define MAX_MP_SELECT_LABELS 4 static int zero = 0; static int label_limit = (1 << 20) - 1; @@ -80,10 +57,10 @@ bool mpls_output_possible(const struct net_device *dev) } EXPORT_SYMBOL_GPL(mpls_output_possible); -static unsigned int mpls_rt_header_size(const struct mpls_route *rt) +static unsigned int mpls_nh_header_size(const struct mpls_nh *nh) { /* The size of the layer 2.5 labels to be added for this route */ - return rt->rt_labels * sizeof(struct mpls_shim_hdr); + return nh->nh_labels * sizeof(struct mpls_shim_hdr); } unsigned int mpls_dev_mtu(const struct net_device *dev) @@ -105,6 +82,80 @@ bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu) } EXPORT_SYMBOL_GPL(mpls_pkt_too_big); +static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt, + struct sk_buff *skb, bool bos) +{ + struct mpls_entry_decoded dec; + struct mpls_shim_hdr *hdr; + bool eli_seen = false; + int label_index; + int nh_index = 0; + u32 hash = 0; + + /* No need to look further into packet if there's only + * one path + */ + if (rt->rt_nhn == 1) + goto out; + + for (label_index = 0; label_index < MAX_MP_SELECT_LABELS && !bos; + label_index++) { + if (!pskb_may_pull(skb, sizeof(*hdr) * label_index)) + break; + + /* Read and decode the current label */ + 
hdr = mpls_hdr(skb) + label_index; + dec = mpls_entry_decode(hdr); + + /* RFC6790 - reserved labels MUST NOT be used as keys + * for the load-balancing function + */ + if (likely(dec.label >= MPLS_LABEL_FIRST_UNRESERVED)) { + hash = jhash_1word(dec.label, hash); + + /* The entropy label follows the entropy label + * indicator, so this means that the entropy + * label was just added to the hash - no need to + * go any deeper either in the label stack or in the + * payload + */ + if (eli_seen) + break; + } else if (dec.label == MPLS_LABEL_ENTROPY) { + eli_seen = true; + } + + bos = dec.bos; + if (bos && pskb_may_pull(skb, sizeof(*hdr) * label_index + + sizeof(struct iphdr))) { + const struct iphdr *v4hdr; + + v4hdr = (const struct iphdr *)(mpls_hdr(skb) + + label_index); + if (v4hdr->version == 4) { + hash = jhash_3words(ntohl(v4hdr->saddr), + ntohl(v4hdr->daddr), + v4hdr->protocol, hash); + } else if (v4hdr->version == 6 && + pskb_may_pull(skb, sizeof(*hdr) * label_index + + sizeof(struct ipv6hdr))) { + const struct ipv6hdr *v6hdr; + + v6hdr = (const struct ipv6hdr *)(mpls_hdr(skb) + + label_index); + + hash = __ipv6_addr_jhash(&v6hdr->saddr, hash); + hash = __ipv6_addr_jhash(&v6hdr->daddr, hash); + hash = jhash_1word(v6hdr->nexthdr, hash); + } + } + } + + nh_index = hash % rt->rt_nhn; +out: + return &rt->rt_nh[nh_index]; +} + static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb, struct mpls_entry_decoded dec) { @@ -159,6 +210,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev, struct net *net = dev_net(dev); struct mpls_shim_hdr *hdr; struct mpls_route *rt; + struct mpls_nh *nh; struct mpls_entry_decoded dec; struct net_device *out_dev; struct mpls_dev *mdev; @@ -196,8 +248,12 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev, if (!rt) goto drop; + nh = mpls_select_multipath(rt, skb, dec.bos); + if (!nh) + goto drop; + /* Find the output device */ - out_dev = rcu_dereference(rt->rt_dev); + out_dev = rcu_dereference(nh->nh_dev); if (!mpls_output_possible(out_dev)) goto drop; @@ -212,7 +268,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev, dec.ttl -= 1; /* Verify the destination can hold the packet */ - new_header_size = mpls_rt_header_size(rt); + new_header_size = mpls_nh_header_size(nh); mtu = mpls_dev_mtu(out_dev); if (mpls_pkt_too_big(skb, mtu - new_header_size)) goto drop; @@ -240,13 +296,14 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev, /* Push the new labels */ hdr = mpls_hdr(skb); bos = dec.bos; - for (i = rt->rt_labels - 1; i >= 0; i--) { - hdr[i] = mpls_entry_encode(rt->rt_label[i], dec.ttl, 0, bos); + for (i = nh->nh_labels - 1; i >= 0; i--) { + hdr[i] = mpls_entry_encode(nh->nh_label[i], + dec.ttl, 0, bos); bos = false; } } - err = neigh_xmit(rt->rt_via_table, out_dev, rt->rt_via, skb); + err = neigh_xmit(nh->nh_via_table, out_dev, nh->nh_via, skb); if (err) net_dbg_ratelimited("%s: packet transmission failed: %d\n", __func__, err); @@ -270,24 +327,28 @@ static const struct nla_policy rtm_mpls_policy[RTA_MAX+1] = { struct mpls_route_config { u32 rc_protocol; u32 rc_ifindex; - u16 rc_via_table; - u16 rc_via_alen; + u8 rc_via_table; + u8 rc_via_alen; u8 rc_via[MAX_VIA_ALEN]; u32 rc_label; - u32 rc_output_labels; + u8 rc_output_labels; u32 rc_output_label[MAX_NEW_LABELS]; u32 rc_nlflags; enum mpls_payload_type rc_payload_type; struct nl_info rc_nlinfo; + struct rtnexthop *rc_mp; + int rc_mp_len; }; -static struct mpls_route *mpls_rt_alloc(size_t alen) +static struct mpls_route 
*mpls_rt_alloc(int num_nh) { struct mpls_route *rt; - rt = kzalloc(sizeof(*rt) + alen, GFP_KERNEL); + rt = kzalloc(sizeof(*rt) + (num_nh * sizeof(struct mpls_nh)), + GFP_KERNEL); if (rt) - rt->rt_via_alen = alen; + rt->rt_nhn = num_nh; + return rt; } @@ -312,25 +373,22 @@ static void mpls_notify_route(struct net *net, unsigned index, } static void mpls_route_update(struct net *net, unsigned index, - struct net_device *dev, struct mpls_route *new, + struct mpls_route *new, const struct nl_info *info) { struct mpls_route __rcu **platform_label; - struct mpls_route *rt, *old = NULL; + struct mpls_route *rt; ASSERT_RTNL(); platform_label = rtnl_dereference(net->mpls.platform_label); rt = rtnl_dereference(platform_label[index]); - if (!dev || (rt && (rtnl_dereference(rt->rt_dev) == dev))) { - rcu_assign_pointer(platform_label[index], new); - old = rt; - } + rcu_assign_pointer(platform_label[index], new); - mpls_notify_route(net, index, old, new, info); + mpls_notify_route(net, index, rt, new, info); /* If we removed a route free it now */ - mpls_rt_free(old); + mpls_rt_free(rt); } static unsigned find_free_label(struct net *net) @@ -406,40 +464,199 @@ static struct net_device *inet6_fib_lookup_dev(struct net *net, void *addr) #endif static struct net_device *find_outdev(struct net *net, - struct mpls_route_config *cfg) + struct mpls_nh *nh, int oif) { struct net_device *dev = NULL; - if (!cfg->rc_ifindex) { - switch (cfg->rc_via_table) { + if (!oif) { + switch (nh->nh_via_table) { case NEIGH_ARP_TABLE: - dev = inet_fib_lookup_dev(net, cfg->rc_via); + dev = inet_fib_lookup_dev(net, nh->nh_via); break; case NEIGH_ND_TABLE: - dev = inet6_fib_lookup_dev(net, cfg->rc_via); + dev = inet6_fib_lookup_dev(net, nh->nh_via); break; case NEIGH_LINK_TABLE: break; } } else { - dev = dev_get_by_index(net, cfg->rc_ifindex); + dev = dev_get_by_index(net, oif); } if (!dev) return ERR_PTR(-ENODEV); + /* The caller is holding rtnl anyways, so release the dev reference */ + dev_put(dev); + return dev; } +static int mpls_nh_assign_dev(struct net *net, struct mpls_nh *nh, int oif) +{ + struct net_device *dev = NULL; + int err = -ENODEV; + + dev = find_outdev(net, nh, oif); + if (IS_ERR(dev)) { + err = PTR_ERR(dev); + dev = NULL; + goto errout; + } + + /* Ensure this is a supported device */ + err = -EINVAL; + if (!mpls_dev_get(dev)) + goto errout; + + RCU_INIT_POINTER(nh->nh_dev, dev); + + return 0; + +errout: + return err; +} + +static int mpls_nh_build_from_cfg(struct mpls_route_config *cfg, + struct mpls_route *rt) +{ + struct net *net = cfg->rc_nlinfo.nl_net; + struct mpls_nh *nh = rt->rt_nh; + int err; + int i; + + if (!nh) + return -ENOMEM; + + err = -EINVAL; + /* Ensure only a supported number of labels are present */ + if (cfg->rc_output_labels > MAX_NEW_LABELS) + goto errout; + + nh->nh_labels = cfg->rc_output_labels; + for (i = 0; i < nh->nh_labels; i++) + nh->nh_label[i] = cfg->rc_output_label[i]; + + nh->nh_via_table = cfg->rc_via_table; + memcpy(nh->nh_via, cfg->rc_via, cfg->rc_via_alen); + nh->nh_via_alen = cfg->rc_via_alen; + + err = mpls_nh_assign_dev(net, nh, cfg->rc_ifindex); + if (err) + goto errout; + + return 0; + +errout: + return err; +} + +static int mpls_nh_build(struct net *net, struct mpls_nh *nh, + int oif, struct nlattr *via, struct nlattr *newdst) +{ + int err = -ENOMEM; + + if (!nh) + goto errout; + + if (newdst) { + err = nla_get_labels(newdst, MAX_NEW_LABELS, + &nh->nh_labels, nh->nh_label); + if (err) + goto errout; + } + + err = nla_get_via(via, &nh->nh_via_alen, &nh->nh_via_table, 
+ nh->nh_via); + if (err) + goto errout; + + err = mpls_nh_assign_dev(net, nh, oif); + if (err) + goto errout; + + return 0; + +errout: + return err; +} + +static int mpls_count_nexthops(struct rtnexthop *rtnh, int len) +{ + int nhs = 0; + int remaining = len; + + while (rtnh_ok(rtnh, remaining)) { + nhs++; + rtnh = rtnh_next(rtnh, &remaining); + } + + /* leftover implies invalid nexthop configuration, discard it */ + return remaining > 0 ? 0 : nhs; +} + +static int mpls_nh_build_multi(struct mpls_route_config *cfg, + struct mpls_route *rt) +{ + struct rtnexthop *rtnh = cfg->rc_mp; + struct nlattr *nla_via, *nla_newdst; + int remaining = cfg->rc_mp_len; + int nhs = 0; + int err = 0; + + change_nexthops(rt) { + int attrlen; + + nla_via = NULL; + nla_newdst = NULL; + + err = -EINVAL; + if (!rtnh_ok(rtnh, remaining)) + goto errout; + + /* neither weighted multipath nor any flags + * are supported + */ + if (rtnh->rtnh_hops || rtnh->rtnh_flags) + goto errout; + + attrlen = rtnh_attrlen(rtnh); + if (attrlen > 0) { + struct nlattr *attrs = rtnh_attrs(rtnh); + + nla_via = nla_find(attrs, attrlen, RTA_VIA); + nla_newdst = nla_find(attrs, attrlen, RTA_NEWDST); + } + + if (!nla_via) + goto errout; + + err = mpls_nh_build(cfg->rc_nlinfo.nl_net, nh, + rtnh->rtnh_ifindex, nla_via, + nla_newdst); + if (err) + goto errout; + + rtnh = rtnh_next(rtnh, &remaining); + nhs++; + } endfor_nexthops(rt); + + rt->rt_nhn = nhs; + + return 0; + +errout: + return err; +} + static int mpls_route_add(struct mpls_route_config *cfg) { struct mpls_route __rcu **platform_label; struct net *net = cfg->rc_nlinfo.nl_net; - struct net_device *dev = NULL; struct mpls_route *rt, *old; - unsigned index; - int i; int err = -EINVAL; + unsigned index; + int nhs = 1; /* default to one nexthop */ index = cfg->rc_label; @@ -457,27 +674,6 @@ static int mpls_route_add(struct mpls_route_config *cfg) if (index >= net->mpls.platform_labels) goto errout; - /* Ensure only a supported number of labels are present */ - if (cfg->rc_output_labels > MAX_NEW_LABELS) - goto errout; - - dev = find_outdev(net, cfg); - if (IS_ERR(dev)) { - err = PTR_ERR(dev); - dev = NULL; - goto errout; - } - - /* Ensure this is a supported device */ - err = -EINVAL; - if (!mpls_dev_get(dev)) - goto errout; - - err = -EINVAL; - if ((cfg->rc_via_table == NEIGH_LINK_TABLE) && - (dev->addr_len != cfg->rc_via_alen)) - goto errout; - /* Append makes no sense with mpls */ err = -EOPNOTSUPP; if (cfg->rc_nlflags & NLM_F_APPEND) @@ -497,28 +693,35 @@ static int mpls_route_add(struct mpls_route_config *cfg) if (!(cfg->rc_nlflags & NLM_F_CREATE) && !old) goto errout; + if (cfg->rc_mp) { + err = -EINVAL; + nhs = mpls_count_nexthops(cfg->rc_mp, cfg->rc_mp_len); + if (nhs == 0) + goto errout; + } + err = -ENOMEM; - rt = mpls_rt_alloc(cfg->rc_via_alen); + rt = mpls_rt_alloc(nhs); if (!rt) goto errout; - rt->rt_labels = cfg->rc_output_labels; - for (i = 0; i < rt->rt_labels; i++) - rt->rt_label[i] = cfg->rc_output_label[i]; rt->rt_protocol = cfg->rc_protocol; - RCU_INIT_POINTER(rt->rt_dev, dev); rt->rt_payload_type = cfg->rc_payload_type; - rt->rt_via_table = cfg->rc_via_table; - memcpy(rt->rt_via, cfg->rc_via, cfg->rc_via_alen); - mpls_route_update(net, index, NULL, rt, &cfg->rc_nlinfo); + if (cfg->rc_mp) + err = mpls_nh_build_multi(cfg, rt); + else + err = mpls_nh_build_from_cfg(cfg, rt); + if (err) + goto freert; + + mpls_route_update(net, index, rt, &cfg->rc_nlinfo); - dev_put(dev); return 0; +freert: + mpls_rt_free(rt); errout: - if (dev) - dev_put(dev); return err; } @@ -538,7 
+741,7 @@ static int mpls_route_del(struct mpls_route_config *cfg) if (index >= net->mpls.platform_labels) goto errout; - mpls_route_update(net, index, NULL, NULL, &cfg->rc_nlinfo); + mpls_route_update(net, index, NULL, &cfg->rc_nlinfo); err = 0; errout: @@ -635,9 +838,11 @@ static void mpls_ifdown(struct net_device *dev) struct mpls_route *rt = rtnl_dereference(platform_label[index]); if (!rt) continue; - if (rtnl_dereference(rt->rt_dev) != dev) - continue; - rt->rt_dev = NULL; + for_nexthops(rt) { + if (rtnl_dereference(nh->nh_dev) != dev) + continue; + nh->nh_dev = NULL; + } endfor_nexthops(rt); } mdev = mpls_dev_get(dev); @@ -736,7 +941,7 @@ int nla_put_labels(struct sk_buff *skb, int attrtype, EXPORT_SYMBOL_GPL(nla_put_labels); int nla_get_labels(const struct nlattr *nla, - u32 max_labels, u32 *labels, u32 label[]) + u32 max_labels, u8 *labels, u32 label[]) { unsigned len = nla_len(nla); unsigned nla_labels; @@ -781,6 +986,48 @@ int nla_get_labels(const struct nlattr *nla, } EXPORT_SYMBOL_GPL(nla_get_labels); +int nla_get_via(const struct nlattr *nla, u8 *via_alen, + u8 *via_table, u8 via_addr[]) +{ + struct rtvia *via = nla_data(nla); + int err = -EINVAL; + int alen; + + if (nla_len(nla) < offsetof(struct rtvia, rtvia_addr)) + goto errout; + alen = nla_len(nla) - + offsetof(struct rtvia, rtvia_addr); + if (alen > MAX_VIA_ALEN) + goto errout; + + /* Validate the address family */ + switch (via->rtvia_family) { + case AF_PACKET: + *via_table = NEIGH_LINK_TABLE; + break; + case AF_INET: + *via_table = NEIGH_ARP_TABLE; + if (alen != 4) + goto errout; + break; + case AF_INET6: + *via_table = NEIGH_ND_TABLE; + if (alen != 16) + goto errout; + break; + default: + /* Unsupported address family */ + goto errout; + } + + memcpy(via_addr, via->rtvia_addr, alen); + *via_alen = alen; + err = 0; + +errout: + return err; +} + static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh, struct mpls_route_config *cfg) { @@ -844,7 +1091,7 @@ static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh, break; case RTA_DST: { - u32 label_count; + u8 label_count; if (nla_get_labels(nla, 1, &label_count, &cfg->rc_label)) goto errout; @@ -857,35 +1104,15 @@ static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh, } case RTA_VIA: { - struct rtvia *via = nla_data(nla); - if (nla_len(nla) < offsetof(struct rtvia, rtvia_addr)) + if (nla_get_via(nla, &cfg->rc_via_alen, + &cfg->rc_via_table, cfg->rc_via)) goto errout; - cfg->rc_via_alen = nla_len(nla) - - offsetof(struct rtvia, rtvia_addr); - if (cfg->rc_via_alen > MAX_VIA_ALEN) - goto errout; - - /* Validate the address family */ - switch(via->rtvia_family) { - case AF_PACKET: - cfg->rc_via_table = NEIGH_LINK_TABLE; - break; - case AF_INET: - cfg->rc_via_table = NEIGH_ARP_TABLE; - if (cfg->rc_via_alen != 4) - goto errout; - break; - case AF_INET6: - cfg->rc_via_table = NEIGH_ND_TABLE; - if (cfg->rc_via_alen != 16) - goto errout; - break; - default: - /* Unsupported address family */ - goto errout; - } - - memcpy(cfg->rc_via, via->rtvia_addr, cfg->rc_via_alen); + break; + } + case RTA_MULTIPATH: + { + cfg->rc_mp = nla_data(nla); + cfg->rc_mp_len = nla_len(nla); break; } default: @@ -946,16 +1173,52 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event, rtm->rtm_type = RTN_UNICAST; rtm->rtm_flags = 0; - if (rt->rt_labels && - nla_put_labels(skb, RTA_NEWDST, rt->rt_labels, rt->rt_label)) - goto nla_put_failure; - if (nla_put_via(skb, rt->rt_via_table, rt->rt_via, rt->rt_via_alen)) - goto 
nla_put_failure; - dev = rtnl_dereference(rt->rt_dev); - if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex)) - goto nla_put_failure; if (nla_put_labels(skb, RTA_DST, 1, &label)) goto nla_put_failure; + if (rt->rt_nhn == 1) { + struct mpls_nh *nh = rt->rt_nh; + + if (nh->nh_labels && + nla_put_labels(skb, RTA_NEWDST, nh->nh_labels, + nh->nh_label)) + goto nla_put_failure; + if (nla_put_via(skb, nh->nh_via_table, nh->nh_via, + nh->nh_via_alen)) + goto nla_put_failure; + dev = rtnl_dereference(nh->nh_dev); + if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex)) + goto nla_put_failure; + } else { + struct rtnexthop *rtnh; + struct nlattr *mp; + + mp = nla_nest_start(skb, RTA_MULTIPATH); + if (!mp) + goto nla_put_failure; + + for_nexthops(rt) { + rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh)); + if (!rtnh) + goto nla_put_failure; + + dev = rtnl_dereference(nh->nh_dev); + if (dev) + rtnh->rtnh_ifindex = dev->ifindex; + if (nh->nh_labels && nla_put_labels(skb, RTA_NEWDST, + nh->nh_labels, + nh->nh_label)) + goto nla_put_failure; + if (nla_put_via(skb, nh->nh_via_table, + nh->nh_via, + nh->nh_via_alen)) + goto nla_put_failure; + + /* length of rtnetlink header + attributes */ + rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh; + } endfor_nexthops(rt); + + nla_nest_end(skb, mp); + } nlmsg_end(skb, nlh); return 0; @@ -1000,12 +1263,30 @@ static inline size_t lfib_nlmsg_size(struct mpls_route *rt) { size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg)) - + nla_total_size(2 + rt->rt_via_alen) /* RTA_VIA */ + nla_total_size(4); /* RTA_DST */ - if (rt->rt_labels) /* RTA_NEWDST */ - payload += nla_total_size(rt->rt_labels * 4); - if (rt->rt_dev) /* RTA_OIF */ - payload += nla_total_size(4); + + if (rt->rt_nhn == 1) { + struct mpls_nh *nh = rt->rt_nh; + + if (nh->nh_dev) + payload += nla_total_size(4); /* RTA_OIF */ + payload += nla_total_size(2 + nh->nh_via_alen); /* RTA_VIA */ + if (nh->nh_labels) /* RTA_NEWDST */ + payload += nla_total_size(nh->nh_labels * 4); + } else { + /* each nexthop is packed in an attribute */ + size_t nhsize = 0; + + for_nexthops(rt) { + nhsize += nla_total_size(sizeof(struct rtnexthop)); + nhsize += nla_total_size(2 + nh->nh_via_alen); + if (nh->nh_labels) + nhsize += nla_total_size(nh->nh_labels * 4); + } endfor_nexthops(rt); + /* nested attribute */ + payload += nla_total_size(nhsize); + } + return payload; } @@ -1057,25 +1338,25 @@ static int resize_platform_label_table(struct net *net, size_t limit) /* In case the predefined labels need to be populated */ if (limit > MPLS_LABEL_IPV4NULL) { struct net_device *lo = net->loopback_dev; - rt0 = mpls_rt_alloc(lo->addr_len); + rt0 = mpls_rt_alloc(1); if (!rt0) goto nort0; - RCU_INIT_POINTER(rt0->rt_dev, lo); + RCU_INIT_POINTER(rt0->rt_nh->nh_dev, lo); rt0->rt_protocol = RTPROT_KERNEL; rt0->rt_payload_type = MPT_IPV4; - rt0->rt_via_table = NEIGH_LINK_TABLE; - memcpy(rt0->rt_via, lo->dev_addr, lo->addr_len); + rt0->rt_nh->nh_via_table = NEIGH_LINK_TABLE; + memcpy(rt0->rt_nh->nh_via, lo->dev_addr, lo->addr_len); } if (limit > MPLS_LABEL_IPV6NULL) { struct net_device *lo = net->loopback_dev; - rt2 = mpls_rt_alloc(lo->addr_len); + rt2 = mpls_rt_alloc(1); if (!rt2) goto nort2; - RCU_INIT_POINTER(rt2->rt_dev, lo); + RCU_INIT_POINTER(rt2->rt_nh->nh_dev, lo); rt2->rt_protocol = RTPROT_KERNEL; rt2->rt_payload_type = MPT_IPV6; - rt2->rt_via_table = NEIGH_LINK_TABLE; - memcpy(rt2->rt_via, lo->dev_addr, lo->addr_len); + rt2->rt_nh->nh_via_table = NEIGH_LINK_TABLE; + memcpy(rt2->rt_nh->nh_via, lo->dev_addr, lo->addr_len); } rtnl_lock(); @@ -1085,7 
+1366,7 @@ static int resize_platform_label_table(struct net *net, size_t limit) /* Free any labels beyond the new table */ for (index = limit; index < old_limit; index++) - mpls_route_update(net, index, NULL, NULL, NULL); + mpls_route_update(net, index, NULL, NULL); /* Copy over the old labels */ cp_size = size; diff --git a/net/mpls/internal.h b/net/mpls/internal.h index 2681a4ba6c37..d7757be39877 100644 --- a/net/mpls/internal.h +++ b/net/mpls/internal.h @@ -21,6 +21,54 @@ struct mpls_dev { struct sk_buff; +#define LABEL_NOT_SPECIFIED (1 << 20) +#define MAX_NEW_LABELS 2 + +/* This maximum ha length copied from the definition of struct neighbour */ +#define MAX_VIA_ALEN (ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))) + +enum mpls_payload_type { + MPT_UNSPEC, /* IPv4 or IPv6 */ + MPT_IPV4 = 4, + MPT_IPV6 = 6, + + /* Other types not implemented: + * - Pseudo-wire with or without control word (RFC4385) + * - GAL (RFC5586) + */ +}; + +struct mpls_nh { /* next hop label forwarding entry */ + struct net_device __rcu *nh_dev; + u32 nh_label[MAX_NEW_LABELS]; + u8 nh_labels; + u8 nh_via_alen; + u8 nh_via_table; + u8 nh_via[MAX_VIA_ALEN]; +}; + +struct mpls_route { /* next hop label forwarding entry */ + struct rcu_head rt_rcu; + u8 rt_protocol; + u8 rt_payload_type; + int rt_nhn; + struct mpls_nh rt_nh[0]; +}; + +#define for_nexthops(rt) { \ + int nhsel; struct mpls_nh *nh; \ + for (nhsel = 0, nh = (rt)->rt_nh; \ + nhsel < (rt)->rt_nhn; \ + nh++, nhsel++) + +#define change_nexthops(rt) { \ + int nhsel; struct mpls_nh *nh; \ + for (nhsel = 0, nh = (struct mpls_nh *)((rt)->rt_nh); \ + nhsel < (rt)->rt_nhn; \ + nh++, nhsel++) + +#define endfor_nexthops(rt) } + static inline struct mpls_shim_hdr *mpls_hdr(const struct sk_buff *skb) { return (struct mpls_shim_hdr *)skb_network_header(skb); @@ -52,8 +100,10 @@ static inline struct mpls_entry_decoded mpls_entry_decode(struct mpls_shim_hdr * int nla_put_labels(struct sk_buff *skb, int attrtype, u8 labels, const u32 label[]); -int nla_get_labels(const struct nlattr *nla, u32 max_labels, u32 *labels, +int nla_get_labels(const struct nlattr *nla, u32 max_labels, u8 *labels, u32 label[]); +int nla_get_via(const struct nlattr *nla, u8 *via_alen, u8 *via_table, + u8 via[]); bool mpls_output_possible(const struct net_device *dev); unsigned int mpls_dev_mtu(const struct net_device *dev); bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu); diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 6799c8d470c6..80e1f09397c0 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -548,11 +548,11 @@ static int ip_tun_from_nlattr(const struct nlattr *attr, struct sw_flow_match *match, bool is_mask, bool log) { + bool ttl = false, ipv4 = false, ipv6 = false; + __be16 tun_flags = 0; + int opts_type = 0; struct nlattr *a; int rem; - bool ttl = false; - __be16 tun_flags = 0, ipv4 = false, ipv6 = false; - int opts_type = 0; nla_for_each_nested(a, attr, rem) { int type = nla_type(a); diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c index 2735e9c4a3b8..7a568ca8da54 100644 --- a/net/openvswitch/vport-geneve.c +++ b/net/openvswitch/vport-geneve.c @@ -128,7 +128,7 @@ static struct vport_ops ovs_geneve_vport_ops = { .create = geneve_create, .destroy = ovs_netdev_tunnel_destroy, .get_options = geneve_get_options, - .send = ovs_netdev_send, + .send = dev_queue_xmit, .owner = THIS_MODULE, .get_egress_tun_info = geneve_get_egress_tun_info, }; diff --git a/net/openvswitch/vport-gre.c 
b/net/openvswitch/vport-gre.c index 4d24481669c9..cdb758ab01cf 100644 --- a/net/openvswitch/vport-gre.c +++ b/net/openvswitch/vport-gre.c @@ -94,7 +94,7 @@ static int gre_get_egress_tun_info(struct vport *vport, struct sk_buff *skb, static struct vport_ops ovs_gre_vport_ops = { .type = OVS_VPORT_TYPE_GRE, .create = gre_create, - .send = ovs_netdev_send, + .send = dev_queue_xmit, .get_egress_tun_info = gre_get_egress_tun_info, .destroy = ovs_netdev_tunnel_destroy, .owner = THIS_MODULE, diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c index 388b8a6bf112..7f0a8bd08857 100644 --- a/net/openvswitch/vport-internal_dev.c +++ b/net/openvswitch/vport-internal_dev.c @@ -202,22 +202,21 @@ static void internal_dev_destroy(struct vport *vport) rtnl_unlock(); } -static void internal_dev_recv(struct vport *vport, struct sk_buff *skb) +static netdev_tx_t internal_dev_recv(struct sk_buff *skb) { - struct net_device *netdev = vport->dev; + struct net_device *netdev = skb->dev; struct pcpu_sw_netstats *stats; if (unlikely(!(netdev->flags & IFF_UP))) { kfree_skb(skb); netdev->stats.rx_dropped++; - return; + return NETDEV_TX_OK; } skb_dst_drop(skb); nf_reset(skb); secpath_reset(skb); - skb->dev = netdev; skb->pkt_type = PACKET_HOST; skb->protocol = eth_type_trans(skb, netdev); skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); @@ -229,6 +228,7 @@ static void internal_dev_recv(struct vport *vport, struct sk_buff *skb) u64_stats_update_end(&stats->syncp); netif_rx(skb); + return NETDEV_TX_OK; } static struct vport_ops ovs_internal_vport_ops = { diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c index f7e8dcce7ada..b327368a3848 100644 --- a/net/openvswitch/vport-netdev.c +++ b/net/openvswitch/vport-netdev.c @@ -190,37 +190,6 @@ void ovs_netdev_tunnel_destroy(struct vport *vport) } EXPORT_SYMBOL_GPL(ovs_netdev_tunnel_destroy); -static unsigned int packet_length(const struct sk_buff *skb) -{ - unsigned int length = skb->len - ETH_HLEN; - - if (skb->protocol == htons(ETH_P_8021Q)) - length -= VLAN_HLEN; - - return length; -} - -void ovs_netdev_send(struct vport *vport, struct sk_buff *skb) -{ - int mtu = vport->dev->mtu; - - if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) { - net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n", - vport->dev->name, - packet_length(skb), mtu); - vport->dev->stats.tx_errors++; - goto drop; - } - - skb->dev = vport->dev; - dev_queue_xmit(skb); - return; - -drop: - kfree_skb(skb); -} -EXPORT_SYMBOL_GPL(ovs_netdev_send); - /* Returns null if this device is not attached to a datapath. 
*/ struct vport *ovs_netdev_get_vport(struct net_device *dev) { @@ -235,7 +204,7 @@ static struct vport_ops ovs_netdev_vport_ops = { .type = OVS_VPORT_TYPE_NETDEV, .create = netdev_create, .destroy = netdev_destroy, - .send = ovs_netdev_send, + .send = dev_queue_xmit, }; int __init ovs_netdev_init(void) diff --git a/net/openvswitch/vport-netdev.h b/net/openvswitch/vport-netdev.h index bf22fcedbc69..19e29c12adcc 100644 --- a/net/openvswitch/vport-netdev.h +++ b/net/openvswitch/vport-netdev.h @@ -27,7 +27,6 @@ struct vport *ovs_netdev_get_vport(struct net_device *dev); struct vport *ovs_netdev_link(struct vport *vport, const char *name); -void ovs_netdev_send(struct vport *vport, struct sk_buff *skb); void ovs_netdev_detach_dev(struct vport *); int __init ovs_netdev_init(void); diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c index fb3cdb85905d..6f700710d413 100644 --- a/net/openvswitch/vport-vxlan.c +++ b/net/openvswitch/vport-vxlan.c @@ -170,7 +170,7 @@ static struct vport_ops ovs_vxlan_netdev_vport_ops = { .create = vxlan_create, .destroy = ovs_netdev_tunnel_destroy, .get_options = vxlan_get_options, - .send = ovs_netdev_send, + .send = dev_queue_xmit, .get_egress_tun_info = vxlan_get_egress_tun_info, }; diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index 12a36ac21eda..ef19d0b77d13 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c @@ -537,3 +537,33 @@ int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb, return vport->ops->get_egress_tun_info(vport, skb, upcall); } + +static unsigned int packet_length(const struct sk_buff *skb) +{ + unsigned int length = skb->len - ETH_HLEN; + + if (skb->protocol == htons(ETH_P_8021Q)) + length -= VLAN_HLEN; + + return length; +} + +void ovs_vport_send(struct vport *vport, struct sk_buff *skb) +{ + int mtu = vport->dev->mtu; + + if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) { + net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n", + vport->dev->name, + packet_length(skb), mtu); + vport->dev->stats.tx_errors++; + goto drop; + } + + skb->dev = vport->dev; + vport->ops->send(skb); + return; + +drop: + kfree_skb(skb); +} diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h index a413f3ae6a7b..885607f28d56 100644 --- a/net/openvswitch/vport.h +++ b/net/openvswitch/vport.h @@ -153,7 +153,7 @@ struct vport_ops { int (*set_options)(struct vport *, struct nlattr *); int (*get_options)(const struct vport *, struct sk_buff *); - void (*send)(struct vport *, struct sk_buff *); + netdev_tx_t (*send) (struct sk_buff *skb); int (*get_egress_tun_info)(struct vport *, struct sk_buff *, struct dp_upcall_info *upcall); @@ -234,9 +234,6 @@ static inline struct rtable *ovs_tunnel_route_lookup(struct net *net, return rt; } -static inline void ovs_vport_send(struct vport *vport, struct sk_buff *skb) -{ - vport->ops->send(vport, skb); -} +void ovs_vport_send(struct vport *vport, struct sk_buff *skb); #endif /* vport.h */ diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig index 4f5543dd2524..da72ed32f143 100644 --- a/net/wireless/Kconfig +++ b/net/wireless/Kconfig @@ -174,6 +174,16 @@ config CFG80211_INTERNAL_REGDB Most distributions have a CRDA package. So if unsure, say N. +config CFG80211_CRDA_SUPPORT + bool "support CRDA" if CFG80211_INTERNAL_REGDB + default y + depends on CFG80211 + help + You should enable this option unless you know for sure you have no + need for it, for example when using the internal regdb (above). + + If unsure, say Y.
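For example, an embedded system that ships the internal regulatory database could turn the new option off; a sketch of the resulting config fragment (symbol names as introduced above):

	CONFIG_CFG80211=y
	CONFIG_CFG80211_INTERNAL_REGDB=y
	# CONFIG_CFG80211_CRDA_SUPPORT is not set

Note that the conditional prompt (bool "support CRDA" if CFG80211_INTERNAL_REGDB) only makes the option user-selectable when the internal regdb is built in; otherwise it keeps its default y.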
+ config CFG80211_WEXT bool "cfg80211 wireless extensions compatibility" if !CFG80211_WEXT_EXPORT depends on CFG80211 diff --git a/net/wireless/core.c b/net/wireless/core.c index f223026ddb03..b0915515640e 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -461,6 +461,9 @@ use_default_name: rdev->wiphy.max_num_csa_counters = 1; + rdev->wiphy.max_sched_scan_plans = 1; + rdev->wiphy.max_sched_scan_plan_interval = U32_MAX; + return &rdev->wiphy; } EXPORT_SYMBOL(wiphy_new_nm); @@ -636,7 +639,7 @@ int wiphy_register(struct wiphy *wiphy) if (WARN_ON(!sband->n_channels)) return -EINVAL; /* - * on 60gHz band, there are no legacy rates, so + * on 60GHz band, there are no legacy rates, so * n_bitrates is 0 */ if (WARN_ON(band != IEEE80211_BAND_60GHZ && diff --git a/net/wireless/core.h b/net/wireless/core.h index b9d5bc8c148d..a618b4b86fa4 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -137,6 +137,7 @@ struct cfg80211_internal_bss { struct list_head list; struct list_head hidden_list; struct rb_node rbn; + u64 ts_boottime; unsigned long ts; unsigned long refcount; atomic_t hold; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index f05ba8b7af61..d693c9d031fc 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -479,6 +479,12 @@ nl80211_match_policy[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1] = { [NL80211_SCHED_SCAN_MATCH_ATTR_RSSI] = { .type = NLA_U32 }, }; +static const struct nla_policy +nl80211_plan_policy[NL80211_SCHED_SCAN_PLAN_MAX + 1] = { + [NL80211_SCHED_SCAN_PLAN_INTERVAL] = { .type = NLA_U32 }, + [NL80211_SCHED_SCAN_PLAN_ITERATIONS] = { .type = NLA_U32 }, +}; + static int nl80211_prepare_wdev_dump(struct sk_buff *skb, struct netlink_callback *cb, struct cfg80211_registered_device **rdev, @@ -1304,7 +1310,13 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, nla_put_u16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN, rdev->wiphy.max_sched_scan_ie_len) || nla_put_u8(msg, NL80211_ATTR_MAX_MATCH_SETS, - rdev->wiphy.max_match_sets)) + rdev->wiphy.max_match_sets) || + nla_put_u32(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_PLANS, + rdev->wiphy.max_sched_scan_plans) || + nla_put_u32(msg, NL80211_ATTR_MAX_SCAN_PLAN_INTERVAL, + rdev->wiphy.max_sched_scan_plan_interval) || + nla_put_u32(msg, NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS, + rdev->wiphy.max_sched_scan_plan_iterations)) goto nla_put_failure; if ((rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) && @@ -4932,56 +4944,6 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info) return err; } -static const struct nla_policy reg_rule_policy[NL80211_REG_RULE_ATTR_MAX + 1] = { - [NL80211_ATTR_REG_RULE_FLAGS] = { .type = NLA_U32 }, - [NL80211_ATTR_FREQ_RANGE_START] = { .type = NLA_U32 }, - [NL80211_ATTR_FREQ_RANGE_END] = { .type = NLA_U32 }, - [NL80211_ATTR_FREQ_RANGE_MAX_BW] = { .type = NLA_U32 }, - [NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN] = { .type = NLA_U32 }, - [NL80211_ATTR_POWER_RULE_MAX_EIRP] = { .type = NLA_U32 }, - [NL80211_ATTR_DFS_CAC_TIME] = { .type = NLA_U32 }, -}; - -static int parse_reg_rule(struct nlattr *tb[], - struct ieee80211_reg_rule *reg_rule) -{ - struct ieee80211_freq_range *freq_range = ®_rule->freq_range; - struct ieee80211_power_rule *power_rule = ®_rule->power_rule; - - if (!tb[NL80211_ATTR_REG_RULE_FLAGS]) - return -EINVAL; - if (!tb[NL80211_ATTR_FREQ_RANGE_START]) - return -EINVAL; - if (!tb[NL80211_ATTR_FREQ_RANGE_END]) - return -EINVAL; - if (!tb[NL80211_ATTR_FREQ_RANGE_MAX_BW]) - return -EINVAL; - if (!tb[NL80211_ATTR_POWER_RULE_MAX_EIRP]) - return -EINVAL; 
- - reg_rule->flags = nla_get_u32(tb[NL80211_ATTR_REG_RULE_FLAGS]); - - freq_range->start_freq_khz = - nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_START]); - freq_range->end_freq_khz = - nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_END]); - freq_range->max_bandwidth_khz = - nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_MAX_BW]); - - power_rule->max_eirp = - nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_EIRP]); - - if (tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]) - power_rule->max_antenna_gain = - nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]); - - if (tb[NL80211_ATTR_DFS_CAC_TIME]) - reg_rule->dfs_cac_ms = - nla_get_u32(tb[NL80211_ATTR_DFS_CAC_TIME]); - - return 0; -} - static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info) { char *data = NULL; @@ -5613,6 +5575,57 @@ out_err: return err; } +#ifdef CONFIG_CFG80211_CRDA_SUPPORT +static const struct nla_policy reg_rule_policy[NL80211_REG_RULE_ATTR_MAX + 1] = { + [NL80211_ATTR_REG_RULE_FLAGS] = { .type = NLA_U32 }, + [NL80211_ATTR_FREQ_RANGE_START] = { .type = NLA_U32 }, + [NL80211_ATTR_FREQ_RANGE_END] = { .type = NLA_U32 }, + [NL80211_ATTR_FREQ_RANGE_MAX_BW] = { .type = NLA_U32 }, + [NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN] = { .type = NLA_U32 }, + [NL80211_ATTR_POWER_RULE_MAX_EIRP] = { .type = NLA_U32 }, + [NL80211_ATTR_DFS_CAC_TIME] = { .type = NLA_U32 }, +}; + +static int parse_reg_rule(struct nlattr *tb[], + struct ieee80211_reg_rule *reg_rule) +{ + struct ieee80211_freq_range *freq_range = ®_rule->freq_range; + struct ieee80211_power_rule *power_rule = ®_rule->power_rule; + + if (!tb[NL80211_ATTR_REG_RULE_FLAGS]) + return -EINVAL; + if (!tb[NL80211_ATTR_FREQ_RANGE_START]) + return -EINVAL; + if (!tb[NL80211_ATTR_FREQ_RANGE_END]) + return -EINVAL; + if (!tb[NL80211_ATTR_FREQ_RANGE_MAX_BW]) + return -EINVAL; + if (!tb[NL80211_ATTR_POWER_RULE_MAX_EIRP]) + return -EINVAL; + + reg_rule->flags = nla_get_u32(tb[NL80211_ATTR_REG_RULE_FLAGS]); + + freq_range->start_freq_khz = + nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_START]); + freq_range->end_freq_khz = + nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_END]); + freq_range->max_bandwidth_khz = + nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_MAX_BW]); + + power_rule->max_eirp = + nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_EIRP]); + + if (tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]) + power_rule->max_antenna_gain = + nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]); + + if (tb[NL80211_ATTR_DFS_CAC_TIME]) + reg_rule->dfs_cac_ms = + nla_get_u32(tb[NL80211_ATTR_DFS_CAC_TIME]); + + return 0; +} + static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info) { struct nlattr *tb[NL80211_REG_RULE_ATTR_MAX + 1]; @@ -5689,6 +5702,7 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info) kfree(rd); return r; } +#endif /* CONFIG_CFG80211_CRDA_SUPPORT */ static int validate_scan_freqs(struct nlattr *freqs) { @@ -5974,14 +5988,100 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) return err; } +static int +nl80211_parse_sched_scan_plans(struct wiphy *wiphy, int n_plans, + struct cfg80211_sched_scan_request *request, + struct nlattr **attrs) +{ + int tmp, err, i = 0; + struct nlattr *attr; + + if (!attrs[NL80211_ATTR_SCHED_SCAN_PLANS]) { + u32 interval; + + /* + * If scan plans are not specified, + * %NL80211_ATTR_SCHED_SCAN_INTERVAL must be specified. In this + * case one scan plan will be set with the specified scan + * interval and infinite number of iterations. 
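+ * The interval is rounded up from milliseconds to seconds and
+ * clamped to the wiphy's max_sched_scan_plan_interval.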
+ */ + if (!attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]) + return -EINVAL; + + interval = nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]); + if (!interval) + return -EINVAL; + + request->scan_plans[0].interval = + DIV_ROUND_UP(interval, MSEC_PER_SEC); + if (!request->scan_plans[0].interval) + return -EINVAL; + + if (request->scan_plans[0].interval > + wiphy->max_sched_scan_plan_interval) + request->scan_plans[0].interval = + wiphy->max_sched_scan_plan_interval; + + return 0; + } + + nla_for_each_nested(attr, attrs[NL80211_ATTR_SCHED_SCAN_PLANS], tmp) { + struct nlattr *plan[NL80211_SCHED_SCAN_PLAN_MAX + 1]; + + if (WARN_ON(i >= n_plans)) + return -EINVAL; + + err = nla_parse(plan, NL80211_SCHED_SCAN_PLAN_MAX, + nla_data(attr), nla_len(attr), + nl80211_plan_policy); + if (err) + return err; + + if (!plan[NL80211_SCHED_SCAN_PLAN_INTERVAL]) + return -EINVAL; + + request->scan_plans[i].interval = + nla_get_u32(plan[NL80211_SCHED_SCAN_PLAN_INTERVAL]); + if (!request->scan_plans[i].interval || + request->scan_plans[i].interval > + wiphy->max_sched_scan_plan_interval) + return -EINVAL; + + if (plan[NL80211_SCHED_SCAN_PLAN_ITERATIONS]) { + request->scan_plans[i].iterations = + nla_get_u32(plan[NL80211_SCHED_SCAN_PLAN_ITERATIONS]); + if (!request->scan_plans[i].iterations || + (request->scan_plans[i].iterations > + wiphy->max_sched_scan_plan_iterations)) + return -EINVAL; + } else if (i < n_plans - 1) { + /* + * All scan plans but the last one must specify + * a finite number of iterations + */ + return -EINVAL; + } + + i++; + } + + /* + * The last scan plan must not specify the number of + * iterations, it is supposed to run infinitely + */ + if (request->scan_plans[n_plans - 1].iterations) + return -EINVAL; + + return 0; +} + static struct cfg80211_sched_scan_request * nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev, struct nlattr **attrs) { struct cfg80211_sched_scan_request *request; struct nlattr *attr; - int err, tmp, n_ssids = 0, n_match_sets = 0, n_channels, i; - u32 interval; + int err, tmp, n_ssids = 0, n_match_sets = 0, n_channels, i, n_plans = 0; enum ieee80211_band band; size_t ie_len; struct nlattr *tb[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1]; @@ -5990,13 +6090,6 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev, if (!is_valid_ie_attr(attrs[NL80211_ATTR_IE])) return ERR_PTR(-EINVAL); - if (!attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]) - return ERR_PTR(-EINVAL); - - interval = nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]); - if (interval == 0) - return ERR_PTR(-EINVAL); - if (attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { n_channels = validate_scan_freqs( attrs[NL80211_ATTR_SCAN_FREQUENCIES]); @@ -6060,9 +6153,37 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev, if (ie_len > wiphy->max_sched_scan_ie_len) return ERR_PTR(-EINVAL); + if (attrs[NL80211_ATTR_SCHED_SCAN_PLANS]) { + /* + * NL80211_ATTR_SCHED_SCAN_INTERVAL must not be specified since + * each scan plan already specifies its own interval + */ + if (attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]) + return ERR_PTR(-EINVAL); + + nla_for_each_nested(attr, + attrs[NL80211_ATTR_SCHED_SCAN_PLANS], tmp) + n_plans++; + } else { + /* + * The scan interval attribute is kept for backward + * compatibility. If no scan plans are specified and sched scan + * interval is specified, one scan plan will be set with this + * scan interval and infinite number of iterations. 
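+ * A request must carry at least one plan and at most
+ * max_sched_scan_plans of them (checked further below).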
+ */ + if (!attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]) + return ERR_PTR(-EINVAL); + + n_plans = 1; + } + + if (!n_plans || n_plans > wiphy->max_sched_scan_plans) + return ERR_PTR(-EINVAL); + request = kzalloc(sizeof(*request) + sizeof(*request->ssids) * n_ssids + sizeof(*request->match_sets) * n_match_sets + + sizeof(*request->scan_plans) * n_plans + sizeof(*request->channels) * n_channels + ie_len, GFP_KERNEL); if (!request) @@ -6090,6 +6211,18 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev, } request->n_match_sets = n_match_sets; + if (n_match_sets) + request->scan_plans = (void *)(request->match_sets + + n_match_sets); + else if (request->ie) + request->scan_plans = (void *)(request->ie + ie_len); + else if (n_ssids) + request->scan_plans = (void *)(request->ssids + n_ssids); + else + request->scan_plans = (void *)(request->channels + n_channels); + + request->n_scan_plans = n_plans; + i = 0; if (attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { /* user specified, bail out if channel not found */ @@ -6252,7 +6385,10 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev, request->delay = nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_DELAY]); - request->interval = interval; + err = nl80211_parse_sched_scan_plans(wiphy, n_plans, request, attrs); + if (err) + goto out_free; + request->scan_start = jiffies; return request; @@ -6605,6 +6741,11 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb, jiffies_to_msecs(jiffies - intbss->ts))) goto nla_put_failure; + if (intbss->ts_boottime && + nla_put_u64(msg, NL80211_BSS_LAST_SEEN_BOOTTIME, + intbss->ts_boottime)) + goto nla_put_failure; + switch (rdev->wiphy.signal_type) { case CFG80211_SIGNAL_TYPE_MBM: if (nla_put_u32(msg, NL80211_BSS_SIGNAL_MBM, res->signal)) @@ -8845,7 +8986,7 @@ static int nl80211_send_wowlan_tcp(struct sk_buff *msg, static int nl80211_send_wowlan_nd(struct sk_buff *msg, struct cfg80211_sched_scan_request *req) { - struct nlattr *nd, *freqs, *matches, *match; + struct nlattr *nd, *freqs, *matches, *match, *scan_plans, *scan_plan; int i; if (!req) @@ -8855,7 +8996,9 @@ static int nl80211_send_wowlan_nd(struct sk_buff *msg, if (!nd) return -ENOBUFS; - if (nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_INTERVAL, req->interval)) + if (req->n_scan_plans == 1 && + nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_INTERVAL, + req->scan_plans[0].interval * 1000)) return -ENOBUFS; if (nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_DELAY, req->delay)) @@ -8882,6 +9025,23 @@ static int nl80211_send_wowlan_nd(struct sk_buff *msg, nla_nest_end(msg, matches); } + scan_plans = nla_nest_start(msg, NL80211_ATTR_SCHED_SCAN_PLANS); + if (!scan_plans) + return -ENOBUFS; + + for (i = 0; i < req->n_scan_plans; i++) { + scan_plan = nla_nest_start(msg, i + 1); + if (!scan_plan || + nla_put_u32(msg, NL80211_SCHED_SCAN_PLAN_INTERVAL, + req->scan_plans[i].interval) || + (req->scan_plans[i].iterations && + nla_put_u32(msg, NL80211_SCHED_SCAN_PLAN_ITERATIONS, + req->scan_plans[i].iterations))) + return -ENOBUFS; + nla_nest_end(msg, scan_plan); + } + nla_nest_end(msg, scan_plans); + nla_nest_end(msg, nd); return 0; @@ -10737,6 +10897,7 @@ static const struct genl_ops nl80211_ops[] = { .internal_flags = NL80211_FLAG_NEED_RTNL, /* can be retrieved by unprivileged users */ }, +#ifdef CONFIG_CFG80211_CRDA_SUPPORT { .cmd = NL80211_CMD_SET_REG, .doit = nl80211_set_reg, @@ -10744,6 +10905,7 @@ static const struct genl_ops nl80211_ops[] = { .flags = GENL_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_RTNL, }, +#endif { 
 		.cmd = NL80211_CMD_REQ_SET_REG,
 		.doit = nl80211_req_set_reg,
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 7258246b7458..2e8d6f39ed56 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -135,10 +135,7 @@ static spinlock_t reg_indoor_lock;
 /* Used to track the userspace process controlling the indoor setting */
 static u32 reg_is_indoor_portid;
 
-/* Max number of consecutive attempts to communicate with CRDA */
-#define REG_MAX_CRDA_TIMEOUTS 10
-
-static u32 reg_crda_timeouts;
+static void restore_regulatory_settings(bool reset_user);
 
 static const struct ieee80211_regdomain *get_cfg80211_regdom(void)
 {
@@ -226,9 +223,6 @@ static DECLARE_DELAYED_WORK(reg_check_chans, reg_check_chans_work);
 static void reg_todo(struct work_struct *work);
 static DECLARE_WORK(reg_work, reg_todo);
 
-static void reg_timeout_work(struct work_struct *work);
-static DECLARE_DELAYED_WORK(reg_timeout, reg_timeout_work);
-
 /* We keep a static world regulatory domain in case of the absence of CRDA */
 static const struct ieee80211_regdomain world_regdom = {
 	.n_reg_rules = 8,
@@ -262,7 +256,7 @@ static const struct ieee80211_regdomain world_regdom = {
 		REG_RULE(5745-10, 5825+10, 80, 6, 20,
 			NL80211_RRF_NO_IR),
 
-		/* IEEE 802.11ad (60gHz), channels 1..3 */
+		/* IEEE 802.11ad (60GHz), channels 1..3 */
 		REG_RULE(56160+2160*1-1080, 56160+2160*3+1080, 2160, 0, 0, 0),
 	}
 };
@@ -279,6 +273,9 @@ MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
 
 static void reg_free_request(struct regulatory_request *request)
 {
+	if (request == &core_request_world)
+		return;
+
 	if (request != get_last_request())
 		kfree(request);
 }
@@ -453,68 +450,70 @@ reg_copy_regd(const struct ieee80211_regdomain *src_regd)
 }
 
 #ifdef CONFIG_CFG80211_INTERNAL_REGDB
-struct reg_regdb_search_request {
-	char alpha2[2];
+struct reg_regdb_apply_request {
 	struct list_head list;
+	const struct ieee80211_regdomain *regdom;
 };
 
-static LIST_HEAD(reg_regdb_search_list);
-static DEFINE_MUTEX(reg_regdb_search_mutex);
+static LIST_HEAD(reg_regdb_apply_list);
+static DEFINE_MUTEX(reg_regdb_apply_mutex);
 
-static void reg_regdb_search(struct work_struct *work)
+static void reg_regdb_apply(struct work_struct *work)
 {
-	struct reg_regdb_search_request *request;
-	const struct ieee80211_regdomain *curdom, *regdom = NULL;
-	int i;
+	struct reg_regdb_apply_request *request;
 
 	rtnl_lock();
 
-	mutex_lock(&reg_regdb_search_mutex);
-	while (!list_empty(&reg_regdb_search_list)) {
-		request = list_first_entry(&reg_regdb_search_list,
-					   struct reg_regdb_search_request,
+	mutex_lock(&reg_regdb_apply_mutex);
+	while (!list_empty(&reg_regdb_apply_list)) {
+		request = list_first_entry(&reg_regdb_apply_list,
+					   struct reg_regdb_apply_request,
 					   list);
 		list_del(&request->list);
 
-		for (i = 0; i < reg_regdb_size; i++) {
-			curdom = reg_regdb[i];
-
-			if (alpha2_equal(request->alpha2, curdom->alpha2)) {
-				regdom = reg_copy_regd(curdom);
-				break;
-			}
-		}
-
+		set_regdom(request->regdom, REGD_SOURCE_INTERNAL_DB);
 		kfree(request);
 	}
-	mutex_unlock(&reg_regdb_search_mutex);
-
-	if (!IS_ERR_OR_NULL(regdom))
-		set_regdom(regdom, REGD_SOURCE_INTERNAL_DB);
+	mutex_unlock(&reg_regdb_apply_mutex);
 
 	rtnl_unlock();
 }
 
-static DECLARE_WORK(reg_regdb_work, reg_regdb_search);
+static DECLARE_WORK(reg_regdb_work, reg_regdb_apply);
 
-static void reg_regdb_query(const char *alpha2)
+static int reg_query_builtin(const char *alpha2)
 {
-	struct reg_regdb_search_request *request;
+	const struct ieee80211_regdomain *regdom = NULL;
+	struct reg_regdb_apply_request *request;
+	unsigned int i;
 
-	if (!alpha2)
-		return;
+	for (i = 0; i < reg_regdb_size; i++) {
+		if (alpha2_equal(alpha2, reg_regdb[i]->alpha2)) {
+			regdom = reg_regdb[i];
+			break;
+		}
+	}
+
+	if (!regdom)
+		return -ENODATA;
 
-	request = kzalloc(sizeof(struct reg_regdb_search_request), GFP_KERNEL);
+	request = kzalloc(sizeof(struct reg_regdb_apply_request), GFP_KERNEL);
 	if (!request)
-		return;
+		return -ENOMEM;
 
-	memcpy(request->alpha2, alpha2, 2);
+	request->regdom = reg_copy_regd(regdom);
+	if (IS_ERR_OR_NULL(request->regdom)) {
+		kfree(request);
+		return -ENOMEM;
+	}
 
-	mutex_lock(&reg_regdb_search_mutex);
-	list_add_tail(&request->list, &reg_regdb_search_list);
-	mutex_unlock(&reg_regdb_search_mutex);
+	mutex_lock(&reg_regdb_apply_mutex);
+	list_add_tail(&request->list, &reg_regdb_apply_list);
+	mutex_unlock(&reg_regdb_apply_mutex);
 
 	schedule_work(&reg_regdb_work);
+
+	return 0;
 }
 
 /* Feel free to add any other sanity checks here */
@@ -525,9 +524,45 @@ static void reg_regdb_size_check(void)
 {
 }
 #else
 static inline void reg_regdb_size_check(void) {}
-static inline void reg_regdb_query(const char *alpha2) {}
+static inline int reg_query_builtin(const char *alpha2)
+{
+	return -ENODATA;
+}
 #endif /* CONFIG_CFG80211_INTERNAL_REGDB */
 
+#ifdef CONFIG_CFG80211_CRDA_SUPPORT
+/* Max number of consecutive attempts to communicate with CRDA */
+#define REG_MAX_CRDA_TIMEOUTS 10
+
+static u32 reg_crda_timeouts;
+
+static void crda_timeout_work(struct work_struct *work);
+static DECLARE_DELAYED_WORK(crda_timeout, crda_timeout_work);
+
+static void crda_timeout_work(struct work_struct *work)
+{
+	REG_DBG_PRINT("Timeout while waiting for CRDA to reply, restoring regulatory settings\n");
+	rtnl_lock();
+	reg_crda_timeouts++;
+	restore_regulatory_settings(true);
+	rtnl_unlock();
+}
+
+static void cancel_crda_timeout(void)
+{
+	cancel_delayed_work(&crda_timeout);
+}
+
+static void cancel_crda_timeout_sync(void)
+{
+	cancel_delayed_work_sync(&crda_timeout);
+}
+
+static void reset_crda_timeouts(void)
+{
+	reg_crda_timeouts = 0;
+}
+
 /*
  * This lets us keep regulatory code which is updated on a regulatory
  * basis in userspace.
@@ -536,13 +571,11 @@ static int call_crda(const char *alpha2)
 {
	char country[12];
	char *env[] = { country, NULL };
+	int ret;
 
	snprintf(country, sizeof(country), "COUNTRY=%c%c",
		 alpha2[0], alpha2[1]);
 
-	/* query internal regulatory database (if it exists) */
-	reg_regdb_query(alpha2);
-
	if (reg_crda_timeouts > REG_MAX_CRDA_TIMEOUTS) {
		pr_debug("Exceeded CRDA call max attempts. Not calling CRDA\n");
		return -EINVAL;
@@ -554,18 +587,34 @@ static int call_crda(const char *alpha2)
	else
		pr_debug("Calling CRDA to update world regulatory domain\n");
 
-	return kobject_uevent_env(&reg_pdev->dev.kobj, KOBJ_CHANGE, env);
+	ret = kobject_uevent_env(&reg_pdev->dev.kobj, KOBJ_CHANGE, env);
+	if (ret)
+		return ret;
+
+	queue_delayed_work(system_power_efficient_wq,
+			   &crda_timeout, msecs_to_jiffies(3142));
+	return 0;
+}
+#else
+static inline void cancel_crda_timeout(void) {}
+static inline void cancel_crda_timeout_sync(void) {}
+static inline void reset_crda_timeouts(void) {}
+static inline int call_crda(const char *alpha2)
+{
+	return -ENODATA;
 }
+#endif /* CONFIG_CFG80211_CRDA_SUPPORT */
 
-static enum reg_request_treatment
-reg_call_crda(struct regulatory_request *request)
+static bool reg_query_database(struct regulatory_request *request)
 {
-	if (call_crda(request->alpha2))
-		return REG_REQ_IGNORE;
+	/* query internal regulatory database (if it exists) */
+	if (reg_query_builtin(request->alpha2) == 0)
+		return true;
 
-	queue_delayed_work(system_power_efficient_wq,
-			   &reg_timeout, msecs_to_jiffies(3142));
-	return REG_REQ_OK;
+	if (call_crda(request->alpha2) == 0)
+		return true;
+
+	return false;
 }
 
 bool reg_is_valid_request(const char *alpha2)
@@ -1081,11 +1130,11 @@ const char *reg_initiator_name(enum nl80211_reg_initiator initiator)
 }
 EXPORT_SYMBOL(reg_initiator_name);
 
-#ifdef CONFIG_CFG80211_REG_DEBUG
 static void chan_reg_rule_print_dbg(const struct ieee80211_regdomain *regd,
				    struct ieee80211_channel *chan,
				    const struct ieee80211_reg_rule *reg_rule)
 {
+#ifdef CONFIG_CFG80211_REG_DEBUG
	const struct ieee80211_power_rule *power_rule;
	const struct ieee80211_freq_range *freq_range;
	char max_antenna_gain[32], bw[32];
@@ -1096,7 +1145,7 @@ static void chan_reg_rule_print_dbg(const struct ieee80211_regdomain *regd,
	if (!power_rule->max_antenna_gain)
		snprintf(max_antenna_gain, sizeof(max_antenna_gain), "N/A");
	else
-		snprintf(max_antenna_gain, sizeof(max_antenna_gain), "%d",
+		snprintf(max_antenna_gain, sizeof(max_antenna_gain), "%d mBi",
			 power_rule->max_antenna_gain);
 
	if (reg_rule->flags & NL80211_RRF_AUTO_BW)
@@ -1110,19 +1159,12 @@ static void chan_reg_rule_print_dbg(const struct ieee80211_regdomain *regd,
	REG_DBG_PRINT("Updating information on frequency %d MHz with regulatory rule:\n",
		      chan->center_freq);
 
-	REG_DBG_PRINT("%d KHz - %d KHz @ %s), (%s mBi, %d mBm)\n",
+	REG_DBG_PRINT("(%d KHz - %d KHz @ %s), (%s, %d mBm)\n",
		      freq_range->start_freq_khz, freq_range->end_freq_khz,
		      bw, max_antenna_gain,
		      power_rule->max_eirp);
-}
-#else
-static void chan_reg_rule_print_dbg(const struct ieee80211_regdomain *regd,
-				    struct ieee80211_channel *chan,
-				    const struct ieee80211_reg_rule *reg_rule)
-{
-	return;
-}
 #endif
+}
 
 /*
  * Note that right now we assume the desired channel bandwidth
@@ -1311,7 +1353,8 @@ static bool reg_dev_ignore_cell_hint(struct wiphy *wiphy)
	return !(wiphy->features & NL80211_FEATURE_CELL_BASE_REG_HINTS);
 }
 #else
-static int reg_ignore_cell_hint(struct regulatory_request *pending_request)
+static enum reg_request_treatment
+reg_ignore_cell_hint(struct regulatory_request *pending_request)
 {
	return REG_REQ_IGNORE;
 }
@@ -1846,7 +1889,7 @@ static void reg_set_request_processed(void)
		need_more_processing = true;
	spin_unlock(&reg_requests_lock);
 
-	cancel_delayed_work(&reg_timeout);
+	cancel_crda_timeout();
 
	if (need_more_processing)
		schedule_work(&reg_work);
@@ -1858,19 +1901,18 @@ static void reg_set_request_processed(void)
  *
  * The wireless subsystem can use this function to process
  * a regulatory request issued by the regulatory core.
- *
- * Returns one of the different reg request treatment values.
  */
 static enum reg_request_treatment
 reg_process_hint_core(struct regulatory_request *core_request)
 {
+	if (reg_query_database(core_request)) {
+		core_request->intersect = false;
+		core_request->processed = false;
+		reg_update_last_request(core_request);
+		return REG_REQ_OK;
+	}
 
-	core_request->intersect = false;
-	core_request->processed = false;
-
-	reg_update_last_request(core_request);
-
-	return reg_call_crda(core_request);
+	return REG_REQ_IGNORE;
 }
 
 static enum reg_request_treatment
@@ -1915,8 +1957,6 @@ __reg_process_hint_user(struct regulatory_request *user_request)
  *
  * The wireless subsystem can use this function to process
  * a regulatory request initiated by userspace.
- *
- * Returns one of the different reg request treatment values.
  */
 static enum reg_request_treatment
 reg_process_hint_user(struct regulatory_request *user_request)
@@ -1925,20 +1965,20 @@ reg_process_hint_user(struct regulatory_request *user_request)
 
	treatment = __reg_process_hint_user(user_request);
	if (treatment == REG_REQ_IGNORE ||
-	    treatment == REG_REQ_ALREADY_SET) {
-		reg_free_request(user_request);
-		return treatment;
-	}
+	    treatment == REG_REQ_ALREADY_SET)
+		return REG_REQ_IGNORE;
 
	user_request->intersect = treatment == REG_REQ_INTERSECT;
	user_request->processed = false;
 
-	reg_update_last_request(user_request);
-
-	user_alpha2[0] = user_request->alpha2[0];
-	user_alpha2[1] = user_request->alpha2[1];
+	if (reg_query_database(user_request)) {
+		reg_update_last_request(user_request);
+		user_alpha2[0] = user_request->alpha2[0];
+		user_alpha2[1] = user_request->alpha2[1];
+		return REG_REQ_OK;
+	}
 
-	return reg_call_crda(user_request);
+	return REG_REQ_IGNORE;
 }
 
 static enum reg_request_treatment
@@ -1986,16 +2026,12 @@ reg_process_hint_driver(struct wiphy *wiphy,
	case REG_REQ_OK:
		break;
	case REG_REQ_IGNORE:
-		reg_free_request(driver_request);
-		return treatment;
+		return REG_REQ_IGNORE;
	case REG_REQ_INTERSECT:
-		/* fall through */
	case REG_REQ_ALREADY_SET:
		regd = reg_copy_regd(get_cfg80211_regdom());
-		if (IS_ERR(regd)) {
-			reg_free_request(driver_request);
+		if (IS_ERR(regd))
			return REG_REQ_IGNORE;
-		}
 
		tmp = get_wiphy_regdom(wiphy);
		rcu_assign_pointer(wiphy->regd, regd);
@@ -2006,8 +2042,6 @@ reg_process_hint_driver(struct wiphy *wiphy,
	driver_request->intersect = treatment == REG_REQ_INTERSECT;
	driver_request->processed = false;
 
-	reg_update_last_request(driver_request);
-
	/*
	 * Since CRDA will not be called in this case as we already
	 * have applied the requested regulatory domain before we just
@@ -2015,11 +2049,17 @@ reg_process_hint_driver(struct wiphy *wiphy,
	 */
	if (treatment == REG_REQ_ALREADY_SET) {
		nl80211_send_reg_change_event(driver_request);
+		reg_update_last_request(driver_request);
		reg_set_request_processed();
-		return treatment;
+		return REG_REQ_ALREADY_SET;
	}
 
-	return reg_call_crda(driver_request);
+	if (reg_query_database(driver_request)) {
+		reg_update_last_request(driver_request);
+		return REG_REQ_OK;
+	}
+
+	return REG_REQ_IGNORE;
 }
 
 static enum reg_request_treatment
@@ -2085,12 +2125,11 @@ reg_process_hint_country_ie(struct wiphy *wiphy,
	case REG_REQ_OK:
		break;
	case REG_REQ_IGNORE:
-		/* fall through */
+		return REG_REQ_IGNORE;
	case REG_REQ_ALREADY_SET:
		reg_free_request(country_ie_request);
-		return treatment;
+		return REG_REQ_ALREADY_SET;
	case REG_REQ_INTERSECT:
-		reg_free_request(country_ie_request);
		/*
		 * This doesn't happen yet, not sure we
		 * ever want to support it for this case.
@@ -2102,9 +2141,12 @@ reg_process_hint_country_ie(struct wiphy *wiphy,
	country_ie_request->intersect = false;
	country_ie_request->processed = false;
 
-	reg_update_last_request(country_ie_request);
+	if (reg_query_database(country_ie_request)) {
+		reg_update_last_request(country_ie_request);
+		return REG_REQ_OK;
+	}
 
-	return reg_call_crda(country_ie_request);
+	return REG_REQ_IGNORE;
 }
 
 /* This processes *all* regulatory hints */
@@ -2118,11 +2160,11 @@ static void reg_process_hint(struct regulatory_request *reg_request)
 
	switch (reg_request->initiator) {
	case NL80211_REGDOM_SET_BY_CORE:
-		reg_process_hint_core(reg_request);
-		return;
+		treatment = reg_process_hint_core(reg_request);
+		break;
	case NL80211_REGDOM_SET_BY_USER:
-		reg_process_hint_user(reg_request);
-		return;
+		treatment = reg_process_hint_user(reg_request);
+		break;
	case NL80211_REGDOM_SET_BY_DRIVER:
		if (!wiphy)
			goto out_free;
@@ -2138,6 +2180,12 @@ static void reg_process_hint(struct regulatory_request *reg_request)
		goto out_free;
	}
 
+	if (treatment == REG_REQ_IGNORE)
+		goto out_free;
+
+	WARN(treatment != REG_REQ_OK && treatment != REG_REQ_ALREADY_SET,
+	     "unexpected treatment value %d\n", treatment);
+
	/* This is required so that the orig_* parameters are saved.
	 * NOTE: treatment must be set for any case that reaches here!
	 */
@@ -2345,7 +2393,7 @@ int regulatory_hint_user(const char *alpha2,
	request->user_reg_hint_type = user_reg_hint_type;
 
	/* Allow calling CRDA again */
-	reg_crda_timeouts = 0;
+	reset_crda_timeouts();
 
	queue_regulatory_request(request);
 
@@ -2417,7 +2465,7 @@ int regulatory_hint(struct wiphy *wiphy, const char *alpha2)
	request->initiator = NL80211_REGDOM_SET_BY_DRIVER;
 
	/* Allow calling CRDA again */
-	reg_crda_timeouts = 0;
+	reset_crda_timeouts();
 
	queue_regulatory_request(request);
 
@@ -2473,7 +2521,7 @@ void regulatory_hint_country_ie(struct wiphy *wiphy, enum ieee80211_band band,
	request->country_ie_env = env;
 
	/* Allow calling CRDA again */
-	reg_crda_timeouts = 0;
+	reset_crda_timeouts();
 
	queue_regulatory_request(request);
	request = NULL;
@@ -2874,11 +2922,8 @@ static int reg_set_rd_driver(const struct ieee80211_regdomain *rd,
	}
 
	request_wiphy = wiphy_idx_to_wiphy(driver_request->wiphy_idx);
-	if (!request_wiphy) {
-		queue_delayed_work(system_power_efficient_wq,
-				   &reg_timeout, 0);
+	if (!request_wiphy)
		return -ENODEV;
-	}
 
	if (!driver_request->intersect) {
		if (request_wiphy->regd)
@@ -2935,11 +2980,8 @@ static int reg_set_rd_country_ie(const struct ieee80211_regdomain *rd,
	}
 
	request_wiphy = wiphy_idx_to_wiphy(country_ie_request->wiphy_idx);
-	if (!request_wiphy) {
-		queue_delayed_work(system_power_efficient_wq,
-				   &reg_timeout, 0);
+	if (!request_wiphy)
		return -ENODEV;
-	}
 
	if (country_ie_request->intersect)
		return -EINVAL;
@@ -2966,7 +3008,7 @@ int set_regdom(const struct ieee80211_regdomain *rd,
	}
 
	if (regd_src == REGD_SOURCE_CRDA)
-		reg_crda_timeouts = 0;
+		reset_crda_timeouts();
 
	lr = get_last_request();
 
@@ -3123,15 +3165,6 @@ void wiphy_regulatory_deregister(struct wiphy *wiphy)
		lr->country_ie_env = ENVIRON_ANY;
 }
 
-static void reg_timeout_work(struct work_struct *work)
-{
-	REG_DBG_PRINT("Timeout while waiting for CRDA to reply, restoring regulatory settings\n");
-	rtnl_lock();
-	reg_crda_timeouts++;
-	restore_regulatory_settings(true);
-	rtnl_unlock();
-}
-
 /*
  * See http://www.fcc.gov/document/5-ghz-unlicensed-spectrum-unii, for
  * UNII band definitions
@@ -3217,7 +3250,7 @@ void regulatory_exit(void)
	struct reg_beacon *reg_beacon, *btmp;
 
	cancel_work_sync(&reg_work);
-	cancel_delayed_work_sync(&reg_timeout);
+	cancel_crda_timeout_sync();
	cancel_delayed_work_sync(&reg_check_chans);
 
	/* Lock to suppress warnings */
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 3a50aa2553bf..14d5369eb778 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -266,8 +266,7 @@ void __cfg80211_sched_scan_results(struct work_struct *wk)
			spin_lock_bh(&rdev->bss_lock);
			__cfg80211_bss_expire(rdev, request->scan_start);
			spin_unlock_bh(&rdev->bss_lock);
-			request->scan_start =
-				jiffies + msecs_to_jiffies(request->interval);
+			request->scan_start = jiffies;
		}
		nl80211_send_sched_scan_results(rdev, request->dev);
	}
@@ -839,6 +838,7 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
		found->pub.signal = tmp->pub.signal;
		found->pub.capability = tmp->pub.capability;
		found->ts = tmp->ts;
+		found->ts_boottime = tmp->ts_boottime;
	} else {
		struct cfg80211_internal_bss *new;
		struct cfg80211_internal_bss *hidden;
@@ -938,14 +938,13 @@ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
 }
 
 /* Returned bss is reference counted and must be cleaned up appropriately. */
-struct cfg80211_bss*
-cfg80211_inform_bss_width(struct wiphy *wiphy,
-			  struct ieee80211_channel *rx_channel,
-			  enum nl80211_bss_scan_width scan_width,
-			  enum cfg80211_bss_frame_type ftype,
-			  const u8 *bssid, u64 tsf, u16 capability,
-			  u16 beacon_interval, const u8 *ie, size_t ielen,
-			  s32 signal, gfp_t gfp)
+struct cfg80211_bss *
+cfg80211_inform_bss_data(struct wiphy *wiphy,
+			 struct cfg80211_inform_bss *data,
+			 enum cfg80211_bss_frame_type ftype,
+			 const u8 *bssid, u64 tsf, u16 capability,
+			 u16 beacon_interval, const u8 *ie, size_t ielen,
+			 gfp_t gfp)
 {
	struct cfg80211_bss_ies *ies;
	struct ieee80211_channel *channel;
@@ -957,19 +956,21 @@ cfg80211_inform_bss_width(struct wiphy *wiphy,
		return NULL;
 
	if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC &&
-		    (signal < 0 || signal > 100)))
+		    (data->signal < 0 || data->signal > 100)))
		return NULL;
 
-	channel = cfg80211_get_bss_channel(wiphy, ie, ielen, rx_channel);
+	channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan);
	if (!channel)
		return NULL;
 
	memcpy(tmp.pub.bssid, bssid, ETH_ALEN);
	tmp.pub.channel = channel;
-	tmp.pub.scan_width = scan_width;
-	tmp.pub.signal = signal;
+	tmp.pub.scan_width = data->scan_width;
+	tmp.pub.signal = data->signal;
	tmp.pub.beacon_interval = beacon_interval;
	tmp.pub.capability = capability;
+	tmp.ts_boottime = data->boottime_ns;
+
	/*
	 * If we do not know here whether the IEs are from a Beacon or Probe
	 * Response frame, we need to pick one of the options and only use it
@@ -999,7 +1000,7 @@ cfg80211_inform_bss_width(struct wiphy *wiphy,
	}
	rcu_assign_pointer(tmp.pub.ies, ies);
 
-	signal_valid = abs(rx_channel->center_freq - channel->center_freq) <=
+	signal_valid = abs(data->chan->center_freq - channel->center_freq) <=
		wiphy->max_adj_channel_rssi_comp;
	res = cfg80211_bss_update(wiphy_to_rdev(wiphy), &tmp, signal_valid);
	if (!res)
@@ -1019,15 +1020,15 @@ cfg80211_inform_bss_width(struct wiphy *wiphy,
	/* cfg80211_bss_update gives us a referenced result */
	return &res->pub;
 }
-EXPORT_SYMBOL(cfg80211_inform_bss_width);
+EXPORT_SYMBOL(cfg80211_inform_bss_data);
 
-/* Returned bss is reference counted and must be cleaned up appropriately. */
+/* cfg80211_inform_bss_width_frame helper */
 struct cfg80211_bss *
-cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
-				struct ieee80211_channel *rx_channel,
-				enum nl80211_bss_scan_width scan_width,
-				struct ieee80211_mgmt *mgmt, size_t len,
-				s32 signal, gfp_t gfp)
+cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
+			       struct cfg80211_inform_bss *data,
+			       struct ieee80211_mgmt *mgmt, size_t len,
+			       gfp_t gfp)
+
 {
	struct cfg80211_internal_bss tmp = {}, *res;
	struct cfg80211_bss_ies *ies;
@@ -1040,8 +1041,7 @@ cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
	BUILD_BUG_ON(offsetof(struct ieee80211_mgmt, u.probe_resp.variable) !=
		     offsetof(struct ieee80211_mgmt, u.beacon.variable));
 
-	trace_cfg80211_inform_bss_width_frame(wiphy, rx_channel, scan_width, mgmt,
-					      len, signal);
+	trace_cfg80211_inform_bss_frame(wiphy, data, mgmt, len);
 
	if (WARN_ON(!mgmt))
		return NULL;
@@ -1050,14 +1050,14 @@ cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
		return NULL;
 
	if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC &&
-		    (signal < 0 || signal > 100)))
+		    (data->signal < 0 || data->signal > 100)))
		return NULL;
 
	if (WARN_ON(len < offsetof(struct ieee80211_mgmt, u.probe_resp.variable)))
		return NULL;
 
	channel = cfg80211_get_bss_channel(wiphy, mgmt->u.beacon.variable,
-					   ielen, rx_channel);
+					   ielen, data->chan);
	if (!channel)
		return NULL;
 
@@ -1077,12 +1077,13 @@ cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
 
	memcpy(tmp.pub.bssid, mgmt->bssid, ETH_ALEN);
	tmp.pub.channel = channel;
-	tmp.pub.scan_width = scan_width;
-	tmp.pub.signal = signal;
+	tmp.pub.scan_width = data->scan_width;
+	tmp.pub.signal = data->signal;
	tmp.pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
	tmp.pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
+	tmp.ts_boottime = data->boottime_ns;
 
-	signal_valid = abs(rx_channel->center_freq - channel->center_freq) <=
+	signal_valid = abs(data->chan->center_freq - channel->center_freq) <=
		wiphy->max_adj_channel_rssi_comp;
	res = cfg80211_bss_update(wiphy_to_rdev(wiphy), &tmp, signal_valid);
	if (!res)
@@ -1102,7 +1103,7 @@ cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
	/* cfg80211_bss_update gives us a referenced result */
	return &res->pub;
 }
-EXPORT_SYMBOL(cfg80211_inform_bss_width_frame);
+EXPORT_SYMBOL(cfg80211_inform_bss_frame_data);
 
 void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
 {
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index a808279a432a..0c392d36781b 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -2670,30 +2670,30 @@ TRACE_EVENT(cfg80211_get_bss,
		  __entry->privacy)
 );
 
-TRACE_EVENT(cfg80211_inform_bss_width_frame,
-	TP_PROTO(struct wiphy *wiphy, struct ieee80211_channel *channel,
-		 enum nl80211_bss_scan_width scan_width,
-		 struct ieee80211_mgmt *mgmt, size_t len,
-		 s32 signal),
-	TP_ARGS(wiphy, channel, scan_width, mgmt, len, signal),
+TRACE_EVENT(cfg80211_inform_bss_frame,
+	TP_PROTO(struct wiphy *wiphy, struct cfg80211_inform_bss *data,
+		 struct ieee80211_mgmt *mgmt, size_t len),
+	TP_ARGS(wiphy, data, mgmt, len),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		CHAN_ENTRY
		__field(enum nl80211_bss_scan_width, scan_width)
		__dynamic_array(u8, mgmt, len)
		__field(s32, signal)
+		__field(u64, ts_boottime)
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
-		CHAN_ASSIGN(channel);
-		__entry->scan_width = scan_width;
+		CHAN_ASSIGN(data->chan);
+		__entry->scan_width = data->scan_width;
		if (mgmt)
			memcpy(__get_dynamic_array(mgmt), mgmt, len);
-		__entry->signal = signal;
+		__entry->signal = data->signal;
+		__entry->ts_boottime = data->boottime_ns;
	),
-	TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT "(scan_width: %d) signal: %d",
+	TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT "(scan_width: %d) signal: %d, tsb:%llu",
		  WIPHY_PR_ARG, CHAN_PR_ARG, __entry->scan_width,
-		  __entry->signal)
+		  __entry->signal, (unsigned long long)__entry->ts_boottime)
 );
 
 DECLARE_EVENT_CLASS(cfg80211_bss_evt,
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 63e7d50e6a4f..b30514514e37 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -13,6 +13,7 @@ hostprogs-y += tracex3
 hostprogs-y += tracex4
 hostprogs-y += tracex5
 hostprogs-y += tracex6
+hostprogs-y += trace_output
 hostprogs-y += lathist
 
 test_verifier-objs := test_verifier.o libbpf.o
@@ -27,6 +28,7 @@ tracex3-objs := bpf_load.o libbpf.o tracex3_user.o
 tracex4-objs := bpf_load.o libbpf.o tracex4_user.o
 tracex5-objs := bpf_load.o libbpf.o tracex5_user.o
 tracex6-objs := bpf_load.o libbpf.o tracex6_user.o
+trace_output-objs := bpf_load.o libbpf.o trace_output_user.o
 lathist-objs := bpf_load.o libbpf.o lathist_user.o
 
 # Tell kbuild to always build the programs
@@ -40,6 +42,7 @@ always += tracex3_kern.o
 always += tracex4_kern.o
 always += tracex5_kern.o
 always += tracex6_kern.o
+always += trace_output_kern.o
 always += tcbpf1_kern.o
 always += lathist_kern.o
 
@@ -55,6 +58,7 @@ HOSTLOADLIBES_tracex3 += -lelf
 HOSTLOADLIBES_tracex4 += -lelf -lrt
 HOSTLOADLIBES_tracex5 += -lelf
 HOSTLOADLIBES_tracex6 += -lelf
+HOSTLOADLIBES_trace_output += -lelf -lrt
 HOSTLOADLIBES_lathist += -lelf
 
 # point this to your LLVM backend with bpf support
@@ -64,3 +68,6 @@ $(obj)/%.o: $(src)/%.c
	clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
		-D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \
		-O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@
+	clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
+		-D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \
+		-O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=asm -o $@.s
diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h
index 21aa1b44c30c..b35c21e0b43f 100644
--- a/samples/bpf/bpf_helpers.h
+++ b/samples/bpf/bpf_helpers.h
@@ -37,6 +37,8 @@ static int (*bpf_clone_redirect)(void *ctx, int ifindex, int flags) =
	(void *) BPF_FUNC_clone_redirect;
 static int (*bpf_redirect)(int ifindex, int flags) =
	(void *) BPF_FUNC_redirect;
+static int (*bpf_perf_event_output)(void *ctx, void *map, int index, void *data, int size) =
+	(void *) BPF_FUNC_perf_event_output;
 
 /* llvm builtin functions that eBPF C program may use to
  * emit BPF_LD_ABS and BPF_LD_IND instructions
diff --git a/samples/bpf/trace_output_kern.c b/samples/bpf/trace_output_kern.c
new file mode 100644
index 000000000000..8d8d1ec429eb
--- /dev/null
+++ b/samples/bpf/trace_output_kern.c
@@ -0,0 +1,31 @@
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include "bpf_helpers.h"
+
+struct bpf_map_def SEC("maps") my_map = {
+	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+	.key_size = sizeof(int),
+	.value_size = sizeof(u32),
+	.max_entries = 2,
+};
+
+SEC("kprobe/sys_write")
+int bpf_prog1(struct pt_regs *ctx)
+{
+	struct S {
+		u64 pid;
+		u64 cookie;
+	} data;
+
+	memset(&data, 0, sizeof(data));
+	data.pid = bpf_get_current_pid_tgid();
+	data.cookie = 0x12345678;
+
+	bpf_perf_event_output(ctx, &my_map, 0, &data, sizeof(data));
+
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/trace_output_user.c b/samples/bpf/trace_output_user.c
new file mode 100644
index 000000000000..661a7d052f2c
--- /dev/null
+++ b/samples/bpf/trace_output_user.c
@@ -0,0 +1,196 @@
+/* This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <sys/ioctl.h>
+#include <linux/perf_event.h>
+#include <linux/bpf.h>
+#include <errno.h>
+#include <assert.h>
+#include <sys/syscall.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <time.h>
+#include <signal.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+
+static int pmu_fd;
+
+int page_size;
+int page_cnt = 8;
+volatile struct perf_event_mmap_page *header;
+
+typedef void (*print_fn)(void *data, int size);
+
+static int perf_event_mmap(int fd)
+{
+	void *base;
+	int mmap_size;
+
+	page_size = getpagesize();
+	mmap_size = page_size * (page_cnt + 1);
+
+	base = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+	if (base == MAP_FAILED) {
+		printf("mmap err\n");
+		return -1;
+	}
+
+	header = base;
+	return 0;
+}
+
+static int perf_event_poll(int fd)
+{
+	struct pollfd pfd = { .fd = fd, .events = POLLIN };
+
+	return poll(&pfd, 1, 1000);
+}
+
+struct perf_event_sample {
+	struct perf_event_header header;
+	__u32 size;
+	char data[];
+};
+
+void perf_event_read(print_fn fn)
+{
+	__u64 data_tail = header->data_tail;
+	__u64 data_head = header->data_head;
+	__u64 buffer_size = page_cnt * page_size;
+	void *base, *begin, *end;
+	char buf[256];
+
+	asm volatile("" ::: "memory"); /* in real code it should be smp_rmb() */
+	if (data_head == data_tail)
+		return;
+
+	base = ((char *)header) + page_size;
+
+	begin = base + data_tail % buffer_size;
+	end = base + data_head % buffer_size;
+
+	while (begin != end) {
+		struct perf_event_sample *e;
+
+		e = begin;
+		if (begin + e->header.size > base + buffer_size) {
+			long len = base + buffer_size - begin;
+
+			assert(len < e->header.size);
+			memcpy(buf, begin, len);
+			memcpy(buf + len, base, e->header.size - len);
+			e = (void *) buf;
+			begin = base + e->header.size - len;
+		} else if (begin + e->header.size == base + buffer_size) {
+			begin = base;
+		} else {
+			begin += e->header.size;
+		}
+
+		if (e->header.type == PERF_RECORD_SAMPLE) {
+			fn(e->data, e->size);
+		} else if (e->header.type == PERF_RECORD_LOST) {
+			struct {
+				struct perf_event_header header;
+				__u64 id;
+				__u64 lost;
+			} *lost = (void *) e;
+			printf("lost %lld events\n", lost->lost);
+		} else {
+			printf("unknown event type=%d size=%d\n",
+			       e->header.type, e->header.size);
+		}
+	}
+
+	__sync_synchronize(); /* smp_mb() */
+	header->data_tail = data_head;
+}
+
+static __u64 time_get_ns(void)
+{
+	struct timespec ts;
+
+	clock_gettime(CLOCK_MONOTONIC, &ts);
+	return ts.tv_sec * 1000000000ull + ts.tv_nsec;
+}
+
+static __u64 start_time;
+
+#define MAX_CNT 100000ll
+
+static void print_bpf_output(void *data, int size)
+{
+	static __u64 cnt;
+	struct {
+		__u64 pid;
+		__u64 cookie;
+	} *e = data;
+
+	if (e->cookie != 0x12345678) {
+		printf("BUG pid %llx cookie %llx sized %d\n",
+		       e->pid, e->cookie, size);
+		kill(0, SIGINT);
+	}
+
+	cnt++;
+
+	if (cnt == MAX_CNT) {
+		printf("recv %lld events per sec\n",
+		       MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
+		kill(0, SIGINT);
+	}
+}
+
+static void test_bpf_perf_event(void)
+{
+	struct perf_event_attr attr = {
+		.sample_type = PERF_SAMPLE_RAW,
+		.type = PERF_TYPE_SOFTWARE,
+		.config = PERF_COUNT_SW_BPF_OUTPUT,
+	};
+	int key = 0;
+
+	pmu_fd = perf_event_open(&attr, -1/*pid*/, 0/*cpu*/, -1/*group_fd*/, 0);
+
+	assert(pmu_fd >= 0);
+	assert(bpf_update_elem(map_fd[0], &key, &pmu_fd, BPF_ANY) == 0);
+	ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
+}
+
+int main(int argc, char **argv)
+{
+	char filename[256];
+	FILE *f;
+
+	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+	if (load_bpf_file(filename)) {
+		printf("%s", bpf_log_buf);
+		return 1;
+	}
+
+	test_bpf_perf_event();
+
+	if (perf_event_mmap(pmu_fd) < 0)
+		return 1;
+
+	f = popen("taskset 1 dd if=/dev/zero of=/dev/null", "r");
+	(void) f;
+
+	start_time = time_get_ns();
+	for (;;) {
+		perf_event_poll(pmu_fd);
+		perf_event_read(print_bpf_output);
+	}
+
+	return 0;
+}
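
Note on the regulatory rework above: a hint now goes through reg_query_database(), which consults the builtin regdb (CONFIG_CFG80211_INTERNAL_REGDB) first and only then falls back to a CRDA uevent, arming the 3142 ms crda_timeout only once the uevent was actually sent. A minimal driver-side sketch of feeding such a hint (illustrative only, not part of the diff; eeprom_read_alpha2() is a made-up helper, regulatory_hint() is the API shown above):

static int example_apply_eeprom_country(struct wiphy *wiphy)
{
	char alpha2[2];

	eeprom_read_alpha2(alpha2);	/* hypothetical EEPROM helper */

	/* Queues an NL80211_REGDOM_SET_BY_DRIVER request; the core tries
	 * the builtin regdb first and falls back to a CRDA uevent. */
	return regulatory_hint(wiphy, alpha2);
}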
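The scan.c changes replace cfg80211_inform_bss_width()/..._width_frame() with cfg80211_inform_bss_data()/cfg80211_inform_bss_frame_data(), which take a struct cfg80211_inform_bss so that new per-BSS metadata (here boottime_ns) can be added without another signature change. A hedged sketch of the driver side (rx_chan, mgmt, len and sig_mbm are assumed to come from the driver's RX path; everything else is the API visible in the diff):

static void example_report_beacon(struct wiphy *wiphy,
				  struct ieee80211_channel *rx_chan,
				  struct ieee80211_mgmt *mgmt, size_t len,
				  s32 sig_mbm)
{
	struct cfg80211_inform_bss data = {
		.chan = rx_chan,
		.scan_width = NL80211_BSS_CHAN_WIDTH_20,
		.signal = sig_mbm,
		.boottime_ns = ktime_get_boot_ns(), /* new RX timestamp field */
	};
	struct cfg80211_bss *bss;

	bss = cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len,
					     GFP_ATOMIC);
	if (bss)
		cfg80211_put_bss(wiphy, bss);	/* returned bss is referenced */
}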
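The trace_output sample pins the dd workload to CPU 0, so a single perf event stored at index 0 of the BPF_MAP_TYPE_PERF_EVENT_ARRAY is enough. A hedged sketch of the multi-CPU variant (assumes get_nprocs() from <sys/sysinfo.h>, the sample's perf_event_open()/bpf_update_elem() wrappers, and a map declared with max_entries >= the CPU count):

static void open_bpf_output_on_all_cpus(int map_fd, int nr_cpus)
{
	struct perf_event_attr attr = {
		.sample_type = PERF_SAMPLE_RAW,
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_BPF_OUTPUT,
	};
	int cpu, fd;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		/* one event per CPU, stored at the matching array slot so
		 * bpf_perf_event_output(ctx, &map, cpu, ...) finds it */
		fd = perf_event_open(&attr, -1 /* any pid */, cpu, -1, 0);
		if (fd < 0)
			continue; /* real code should report the error */
		assert(bpf_update_elem(map_fd, &cpu, &fd, BPF_ANY) == 0);
		ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	}
}

The reader side would then mmap and drain each fd the same way perf_event_read() does in the sample.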