Diffstat (limited to 'drivers')
500 files changed, 34523 insertions, 10805 deletions
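Most of the hunks below apply a single mechanical cleanup: device-ID tables and operations structures that the kernel core only ever reads (struct pci_device_id, struct usb_device_id, struct atmdev_ops, struct attribute_group, struct dsa_switch_ops) are marked const, which lets the linker place them in read-only memory and protects them from run-time corruption. A minimal sketch of the pattern, using a hypothetical "foo" driver with an invented vendor/device pair; the shape mirrors the atm and isdn hunks that follow:

    #include <linux/module.h>
    #include <linux/pci.h>

    /* Before: static struct pci_device_id foo_pci_tbl[] = { ... };
     * After:  const, so the table lands in .rodata;
     *         pci_register_driver() only reads it for probe matching.
     */
    static const struct pci_device_id foo_pci_tbl[] = {
            { PCI_VDEVICE(INTEL, 0x1234), 0 },  /* hypothetical IDs */
            { 0, }                              /* terminating entry */
    };
    MODULE_DEVICE_TABLE(pci, foo_pci_tbl);

MODULE_DEVICE_TABLE() and the match logic take the table by const pointer, so the conversion needs no call-site changes, which is why it can be applied tree-wide as seen here.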
diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c index 1fd25e872ece..8d98130ecd40 100644 --- a/drivers/atm/adummy.c +++ b/drivers/atm/adummy.c @@ -71,7 +71,7 @@ static struct attribute *adummy_attrs[] = { NULL }; -static struct attribute_group adummy_group_attrs = { +static const struct attribute_group adummy_group_attrs = { .name = NULL, /* We want them in dev's root folder */ .attrs = adummy_attrs }; @@ -130,7 +130,7 @@ adummy_proc_read(struct atm_dev *dev, loff_t *pos, char *page) return 0; } -static struct atmdev_ops adummy_ops = +static const struct atmdev_ops adummy_ops = { .open = adummy_open, .close = adummy_close, diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c index 906705e5f776..acf16c323e38 100644 --- a/drivers/atm/ambassador.c +++ b/drivers/atm/ambassador.c @@ -2374,7 +2374,7 @@ MODULE_PARM_DESC(pci_lat, "PCI latency in bus cycles"); /********** module entry **********/ -static struct pci_device_id amb_pci_tbl[] = { +static const struct pci_device_id amb_pci_tbl[] = { { PCI_VDEVICE(MADGE, PCI_DEVICE_ID_MADGE_AMBASSADOR), 0 }, { PCI_VDEVICE(MADGE, PCI_DEVICE_ID_MADGE_AMBASSADOR_BAD), 0 }, { 0, } diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c index 56fa16c85ebf..afebeb1c3e1e 100644 --- a/drivers/atm/atmtcp.c +++ b/drivers/atm/atmtcp.c @@ -342,7 +342,7 @@ static struct atmdev_ops atmtcp_v_dev_ops = { */ -static struct atmdev_ops atmtcp_c_dev_ops = { +static const struct atmdev_ops atmtcp_c_dev_ops = { .close = atmtcp_c_close, .send = atmtcp_c_send }; diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index b042ec458544..ce47eb17901d 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -2292,7 +2292,7 @@ err_disable: } -static struct pci_device_id eni_pci_tbl[] = { +static const struct pci_device_id eni_pci_tbl[] = { { PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_FPGA), 0 /* FPGA */ }, { PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_ASIC), 1 /* ASIC */ }, { 0, } diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index 22dcab952a24..6b6368a56526 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c @@ -2030,7 +2030,7 @@ static void firestream_remove_one(struct pci_dev *pdev) func_exit (); } -static struct pci_device_id firestream_pci_tbl[] = { +static const struct pci_device_id firestream_pci_tbl[] = { { PCI_VDEVICE(FUJITSU_ME, PCI_DEVICE_ID_FUJITSU_FS50), FS_IS50}, { PCI_VDEVICE(FUJITSU_ME, PCI_DEVICE_ID_FUJITSU_FS155), FS_IS155}, { 0, } diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c index f0433adcd8fc..f8b7e86907cc 100644 --- a/drivers/atm/fore200e.c +++ b/drivers/atm/fore200e.c @@ -2757,7 +2757,7 @@ static void fore200e_pca_remove_one(struct pci_dev *pci_dev) } -static struct pci_device_id fore200e_pca_tbl[] = { +static const struct pci_device_id fore200e_pca_tbl[] = { { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &fore200e_bus[0] }, { 0, } diff --git a/drivers/atm/he.c b/drivers/atm/he.c index 37ee21c5a5ca..e58538c29377 100644 --- a/drivers/atm/he.c +++ b/drivers/atm/he.c @@ -161,7 +161,7 @@ static unsigned int clocktab[] = { CLK_LOW }; -static struct atmdev_ops he_ops = +static const struct atmdev_ops he_ops = { .open = he_open, .close = he_close, @@ -2851,7 +2851,7 @@ MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)"); module_param(sdh, bool, 0); MODULE_PARM_DESC(sdh, "use SDH framing (default 0)"); -static struct pci_device_id he_pci_tbl[] = { +static const struct pci_device_id he_pci_tbl[] = { { PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 
}, { 0, } }; diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c index 0f18480b33b5..7e76b35f422c 100644 --- a/drivers/atm/horizon.c +++ b/drivers/atm/horizon.c @@ -2867,7 +2867,7 @@ MODULE_PARM_DESC(max_tx_size, "maximum size of TX AAL5 frames"); MODULE_PARM_DESC(max_rx_size, "maximum size of RX AAL5 frames"); MODULE_PARM_DESC(pci_lat, "PCI latency in bus cycles"); -static struct pci_device_id hrz_pci_tbl[] = { +static const struct pci_device_id hrz_pci_tbl[] = { { PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_HORIZON, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { 0, } diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index 60bacba03d17..47f3c4ae0594 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c @@ -134,7 +134,7 @@ static int idt77252_proc_read(struct atm_dev *dev, loff_t * pos, static void idt77252_softint(struct work_struct *work); -static struct atmdev_ops idt77252_ops = +static const struct atmdev_ops idt77252_ops = { .dev_close = idt77252_dev_close, .open = idt77252_open, @@ -3725,7 +3725,7 @@ err_out_disable_pdev: return err; } -static struct pci_device_id idt77252_pci_tbl[] = +static const struct pci_device_id idt77252_pci_tbl[] = { { PCI_VDEVICE(IDT, PCI_DEVICE_ID_IDT_IDT77252), 0 }, { 0, } diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index a4fa6c82261e..fc72b763fdd7 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c @@ -3266,7 +3266,7 @@ static void ia_remove_one(struct pci_dev *pdev) kfree(iadev); } -static struct pci_device_id ia_pci_tbl[] = { +static const struct pci_device_id ia_pci_tbl[] = { { PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, }, { PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, }, { 0,} diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c index 1a9bc51284b0..2351dad78ff5 100644 --- a/drivers/atm/lanai.c +++ b/drivers/atm/lanai.c @@ -2589,7 +2589,7 @@ static int lanai_init_one(struct pci_dev *pci, return result; } -static struct pci_device_id lanai_pci_tbl[] = { +static const struct pci_device_id lanai_pci_tbl[] = { { PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_LANAI2) }, { PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_LANAIHB) }, { 0, } /* terminal entry */ diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c index d879f3bca107..a9702836cbae 100644 --- a/drivers/atm/nicstar.c +++ b/drivers/atm/nicstar.c @@ -154,7 +154,7 @@ static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr); static struct ns_dev *cards[NS_MAX_CARDS]; static unsigned num_cards; -static struct atmdev_ops atm_ops = { +static const struct atmdev_ops atm_ops = { .open = ns_open, .close = ns_close, .ioctl = ns_ioctl, @@ -253,7 +253,7 @@ static void nicstar_remove_one(struct pci_dev *pcidev) kfree(card); } -static struct pci_device_id nicstar_pci_tbl[] = { +static const struct pci_device_id nicstar_pci_tbl[] = { { PCI_VDEVICE(IDT, PCI_DEVICE_ID_IDT_IDT77201), 0 }, {0,} /* terminate list */ }; diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index c8f2ca6d8b29..0df1a1c80b00 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c @@ -611,7 +611,7 @@ static struct attribute *solos_attrs[] = { NULL }; -static struct attribute_group solos_attr_group = { +static const struct attribute_group solos_attr_group = { .attrs = solos_attrs, .name = "parameters", }; @@ -628,7 +628,7 @@ static struct attribute *gpio_attrs[] = { NULL }; -static struct attribute_group gpio_attr_group = { +static const struct attribute_group gpio_attr_group = { .attrs = gpio_attrs, .name = "gpio", }; @@ -1187,7 +1187,7 @@ static int 
psend(struct atm_vcc *vcc, struct sk_buff *skb) return 0; } -static struct atmdev_ops fpga_ops = { +static const struct atmdev_ops fpga_ops = { .open = popen, .close = pclose, .ioctl = NULL, @@ -1476,7 +1476,7 @@ static void fpga_remove(struct pci_dev *dev) kfree(card); } -static struct pci_device_id fpga_pci_tbl[] = { +static const struct pci_device_id fpga_pci_tbl[] = { { 0x10ee, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { 0, } }; diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c index 07bdd51b3b9a..1ef67db03c8e 100644 --- a/drivers/atm/zatm.c +++ b/drivers/atm/zatm.c @@ -1642,7 +1642,7 @@ out_free: MODULE_LICENSE("GPL"); -static struct pci_device_id zatm_pci_tbl[] = { +static const struct pci_device_id zatm_pci_tbl[] = { { PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1221), ZATM_COPPER }, { PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1225), 0 }, { 0, } diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c index 7bde8d7a2816..982d5781d3ce 100644 --- a/drivers/bcma/driver_gpio.c +++ b/drivers/bcma/driver_gpio.c @@ -191,6 +191,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc) case BCMA_CHIP_ID_BCM4707: case BCMA_CHIP_ID_BCM5357: case BCMA_CHIP_ID_BCM53572: + case BCMA_CHIP_ID_BCM53573: case BCMA_CHIP_ID_BCM47094: chip->ngpio = 32; break; diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index 35952a94875e..3a6ead603e49 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -98,6 +98,7 @@ config BT_HCIUART_NOKIA depends on BT_HCIUART_SERDEV depends on PM select BT_HCIUART_H4 + select BT_BCM help Nokia H4+ is serial protocol for communication between Bluetooth device and host. This protocol is required for Bluetooth devices diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index b793853ff05f..204afe66de92 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -140,7 +140,8 @@ MODULE_DEVICE_TABLE(usb, ath3k_table); #define BTUSB_ATH3012 0x80 /* This table is to load patch and sysconfig files - * for AR3012 */ + * for AR3012 + */ static const struct usb_device_id ath3k_blist_tbl[] = { /* Atheros AR3012 with sflash firmware*/ diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c index 32dcac017395..194788739a83 100644 --- a/drivers/bluetooth/bt3c_cs.c +++ b/drivers/bluetooth/bt3c_cs.c @@ -684,14 +684,16 @@ static int bt3c_config(struct pcmcia_device *link) unsigned long try; /* First pass: look for a config entry that looks normal. - Two tries: without IO aliases, then with aliases */ + * Two tries: without IO aliases, then with aliases + */ for (try = 0; try < 2; try++) if (!pcmcia_loop_config(link, bt3c_check_config, (void *) try)) goto found_port; /* Second pass: try to find an entry that isn't picky about - its base address, then try to grab any standard serial port - address, and finally try to get any free port. */ + * its base address, then try to grab any standard serial port + * address, and finally try to get any free port. 
+ */ if (!pcmcia_loop_config(link, bt3c_check_config_notpicky, NULL)) goto found_port; diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index eb794f08b238..03341ce98c32 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -1455,7 +1455,8 @@ done: fw_dump_ptr = fw_dump_data; /* Dump all the memory data into single file, a userspace script will - be used to split all the memory data to multiple files*/ + * be used to split all the memory data to multiple files + */ BT_INFO("== btmrvl firmware dump to /sys/class/devcoredump start"); for (idx = 0; idx < dump_num; idx++) { struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx]; @@ -1482,7 +1483,8 @@ done: } /* fw_dump_data will be free in device coredump release function - after 5 min*/ + * after 5 min + */ dev_coredumpv(&card->func->dev, fw_dump_data, fw_dump_len, GFP_KERNEL); BT_INFO("== btmrvl firmware dump to /sys/class/devcoredump end"); } diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c index 28afd5d585f9..0bbdfcef2aa8 100644 --- a/drivers/bluetooth/btqca.c +++ b/drivers/bluetooth/btqca.c @@ -81,7 +81,7 @@ static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version) * and lower 2 bytes from patch will be used. */ *rome_version = (le32_to_cpu(ver->soc_id) << 16) | - (le16_to_cpu(ver->rome_ver) & 0x0000ffff); + (le16_to_cpu(ver->rome_ver) & 0x0000ffff); out: kfree_skb(skb); diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c index 8279094dd713..d9a99b4302ea 100644 --- a/drivers/bluetooth/btrtl.c +++ b/drivers/bluetooth/btrtl.c @@ -279,6 +279,8 @@ static int rtl_load_config(struct hci_dev *hdev, const char *name, u8 **buff) return ret; ret = fw->size; *buff = kmemdup(fw->data, ret, GFP_KERNEL); + if (!*buff) + ret = -ENOMEM; release_firmware(fw); diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c index 1cb958e199eb..c8e945d19ffe 100644 --- a/drivers/bluetooth/btsdio.c +++ b/drivers/bluetooth/btsdio.c @@ -144,7 +144,8 @@ static int btsdio_rx_packet(struct btsdio_data *data) if (!skb) { /* Out of memory. Prepare a read retry and just * return with the expectation that the next time - * we're called we'll have more memory. */ + * we're called we'll have more memory. + */ return -ENOMEM; } diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c index 7df79bb12350..310e9c2e09b6 100644 --- a/drivers/bluetooth/btuart_cs.c +++ b/drivers/bluetooth/btuart_cs.c @@ -614,14 +614,16 @@ static int btuart_config(struct pcmcia_device *link) int try; /* First pass: look for a config entry that looks normal. - Two tries: without IO aliases, then with aliases */ + * Two tries: without IO aliases, then with aliases + */ for (try = 0; try < 2; try++) if (!pcmcia_loop_config(link, btuart_check_config, &try)) goto found_port; /* Second pass: try to find an entry that isn't picky about - its base address, then try to grab any standard serial port - address, and finally try to get any free port. */ + * its base address, then try to grab any standard serial port + * address, and finally try to get any free port. 
+ */ if (!pcmcia_loop_config(link, btuart_check_config_notpicky, NULL)) goto found_port; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index fa24d693af24..24cc8383fdd4 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -131,7 +131,8 @@ static const struct usb_device_id btusb_table[] = { { USB_DEVICE(0x19ff, 0x0239), .driver_info = BTUSB_BCM_PATCHRAM }, /* Broadcom BCM43142A0 (Foxconn/Lenovo) */ - { USB_DEVICE(0x105b, 0xe065), .driver_info = BTUSB_BCM_PATCHRAM }, + { USB_VENDOR_AND_INTERFACE_INFO(0x105b, 0xff, 0x01, 0x01), + .driver_info = BTUSB_BCM_PATCHRAM }, /* Broadcom BCM920703 (HTC Vive) */ { USB_VENDOR_AND_INTERFACE_INFO(0x0bb4, 0xff, 0x01, 0x01), @@ -268,6 +269,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0489, 0xe092), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x0489, 0xe0a2), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x04ca, 0x3011), .driver_info = BTUSB_QCA_ROME }, + { USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME }, /* Broadcom BCM2035 */ { USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 }, @@ -656,7 +658,8 @@ static void btusb_intr_complete(struct urb *urb) err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { /* -EPERM: urb is being killed; - * -ENODEV: device got disconnected */ + * -ENODEV: device got disconnected + */ if (err != -EPERM && err != -ENODEV) BT_ERR("%s urb %p failed to resubmit (%d)", hdev->name, urb, -err); @@ -745,7 +748,8 @@ static void btusb_bulk_complete(struct urb *urb) err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { /* -EPERM: urb is being killed; - * -ENODEV: device got disconnected */ + * -ENODEV: device got disconnected + */ if (err != -EPERM && err != -ENODEV) BT_ERR("%s urb %p failed to resubmit (%d)", hdev->name, urb, -err); @@ -840,7 +844,8 @@ static void btusb_isoc_complete(struct urb *urb) err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { /* -EPERM: urb is being killed; - * -ENODEV: device got disconnected */ + * -ENODEV: device got disconnected + */ if (err != -EPERM && err != -ENODEV) BT_ERR("%s urb %p failed to resubmit (%d)", hdev->name, urb, -err); @@ -952,7 +957,8 @@ static void btusb_diag_complete(struct urb *urb) err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { /* -EPERM: urb is being killed; - * -ENODEV: device got disconnected */ + * -ENODEV: device got disconnected + */ if (err != -EPERM && err != -ENODEV) BT_ERR("%s urb %p failed to resubmit (%d)", hdev->name, urb, -err); @@ -2896,7 +2902,8 @@ static int btusb_probe(struct usb_interface *intf, struct usb_device *udev = interface_to_usbdev(intf); /* Old firmware would otherwise let ath3k driver load - * patch and sysconfig files */ + * patch and sysconfig files + */ if (le16_to_cpu(udev->descriptor.bcdDevice) <= 0x0001) return -ENODEV; } @@ -3067,6 +3074,12 @@ static int btusb_probe(struct usb_interface *intf, if (id->driver_info & BTUSB_QCA_ROME) { data->setup_on_usb = btusb_setup_qca; hdev->set_bdaddr = btusb_set_bdaddr_ath3012; + + /* QCA Rome devices lose their updated firmware over suspend, + * but the USB hub doesn't notice any status change. + * Explicitly request a device reset on resume. 
+ */ + set_bit(BTUSB_RESET_RESUME, &data->flags); } #ifdef CONFIG_BT_HCIBTUSB_RTL @@ -3259,13 +3272,28 @@ static void play_deferred(struct btusb_data *data) int err; while ((urb = usb_get_from_anchor(&data->deferred))) { + usb_anchor_urb(urb, &data->tx_anchor); + err = usb_submit_urb(urb, GFP_ATOMIC); - if (err < 0) + if (err < 0) { + if (err != -EPERM && err != -ENODEV) + BT_ERR("%s urb %p submission failed (%d)", + data->hdev->name, urb, -err); + kfree(urb->setup_packet); + usb_unanchor_urb(urb); + usb_free_urb(urb); break; + } data->tx_in_flight++; + usb_free_urb(urb); + } + + /* Cleanup the rest deferred urbs. */ + while ((urb = usb_get_from_anchor(&data->deferred))) { + kfree(urb->setup_packet); + usb_free_urb(urb); } - usb_scuttle_anchored_urbs(&data->deferred); } static int btusb_resume(struct usb_interface *intf) diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c index 85a3978b064f..5ef8000f90a9 100644 --- a/drivers/bluetooth/btwilink.c +++ b/drivers/bluetooth/btwilink.c @@ -93,8 +93,7 @@ static void st_reg_completion_cb(void *priv_data, int data) complete(&lhst->wait_reg_completion); } -/* Called by Shared Transport layer when receive data is - * available */ +/* Called by Shared Transport layer when receive data is available */ static long st_receive(void *priv_data, struct sk_buff *skb) { struct ti_st *lhst = priv_data; @@ -198,7 +197,8 @@ static int ti_st_open(struct hci_dev *hdev) } /* Is ST registration callback - * called with ERROR status? */ + * called with ERROR status? + */ if (hst->reg_status != 0) { BT_ERR("ST registration completed with invalid " "status %d", hst->reg_status); @@ -276,7 +276,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb) static int bt_ti_probe(struct platform_device *pdev) { - static struct ti_st *hst; + struct ti_st *hst; struct hci_dev *hdev; int err; diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index 6a662d0161b4..6b42372c53ef 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -176,7 +176,7 @@ static irqreturn_t bcm_host_wake(int irq, void *data) static int bcm_request_irq(struct bcm_data *bcm) { struct bcm_device *bdev = bcm->dev; - int err = 0; + int err; /* If this is not a platform device, do not enable PM functionalities */ mutex_lock(&bcm_device_lock); @@ -185,21 +185,23 @@ static int bcm_request_irq(struct bcm_data *bcm) goto unlock; } - if (bdev->irq > 0) { - err = devm_request_irq(&bdev->pdev->dev, bdev->irq, - bcm_host_wake, IRQF_TRIGGER_RISING, - "host_wake", bdev); - if (err) - goto unlock; + if (bdev->irq <= 0) { + err = -EOPNOTSUPP; + goto unlock; + } - device_init_wakeup(&bdev->pdev->dev, true); + err = devm_request_irq(&bdev->pdev->dev, bdev->irq, bcm_host_wake, + IRQF_TRIGGER_RISING, "host_wake", bdev); + if (err) + goto unlock; - pm_runtime_set_autosuspend_delay(&bdev->pdev->dev, - BCM_AUTOSUSPEND_DELAY); - pm_runtime_use_autosuspend(&bdev->pdev->dev); - pm_runtime_set_active(&bdev->pdev->dev); - pm_runtime_enable(&bdev->pdev->dev); - } + device_init_wakeup(&bdev->pdev->dev, true); + + pm_runtime_set_autosuspend_delay(&bdev->pdev->dev, + BCM_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(&bdev->pdev->dev); + pm_runtime_set_active(&bdev->pdev->dev); + pm_runtime_enable(&bdev->pdev->dev); unlock: mutex_unlock(&bcm_device_lock); diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c index 4e328d7d47bb..3b82a87224a9 100644 --- a/drivers/bluetooth/hci_h4.c +++ b/drivers/bluetooth/hci_h4.c @@ -172,7 +172,7 @@ struct 
sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb, const struct h4_recv_pkt *pkts, int pkts_count) { struct hci_uart *hu = hci_get_drvdata(hdev); - u8 alignment = hu->alignment; + u8 alignment = hu->alignment ? hu->alignment : 1; while (count) { int i, len; diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index 8397b716fa65..a746627e784e 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -457,7 +457,8 @@ static int hci_uart_tty_open(struct tty_struct *tty) BT_DBG("tty %p", tty); /* Error if the tty has no write op instead of leaving an exploitable - hole */ + * hole + */ if (tty->ops->write == NULL) return -EOPNOTSUPP; diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c index c982943f0747..424c15aa7bb7 100644 --- a/drivers/bluetooth/hci_ll.c +++ b/drivers/bluetooth/hci_ll.c @@ -622,7 +622,8 @@ static int download_firmware(struct ll_device *lldev) cmd = (struct hci_command *)action_ptr; if (cmd->opcode == 0xff36) { /* ignore remote change - * baud rate HCI VS command */ + * baud rate HCI VS command + */ bt_dev_warn(lldev->hu.hdev, "change remote baud rate command in firmware"); break; } @@ -742,14 +743,8 @@ static int hci_ti_probe(struct serdev_device *serdev) static void hci_ti_remove(struct serdev_device *serdev) { struct ll_device *lldev = serdev_device_get_drvdata(serdev); - struct hci_uart *hu = &lldev->hu; - struct hci_dev *hdev = hu->hdev; - cancel_work_sync(&hu->write_work); - - hci_unregister_dev(hdev); - hci_free_dev(hdev); - hu->proto->close(hu); + hci_uart_unregister_device(&lldev->hu); } static const struct of_device_id hci_ti_of_match[] = { diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c index 181a15b549e5..3539fd03f47e 100644 --- a/drivers/bluetooth/hci_nokia.c +++ b/drivers/bluetooth/hci_nokia.c @@ -767,16 +767,8 @@ static int nokia_bluetooth_serdev_probe(struct serdev_device *serdev) static void nokia_bluetooth_serdev_remove(struct serdev_device *serdev) { struct nokia_bt_dev *btdev = serdev_device_get_drvdata(serdev); - struct hci_uart *hu = &btdev->hu; - struct hci_dev *hdev = hu->hdev; - cancel_work_sync(&hu->write_work); - - hci_unregister_dev(hdev); - hci_free_dev(hdev); - hu->proto->close(hu); - - pm_runtime_disable(&btdev->serdev->dev); + hci_uart_unregister_device(&btdev->hu); } static int nokia_bluetooth_runtime_suspend(struct device *dev) diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c index aea930101dd2..b725ac4f7ff6 100644 --- a/drivers/bluetooth/hci_serdev.c +++ b/drivers/bluetooth/hci_serdev.c @@ -354,3 +354,16 @@ err_alloc: return err; } EXPORT_SYMBOL_GPL(hci_uart_register_device); + +void hci_uart_unregister_device(struct hci_uart *hu) +{ + struct hci_dev *hdev = hu->hdev; + + hci_unregister_dev(hdev); + hci_free_dev(hdev); + + cancel_work_sync(&hu->write_work); + + hu->proto->close(hu); +} +EXPORT_SYMBOL_GPL(hci_uart_unregister_device); diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h index c6e9e1cf63f8..d9cd95d81149 100644 --- a/drivers/bluetooth/hci_uart.h +++ b/drivers/bluetooth/hci_uart.h @@ -112,6 +112,7 @@ struct hci_uart { int hci_uart_register_proto(const struct hci_uart_proto *p); int hci_uart_unregister_proto(const struct hci_uart_proto *p); int hci_uart_register_device(struct hci_uart *hu, const struct hci_uart_proto *p); +void hci_uart_unregister_device(struct hci_uart *hu); int hci_uart_tx_wakeup(struct hci_uart *hu); int hci_uart_init_ready(struct hci_uart *hu); diff --git 
a/drivers/infiniband/hw/bnxt_re/Kconfig b/drivers/infiniband/hw/bnxt_re/Kconfig index 19982a4a9bba..18f5ed082f41 100644 --- a/drivers/infiniband/hw/bnxt_re/Kconfig +++ b/drivers/infiniband/hw/bnxt_re/Kconfig @@ -1,6 +1,7 @@ config INFINIBAND_BNXT_RE tristate "Broadcom Netxtreme HCA support" depends on ETHERNET && NETDEVICES && PCI && INET && DCB + depends on MAY_USE_DEVLINK select NET_VENDOR_BROADCOM select BNXT ---help--- diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c index 9ca691d6c13b..46c189ad8d94 100644 --- a/drivers/isdn/capi/kcapi.c +++ b/drivers/isdn/capi/kcapi.c @@ -55,7 +55,7 @@ struct capictr_event { /* ------------------------------------------------------------- */ -static struct capi_version driver_version = {2, 0, 1, 1 << 4}; +static const struct capi_version driver_version = {2, 0, 1, 1 << 4}; static char driver_serial[CAPI_SERIAL_LEN] = "0004711"; static char capi_manufakturer[64] = "AVM Berlin"; diff --git a/drivers/isdn/hardware/eicon/divacapi.h b/drivers/isdn/hardware/eicon/divacapi.h index a315a2914d70..c4868a0d82f4 100644 --- a/drivers/isdn/hardware/eicon/divacapi.h +++ b/drivers/isdn/hardware/eicon/divacapi.h @@ -26,15 +26,7 @@ /*#define DEBUG */ - - - - - - - - - +#include <linux/types.h> #define IMPLEMENT_DTMF 1 #define IMPLEMENT_LINE_INTERCONNECT2 1 @@ -82,8 +74,6 @@ #define CODEC_PERMANENT 0x02 #define ADV_VOICE 0x03 #define MAX_CIP_TYPES 5 /* kind of CIP types for group optimization */ -#define C_IND_MASK_DWORDS ((MAX_APPL + 32) >> 5) - #define FAX_CONNECT_INFO_BUFFER_SIZE 256 #define NCPI_BUFFER_SIZE 256 @@ -265,8 +255,8 @@ struct _PLCI { word ncci_ring_list; byte inc_dis_ncci_table[MAX_CHANNELS_PER_PLCI]; t_std_internal_command internal_command_queue[MAX_INTERNAL_COMMAND_LEVELS]; - dword c_ind_mask_table[C_IND_MASK_DWORDS]; - dword group_optimization_mask_table[C_IND_MASK_DWORDS]; + DECLARE_BITMAP(c_ind_mask_table, MAX_APPL); + DECLARE_BITMAP(group_optimization_mask_table, MAX_APPL); byte RBuffer[200]; dword msg_in_queue[MSG_IN_QUEUE_SIZE/sizeof(dword)]; API_SAVE saved_msg; diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c index 3b11422b1cce..eadd1ed1e014 100644 --- a/drivers/isdn/hardware/eicon/message.c +++ b/drivers/isdn/hardware/eicon/message.c @@ -23,9 +23,7 @@ * */ - - - +#include <linux/bitmap.h> #include "platform.h" #include "di_defs.h" @@ -35,19 +33,9 @@ #include "mdm_msg.h" #include "divasync.h" - - #define FILE_ "MESSAGE.C" #define dprintf - - - - - - - - /*------------------------------------------------------------------*/ /* This is options supported for all adapters that are server by */ /* XDI driver. 
Allo it is not necessary to ask it from every adapter*/ @@ -72,9 +60,6 @@ static dword diva_xdi_extended_features = 0; /*------------------------------------------------------------------*/ static void group_optimization(DIVA_CAPI_ADAPTER *a, PLCI *plci); -static void set_group_ind_mask(PLCI *plci); -static void clear_group_ind_mask_bit(PLCI *plci, word b); -static byte test_group_ind_mask_bit(PLCI *plci, word b); void AutomaticLaw(DIVA_CAPI_ADAPTER *); word CapiRelease(word); word CapiRegister(word); @@ -1087,106 +1072,6 @@ static void plci_remove(PLCI *plci) } /*------------------------------------------------------------------*/ -/* Application Group function helpers */ -/*------------------------------------------------------------------*/ - -static void set_group_ind_mask(PLCI *plci) -{ - word i; - - for (i = 0; i < C_IND_MASK_DWORDS; i++) - plci->group_optimization_mask_table[i] = 0xffffffffL; -} - -static void clear_group_ind_mask_bit(PLCI *plci, word b) -{ - plci->group_optimization_mask_table[b >> 5] &= ~(1L << (b & 0x1f)); -} - -static byte test_group_ind_mask_bit(PLCI *plci, word b) -{ - return ((plci->group_optimization_mask_table[b >> 5] & (1L << (b & 0x1f))) != 0); -} - -/*------------------------------------------------------------------*/ -/* c_ind_mask operations for arbitrary MAX_APPL */ -/*------------------------------------------------------------------*/ - -static void clear_c_ind_mask(PLCI *plci) -{ - word i; - - for (i = 0; i < C_IND_MASK_DWORDS; i++) - plci->c_ind_mask_table[i] = 0; -} - -static byte c_ind_mask_empty(PLCI *plci) -{ - word i; - - i = 0; - while ((i < C_IND_MASK_DWORDS) && (plci->c_ind_mask_table[i] == 0)) - i++; - return (i == C_IND_MASK_DWORDS); -} - -static void set_c_ind_mask_bit(PLCI *plci, word b) -{ - plci->c_ind_mask_table[b >> 5] |= (1L << (b & 0x1f)); -} - -static void clear_c_ind_mask_bit(PLCI *plci, word b) -{ - plci->c_ind_mask_table[b >> 5] &= ~(1L << (b & 0x1f)); -} - -static byte test_c_ind_mask_bit(PLCI *plci, word b) -{ - return ((plci->c_ind_mask_table[b >> 5] & (1L << (b & 0x1f))) != 0); -} - -static void dump_c_ind_mask(PLCI *plci) -{ - word i, j, k; - dword d; - char *p; - char buf[40]; - - for (i = 0; i < C_IND_MASK_DWORDS; i += 4) - { - p = buf + 36; - *p = '\0'; - for (j = 0; j < 4; j++) - { - if (i + j < C_IND_MASK_DWORDS) - { - d = plci->c_ind_mask_table[i + j]; - for (k = 0; k < 8; k++) - { - *(--p) = hex_asc_lo(d); - d >>= 4; - } - } - else if (i != 0) - { - for (k = 0; k < 8; k++) - *(--p) = ' '; - } - *(--p) = ' '; - } - dbug(1, dprintf("c_ind_mask =%s", (char *) p)); - } -} - - - - - -#define dump_plcis(a) - - - -/*------------------------------------------------------------------*/ /* translation function for each message */ /*------------------------------------------------------------------*/ @@ -1457,13 +1342,13 @@ static byte connect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a, return 1; } else if (plci->State == INC_CON_PENDING || plci->State == INC_CON_ALERT) { - clear_c_ind_mask_bit(plci, (word)(appl->Id - 1)); - dump_c_ind_mask(plci); + __clear_bit(appl->Id - 1, plci->c_ind_mask_table); + dbug(1, dprintf("c_ind_mask =%*pb", MAX_APPL, plci->c_ind_mask_table)); Reject = GET_WORD(parms[0].info); dbug(1, dprintf("Reject=0x%x", Reject)); if (Reject) { - if (c_ind_mask_empty(plci)) + if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL)) { if ((Reject & 0xff00) == 0x3400) { @@ -1553,11 +1438,8 @@ static byte connect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a, sig_req(plci, CALL_RES, 0); } - for (i = 0; i < 
max_appl; i++) { - if (test_c_ind_mask_bit(plci, i)) { - sendf(&application[i], _DISCONNECT_I, Id, 0, "w", _OTHER_APPL_CONNECTED); - } - } + for_each_set_bit(i, plci->c_ind_mask_table, max_appl) + sendf(&application[i], _DISCONNECT_I, Id, 0, "w", _OTHER_APPL_CONNECTED); } } return 1; @@ -1584,13 +1466,10 @@ static byte disconnect_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a, { if (plci->State == INC_CON_PENDING || plci->State == INC_CON_ALERT) { - clear_c_ind_mask_bit(plci, (word)(appl->Id - 1)); + __clear_bit(appl->Id - 1, plci->c_ind_mask_table); plci->appl = appl; - for (i = 0; i < max_appl; i++) - { - if (test_c_ind_mask_bit(plci, i)) - sendf(&application[i], _DISCONNECT_I, Id, 0, "w", 0); - } + for_each_set_bit(i, plci->c_ind_mask_table, max_appl) + sendf(&application[i], _DISCONNECT_I, Id, 0, "w", 0); plci->State = OUTG_DIS_PENDING; } if (plci->Sig.Id && plci->appl) @@ -1634,7 +1513,7 @@ static byte disconnect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a, { /* clear ind mask bit, just in case of collsion of */ /* DISCONNECT_IND and CONNECT_RES */ - clear_c_ind_mask_bit(plci, (word)(appl->Id - 1)); + __clear_bit(appl->Id - 1, plci->c_ind_mask_table); ncci_free_receive_buffers(plci, 0); if (plci_remove_check(plci)) { @@ -1642,7 +1521,7 @@ static byte disconnect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a, } if (plci->State == INC_DIS_PENDING || plci->State == SUSPENDING) { - if (c_ind_mask_empty(plci)) { + if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL)) { if (plci->State != SUSPENDING) plci->State = IDLE; dbug(1, dprintf("chs=%d", plci->channels)); if (!plci->channels) { @@ -3351,13 +3230,11 @@ static byte select_b_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a, } plci->State = INC_CON_CONNECTED_ALERT; plci->appl = appl; - clear_c_ind_mask_bit(plci, (word)(appl->Id - 1)); - dump_c_ind_mask(plci); - for (i = 0; i < max_appl; i++) /* disconnect the other appls */ - { /* its quasi a connect */ - if (test_c_ind_mask_bit(plci, i)) - sendf(&application[i], _DISCONNECT_I, Id, 0, "w", _OTHER_APPL_CONNECTED); - } + __clear_bit(appl->Id - 1, plci->c_ind_mask_table); + dbug(1, dprintf("c_ind_mask =%*pb", MAX_APPL, plci->c_ind_mask_table)); + /* disconnect the other appls its quasi a connect */ + for_each_set_bit(i, plci->c_ind_mask_table, max_appl) + sendf(&application[i], _DISCONNECT_I, Id, 0, "w", _OTHER_APPL_CONNECTED); } api_save_msg(msg, "s", &plci->saved_msg); @@ -5692,19 +5569,17 @@ static void sig_ind(PLCI *plci) cip = find_cip(a, parms[4], parms[6]); cip_mask = 1L << cip; dbug(1, dprintf("cip=%d,cip_mask=%lx", cip, cip_mask)); - clear_c_ind_mask(plci); + bitmap_zero(plci->c_ind_mask_table, MAX_APPL); if (!remove_started && !a->adapter_disabled) { - set_c_ind_mask_bit(plci, MAX_APPL); group_optimization(a, plci); - for (i = 0; i < max_appl; i++) { + for_each_set_bit(i, plci->group_optimization_mask_table, max_appl) { if (application[i].Id && (a->CIP_Mask[i] & 1 || a->CIP_Mask[i] & cip_mask) - && CPN_filter_ok(parms[0], a, i) - && test_group_ind_mask_bit(plci, i)) { + && CPN_filter_ok(parms[0], a, i)) { dbug(1, dprintf("storedcip_mask[%d]=0x%lx", i, a->CIP_Mask[i])); - set_c_ind_mask_bit(plci, i); - dump_c_ind_mask(plci); + __set_bit(i, plci->c_ind_mask_table); + dbug(1, dprintf("c_ind_mask =%*pb", MAX_APPL, plci->c_ind_mask_table)); plci->State = INC_CON_PENDING; plci->call_dir = (plci->call_dir & ~(CALL_DIR_OUT | CALL_DIR_ORIGINATE)) | CALL_DIR_IN | CALL_DIR_ANSWER; @@ -5750,10 +5625,9 @@ static void sig_ind(PLCI *plci) SendMultiIE(plci, Id, multi_pi_parms, PI, 0x210, 
true)); } } - clear_c_ind_mask_bit(plci, MAX_APPL); - dump_c_ind_mask(plci); + dbug(1, dprintf("c_ind_mask =%*pb", MAX_APPL, plci->c_ind_mask_table)); } - if (c_ind_mask_empty(plci)) { + if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL)) { sig_req(plci, HANGUP, 0); send_req(plci); plci->State = IDLE; @@ -5994,13 +5868,13 @@ static void sig_ind(PLCI *plci) break; case RESUME: - clear_c_ind_mask_bit(plci, (word)(plci->appl->Id - 1)); + __clear_bit(plci->appl->Id - 1, plci->c_ind_mask_table); PUT_WORD(&resume_cau[4], GOOD); sendf(plci->appl, _FACILITY_I, Id, 0, "ws", (word)3, resume_cau); break; case SUSPEND: - clear_c_ind_mask(plci); + bitmap_zero(plci->c_ind_mask_table, MAX_APPL); if (plci->NL.Id && !plci->nl_remove_id) { mixer_remove(plci); @@ -6037,15 +5911,12 @@ static void sig_ind(PLCI *plci) if (plci->State == INC_CON_PENDING || plci->State == INC_CON_ALERT) { - for (i = 0; i < max_appl; i++) - { - if (test_c_ind_mask_bit(plci, i)) - sendf(&application[i], _DISCONNECT_I, Id, 0, "w", 0); - } + for_each_set_bit(i, plci->c_ind_mask_table, max_appl) + sendf(&application[i], _DISCONNECT_I, Id, 0, "w", 0); } else { - clear_c_ind_mask(plci); + bitmap_zero(plci->c_ind_mask_table, MAX_APPL); } if (!plci->appl) { @@ -6055,7 +5926,7 @@ static void sig_ind(PLCI *plci) a->listen_active--; } plci->State = INC_DIS_PENDING; - if (c_ind_mask_empty(plci)) + if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL)) { plci->State = IDLE; if (plci->NL.Id && !plci->nl_remove_id) @@ -6341,14 +6212,10 @@ static void SendInfo(PLCI *plci, dword Id, byte **parms, byte iesent) || Info_Number == DSP || Info_Number == UUI) { - for (j = 0; j < max_appl; j++) - { - if (test_c_ind_mask_bit(plci, j)) - { - dbug(1, dprintf("Ovl_Ind")); - iesent = true; - sendf(&application[j], _INFO_I, Id, 0, "wS", Info_Number, Info_Element); - } + for_each_set_bit(j, plci->c_ind_mask_table, max_appl) { + dbug(1, dprintf("Ovl_Ind")); + iesent = true; + sendf(&application[j], _INFO_I, Id, 0, "wS", Info_Number, Info_Element); } } } /* all other signalling states */ @@ -6416,14 +6283,10 @@ static byte SendMultiIE(PLCI *plci, dword Id, byte **parms, byte ie_type, } else if (!plci->appl && Info_Number) { /* overlap receiving broadcast */ - for (j = 0; j < max_appl; j++) - { - if (test_c_ind_mask_bit(plci, j)) - { - iesent = true; - dbug(1, dprintf("Mlt_Ovl_Ind")); - sendf(&application[j] , _INFO_I, Id, 0, "wS", Info_Number, Info_Element); - } + for_each_set_bit(j, plci->c_ind_mask_table, max_appl) { + iesent = true; + dbug(1, dprintf("Mlt_Ovl_Ind")); + sendf(&application[j] , _INFO_I, Id, 0, "wS", Info_Number, Info_Element); } } /* all other signalling states */ else if (Info_Number @@ -7270,7 +7133,6 @@ static word get_plci(DIVA_CAPI_ADAPTER *a) word i, j; PLCI *plci; - dump_plcis(a); for (i = 0; i < a->max_plci && a->plci[i].Id; i++); if (i == a->max_plci) { dbug(1, dprintf("get_plci: out of PLCIs")); @@ -7321,8 +7183,8 @@ static word get_plci(DIVA_CAPI_ADAPTER *a) plci->ncci_ring_list = 0; for (j = 0; j < MAX_CHANNELS_PER_PLCI; j++) plci->inc_dis_ncci_table[j] = 0; - clear_c_ind_mask(plci); - set_group_ind_mask(plci); + bitmap_zero(plci->c_ind_mask_table, MAX_APPL); + bitmap_fill(plci->group_optimization_mask_table, MAX_APPL); plci->fax_connect_info_length = 0; plci->nsf_control_bits = 0; plci->ncpi_state = 0x00; @@ -9373,10 +9235,10 @@ word CapiRelease(word Id) if (plci->State == INC_CON_PENDING || plci->State == INC_CON_ALERT) { - if (test_c_ind_mask_bit(plci, (word)(Id - 1))) + if (test_bit(Id - 1, plci->c_ind_mask_table)) { - 
clear_c_ind_mask_bit(plci, (word)(Id - 1)); - if (c_ind_mask_empty(plci)) + __clear_bit(Id - 1, plci->c_ind_mask_table); + if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL)) { sig_req(plci, HANGUP, 0); send_req(plci); @@ -9384,10 +9246,10 @@ word CapiRelease(word Id) } } } - if (test_c_ind_mask_bit(plci, (word)(Id - 1))) + if (test_bit(Id - 1, plci->c_ind_mask_table)) { - clear_c_ind_mask_bit(plci, (word)(Id - 1)); - if (c_ind_mask_empty(plci)) + __clear_bit(Id - 1, plci->c_ind_mask_table); + if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL)) { if (!plci->appl) { @@ -9452,7 +9314,7 @@ word CapiRelease(word Id) static word plci_remove_check(PLCI *plci) { if (!plci) return true; - if (!plci->NL.Id && c_ind_mask_empty(plci)) + if (!plci->NL.Id && bitmap_empty(plci->c_ind_mask_table, MAX_APPL)) { if (plci->Sig.Id == 0xff) plci->Sig.Id = 0; @@ -14735,7 +14597,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER *a, PLCI *plci) word appl_number_group_type[MAX_APPL]; PLCI *auxplci; - set_group_ind_mask(plci); /* all APPLs within this inc. call are allowed to dial in */ + /* all APPLs within this inc. call are allowed to dial in */ + bitmap_fill(plci->group_optimization_mask_table, MAX_APPL); if (!a->group_optimization_enabled) { @@ -14771,13 +14634,12 @@ static void group_optimization(DIVA_CAPI_ADAPTER *a, PLCI *plci) if (a->plci[k].Id) { auxplci = &a->plci[k]; - if (auxplci->appl == &application[i]) /* application has a busy PLCI */ - { + if (auxplci->appl == &application[i]) { + /* application has a busy PLCI */ busy = true; dbug(1, dprintf("Appl 0x%x is busy", i + 1)); - } - else if (test_c_ind_mask_bit(auxplci, i)) /* application has an incoming call pending */ - { + } else if (test_bit(i, plci->c_ind_mask_table)) { + /* application has an incoming call pending */ busy = true; dbug(1, dprintf("Appl 0x%x has inc. 
call pending", i + 1)); } @@ -14826,7 +14688,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER *a, PLCI *plci) if (appl_number_group_type[i] == appl_number_group_type[j]) { dbug(1, dprintf("Appl 0x%x is member of group 0x%x, no call", j + 1, appl_number_group_type[j])); - clear_group_ind_mask_bit(plci, j); /* disable call on other group members */ + /* disable call on other group members */ + __clear_bit(j, plci->group_optimization_mask_table); appl_number_group_type[j] = 0; /* remove disabled group member from group list */ } } @@ -14834,7 +14697,7 @@ static void group_optimization(DIVA_CAPI_ADAPTER *a, PLCI *plci) } else /* application should not get a call */ { - clear_group_ind_mask_bit(plci, i); + __clear_bit(i, plci->group_optimization_mask_table); } } diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.h b/drivers/isdn/hardware/mISDN/hfcsusb.h index 4157311d569d..5f8f1d9cac11 100644 --- a/drivers/isdn/hardware/mISDN/hfcsusb.h +++ b/drivers/isdn/hardware/mISDN/hfcsusb.h @@ -337,7 +337,7 @@ static const char *HFC_NT_LAYER1_STATES[HFC_MAX_NT_LAYER1_STATE + 1] = { }; /* supported devices */ -static struct usb_device_id hfcsusb_idtab[] = { +static const struct usb_device_id hfcsusb_idtab[] = { { USB_DEVICE(0x0959, 0x2bd0), .driver_info = (unsigned long) &((struct hfcsusb_vdata) diff --git a/drivers/isdn/hisax/hfc_usb.c b/drivers/isdn/hisax/hfc_usb.c index ef4748083efd..e8212185d386 100644 --- a/drivers/isdn/hisax/hfc_usb.c +++ b/drivers/isdn/hisax/hfc_usb.c @@ -65,7 +65,7 @@ typedef struct { } hfcsusb_vdata; /* VID/PID device list */ -static struct usb_device_id hfcusb_idtab[] = { +static const struct usb_device_id hfcusb_idtab[] = { { USB_DEVICE(0x0959, 0x2bd0), .driver_info = (unsigned long) &((hfcsusb_vdata) diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c index a306de4318d7..9375cef22420 100644 --- a/drivers/net/appletalk/ipddp.c +++ b/drivers/net/appletalk/ipddp.c @@ -311,9 +311,7 @@ module_param(ipddp_mode, int, 0); static int __init ipddp_init_module(void) { dev_ipddp = ipddp_init(); - if (IS_ERR(dev_ipddp)) - return PTR_ERR(dev_ipddp); - return 0; + return PTR_ERR_OR_ZERO(dev_ipddp); } static void __exit ipddp_cleanup_module(void) diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h index cbb4f8566bbe..d09b2b46ab63 100644 --- a/drivers/net/arcnet/arcdevice.h +++ b/drivers/net/arcnet/arcdevice.h @@ -20,7 +20,7 @@ #include <linux/if_arcnet.h> #ifdef __KERNEL__ -#include <linux/irqreturn.h> +#include <linux/interrupt.h> /* * RECON_THRESHOLD is the maximum number of RECON messages to receive diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index 01cab9548785..eb7f76753c9c 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -109,7 +109,7 @@ static struct attribute *com20020_state_attrs[] = { NULL, }; -static struct attribute_group com20020_state_group = { +static const struct attribute_group com20020_state_group = { .name = NULL, .attrs = com20020_state_attrs, }; diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 770623a0cc01..040b493f60ae 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -759,7 +759,7 @@ static struct attribute *per_bond_attrs[] = { NULL, }; -static struct attribute_group bonding_group = { +static const struct attribute_group bonding_group = { .name = "bonding", .attrs = per_bond_attrs, }; diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index 
0e0df0ba288c..f37ce0e1b603 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -1232,7 +1232,7 @@ static struct attribute *at91_sysfs_attrs[] = { NULL, }; -static struct attribute_group at91_sysfs_attr_group = { +static const struct attribute_group at91_sysfs_attr_group = { .attrs = at91_sysfs_attrs, }; diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index 2ba1a81500c1..12a53c8e8e1d 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c @@ -1875,7 +1875,7 @@ static struct attribute *ican3_sysfs_attrs[] = { NULL, }; -static struct attribute_group ican3_sysfs_attr_group = { +static const struct attribute_group ican3_sysfs_attr_group = { .attrs = ican3_sysfs_attrs, }; diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 7f36d3e3c98b..274f3679f33d 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1053,49 +1053,6 @@ int b53_vlan_del(struct dsa_switch *ds, int port, } EXPORT_SYMBOL(b53_vlan_del); -int b53_vlan_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_vlan *vlan, - switchdev_obj_dump_cb_t *cb) -{ - struct b53_device *dev = ds->priv; - u16 vid, vid_start = 0, pvid; - struct b53_vlan *vl; - int err = 0; - - if (is5325(dev) || is5365(dev)) - vid_start = 1; - - b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid); - - /* Use our software cache for dumps, since we do not have any HW - * operation returning only the used/valid VLANs - */ - for (vid = vid_start; vid < dev->num_vlans; vid++) { - vl = &dev->vlans[vid]; - - if (!vl->valid) - continue; - - if (!(vl->members & BIT(port))) - continue; - - vlan->vid_begin = vlan->vid_end = vid; - vlan->flags = 0; - - if (vl->untag & BIT(port)) - vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED; - if (pvid == vid) - vlan->flags |= BRIDGE_VLAN_INFO_PVID; - - err = cb(&vlan->obj); - if (err) - break; - } - - return err; -} -EXPORT_SYMBOL(b53_vlan_dump); - /* Address Resolution Logic routines */ static int b53_arl_op_wait(struct b53_device *dev) { @@ -1213,9 +1170,8 @@ static int b53_arl_op(struct b53_device *dev, int op, int port, return b53_arl_rw_op(dev, 0); } -int b53_fdb_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) +int b53_fdb_add(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid) { struct b53_device *priv = ds->priv; @@ -1225,27 +1181,16 @@ int b53_fdb_prepare(struct dsa_switch *ds, int port, if (is5325(priv) || is5365(priv)) return -EOPNOTSUPP; - return 0; -} -EXPORT_SYMBOL(b53_fdb_prepare); - -void b53_fdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) -{ - struct b53_device *priv = ds->priv; - - if (b53_arl_op(priv, 0, port, fdb->addr, fdb->vid, true)) - pr_err("%s: failed to add MAC address\n", __func__); + return b53_arl_op(priv, 0, port, addr, vid, true); } EXPORT_SYMBOL(b53_fdb_add); int b53_fdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb) + const unsigned char *addr, u16 vid) { struct b53_device *priv = ds->priv; - return b53_arl_op(priv, 0, port, fdb->addr, fdb->vid, false); + return b53_arl_op(priv, 0, port, addr, vid, false); } EXPORT_SYMBOL(b53_fdb_del); @@ -1282,8 +1227,7 @@ static void b53_arl_search_rd(struct b53_device *dev, u8 idx, } static int b53_fdb_copy(int port, const struct b53_arl_entry *ent, - struct switchdev_obj_port_fdb *fdb, - switchdev_obj_dump_cb_t *cb) + 
dsa_fdb_dump_cb_t *cb, void *data) { if (!ent->is_valid) return 0; @@ -1291,16 +1235,11 @@ static int b53_fdb_copy(int port, const struct b53_arl_entry *ent, if (port != ent->port) return 0; - ether_addr_copy(fdb->addr, ent->mac); - fdb->vid = ent->vid; - fdb->ndm_state = ent->is_static ? NUD_NOARP : NUD_REACHABLE; - - return cb(&fdb->obj); + return cb(ent->mac, ent->vid, ent->is_static, data); } int b53_fdb_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_fdb *fdb, - switchdev_obj_dump_cb_t *cb) + dsa_fdb_dump_cb_t *cb, void *data) { struct b53_device *priv = ds->priv; struct b53_arl_entry results[2]; @@ -1318,13 +1257,13 @@ int b53_fdb_dump(struct dsa_switch *ds, int port, return ret; b53_arl_search_rd(priv, 0, &results[0]); - ret = b53_fdb_copy(port, &results[0], fdb, cb); + ret = b53_fdb_copy(port, &results[0], cb, data); if (ret) return ret; if (priv->num_arl_entries > 2) { b53_arl_search_rd(priv, 1, &results[1]); - ret = b53_fdb_copy(port, &results[1], fdb, cb); + ret = b53_fdb_copy(port, &results[1], cb, data); if (ret) return ret; @@ -1564,8 +1503,6 @@ static const struct dsa_switch_ops b53_switch_ops = { .port_vlan_prepare = b53_vlan_prepare, .port_vlan_add = b53_vlan_add, .port_vlan_del = b53_vlan_del, - .port_vlan_dump = b53_vlan_dump, - .port_fdb_prepare = b53_fdb_prepare, .port_fdb_dump = b53_fdb_dump, .port_fdb_add = b53_fdb_add, .port_fdb_del = b53_fdb_del, diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 155a9c48c317..01bd8cbe9a3f 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -393,20 +393,12 @@ void b53_vlan_add(struct dsa_switch *ds, int port, struct switchdev_trans *trans); int b53_vlan_del(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan); -int b53_vlan_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_vlan *vlan, - switchdev_obj_dump_cb_t *cb); -int b53_fdb_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans); -void b53_fdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans); +int b53_fdb_add(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid); int b53_fdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb); + const unsigned char *addr, u16 vid); int b53_fdb_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_fdb *fdb, - switchdev_obj_dump_cb_t *cb); + dsa_fdb_dump_cb_t *cb, void *data); int b53_mirror_add(struct dsa_switch *ds, int port, struct dsa_mall_mirror_tc_entry *mirror, bool ingress); void b53_mirror_del(struct dsa_switch *ds, int port, diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 648f91b58d1e..bbcb4053e04e 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -327,12 +327,8 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port, static int bcm_sf2_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy) { - struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); - struct ethtool_eee *p = &priv->port_sts[port].eee; int ret; - p->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full); - ret = phy_init_eee(phy, 0); if (ret) return 0; @@ -342,8 +338,8 @@ static int bcm_sf2_eee_init(struct dsa_switch *ds, int port, return 1; } -static int bcm_sf2_sw_get_eee(struct dsa_switch *ds, int port, - struct ethtool_eee *e) +static int bcm_sf2_sw_get_mac_eee(struct dsa_switch *ds, int 
port, + struct ethtool_eee *e) { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); struct ethtool_eee *p = &priv->port_sts[port].eee; @@ -356,22 +352,14 @@ static int bcm_sf2_sw_get_eee(struct dsa_switch *ds, int port, return 0; } -static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port, - struct phy_device *phydev, - struct ethtool_eee *e) +static int bcm_sf2_sw_set_mac_eee(struct dsa_switch *ds, int port, + struct ethtool_eee *e) { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); struct ethtool_eee *p = &priv->port_sts[port].eee; p->eee_enabled = e->eee_enabled; - - if (!p->eee_enabled) { - bcm_sf2_eee_enable_set(ds, port, false); - } else { - p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev); - if (!p->eee_enabled) - return -EOPNOTSUPP; - } + bcm_sf2_eee_enable_set(ds, port, e->eee_enabled); return 0; } @@ -800,7 +788,7 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds) static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port, struct ethtool_wolinfo *wol) { - struct net_device *p = ds->dst[ds->index].cpu_dp->netdev; + struct net_device *p = ds->dst->cpu_dp->netdev; struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); struct ethtool_wolinfo pwol; @@ -823,7 +811,7 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port, static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port, struct ethtool_wolinfo *wol) { - struct net_device *p = ds->dst[ds->index].cpu_dp->netdev; + struct net_device *p = ds->dst->cpu_dp->netdev; struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); s8 cpu_port = ds->dst->cpu_dp->index; struct ethtool_wolinfo pwol; @@ -1023,8 +1011,8 @@ static const struct dsa_switch_ops bcm_sf2_ops = { .set_wol = bcm_sf2_sw_set_wol, .port_enable = bcm_sf2_port_setup, .port_disable = bcm_sf2_port_disable, - .get_eee = bcm_sf2_sw_get_eee, - .set_eee = bcm_sf2_sw_set_eee, + .get_mac_eee = bcm_sf2_sw_get_mac_eee, + .set_mac_eee = bcm_sf2_sw_set_mac_eee, .port_bridge_join = b53_br_join, .port_bridge_leave = b53_br_leave, .port_stp_state_set = b53_br_set_stp_state, @@ -1033,8 +1021,6 @@ static const struct dsa_switch_ops bcm_sf2_ops = { .port_vlan_prepare = b53_vlan_prepare, .port_vlan_add = b53_vlan_add, .port_vlan_del = b53_vlan_del, - .port_vlan_dump = b53_vlan_dump, - .port_fdb_prepare = b53_fdb_prepare, .port_fdb_dump = b53_fdb_dump, .port_fdb_add = b53_fdb_add, .port_fdb_del = b53_fdb_del, diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index fdd8f3872102..7819a9fe8321 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -257,44 +257,7 @@ static int dsa_loop_port_vlan_del(struct dsa_switch *ds, int port, return 0; } -static int dsa_loop_port_vlan_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_vlan *vlan, - switchdev_obj_dump_cb_t *cb) -{ - struct dsa_loop_priv *ps = ds->priv; - struct mii_bus *bus = ps->bus; - struct dsa_loop_vlan *vl; - u16 vid, vid_start = 0; - int err = 0; - - dev_dbg(ds->dev, "%s\n", __func__); - - /* Just do a sleeping operation to make lockdep checks effective */ - mdiobus_read(bus, ps->port_base + port, MII_BMSR); - - for (vid = vid_start; vid < DSA_LOOP_VLANS; vid++) { - vl = &ps->vlans[vid]; - - if (!(vl->members & BIT(port))) - continue; - - vlan->vid_begin = vlan->vid_end = vid; - vlan->flags = 0; - - if (vl->untagged & BIT(port)) - vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED; - if (ps->pvid == vid) - vlan->flags |= BRIDGE_VLAN_INFO_PVID; - - err = cb(&vlan->obj); - if (err) - break; - } - - return err; -} - -static struct dsa_switch_ops dsa_loop_driver = { +static const 
struct dsa_switch_ops dsa_loop_driver = { .get_tag_protocol = dsa_loop_get_protocol, .setup = dsa_loop_setup, .get_strings = dsa_loop_get_strings, @@ -310,7 +273,6 @@ static struct dsa_switch_ops dsa_loop_driver = { .port_vlan_prepare = dsa_loop_port_vlan_prepare, .port_vlan_add = dsa_loop_port_vlan_add, .port_vlan_del = dsa_loop_port_vlan_del, - .port_vlan_dump = dsa_loop_port_vlan_dump, }; static int dsa_loop_drv_probe(struct mdio_device *mdiodev) diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index cd76e61f1fca..b471413d3df9 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -20,6 +20,11 @@ #include "lan9303.h" +#define LAN9303_NUM_PORTS 3 + +/* 13.2 System Control and Status Registers + * Multiply register number by 4 to get address offset. + */ #define LAN9303_CHIP_REV 0x14 # define LAN9303_CHIP_ID 0x9303 #define LAN9303_IRQ_CFG 0x15 @@ -53,6 +58,9 @@ #define LAN9303_VIRT_PHY_BASE 0x70 #define LAN9303_VIRT_SPECIAL_CTRL 0x77 +/*13.4 Switch Fabric Control and Status Registers + * Accessed indirectly via SWITCH_CSR_CMD, SWITCH_CSR_DATA. + */ #define LAN9303_SW_DEV_ID 0x0000 #define LAN9303_SW_RESET 0x0001 #define LAN9303_SW_RESET_RESET BIT(0) @@ -153,9 +161,7 @@ # define LAN9303_BM_EGRSS_PORT_TYPE_SPECIAL_TAG_PORT1 (BIT(9) | BIT(8)) # define LAN9303_BM_EGRSS_PORT_TYPE_SPECIAL_TAG_PORT0 (BIT(1) | BIT(0)) -#define LAN9303_PORT_0_OFFSET 0x400 -#define LAN9303_PORT_1_OFFSET 0x800 -#define LAN9303_PORT_2_OFFSET 0xc00 +#define LAN9303_SWITCH_PORT_REG(port, reg0) (0x400 * (port) + (reg0)) /* the built-in PHYs are of type LAN911X */ #define MII_LAN911X_SPECIAL_MODES 0x12 @@ -242,7 +248,7 @@ static int lan9303_virt_phy_reg_write(struct lan9303 *chip, int regnum, u16 val) return regmap_write(chip->regmap, LAN9303_VIRT_PHY_BASE + regnum, val); } -static int lan9303_port_phy_reg_wait_for_completion(struct lan9303 *chip) +static int lan9303_indirect_phy_wait_for_completion(struct lan9303 *chip) { int ret, i; u32 reg; @@ -262,7 +268,7 @@ static int lan9303_port_phy_reg_wait_for_completion(struct lan9303 *chip) return -EIO; } -static int lan9303_port_phy_reg_read(struct lan9303 *chip, int addr, int regnum) +static int lan9303_indirect_phy_read(struct lan9303 *chip, int addr, int regnum) { int ret; u32 val; @@ -272,7 +278,7 @@ static int lan9303_port_phy_reg_read(struct lan9303 *chip, int addr, int regnum) mutex_lock(&chip->indirect_mutex); - ret = lan9303_port_phy_reg_wait_for_completion(chip); + ret = lan9303_indirect_phy_wait_for_completion(chip); if (ret) goto on_error; @@ -281,7 +287,7 @@ static int lan9303_port_phy_reg_read(struct lan9303 *chip, int addr, int regnum) if (ret) goto on_error; - ret = lan9303_port_phy_reg_wait_for_completion(chip); + ret = lan9303_indirect_phy_wait_for_completion(chip); if (ret) goto on_error; @@ -299,8 +305,8 @@ on_error: return ret; } -static int lan9303_phy_reg_write(struct lan9303 *chip, int addr, int regnum, - unsigned int val) +static int lan9303_indirect_phy_write(struct lan9303 *chip, int addr, + int regnum, u16 val) { int ret; u32 reg; @@ -311,7 +317,7 @@ static int lan9303_phy_reg_write(struct lan9303 *chip, int addr, int regnum, mutex_lock(&chip->indirect_mutex); - ret = lan9303_port_phy_reg_wait_for_completion(chip); + ret = lan9303_indirect_phy_wait_for_completion(chip); if (ret) goto on_error; @@ -328,6 +334,12 @@ on_error: return ret; } +const struct lan9303_phy_ops lan9303_indirect_phy_ops = { + .phy_read = lan9303_indirect_phy_read, + .phy_write = lan9303_indirect_phy_write, +}; 
+EXPORT_SYMBOL_GPL(lan9303_indirect_phy_ops); + static int lan9303_switch_wait_for_completion(struct lan9303 *chip) { int ret, i; @@ -416,6 +428,20 @@ on_error: return ret; } +static int lan9303_write_switch_port(struct lan9303 *chip, int port, + u16 regnum, u32 val) +{ + return lan9303_write_switch_reg( + chip, LAN9303_SWITCH_PORT_REG(port, regnum), val); +} + +static int lan9303_read_switch_port(struct lan9303 *chip, int port, + u16 regnum, u32 *val) +{ + return lan9303_read_switch_reg( + chip, LAN9303_SWITCH_PORT_REG(port, regnum), val); +} + static int lan9303_detect_phy_setup(struct lan9303 *chip) { int reg; @@ -427,14 +453,15 @@ static int lan9303_detect_phy_setup(struct lan9303 *chip) * Special reg 18 of phy 3 reads as 0x0000, if 'phy_addr_sel_strap' is 0 * and the IDs are 0-1-2, else it contains something different from * 0x0000, which means 'phy_addr_sel_strap' is 1 and the IDs are 1-2-3. + * 0xffff is returned on MDIO read with no response. */ - reg = lan9303_port_phy_reg_read(chip, 3, MII_LAN911X_SPECIAL_MODES); + reg = chip->ops->phy_read(chip, 3, MII_LAN911X_SPECIAL_MODES); if (reg < 0) { dev_err(chip->dev, "Failed to detect phy config: %d\n", reg); return reg; } - if (reg != 0) + if ((reg != 0) && (reg != 0xffff)) chip->phy_addr_sel_strap = 1; else chip->phy_addr_sel_strap = 0; @@ -445,40 +472,37 @@ static int lan9303_detect_phy_setup(struct lan9303 *chip) return 0; } -#define LAN9303_MAC_RX_CFG_OFFS (LAN9303_MAC_RX_CFG_0 - LAN9303_PORT_0_OFFSET) -#define LAN9303_MAC_TX_CFG_OFFS (LAN9303_MAC_TX_CFG_0 - LAN9303_PORT_0_OFFSET) - -static int lan9303_disable_packet_processing(struct lan9303 *chip, - unsigned int port) +static int lan9303_disable_processing_port(struct lan9303 *chip, + unsigned int port) { int ret; /* disable RX, but keep register reset default values else */ - ret = lan9303_write_switch_reg(chip, LAN9303_MAC_RX_CFG_OFFS + port, - LAN9303_MAC_RX_CFG_X_REJECT_MAC_TYPES); + ret = lan9303_write_switch_port(chip, port, LAN9303_MAC_RX_CFG_0, + LAN9303_MAC_RX_CFG_X_REJECT_MAC_TYPES); if (ret) return ret; /* disable TX, but keep register reset default values else */ - return lan9303_write_switch_reg(chip, LAN9303_MAC_TX_CFG_OFFS + port, + return lan9303_write_switch_port(chip, port, LAN9303_MAC_TX_CFG_0, LAN9303_MAC_TX_CFG_X_TX_IFG_CONFIG_DEFAULT | LAN9303_MAC_TX_CFG_X_TX_PAD_ENABLE); } -static int lan9303_enable_packet_processing(struct lan9303 *chip, - unsigned int port) +static int lan9303_enable_processing_port(struct lan9303 *chip, + unsigned int port) { int ret; /* enable RX and keep register reset default values else */ - ret = lan9303_write_switch_reg(chip, LAN9303_MAC_RX_CFG_OFFS + port, - LAN9303_MAC_RX_CFG_X_REJECT_MAC_TYPES | - LAN9303_MAC_RX_CFG_X_RX_ENABLE); + ret = lan9303_write_switch_port(chip, port, LAN9303_MAC_RX_CFG_0, + LAN9303_MAC_RX_CFG_X_REJECT_MAC_TYPES | + LAN9303_MAC_RX_CFG_X_RX_ENABLE); if (ret) return ret; /* enable TX and keep register reset default values else */ - return lan9303_write_switch_reg(chip, LAN9303_MAC_TX_CFG_OFFS + port, + return lan9303_write_switch_port(chip, port, LAN9303_MAC_TX_CFG_0, LAN9303_MAC_TX_CFG_X_TX_IFG_CONFIG_DEFAULT | LAN9303_MAC_TX_CFG_X_TX_PAD_ENABLE | LAN9303_MAC_TX_CFG_X_TX_ENABLE); @@ -543,15 +567,16 @@ static int lan9303_handle_reset(struct lan9303 *chip) /* stop processing packets for all ports */ static int lan9303_disable_processing(struct lan9303 *chip) { - int ret; + int p; - ret = lan9303_disable_packet_processing(chip, LAN9303_PORT_0_OFFSET); - if (ret) - return ret; - ret = 
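The lan9303 hunks above route all PHY register access through an ops table (chip->ops->phy_read / chip->ops->phy_write) instead of calling the indirect accessors directly, so a bus-specific front end (for example an MDIO-managed variant) can plug in its own accessors. A sketch of the indirection; the structure layout is inferred from the call sites visible in this diff, not copied from lan9303.h, which is not part of this excerpt:

    /* Assumed layout, derived from chip->ops->phy_read(chip, phy, regnum)
     * and chip->ops->phy_write(chip, phy, regnum, val) in the hunks above.
     */
    struct lan9303_phy_ops {
            int (*phy_read)(struct lan9303 *chip, int port, int regnum);
            int (*phy_write)(struct lan9303 *chip, int port,
                             int regnum, u16 val);
    };

    /* DSA-facing accessor from the diff: it no longer cares which bus
     * backs the chip, it just dispatches through the ops table.
     */
    static int lan9303_phy_read(struct dsa_switch *ds, int phy, int regnum)
    {
            struct lan9303 *chip = ds->priv;

            return chip->ops->phy_read(chip, phy, regnum);
    }

In the same spirit, the three per-port offset constants collapse into LAN9303_SWITCH_PORT_REG(port, reg0) = 0x400 * (port) + (reg0), so for example port 2's RX configuration register is addressed as 0x400 * 2 + LAN9303_MAC_RX_CFG_0 via the lan9303_read_switch_port()/lan9303_write_switch_port() helpers.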
lan9303_disable_packet_processing(chip, LAN9303_PORT_1_OFFSET); - if (ret) - return ret; - return lan9303_disable_packet_processing(chip, LAN9303_PORT_2_OFFSET); + for (p = 0; p < LAN9303_NUM_PORTS; p++) { + int ret = lan9303_disable_processing_port(chip, p); + + if (ret) + return ret; + } + + return 0; } static int lan9303_check_device(struct lan9303 *chip) @@ -621,7 +646,7 @@ static int lan9303_setup(struct dsa_switch *ds) if (ret) dev_err(chip->dev, "failed to separate ports %d\n", ret); - ret = lan9303_enable_packet_processing(chip, LAN9303_PORT_0_OFFSET); + ret = lan9303_enable_processing_port(chip, 0); if (ret) dev_err(chip->dev, "failed to re-enable switching %d\n", ret); @@ -687,19 +712,18 @@ static void lan9303_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data) { struct lan9303 *chip = ds->priv; - u32 reg; - unsigned int u, poff; - int ret; - - poff = port * 0x400; + unsigned int u; for (u = 0; u < ARRAY_SIZE(lan9303_mib); u++) { - ret = lan9303_read_switch_reg(chip, - lan9303_mib[u].offset + poff, - &reg); + u32 reg; + int ret; + + ret = lan9303_read_switch_port( + chip, port, lan9303_mib[u].offset, &reg); + if (ret) - dev_warn(chip->dev, "Reading status reg %u failed\n", - lan9303_mib[u].offset + poff); + dev_warn(chip->dev, "Reading status port %d reg %u failed\n", + port, lan9303_mib[u].offset); data[u] = reg; } } @@ -719,7 +743,7 @@ static int lan9303_phy_read(struct dsa_switch *ds, int phy, int regnum) if (phy > phy_base + 2) return -ENODEV; - return lan9303_port_phy_reg_read(chip, phy, regnum); + return chip->ops->phy_read(chip, phy, regnum); } static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum, @@ -733,7 +757,7 @@ static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum, if (phy > phy_base + 2) return -ENODEV; - return lan9303_phy_reg_write(chip, phy, regnum, val); + return chip->ops->phy_write(chip, phy, regnum, val); } static int lan9303_port_enable(struct dsa_switch *ds, int port, @@ -744,11 +768,8 @@ static int lan9303_port_enable(struct dsa_switch *ds, int port, /* enable internal packet processing */ switch (port) { case 1: - return lan9303_enable_packet_processing(chip, - LAN9303_PORT_1_OFFSET); case 2: - return lan9303_enable_packet_processing(chip, - LAN9303_PORT_2_OFFSET); + return lan9303_enable_processing_port(chip, port); default: dev_dbg(chip->dev, "Error: request to power up invalid port %d\n", port); @@ -765,14 +786,10 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port, /* disable internal packet processing */ switch (port) { case 1: - lan9303_disable_packet_processing(chip, LAN9303_PORT_1_OFFSET); - lan9303_phy_reg_write(chip, chip->phy_addr_sel_strap + 1, - MII_BMCR, BMCR_PDOWN); - break; case 2: - lan9303_disable_packet_processing(chip, LAN9303_PORT_2_OFFSET); - lan9303_phy_reg_write(chip, chip->phy_addr_sel_strap + 2, - MII_BMCR, BMCR_PDOWN); + lan9303_disable_processing_port(chip, port); + lan9303_phy_write(ds, chip->phy_addr_sel_strap + port, + MII_BMCR, BMCR_PDOWN); break; default: dev_dbg(chip->dev, @@ -780,7 +797,7 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port, } } -static struct dsa_switch_ops lan9303_switch_ops = { +static const struct dsa_switch_ops lan9303_switch_ops = { .get_tag_protocol = lan9303_get_tag_protocol, .setup = lan9303_setup, .get_strings = lan9303_get_strings, @@ -794,7 +811,7 @@ static struct dsa_switch_ops lan9303_switch_ops = { static int lan9303_register_switch(struct lan9303 *chip) { - chip->ds = dsa_switch_alloc(chip->dev, DSA_MAX_PORTS); +
chip->ds = dsa_switch_alloc(chip->dev, LAN9303_NUM_PORTS); if (!chip->ds) return -ENOMEM; diff --git a/drivers/net/dsa/lan9303.h b/drivers/net/dsa/lan9303.h index d1512dad2d90..4d8be555ff4d 100644 --- a/drivers/net/dsa/lan9303.h +++ b/drivers/net/dsa/lan9303.h @@ -2,6 +2,15 @@ #include <linux/device.h> #include <net/dsa.h> +struct lan9303; + +struct lan9303_phy_ops { + /* PHY 1 and 2 access */ + int (*phy_read)(struct lan9303 *chip, int port, int regnum); + int (*phy_write)(struct lan9303 *chip, int port, + int regnum, u16 val); +}; + struct lan9303 { struct device *dev; struct regmap *regmap; @@ -11,9 +20,11 @@ struct lan9303 { bool phy_addr_sel_strap; struct dsa_switch *ds; struct mutex indirect_mutex; /* protect indexed register access */ + const struct lan9303_phy_ops *ops; }; extern const struct regmap_access_table lan9303_register_set; +extern const struct lan9303_phy_ops lan9303_indirect_phy_ops; int lan9303_probe(struct lan9303 *chip, struct device_node *np); int lan9303_remove(struct lan9303 *chip); diff --git a/drivers/net/dsa/lan9303_i2c.c b/drivers/net/dsa/lan9303_i2c.c index ab3ce0da5071..24ec20f7f444 100644 --- a/drivers/net/dsa/lan9303_i2c.c +++ b/drivers/net/dsa/lan9303_i2c.c @@ -63,6 +63,8 @@ static int lan9303_i2c_probe(struct i2c_client *client, i2c_set_clientdata(client, sw_dev); sw_dev->chip.dev = &client->dev; + sw_dev->chip.ops = &lan9303_indirect_phy_ops; + ret = lan9303_probe(&sw_dev->chip, client->dev.of_node); if (ret != 0) return ret; diff --git a/drivers/net/dsa/lan9303_mdio.c b/drivers/net/dsa/lan9303_mdio.c index 93c36c0541cf..fc16668a487f 100644 --- a/drivers/net/dsa/lan9303_mdio.c +++ b/drivers/net/dsa/lan9303_mdio.c @@ -40,6 +40,7 @@ static int lan9303_mdio_write(void *ctx, uint32_t reg, uint32_t val) { struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx; + reg <<= 2; /* reg num to offset */ mutex_lock(&sw_dev->device->bus->mdio_lock); lan9303_mdio_real_write(sw_dev->device, reg, val & 0xffff); lan9303_mdio_real_write(sw_dev->device, reg + 2, (val >> 16) & 0xffff); @@ -57,6 +58,7 @@ static int lan9303_mdio_read(void *ctx, uint32_t reg, uint32_t *val) { struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx; + reg <<= 2; /* reg num to offset */ mutex_lock(&sw_dev->device->bus->mdio_lock); *val = lan9303_mdio_real_read(sw_dev->device, reg); *val |= (lan9303_mdio_real_read(sw_dev->device, reg + 2) << 16); @@ -65,6 +67,25 @@ static int lan9303_mdio_read(void *ctx, uint32_t reg, uint32_t *val) return 0; } +int lan9303_mdio_phy_write(struct lan9303 *chip, int phy, int reg, u16 val) +{ + struct lan9303_mdio *sw_dev = dev_get_drvdata(chip->dev); + + return mdiobus_write_nested(sw_dev->device->bus, phy, reg, val); +} + +int lan9303_mdio_phy_read(struct lan9303 *chip, int phy, int reg) +{ + struct lan9303_mdio *sw_dev = dev_get_drvdata(chip->dev); + + return mdiobus_read_nested(sw_dev->device->bus, phy, reg); +} + +static const struct lan9303_phy_ops lan9303_mdio_phy_ops = { + .phy_read = lan9303_mdio_phy_read, + .phy_write = lan9303_mdio_phy_write, +}; + static const struct regmap_config lan9303_mdio_regmap_config = { .reg_bits = 8, .val_bits = 32, @@ -106,6 +127,8 @@ static int lan9303_mdio_probe(struct mdio_device *mdiodev) dev_set_drvdata(&mdiodev->dev, sw_dev); sw_dev->chip.dev = &mdiodev->dev; + sw_dev->chip.ops = &lan9303_mdio_phy_ops; + ret = lan9303_probe(&sw_dev->chip, mdiodev->dev.of_node); if (ret != 0) return ret; diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index b313ecdf2919..56cd6d365352 100644
--- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -638,55 +638,6 @@ static int ksz_port_vlan_del(struct dsa_switch *ds, int port, return 0; } -static int ksz_port_vlan_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_vlan *vlan, - switchdev_obj_dump_cb_t *cb) -{ - struct ksz_device *dev = ds->priv; - u16 vid; - u16 data; - struct vlan_table *vlan_cache; - int err = 0; - - mutex_lock(&dev->vlan_mutex); - - /* use dev->vlan_cache due to lack of searching valid vlan entry */ - for (vid = vlan->vid_begin; vid < dev->num_vlans; vid++) { - vlan_cache = &dev->vlan_cache[vid]; - - if (!(vlan_cache->table[0] & VLAN_VALID)) - continue; - - vlan->vid_begin = vid; - vlan->vid_end = vid; - vlan->flags = 0; - if (vlan_cache->table[2] & BIT(port)) { - if (vlan_cache->table[1] & BIT(port)) - vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED; - ksz_pread16(dev, port, REG_PORT_DEFAULT_VID, &data); - if (vid == (data & 0xFFFFF)) - vlan->flags |= BRIDGE_VLAN_INFO_PVID; - - err = cb(&vlan->obj); - if (err) - break; - } - } - - mutex_unlock(&dev->vlan_mutex); - - return err; -} - -static int ksz_port_fdb_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) -{ - /* nothing needed */ - - return 0; -} - struct alu_struct { /* entry 1 */ u8 is_static:1; @@ -706,30 +657,31 @@ struct alu_struct { u8 mac[ETH_ALEN]; }; -static void ksz_port_fdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) +static int ksz_port_fdb_add(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid) { struct ksz_device *dev = ds->priv; u32 alu_table[4]; u32 data; + int ret = 0; mutex_lock(&dev->alu_mutex); /* find any entry with mac & vid */ - data = fdb->vid << ALU_FID_INDEX_S; - data |= ((fdb->addr[0] << 8) | fdb->addr[1]); + data = vid << ALU_FID_INDEX_S; + data |= ((addr[0] << 8) | addr[1]); ksz_write32(dev, REG_SW_ALU_INDEX_0, data); - data = ((fdb->addr[2] << 24) | (fdb->addr[3] << 16)); - data |= ((fdb->addr[4] << 8) | fdb->addr[5]); + data = ((addr[2] << 24) | (addr[3] << 16)); + data |= ((addr[4] << 8) | addr[5]); ksz_write32(dev, REG_SW_ALU_INDEX_1, data); /* start read operation */ ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START); /* wait to be finished */ - if (wait_alu_ready(dev, ALU_START, 1000) < 0) { + ret = wait_alu_ready(dev, ALU_START, 1000); + if (ret < 0) { dev_dbg(dev->dev, "Failed to read ALU\n"); goto exit; } @@ -740,27 +692,30 @@ static void ksz_port_fdb_add(struct dsa_switch *ds, int port, /* update ALU entry */ alu_table[0] = ALU_V_STATIC_VALID; alu_table[1] |= BIT(port); - if (fdb->vid) + if (vid) alu_table[1] |= ALU_V_USE_FID; - alu_table[2] = (fdb->vid << ALU_V_FID_S); - alu_table[2] |= ((fdb->addr[0] << 8) | fdb->addr[1]); - alu_table[3] = ((fdb->addr[2] << 24) | (fdb->addr[3] << 16)); - alu_table[3] |= ((fdb->addr[4] << 8) | fdb->addr[5]); + alu_table[2] = (vid << ALU_V_FID_S); + alu_table[2] |= ((addr[0] << 8) | addr[1]); + alu_table[3] = ((addr[2] << 24) | (addr[3] << 16)); + alu_table[3] |= ((addr[4] << 8) | addr[5]); write_table(ds, alu_table); ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START); /* wait to be finished */ - if (wait_alu_ready(dev, ALU_START, 1000) < 0) - dev_dbg(dev->dev, "Failed to read ALU\n"); + ret = wait_alu_ready(dev, ALU_START, 1000); + if (ret < 0) + dev_dbg(dev->dev, "Failed to write ALU\n"); exit: mutex_unlock(&dev->alu_mutex); + + return ret; } static int 
ksz_port_fdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb) + const unsigned char *addr, u16 vid) { struct ksz_device *dev = ds->priv; u32 alu_table[4]; @@ -770,12 +725,12 @@ static int ksz_port_fdb_del(struct dsa_switch *ds, int port, mutex_lock(&dev->alu_mutex); /* read any entry with mac & vid */ - data = fdb->vid << ALU_FID_INDEX_S; - data |= ((fdb->addr[0] << 8) | fdb->addr[1]); + data = vid << ALU_FID_INDEX_S; + data |= ((addr[0] << 8) | addr[1]); ksz_write32(dev, REG_SW_ALU_INDEX_0, data); - data = ((fdb->addr[2] << 24) | (fdb->addr[3] << 16)); - data |= ((fdb->addr[4] << 8) | fdb->addr[5]); + data = ((addr[2] << 24) | (addr[3] << 16)); + data |= ((addr[4] << 8) | addr[5]); ksz_write32(dev, REG_SW_ALU_INDEX_1, data); /* start read operation */ @@ -850,12 +805,11 @@ static void convert_alu(struct alu_struct *alu, u32 *alu_table) } static int ksz_port_fdb_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_fdb *fdb, - switchdev_obj_dump_cb_t *cb) + dsa_fdb_dump_cb_t *cb, void *data) { struct ksz_device *dev = ds->priv; int ret = 0; - u32 data; + u32 ksz_data; u32 alu_table[4]; struct alu_struct alu; int timeout; @@ -868,8 +822,8 @@ static int ksz_port_fdb_dump(struct dsa_switch *ds, int port, do { timeout = 1000; do { - ksz_read32(dev, REG_SW_ALU_CTRL__4, &data); - if ((data & ALU_VALID) || !(data & ALU_START)) + ksz_read32(dev, REG_SW_ALU_CTRL__4, &ksz_data); + if ((ksz_data & ALU_VALID) || !(ksz_data & ALU_START)) break; usleep_range(1, 10); } while (timeout-- > 0); @@ -886,18 +840,11 @@ static int ksz_port_fdb_dump(struct dsa_switch *ds, int port, convert_alu(&alu, alu_table); if (alu.port_forward & BIT(port)) { - fdb->vid = alu.fid; - if (alu.is_static) - fdb->ndm_state = NUD_NOARP; - else - fdb->ndm_state = NUD_REACHABLE; - ether_addr_copy(fdb->addr, alu.mac); - - ret = cb(&fdb->obj); + ret = cb(alu.mac, alu.fid, alu.is_static, data); if (ret) goto exit; } - } while (data & ALU_START); + } while (ksz_data & ALU_START); exit: @@ -1065,14 +1012,6 @@ exit: return ret; } -static int ksz_port_mdb_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_mdb *mdb, - switchdev_obj_dump_cb_t *cb) -{ - /* this is not called by switch layer */ - return 0; -} - static int ksz_port_mirror_add(struct dsa_switch *ds, int port, struct dsa_mall_mirror_tc_entry *mirror, bool ingress) @@ -1129,15 +1068,12 @@ static const struct dsa_switch_ops ksz_switch_ops = { .port_vlan_prepare = ksz_port_vlan_prepare, .port_vlan_add = ksz_port_vlan_add, .port_vlan_del = ksz_port_vlan_del, - .port_vlan_dump = ksz_port_vlan_dump, - .port_fdb_prepare = ksz_port_fdb_prepare, .port_fdb_dump = ksz_port_fdb_dump, .port_fdb_add = ksz_port_fdb_add, .port_fdb_del = ksz_port_fdb_del, .port_mdb_prepare = ksz_port_mdb_prepare, .port_mdb_add = ksz_port_mdb_add, .port_mdb_del = ksz_port_mdb_del, - .port_mdb_dump = ksz_port_mdb_dump, .port_mirror_add = ksz_port_mirror_add, .port_mirror_del = ksz_port_mirror_del, }; diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 264b281eb86b..c142b97add2c 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -839,49 +839,31 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port, } static int -mt7530_port_fdb_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) +mt7530_port_fdb_add(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid) { struct mt7530_priv *priv = ds->priv; int ret; + u8 port_mask = BIT(port); - /* 
Because auto-learned entrie shares the same FDB table. - * an entry is reserved with no port_mask to make sure fdb_add - * is called while the entry is still available. - */ mutex_lock(&priv->reg_mutex); - mt7530_fdb_write(priv, fdb->vid, 0, fdb->addr, -1, STATIC_ENT); + mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_ENT); ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, 0); mutex_unlock(&priv->reg_mutex); return ret; } -static void -mt7530_port_fdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) -{ - struct mt7530_priv *priv = ds->priv; - u8 port_mask = BIT(port); - - mutex_lock(&priv->reg_mutex); - mt7530_fdb_write(priv, fdb->vid, port_mask, fdb->addr, -1, STATIC_ENT); - mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, 0); - mutex_unlock(&priv->reg_mutex); -} - static int mt7530_port_fdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb) + const unsigned char *addr, u16 vid) { struct mt7530_priv *priv = ds->priv; int ret; u8 port_mask = BIT(port); mutex_lock(&priv->reg_mutex); - mt7530_fdb_write(priv, fdb->vid, port_mask, fdb->addr, -1, STATIC_EMP); + mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_EMP); ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, 0); mutex_unlock(&priv->reg_mutex); @@ -890,8 +872,7 @@ mt7530_port_fdb_del(struct dsa_switch *ds, int port, static int mt7530_port_fdb_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_fdb *fdb, - switchdev_obj_dump_cb_t *cb) + dsa_fdb_dump_cb_t *cb, void *data) { struct mt7530_priv *priv = ds->priv; struct mt7530_fdb _fdb = { 0 }; @@ -909,11 +890,8 @@ mt7530_port_fdb_dump(struct dsa_switch *ds, int port, if (rsp & ATC_SRCH_HIT) { mt7530_fdb_read(priv, &_fdb); if (_fdb.port_mask & BIT(port)) { - ether_addr_copy(fdb->addr, _fdb.mac); - fdb->vid = _fdb.vid; - fdb->ndm_state = _fdb.noarp ? 
- NUD_NOARP : NUD_REACHABLE; - ret = cb(&fdb->obj); + ret = cb(_fdb.mac, _fdb.vid, _fdb.noarp, + data); if (ret < 0) break; } @@ -1039,7 +1017,7 @@ mt7530_setup(struct dsa_switch *ds) return 0; } -static struct dsa_switch_ops mt7530_switch_ops = { +static const struct dsa_switch_ops mt7530_switch_ops = { .get_tag_protocol = mtk_get_tag_protocol, .setup = mt7530_setup, .get_strings = mt7530_get_strings, @@ -1053,7 +1031,6 @@ static struct dsa_switch_ops mt7530_switch_ops = { .port_stp_state_set = mt7530_stp_state_set, .port_bridge_join = mt7530_port_bridge_join, .port_bridge_leave = mt7530_port_bridge_leave, - .port_fdb_prepare = mt7530_port_fdb_prepare, .port_fdb_add = mt7530_port_fdb_add, .port_fdb_del = mt7530_port_fdb_del, .port_fdb_dump = mt7530_port_fdb_dump, diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 5bcdd33101b0..918d8f0fe091 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -810,63 +810,18 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, mutex_unlock(&chip->reg_lock); } -static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, - struct ethtool_eee *e) +static int mv88e6xxx_get_mac_eee(struct dsa_switch *ds, int port, + struct ethtool_eee *e) { - struct mv88e6xxx_chip *chip = ds->priv; - u16 reg; - int err; - - if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEE)) - return -EOPNOTSUPP; - - mutex_lock(&chip->reg_lock); - - err = mv88e6xxx_phy_read(chip, port, 16, &reg); - if (err) - goto out; - - e->eee_enabled = !!(reg & 0x0200); - e->tx_lpi_enabled = !!(reg & 0x0100); - - err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg); - if (err) - goto out; - - e->eee_active = !!(reg & MV88E6352_PORT_STS_EEE); -out: - mutex_unlock(&chip->reg_lock); - - return err; + /* Nothing to do on the port's MAC */ + return 0; } -static int mv88e6xxx_set_eee(struct dsa_switch *ds, int port, - struct phy_device *phydev, struct ethtool_eee *e) +static int mv88e6xxx_set_mac_eee(struct dsa_switch *ds, int port, + struct ethtool_eee *e) { - struct mv88e6xxx_chip *chip = ds->priv; - u16 reg; - int err; - - if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEE)) - return -EOPNOTSUPP; - - mutex_lock(&chip->reg_lock); - - err = mv88e6xxx_phy_read(chip, port, 16, &reg); - if (err) - goto out; - - reg &= ~0x0300; - if (e->eee_enabled) - reg |= 0x0200; - if (e->tx_lpi_enabled) - reg |= 0x0100; - - err = mv88e6xxx_phy_write(chip, port, 16, reg); -out: - mutex_unlock(&chip->reg_lock); - - return err; + /* Nothing to do on the port's MAC */ + return 0; } static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port) @@ -926,6 +881,22 @@ static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port, dev_err(ds->dev, "p%d: failed to update state\n", port); } +static int mv88e6xxx_pot_setup(struct mv88e6xxx_chip *chip) +{ + if (chip->info->ops->pot_clear) + return chip->info->ops->pot_clear(chip); + + return 0; +} + +static int mv88e6xxx_rsvd2cpu_setup(struct mv88e6xxx_chip *chip) +{ + if (chip->info->ops->mgmt_rsvd2cpu) + return chip->info->ops->mgmt_rsvd2cpu(chip); + + return 0; +} + static int mv88e6xxx_atu_setup(struct mv88e6xxx_chip *chip) { int err; @@ -1040,61 +1011,6 @@ static int mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip, return chip->info->ops->vtu_loadpurge(chip, entry); } -static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_vlan *vlan, - switchdev_obj_dump_cb_t *cb) -{ - struct mv88e6xxx_chip *chip = ds->priv; - struct mv88e6xxx_vtu_entry
next = { - .vid = chip->info->max_vid, - }; - u16 pvid; - int err; - - if (!chip->info->max_vid) - return -EOPNOTSUPP; - - mutex_lock(&chip->reg_lock); - - err = mv88e6xxx_port_get_pvid(chip, port, &pvid); - if (err) - goto unlock; - - do { - err = mv88e6xxx_vtu_getnext(chip, &next); - if (err) - break; - - if (!next.valid) - break; - - if (next.member[port] == - MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER) - continue; - - /* reinit and dump this VLAN obj */ - vlan->vid_begin = next.vid; - vlan->vid_end = next.vid; - vlan->flags = 0; - - if (next.member[port] == - MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNTAGGED) - vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED; - - if (next.vid == pvid) - vlan->flags |= BRIDGE_VLAN_INFO_PVID; - - err = cb(&vlan->obj); - if (err) - break; - } while (next.vid < chip->info->max_vid); - -unlock: - mutex_unlock(&chip->reg_lock); - - return err; -} - static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid) { DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID); @@ -1435,38 +1351,28 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port, return mv88e6xxx_g1_atu_loadpurge(chip, vlan.fid, &entry); } -static int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) -{ - /* We don't need any dynamic resource from the kernel (yet), - * so skip the prepare phase. - */ - return 0; -} - -static void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) +static int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid) { struct mv88e6xxx_chip *chip = ds->priv; + int err; mutex_lock(&chip->reg_lock); - if (mv88e6xxx_port_db_load_purge(chip, port, fdb->addr, fdb->vid, - MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC)) - dev_err(ds->dev, "p%d: failed to load unicast MAC address\n", - port); + err = mv88e6xxx_port_db_load_purge(chip, port, addr, vid, + MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC); mutex_unlock(&chip->reg_lock); + + return err; } static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb) + const unsigned char *addr, u16 vid) { struct mv88e6xxx_chip *chip = ds->priv; int err; mutex_lock(&chip->reg_lock); - err = mv88e6xxx_port_db_load_purge(chip, port, fdb->addr, fdb->vid, + err = mv88e6xxx_port_db_load_purge(chip, port, addr, vid, MV88E6XXX_G1_ATU_DATA_STATE_UNUSED); mutex_unlock(&chip->reg_lock); @@ -1475,10 +1381,10 @@ static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port, static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip, u16 fid, u16 vid, int port, - struct switchdev_obj *obj, - switchdev_obj_dump_cb_t *cb) + dsa_fdb_dump_cb_t *cb, void *data) { struct mv88e6xxx_atu_entry addr; + bool is_static; int err; addr.state = MV88E6XXX_G1_ATU_DATA_STATE_UNUSED; @@ -1495,33 +1401,12 @@ static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip, if (addr.trunk || (addr.portvec & BIT(port)) == 0) continue; - if (obj->id == SWITCHDEV_OBJ_ID_PORT_FDB) { - struct switchdev_obj_port_fdb *fdb; - - if (!is_unicast_ether_addr(addr.mac)) - continue; - - fdb = SWITCHDEV_OBJ_PORT_FDB(obj); - fdb->vid = vid; - ether_addr_copy(fdb->addr, addr.mac); - if (addr.state == MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC) - fdb->ndm_state = NUD_NOARP; - else - fdb->ndm_state = NUD_REACHABLE; - } else if (obj->id == SWITCHDEV_OBJ_ID_PORT_MDB) { - struct switchdev_obj_port_mdb *mdb; - - if 
(!is_multicast_ether_addr(addr.mac)) - continue; - - mdb = SWITCHDEV_OBJ_PORT_MDB(obj); - mdb->vid = vid; - ether_addr_copy(mdb->addr, addr.mac); - } else { - return -EOPNOTSUPP; - } + if (!is_unicast_ether_addr(addr.mac)) + continue; - err = cb(obj); + is_static = (addr.state == + MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC); + err = cb(addr.mac, vid, is_static, data); if (err) return err; } while (!is_broadcast_ether_addr(addr.mac)); @@ -1530,8 +1415,7 @@ static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip, } static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port, - struct switchdev_obj *obj, - switchdev_obj_dump_cb_t *cb) + dsa_fdb_dump_cb_t *cb, void *data) { struct mv88e6xxx_vtu_entry vlan = { .vid = chip->info->max_vid, @@ -1544,7 +1428,7 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port, if (err) return err; - err = mv88e6xxx_port_db_dump_fid(chip, fid, 0, port, obj, cb); + err = mv88e6xxx_port_db_dump_fid(chip, fid, 0, port, cb, data); if (err) return err; @@ -1558,7 +1442,7 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port, break; err = mv88e6xxx_port_db_dump_fid(chip, vlan.fid, vlan.vid, port, - obj, cb); + cb, data); if (err) return err; } while (vlan.vid < chip->info->max_vid); @@ -1567,14 +1451,13 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port, } static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_fdb *fdb, - switchdev_obj_dump_cb_t *cb) + dsa_fdb_dump_cb_t *cb, void *data) { struct mv88e6xxx_chip *chip = ds->priv; int err; mutex_lock(&chip->reg_lock); - err = mv88e6xxx_port_db_dump(chip, port, &fdb->obj, cb); + err = mv88e6xxx_port_db_dump(chip, port, cb, data); mutex_unlock(&chip->reg_lock); return err; @@ -2116,7 +1999,7 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) goto unlock; /* Setup Switch Global 2 Registers */ - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_GLOBAL2)) { + if (chip->info->global2_addr) { err = mv88e6xxx_g2_setup(chip); if (err) goto unlock; @@ -2142,16 +2025,13 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) if (err) goto unlock; - /* Some generations have the configuration of sending reserved - * management frames to the CPU in global2, others in - * global1. Hence it does not fit the two setup functions - * above. 
- */ - if (chip->info->ops->mgmt_rsvd2cpu) { - err = chip->info->ops->mgmt_rsvd2cpu(chip); - if (err) - goto unlock; - } + err = mv88e6xxx_pot_setup(chip); + if (err) + goto unlock; + + err = mv88e6xxx_rsvd2cpu_setup(chip); + if (err) + goto unlock; unlock: mutex_unlock(&chip->reg_lock); @@ -2236,7 +2116,7 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip, if (np) { bus->name = np->full_name; - snprintf(bus->id, MII_BUS_ID_SIZE, "%s", np->full_name); + snprintf(bus->id, MII_BUS_ID_SIZE, "%pOF", np); } else { bus->name = "mv88e6xxx SMI"; snprintf(bus->id, MII_BUS_ID_SIZE, "mv88e6xxx-%d", index++); @@ -2385,7 +2265,8 @@ static const struct mv88e6xxx_ops mv88e6085_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .ppu_enable = mv88e6185_g1_ppu_enable, .ppu_disable = mv88e6185_g1_ppu_disable, .reset = mv88e6185_g1_reset, @@ -2408,7 +2289,7 @@ static const struct mv88e6xxx_ops mv88e6095_ops = { .stats_get_sset_count = mv88e6095_stats_get_sset_count, .stats_get_strings = mv88e6095_stats_get_strings, .stats_get_stats = mv88e6095_stats_get_stats, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu, .ppu_enable = mv88e6185_g1_ppu_enable, .ppu_disable = mv88e6185_g1_ppu_disable, .reset = mv88e6185_g1_reset, @@ -2441,7 +2322,8 @@ static const struct mv88e6xxx_ops mv88e6097_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2467,7 +2349,8 @@ static const struct mv88e6xxx_ops mv88e6123_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2496,7 +2379,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu, .ppu_enable = mv88e6185_g1_ppu_enable, .ppu_disable = mv88e6185_g1_ppu_disable, .reset = mv88e6185_g1_reset, @@ -2533,6 +2416,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .set_egress_port = mv88e6390_g1_set_egress_port, .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2563,7 +2447,8 @@ static const struct mv88e6xxx_ops mv88e6161_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, 
.vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2587,7 +2472,8 @@ static const struct mv88e6xxx_ops mv88e6165_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2619,7 +2505,8 @@ static const struct mv88e6xxx_ops mv88e6171_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2653,7 +2540,8 @@ static const struct mv88e6xxx_ops mv88e6172_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2686,7 +2574,8 @@ static const struct mv88e6xxx_ops mv88e6175_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2720,7 +2609,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2746,7 +2636,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu, .ppu_enable = mv88e6185_g1_ppu_enable, .ppu_disable = mv88e6185_g1_ppu_disable, .reset = mv88e6185_g1_reset, @@ -2782,6 +2672,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { .set_egress_port = mv88e6390_g1_set_egress_port, .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, @@ -2816,6 +2707,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { .set_egress_port = mv88e6390_g1_set_egress_port, .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, @@ -2850,6 +2742,7 @@ static const struct mv88e6xxx_ops 
mv88e6191_ops = { .set_egress_port = mv88e6390_g1_set_egress_port, .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, @@ -2884,7 +2777,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2920,6 +2814,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .set_egress_port = mv88e6390_g1_set_egress_port, .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, @@ -2952,14 +2847,15 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { .stats_get_stats = mv88e6320_stats_get_stats, .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6185_g1_vtu_getnext, .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, }; static const struct mv88e6xxx_ops mv88e6321_ops = { - /* MV88E6XXX_FAMILY_6321 */ + /* MV88E6XXX_FAMILY_6320 */ .irl_init_all = mv88e6352_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom16, .set_eeprom = mv88e6xxx_g2_set_eeprom16, @@ -3018,6 +2914,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .set_egress_port = mv88e6390_g1_set_egress_port, .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -3049,7 +2946,8 @@ static const struct mv88e6xxx_ops mv88e6350_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -3081,7 +2979,8 @@ static const struct mv88e6xxx_ops mv88e6351_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -3115,7 +3014,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, 
@@ -3153,6 +3053,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { .set_egress_port = mv88e6390_g1_set_egress_port, .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, @@ -3190,6 +3091,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .set_egress_port = mv88e6390_g1_set_egress_port, .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, @@ -3206,12 +3108,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 8, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6097, .ops = &mv88e6085_ops, }, @@ -3224,11 +3128,12 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 8, .atu_move_port_mask = 0xf, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6095, .ops = &mv88e6095_ops, }, @@ -3241,12 +3146,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 8, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6097, .ops = &mv88e6097_ops, }, @@ -3259,12 +3166,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6165, .ops = &mv88e6123_ops, }, @@ -3277,11 +3186,12 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, .atu_move_port_mask = 0xf, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6185, .ops = &mv88e6131_ops, }, @@ -3294,11 +3204,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 3750, .atu_move_port_mask = 0x1f, + .g2_irqs = 10, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6341, .ops = &mv88e6141_ops, }, @@ -3311,12 +3223,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6165, .ops = &mv88e6161_ops, }, @@ -3329,12 +3243,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 
15000, .g1_irqs = 9, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6165, .ops = &mv88e6165_ops, }, @@ -3347,12 +3263,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6351, .ops = &mv88e6171_ops, }, @@ -3365,12 +3283,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6352, .ops = &mv88e6172_ops, }, @@ -3383,12 +3303,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6351, .ops = &mv88e6175_ops, }, @@ -3401,12 +3323,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6352, .ops = &mv88e6176_ops, }, @@ -3419,11 +3343,12 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 8, .atu_move_port_mask = 0xf, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6185, .ops = &mv88e6185_ops, }, @@ -3436,12 +3361,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 8191, .port_base_addr = 0x0, .global1_addr = 0x1b, + .global2_addr = 0x1c, .tag_protocol = DSA_TAG_PROTO_DSA, .age_time_coeff = 3750, .g1_irqs = 9, + .g2_irqs = 14, .pvt = true, + .multi_chip = true, .atu_move_port_mask = 0x1f, - .flags = MV88E6XXX_FLAGS_FAMILY_6390, .ops = &mv88e6190_ops, }, @@ -3454,12 +3381,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 8191, .port_base_addr = 0x0, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 3750, .g1_irqs = 9, + .g2_irqs = 14, .atu_move_port_mask = 0x1f, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6390, .ops = &mv88e6190x_ops, }, @@ -3472,12 +3401,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 8191, .port_base_addr = 0x0, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 3750, .g1_irqs = 9, + .g2_irqs = 14, .atu_move_port_mask = 0x1f, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6390, .ops = &mv88e6191_ops, }, @@ -3490,12 +3421,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = 
true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6352, .ops = &mv88e6240_ops, }, @@ -3508,12 +3441,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 8191, .port_base_addr = 0x0, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 3750, .g1_irqs = 9, + .g2_irqs = 14, .atu_move_port_mask = 0x1f, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6390, .ops = &mv88e6290_ops, }, @@ -3526,12 +3461,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 8, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6320, .ops = &mv88e6320_ops, }, @@ -3544,11 +3480,12 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 8, .atu_move_port_mask = 0xf, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6320, .ops = &mv88e6321_ops, }, @@ -3561,11 +3498,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 3750, .atu_move_port_mask = 0x1f, + .g2_irqs = 10, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6341, .ops = &mv88e6341_ops, }, @@ -3578,12 +3517,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6351, .ops = &mv88e6350_ops, }, @@ -3596,12 +3537,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6351, .ops = &mv88e6351_ops, }, @@ -3614,12 +3557,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6352, .ops = &mv88e6352_ops, }, [MV88E6390] = { @@ -3631,12 +3576,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 8191, .port_base_addr = 0x0, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 3750, .g1_irqs = 9, + .g2_irqs = 14, .atu_move_port_mask = 0x1f, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6390, .ops = &mv88e6390_ops, }, [MV88E6390X] = { @@ -3648,12 +3595,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 8191, .port_base_addr = 0x0, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 3750, .g1_irqs = 9, + .g2_irqs = 14, .atu_move_port_mask = 0x1f, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6390, .ops = 
&mv88e6390x_ops, }, }; @@ -3723,7 +3672,7 @@ static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip, { if (sw_addr == 0) chip->smi_ops = &mv88e6xxx_smi_single_chip_ops; - else if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_MULTI_CHIP)) + else if (chip->info->multi_chip) chip->smi_ops = &mv88e6xxx_smi_multi_chip_ops; else return -EINVAL; @@ -3828,20 +3777,6 @@ static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port, return err; } -static int mv88e6xxx_port_mdb_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_mdb *mdb, - switchdev_obj_dump_cb_t *cb) -{ - struct mv88e6xxx_chip *chip = ds->priv; - int err; - - mutex_lock(&chip->reg_lock); - err = mv88e6xxx_port_db_dump(chip, port, &mdb->obj, cb); - mutex_unlock(&chip->reg_lock); - - return err; -} - static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .probe = mv88e6xxx_drv_probe, .get_tag_protocol = mv88e6xxx_get_tag_protocol, @@ -3853,8 +3788,8 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .get_sset_count = mv88e6xxx_get_sset_count, .port_enable = mv88e6xxx_port_enable, .port_disable = mv88e6xxx_port_disable, - .set_eee = mv88e6xxx_set_eee, - .get_eee = mv88e6xxx_get_eee, + .get_mac_eee = mv88e6xxx_get_mac_eee, + .set_mac_eee = mv88e6xxx_set_mac_eee, .get_eeprom_len = mv88e6xxx_get_eeprom_len, .get_eeprom = mv88e6xxx_get_eeprom, .set_eeprom = mv88e6xxx_set_eeprom, @@ -3869,15 +3804,12 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .port_vlan_prepare = mv88e6xxx_port_vlan_prepare, .port_vlan_add = mv88e6xxx_port_vlan_add, .port_vlan_del = mv88e6xxx_port_vlan_del, - .port_vlan_dump = mv88e6xxx_port_vlan_dump, - .port_fdb_prepare = mv88e6xxx_port_fdb_prepare, .port_fdb_add = mv88e6xxx_port_fdb_add, .port_fdb_del = mv88e6xxx_port_fdb_del, .port_fdb_dump = mv88e6xxx_port_fdb_dump, .port_mdb_prepare = mv88e6xxx_port_mdb_prepare, .port_mdb_add = mv88e6xxx_port_mdb_add, .port_mdb_del = mv88e6xxx_port_mdb_del, - .port_mdb_dump = mv88e6xxx_port_mdb_dump, .crosschip_bridge_join = mv88e6xxx_crosschip_bridge_join, .crosschip_bridge_leave = mv88e6xxx_crosschip_bridge_leave, }; @@ -3971,7 +3903,7 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) if (err) goto out; - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_INT)) { + if (chip->info->g2_irqs > 0) { err = mv88e6xxx_g2_irq_setup(chip); if (err) goto out_g1_irq; @@ -3991,7 +3923,7 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) out_mdio: mv88e6xxx_mdios_unregister(chip); out_g2_irq: - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_INT) && chip->irq > 0) + if (chip->info->g2_irqs > 0 && chip->irq > 0) mv88e6xxx_g2_irq_free(chip); out_g1_irq: if (chip->irq > 0) { @@ -4013,7 +3945,7 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev) mv88e6xxx_mdios_unregister(chip); if (chip->irq > 0) { - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_INT)) + if (chip->info->g2_irqs > 0) mv88e6xxx_g2_irq_free(chip); mv88e6xxx_g1_irq_free(chip); } diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 086444016352..334f6f7544ba 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -97,133 +97,6 @@ enum mv88e6xxx_family { MV88E6XXX_FAMILY_6390, /* 6190 6190X 6191 6290 6390 6390X */ }; -enum mv88e6xxx_cap { - /* Energy Efficient Ethernet. - */ - MV88E6XXX_CAP_EEE, - - /* Multi-chip Addressing Mode. - * Some chips respond to only 2 registers of its own SMI device address - * when it is non-zero, and use indirect access to internal registers. 
- */ - MV88E6XXX_CAP_SMI_CMD, /* (0x00) SMI Command */ - MV88E6XXX_CAP_SMI_DATA, /* (0x01) SMI Data */ - - /* Switch Global (1) Registers. - */ - MV88E6XXX_CAP_G1_ATU_FID, /* (0x01) ATU FID Register */ - MV88E6XXX_CAP_G1_VTU_FID, /* (0x02) VTU FID Register */ - - /* Switch Global 2 Registers. - * The device contains a second set of global 16-bit registers. - */ - MV88E6XXX_CAP_GLOBAL2, - MV88E6XXX_CAP_G2_INT, /* (0x00) Interrupt Status */ - MV88E6XXX_CAP_G2_MGMT_EN_2X, /* (0x02) MGMT Enable Register 2x */ - MV88E6XXX_CAP_G2_MGMT_EN_0X, /* (0x03) MGMT Enable Register 0x */ - MV88E6XXX_CAP_G2_POT, /* (0x0f) Priority Override Table */ - - /* Per VLAN Spanning Tree Unit (STU). - * The Port State database, if present, is accessed through VTU - * operations and dedicated SID registers. See MV88E6352_G1_VTU_SID. - */ - MV88E6XXX_CAP_STU, - - /* VLAN Table Unit. - * The VTU is used to program 802.1Q VLANs. See MV88E6XXX_G1_VTU_OP. - */ - MV88E6XXX_CAP_VTU, -}; - -/* Bitmask of capabilities */ -#define MV88E6XXX_FLAG_EEE BIT_ULL(MV88E6XXX_CAP_EEE) - -#define MV88E6XXX_FLAG_SMI_CMD BIT_ULL(MV88E6XXX_CAP_SMI_CMD) -#define MV88E6XXX_FLAG_SMI_DATA BIT_ULL(MV88E6XXX_CAP_SMI_DATA) - -#define MV88E6XXX_FLAG_G1_VTU_FID BIT_ULL(MV88E6XXX_CAP_G1_VTU_FID) - -#define MV88E6XXX_FLAG_GLOBAL2 BIT_ULL(MV88E6XXX_CAP_GLOBAL2) -#define MV88E6XXX_FLAG_G2_INT BIT_ULL(MV88E6XXX_CAP_G2_INT) -#define MV88E6XXX_FLAG_G2_MGMT_EN_2X BIT_ULL(MV88E6XXX_CAP_G2_MGMT_EN_2X) -#define MV88E6XXX_FLAG_G2_MGMT_EN_0X BIT_ULL(MV88E6XXX_CAP_G2_MGMT_EN_0X) -#define MV88E6XXX_FLAG_G2_POT BIT_ULL(MV88E6XXX_CAP_G2_POT) - -/* Multi-chip Addressing Mode */ -#define MV88E6XXX_FLAGS_MULTI_CHIP \ - (MV88E6XXX_FLAG_SMI_CMD | \ - MV88E6XXX_FLAG_SMI_DATA) - -#define MV88E6XXX_FLAGS_FAMILY_6095 \ - (MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ - MV88E6XXX_FLAGS_MULTI_CHIP) - -#define MV88E6XXX_FLAGS_FAMILY_6097 \ - (MV88E6XXX_FLAG_G1_VTU_FID | \ - MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_INT | \ - MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ - MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ - MV88E6XXX_FLAG_G2_POT | \ - MV88E6XXX_FLAGS_MULTI_CHIP) - -#define MV88E6XXX_FLAGS_FAMILY_6165 \ - (MV88E6XXX_FLAG_G1_VTU_FID | \ - MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_INT | \ - MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ - MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ - MV88E6XXX_FLAG_G2_POT | \ - MV88E6XXX_FLAGS_MULTI_CHIP) - -#define MV88E6XXX_FLAGS_FAMILY_6185 \ - (MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_INT | \ - MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ - MV88E6XXX_FLAGS_MULTI_CHIP) - -#define MV88E6XXX_FLAGS_FAMILY_6320 \ - (MV88E6XXX_FLAG_EEE | \ - MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ - MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ - MV88E6XXX_FLAG_G2_POT | \ - MV88E6XXX_FLAGS_MULTI_CHIP) - -#define MV88E6XXX_FLAGS_FAMILY_6341 \ - (MV88E6XXX_FLAG_EEE | \ - MV88E6XXX_FLAG_G1_VTU_FID | \ - MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_INT | \ - MV88E6XXX_FLAG_G2_POT | \ - MV88E6XXX_FLAGS_MULTI_CHIP) - -#define MV88E6XXX_FLAGS_FAMILY_6351 \ - (MV88E6XXX_FLAG_G1_VTU_FID | \ - MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_INT | \ - MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ - MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ - MV88E6XXX_FLAG_G2_POT | \ - MV88E6XXX_FLAGS_MULTI_CHIP) - -#define MV88E6XXX_FLAGS_FAMILY_6352 \ - (MV88E6XXX_FLAG_EEE | \ - MV88E6XXX_FLAG_G1_VTU_FID | \ - MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_INT | \ - MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ - MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ - MV88E6XXX_FLAG_G2_POT | \ - MV88E6XXX_FLAGS_MULTI_CHIP) - -#define 
MV88E6XXX_FLAGS_FAMILY_6390 \ - (MV88E6XXX_FLAG_EEE | \ - MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_INT | \ - MV88E6XXX_FLAGS_MULTI_CHIP) - struct mv88e6xxx_ops; struct mv88e6xxx_info { @@ -235,11 +108,18 @@ struct mv88e6xxx_info { unsigned int max_vid; unsigned int port_base_addr; unsigned int global1_addr; + unsigned int global2_addr; unsigned int age_time_coeff; unsigned int g1_irqs; + unsigned int g2_irqs; bool pvt; + + /* Multi-chip Addressing Mode. + * Some chips respond to only 2 registers of its own SMI device address + * when it is non-zero, and use indirect access to internal registers. + */ + bool multi_chip; enum dsa_tag_protocol tag_protocol; - unsigned long long flags; /* Mask for FromPort and ToPort value of PortVec used in ATU Move * operation. 0 means that the ATU Move operation is not supported. @@ -359,6 +239,9 @@ struct mv88e6xxx_ops { struct mii_bus *bus, int addr, int reg, u16 val); + /* Priority Override Table operations */ + int (*pot_clear)(struct mv88e6xxx_chip *chip); + /* PHY Polling Unit (PPU) operations */ int (*ppu_enable)(struct mv88e6xxx_chip *chip); int (*ppu_disable)(struct mv88e6xxx_chip *chip); @@ -449,7 +332,6 @@ struct mv88e6xxx_ops { int (*set_egress_port)(struct mv88e6xxx_chip *chip, int port); const struct mv88e6xxx_irq_ops *watchdog_ops; - /* Can be either in g1 or g2, so don't use a prefix */ int (*mgmt_rsvd2cpu)(struct mv88e6xxx_chip *chip); /* Power on/off a SERDES interface */ @@ -482,12 +364,6 @@ struct mv88e6xxx_hw_stat { int type; }; -static inline bool mv88e6xxx_has(struct mv88e6xxx_chip *chip, - unsigned long flags) -{ - return (chip->info->flags & flags) == flags; -} - static inline bool mv88e6xxx_has_pvt(struct mv88e6xxx_chip *chip) { return chip->info->pvt; diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index 158d0f499874..16f556261022 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -22,48 +22,99 @@ static int mv88e6xxx_g2_read(struct mv88e6xxx_chip *chip, int reg, u16 *val) { - return mv88e6xxx_read(chip, MV88E6XXX_G2, reg, val); + return mv88e6xxx_read(chip, chip->info->global2_addr, reg, val); } static int mv88e6xxx_g2_write(struct mv88e6xxx_chip *chip, int reg, u16 val) { - return mv88e6xxx_write(chip, MV88E6XXX_G2, reg, val); + return mv88e6xxx_write(chip, chip->info->global2_addr, reg, val); } static int mv88e6xxx_g2_update(struct mv88e6xxx_chip *chip, int reg, u16 update) { - return mv88e6xxx_update(chip, MV88E6XXX_G2, reg, update); + return mv88e6xxx_update(chip, chip->info->global2_addr, reg, update); } static int mv88e6xxx_g2_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask) { - return mv88e6xxx_wait(chip, MV88E6XXX_G2, reg, mask); + return mv88e6xxx_wait(chip, chip->info->global2_addr, reg, mask); +} + +/* Offset 0x00: Interrupt Source Register */ + +static int mv88e6xxx_g2_int_source(struct mv88e6xxx_chip *chip, u16 *src) +{ + /* Read (and clear most of) the Interrupt Source bits */ + return mv88e6xxx_g2_read(chip, MV88E6XXX_G2_INT_SRC, src); +} + +/* Offset 0x01: Interrupt Mask Register */ + +static int mv88e6xxx_g2_int_mask(struct mv88e6xxx_chip *chip, u16 mask) +{ + return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_INT_MASK, mask); } /* Offset 0x02: Management Enable 2x */ + +static int mv88e6xxx_g2_mgmt_enable_2x(struct mv88e6xxx_chip *chip, u16 en2x) +{ + return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_MGMT_EN_2X, en2x); +} + /* Offset 0x03: Management Enable 0x */ -int mv88e6095_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip) +static 
int mv88e6xxx_g2_mgmt_enable_0x(struct mv88e6xxx_chip *chip, u16 en0x) +{ + return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_MGMT_EN_0X, en0x); +} + +/* Offset 0x05: Switch Management Register */ + +static int mv88e6xxx_g2_switch_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip, + bool enable) +{ + u16 val; + int err; + + err = mv88e6xxx_g2_read(chip, MV88E6XXX_G2_SWITCH_MGMT, &val); + if (err) + return err; + + if (enable) + val |= MV88E6XXX_G2_SWITCH_MGMT_RSVD2CPU; + else + val &= ~MV88E6XXX_G2_SWITCH_MGMT_RSVD2CPU; + + return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SWITCH_MGMT, val); +} + +int mv88e6185_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip) { int err; /* Consider the frames with reserved multicast destination - * addresses matching 01:80:c2:00:00:2x as MGMT. + * addresses matching 01:80:c2:00:00:0x as MGMT. */ - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_2X)) { - err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_MGMT_EN_2X, 0xffff); - if (err) - return err; - } + err = mv88e6xxx_g2_mgmt_enable_0x(chip, 0xffff); + if (err) + return err; + + return mv88e6xxx_g2_switch_mgmt_rsvd2cpu(chip, true); +} + +int mv88e6352_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip) +{ + int err; /* Consider the frames with reserved multicast destination - * addresses matching 01:80:c2:00:00:0x as MGMT. + * addresses matching 01:80:c2:00:00:2x as MGMT. */ - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_0X)) - return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_MGMT_EN_0X, - 0xffff); + err = mv88e6xxx_g2_mgmt_enable_2x(chip, 0xffff); + if (err) + return err; - return 0; + return mv88e6185_g2_mgmt_rsvd2cpu(chip); } /* Offset 0x06: Device Mapping Table register */ @@ -260,7 +311,7 @@ static int mv88e6xxx_g2_pot_write(struct mv88e6xxx_chip *chip, int pointer, return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_PRIO_OVERRIDE, val); } -static int mv88e6xxx_g2_clear_pot(struct mv88e6xxx_chip *chip) +int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip) { int i, err; @@ -933,7 +984,7 @@ static irqreturn_t mv88e6xxx_g2_irq_thread_fn(int irq, void *dev_id) u16 reg; mutex_lock(&chip->reg_lock); - err = mv88e6xxx_g2_read(chip, MV88E6XXX_G2_INT_SOURCE, ®); + err = mv88e6xxx_g2_int_source(chip, ®); mutex_unlock(&chip->reg_lock); if (err) goto out; @@ -959,8 +1010,11 @@ static void mv88e6xxx_g2_irq_bus_lock(struct irq_data *d) static void mv88e6xxx_g2_irq_bus_sync_unlock(struct irq_data *d) { struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d); + int err; - mv88e6xxx_g2_write(chip, MV88E6XXX_G2_INT_MASK, ~chip->g2_irq.masked); + err = mv88e6xxx_g2_int_mask(chip, ~chip->g2_irq.masked); + if (err) + dev_err(chip->dev, "failed to mask interrupts\n"); mutex_unlock(&chip->reg_lock); } @@ -1063,9 +1117,6 @@ int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip) * port at the highest priority. */ reg = MV88E6XXX_G2_SWITCH_MGMT_FORCE_FLOW_CTL_PRI | (0x7 << 4); - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_0X) || - mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_2X)) - reg |= MV88E6XXX_G2_SWITCH_MGMT_RSVD2CPU | 0x7; err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SWITCH_MGMT, reg); if (err) return err; @@ -1080,12 +1131,5 @@ int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip) if (err) return err; - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_POT)) { - /* Clear the priority override table. 
*/ - err = mv88e6xxx_g2_clear_pot(chip); - if (err) - return err; - } - return 0; } diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h index 317ffd8f323d..669f59017b12 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.h +++ b/drivers/net/dsa/mv88e6xxx/global2.h @@ -17,14 +17,27 @@ #include "chip.h" -#define MV88E6XXX_G2 0x1c - /* Offset 0x00: Interrupt Source Register */ -#define MV88E6XXX_G2_INT_SOURCE 0x00 +#define MV88E6XXX_G2_INT_SRC 0x00 +#define MV88E6XXX_G2_INT_SRC_WDOG 0x8000 +#define MV88E6XXX_G2_INT_SRC_JAM_LIMIT 0x4000 +#define MV88E6XXX_G2_INT_SRC_DUPLEX_MISMATCH 0x2000 +#define MV88E6XXX_G2_INT_SRC_WAKE_EVENT 0x1000 +#define MV88E6352_G2_INT_SRC_SERDES 0x0800 +#define MV88E6352_G2_INT_SRC_PHY 0x001f +#define MV88E6390_G2_INT_SRC_PHY 0x07fe + #define MV88E6XXX_G2_INT_SOURCE_WATCHDOG 15 /* Offset 0x01: Interrupt Mask Register */ -#define MV88E6XXX_G2_INT_MASK 0x01 +#define MV88E6XXX_G2_INT_MASK 0x01 +#define MV88E6XXX_G2_INT_MASK_WDOG 0x8000 +#define MV88E6XXX_G2_INT_MASK_JAM_LIMIT 0x4000 +#define MV88E6XXX_G2_INT_MASK_DUPLEX_MISMATCH 0x2000 +#define MV88E6XXX_G2_INT_MASK_WAKE_EVENT 0x1000 +#define MV88E6352_G2_INT_MASK_SERDES 0x0800 +#define MV88E6352_G2_INT_MASK_PHY 0x001f +#define MV88E6390_G2_INT_MASK_PHY 0x07fe /* Offset 0x02: MGMT Enable Register 2x */ #define MV88E6XXX_G2_MGMT_EN_2X 0x02 @@ -245,7 +258,11 @@ int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip); int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip); int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip); void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip); -int mv88e6095_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip); + +int mv88e6185_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip); +int mv88e6352_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip); + +int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip); extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops; extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops; @@ -254,7 +271,7 @@ extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops; static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip) { - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_GLOBAL2)) { + if (chip->info->global2_addr) { dev_err(chip->dev, "this chip requires CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 enabled\n"); return -EOPNOTSUPP; } @@ -347,7 +364,17 @@ static inline void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip) { } -static inline int mv88e6095_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip) +static inline int mv88e6185_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip) +{ + return -EOPNOTSUPP; +} + +static inline int mv88e6352_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip) +{ + return -EOPNOTSUPP; +} + +static inline int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip) { return -EOPNOTSUPP; } diff --git a/drivers/net/dsa/mv88e6xxx/phy.c b/drivers/net/dsa/mv88e6xxx/phy.c index 3500ac0ea848..436668bd50dc 100644 --- a/drivers/net/dsa/mv88e6xxx/phy.c +++ b/drivers/net/dsa/mv88e6xxx/phy.c @@ -13,7 +13,6 @@ #include <linux/mdio.h> #include <linux/module.h> -#include <net/dsa.h> #include "chip.h" #include "phy.h" diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h index 8f3991bf1851..b16d5f0e6e9c 100644 --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h @@ -216,9 +216,6 @@ /* Offset 0x13: OutFiltered Counter */ #define MV88E6XXX_PORT_OUT_FILTERED 0x13 -/* Offset 0x16: LED Control */ -#define MV88E6XXX_PORT_LED_CONTROL 0x16 - /* Offset 0x18: IEEE Priority Mapping Table */ #define 
MV88E6390_PORT_IEEE_PRIO_MAP_TABLE 0x18 #define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_UPDATE 0x8000 diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index b3bee7eab45f..17977f06cb98 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -637,8 +637,8 @@ qca8k_get_sset_count(struct dsa_switch *ds) return ARRAY_SIZE(ar8327_mib); } -static void -qca8k_eee_enable_set(struct dsa_switch *ds, int port, bool enable) +static int +qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port); @@ -646,73 +646,21 @@ qca8k_eee_enable_set(struct dsa_switch *ds, int port, bool enable) mutex_lock(&priv->reg_mutex); reg = qca8k_read(priv, QCA8K_REG_EEE_CTRL); - if (enable) + if (eee->eee_enabled) reg |= lpi_en; else reg &= ~lpi_en; qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg); mutex_unlock(&priv->reg_mutex); -} - -static int -qca8k_eee_init(struct dsa_switch *ds, int port, - struct phy_device *phy) -{ - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; - struct ethtool_eee *p = &priv->port_sts[port].eee; - int ret; - - p->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full); - - ret = phy_init_eee(phy, 0); - if (ret) - return ret; - - qca8k_eee_enable_set(ds, port, true); return 0; } static int -qca8k_set_eee(struct dsa_switch *ds, int port, - struct phy_device *phydev, - struct ethtool_eee *e) +qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e) { - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; - struct ethtool_eee *p = &priv->port_sts[port].eee; - int ret = 0; - - p->eee_enabled = e->eee_enabled; - - if (e->eee_enabled) { - p->eee_enabled = qca8k_eee_init(ds, port, phydev); - if (!p->eee_enabled) - ret = -EOPNOTSUPP; - } - qca8k_eee_enable_set(ds, port, p->eee_enabled); - - return ret; -} - -static int -qca8k_get_eee(struct dsa_switch *ds, int port, - struct ethtool_eee *e) -{ - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; - struct ethtool_eee *p = &priv->port_sts[port].eee; - struct net_device *netdev = ds->ports[port].netdev; - int ret; - - ret = phy_ethtool_get_eee(netdev->phydev, p); - if (!ret) - e->eee_active = - !!(p->supported & p->advertised & p->lp_advertised); - else - e->eee_active = 0; - - e->eee_enabled = p->eee_enabled; - - return ret; + /* Nothing to do on the port's MAC */ + return 0; } static void @@ -829,69 +777,44 @@ qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr, } static int -qca8k_port_fdb_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) -{ - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; - - /* The FDB table for static and auto learned entries is the same. We - * need to reserve an entry with no port_mask set to make sure that - * when port_fdb_add is called an entry is still available. 
Otherwise - * the last free entry might have been used up by auto learning - */ - return qca8k_port_fdb_insert(priv, fdb->addr, 0, fdb->vid); -} - -static void qca8k_port_fdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) + const unsigned char *addr, u16 vid) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; u16 port_mask = BIT(port); - /* Update the FDB entry adding the port_mask */ - qca8k_port_fdb_insert(priv, fdb->addr, port_mask, fdb->vid); + return qca8k_port_fdb_insert(priv, addr, port_mask, vid); } static int qca8k_port_fdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb) + const unsigned char *addr, u16 vid) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; u16 port_mask = BIT(port); - u16 vid = fdb->vid; if (!vid) vid = 1; - return qca8k_fdb_del(priv, fdb->addr, port_mask, vid); + return qca8k_fdb_del(priv, addr, port_mask, vid); } static int qca8k_port_fdb_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_fdb *fdb, - switchdev_obj_dump_cb_t *cb) + dsa_fdb_dump_cb_t *cb, void *data) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; struct qca8k_fdb _fdb = { 0 }; int cnt = QCA8K_NUM_FDB_RECORDS; + bool is_static; int ret = 0; mutex_lock(&priv->reg_mutex); while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) { if (!_fdb.aging) break; - - ether_addr_copy(fdb->addr, _fdb.mac); - fdb->vid = _fdb.vid; - if (_fdb.aging == QCA8K_ATU_STATUS_STATIC) - fdb->ndm_state = NUD_NOARP; - else - fdb->ndm_state = NUD_REACHABLE; - - ret = cb(&fdb->obj); + is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC); + ret = cb(_fdb.mac, _fdb.vid, is_static, data); if (ret) break; } @@ -914,14 +837,13 @@ static const struct dsa_switch_ops qca8k_switch_ops = { .phy_write = qca8k_phy_write, .get_ethtool_stats = qca8k_get_ethtool_stats, .get_sset_count = qca8k_get_sset_count, - .get_eee = qca8k_get_eee, - .set_eee = qca8k_set_eee, + .get_mac_eee = qca8k_get_mac_eee, + .set_mac_eee = qca8k_set_mac_eee, .port_enable = qca8k_port_enable, .port_disable = qca8k_port_disable, .port_stp_state_set = qca8k_port_stp_state_set, .port_bridge_join = qca8k_port_bridge_join, .port_bridge_leave = qca8k_port_bridge_leave, - .port_fdb_prepare = qca8k_port_fdb_prepare, .port_fdb_add = qca8k_port_fdb_add, .port_fdb_del = qca8k_port_fdb_del, .port_fdb_dump = qca8k_port_fdb_dump, diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h index 1ed4fac6cd6d..1cf8a920d4ff 100644 --- a/drivers/net/dsa/qca8k.h +++ b/drivers/net/dsa/qca8k.h @@ -156,7 +156,6 @@ enum qca8k_fdb_cmd { }; struct ar8xxx_port_status { - struct ethtool_eee eee; int enabled; }; diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index d0c165d2086e..d0a1f9ce3168 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -345,7 +345,7 @@ static void dummy_setup(struct net_device *dev) dev->flags &= ~IFF_MULTICAST; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST; - dev->features |= NETIF_F_ALL_TSO | NETIF_F_UFO; + dev->features |= NETIF_F_ALL_TSO; dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX; dev->features |= NETIF_F_GSO_ENCAP_ALL; dev->hw_features |= dev->features; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index ecef3ee87b17..2fd9b80b39b0 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1918,18 +1918,18 @@ static void 
xgbe_poll_controller(struct net_device *netdev) } #endif /* End CONFIG_NET_POLL_CONTROLLER */ -static int xgbe_setup_tc(struct net_device *netdev, u32 handle, u32 chain_index, - __be16 proto, - struct tc_to_netdev *tc_to_netdev) +static int xgbe_setup_tc(struct net_device *netdev, enum tc_setup_type type, + void *type_data) { struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct tc_mqprio_qopt *mqprio = type_data; u8 tc; - if (tc_to_netdev->type != TC_SETUP_MQPRIO) - return -EINVAL; + if (type != TC_SETUP_MQPRIO) + return -EOPNOTSUPP; - tc_to_netdev->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - tc = tc_to_netdev->mqprio->num_tc; + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + tc = mqprio->num_tc; if (tc > pdata->hw_feat.tc_cnt) return -EINVAL; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 0938294f640a..e9282c924621 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -129,6 +129,7 @@ #include <net/dcbnl.h> #include <linux/completion.h> #include <linux/cpumask.h> +#include <linux/interrupt.h> #define XGBE_DRV_NAME "amd-xgbe" #define XGBE_DRV_VERSION "1.0.3" diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c index 96dd5300e0e5..e58b157b7d7c 100644 --- a/drivers/net/ethernet/apple/mace.c +++ b/drivers/net/ethernet/apple/mace.c @@ -114,8 +114,8 @@ static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match) int j, rev, rc = -EBUSY; if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) { - printk(KERN_ERR "can't use MACE %s: need 3 addrs and 3 irqs\n", - mace->full_name); + printk(KERN_ERR "can't use MACE %pOF: need 3 addrs and 3 irqs\n", + mace); return -ENODEV; } @@ -123,8 +123,8 @@ static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match) if (addr == NULL) { addr = of_get_property(mace, "local-mac-address", NULL); if (addr == NULL) { - printk(KERN_ERR "Can't get mac-address for MACE %s\n", - mace->full_name); + printk(KERN_ERR "Can't get mac-address for MACE %pOF\n", + mace); return -ENODEV; } } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 9a0817938eca..4b445750b93e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -134,7 +134,10 @@ static inline unsigned int aq_ring_dx_in_range(unsigned int h, unsigned int i, } #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) -int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget) +int aq_ring_rx_clean(struct aq_ring_s *self, + struct napi_struct *napi, + int *work_done, + int budget) { struct net_device *ndev = aq_nic_get_ndev(self->aq_nic); int err = 0; @@ -240,7 +243,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget) skb_record_rx_queue(skb, self->idx); - netif_receive_skb(skb); + napi_gro_receive(napi, skb); ++self->stats.rx.packets; self->stats.rx.bytes += skb->len; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h index eecd6d1c4d73..782176c5f4f8 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h @@ -148,7 +148,10 @@ int aq_ring_init(struct aq_ring_s *self); void aq_ring_rx_deinit(struct aq_ring_s *self); void aq_ring_free(struct aq_ring_s *self); void aq_ring_tx_clean(struct aq_ring_s *self); -int aq_ring_rx_clean(struct aq_ring_s *self, int 
*work_done, int budget); +int aq_ring_rx_clean(struct aq_ring_s *self, + struct napi_struct *napi, + int *work_done, + int budget); int aq_ring_rx_fill(struct aq_ring_s *self); #endif /* AQ_RING_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index ad5b4d4dac7f..ec390c5eed35 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -78,6 +78,7 @@ __acquires(&self->lock) if (ring[AQ_VEC_RX_ID].sw_head != ring[AQ_VEC_RX_ID].hw_head) { err = aq_ring_rx_clean(&ring[AQ_VEC_RX_ID], + napi, &work_done, budget - work_done); if (err < 0) diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index 68de2f2652f2..3241af1ce718 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -720,6 +720,18 @@ static int arc_emac_set_address(struct net_device *ndev, void *p) return 0; } +static int arc_emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + if (!netif_running(dev)) + return -EINVAL; + + if (!dev->phydev) + return -ENODEV; + + return phy_mii_ioctl(dev->phydev, rq, cmd); +} + + static const struct net_device_ops arc_emac_netdev_ops = { .ndo_open = arc_emac_open, .ndo_stop = arc_emac_stop, @@ -727,6 +739,7 @@ static const struct net_device_ops arc_emac_netdev_ops = { .ndo_set_mac_address = arc_emac_set_address, .ndo_get_stats = arc_emac_stats, .ndo_set_rx_mode = arc_emac_set_rx_mode, + .ndo_do_ioctl = arc_emac_ioctl, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = arc_emac_poll_controller, #endif diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 96413808c726..1456cb18f830 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -61,10 +61,12 @@ config BCM63XX_ENET config BCMGENET tristate "Broadcom GENET internal MAC support" + depends on OF && HAS_IOMEM select MII select PHYLIB select FIXED_PHY select BCM7XXX_PHY + select MDIO_BCM_UNIMAC help This driver supports the built-in Ethernet MACs found in the Broadcom BCM7xxx Set Top Box family chipset. 
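For context on the aq_ring/aq_vec hunks above: threading the servicing napi_struct down into the RX-clean routine is what lets the driver hand completed buffers to napi_gro_receive() (so the stack can coalesce TCP segments) instead of plain netif_receive_skb(). A minimal, driver-agnostic sketch of the pattern; the demo_* names and the dequeue hook are hypothetical stand-ins, not the Aquantia API:

/* Sketch only: demo_ring and demo_rx_clean are illustrative. */
#include <linux/etherdevice.h>
#include <linux/netdevice.h>

struct demo_ring {
	struct net_device *ndev;
	/* hypothetical hook returning the next completed RX buffer */
	struct sk_buff *(*dequeue)(struct demo_ring *ring);
};

static int demo_rx_clean(struct demo_ring *ring, struct napi_struct *napi,
			 int *work_done, int budget)
{
	while (*work_done < budget) {
		struct sk_buff *skb = ring->dequeue(ring);

		if (!skb)
			break;
		skb->protocol = eth_type_trans(skb, ring->ndev);
		/* GRO entry point instead of netif_receive_skb() */
		napi_gro_receive(napi, skb);
		(*work_done)++;
	}
	return 0;
}

The napi pointer has to come down from the driver's poll callback (aq_vec_poll in these hunks), since napi_gro_receive() is only safe to call from NAPI context.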
@@ -193,6 +195,7 @@ config SYSTEMPORT
 config BNXT
 	tristate "Broadcom NetXtreme-C/E support"
 	depends on PCI
+	depends on MAY_USE_DEVLINK
 	select FW_LOADER
 	select LIBCRC32C
 	---help---
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index dc3052751bc1..b3a21418f511 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -201,10 +201,10 @@ static int bcm_sysport_set_features(struct net_device *dev,
  */
 static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
 	/* general stats */
-	STAT_NETDEV(rx_packets),
-	STAT_NETDEV(tx_packets),
-	STAT_NETDEV(rx_bytes),
-	STAT_NETDEV(tx_bytes),
+	STAT_NETDEV64(rx_packets),
+	STAT_NETDEV64(tx_packets),
+	STAT_NETDEV64(rx_bytes),
+	STAT_NETDEV64(tx_bytes),
 	STAT_NETDEV(rx_errors),
 	STAT_NETDEV(tx_errors),
 	STAT_NETDEV(rx_dropped),
@@ -316,6 +316,7 @@ static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
 {
 	switch (type) {
 	case BCM_SYSPORT_STAT_NETDEV:
+	case BCM_SYSPORT_STAT_NETDEV64:
 	case BCM_SYSPORT_STAT_RXCHK:
 	case BCM_SYSPORT_STAT_RBUF:
 	case BCM_SYSPORT_STAT_SOFT:
@@ -398,6 +399,7 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
 		s = &bcm_sysport_gstrings_stats[i];
 		switch (s->type) {
 		case BCM_SYSPORT_STAT_NETDEV:
+		case BCM_SYSPORT_STAT_NETDEV64:
 		case BCM_SYSPORT_STAT_SOFT:
 			continue;
 		case BCM_SYSPORT_STAT_MIB_RX:
@@ -434,7 +436,10 @@ static void bcm_sysport_get_stats(struct net_device *dev,
 				  struct ethtool_stats *stats, u64 *data)
 {
 	struct bcm_sysport_priv *priv = netdev_priv(dev);
+	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
+	struct u64_stats_sync *syncp = &priv->syncp;
 	struct bcm_sysport_tx_ring *ring;
+	unsigned int start;
 	int i, j;
 
 	if (netif_running(dev))
@@ -447,14 +452,22 @@ static void bcm_sysport_get_stats(struct net_device *dev,
 		s = &bcm_sysport_gstrings_stats[i];
 		if (s->type == BCM_SYSPORT_STAT_NETDEV)
 			p = (char *)&dev->stats;
+		else if (s->type == BCM_SYSPORT_STAT_NETDEV64)
+			p = (char *)stats64;
 		else
 			p = (char *)priv;
 
 		if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
 			continue;
-
 		p += s->stat_offset;
-		data[j] = *(unsigned long *)p;
+
+		if (s->stat_sizeof == sizeof(u64))
+			do {
+				start = u64_stats_fetch_begin_irq(syncp);
+				data[i] = *(u64 *)p;
+			} while (u64_stats_fetch_retry_irq(syncp, start));
+		else
+			data[i] = *(u32 *)p;
 		j++;
 	}
 
@@ -666,6 +679,7 @@ static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
 static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
 					unsigned int budget)
 {
+	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
 	struct net_device *ndev = priv->netdev;
 	unsigned int processed = 0, to_process;
 	struct bcm_sysport_cb *cb;
@@ -769,6 +783,10 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
 		skb->protocol = eth_type_trans(skb, ndev);
 		ndev->stats.rx_packets++;
 		ndev->stats.rx_bytes += len;
+		u64_stats_update_begin(&priv->syncp);
+		stats64->rx_packets++;
+		stats64->rx_bytes += len;
+		u64_stats_update_end(&priv->syncp);
 
 		napi_gro_receive(&priv->napi, skb);
 next:
@@ -791,17 +809,15 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
 	struct device *kdev = &priv->pdev->dev;
 
 	if (cb->skb) {
-		ring->bytes += cb->skb->len;
 		*bytes_compl += cb->skb->len;
 		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
 				 dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
-		ring->packets++;
 		(*pkts_compl)++;
 		bcm_sysport_free_cb(cb);
 	/* SKB fragment */
 	} else if (dma_unmap_addr(cb, dma_addr)) {
-
ring->bytes += dma_unmap_len(cb, dma_len); + *bytes_compl += dma_unmap_len(cb, dma_len); dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr), dma_unmap_len(cb, dma_len), DMA_TO_DEVICE); dma_unmap_addr_set(cb, dma_addr, 0); @@ -812,9 +828,9 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring, static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, struct bcm_sysport_tx_ring *ring) { - struct net_device *ndev = priv->netdev; unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs; unsigned int pkts_compl = 0, bytes_compl = 0; + struct net_device *ndev = priv->netdev; struct bcm_sysport_cb *cb; u32 hw_ind; @@ -853,6 +869,11 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, last_c_index &= (num_tx_cbs - 1); } + u64_stats_update_begin(&priv->syncp); + ring->packets += pkts_compl; + ring->bytes += bytes_compl; + u64_stats_update_end(&priv->syncp); + ring->c_index = c_index; netif_dbg(priv, tx_done, ndev, @@ -1675,22 +1696,41 @@ static int bcm_sysport_change_mac(struct net_device *dev, void *p) return 0; } -static struct net_device_stats *bcm_sysport_get_nstats(struct net_device *dev) +static void bcm_sysport_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct bcm_sysport_priv *priv = netdev_priv(dev); - unsigned long tx_bytes = 0, tx_packets = 0; + struct bcm_sysport_stats64 *stats64 = &priv->stats64; struct bcm_sysport_tx_ring *ring; + u64 tx_packets = 0, tx_bytes = 0; + unsigned int start; unsigned int q; + netdev_stats_to_stats64(stats, &dev->stats); + for (q = 0; q < dev->num_tx_queues; q++) { ring = &priv->tx_rings[q]; - tx_bytes += ring->bytes; - tx_packets += ring->packets; + do { + start = u64_stats_fetch_begin_irq(&priv->syncp); + tx_bytes = ring->bytes; + tx_packets = ring->packets; + } while (u64_stats_fetch_retry_irq(&priv->syncp, start)); + + stats->tx_bytes += tx_bytes; + stats->tx_packets += tx_packets; } - dev->stats.tx_bytes = tx_bytes; - dev->stats.tx_packets = tx_packets; - return &dev->stats; + /* lockless update tx_bytes and tx_packets */ + u64_stats_update_begin(&priv->syncp); + stats64->tx_bytes = stats->tx_bytes; + stats64->tx_packets = stats->tx_packets; + u64_stats_update_end(&priv->syncp); + + do { + start = u64_stats_fetch_begin_irq(&priv->syncp); + stats->rx_packets = stats64->rx_packets; + stats->rx_bytes = stats64->rx_bytes; + } while (u64_stats_fetch_retry_irq(&priv->syncp, start)); } static void bcm_sysport_netif_start(struct net_device *dev) @@ -1954,7 +1994,7 @@ static const struct net_device_ops bcm_sysport_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = bcm_sysport_poll_controller, #endif - .ndo_get_stats = bcm_sysport_get_nstats, + .ndo_get_stats64 = bcm_sysport_get_stats64, }; #define REV_FMT "v%2x.%02x" @@ -2102,6 +2142,8 @@ static int bcm_sysport_probe(struct platform_device *pdev) /* libphy will adjust the link state accordingly */ netif_carrier_off(dev); + u64_stats_init(&priv->syncp); + ret = register_netdev(dev); if (ret) { dev_err(&pdev->dev, "failed to register net_device\n"); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index 77a51c167a69..80b4ffff63b7 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -603,6 +603,7 @@ struct bcm_sysport_mib { /* HW maintains a large list of counters */ enum bcm_sysport_stat_type { BCM_SYSPORT_STAT_NETDEV = -1, + BCM_SYSPORT_STAT_NETDEV64, BCM_SYSPORT_STAT_MIB_RX, 
BCM_SYSPORT_STAT_MIB_TX, BCM_SYSPORT_STAT_RUNT, @@ -619,6 +620,13 @@ enum bcm_sysport_stat_type { .type = BCM_SYSPORT_STAT_NETDEV, \ } +#define STAT_NETDEV64(m) { \ + .stat_string = __stringify(m), \ + .stat_sizeof = sizeof(((struct bcm_sysport_stats64 *)0)->m), \ + .stat_offset = offsetof(struct bcm_sysport_stats64, m), \ + .type = BCM_SYSPORT_STAT_NETDEV64, \ +} + #define STAT_MIB(str, m, _type) { \ .stat_string = str, \ .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \ @@ -659,6 +667,14 @@ struct bcm_sysport_stats { u16 reg_offset; }; +struct bcm_sysport_stats64 { + /* 64bit stats on 32bit/64bit Machine */ + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; +}; + /* Software house keeping helper structure */ struct bcm_sysport_cb { struct sk_buff *skb; /* SKB for RX packets */ @@ -743,5 +759,10 @@ struct bcm_sysport_priv { /* Ethtool */ u32 msg_enable; + + struct bcm_sysport_stats64 stats64; + + /* For atomic update generic 64bit value on 32bit Machine */ + struct u64_stats_sync syncp; }; #endif /* __BCM_SYSPORT_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 67fe3d826566..1216c1f1e052 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -4284,15 +4284,17 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) return 0; } -int __bnx2x_setup_tc(struct net_device *dev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *tc) +int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) { - if (tc->type != TC_SETUP_MQPRIO) - return -EINVAL; + struct tc_mqprio_qopt *mqprio = type_data; + + if (type != TC_SETUP_MQPRIO) + return -EOPNOTSUPP; - tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - return bnx2x_setup_tc(dev, tc->mqprio->num_tc); + return bnx2x_setup_tc(dev, mqprio->num_tc); } /* called with rtnl_lock */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index c26688d2f326..a5265e1344f1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -486,8 +486,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev); /* setup_tc callback */ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc); -int __bnx2x_setup_tc(struct net_device *dev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *tc); +int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data); int bnx2x_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi); diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile index a7ca45b251cb..d141a22ac50b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/Makefile +++ b/drivers/net/ethernet/broadcom/bnxt/Makefile @@ -1,3 +1,3 @@ obj-$(CONFIG_BNXT) += bnxt_en.o -bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o +bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index e7c8539cbddf..6e14fc4fe2c8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -33,6 +33,7 @@ #include <linux/mii.h> #include <linux/if.h> #include <linux/if_vlan.h> +#include <linux/if_bridge.h> #include <linux/rtc.h> 
 #include <linux/bpf.h>
 #include <net/ip.h>
@@ -56,6 +57,7 @@
 #include "bnxt_ethtool.h"
 #include "bnxt_dcb.h"
 #include "bnxt_xdp.h"
+#include "bnxt_vfr.h"
 
 #define BNXT_TX_TIMEOUT		(5 * HZ)
 
@@ -243,6 +245,16 @@ const u16 bnxt_lhint_arr[] = {
 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
 };
 
+static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
+{
+	struct metadata_dst *md_dst = skb_metadata_dst(skb);
+
+	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
+		return 0;
+
+	return md_dst->u.port_info.port_id;
+}
+
 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct bnxt *bp = netdev_priv(dev);
@@ -287,7 +299,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tx_buf->nr_frags = last_frag;
 
 	vlan_tag_flags = 0;
-	cfa_action = 0;
+	cfa_action = bnxt_xmit_get_cfa_action(skb);
 	if (skb_vlan_tag_present(skb)) {
 		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
@@ -322,7 +334,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		tx_push1->tx_bd_hsize_lflags = 0;
 
 		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
-		tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
+		tx_push1->tx_bd_cfa_action =
+			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
 
 		end = pdata + length;
 		end = PTR_ALIGN(end, 8) - 1;
@@ -427,7 +440,8 @@ normal_tx:
 	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
 
 	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
-	txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
+	txbd1->tx_bd_cfa_action =
+		cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
 	for (i = 0; i < last_frag; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
@@ -1032,7 +1046,10 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 		bnxt_sched_reset(bp, rxr);
 		return;
 	}
-
+	/* Store cfa_code in tpa_info to use in tpa_end
+	 * completion processing.
+	 */
+	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
 	prod_rx_buf->data = tpa_info->data;
 	prod_rx_buf->data_ptr = tpa_info->data_ptr;
 
@@ -1267,6 +1284,17 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
 	return skb;
 }
 
+/* Given the cfa_code of a received packet, determine which
+ * netdev (vf-rep or PF) the packet is destined to.
+ */
+static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
+{
+	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
+
+	/* if vf-rep dev is NULL, the packet must belong to the PF */
+	return dev ?
dev : bp->dev; +} + static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, @@ -1360,7 +1388,9 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, return NULL; } } - skb->protocol = eth_type_trans(skb, bp->dev); + + skb->protocol = + eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code)); if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); @@ -1387,6 +1417,18 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, return skb; } +static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, + struct sk_buff *skb) +{ + if (skb->dev != bp->dev) { + /* this packet belongs to a vf-rep */ + bnxt_vf_rep_rx(bp, skb); + return; + } + skb_record_rx_queue(skb, bnapi->index); + napi_gro_receive(&bnapi->napi, skb); +} + /* returns the following: * 1 - 1 packet successfully received * 0 - successful TPA_START, packet not completed yet @@ -1403,7 +1445,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, struct rx_cmp *rxcmp; struct rx_cmp_ext *rxcmp1; u32 tmp_raw_cons = *raw_cons; - u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons); + u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons); struct bnxt_sw_rx_bd *rx_buf; unsigned int len; u8 *data_ptr, agg_bufs, cmp_type; @@ -1445,8 +1487,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, rc = -ENOMEM; if (likely(skb)) { - skb_record_rx_queue(skb, bnapi->index); - napi_gro_receive(&bnapi->napi, skb); + bnxt_deliver_skb(bp, bnapi, skb); rc = 1; } *event |= BNXT_RX_EVENT; @@ -1535,7 +1576,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type); } - skb->protocol = eth_type_trans(skb, dev); + cfa_code = RX_CMP_CFA_CODE(rxcmp1); + skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code)); if ((rxcmp1->rx_cmp_flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) && @@ -1560,8 +1602,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, } } - skb_record_rx_queue(skb, bnapi->index); - napi_gro_receive(&bnapi->napi, skb); + bnxt_deliver_skb(bp, bnapi, skb); rc = 1; next_rx: @@ -4577,6 +4618,7 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp) { struct hwrm_func_qcfg_input req = {0}; struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + u16 flags; int rc; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); @@ -4593,15 +4635,15 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp) vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; } #endif - if (BNXT_PF(bp)) { - u16 flags = le16_to_cpu(resp->flags); - - if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED | - FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) - bp->flags |= BNXT_FLAG_FW_LLDP_AGENT; - if (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST) - bp->flags |= BNXT_FLAG_MULTI_HOST; + flags = le16_to_cpu(resp->flags); + if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED | + FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) { + bp->flags |= BNXT_FLAG_FW_LLDP_AGENT; + if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED) + bp->flags |= BNXT_FLAG_FW_DCBX_AGENT; } + if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) + bp->flags |= BNXT_FLAG_MULTI_HOST; switch (resp->port_partition_type) { case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: @@ -4610,6 +4652,13 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp) bp->port_partition_type = resp->port_partition_type; break; } + if 
(bp->hwrm_spec_code < 0x10707 || + resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB) + bp->br_mode = BRIDGE_MODE_VEB; + else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA) + bp->br_mode = BRIDGE_MODE_VEPA; + else + bp->br_mode = BRIDGE_MODE_UNDEF; func_qcfg_exit: mutex_unlock(&bp->hwrm_cmd_lock); @@ -4911,6 +4960,26 @@ static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, } } +static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) +{ + struct hwrm_func_cfg_input req = {0}; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + req.fid = cpu_to_le16(0xffff); + req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE); + if (br_mode == BRIDGE_MODE_VEB) + req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB; + else if (br_mode == BRIDGE_MODE_VEPA) + req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; + else + return -EINVAL; + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + rc = -EIO; + return rc; +} + static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) { struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; @@ -5559,12 +5628,10 @@ void bnxt_tx_disable(struct bnxt *bp) { int i; struct bnxt_tx_ring_info *txr; - struct netdev_queue *txq; if (bp->tx_ring) { for (i = 0; i < bp->tx_nr_rings; i++) { txr = &bp->tx_ring[i]; - txq = netdev_get_tx_queue(bp->dev, i); txr->dev_state = BNXT_DEV_STATE_CLOSING; } } @@ -5577,11 +5644,9 @@ void bnxt_tx_enable(struct bnxt *bp) { int i; struct bnxt_tx_ring_info *txr; - struct netdev_queue *txq; for (i = 0; i < bp->tx_nr_rings; i++) { txr = &bp->tx_ring[i]; - txq = netdev_get_tx_queue(bp->dev, i); txr->dev_state = 0; } netif_tx_wake_all_queues(bp->dev); @@ -5646,7 +5711,7 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) if (rc) goto hwrm_phy_qcaps_exit; - if (resp->eee_supported & PORT_PHY_QCAPS_RESP_EEE_SUPPORTED) { + if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) { struct ethtool_eee *eee = &bp->eee; u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode); @@ -5686,13 +5751,15 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp)); link_info->phy_link_status = resp->link; - link_info->duplex = resp->duplex; + link_info->duplex = resp->duplex_cfg; + if (bp->hwrm_spec_code >= 0x10800) + link_info->duplex = resp->duplex_state; link_info->pause = resp->pause; link_info->auto_mode = resp->auto_mode; link_info->auto_pause_setting = resp->auto_pause; link_info->lp_pause = resp->link_partner_adv_pause; link_info->force_pause_setting = resp->force_pause; - link_info->duplex_setting = resp->duplex; + link_info->duplex_setting = resp->duplex_cfg; if (link_info->phy_link_status == BNXT_LINK_LINK) link_info->link_speed = le16_to_cpu(resp->link_speed); else @@ -6214,6 +6281,9 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) /* Poll link status and check for SFP+ module status */ bnxt_get_port_module_status(bp); + /* VF-reps may need to be re-opened after the PF is re-opened */ + if (BNXT_PF(bp)) + bnxt_vf_reps_open(bp); return 0; open_err: @@ -6302,6 +6372,10 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) if (rc) netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n"); } + + /* Close the VF-reps before closing PF */ + if (BNXT_PF(bp)) + bnxt_vf_reps_close(bp); #endif /* Change device state to avoid TX queue wake up's */ bnxt_tx_disable(bp); @@ -6813,7 +6887,8 @@ static void bnxt_timer(unsigned long data) if (atomic_read(&bp->intr_sem) != 0) 
		goto bnxt_restart_timer;
 
-	if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS)) {
+	if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
+	    bp->stats_coal_ticks) {
 		set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
 		schedule_work(&bp->sp_task);
 	}
@@ -7162,15 +7237,17 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
 	return 0;
 }
 
-static int bnxt_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
-			 __be16 proto, struct tc_to_netdev *ntc)
+static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
+			 void *type_data)
 {
-	if (ntc->type != TC_SETUP_MQPRIO)
-		return -EINVAL;
+	struct tc_mqprio_qopt *mqprio = type_data;
+
+	if (type != TC_SETUP_MQPRIO)
+		return -EOPNOTSUPP;
 
-	ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
 
-	return bnxt_setup_mq_tc(dev, ntc->mqprio->num_tc);
+	return bnxt_setup_mq_tc(dev, mqprio->num_tc);
 }
 
 #ifdef CONFIG_RFS_ACCEL
@@ -7422,6 +7499,102 @@ static void bnxt_udp_tunnel_del(struct net_device *dev,
 	schedule_work(&bp->sp_task);
 }
 
+static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+			       struct net_device *dev, u32 filter_mask,
+			       int nlflags)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
+				       nlflags, filter_mask, NULL);
+}
+
+static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
+			       u16 flags)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct nlattr *attr, *br_spec;
+	int rem, rc = 0;
+
+	if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
+		return -EOPNOTSUPP;
+
+	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+	if (!br_spec)
+		return -EINVAL;
+
+	nla_for_each_nested(attr, br_spec, rem) {
+		u16 mode;
+
+		if (nla_type(attr) != IFLA_BRIDGE_MODE)
+			continue;
+
+		if (nla_len(attr) < sizeof(mode))
+			return -EINVAL;
+
+		mode = nla_get_u16(attr);
+		if (mode == bp->br_mode)
+			break;
+
+		rc = bnxt_hwrm_set_br_mode(bp, mode);
+		if (!rc)
+			bp->br_mode = mode;
+		break;
+	}
+	return rc;
+}
+
+static int bnxt_get_phys_port_name(struct net_device *dev, char *buf,
+				   size_t len)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	int rc;
+
+	/* The PF and its VF-reps only support the switchdev framework */
+	if (!BNXT_PF(bp))
+		return -EOPNOTSUPP;
+
+	rc = snprintf(buf, len, "p%d", bp->pf.port_id);
+
+	if (rc >= len)
+		return -EOPNOTSUPP;
+	return 0;
+}
+
+int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr)
+{
+	if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
+		return -EOPNOTSUPP;
+
+	/* The PF and its VF-reps only support the switchdev framework */
+	if (!BNXT_PF(bp))
+		return -EOPNOTSUPP;
+
+	switch (attr->id) {
+	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
+		/* In SRIOV each PF-pool (PF + child VFs) serves as a
+		 * switching domain; the PF's perm mac-addr can be used
+		 * as the unique parent-id
+		 */
+		attr->u.ppid.id_len = ETH_ALEN;
+		ether_addr_copy(attr->u.ppid.id, bp->pf.mac_addr);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+static int bnxt_swdev_port_attr_get(struct net_device *dev,
+				    struct switchdev_attr *attr)
+{
+	return bnxt_port_attr_get(netdev_priv(dev), attr);
+}
+
+static const struct switchdev_ops bnxt_switchdev_ops = {
+	.switchdev_port_attr_get	= bnxt_swdev_port_attr_get
+};
+
 static const struct net_device_ops bnxt_netdev_ops = {
 	.ndo_open		= bnxt_open,
 	.ndo_start_xmit		= bnxt_start_xmit,
@@ -7453,6 +7626,9 @@ static const struct net_device_ops bnxt_netdev_ops = {
 	.ndo_udp_tunnel_add	= bnxt_udp_tunnel_add,
.ndo_udp_tunnel_del = bnxt_udp_tunnel_del, .ndo_xdp = bnxt_xdp, + .ndo_bridge_getlink = bnxt_bridge_getlink, + .ndo_bridge_setlink = bnxt_bridge_setlink, + .ndo_get_phys_port_name = bnxt_get_phys_port_name }; static void bnxt_remove_one(struct pci_dev *pdev) @@ -7460,8 +7636,10 @@ static void bnxt_remove_one(struct pci_dev *pdev) struct net_device *dev = pci_get_drvdata(pdev); struct bnxt *bp = netdev_priv(dev); - if (BNXT_PF(bp)) + if (BNXT_PF(bp)) { bnxt_sriov_disable(bp); + bnxt_dl_unregister(bp); + } pci_disable_pcie_error_reporting(pdev); unregister_netdev(dev); @@ -7710,6 +7888,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->netdev_ops = &bnxt_netdev_ops; dev->watchdog_timeo = BNXT_TX_TIMEOUT; dev->ethtool_ops = &bnxt_ethtool_ops; + SWITCHDEV_SET_OPS(dev, &bnxt_switchdev_ops); pci_set_drvdata(pdev, dev); rc = bnxt_alloc_hwrm_resources(bp); @@ -7764,6 +7943,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) #ifdef CONFIG_BNXT_SRIOV init_waitqueue_head(&bp->sriov_cfg_wait); + mutex_init(&bp->sriov_lock); #endif bp->gro_func = bnxt_gro_func_5730x; if (BNXT_CHIP_P4_PLUS(bp)) @@ -7855,6 +8035,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) goto init_err_clr_int; + if (BNXT_PF(bp)) + bnxt_dl_register(bp); + netdev_info(dev, "%s found at mem %lx, node addr %pM\n", board_info[ent->driver_data].name, (long)pci_resource_start(pdev, 0), dev->dev_addr); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index f34691f85602..2d84d5719b70 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -12,13 +12,16 @@ #define BNXT_H #define DRV_MODULE_NAME "bnxt_en" -#define DRV_MODULE_VERSION "1.7.0" +#define DRV_MODULE_VERSION "1.8.0" #define DRV_VER_MAJ 1 -#define DRV_VER_MIN 7 +#define DRV_VER_MIN 8 #define DRV_VER_UPD 0 #include <linux/interrupt.h> +#include <net/devlink.h> +#include <net/dst_metadata.h> +#include <net/switchdev.h> struct tx_bd { __le32 tx_bd_len_flags_type; @@ -242,6 +245,10 @@ struct rx_cmp_ext { ((le32_to_cpu((rxcmp1)->rx_cmp_flags2) & \ RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3) +#define RX_CMP_CFA_CODE(rxcmpl1) \ + ((le32_to_cpu((rxcmpl1)->rx_cmp_cfa_code_errors_v2) & \ + RX_CMPL_CFA_CODE_MASK) >> RX_CMPL_CFA_CODE_SFT) + struct rx_agg_cmp { __le32 rx_agg_cmp_len_flags_type; #define RX_AGG_CMP_TYPE (0x3f << 0) @@ -311,6 +318,10 @@ struct rx_tpa_start_cmp_ext { __le32 rx_tpa_start_cmp_hdr_info; }; +#define TPA_START_CFA_CODE(rx_tpa_start) \ + ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \ + RX_TPA_START_CMP_CFA_CODE) >> RX_TPA_START_CMPL_CFA_CODE_SHIFT) + struct rx_tpa_end_cmp { __le32 rx_tpa_end_cmp_len_flags_type; #define RX_TPA_END_CMP_TYPE (0x3f << 0) @@ -618,6 +629,8 @@ struct bnxt_tpa_info { #define BNXT_TPA_OUTER_L3_OFF(hdr_info) \ ((hdr_info) & 0x1ff) + + u16 cfa_code; /* cfa_code in TPA start compl */ }; struct bnxt_rx_ring_info { @@ -825,8 +838,8 @@ struct bnxt_link_info { u8 loop_back; u8 link_up; u8 duplex; -#define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_HALF -#define BNXT_LINK_DUPLEX_FULL PORT_PHY_QCFG_RESP_DUPLEX_FULL +#define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF +#define BNXT_LINK_DUPLEX_FULL PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL u8 pause; #define BNXT_LINK_PAUSE_TX PORT_PHY_QCFG_RESP_PAUSE_TX #define BNXT_LINK_PAUSE_RX PORT_PHY_QCFG_RESP_PAUSE_RX @@ -928,6 +941,24 @@ struct bnxt_test_info { #define 
BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014 #define BNXT_CAG_REG_BASE 0x300000 +struct bnxt_vf_rep_stats { + u64 packets; + u64 bytes; + u64 dropped; +}; + +struct bnxt_vf_rep { + struct bnxt *bp; + struct net_device *dev; + struct metadata_dst *dst; + u16 vf_idx; + u16 tx_cfa_action; + u16 rx_cfa_code; + + struct bnxt_vf_rep_stats rx_stats; + struct bnxt_vf_rep_stats tx_stats; +}; + struct bnxt { void __iomem *bar0; void __iomem *bar1; @@ -1027,6 +1058,7 @@ struct bnxt { #define BNXT_FLAG_MULTI_HOST 0x100000 #define BNXT_FLAG_SHORT_CMD 0x200000 #define BNXT_FLAG_DOUBLE_DB 0x400000 + #define BNXT_FLAG_FW_DCBX_AGENT 0x800000 #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000 #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ @@ -1164,6 +1196,7 @@ struct bnxt { u8 nge_port_cnt; __le16 nge_fw_dst_port_id; u8 port_partition_type; + u16 br_mode; u16 rx_coal_ticks; u16 rx_coal_ticks_irq; @@ -1206,6 +1239,12 @@ struct bnxt { wait_queue_head_t sriov_cfg_wait; bool sriov_cfg; #define BNXT_SRIOV_CFG_WAIT_TMO msecs_to_jiffies(10000) + + /* lock to protect VF-rep creation/cleanup via + * multiple paths such as ->sriov_configure() and + * devlink ->eswitch_mode_set() + */ + struct mutex sriov_lock; #endif #define BNXT_NTP_FLTR_MAX_FLTR 4096 @@ -1232,6 +1271,12 @@ struct bnxt { struct bnxt_led_info leds[BNXT_MAX_LED]; struct bpf_prog *xdp_prog; + + /* devlink interface and vf-rep structs */ + struct devlink *dl; + enum devlink_eswitch_mode eswitch_mode; + struct bnxt_vf_rep **vf_reps; /* array of vf-rep ptrs */ + u16 *cfa_code_map; /* cfa_code -> vf_idx map */ }; #define BNXT_RX_STATS_OFFSET(counter) \ @@ -1306,4 +1351,5 @@ int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, int bnxt_setup_mq_tc(struct net_device *dev, u8 tc); int bnxt_get_max_rings(struct bnxt *, int *, int *, bool); void bnxt_restore_pf_fw_resources(struct bnxt *bp); +int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr); #endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index 5c6dd0ce209f..aa1f3a2c7a78 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -93,6 +93,12 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets, cos2bw.tsa = QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS; cos2bw.bw_weight = ets->tc_tx_bw[i]; + /* older firmware requires min_bw to be set to the + * same weight value in percent. 
+ */ + cos2bw.min_bw = + cpu_to_le32((ets->tc_tx_bw[i] * 100) | + BW_VALUE_UNIT_PERCENT1_100); } memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4); if (i == 0) { @@ -549,13 +555,18 @@ static u8 bnxt_dcbnl_setdcbx(struct net_device *dev, u8 mode) { struct bnxt *bp = netdev_priv(dev); - /* only support IEEE */ - if ((mode & DCB_CAP_DCBX_VER_CEE) || !(mode & DCB_CAP_DCBX_VER_IEEE)) + /* All firmware DCBX settings are set in NVRAM */ + if (bp->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) return 1; if (mode & DCB_CAP_DCBX_HOST) { if (BNXT_VF(bp) || (bp->flags & BNXT_FLAG_FW_LLDP_AGENT)) return 1; + + /* only support IEEE */ + if ((mode & DCB_CAP_DCBX_VER_CEE) || + !(mode & DCB_CAP_DCBX_VER_IEEE)) + return 1; } if (mode == bp->dcbx_cap) @@ -584,7 +595,7 @@ void bnxt_dcb_init(struct bnxt *bp) bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE; if (BNXT_PF(bp) && !(bp->flags & BNXT_FLAG_FW_LLDP_AGENT)) bp->dcbx_cap |= DCB_CAP_DCBX_HOST; - else + else if (bp->flags & BNXT_FLAG_FW_DCBX_AGENT) bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED; bp->dev->dcbnl_ops = &dcbnl_ops; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h index ecd0a5e46a49..d2e0af960bf5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h @@ -26,6 +26,7 @@ struct bnxt_cos2bw_cfg { u8 queue_id; __le32 min_bw; __le32 max_bw; +#define BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) u8 tsa; u8 pri_lvl; u8 bw_weight; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index be6acadcb202..08b870d7d466 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -86,9 +86,11 @@ static int bnxt_set_coalesce(struct net_device *dev, if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) { u32 stats_ticks = coal->stats_block_coalesce_usecs; - stats_ticks = clamp_t(u32, stats_ticks, - BNXT_MIN_STATS_COAL_TICKS, - BNXT_MAX_STATS_COAL_TICKS); + /* Allow 0, which means disable. 
*/ + if (stats_ticks) + stats_ticks = clamp_t(u32, stats_ticks, + BNXT_MIN_STATS_COAL_TICKS, + BNXT_MAX_STATS_COAL_TICKS); stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS); bp->stats_coal_ticks = stats_ticks; update_stats = true; @@ -198,19 +200,23 @@ static const struct { #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr) +static int bnxt_get_num_stats(struct bnxt *bp) +{ + int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings; + + if (bp->flags & BNXT_FLAG_PORT_STATS) + num_stats += BNXT_NUM_PORT_STATS; + + return num_stats; +} + static int bnxt_get_sset_count(struct net_device *dev, int sset) { struct bnxt *bp = netdev_priv(dev); switch (sset) { - case ETH_SS_STATS: { - int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings; - - if (bp->flags & BNXT_FLAG_PORT_STATS) - num_stats += BNXT_NUM_PORT_STATS; - - return num_stats; - } + case ETH_SS_STATS: + return bnxt_get_num_stats(bp); case ETH_SS_TEST: if (!bp->num_tests) return -EOPNOTSUPP; @@ -225,11 +231,8 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, { u32 i, j = 0; struct bnxt *bp = netdev_priv(dev); - u32 buf_size = sizeof(struct ctx_hw_stats) * bp->cp_nr_rings; u32 stat_fields = sizeof(struct ctx_hw_stats) / 8; - memset(buf, 0, buf_size); - if (!bp->bnapi) return; @@ -520,7 +523,7 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd) struct flow_keys *fkeys; int i, rc = -EINVAL; - if (fs->location < 0 || fs->location >= BNXT_NTP_FLTR_MAX_FLTR) + if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR) return rc; for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { @@ -835,7 +838,7 @@ static void bnxt_get_drvinfo(struct net_device *dev, strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version)); strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); - info->n_stats = BNXT_NUM_STATS * bp->cp_nr_rings; + info->n_stats = bnxt_get_num_stats(bp); info->testinfo_len = bp->num_tests; /* TODO CHIMP_FW: eeprom dump details */ info->eedump_len = 0; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 7dc71bb95837..3ba22e8ee914 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -11,14 +11,14 @@ #ifndef BNXT_HSI_H #define BNXT_HSI_H -/* HSI and HWRM Specification 1.7.6 */ +/* HSI and HWRM Specification 1.8.0 */ #define HWRM_VERSION_MAJOR 1 -#define HWRM_VERSION_MINOR 7 -#define HWRM_VERSION_UPDATE 6 +#define HWRM_VERSION_MINOR 8 +#define HWRM_VERSION_UPDATE 0 -#define HWRM_VERSION_RSVD 2 /* non-zero means beta version */ +#define HWRM_VERSION_RSVD 0 /* non-zero means beta version */ -#define HWRM_VERSION_STR "1.7.6.2" +#define HWRM_VERSION_STR "1.8.0.0" /* * Following is the signature for HWRM message field that indicates not * applicable (All F's). Need to cast it the size of the field if needed. 
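The bnxt_get_num_stats() refactor above exists so that ethtool's two consumers of the stats count — .get_sset_count() and the n_stats field filled in by .get_drvinfo() — can no longer drift apart. A minimal sketch of the same pattern under hypothetical demo_* names (not the bnxt API; the counter macros are assumptions for illustration):

/* Sketch only: one helper feeds every ethtool stat-count consumer. */
#include <linux/ethtool.h>
#include <linux/netdevice.h>

#define DEMO_NUM_RING_STATS	8	/* per-ring counters (assumed) */
#define DEMO_NUM_PORT_STATS	24	/* optional port counters (assumed) */

struct demo_priv {
	unsigned int nr_rings;
	bool has_port_stats;
};

static int demo_get_num_stats(struct demo_priv *priv)
{
	int num_stats = DEMO_NUM_RING_STATS * priv->nr_rings;

	if (priv->has_port_stats)
		num_stats += DEMO_NUM_PORT_STATS;
	return num_stats;
}

static int demo_get_sset_count(struct net_device *dev, int sset)
{
	if (sset != ETH_SS_STATS)
		return -EOPNOTSUPP;
	return demo_get_num_stats(netdev_priv(dev));
}

The same helper would also fill info->n_stats in .get_drvinfo(), which is what the bnxt hunk corrects: the old code computed the count separately there and left out the port statistics.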
@@ -813,7 +813,7 @@ struct hwrm_func_qcfg_output { #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL - #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL + #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL u8 mac_address[6]; __le16 pci_id; __le16 alloc_rsscos_ctx; @@ -835,9 +835,8 @@ struct hwrm_func_qcfg_output { u8 port_pf_cnt; #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL __le16 dflt_vnic_id; - u8 host_cnt; - #define FUNC_QCFG_RESP_HOST_CNT_UNAVAIL 0x0UL u8 unused_0; + u8 unused_1; __le32 min_bw; #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0 @@ -874,12 +873,56 @@ struct hwrm_func_qcfg_output { #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL - u8 unused_1; + u8 unused_2; __le16 alloc_vfs; __le32 alloc_mcast_filters; __le32 alloc_hw_ring_grps; __le16 alloc_sp_tx_rings; + u8 unused_3; + u8 valid; +}; + +/* hwrm_func_vlan_cfg */ +/* Input (48 bytes) */ +struct hwrm_func_vlan_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0; + u8 unused_1; + __le32 enables; + #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_VID 0x1UL + #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_VID 0x2UL + #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_PCP 0x4UL + #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_PCP 0x8UL + #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_TPID 0x10UL + #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_TPID 0x20UL + __le16 stag_vid; + u8 stag_pcp; u8 unused_2; + __be16 stag_tpid; + __le16 ctag_vid; + u8 ctag_pcp; + u8 unused_3; + __be16 ctag_tpid; + __le32 rsvd1; + __le32 rsvd2; + __le32 unused_4; +}; + +/* Output (16 bytes) */ +struct hwrm_func_vlan_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; u8 valid; }; @@ -902,6 +945,7 @@ struct hwrm_func_cfg_input { #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL + #define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL __le32 enables; #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL @@ -1456,9 +1500,9 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL - u8 duplex; - #define PORT_PHY_QCFG_RESP_DUPLEX_HALF 0x0UL - #define PORT_PHY_QCFG_RESP_DUPLEX_FULL 0x1UL + u8 duplex_cfg; + #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL + #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL u8 pause; #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL @@ -1573,6 +1617,9 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL u8 media_type; #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL @@ -1651,14 +1698,16 @@ struct hwrm_port_phy_qcfg_output { #define 
PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL + u8 duplex_state; + #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL + #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL u8 unused_1; - u8 unused_2; char phy_vendor_name[16]; char phy_vendor_partnumber[16]; - __le32 unused_3; + __le32 unused_2; + u8 unused_3; u8 unused_4; u8 unused_5; - u8 unused_6; u8 valid; }; @@ -1744,6 +1793,51 @@ struct hwrm_port_mac_cfg_output { u8 valid; }; +/* hwrm_port_mac_ptp_qcfg */ +/* Input (24 bytes) */ +struct hwrm_port_mac_ptp_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + __le16 unused_0[3]; +}; + +/* Output (80 bytes) */ +struct hwrm_port_mac_ptp_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 flags; + #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL + #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL + u8 unused_0; + __le16 unused_1; + __le32 rx_ts_reg_off_lower; + __le32 rx_ts_reg_off_upper; + __le32 rx_ts_reg_off_seq_id; + __le32 rx_ts_reg_off_src_id_0; + __le32 rx_ts_reg_off_src_id_1; + __le32 rx_ts_reg_off_src_id_2; + __le32 rx_ts_reg_off_domain_id; + __le32 rx_ts_reg_off_fifo; + __le32 rx_ts_reg_off_fifo_adv; + __le32 rx_ts_reg_off_granularity; + __le32 tx_ts_reg_off_lower; + __le32 tx_ts_reg_off_upper; + __le32 tx_ts_reg_off_seq_id; + __le32 tx_ts_reg_off_fifo; + __le32 tx_ts_reg_off_granularity; + __le32 unused_2; + u8 unused_3; + u8 unused_4; + u8 unused_5; + u8 valid; +}; + /* hwrm_port_qstats */ /* Input (40 bytes) */ struct hwrm_port_qstats_input { @@ -1874,10 +1968,10 @@ struct hwrm_port_phy_qcaps_output { __le16 req_type; __le16 seq_id; __le16 resp_len; - u8 eee_supported; - #define PORT_PHY_QCAPS_RESP_EEE_SUPPORTED 0x1UL - #define PORT_PHY_QCAPS_RESP_RSVD1_MASK 0xfeUL - #define PORT_PHY_QCAPS_RESP_RSVD1_SFT 1 + u8 flags; + #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL + #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfeUL + #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 1 u8 unused_0; __le16 supported_speeds_force_mode; #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL @@ -3152,6 +3246,95 @@ struct hwrm_queue_cos2bw_cfg_output { u8 valid; }; +/* hwrm_queue_dscp_qcaps */ +/* Input (24 bytes) */ +struct hwrm_queue_dscp_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 port_id; + u8 unused_0[7]; +}; + +/* Output (16 bytes) */ +struct hwrm_queue_dscp_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 num_dscp_bits; + u8 unused_0; + __le16 max_entries; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_queue_dscp2pri_qcfg */ +/* Input (32 bytes) */ +struct hwrm_queue_dscp2pri_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 dest_data_addr; + u8 port_id; + u8 unused_0; + __le16 dest_data_buffer_size; + __le32 unused_1; +}; + +/* Output (16 bytes) */ +struct hwrm_queue_dscp2pri_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 entry_cnt; + u8 default_pri; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_queue_dscp2pri_cfg */ +/* Input (40 bytes) */ +struct hwrm_queue_dscp2pri_cfg_input { + __le16 req_type; + __le16 
cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 src_data_addr; + __le32 flags; + #define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI 0x1UL + __le32 enables; + #define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI 0x1UL + u8 port_id; + u8 default_pri; + __le16 entry_cnt; + __le32 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_queue_dscp2pri_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + /* hwrm_vnic_alloc */ /* Input (24 bytes) */ struct hwrm_vnic_alloc_input { @@ -4038,7 +4221,7 @@ struct hwrm_cfa_encap_record_alloc_input { #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL u8 unused_0; __le16 unused_1; - __le32 encap_data[16]; + __le32 encap_data[20]; }; /* Output (16 bytes) */ @@ -4120,8 +4303,8 @@ struct hwrm_cfa_ntuple_filter_alloc_input { #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL u8 ip_protocol; #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x6UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x11UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL __le16 dst_id; __le16 mirror_vnic_id; u8 tunnel_type; @@ -4224,6 +4407,58 @@ struct hwrm_cfa_ntuple_filter_cfg_output { u8 valid; }; +/* hwrm_cfa_vfr_alloc */ +/* Input (32 bytes) */ +struct hwrm_cfa_vfr_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vf_id; + __le16 reserved; + __le32 unused_0; + char vfr_name[32]; +}; + +/* Output (16 bytes) */ +struct hwrm_cfa_vfr_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 rx_cfa_code; + __le16 tx_cfa_action; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; +}; + +/* hwrm_cfa_vfr_free */ +/* Input (24 bytes) */ +struct hwrm_cfa_vfr_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + char vfr_name[32]; +}; + +/* Output (16 bytes) */ +struct hwrm_cfa_vfr_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + /* hwrm_tunnel_dst_port_query */ /* Input (24 bytes) */ struct hwrm_tunnel_dst_port_query_input { @@ -4448,12 +4683,13 @@ struct hwrm_fw_reset_input { #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_RSVD 0x4UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL u8 selfrst_status; #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL - __le16 unused_0[3]; + u8 host_idx; + u8 unused_0[5]; }; /* Output (16 bytes) */ @@ -4487,7 +4723,7 @@ struct hwrm_fw_qstatus_input { #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL - #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_RSVD 0x4UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL u8 unused_0[7]; }; @@ -4572,6 +4808,16 @@ struct hwrm_fw_set_structured_data_output { u8 valid; }; +/* Command specific Error Codes (8 bytes) */ +struct 
hwrm_fw_set_structured_data_cmd_err { + u8 code; + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT 0x2UL + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL + u8 unused_0[7]; +}; + /* hwrm_fw_get_structured_data */ /* Input (32 bytes) */ struct hwrm_fw_get_structured_data_input { @@ -4611,6 +4857,14 @@ struct hwrm_fw_get_structured_data_output { u8 valid; }; +/* Command specific Error Codes (8 bytes) */ +struct hwrm_fw_get_structured_data_cmd_err { + u8 code; + #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL + #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL + u8 unused_0[7]; +}; + /* hwrm_exec_fwd_resp */ /* Input (128 bytes) */ struct hwrm_exec_fwd_resp_input { @@ -5411,7 +5665,7 @@ struct cmd_nums { #define HWRM_PORT_LPBK_CLR_STATS (0x26UL) #define HWRM_PORT_PHY_QCFG (0x27UL) #define HWRM_PORT_MAC_QCFG (0x28UL) - #define RESERVED7 (0x29UL) + #define HWRM_PORT_MAC_PTP_QCFG (0x29UL) #define HWRM_PORT_PHY_QCAPS (0x2aUL) #define HWRM_PORT_PHY_I2C_WRITE (0x2bUL) #define HWRM_PORT_PHY_I2C_READ (0x2cUL) @@ -5421,14 +5675,17 @@ struct cmd_nums { #define HWRM_QUEUE_QPORTCFG (0x30UL) #define HWRM_QUEUE_QCFG (0x31UL) #define HWRM_QUEUE_CFG (0x32UL) - #define RESERVED2 (0x33UL) - #define RESERVED3 (0x34UL) + #define HWRM_FUNC_VLAN_CFG (0x33UL) + #define HWRM_FUNC_VLAN_QCFG (0x34UL) #define HWRM_QUEUE_PFCENABLE_QCFG (0x35UL) #define HWRM_QUEUE_PFCENABLE_CFG (0x36UL) #define HWRM_QUEUE_PRI2COS_QCFG (0x37UL) #define HWRM_QUEUE_PRI2COS_CFG (0x38UL) #define HWRM_QUEUE_COS2BW_QCFG (0x39UL) #define HWRM_QUEUE_COS2BW_CFG (0x3aUL) + #define HWRM_QUEUE_DSCP_QCAPS (0x3bUL) + #define HWRM_QUEUE_DSCP2PRI_QCFG (0x3cUL) + #define HWRM_QUEUE_DSCP2PRI_CFG (0x3dUL) #define HWRM_VNIC_ALLOC (0x40UL) #define HWRM_VNIC_FREE (0x41UL) #define HWRM_VNIC_CFG (0x42UL) @@ -5455,7 +5712,7 @@ struct cmd_nums { #define HWRM_CFA_L2_FILTER_FREE (0x91UL) #define HWRM_CFA_L2_FILTER_CFG (0x92UL) #define HWRM_CFA_L2_SET_RX_MASK (0x93UL) - #define RESERVED4 (0x94UL) + #define HWRM_CFA_VLAN_ANTISPOOF_CFG (0x94UL) #define HWRM_CFA_TUNNEL_FILTER_ALLOC (0x95UL) #define HWRM_CFA_TUNNEL_FILTER_FREE (0x96UL) #define HWRM_CFA_ENCAP_RECORD_ALLOC (0x97UL) @@ -5494,6 +5751,8 @@ struct cmd_nums { #define HWRM_CFA_METER_PROFILE_CFG (0xf7UL) #define HWRM_CFA_METER_INSTANCE_ALLOC (0xf8UL) #define HWRM_CFA_METER_INSTANCE_FREE (0xf9UL) + #define HWRM_CFA_VFR_ALLOC (0xfdUL) + #define HWRM_CFA_VFR_FREE (0xfeUL) #define HWRM_CFA_VF_PAIR_ALLOC (0x100UL) #define HWRM_CFA_VF_PAIR_FREE (0x101UL) #define HWRM_CFA_VF_PAIR_INFO (0x102UL) @@ -5502,6 +5761,9 @@ struct cmd_nums { #define HWRM_CFA_FLOW_FLUSH (0x105UL) #define HWRM_CFA_FLOW_STATS (0x106UL) #define HWRM_CFA_FLOW_INFO (0x107UL) + #define HWRM_CFA_DECAP_FILTER_ALLOC (0x108UL) + #define HWRM_CFA_DECAP_FILTER_FREE (0x109UL) + #define HWRM_CFA_VLAN_ANTISPOOF_QCFG (0x10aUL) #define HWRM_SELFTEST_QLIST (0x200UL) #define HWRM_SELFTEST_EXEC (0x201UL) #define HWRM_SELFTEST_IRQ (0x202UL) @@ -5510,6 +5772,8 @@ struct cmd_nums { #define HWRM_DBG_WRITE_DIRECT (0xff12UL) #define HWRM_DBG_WRITE_INDIRECT (0xff13UL) #define HWRM_DBG_DUMP (0xff14UL) + #define HWRM_DBG_ERASE_NVM (0xff15UL) + #define HWRM_DBG_CFG (0xff16UL) #define HWRM_NVM_FACTORY_DEFAULTS (0xffeeUL) #define HWRM_NVM_VALIDATE_OPTION (0xffefUL) #define HWRM_NVM_FLUSH (0xfff0UL) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 
b8e7248294d9..d37925a8a65b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -18,6 +18,7 @@ #include "bnxt.h" #include "bnxt_ulp.h" #include "bnxt_sriov.h" +#include "bnxt_vfr.h" #include "bnxt_ethtool.h" #ifdef CONFIG_BNXT_SRIOV @@ -587,6 +588,10 @@ void bnxt_sriov_disable(struct bnxt *bp) if (!num_vfs) return; + /* synchronize VF and VF-rep create and destroy */ + mutex_lock(&bp->sriov_lock); + bnxt_vf_reps_destroy(bp); + if (pci_vfs_assigned(bp->pdev)) { bnxt_hwrm_fwd_async_event_cmpl( bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD); @@ -597,6 +602,7 @@ void bnxt_sriov_disable(struct bnxt *bp) /* Free the HW resources reserved for various VF's */ bnxt_hwrm_func_vf_resource_free(bp, num_vfs); } + mutex_unlock(&bp->sriov_lock); bnxt_free_vf_resources(bp); @@ -794,8 +800,10 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) PORT_PHY_QCFG_RESP_LINK_LINK; phy_qcfg_resp.link_speed = cpu_to_le16( PORT_PHY_QCFG_RESP_LINK_SPEED_10GB); - phy_qcfg_resp.duplex = - PORT_PHY_QCFG_RESP_DUPLEX_FULL; + phy_qcfg_resp.duplex_cfg = + PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL; + phy_qcfg_resp.duplex_state = + PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL; phy_qcfg_resp.pause = (PORT_PHY_QCFG_RESP_PAUSE_TX | PORT_PHY_QCFG_RESP_PAUSE_RX); @@ -804,7 +812,8 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) /* force link down */ phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK; phy_qcfg_resp.link_speed = 0; - phy_qcfg_resp.duplex = PORT_PHY_QCFG_RESP_DUPLEX_HALF; + phy_qcfg_resp.duplex_state = + PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF; phy_qcfg_resp.pause = 0; } rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c new file mode 100644 index 000000000000..b05c5d0ee3f9 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -0,0 +1,495 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2016-2017 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
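bnxt_vfr.c opens with the driver's standard HWRM command pattern, visible in hwrm_cfa_vfr_alloc() just below: build the request header, send while holding hwrm_cmd_lock, and copy any needed fields out of the shared response buffer before dropping the lock, since the response DMA area is reused by the next command. Condensed to its shape (a restatement of the function that follows, not additional driver code):

        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
                /* resp points into bp->hwrm_cmd_resp_addr; read the
                 * fields out before unlocking, after which the buffer
                 * may be overwritten by the next command */
                *tx_cfa_action = le16_to_cpu(resp->tx_cfa_action);
                *rx_cfa_code = le16_to_cpu(resp->rx_cfa_code);
        }
        mutex_unlock(&bp->hwrm_cmd_lock);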
+ */ +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/rtnetlink.h> +#include <linux/jhash.h> + +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_vfr.h" + +#ifdef CONFIG_BNXT_SRIOV + +#define CFA_HANDLE_INVALID 0xffff +#define VF_IDX_INVALID 0xffff + +static int hwrm_cfa_vfr_alloc(struct bnxt *bp, u16 vf_idx, + u16 *tx_cfa_action, u16 *rx_cfa_code) +{ + struct hwrm_cfa_vfr_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_cfa_vfr_alloc_input req = { 0 }; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_ALLOC, -1, -1); + req.vf_id = cpu_to_le16(vf_idx); + sprintf(req.vfr_name, "vfr%d", vf_idx); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) { + *tx_cfa_action = le16_to_cpu(resp->tx_cfa_action); + *rx_cfa_code = le16_to_cpu(resp->rx_cfa_code); + netdev_dbg(bp->dev, "tx_cfa_action=0x%x, rx_cfa_code=0x%x", + *tx_cfa_action, *rx_cfa_code); + } else { + netdev_info(bp->dev, "%s error rc=%d", __func__, rc); + } + + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx) +{ + struct hwrm_cfa_vfr_free_input req = { 0 }; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_FREE, -1, -1); + sprintf(req.vfr_name, "vfr%d", vf_idx); + + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + netdev_info(bp->dev, "%s error rc=%d", __func__, rc); + return rc; +} + +static int bnxt_vf_rep_open(struct net_device *dev) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + struct bnxt *bp = vf_rep->bp; + + /* Enable link and TX only if the parent PF is open. */ + if (netif_running(bp->dev)) { + netif_carrier_on(dev); + netif_tx_start_all_queues(dev); + } + return 0; +} + +static int bnxt_vf_rep_close(struct net_device *dev) +{ + netif_carrier_off(dev); + netif_tx_disable(dev); + + return 0; +} + +static netdev_tx_t bnxt_vf_rep_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + int rc, len = skb->len; + + skb_dst_drop(skb); + dst_hold((struct dst_entry *)vf_rep->dst); + skb_dst_set(skb, (struct dst_entry *)vf_rep->dst); + skb->dev = vf_rep->dst->u.port_info.lower_dev; + + rc = dev_queue_xmit(skb); + if (!rc) { + vf_rep->tx_stats.packets++; + vf_rep->tx_stats.bytes += len; + } + return rc; +} + +static void +bnxt_vf_rep_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + + stats->rx_packets = vf_rep->rx_stats.packets; + stats->rx_bytes = vf_rep->rx_stats.bytes; + stats->tx_packets = vf_rep->tx_stats.packets; + stats->tx_bytes = vf_rep->tx_stats.bytes; +} + +struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code) +{ + u16 vf_idx; + + if (cfa_code && bp->cfa_code_map && BNXT_PF(bp)) { + vf_idx = bp->cfa_code_map[cfa_code]; + if (vf_idx != VF_IDX_INVALID) + return bp->vf_reps[vf_idx]->dev; + } + return NULL; +} + +void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(skb->dev); + struct bnxt_vf_rep_stats *rx_stats; + + rx_stats = &vf_rep->rx_stats; + vf_rep->rx_stats.bytes += skb->len; + vf_rep->rx_stats.packets++; + + netif_receive_skb(skb); +} + +static int bnxt_vf_rep_get_phys_port_name(struct net_device *dev, char *buf, + size_t len) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + struct pci_dev *pf_pdev = vf_rep->bp->pdev; + int rc; + + rc = snprintf(buf, len, "pf%dvf%d", 
PCI_FUNC(pf_pdev->devfn), + vf_rep->vf_idx); + if (rc >= len) + return -EOPNOTSUPP; + return 0; +} + +static void bnxt_vf_rep_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); +} + +static int bnxt_vf_rep_port_attr_get(struct net_device *dev, + struct switchdev_attr *attr) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + + /* as only PORT_PARENT_ID is supported currently use common code + * between PF and VF-rep for now. + */ + return bnxt_port_attr_get(vf_rep->bp, attr); +} + +static const struct switchdev_ops bnxt_vf_rep_switchdev_ops = { + .switchdev_port_attr_get = bnxt_vf_rep_port_attr_get +}; + +static const struct ethtool_ops bnxt_vf_rep_ethtool_ops = { + .get_drvinfo = bnxt_vf_rep_get_drvinfo +}; + +static const struct net_device_ops bnxt_vf_rep_netdev_ops = { + .ndo_open = bnxt_vf_rep_open, + .ndo_stop = bnxt_vf_rep_close, + .ndo_start_xmit = bnxt_vf_rep_xmit, + .ndo_get_stats64 = bnxt_vf_rep_get_stats64, + .ndo_get_phys_port_name = bnxt_vf_rep_get_phys_port_name +}; + +/* Called when the parent PF interface is closed: + * As the mode transition from SWITCHDEV to LEGACY + * happens under the rtnl_lock() this routine is safe + * under the rtnl_lock() + */ +void bnxt_vf_reps_close(struct bnxt *bp) +{ + struct bnxt_vf_rep *vf_rep; + u16 num_vfs, i; + + if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) + return; + + num_vfs = pci_num_vf(bp->pdev); + for (i = 0; i < num_vfs; i++) { + vf_rep = bp->vf_reps[i]; + if (netif_running(vf_rep->dev)) + bnxt_vf_rep_close(vf_rep->dev); + } +} + +/* Called when the parent PF interface is opened (re-opened): + * As the mode transition from SWITCHDEV to LEGACY + * happen under the rtnl_lock() this routine is safe + * under the rtnl_lock() + */ +void bnxt_vf_reps_open(struct bnxt *bp) +{ + int i; + + if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) + return; + + for (i = 0; i < pci_num_vf(bp->pdev); i++) + bnxt_vf_rep_open(bp->vf_reps[i]->dev); +} + +static void __bnxt_vf_reps_destroy(struct bnxt *bp) +{ + u16 num_vfs = pci_num_vf(bp->pdev); + struct bnxt_vf_rep *vf_rep; + int i; + + for (i = 0; i < num_vfs; i++) { + vf_rep = bp->vf_reps[i]; + if (vf_rep) { + dst_release((struct dst_entry *)vf_rep->dst); + + if (vf_rep->tx_cfa_action != CFA_HANDLE_INVALID) + hwrm_cfa_vfr_free(bp, vf_rep->vf_idx); + + if (vf_rep->dev) { + /* if register_netdev failed, then netdev_ops + * would have been set to NULL + */ + if (vf_rep->dev->netdev_ops) + unregister_netdev(vf_rep->dev); + free_netdev(vf_rep->dev); + } + } + } + + kfree(bp->vf_reps); + bp->vf_reps = NULL; +} + +void bnxt_vf_reps_destroy(struct bnxt *bp) +{ + bool closed = false; + + if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) + return; + + if (!bp->vf_reps) + return; + + /* Ensure that parent PF's and VF-reps' RX/TX has been quiesced + * before proceeding with VF-rep cleanup. 
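The destroy path that follows is ordering-sensitive: the PF is quiesced and the cfa_code_map un-published under rtnl_lock(), but the actual representor teardown is deferred until after rtnl_unlock(), because unregister_netdev() acquires rtnl_lock() itself. In outline (mirroring the body below):

        rtnl_lock();
        /* close the PF, un-publish cfa_code_map, revert to LEGACY mode */
        rtnl_unlock();
        __bnxt_vf_reps_destroy(bp);     /* unregister_netdev() takes
                                         * rtnl_lock(), so calling this
                                         * while still holding it would
                                         * self-deadlock */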
+ */ + rtnl_lock(); + if (netif_running(bp->dev)) { + bnxt_close_nic(bp, false, false); + closed = true; + } + /* un-publish cfa_code_map so that RX path can't see it anymore */ + kfree(bp->cfa_code_map); + bp->cfa_code_map = NULL; + bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; + + if (closed) + bnxt_open_nic(bp, false, false); + rtnl_unlock(); + + /* Need to call vf_reps_destroy() outside of rntl_lock + * as unregister_netdev takes rtnl_lock + */ + __bnxt_vf_reps_destroy(bp); +} + +/* Use the OUI of the PF's perm addr and report the same mac addr + * for the same VF-rep each time + */ +static void bnxt_vf_rep_eth_addr_gen(u8 *src_mac, u16 vf_idx, u8 *mac) +{ + u32 addr; + + ether_addr_copy(mac, src_mac); + + addr = jhash(src_mac, ETH_ALEN, 0) + vf_idx; + mac[3] = (u8)(addr & 0xFF); + mac[4] = (u8)((addr >> 8) & 0xFF); + mac[5] = (u8)((addr >> 16) & 0xFF); +} + +static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, + struct net_device *dev) +{ + struct net_device *pf_dev = bp->dev; + + dev->netdev_ops = &bnxt_vf_rep_netdev_ops; + dev->ethtool_ops = &bnxt_vf_rep_ethtool_ops; + SWITCHDEV_SET_OPS(dev, &bnxt_vf_rep_switchdev_ops); + /* Just inherit all the featues of the parent PF as the VF-R + * uses the RX/TX rings of the parent PF + */ + dev->hw_features = pf_dev->hw_features; + dev->gso_partial_features = pf_dev->gso_partial_features; + dev->vlan_features = pf_dev->vlan_features; + dev->hw_enc_features = pf_dev->hw_enc_features; + dev->features |= pf_dev->features; + bnxt_vf_rep_eth_addr_gen(bp->pf.mac_addr, vf_rep->vf_idx, + dev->perm_addr); + ether_addr_copy(dev->dev_addr, dev->perm_addr); +} + +static int bnxt_vf_reps_create(struct bnxt *bp) +{ + u16 *cfa_code_map = NULL, num_vfs = pci_num_vf(bp->pdev); + struct bnxt_vf_rep *vf_rep; + struct net_device *dev; + int rc, i; + + bp->vf_reps = kcalloc(num_vfs, sizeof(vf_rep), GFP_KERNEL); + if (!bp->vf_reps) + return -ENOMEM; + + /* storage for cfa_code to vf-idx mapping */ + cfa_code_map = kmalloc(sizeof(*bp->cfa_code_map) * MAX_CFA_CODE, + GFP_KERNEL); + if (!cfa_code_map) { + rc = -ENOMEM; + goto err; + } + for (i = 0; i < MAX_CFA_CODE; i++) + cfa_code_map[i] = VF_IDX_INVALID; + + for (i = 0; i < num_vfs; i++) { + dev = alloc_etherdev(sizeof(*vf_rep)); + if (!dev) { + rc = -ENOMEM; + goto err; + } + + vf_rep = netdev_priv(dev); + bp->vf_reps[i] = vf_rep; + vf_rep->dev = dev; + vf_rep->bp = bp; + vf_rep->vf_idx = i; + vf_rep->tx_cfa_action = CFA_HANDLE_INVALID; + + /* get cfa handles from FW */ + rc = hwrm_cfa_vfr_alloc(bp, vf_rep->vf_idx, + &vf_rep->tx_cfa_action, + &vf_rep->rx_cfa_code); + if (rc) { + rc = -ENOLINK; + goto err; + } + cfa_code_map[vf_rep->rx_cfa_code] = vf_rep->vf_idx; + + vf_rep->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, + GFP_KERNEL); + if (!vf_rep->dst) { + rc = -ENOMEM; + goto err; + } + /* only cfa_action is needed to mux a packet while TXing */ + vf_rep->dst->u.port_info.port_id = vf_rep->tx_cfa_action; + vf_rep->dst->u.port_info.lower_dev = bp->dev; + + bnxt_vf_rep_netdev_init(bp, vf_rep, dev); + rc = register_netdev(dev); + if (rc) { + /* no need for unregister_netdev in cleanup */ + dev->netdev_ops = NULL; + goto err; + } + } + + /* publish cfa_code_map only after all VF-reps have been initialized */ + bp->cfa_code_map = cfa_code_map; + bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV; + netif_keep_dst(bp->dev); + return 0; + +err: + netdev_info(bp->dev, "%s error=%d", __func__, rc); + kfree(cfa_code_map); + __bnxt_vf_reps_destroy(bp); + return rc; +} + +/* Devlink related 
routines */ +static int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode) +{ + struct bnxt *bp = bnxt_get_bp_from_dl(devlink); + + *mode = bp->eswitch_mode; + return 0; +} + +static int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode) +{ + struct bnxt *bp = bnxt_get_bp_from_dl(devlink); + int rc = 0; + + mutex_lock(&bp->sriov_lock); + if (bp->eswitch_mode == mode) { + netdev_info(bp->dev, "already in %s eswitch mode", + mode == DEVLINK_ESWITCH_MODE_LEGACY ? + "legacy" : "switchdev"); + rc = -EINVAL; + goto done; + } + + switch (mode) { + case DEVLINK_ESWITCH_MODE_LEGACY: + bnxt_vf_reps_destroy(bp); + break; + + case DEVLINK_ESWITCH_MODE_SWITCHDEV: + if (pci_num_vf(bp->pdev) == 0) { + netdev_info(bp->dev, + "Enable VFs before setting swtichdev mode"); + rc = -EPERM; + goto done; + } + rc = bnxt_vf_reps_create(bp); + break; + + default: + rc = -EINVAL; + goto done; + } +done: + mutex_unlock(&bp->sriov_lock); + return rc; +} + +static const struct devlink_ops bnxt_dl_ops = { + .eswitch_mode_set = bnxt_dl_eswitch_mode_set, + .eswitch_mode_get = bnxt_dl_eswitch_mode_get +}; + +int bnxt_dl_register(struct bnxt *bp) +{ + struct devlink *dl; + int rc; + + if (!pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV)) + return 0; + + if (bp->hwrm_spec_code < 0x10800) { + netdev_warn(bp->dev, "Firmware does not support SR-IOV E-Switch SWITCHDEV mode.\n"); + return -ENOTSUPP; + } + + dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl)); + if (!dl) { + netdev_warn(bp->dev, "devlink_alloc failed"); + return -ENOMEM; + } + + bnxt_link_bp_to_dl(dl, bp); + bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; + rc = devlink_register(dl, &bp->pdev->dev); + if (rc) { + bnxt_link_bp_to_dl(dl, NULL); + devlink_free(dl); + netdev_warn(bp->dev, "devlink_register failed. rc=%d", rc); + return rc; + } + + return 0; +} + +void bnxt_dl_unregister(struct bnxt *bp) +{ + struct devlink *dl = bp->dl; + + if (!dl) + return; + + devlink_unregister(dl); + devlink_free(dl); +} + +#endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h new file mode 100644 index 000000000000..e55a3b693e20 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h @@ -0,0 +1,72 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2016-2017 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
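Once bnxt_dl_register() succeeds, the eswitch_mode_get/set callbacks above are driven from userspace through the devlink API; with iproute2's devlink tool the transition looks roughly like this (PCI address illustrative, and per the mode_set handler VFs must already be enabled or the switchdev request fails with -EPERM):

        echo 2 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs
        devlink dev eswitch set pci/0000:03:00.0 mode switchdev
        devlink dev eswitch show pci/0000:03:00.0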
+ */ + +#ifndef BNXT_VFR_H +#define BNXT_VFR_H + +#ifdef CONFIG_BNXT_SRIOV + +#define MAX_CFA_CODE 65536 + +/* Struct to hold housekeeping info needed by devlink interface */ +struct bnxt_dl { + struct bnxt *bp; /* back ptr to the controlling dev */ +}; + +static inline struct bnxt *bnxt_get_bp_from_dl(struct devlink *dl) +{ + return ((struct bnxt_dl *)devlink_priv(dl))->bp; +} + +static inline void bnxt_link_bp_to_dl(struct devlink *dl, struct bnxt *bp) +{ + struct bnxt_dl *bp_dl = devlink_priv(dl); + + bp_dl->bp = bp; + if (bp) + bp->dl = dl; +} + +int bnxt_dl_register(struct bnxt *bp); +void bnxt_dl_unregister(struct bnxt *bp); +void bnxt_vf_reps_destroy(struct bnxt *bp); +void bnxt_vf_reps_close(struct bnxt *bp); +void bnxt_vf_reps_open(struct bnxt *bp); +void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb); +struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code); + +#else + +static inline int bnxt_dl_register(struct bnxt *bp) +{ + return 0; +} + +static inline void bnxt_dl_unregister(struct bnxt *bp) +{ +} + +static inline void bnxt_vf_reps_close(struct bnxt *bp) +{ +} + +static inline void bnxt_vf_reps_open(struct bnxt *bp) +{ +} + +static inline void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb) +{ +} + +static inline struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code) +{ + return NULL; +} +#endif /* CONFIG_BNXT_SRIOV */ +#endif /* BNXT_VFR_H */ diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 3a34fdba5301..b1fdd3cc10d1 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -657,6 +657,7 @@ struct bcmgenet_priv { struct clk *clk; struct platform_device *pdev; + struct platform_device *mii_pdev; /* WOL */ struct clk *clk_wol; diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 30cb97b4a1d7..18f5723be2c9 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -24,62 +24,10 @@ #include <linux/of_net.h> #include <linux/of_mdio.h> #include <linux/platform_data/bcmgenet.h> +#include <linux/platform_data/mdio-bcm-unimac.h> #include "bcmgenet.h" -/* read a value from the MII */ -static int bcmgenet_mii_read(struct mii_bus *bus, int phy_id, int location) -{ - int ret; - struct net_device *dev = bus->priv; - struct bcmgenet_priv *priv = netdev_priv(dev); - u32 reg; - - bcmgenet_umac_writel(priv, (MDIO_RD | (phy_id << MDIO_PMD_SHIFT) | - (location << MDIO_REG_SHIFT)), UMAC_MDIO_CMD); - /* Start MDIO transaction*/ - reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD); - reg |= MDIO_START_BUSY; - bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD); - wait_event_timeout(priv->wq, - !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) - & MDIO_START_BUSY), - HZ / 100); - ret = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD); - - /* Some broken devices are known not to release the line during - * turn-around, e.g: Broadcom BCM53125 external switches, so check for - * that condition here and ignore the MDIO controller read failure - * indication. 
- */ - if (!(bus->phy_ignore_ta_mask & 1 << phy_id) && (ret & MDIO_READ_FAIL)) - return -EIO; - - return ret & 0xffff; -} - -/* write a value to the MII */ -static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id, - int location, u16 val) -{ - struct net_device *dev = bus->priv; - struct bcmgenet_priv *priv = netdev_priv(dev); - u32 reg; - - bcmgenet_umac_writel(priv, (MDIO_WR | (phy_id << MDIO_PMD_SHIFT) | - (location << MDIO_REG_SHIFT) | (0xffff & val)), - UMAC_MDIO_CMD); - reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD); - reg |= MDIO_START_BUSY; - bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD); - wait_event_timeout(priv->wq, - !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) & - MDIO_START_BUSY), - HZ / 100); - - return 0; -} - /* setup netdev link state when PHY link status change and * update UMAC and RGMII block when link up */ @@ -393,104 +341,121 @@ int bcmgenet_mii_probe(struct net_device *dev) return 0; } -/* Workaround for integrated BCM7xxx Gigabit PHYs which have a problem with - * their internal MDIO management controller making them fail to successfully - * be read from or written to for the first transaction. We insert a dummy - * BMSR read here to make sure that phy_get_device() and get_phy_id() can - * correctly read the PHY MII_PHYSID1/2 registers and successfully register a - * PHY device for this peripheral. - * - * Once the PHY driver is registered, we can workaround subsequent reads from - * there (e.g: during system-wide power management). - * - * bus->reset is invoked before mdiobus_scan during mdiobus_register and is - * therefore the right location to stick that workaround. Since we do not want - * to read from non-existing PHYs, we either use bus->phy_mask or do a manual - * Device Tree scan to limit the search area. - */ -static int bcmgenet_mii_bus_reset(struct mii_bus *bus) +static struct device_node *bcmgenet_mii_of_find_mdio(struct bcmgenet_priv *priv) { - struct net_device *dev = bus->priv; - struct bcmgenet_priv *priv = netdev_priv(dev); - struct device_node *np = priv->mdio_dn; - struct device_node *child = NULL; - u32 read_mask = 0; - int addr = 0; + struct device_node *dn = priv->pdev->dev.of_node; + struct device *kdev = &priv->pdev->dev; + char *compat; - if (!np) { - read_mask = 1 << priv->phy_addr; - } else { - for_each_available_child_of_node(np, child) { - addr = of_mdio_parse_addr(&dev->dev, child); - if (addr < 0) - continue; + compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version); + if (!compat) + return NULL; - read_mask |= 1 << addr; - } + priv->mdio_dn = of_find_compatible_node(dn, NULL, compat); + kfree(compat); + if (!priv->mdio_dn) { + dev_err(kdev, "unable to find MDIO bus node\n"); + return NULL; } - for (addr = 0; addr < PHY_MAX_ADDR; addr++) { - if (read_mask & 1 << addr) { - dev_dbg(&dev->dev, "Workaround for PHY @ %d\n", addr); - mdiobus_read(bus, addr, MII_BMSR); - } + return priv->mdio_dn; +} + +static void bcmgenet_mii_pdata_init(struct bcmgenet_priv *priv, + struct unimac_mdio_pdata *ppd) +{ + struct device *kdev = &priv->pdev->dev; + struct bcmgenet_platform_data *pd = kdev->platform_data; + + if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) { + /* + * Internal or external PHY with MDIO access + */ + if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR) + ppd->phy_mask = 1 << pd->phy_address; + else + ppd->phy_mask = 0; } +} +static int bcmgenet_mii_wait(void *wait_func_data) +{ + struct bcmgenet_priv *priv = wait_func_data; + + wait_event_timeout(priv->wq, + 
!(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) + & MDIO_START_BUSY), + HZ / 100); return 0; } -static int bcmgenet_mii_alloc(struct bcmgenet_priv *priv) +static int bcmgenet_mii_register(struct bcmgenet_priv *priv) { - struct mii_bus *bus; + struct platform_device *pdev = priv->pdev; + struct bcmgenet_platform_data *pdata = pdev->dev.platform_data; + struct device_node *dn = pdev->dev.of_node; + struct unimac_mdio_pdata ppd; + struct platform_device *ppdev; + struct resource *pres, res; + int id, ret; + + pres = platform_get_resource(pdev, IORESOURCE_MEM, 0); + memset(&res, 0, sizeof(res)); + memset(&ppd, 0, sizeof(ppd)); + + ppd.wait_func = bcmgenet_mii_wait; + ppd.wait_func_data = priv; + ppd.bus_name = "bcmgenet MII bus"; + + /* Unimac MDIO bus controller starts at UniMAC offset + MDIO_CMD + * and is 2 * 32-bits word long, 8 bytes total. + */ + res.start = pres->start + GENET_UMAC_OFF + UMAC_MDIO_CMD; + res.end = res.start + 8; + res.flags = IORESOURCE_MEM; - if (priv->mii_bus) - return 0; + if (dn) + id = of_alias_get_id(dn, "eth"); + else + id = pdev->id; - priv->mii_bus = mdiobus_alloc(); - if (!priv->mii_bus) { - pr_err("failed to allocate\n"); + ppdev = platform_device_alloc(UNIMAC_MDIO_DRV_NAME, id); + if (!ppdev) return -ENOMEM; - } - bus = priv->mii_bus; - bus->priv = priv->dev; - bus->name = "bcmgenet MII bus"; - bus->parent = &priv->pdev->dev; - bus->read = bcmgenet_mii_read; - bus->write = bcmgenet_mii_write; - bus->reset = bcmgenet_mii_bus_reset; - snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", - priv->pdev->name, priv->pdev->id); + /* Retain this platform_device pointer for later cleanup */ + priv->mii_pdev = ppdev; + ppdev->dev.parent = &pdev->dev; + ppdev->dev.of_node = bcmgenet_mii_of_find_mdio(priv); + if (pdata) + bcmgenet_mii_pdata_init(priv, &ppd); + + ret = platform_device_add_resources(ppdev, &res, 1); + if (ret) + goto out; + + ret = platform_device_add_data(ppdev, &ppd, sizeof(ppd)); + if (ret) + goto out; + + ret = platform_device_add(ppdev); + if (ret) + goto out; return 0; +out: + platform_device_put(ppdev); + return ret; } static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv) { struct device_node *dn = priv->pdev->dev.of_node; struct device *kdev = &priv->pdev->dev; - struct phy_device *phydev = NULL; - char *compat; + struct phy_device *phydev; int phy_mode; int ret; - compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version); - if (!compat) - return -ENOMEM; - - priv->mdio_dn = of_find_compatible_node(dn, NULL, compat); - kfree(compat); - if (!priv->mdio_dn) { - dev_err(kdev, "unable to find MDIO bus node\n"); - return -ENODEV; - } - - ret = of_mdiobus_register(priv->mii_bus, priv->mdio_dn); - if (ret) { - dev_err(kdev, "failed to register MDIO bus\n"); - return ret; - } - /* Fetch the PHY phandle */ priv->phy_dn = of_parse_phandle(dn, "phy-handle", 0); @@ -537,33 +502,23 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) { struct device *kdev = &priv->pdev->dev; struct bcmgenet_platform_data *pd = kdev->platform_data; - struct mii_bus *mdio = priv->mii_bus; + char phy_name[MII_BUS_ID_SIZE + 3]; + char mdio_bus_id[MII_BUS_ID_SIZE]; struct phy_device *phydev; - int ret; + + snprintf(mdio_bus_id, MII_BUS_ID_SIZE, "%s-%d", + UNIMAC_MDIO_DRV_NAME, priv->pdev->id); if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) { + snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, + mdio_bus_id, pd->phy_address); + /* * Internal or external PHY with MDIO access */ - if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR) - 
mdio->phy_mask = ~(1 << pd->phy_address); - else - mdio->phy_mask = 0; - - ret = mdiobus_register(mdio); - if (ret) { - dev_err(kdev, "failed to register MDIO bus\n"); - return ret; - } - - if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR) - phydev = mdiobus_get_phy(mdio, pd->phy_address); - else - phydev = phy_find_first(mdio); - + phydev = phy_attach(priv->dev, phy_name, pd->phy_interface); if (!phydev) { dev_err(kdev, "failed to register PHY device\n"); - mdiobus_unregister(mdio); return -ENODEV; } } else { @@ -609,10 +564,9 @@ static int bcmgenet_mii_bus_init(struct bcmgenet_priv *priv) int bcmgenet_mii_init(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); - struct device_node *dn = priv->pdev->dev.of_node; int ret; - ret = bcmgenet_mii_alloc(priv); + ret = bcmgenet_mii_register(priv); if (ret) return ret; @@ -623,11 +577,7 @@ int bcmgenet_mii_init(struct net_device *dev) return 0; out: - if (of_phy_is_fixed_link(dn)) - of_phy_deregister_fixed_link(dn); - of_node_put(priv->phy_dn); - mdiobus_unregister(priv->mii_bus); - mdiobus_free(priv->mii_bus); + bcmgenet_mii_exit(dev); return ret; } @@ -639,6 +589,6 @@ void bcmgenet_mii_exit(struct net_device *dev) if (of_phy_is_fixed_link(dn)) of_phy_deregister_fixed_link(dn); of_node_put(priv->phy_dn); - mdiobus_unregister(priv->mii_bus); - mdiobus_free(priv->mii_bus); + platform_device_unregister(priv->mii_pdev); + platform_device_put(priv->mii_pdev); } diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 26d25749c3e4..6df2cad61647 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -68,7 +68,7 @@ #define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1))) #define GEM_MTU_MIN_SIZE ETH_MIN_MTU -#define MACB_NETIF_LSO (NETIF_F_TSO | NETIF_F_UFO) +#define MACB_NETIF_LSO NETIF_F_TSO #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) #define MACB_WOL_ENABLED (0x1 << 1) diff --git a/drivers/net/ethernet/cadence/macb_pci.c b/drivers/net/ethernet/cadence/macb_pci.c index 9906fda76087..248a8fc45069 100644 --- a/drivers/net/ethernet/cadence/macb_pci.c +++ b/drivers/net/ethernet/cadence/macb_pci.c @@ -128,7 +128,7 @@ static void macb_remove(struct pci_dev *pdev) clk_unregister(plat_data->hclk); } -static struct pci_device_id dev_id_table[] = { +static const struct pci_device_id dev_id_table[] = { { PCI_DEVICE(CDNS_VENDOR_ID, CDNS_DEVICE_ID), }, { 0, } }; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index ebd353bc78ff..b78e296c4cba 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -105,6 +105,7 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = { "tx_total_sent", "tx_total_fwd", "tx_err_pko", + "tx_err_pki", "tx_err_link", "tx_err_drop", @@ -648,33 +649,21 @@ lio_ethtool_get_ringparam(struct net_device *netdev, rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS; rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx); tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx); - } else if (OCTEON_CN23XX_PF(oct)) { - struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf); - + } else if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) { tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS; rx_max_pending = CN23XX_MAX_OQ_DESCRIPTORS; - rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf23, lio->ifidx); - tx_pending = 
CFG_GET_NUM_TX_DESCS_NIC_IF(conf23, lio->ifidx); - } - - if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE - OCTNET_FRM_HEADER_SIZE) { - ering->rx_pending = 0; - ering->rx_max_pending = 0; - ering->rx_mini_pending = 0; - ering->rx_jumbo_pending = rx_pending; - ering->rx_mini_max_pending = 0; - ering->rx_jumbo_max_pending = rx_max_pending; - } else { - ering->rx_pending = rx_pending; - ering->rx_max_pending = rx_max_pending; - ering->rx_mini_pending = 0; - ering->rx_jumbo_pending = 0; - ering->rx_mini_max_pending = 0; - ering->rx_jumbo_max_pending = 0; + rx_pending = oct->droq[0]->max_count; + tx_pending = oct->instr_queue[0]->max_count; } ering->tx_pending = tx_pending; ering->tx_max_pending = tx_max_pending; + ering->rx_pending = rx_pending; + ering->rx_max_pending = rx_max_pending; + ering->rx_mini_pending = 0; + ering->rx_jumbo_pending = 0; + ering->rx_mini_max_pending = 0; + ering->rx_jumbo_max_pending = 0; } static u32 lio_get_msglevel(struct net_device *netdev) @@ -826,6 +815,8 @@ lio_get_ethtool_stats(struct net_device *netdev, data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd); /*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko); + /*per_core_stats[j].link_stats[i].fromhost.fw_err_pki */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pki); /*per_core_stats[j].link_stats[i].fromhost.fw_err_link */ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link); /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. @@ -1568,6 +1559,7 @@ octnet_nic_stats_callback(struct octeon_device *oct_dev, tstats->fw_total_sent = rsp_tstats->fw_total_sent; tstats->fw_total_fwd = rsp_tstats->fw_total_fwd; tstats->fw_err_pko = rsp_tstats->fw_err_pko; + tstats->fw_err_pki = rsp_tstats->fw_err_pki; tstats->fw_err_link = rsp_tstats->fw_err_link; tstats->fw_err_drop = rsp_tstats->fw_err_drop; tstats->fw_tso = rsp_tstats->fw_tso; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 51583ae4b1eb..cbd6287e578e 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -39,10 +39,14 @@ MODULE_AUTHOR("Cavium Networks, <support@cavium.com>"); MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(LIQUIDIO_VERSION); -MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME LIO_FW_NAME_SUFFIX); -MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME LIO_FW_NAME_SUFFIX); -MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME LIO_FW_NAME_SUFFIX); -MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME LIO_FW_NAME_SUFFIX); +MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME + "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); +MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME + "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); +MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME + "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); +MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME + "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); static int ddr_timeout = 10000; module_param(ddr_timeout, int, 0644); @@ -59,6 +63,21 @@ static char fw_type[LIO_MAX_FW_TYPE_LEN]; module_param_string(fw_type, fw_type, sizeof(fw_type), 0000); MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. 
Default \"nic\""); +static u32 console_bitmask; +module_param(console_bitmask, int, 0644); +MODULE_PARM_DESC(console_bitmask, + "Bitmask indicating which consoles have debug output redirected to syslog."); + +/** + * \brief determines if a given console has debug enabled. + * @param console console to check + * @returns 1 = enabled. 0 otherwise + */ +int octeon_console_debug_enabled(u32 console) +{ + return (console_bitmask >> (console)) & 0x1; +} + static int ptp_enable = 1; /* Polling interval for determining when NIC application is alive */ @@ -1717,6 +1736,10 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) oct->droq[0]->ops.poll_mode = 0; } + /* Delete NAPI */ + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + netif_napi_del(napi); + if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) unregister_netdev(netdev); @@ -1825,6 +1848,11 @@ static int octeon_chip_specific_setup(struct octeon_device *oct) case OCTEON_CN23XX_PCIID_PF: oct->chip_id = OCTEON_CN23XX_PF_VID; ret = setup_cn23xx_octeon_pf_device(oct); +#ifdef CONFIG_PCI_IOV + if (!ret) + pci_sriov_set_totalvfs(oct->pci_dev, + oct->sriov_info.max_vfs); +#endif s = "CN23XX"; break; @@ -2544,8 +2572,8 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev, { struct octeon_droq_ops droq_ops; struct net_device *netdev; - static int cpu_id; - static int cpu_id_modulus; + int cpu_id; + int cpu_id_modulus; struct octeon_droq *droq; struct napi_struct *napi; int q, q_no, retval = 0; @@ -2746,6 +2774,17 @@ static int liquidio_stop(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; + struct napi_struct *napi, *n; + + if (oct->props[lio->ifidx].napi_enabled) { + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + napi_disable(napi); + + oct->props[lio->ifidx].napi_enabled = 0; + + if (OCTEON_CN23XX_PF(oct)) + oct->droq[0]->ops.poll_mode = 0; + } ifstate_reset(lio, LIO_IFSTATE_RUNNING); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 9b247102eb92..c6f52f235647 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1137,6 +1137,10 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) oct->droq[0]->ops.poll_mode = 0; } + /* Delete NAPI */ + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + netif_napi_del(napi); + if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) unregister_netdev(netdev); @@ -1663,10 +1667,10 @@ static int setup_io_queues(struct octeon_device *octeon_dev, int ifidx) { struct octeon_droq_ops droq_ops; struct net_device *netdev; - static int cpu_id_modulus; + int cpu_id_modulus; struct octeon_droq *droq; struct napi_struct *napi; - static int cpu_id; + int cpu_id; int num_tx_descs; struct lio *lio; int retval = 0; @@ -1784,6 +1788,16 @@ static int liquidio_stop(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; + struct napi_struct *napi, *n; + + if (oct->props[lio->ifidx].napi_enabled) { + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + napi_disable(napi); + + oct->props[lio->ifidx].napi_enabled = 0; + + oct->droq[0]->ops.poll_mode = 0; + } netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n"); /* Inform that netif carrier is down */ diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h 
b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index 231dd7fbfb80..3b9e3646b971 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -27,8 +27,8 @@ #define LIQUIDIO_PACKAGE "" #define LIQUIDIO_BASE_MAJOR_VERSION 1 -#define LIQUIDIO_BASE_MINOR_VERSION 5 -#define LIQUIDIO_BASE_MICRO_VERSION 1 +#define LIQUIDIO_BASE_MINOR_VERSION 6 +#define LIQUIDIO_BASE_MICRO_VERSION 0 #define LIQUIDIO_BASE_VERSION __stringify(LIQUIDIO_BASE_MAJOR_VERSION) "." \ __stringify(LIQUIDIO_BASE_MINOR_VERSION) #define LIQUIDIO_MICRO_VERSION "." __stringify(LIQUIDIO_BASE_MICRO_VERSION) @@ -768,6 +768,7 @@ struct nic_rx_stats { /* firmware stats */ u64 fw_total_rcvd; u64 fw_total_fwd; + u64 fw_total_fwd_bytes; u64 fw_err_pko; u64 fw_err_link; u64 fw_err_drop; @@ -814,6 +815,7 @@ struct nic_tx_stats { u64 fw_tso; /* number of tso requests */ u64 fw_tso_fwd; /* number of packets segmented in tso */ u64 fw_tx_vxlan; + u64 fw_err_pki; }; struct oct_link_stats { diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c index e08f7600f986..dd0efc9b4286 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c @@ -37,13 +37,6 @@ static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct, u32 flags); static int octeon_console_read(struct octeon_device *oct, u32 console_num, char *buffer, u32 buf_size); -static u32 console_bitmask; -module_param(console_bitmask, int, 0644); -MODULE_PARM_DESC(console_bitmask, - "Bitmask indicating which consoles have debug output redirected to syslog."); - -#define MIN(a, b) min((a), (b)) -#define CAST_ULL(v) ((u64)(v)) #define BOOTLOADER_PCI_READ_BUFFER_DATA_ADDR 0x0006c008 #define BOOTLOADER_PCI_READ_BUFFER_LEN_ADDR 0x0006c004 @@ -139,16 +132,6 @@ struct octeon_pci_console_desc { }; /** - * \brief determines if a given console has debug enabled. - * @param console console to check - * @returns 1 = enabled. 0 otherwise - */ -static int octeon_console_debug_enabled(u32 console) -{ - return (console_bitmask >> (console)) & 0x1; -} - -/** * This function is the implementation of the get macros defined * for individual structure members. The argument are generated * by the macros inorder to read only the needed memory. 
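Two of the octeon_console.c cleanups above replace driver-local macros with standard kernel idioms: CAST_ULL(v) becomes a plain (long long) cast in the dev_err() arguments, and MIN(a, b) becomes min_t(s32, ...), which forces both operands to one explicit type before comparing. A standalone sketch of why the typed form matters with mixed signedness (values illustrative):

#include <stdint.h>

/* roughly what min_t(s32, a, b) does: compare after casting both
 * operands to the named type */
static int32_t min_s32(int32_t a, int32_t b)
{
        return a < b ? a : b;
}

/* A naive MIN() on an s32/u32 pair promotes the signed side to
 * unsigned, so MIN(-1, 16u) evaluates to 16; min_s32(-1, 16)
 * yields -1, preserving the negative value. */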
@@ -234,7 +217,7 @@ static int __cvmx_bootmem_check_version(struct octeon_device *oct, (exact_match && major_version != exact_match)) { dev_err(&oct->pci_dev->dev, "bootmem ver mismatch %d.%d addr:0x%llx\n", major_version, minor_version, - CAST_ULL(oct->bootmem_desc_addr)); + (long long)oct->bootmem_desc_addr); return -1; } else { return 0; @@ -704,7 +687,7 @@ static int octeon_console_read(struct octeon_device *oct, u32 console_num, if (bytes_to_read <= 0) return bytes_to_read; - bytes_to_read = MIN(bytes_to_read, (s32)buf_size); + bytes_to_read = min_t(s32, bytes_to_read, buf_size); /* Check to see if what we want to read is not contiguous, and limit * ourselves to the contiguous block diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c index 623e28ca736e..495cc8880646 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -528,9 +528,10 @@ static struct octeon_config_ptr { }; static char oct_dev_state_str[OCT_DEV_STATES + 1][32] = { - "BEGIN", "PCI-MAP-DONE", "DISPATCH-INIT-DONE", + "BEGIN", "PCI-ENABLE-DONE", "PCI-MAP-DONE", "DISPATCH-INIT-DONE", "IQ-INIT-DONE", "SCBUFF-POOL-INIT-DONE", "RESPLIST-INIT-DONE", - "DROQ-INIT-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE", + "DROQ-INIT-DONE", "MBOX-SETUP-DONE", "MSIX-ALLOC-VECTOR-DONE", + "INTR-SET-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE", "HOST-READY", "CORE-READY", "RUNNING", "IN-RESET", "INVALID" }; @@ -876,11 +877,11 @@ int octeon_setup_instr_queues(struct octeon_device *oct) oct->num_iqs = 0; - oct->instr_queue[0] = vmalloc_node(sizeof(*oct->instr_queue[0]), + oct->instr_queue[0] = vzalloc_node(sizeof(*oct->instr_queue[0]), numa_node); if (!oct->instr_queue[0]) oct->instr_queue[0] = - vmalloc(sizeof(struct octeon_instr_queue)); + vzalloc(sizeof(struct octeon_instr_queue)); if (!oct->instr_queue[0]) return 1; memset(oct->instr_queue[0], 0, sizeof(struct octeon_instr_queue)); @@ -923,9 +924,9 @@ int octeon_setup_output_queues(struct octeon_device *oct) desc_size = CFG_GET_DEF_RX_BUF_SIZE(CHIP_CONF(oct, cn23xx_vf)); } oct->num_oqs = 0; - oct->droq[0] = vmalloc_node(sizeof(*oct->droq[0]), numa_node); + oct->droq[0] = vzalloc_node(sizeof(*oct->droq[0]), numa_node); if (!oct->droq[0]) - oct->droq[0] = vmalloc(sizeof(*oct->droq[0])); + oct->droq[0] = vzalloc(sizeof(*oct->droq[0])); if (!oct->droq[0]) return 1; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h index c90ed48ae8ab..31efdef02a24 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h @@ -22,6 +22,8 @@ #ifndef _OCTEON_DEVICE_H_ #define _OCTEON_DEVICE_H_ +#include <linux/interrupt.h> + /** PCI VendorId Device Id */ #define OCTEON_CN68XX_PCIID 0x91177d #define OCTEON_CN66XX_PCIID 0x92177d @@ -737,6 +739,8 @@ int octeon_wait_for_bootloader(struct octeon_device *oct, */ int octeon_init_consoles(struct octeon_device *oct); +int octeon_console_debug_enabled(u32 console); + /** * Adds access to a console to the device. 
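The octeon_device.c hunks above convert the queue-zero allocations from vmalloc_node()/vmalloc() to vzalloc_node()/vzalloc(), so both the NUMA-local allocation and its plain fallback come back pre-zeroed (the memset() retained in the instr_queue path is now redundant but harmless). Before/after in outline, error handling trimmed:

        /* before: allocate, then zero in a separate step */
        oct->droq[0] = vmalloc_node(sizeof(*oct->droq[0]), numa_node);
        if (!oct->droq[0])
                oct->droq[0] = vmalloc(sizeof(*oct->droq[0]));

        /* after: the allocation itself returns zeroed memory */
        oct->droq[0] = vzalloc_node(sizeof(*oct->droq[0]), numa_node);
        if (!oct->droq[0])
                oct->droq[0] = vzalloc(sizeof(*oct->droq[0]));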
* diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c index 2e190deb2233..f7b5d68eb4cf 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c @@ -145,6 +145,8 @@ octeon_droq_destroy_ring_buffers(struct octeon_device *oct, for (i = 0; i < droq->max_count; i++) { pg_info = &droq->recv_buf_list[i].pg_info; + if (!pg_info) + continue; if (pg_info->dma) lio_unmap_ring(oct->pci_dev, @@ -275,12 +277,12 @@ int octeon_init_droq(struct octeon_device *oct, droq->max_count); droq->recv_buf_list = (struct octeon_recv_buffer *) - vmalloc_node(droq->max_count * + vzalloc_node(droq->max_count * OCT_DROQ_RECVBUF_SIZE, numa_node); if (!droq->recv_buf_list) droq->recv_buf_list = (struct octeon_recv_buffer *) - vmalloc(droq->max_count * + vzalloc(droq->max_count * OCT_DROQ_RECVBUF_SIZE); if (!droq->recv_buf_list) { dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n"); diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 0bc6a4ffce30..6a015362c340 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -793,7 +793,9 @@ static struct attribute *cxgb3_attrs[] = { NULL }; -static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs }; +static const struct attribute_group cxgb3_attr_group = { + .attrs = cxgb3_attrs, +}; static ssize_t tm_attr_show(struct device *d, char *buf, int sched) @@ -880,7 +882,9 @@ static struct attribute *offload_attrs[] = { NULL }; -static struct attribute_group offload_attr_group = {.attrs = offload_attrs }; +static const struct attribute_group offload_attr_group = { + .attrs = offload_attrs, +}; /* * Sends an sk_buff to an offload queue driver diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index ef4be781fd05..daa37750d152 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -338,10 +338,12 @@ struct adapter_params { unsigned int sf_nsec; /* # of flash sectors */ unsigned int sf_fw_start; /* start of FW image in flash */ - unsigned int fw_vers; - unsigned int bs_vers; /* bootstrap version */ - unsigned int tp_vers; - unsigned int er_vers; /* expansion ROM version */ + unsigned int fw_vers; /* firmware version */ + unsigned int bs_vers; /* bootstrap version */ + unsigned int tp_vers; /* TP microcode version */ + unsigned int er_vers; /* expansion ROM version */ + unsigned int scfg_vers; /* Serial Configuration version */ + unsigned int vpd_vers; /* VPD Version */ u8 api_vers[7]; unsigned short mtus[NMTUS]; @@ -1403,10 +1405,15 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, int t4_fl_pkt_align(struct adapter *adap); unsigned int t4_flash_cfg_addr(struct adapter *adapter); int t4_check_fw_version(struct adapter *adap); +int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size); int t4_get_fw_version(struct adapter *adapter, u32 *vers); int t4_get_bs_version(struct adapter *adapter, u32 *vers); int t4_get_tp_version(struct adapter *adapter, u32 *vers); int t4_get_exprom_version(struct adapter *adapter, u32 *vers); +int t4_get_scfg_version(struct adapter *adapter, u32 *vers); +int t4_get_vpd_version(struct adapter *adapter, u32 *vers); +int t4_get_version_info(struct adapter *adapter); +void t4_dump_version_info(struct adapter *adapter); int t4_prep_fw(struct 
adapter *adap, struct fw_info *fw_info, const u8 *fw_data, unsigned int fw_size, struct fw_hdr *card_fw, enum dev_state state, int *reset); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 26eb00a45db1..03f593e84c24 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -801,6 +801,104 @@ static int set_link_ksettings(struct net_device *dev, return ret; } +/* Translate the Firmware FEC value into the ethtool value. */ +static inline unsigned int fwcap_to_eth_fec(unsigned int fw_fec) +{ + unsigned int eth_fec = 0; + + if (fw_fec & FW_PORT_CAP_FEC_RS) + eth_fec |= ETHTOOL_FEC_RS; + if (fw_fec & FW_PORT_CAP_FEC_BASER_RS) + eth_fec |= ETHTOOL_FEC_BASER; + + /* if nothing is set, then FEC is off */ + if (!eth_fec) + eth_fec = ETHTOOL_FEC_OFF; + + return eth_fec; +} + +/* Translate Common Code FEC value into ethtool value. */ +static inline unsigned int cc_to_eth_fec(unsigned int cc_fec) +{ + unsigned int eth_fec = 0; + + if (cc_fec & FEC_AUTO) + eth_fec |= ETHTOOL_FEC_AUTO; + if (cc_fec & FEC_RS) + eth_fec |= ETHTOOL_FEC_RS; + if (cc_fec & FEC_BASER_RS) + eth_fec |= ETHTOOL_FEC_BASER; + + /* if nothing is set, then FEC is off */ + if (!eth_fec) + eth_fec = ETHTOOL_FEC_OFF; + + return eth_fec; +} + +/* Translate ethtool FEC value into Common Code value. */ +static inline unsigned int eth_to_cc_fec(unsigned int eth_fec) +{ + unsigned int cc_fec = 0; + + if (eth_fec & ETHTOOL_FEC_OFF) + return cc_fec; + + if (eth_fec & ETHTOOL_FEC_AUTO) + cc_fec |= FEC_AUTO; + if (eth_fec & ETHTOOL_FEC_RS) + cc_fec |= FEC_RS; + if (eth_fec & ETHTOOL_FEC_BASER) + cc_fec |= FEC_BASER_RS; + + return cc_fec; +} + +static int get_fecparam(struct net_device *dev, struct ethtool_fecparam *fec) +{ + const struct port_info *pi = netdev_priv(dev); + const struct link_config *lc = &pi->link_cfg; + + /* Translate the Firmware FEC Support into the ethtool value. We + * always support IEEE 802.3 "automatic" selection of Link FEC type if + * any FEC is supported. + */ + fec->fec = fwcap_to_eth_fec(lc->supported); + if (fec->fec != ETHTOOL_FEC_OFF) + fec->fec |= ETHTOOL_FEC_AUTO; + + /* Translate the current internal FEC parameters into the + * ethtool values. + */ + fec->active_fec = cc_to_eth_fec(lc->fec); + + return 0; +} + +static int set_fecparam(struct net_device *dev, struct ethtool_fecparam *fec) +{ + struct port_info *pi = netdev_priv(dev); + struct link_config *lc = &pi->link_cfg; + struct link_config old_lc; + int ret; + + /* Save old Link Configuration in case the L1 Configure below + * fails. + */ + old_lc = *lc; + + /* Try to perform the L1 Configure and return the result of that + * effort. If it fails, revert the attempted change. 
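The three translation helpers above map between firmware capability bits (FW_PORT_CAP_FEC_*), the driver's common-code encoding (FEC_*), and the ethtool uAPI bits (ETHTOOL_FEC_*), normalizing "nothing set" to ETHTOOL_FEC_OFF on the way out and short-circuiting ETHTOOL_FEC_OFF to an empty mask on the way in. Worked through the helpers as written:

        /* firmware advertising both FEC flavors: */
        fwcap_to_eth_fec(FW_PORT_CAP_FEC_RS | FW_PORT_CAP_FEC_BASER_RS)
                == (ETHTOOL_FEC_RS | ETHTOOL_FEC_BASER);
        /* get_fecparam() then ORs in ETHTOOL_FEC_AUTO, since any
         * supported FEC implies IEEE 802.3 "automatic" selection */

        /* a user request of "off" wins over everything else and
         * produces an empty common-code mask: */
        eth_to_cc_fec(ETHTOOL_FEC_OFF | ETHTOOL_FEC_RS) == 0;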
+ */ + lc->requested_fec = eth_to_cc_fec(fec->fec); + ret = t4_link_l1cfg(pi->adapter, pi->adapter->mbox, + pi->tx_chan, lc); + if (ret) + *lc = old_lc; + return ret; +} + static void get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) { @@ -1255,6 +1353,8 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, static const struct ethtool_ops cxgb_ethtool_ops = { .get_link_ksettings = get_link_ksettings, .set_link_ksettings = set_link_ksettings, + .get_fecparam = get_fecparam, + .set_fecparam = set_fecparam, .get_drvinfo = get_drvinfo, .get_msglevel = get_msglevel, .set_msglevel = set_msglevel, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index e403fa18f1b1..d80b20d695e0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2889,14 +2889,29 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate) return err; } -static int cxgb_setup_tc(struct net_device *dev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *tc) +static int cxgb_setup_tc_cls_u32(struct net_device *dev, + struct tc_cls_u32_offload *cls_u32) { - struct port_info *pi = netdev2pinfo(dev); - struct adapter *adap = netdev2adap(dev); + if (TC_H_MAJ(cls_u32->common.handle) != TC_H_MAJ(TC_H_INGRESS) || + cls_u32->common.chain_index) + return -EOPNOTSUPP; - if (chain_index) + switch (cls_u32->command) { + case TC_CLSU32_NEW_KNODE: + case TC_CLSU32_REPLACE_KNODE: + return cxgb4_config_knode(dev, cls_u32); + case TC_CLSU32_DELETE_KNODE: + return cxgb4_delete_knode(dev, cls_u32); + default: return -EOPNOTSUPP; + } +} + +static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); if (!(adap->flags & FULL_INIT_DONE)) { dev_err(adap->pdev_dev, @@ -2905,20 +2920,12 @@ static int cxgb_setup_tc(struct net_device *dev, u32 handle, u32 chain_index, return -EINVAL; } - if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) && - tc->type == TC_SETUP_CLSU32) { - switch (tc->cls_u32->command) { - case TC_CLSU32_NEW_KNODE: - case TC_CLSU32_REPLACE_KNODE: - return cxgb4_config_knode(dev, proto, tc->cls_u32); - case TC_CLSU32_DELETE_KNODE: - return cxgb4_delete_knode(dev, proto, tc->cls_u32); - default: - return -EOPNOTSUPP; - } + switch (type) { + case TC_SETUP_CLSU32: + return cxgb_setup_tc_cls_u32(dev, type_data); + default: + return -EOPNOTSUPP; } - - return -EOPNOTSUPP; } static netdev_features_t cxgb_fix_features(struct net_device *dev, @@ -3610,11 +3617,8 @@ static int adap_init0(struct adapter *adap) * later reporting and B. to warn if the currently loaded firmware * is excessively mismatched relative to the driver.) */ - t4_get_fw_version(adap, &adap->params.fw_vers); - t4_get_bs_version(adap, &adap->params.bs_vers); - t4_get_tp_version(adap, &adap->params.tp_vers); - t4_get_exprom_version(adap, &adap->params.er_vers); + t4_get_version_info(adap); ret = t4_check_fw_version(adap); /* If firmware is too old (not supported by driver) force an update. 
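The cxgb_setup_tc() rework above follows the tree-wide ndo_setup_tc conversion: the driver now switches once on enum tc_setup_type and hands the opaque type_data to a per-classifier helper. A hedged standalone sketch of that dispatch shape, with invented types standing in for the kernel's:

#include <stdio.h>

/* Illustrative stand-ins for enum tc_setup_type and an offload payload. */
enum tc_setup_type { TC_SETUP_CLSU32, TC_SETUP_MQPRIO };

struct cls_u32_offload { int command; };

static int setup_tc_cls_u32(struct cls_u32_offload *cls)
{
	printf("u32 command %d\n", cls->command);
	return 0;
}

/* One switch on the setup type; everything classifier-specific lives
 * behind the per-type helper, as in the new cxgb_setup_tc().
 */
static int setup_tc(enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_CLSU32:
		return setup_tc_cls_u32(type_data);
	default:
		return -1;	/* the driver returns -EOPNOTSUPP here */
	}
}

int main(void)
{
	struct cls_u32_offload cls = { .command = 1 };

	return setup_tc(TC_SETUP_CLSU32, &cls);
}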
*/ if (ret) @@ -4560,56 +4564,8 @@ static void cxgb4_check_pcie_caps(struct adapter *adap) /* Dump basic information about the adapter */ static void print_adapter_info(struct adapter *adapter) { - /* Device information */ - dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n", - adapter->params.vpd.id, - CHELSIO_CHIP_RELEASE(adapter->params.chip)); - dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n", - adapter->params.vpd.sn, adapter->params.vpd.pn); - - /* Firmware Version */ - if (!adapter->params.fw_vers) - dev_warn(adapter->pdev_dev, "No firmware loaded\n"); - else - dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n", - FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers), - FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers), - FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers), - FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers)); - - /* Bootstrap Firmware Version. (Some adapters don't have Bootstrap - * Firmware, so dev_info() is more appropriate here.) - */ - if (!adapter->params.bs_vers) - dev_info(adapter->pdev_dev, "No bootstrap loaded\n"); - else - dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n", - FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers), - FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers), - FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers), - FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers)); - - /* TP Microcode Version */ - if (!adapter->params.tp_vers) - dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n"); - else - dev_info(adapter->pdev_dev, - "TP Microcode version: %u.%u.%u.%u\n", - FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers), - FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers), - FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers), - FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers)); - - /* Expansion ROM version */ - if (!adapter->params.er_vers) - dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n"); - else - dev_info(adapter->pdev_dev, - "Expansion ROM version: %u.%u.%u.%u\n", - FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers), - FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers), - FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers), - FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers)); + /* Hardware/Firmware/etc. 
Version/Revision IDs */ + t4_dump_version_info(adapter); /* Software/Hardware configuration */ dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n", diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c index ef06ce8247ab..48970ba08bdc 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c @@ -96,7 +96,7 @@ static int fill_action_fields(struct adapter *adap, LIST_HEAD(actions); exts = cls->knode.exts; - if (tc_no_actions(exts)) + if (!tcf_exts_has_actions(exts)) return -EINVAL; tcf_exts_to_list(exts, &actions); @@ -146,11 +146,11 @@ static int fill_action_fields(struct adapter *adap, return 0; } -int cxgb4_config_knode(struct net_device *dev, __be16 protocol, - struct tc_cls_u32_offload *cls) +int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) { const struct cxgb4_match_field *start, *link_start = NULL; struct adapter *adapter = netdev2adap(dev); + __be16 protocol = cls->common.protocol; struct ch_filter_specification fs; struct cxgb4_tc_u32_table *t; struct cxgb4_link *link; @@ -338,8 +338,7 @@ out: return ret; } -int cxgb4_delete_knode(struct net_device *dev, __be16 protocol, - struct tc_cls_u32_offload *cls) +int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) { struct adapter *adapter = netdev2adap(dev); unsigned int filter_id, max_tids, i, j; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h index 021261a41c13..70a07b7cca56 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h @@ -44,10 +44,8 @@ static inline bool can_tc_u32_offload(struct net_device *dev) return (dev->features & NETIF_F_HW_TC) && adap->tc_u32 ? 
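The cxgb4_config_knode()/cxgb4_delete_knode() signature change above is the other half of that conversion: per-call parameters such as the protocol now ride inside the offload struct's common block instead of being extra arguments. An invented miniature of the pattern (not the real tc structures):

#include <stdio.h>

/* Per-call parameters travel inside a shared "common" header, so every
 * classifier helper keeps the same two-argument shape.
 */
struct tc_common { unsigned short protocol; unsigned int handle, chain_index; };
struct cls_u32_offload { struct tc_common common; int command; };

static int config_knode(struct cls_u32_offload *cls)
{
	unsigned short protocol = cls->common.protocol;	/* was a parameter */

	printf("proto %#x command %d\n", (unsigned int)protocol, cls->command);
	return 0;
}

int main(void)
{
	struct cls_u32_offload cls = {
		.common = { .protocol = 0x0800 /* ETH_P_IP */ },
		.command = 1,
	};

	return config_knode(&cls);
}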
true : false; } -int cxgb4_config_knode(struct net_device *dev, __be16 protocol, - struct tc_cls_u32_offload *cls); -int cxgb4_delete_knode(struct net_device *dev, __be16 protocol, - struct tc_cls_u32_offload *cls); +int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls); +int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls); void cxgb4_cleanup_tc_u32(struct adapter *adapter); struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 82bf7aac6cdb..fff8fba86f97 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -913,7 +913,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0xd010, 0xd03c, 0xdfc0, 0xdfe0, 0xe000, 0xea7c, - 0xf000, 0x11190, + 0xf000, 0x11110, + 0x11118, 0x11190, 0x19040, 0x1906c, 0x19078, 0x19080, 0x1908c, 0x190e4, @@ -1439,8 +1440,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x1ff00, 0x1ff84, 0x1ffc0, 0x1ffc8, 0x30000, 0x30030, - 0x30038, 0x30038, - 0x30040, 0x30040, 0x30100, 0x30144, 0x30190, 0x301a0, 0x301a8, 0x301b8, @@ -1551,8 +1550,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x33c3c, 0x33c50, 0x33cf0, 0x33cfc, 0x34000, 0x34030, - 0x34038, 0x34038, - 0x34040, 0x34040, 0x34100, 0x34144, 0x34190, 0x341a0, 0x341a8, 0x341b8, @@ -1663,8 +1660,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x37c3c, 0x37c50, 0x37cf0, 0x37cfc, 0x38000, 0x38030, - 0x38038, 0x38038, - 0x38040, 0x38040, 0x38100, 0x38144, 0x38190, 0x381a0, 0x381a8, 0x381b8, @@ -1775,8 +1770,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x3bc3c, 0x3bc50, 0x3bcf0, 0x3bcfc, 0x3c000, 0x3c030, - 0x3c038, 0x3c038, - 0x3c040, 0x3c040, 0x3c100, 0x3c144, 0x3c190, 0x3c1a0, 0x3c1a8, 0x3c1b8, @@ -2040,12 +2033,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x1190, 0x1194, 0x11a0, 0x11a4, 0x11b0, 0x11b4, - 0x11fc, 0x1258, - 0x1280, 0x12d4, - 0x12d9, 0x12d9, - 0x12de, 0x12de, - 0x12e3, 0x12e3, - 0x12e8, 0x133c, + 0x11fc, 0x1274, + 0x1280, 0x133c, 0x1800, 0x18fc, 0x3000, 0x302c, 0x3060, 0x30b0, @@ -2076,6 +2065,9 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x5ea0, 0x5eb0, 0x5ec0, 0x5ec0, 0x5ec8, 0x5ed0, + 0x5ee0, 0x5ee0, + 0x5ef0, 0x5ef0, + 0x5f00, 0x5f00, 0x6000, 0x6020, 0x6028, 0x6040, 0x6058, 0x609c, @@ -2133,6 +2125,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0xd300, 0xd31c, 0xdfc0, 0xdfe0, 0xe000, 0xf008, + 0xf010, 0xf018, + 0xf020, 0xf028, 0x11000, 0x11014, 0x11048, 0x1106c, 0x11074, 0x11088, @@ -2256,13 +2250,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x1ff00, 0x1ff84, 0x1ffc0, 0x1ffc8, 0x30000, 0x30030, - 0x30038, 0x30038, - 0x30040, 0x30040, - 0x30048, 0x30048, - 0x30050, 0x30050, - 0x3005c, 0x30060, - 0x30068, 0x30068, - 0x30070, 0x30070, 0x30100, 0x30168, 0x30190, 0x301a0, 0x301a8, 0x301b8, @@ -2325,13 +2312,12 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x326a8, 0x326a8, 0x326ec, 0x326ec, 0x32a00, 0x32abc, - 0x32b00, 0x32b38, + 0x32b00, 0x32b18, + 0x32b20, 0x32b38, 0x32b40, 0x32b58, 0x32b60, 0x32b78, 0x32c00, 0x32c00, 0x32c08, 0x32c3c, - 0x32e00, 0x32e2c, - 0x32f00, 0x32f2c, 0x33000, 0x3302c, 0x33034, 0x33050, 0x33058, 0x33058, @@ -2396,13 +2382,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x33c38, 0x33c50, 0x33cf0, 
0x33cfc, 0x34000, 0x34030, - 0x34038, 0x34038, - 0x34040, 0x34040, - 0x34048, 0x34048, - 0x34050, 0x34050, - 0x3405c, 0x34060, - 0x34068, 0x34068, - 0x34070, 0x34070, 0x34100, 0x34168, 0x34190, 0x341a0, 0x341a8, 0x341b8, @@ -2465,13 +2444,12 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x366a8, 0x366a8, 0x366ec, 0x366ec, 0x36a00, 0x36abc, - 0x36b00, 0x36b38, + 0x36b00, 0x36b18, + 0x36b20, 0x36b38, 0x36b40, 0x36b58, 0x36b60, 0x36b78, 0x36c00, 0x36c00, 0x36c08, 0x36c3c, - 0x36e00, 0x36e2c, - 0x36f00, 0x36f2c, 0x37000, 0x3702c, 0x37034, 0x37050, 0x37058, 0x37058, @@ -2545,8 +2523,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x40280, 0x40280, 0x40304, 0x40304, 0x40330, 0x4033c, - 0x41304, 0x413b8, - 0x413c0, 0x413c8, + 0x41304, 0x413c8, 0x413d0, 0x413dc, 0x413f0, 0x413f0, 0x41400, 0x4140c, @@ -3100,6 +3077,179 @@ int t4_get_exprom_version(struct adapter *adap, u32 *vers) } /** + * t4_get_vpd_version - return the VPD version + * @adapter: the adapter + * @vers: where to place the version + * + * Reads the VPD via the Firmware interface (thus this can only be called + * once we're ready to issue Firmware commands). The format of the + * VPD version is adapter specific. Returns 0 on success, an error on + * failure. + * + * Note that early versions of the Firmware didn't include the ability + * to retrieve the VPD version, so we zero-out the return-value parameter + * in that case to avoid leaving it with garbage in it. + * + * Also note that the Firmware will return its cached copy of the VPD + * Revision ID, not the actual Revision ID as written in the Serial + * EEPROM. This is only an issue if a new VPD has been written and the + * Firmware/Chip haven't yet gone through a RESET sequence. So it's best + * to defer calling this routine till after a FW_RESET_CMD has been issued + * if the Host Driver will be performing a full adapter initialization. + */ +int t4_get_vpd_version(struct adapter *adapter, u32 *vers) +{ + u32 vpdrev_param; + int ret; + + vpdrev_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_VPDREV)); + ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, + 1, &vpdrev_param, vers); + if (ret) + *vers = 0; + return ret; +} + +/** + * t4_get_scfg_version - return the Serial Configuration version + * @adapter: the adapter + * @vers: where to place the version + * + * Reads the Serial Configuration Version via the Firmware interface + * (thus this can only be called once we're ready to issue Firmware + * commands). The format of the Serial Configuration version is + * adapter specific. Returns 0 on success, an error on failure. + * + * Note that early versions of the Firmware didn't include the ability + * to retrieve the Serial Configuration version, so we zero-out the + * return-value parameter in that case to avoid leaving it with + * garbage in it. + * + * Also note that the Firmware will return its cached copy of the Serial + * Initialization Revision ID, not the actual Revision ID as written in + * the Serial EEPROM. This is only an issue if a new VPD has been written + * and the Firmware/Chip haven't yet gone through a RESET sequence. So + * it's best to defer calling this routine till after a FW_RESET_CMD has + * been issued if the Host Driver will be performing a full adapter + * initialization. 
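Both version queries below share one shape: compose a parameter-selector word from mnemonic and parameter fields, issue t4_query_params(), and zero the out-parameter on failure so old firmware never leaves garbage behind. A standalone sketch with an invented field layout (the real FW_PARAMS_MNEM_V/FW_PARAMS_PARAM_X_V macros in t4fw_api.h differ); the 0x1B value is the FW_PARAMS_PARAM_DEV_VPDREV constant from the t4fw_api.h hunk later in this patch:

#include <stdio.h>

/* Invented layout for illustration: mnemonic in bits 24-31, parameter
 * index in bits 16-23.
 */
#define PARAMS_MNEM(x)	((unsigned int)(x) << 24)
#define PARAMS_PARAM(x)	((unsigned int)(x) << 16)

#define MNEM_DEV	1
#define PARAM_VPDREV	0x1B

/* Pretend firmware query; nonzero return simulates old firmware that
 * doesn't know the parameter.
 */
static int query_params(unsigned int selector, unsigned int *val)
{
	(void)selector;
	*val = 0xdeadbeef;	/* whatever the mailbox reply contained */
	return -1;
}

static int get_vpd_version(unsigned int *vers)
{
	unsigned int sel = PARAMS_MNEM(MNEM_DEV) | PARAMS_PARAM(PARAM_VPDREV);
	int ret = query_params(sel, vers);

	if (ret)
		*vers = 0;	/* don't leak garbage to the caller */
	return ret;
}

int main(void)
{
	unsigned int vers;

	get_vpd_version(&vers);
	printf("vpd vers %#x\n", vers);	/* prints 0 on failure */
	return 0;
}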
+ */ +int t4_get_scfg_version(struct adapter *adapter, u32 *vers) +{ + u32 scfgrev_param; + int ret; + + scfgrev_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_SCFGREV)); + ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, + 1, &scfgrev_param, vers); + if (ret) + *vers = 0; + return ret; +} + +/** + * t4_get_version_info - extract various chip/firmware version information + * @adapter: the adapter + * + * Reads various chip/firmware version numbers and stores them into the + * adapter Adapter Parameters structure. If any of the efforts fails + * the first failure will be returned, but all of the version numbers + * will be read. + */ +int t4_get_version_info(struct adapter *adapter) +{ + int ret = 0; + + #define FIRST_RET(__getvinfo) \ + do { \ + int __ret = __getvinfo; \ + if (__ret && !ret) \ + ret = __ret; \ + } while (0) + + FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers)); + FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers)); + FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers)); + FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers)); + FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers)); + FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers)); + + #undef FIRST_RET + return ret; +} + +/** + * t4_dump_version_info - dump all of the adapter configuration IDs + * @adapter: the adapter + * + * Dumps all of the various bits of adapter configuration version/revision + * IDs information. This is typically called at some point after + * t4_get_version_info() has been called. + */ +void t4_dump_version_info(struct adapter *adapter) +{ + /* Device information */ + dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n", + adapter->params.vpd.id, + CHELSIO_CHIP_RELEASE(adapter->params.chip)); + dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n", + adapter->params.vpd.sn, adapter->params.vpd.pn); + + /* Firmware Version */ + if (!adapter->params.fw_vers) + dev_warn(adapter->pdev_dev, "No firmware loaded\n"); + else + dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n", + FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers), + FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers), + FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers), + FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers)); + + /* Bootstrap Firmware Version. (Some adapters don't have Bootstrap + * Firmware, so dev_info() is more appropriate here.) 
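The FIRST_RET macro in t4_get_version_info() below is a small reusable pattern: attempt every read, but remember only the first failure. A standalone restatement, assuming nothing beyond standard C:

#include <stdio.h>

/* Run every step, keep only the first failing return value. Like the
 * driver's macro, this relies on a local 'ret' being in scope.
 */
#define FIRST_RET(expr)				\
	do {					\
		int __ret = (expr);		\
		if (__ret && !ret)		\
			ret = __ret;		\
	} while (0)

static int step_ok(void)   { return 0; }
static int step_fail(void) { return -5; }

int main(void)
{
	int ret = 0;

	FIRST_RET(step_ok());
	FIRST_RET(step_fail());	/* ret becomes -5 */
	FIRST_RET(step_fail());	/* stays -5: later errors don't overwrite */
	printf("first error: %d\n", ret);
	return 0;
}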
+ */ + if (!adapter->params.bs_vers) + dev_info(adapter->pdev_dev, "No bootstrap loaded\n"); + else + dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n", + FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers), + FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers), + FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers), + FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers)); + + /* TP Microcode Version */ + if (!adapter->params.tp_vers) + dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n"); + else + dev_info(adapter->pdev_dev, + "TP Microcode version: %u.%u.%u.%u\n", + FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers), + FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers), + FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers), + FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers)); + + /* Expansion ROM version */ + if (!adapter->params.er_vers) + dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n"); + else + dev_info(adapter->pdev_dev, + "Expansion ROM version: %u.%u.%u.%u\n", + FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers), + FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers), + FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers), + FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers)); + + /* Serial Configuration version */ + dev_info(adapter->pdev_dev, "Serial Configuration version: %#x\n", + adapter->params.scfg_vers); + + /* VPD Version */ + dev_info(adapter->pdev_dev, "VPD version: %#x\n", + adapter->params.vpd_vers); +} + +/** * t4_check_fw_version - check if the FW is supported with this driver * @adap: the adapter * @@ -3690,11 +3840,64 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf) FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \ FW_PORT_CAP_ANEG) +/* Translate Firmware Port Capabilities Pause specification to Common Code */ +static inline unsigned int fwcap_to_cc_pause(unsigned int fw_pause) +{ + unsigned int cc_pause = 0; + + if (fw_pause & FW_PORT_CAP_FC_RX) + cc_pause |= PAUSE_RX; + if (fw_pause & FW_PORT_CAP_FC_TX) + cc_pause |= PAUSE_TX; + + return cc_pause; +} + +/* Translate Common Code Pause specification into Firmware Port Capabilities */ +static inline unsigned int cc_to_fwcap_pause(unsigned int cc_pause) +{ + unsigned int fw_pause = 0; + + if (cc_pause & PAUSE_RX) + fw_pause |= FW_PORT_CAP_FC_RX; + if (cc_pause & PAUSE_TX) + fw_pause |= FW_PORT_CAP_FC_TX; + + return fw_pause; +} + +/* Translate Firmware Forward Error Correction specification to Common Code */ +static inline unsigned int fwcap_to_cc_fec(unsigned int fw_fec) +{ + unsigned int cc_fec = 0; + + if (fw_fec & FW_PORT_CAP_FEC_RS) + cc_fec |= FEC_RS; + if (fw_fec & FW_PORT_CAP_FEC_BASER_RS) + cc_fec |= FEC_BASER_RS; + + return cc_fec; +} + +/* Translate Common Code Forward Error Correction specification to Firmware */ +static inline unsigned int cc_to_fwcap_fec(unsigned int cc_fec) +{ + unsigned int fw_fec = 0; + + if (cc_fec & FEC_RS) + fw_fec |= FW_PORT_CAP_FEC_RS; + if (cc_fec & FEC_BASER_RS) + fw_fec |= FW_PORT_CAP_FEC_BASER_RS; + + return fw_fec; +} + /** * t4_link_l1cfg - apply link configuration to MAC/PHY - * @phy: the PHY to setup - * @mac: the MAC to setup - * @lc: the requested link configuration + * @adapter: the adapter + * @mbox: the Firmware Mailbox to use + * @port: the Port ID + * @lc: the Port's Link Configuration * * Set up a port's MAC and PHY according to a desired link configuration. 
* - If the PHY can auto-negotiate, first decide what to advertise, then @@ -3707,22 +3910,46 @@ int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port, struct link_config *lc) { struct fw_port_cmd c; - unsigned int mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO); - unsigned int fc = 0, fec = 0, fw_fec = 0; + unsigned int fw_mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO); + unsigned int fw_fc, cc_fec, fw_fec; + unsigned int rcap; lc->link_ok = 0; - if (lc->requested_fc & PAUSE_RX) - fc |= FW_PORT_CAP_FC_RX; - if (lc->requested_fc & PAUSE_TX) - fc |= FW_PORT_CAP_FC_TX; - fec = lc->requested_fec & FEC_AUTO ? lc->auto_fec : lc->requested_fec; + /* Convert driver coding of Pause Frame Flow Control settings into the + * Firmware's API. + */ + fw_fc = cc_to_fwcap_pause(lc->requested_fc); + + /* Convert Common Code Forward Error Control settings into the + * Firmware's API. If the current Requested FEC has "Automatic" + * (IEEE 802.3) specified, then we use whatever the Firmware + * sent us as part of its IEEE 802.3-based interpretation of + * the Transceiver Module EPROM FEC parameters. Otherwise we + * use whatever is in the current Requested FEC settings. + */ + if (lc->requested_fec & FEC_AUTO) + cc_fec = lc->auto_fec; + else + cc_fec = lc->requested_fec; + fw_fec = cc_to_fwcap_fec(cc_fec); - if (fec & FEC_RS) - fw_fec |= FW_PORT_CAP_FEC_RS; - if (fec & FEC_BASER_RS) - fw_fec |= FW_PORT_CAP_FEC_BASER_RS; + /* Figure out what our Requested Port Capabilities are going to be. + */ + if (!(lc->supported & FW_PORT_CAP_ANEG)) { + rcap = (lc->supported & ADVERT_MASK) | fw_fc | fw_fec; + lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); + lc->fec = cc_fec; + } else if (lc->autoneg == AUTONEG_DISABLE) { + rcap = lc->requested_speed | fw_fc | fw_fec | fw_mdi; + lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); + lc->fec = cc_fec; + } else { + rcap = lc->advertising | fw_fc | fw_fec | fw_mdi; + } + /* And send that on to the Firmware ... + */ memset(&c, 0, sizeof(c)); c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F | FW_CMD_EXEC_F | @@ -3730,19 +3957,7 @@ int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port, c.action_to_len16 = cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) | FW_LEN16(c)); - - if (!(lc->supported & FW_PORT_CAP_ANEG)) { - c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) | - fc | fw_fec); - lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); - } else if (lc->autoneg == AUTONEG_DISABLE) { - c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | - fw_fec | mdi); - lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); - } else - c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | - fw_fec | mdi); - + c.u.l1cfg.rcap = cpu_to_be32(rcap); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } @@ -6449,6 +6664,17 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, goto out; /* + * If there was a Firmware Configuration File stored in FLASH, + * there's a good chance that it won't be compatible with the new + * Firmware. In order to prevent difficult-to-diagnose adapter + * initialization issues, we clear out the Firmware Configuration File + * portion of the FLASH. The user will need to re-FLASH a new + * Firmware Configuration File which is compatible with the new + * Firmware if that's desired. + */ + (void)t4_load_cfg(adap, NULL, 0); + + /* + * Older versions of the firmware don't understand the new + * PCIE_FW.HALT flag and so won't know to perform a RESET when they + * restart.
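The reworked t4_link_l1cfg() above computes the Requested Port Capabilities word once and writes it in a single place. A condensed standalone sketch of just that three-way decision, with illustrative flag values rather than the real FW_PORT_CAP_* bits:

#include <stdio.h>

/* Illustrative capability bits only; the real values live in t4fw_api.h. */
#define CAP_ANEG	0x100
#define ADVERT_MASK	0x0ff
#define AUTONEG_DISABLE	0

struct lcfg {
	unsigned int supported, advertising, requested_speed;
	int autoneg;
};

/* The three-way decision t4_link_l1cfg() now makes exactly once. */
static unsigned int build_rcap(const struct lcfg *lc, unsigned int fw_fc,
			       unsigned int fw_fec, unsigned int fw_mdi)
{
	if (!(lc->supported & CAP_ANEG))	/* link can't autonegotiate */
		return (lc->supported & ADVERT_MASK) | fw_fc | fw_fec;
	if (lc->autoneg == AUTONEG_DISABLE)	/* forced speed */
		return lc->requested_speed | fw_fc | fw_fec | fw_mdi;
	return lc->advertising | fw_fc | fw_fec | fw_mdi; /* autoneg */
}

int main(void)
{
	struct lcfg lc = { .supported = CAP_ANEG | 0x2, .advertising = 0x2,
			   .autoneg = 1 };

	printf("rcap %#x\n", build_rcap(&lc, 0, 0, 0));
	return 0;
}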
So for newly loaded older firmware we'll have to do the @@ -7480,19 +7706,28 @@ static const char *t4_link_down_rc_str(unsigned char link_down_rc) void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) { const struct fw_port_cmd *p = (const void *)rpl; + unsigned int acaps = be16_to_cpu(p->u.info.acap); struct adapter *adap = pi->adapter; /* link/module state change message */ - int speed = 0, fc = 0; + int speed = 0, fc, fec; struct link_config *lc; u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype); int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0; u32 mod = FW_PORT_CMD_MODTYPE_G(stat); + /* Unfortunately the format of the Link Status returned by the + * Firmware isn't the same as the Firmware Port Capabilities bitfield + * used everywhere else ... + */ + fc = 0; if (stat & FW_PORT_CMD_RXPAUSE_F) fc |= PAUSE_RX; if (stat & FW_PORT_CMD_TXPAUSE_F) fc |= PAUSE_TX; + + fec = fwcap_to_cc_fec(acaps); + if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) speed = 100; else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) @@ -7509,11 +7744,20 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) lc = &pi->link_cfg; if (mod != pi->mod_type) { + /* When a new Transceiver Module is inserted, the Firmware + * will examine any Forward Error Correction parameters + * present in the Transceiver Module i2c EPROM and determine + * the supported and recommended FEC settings from those + * based on IEEE 802.3 standards. We always record the + * IEEE 802.3 recommended "automatic" settings. + */ + lc->auto_fec = fec; + pi->mod_type = mod; t4_os_portmod_changed(adap, pi->port_id); } if (link_ok != lc->link_ok || speed != lc->speed || - fc != lc->fc) { /* something changed */ + fc != lc->fc || fec != lc->fec) { /* something changed */ if (!link_ok && lc->link_ok) { unsigned char rc = FW_PORT_CMD_LINKDNRC_G(stat); @@ -7525,6 +7769,8 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) lc->link_ok = link_ok; lc->speed = speed; lc->fc = fc; + lc->fec = fec; + lc->supported = be16_to_cpu(p->u.info.pcap); lc->lp_advertising = be16_to_cpu(p->u.info.lpacap); @@ -7614,7 +7860,8 @@ static void get_pci_mode(struct adapter *adapter, struct pci_params *p) /** * init_link_config - initialize a link's SW state * @lc: structure holding the link state - * @caps: link capabilities + * @pcaps: link Port Capabilities + * @acaps: link current Advertised Port Capabilities * * Initializes the SW state maintained for each link, including the link's * capabilities and default speed/flow-control/autonegotiation settings. @@ -7627,15 +7874,11 @@ static void init_link_config(struct link_config *lc, unsigned int pcaps, lc->requested_speed = 0; lc->speed = 0; lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; - lc->auto_fec = 0; /* For Forward Error Control, we default to whatever the Firmware * tells us the Link is currently advertising. */ - if (acaps & FW_PORT_CAP_FEC_RS) - lc->auto_fec |= FEC_RS; - if (acaps & FW_PORT_CAP_FEC_BASER_RS) - lc->auto_fec |= FEC_BASER_RS; + lc->auto_fec = fwcap_to_cc_fec(acaps); lc->requested_fec = FEC_AUTO; lc->fec = lc->auto_fec; @@ -8664,6 +8907,65 @@ void t4_idma_monitor(struct adapter *adapter, } /** + * t4_load_cfg - download config file + * @adap: the adapter + * @cfg_data: the cfg text file to write + * @size: text file size + * + * Write the supplied config text file to the card's serial flash. 
+ */ +int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size) +{ + int ret, i, n, cfg_addr; + unsigned int addr; + unsigned int flash_cfg_start_sec; + unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; + + cfg_addr = t4_flash_cfg_addr(adap); + if (cfg_addr < 0) + return cfg_addr; + + addr = cfg_addr; + flash_cfg_start_sec = addr / SF_SEC_SIZE; + + if (size > FLASH_CFG_MAX_SIZE) { + dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n", + FLASH_CFG_MAX_SIZE); + return -EFBIG; + } + + i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */ + sf_sec_size); + ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec, + flash_cfg_start_sec + i - 1); + /* If size == 0 then we're simply erasing the FLASH sectors associated + * with the on-adapter Firmware Configuration File. + */ + if (ret || size == 0) + goto out; + + /* this will write to the flash up to SF_PAGE_SIZE at a time */ + for (i = 0; i < size; i += SF_PAGE_SIZE) { + if ((size - i) < SF_PAGE_SIZE) + n = size - i; + else + n = SF_PAGE_SIZE; + ret = t4_write_flash(adap, addr, n, cfg_data); + if (ret) + goto out; + + addr += SF_PAGE_SIZE; + cfg_data += SF_PAGE_SIZE; + } + +out: + if (ret) + dev_err(adap->pdev_dev, "config file %s failed %d\n", + (size == 0 ? "clear" : "download"), ret); + return ret; +} + +/** * t4_set_vf_mac - Set MAC address for the specified VF * @adapter: The adapter * @vf: one of the VFs instantiated by the specified PF diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 0ebed64d62d3..ad825fbc21a5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -1124,6 +1124,8 @@ enum fw_params_param_dev { FW_PARAMS_PARAM_DEV_MAXIRD_ADAPTER = 0x14, /* max supported adap IRD */ FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17, FW_PARAMS_PARAM_DEV_FWCACHE = 0x18, + FW_PARAMS_PARAM_DEV_SCFGREV = 0x1A, + FW_PARAMS_PARAM_DEV_VPDREV = 0x1B, FW_PARAMS_PARAM_DEV_RI_FR_NSMR_TPTE_WR = 0x1C, FW_PARAMS_PARAM_DEV_MPSBGMAP = 0x1E, }; diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index 17e566a8b345..84394b43c0a1 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -1303,7 +1303,6 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 0x00, 'L', 'i', 'n', 'u', 'x' }; static int last_irq; - static int multiport_cnt; /* For four-port boards w/one EEPROM */ int i, irq; unsigned short sum; unsigned char *ee_data; @@ -1557,7 +1556,6 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } else if (ee_data[0] == 0xff && ee_data[1] == 0xff && ee_data[2] == 0) { sa_offset = 2; /* Grrr, damn Matrox boards. 
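t4_load_cfg() above erases the config sectors first and then, unless size is zero (the "clear" case used by t4_fw_upgrade()), streams the file out in SF_PAGE_SIZE chunks. The chunking loop in isolation, as a standalone sketch with a stubbed page writer and a stand-in page size:

#include <stdio.h>

#define PAGE_SZ 256	/* stand-in for SF_PAGE_SIZE */

static int write_page(unsigned int addr, unsigned int n,
		      const unsigned char *p)
{
	(void)p;
	printf("write %u bytes at %#x\n", n, addr);
	return 0;
}

static int load_cfg(unsigned int addr, const unsigned char *data,
		    unsigned int size)
{
	unsigned int i, n;
	int ret = 0;

	for (i = 0; i < size; i += PAGE_SZ) {
		/* last chunk may be a partial page */
		n = (size - i < PAGE_SZ) ? size - i : PAGE_SZ;
		ret = write_page(addr, n, data);
		if (ret)
			break;
		addr += PAGE_SZ;
		data += PAGE_SZ;
	}
	return ret;
}

int main(void)
{
	unsigned char cfg[700] = { 0 };

	return load_cfg(0x1000, cfg, sizeof(cfg)); /* 3 writes: 256+256+188 */
}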
*/ - multiport_cnt = 4; } #ifdef CONFIG_MIPS_COBALT if ((pdev->bus->number == 0) && diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c index 4ee042c034a1..1b79a6defd56 100644 --- a/drivers/net/ethernet/ec_bhf.c +++ b/drivers/net/ethernet/ec_bhf.c @@ -73,7 +73,7 @@ #define ETHERCAT_MASTER_ID 0x14 -static struct pci_device_id ids[] = { +static const struct pci_device_id ids[] = { { PCI_DEVICE(0x15ec, 0x5000), }, { 0, } }; diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c index 2b62841c4c63..05989aafaf32 100644 --- a/drivers/net/ethernet/emulex/benet/be_roce.c +++ b/drivers/net/ethernet/emulex/benet/be_roce.c @@ -139,10 +139,7 @@ int be_roce_register_driver(struct ocrdma_driver *drv) } ocrdma_drv = drv; list_for_each_entry(dev, &be_adapter_list, entry) { - struct net_device *netdev; - _be_roce_dev_add(dev); - netdev = dev->netdev; } mutex_unlock(&be_adapter_list_lock); return 0; diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 757b873735a5..733d54caabb6 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -342,18 +342,19 @@ static void dpaa_get_stats64(struct net_device *net_dev, } } -static int dpaa_setup_tc(struct net_device *net_dev, u32 handle, - u32 chain_index, __be16 proto, struct tc_to_netdev *tc) +static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type, + void *type_data) { struct dpaa_priv *priv = netdev_priv(net_dev); + struct tc_mqprio_qopt *mqprio = type_data; u8 num_tc; int i; - if (tc->type != TC_SETUP_MQPRIO) - return -EINVAL; + if (type != TC_SETUP_MQPRIO) + return -EOPNOTSUPP; - tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - num_tc = tc->mqprio->num_tc; + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + num_tc = mqprio->num_tc; if (num_tc == priv->num_tc) return 0; @@ -398,8 +399,8 @@ static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev) of_dev = of_find_device_by_node(mac_node); if (!of_dev) { - dev_err(dpaa_dev, "of_find_device_by_node(%s) failed\n", - mac_node->full_name); + dev_err(dpaa_dev, "of_find_device_by_node(%pOF) failed\n", + mac_node); of_node_put(mac_node); return ERR_PTR(-EINVAL); } diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index a6e323f15637..df09b254553d 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -173,10 +173,12 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); #endif /* CONFIG_M5272 */ /* The FEC stores dest/src/type/vlan, data, and checksum for receive packets. + * + * 2048-byte skbufs are allocated. However, alignment requirements + * vary between FEC variants. Worst case is 64, so round down by 64.
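The new PKT_MAXBUF_SIZE works out to a concrete number: 2048 - 64 = 1984, and 1984 is already a multiple of 64, so round_down(1984, 64) == 1984. A one-file check of that arithmetic using the usual power-of-two round-down idiom:

#include <assert.h>

/* Kernel-style round_down for a power-of-two alignment. */
#define ROUND_DOWN(x, a) ((x) & ~((a) - 1))

int main(void)
{
	assert(ROUND_DOWN(2048 - 64, 64) == 1984);
	return 0;
}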
*/ -#define PKT_MAXBUF_SIZE 1522 +#define PKT_MAXBUF_SIZE (round_down(2048 - 64, 64)) #define PKT_MINBUF_SIZE 64 -#define PKT_MAXBLR_SIZE 1536 /* FEC receive acceleration */ #define FEC_RACC_IPDIS (1 << 1) @@ -851,7 +853,7 @@ static void fec_enet_enable_ring(struct net_device *ndev) for (i = 0; i < fep->num_rx_queues; i++) { rxq = fep->rx_queue[i]; writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i)); - writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i)); + writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i)); /* enable DMA1/2 */ if (i) @@ -1904,8 +1906,10 @@ static int fec_enet_mii_probe(struct net_device *ndev) phy_dev = of_phy_connect(ndev, fep->phy_node, &fec_enet_adjust_link, 0, fep->phy_interface); - if (!phy_dev) + if (!phy_dev) { + netdev_err(ndev, "Unable to connect to phy\n"); return -ENODEV; + } } else { /* check for attached phy */ for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) { diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index aa8cf5d2a53c..6d7269d87a85 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -960,8 +960,8 @@ static int mpc52xx_fec_probe(struct platform_device *op) /* We're done ! */ platform_set_drvdata(op, ndev); - netdev_info(ndev, "%s MAC %pM\n", - op->dev.of_node->full_name, ndev->dev_addr); + netdev_info(ndev, "%pOF MAC %pM\n", + op->dev.of_node, ndev->dev_addr); return 0; diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c index 4aefe2438969..e714b8fa55eb 100644 --- a/drivers/net/ethernet/freescale/fman/fman.c +++ b/drivers/net/ethernet/freescale/fman/fman.c @@ -1925,8 +1925,8 @@ static int fman_reset(struct fman *fman) guts_regs = of_iomap(guts_node, 0); if (!guts_regs) { - dev_err(fman->dev, "%s: Couldn't map %s regs\n", - __func__, guts_node->full_name); + dev_err(fman->dev, "%s: Couldn't map %pOF regs\n", + __func__, guts_node); goto guts_regs; } #define FMAN1_ALL_MACS_MASK 0xFCC00000 @@ -2780,8 +2780,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev) err = of_property_read_u32(fm_node, "cell-index", &val); if (err) { - dev_err(&of_dev->dev, "%s: failed to read cell-index for %s\n", - __func__, fm_node->full_name); + dev_err(&of_dev->dev, "%s: failed to read cell-index for %pOF\n", + __func__, fm_node); goto fman_node_put; } fman->dts_params.id = (u8)val; @@ -2834,8 +2834,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev) err = of_property_read_u32_array(fm_node, "fsl,qman-channel-range", &range[0], 2); if (err) { - dev_err(&of_dev->dev, "%s: failed to read fsl,qman-channel-range for %s\n", - __func__, fm_node->full_name); + dev_err(&of_dev->dev, "%s: failed to read fsl,qman-channel-range for %pOF\n", + __func__, fm_node); goto fman_node_put; } fman->dts_params.qman_channel_base = range[0]; diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c index 57bf44fa16a1..49bfa11f2d20 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.c +++ b/drivers/net/ethernet/freescale/fman/fman_port.c @@ -1720,8 +1720,8 @@ static int fman_port_probe(struct platform_device *of_dev) err = of_property_read_u32(port_node, "cell-index", &val); if (err) { - dev_err(port->dev, "%s: reading cell-index for %s failed\n", - __func__, port_node->full_name); + dev_err(port->dev, "%s: reading cell-index for %pOF failed\n", + __func__, port_node); err = -EINVAL; goto return_err; } diff --git 
a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 6e67d22fd0d5..14cd2c8b0024 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -698,8 +698,8 @@ static int mac_probe(struct platform_device *_of_dev) priv->internal_phy_node = of_parse_phandle(mac_node, "pcsphy-handle", 0); } else { - dev_err(dev, "MAC node (%s) contains unsupported MAC\n", - mac_node->full_name); + dev_err(dev, "MAC node (%pOF) contains unsupported MAC\n", + mac_node); err = -EINVAL; goto _return; } @@ -712,16 +712,15 @@ static int mac_probe(struct platform_device *_of_dev) /* Get the FM node */ dev_node = of_get_parent(mac_node); if (!dev_node) { - dev_err(dev, "of_get_parent(%s) failed\n", - mac_node->full_name); + dev_err(dev, "of_get_parent(%pOF) failed\n", + mac_node); err = -EINVAL; goto _return_dev_set_drvdata; } of_dev = of_find_device_by_node(dev_node); if (!of_dev) { - dev_err(dev, "of_find_device_by_node(%s) failed\n", - dev_node->full_name); + dev_err(dev, "of_find_device_by_node(%pOF) failed\n", dev_node); err = -EINVAL; goto _return_of_node_put; } @@ -729,8 +728,7 @@ static int mac_probe(struct platform_device *_of_dev) /* Get the FMan cell-index */ err = of_property_read_u32(dev_node, "cell-index", &val); if (err) { - dev_err(dev, "failed to read cell-index for %s\n", - dev_node->full_name); + dev_err(dev, "failed to read cell-index for %pOF\n", dev_node); err = -EINVAL; goto _return_of_node_put; } @@ -739,7 +737,7 @@ static int mac_probe(struct platform_device *_of_dev) priv->fman = fman_bind(&of_dev->dev); if (!priv->fman) { - dev_err(dev, "fman_bind(%s) failed\n", dev_node->full_name); + dev_err(dev, "fman_bind(%pOF) failed\n", dev_node); err = -ENODEV; goto _return_of_node_put; } @@ -749,8 +747,8 @@ static int mac_probe(struct platform_device *_of_dev) /* Get the address of the memory mapped registers */ err = of_address_to_resource(mac_node, 0, &res); if (err < 0) { - dev_err(dev, "of_address_to_resource(%s) = %d\n", - mac_node->full_name, err); + dev_err(dev, "of_address_to_resource(%pOF) = %d\n", + mac_node, err); goto _return_dev_set_drvdata; } @@ -784,8 +782,7 @@ static int mac_probe(struct platform_device *_of_dev) /* Get the cell-index */ err = of_property_read_u32(mac_node, "cell-index", &val); if (err) { - dev_err(dev, "failed to read cell-index for %s\n", - mac_node->full_name); + dev_err(dev, "failed to read cell-index for %pOF\n", mac_node); err = -EINVAL; goto _return_dev_set_drvdata; } @@ -794,8 +791,7 @@ static int mac_probe(struct platform_device *_of_dev) /* Get the MAC address */ mac_addr = of_get_mac_address(mac_node); if (!mac_addr) { - dev_err(dev, "of_get_mac_address(%s) failed\n", - mac_node->full_name); + dev_err(dev, "of_get_mac_address(%pOF) failed\n", mac_node); err = -EINVAL; goto _return_dev_set_drvdata; } @@ -804,15 +800,15 @@ static int mac_probe(struct platform_device *_of_dev) /* Get the port handles */ nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL); if (unlikely(nph < 0)) { - dev_err(dev, "of_count_phandle_with_args(%s, fsl,fman-ports) failed\n", - mac_node->full_name); + dev_err(dev, "of_count_phandle_with_args(%pOF, fsl,fman-ports) failed\n", + mac_node); err = nph; goto _return_dev_set_drvdata; } if (nph != ARRAY_SIZE(mac_dev->port)) { - dev_err(dev, "Not supported number of fman-ports handles of mac node %s from device tree\n", - mac_node->full_name); + dev_err(dev, "Not supported number of fman-ports handles of mac node %pOF from device tree\n", + 
mac_node); err = -EINVAL; goto _return_dev_set_drvdata; } @@ -821,24 +817,24 @@ static int mac_probe(struct platform_device *_of_dev) /* Find the port node */ dev_node = of_parse_phandle(mac_node, "fsl,fman-ports", i); if (!dev_node) { - dev_err(dev, "of_parse_phandle(%s, fsl,fman-ports) failed\n", - mac_node->full_name); + dev_err(dev, "of_parse_phandle(%pOF, fsl,fman-ports) failed\n", + mac_node); err = -EINVAL; goto _return_of_node_put; } of_dev = of_find_device_by_node(dev_node); if (!of_dev) { - dev_err(dev, "of_find_device_by_node(%s) failed\n", - dev_node->full_name); + dev_err(dev, "of_find_device_by_node(%pOF) failed\n", + dev_node); err = -EINVAL; goto _return_of_node_put; } mac_dev->port[i] = fman_port_bind(&of_dev->dev); if (!mac_dev->port[i]) { - dev_err(dev, "dev_get_drvdata(%s) failed\n", - dev_node->full_name); + dev_err(dev, "dev_get_drvdata(%pOF) failed\n", + dev_node); err = -EINVAL; goto _return_of_node_put; } @@ -849,8 +845,8 @@ static int mac_probe(struct platform_device *_of_dev) phy_if = of_get_phy_mode(mac_node); if (phy_if < 0) { dev_warn(dev, - "of_get_phy_mode() for %s failed. Defaulting to SGMII\n", - mac_node->full_name); + "of_get_phy_mode() for %pOF failed. Defaulting to SGMII\n", + mac_node); phy_if = PHY_INTERFACE_MODE_SGMII; } priv->phy_if = phy_if; diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index a10de1e9c157..80ad16acf0f1 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -267,8 +267,8 @@ static void ucc_configure(phys_addr_t start, phys_addr_t end) ret = of_address_to_resource(np, 0, &res); if (ret < 0) { - pr_debug("fsl-pq-mdio: no address range in node %s\n", - np->full_name); + pr_debug("fsl-pq-mdio: no address range in node %pOF\n", + np); continue; } @@ -280,8 +280,8 @@ static void ucc_configure(phys_addr_t start, phys_addr_t end) if (!iprop) { iprop = of_get_property(np, "device-id", NULL); if (!iprop) { - pr_debug("fsl-pq-mdio: no UCC ID in node %s\n", - np->full_name); + pr_debug("fsl-pq-mdio: no UCC ID in node %pOF\n", + np); continue; } } @@ -293,8 +293,8 @@ static void ucc_configure(phys_addr_t start, phys_addr_t end) * numbered from 1, not 0. */ if (ucc_set_qe_mux_mii_mng(id - 1) < 0) { - pr_debug("fsl-pq-mdio: invalid UCC ID in node %s\n", - np->full_name); + pr_debug("fsl-pq-mdio: invalid UCC ID in node %pOF\n", + np); continue; } @@ -442,8 +442,8 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) if (data->get_tbipa) { for_each_child_of_node(np, tbi) { if (strcmp(tbi->type, "tbi-phy") == 0) { - dev_dbg(&pdev->dev, "found TBI PHY node %s\n", - strrchr(tbi->full_name, '/') + 1); + dev_dbg(&pdev->dev, "found TBI PHY node %pOFP\n", + tbi); break; } } @@ -454,8 +454,8 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) if (!prop) { dev_err(&pdev->dev, - "missing 'reg' property in node %s\n", - tbi->full_name); + "missing 'reg' property in node %pOF\n", + tbi); err = -EBUSY; goto error; } diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig index d11287e11371..91c7bdb9b43c 100644 --- a/drivers/net/ethernet/hisilicon/Kconfig +++ b/drivers/net/ethernet/hisilicon/Kconfig @@ -76,4 +76,31 @@ config HNS_ENET This selects the general ethernet driver for HNS. 
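The fsl/fman and fsl_pq_mdio conversions above all make the same substitution: rather than formatting np->full_name with %s, they hand the device-tree node pointer to the kernel's %pOF printf extension, which renders the node's path itself and let the stored full-path string be retired later. The shape of the change, quoted from the pattern above (kernel-only formatting, shown for illustration, not compilable outside the kernel):

/* before: caller digs the path string out of the node */
dev_err(dev, "of_get_parent(%s) failed\n", mac_node->full_name);

/* after: vsprintf renders the path from the node itself */
dev_err(dev, "of_get_parent(%pOF) failed\n", mac_node);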
This module makes use of any HNS AE driver, such as HNS_DSAF +config HNS3 + tristate "Hisilicon Network Subsystem Support HNS3 (Framework)" + depends on PCI + ---help--- + This selects the framework support for Hisilicon Network Subsystem 3. + This layer facilitates clients like ENET, RoCE and user-space ethernet + drivers (like ODP) to register with HNAE devices and their associated + operations. + +config HNS3_HCLGE + tristate "Hisilicon HNS3 HCLGE Acceleration Engine & Compatibility Layer Support" + depends on PCI_MSI + depends on HNS3 + ---help--- + This selects the HNS3_HCLGE network acceleration engine & its hardware + compatibility layer. The engine is used in the Hisilicon hip08 family of + SoCs and further upcoming SoCs. + +config HNS3_ENET + tristate "Hisilicon HNS3 Ethernet Device Support" + depends on 64BIT && PCI + depends on HNS3 && HNS3_HCLGE + ---help--- + This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08 + family of SoCs. This module depends upon the HNAE3 driver to access the HNAE3 + devices and their associated operations. + endif # NET_VENDOR_HISILICON diff --git a/drivers/net/ethernet/hisilicon/Makefile b/drivers/net/ethernet/hisilicon/Makefile index 8661695024dc..3828c435c18f 100644 --- a/drivers/net/ethernet/hisilicon/Makefile +++ b/drivers/net/ethernet/hisilicon/Makefile @@ -6,4 +6,5 @@ obj-$(CONFIG_HIX5HD2_GMAC) += hix5hd2_gmac.o obj-$(CONFIG_HIP04_ETH) += hip04_eth.o obj-$(CONFIG_HNS_MDIO) += hns_mdio.o obj-$(CONFIG_HNS) += hns/ +obj-$(CONFIG_HNS3) += hns3/ obj-$(CONFIG_HISI_FEMAC) += hisi_femac.o diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index 9d9b6e6dd988..a051e582d541 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c @@ -202,6 +202,7 @@ hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags) ring->q = q; ring->flags = flags; spin_lock_init(&ring->lock); + ring->coal_param = q->handle->coal_param; assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr); /* no matter for tx or rx ring, the ntc and ntu start from 0 */ diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index 7ba653af19cb..3e62692af011 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h @@ -89,6 +89,10 @@ do { \ #define RCB_RING_NAME_LEN 16 +#define HNAE_LOWEST_LATENCY_COAL_PARAM 30 +#define HNAE_LOW_LATENCY_COAL_PARAM 80 +#define HNAE_BULK_LATENCY_COAL_PARAM 150 + enum hnae_led_state { HNAE_LED_INACTIVE, HNAE_LED_ACTIVE, @@ -292,6 +296,12 @@ struct hnae_ring { int flags; /* ring attribute */ int irq_init_flag; + + /* total rx bytes after last rx rate calculated */ + u64 coal_last_rx_bytes; + unsigned long coal_last_jiffies; + u32 coal_param; + u32 coal_rx_rate; /* rx rate in MB */ }; #define ring_ptr_move_fw(ring, p) \ @@ -548,8 +558,13 @@ struct hnae_handle { u32 if_support; int q_num; int vf_id; + unsigned long coal_last_jiffies; + u32 coal_param; /* self-adaptive coalesce param */ + /* the ring index of last ring that set coal param */ + u32 coal_ring_idx; u32 eport_id; u32 dport_id; /* v2 tx bd should fill the dport_id */ + bool coal_adapt_en; enum hnae_port_type port_type; enum hnae_media_type media_type; struct list_head node; /* list to hnae_ae_dev->handle_list */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index a37166ee577b..bd68379d2bea 100644 ---
a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -99,6 +99,7 @@ struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev, ae_handle->owner_dev = dsaf_dev->dev; ae_handle->dev = dev; ae_handle->q_num = qnum_per_vf; + ae_handle->coal_param = HNAE_LOWEST_LATENCY_COAL_PARAM; /* find ring pair, and set vf id */ for (ae_handle->vf_id = 0; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 3987699f8fe6..36520634c96a 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -812,6 +812,113 @@ static int hns_desc_unused(struct hnae_ring *ring) return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu; } +#define HNS_LOWEST_LATENCY_RATE 27 /* 27 MB/s */ +#define HNS_LOW_LATENCY_RATE 80 /* 80 MB/s */ + +#define HNS_COAL_BDNUM 3 + +static u32 hns_coal_rx_bdnum(struct hnae_ring *ring) +{ + bool coal_enable = ring->q->handle->coal_adapt_en; + + if (coal_enable && + ring->coal_last_rx_bytes > HNS_LOWEST_LATENCY_RATE) + return HNS_COAL_BDNUM; + else + return 0; +} + +static void hns_update_rx_rate(struct hnae_ring *ring) +{ + bool coal_enable = ring->q->handle->coal_adapt_en; + u32 time_passed_ms; + u64 total_bytes; + + if (!coal_enable || + time_before(jiffies, ring->coal_last_jiffies + (HZ >> 4))) + return; + + /* ring->stats.rx_bytes overflowed */ + if (ring->coal_last_rx_bytes > ring->stats.rx_bytes) { + ring->coal_last_rx_bytes = ring->stats.rx_bytes; + ring->coal_last_jiffies = jiffies; + return; + } + + total_bytes = ring->stats.rx_bytes - ring->coal_last_rx_bytes; + time_passed_ms = jiffies_to_msecs(jiffies - ring->coal_last_jiffies); + do_div(total_bytes, time_passed_ms); + ring->coal_rx_rate = total_bytes >> 10; + + ring->coal_last_rx_bytes = ring->stats.rx_bytes; + ring->coal_last_jiffies = jiffies; +} + +/** + * smooth_alg - smoothing algorithm for adjusting coalesce parameter + **/ +static u32 smooth_alg(u32 new_param, u32 old_param) +{ + u32 gap = (new_param > old_param) ? new_param - old_param + : old_param - new_param; + + if (gap > 8) + gap >>= 3; + + if (new_param > old_param) + return old_param + gap; + else + return old_param - gap; + } ... wait
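smooth_alg() above damps large jumps: for a gap above 8 it moves only an eighth of the way toward the target. A standalone copy with worked numbers (stepping from the lowest-latency parameter 30 toward the bulk parameter 150 yields 45, not 150):

#include <assert.h>

/* Same smoothing as the driver: move at most 1/8 of a large gap. */
static unsigned int smooth_alg(unsigned int new_param, unsigned int old_param)
{
	unsigned int gap = (new_param > old_param) ? new_param - old_param
						   : old_param - new_param;

	if (gap > 8)
		gap >>= 3;

	return (new_param > old_param) ? old_param + gap : old_param - gap;
}

int main(void)
{
	/* 30 -> 150 advances only to 45; repeated calls converge gradually. */
	assert(smooth_alg(150, 30) == 45);
	assert(smooth_alg(150, 45) == 58);	/* gap 105 -> step 13 */
	return 0;
}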
+ */ + if (new_coal_param == handle->coal_param) { + handle->coal_last_jiffies = jiffies; + handle->coal_ring_idx = ring_data->queue_index; + } else if (new_coal_param > handle->coal_param || + handle->coal_ring_idx == ring_data->queue_index || + time_after(jiffies, handle->coal_last_jiffies + (HZ >> 4))) { + handle->dev->ops->set_coalesce_usecs(handle, + new_coal_param); + handle->dev->ops->set_coalesce_frames(handle, + 1, new_coal_param); + handle->coal_param = new_coal_param; + handle->coal_ring_idx = ring_data->queue_index; + handle->coal_last_jiffies = jiffies; + } +} + static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data, int budget, void *v) { @@ -868,20 +975,27 @@ static bool hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data) { struct hnae_ring *ring = ring_data->ring; int num = 0; + bool rx_stopped; - ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0); + hns_update_rx_rate(ring); /* for hardware bug fixed */ + ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0); num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM); - if (num > 0) { + if (num <= hns_coal_rx_bdnum(ring)) { + if (ring->q->handle->coal_adapt_en) + hns_nic_adpt_coalesce(ring_data); + + rx_stopped = true; + } else { ring_data->ring->q->handle->dev->ops->toggle_ring_irq( ring_data->ring, 1); - return false; - } else { - return true; + rx_stopped = false; } + + return rx_stopped; } static bool hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data) @@ -889,12 +1003,17 @@ static bool hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data) struct hnae_ring *ring = ring_data->ring; int num; + hns_update_rx_rate(ring); num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM); - if (!num) + if (num <= hns_coal_rx_bdnum(ring)) { + if (ring->q->handle->coal_adapt_en) + hns_nic_adpt_coalesce(ring_data); + return true; - else - return false; + } + + return false; } static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h index 9cb4c7884201..26e9afcbdd50 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h @@ -38,7 +38,7 @@ struct hns_nic_ring_data { struct hnae_ring *ring; struct napi_struct napi; cpumask_t mask; /* affinity mask */ - int queue_index; + u32 queue_index; int (*poll_one)(struct hns_nic_ring_data *, int, void *); void (*ex_process)(struct hns_nic_ring_data *, struct sk_buff *); bool (*fini_process)(struct hns_nic_ring_data *); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index a8db27e86a11..7ea7f8a4aa2a 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -595,7 +595,7 @@ static void hns_nic_self_test(struct net_device *ndev, set_bit(NIC_STATE_TESTING, &priv->state); if (if_running) - (void)dev_close(ndev); + dev_close(ndev); for (i = 0; i < SELF_TEST_TPYE_NUM; i++) { if (!st_param[i][1]) @@ -735,8 +735,8 @@ static int hns_get_coalesce(struct net_device *net_dev, ops = priv->ae_handle->dev->ops; - ec->use_adaptive_rx_coalesce = 1; - ec->use_adaptive_tx_coalesce = 1; + ec->use_adaptive_rx_coalesce = priv->ae_handle->coal_adapt_en; + ec->use_adaptive_tx_coalesce = priv->ae_handle->coal_adapt_en; if ((!ops->get_coalesce_usecs) || (!ops->get_max_coalesced_frames)) @@ -787,6 +787,9 @@ static int hns_set_coalesce(struct net_device *net_dev, (!ops->set_coalesce_frames)) 
return -ESRCH; + if (ec->use_adaptive_rx_coalesce != priv->ae_handle->coal_adapt_en) + priv->ae_handle->coal_adapt_en = ec->use_adaptive_rx_coalesce; + rc1 = ops->set_coalesce_usecs(priv->ae_handle, ec->rx_coalesce_usecs); diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile new file mode 100644 index 000000000000..a9349e1f3e51 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the HISILICON network device drivers. +# + +obj-$(CONFIG_HNS3) += hns3pf/ + +obj-$(CONFIG_HNS3) += hnae3.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c new file mode 100644 index 000000000000..59efbd605416 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -0,0 +1,300 @@ +/* + * Copyright (c) 2016-2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +#include "hnae3.h" + +static LIST_HEAD(hnae3_ae_algo_list); +static LIST_HEAD(hnae3_client_list); +static LIST_HEAD(hnae3_ae_dev_list); + +/* We are keeping things simple and using a single lock for all the + * lists. This is non-critical code, so other updates, if they happen + * in parallel, can wait. + */ +static DEFINE_MUTEX(hnae3_common_lock); + +static bool hnae3_client_match(enum hnae3_client_type client_type, + enum hnae3_dev_type dev_type) +{ + if ((dev_type == HNAE3_DEV_KNIC) && (client_type == HNAE3_CLIENT_KNIC || + client_type == HNAE3_CLIENT_ROCE)) + return true; + + if (dev_type == HNAE3_DEV_UNIC && client_type == HNAE3_CLIENT_UNIC) + return true; + + return false; +} + +static int hnae3_match_n_instantiate(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev, + bool is_reg, bool *matched) +{ + int ret; + + *matched = false; + + /* check if this client matches the type of ae_dev */ + if (!(hnae3_client_match(client->type, ae_dev->dev_type) && + hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) { + return 0; + } + /* there is a match of client and dev */ + *matched = true; + + /* now, (un-)instantiate client by calling lower layer */ + if (is_reg) { + ret = ae_dev->ops->init_client_instance(client, ae_dev); + if (ret) + dev_err(&ae_dev->pdev->dev, + "failed to instantiate client\n"); + return ret; + } + + ae_dev->ops->uninit_client_instance(client, ae_dev); + return 0; +} + +int hnae3_register_client(struct hnae3_client *client) +{ + struct hnae3_client *client_tmp; + struct hnae3_ae_dev *ae_dev; + bool matched; + int ret = 0; + + mutex_lock(&hnae3_common_lock); + /* one system should only have one client for every type */ + list_for_each_entry(client_tmp, &hnae3_client_list, node) { + if (client_tmp->type == client->type) + goto exit; + } + + list_add_tail(&client->node, &hnae3_client_list); + + /* initialize the client on every matched port */ + list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { + /* if the client could not be initialized on current port, for + * any error reason, move on to next available port + */ + ret = hnae3_match_n_instantiate(client, ae_dev, true, &matched); + if (ret) + dev_err(&ae_dev->pdev->dev, + "match and instantiation failed for port\n"); + } + +exit: + mutex_unlock(&hnae3_common_lock); + + return ret; +}
+EXPORT_SYMBOL(hnae3_register_client); + +void hnae3_unregister_client(struct hnae3_client *client) +{ + struct hnae3_ae_dev *ae_dev; + bool matched; + + mutex_lock(&hnae3_common_lock); + /* un-initialize the client on every matched port */ + list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { + hnae3_match_n_instantiate(client, ae_dev, false, &matched); + } + + list_del(&client->node); + mutex_unlock(&hnae3_common_lock); +} +EXPORT_SYMBOL(hnae3_unregister_client); + +/* hnae3_register_ae_algo - register an AE algorithm to the hnae3 framework + * @ae_algo: AE algorithm + * NOTE: duplicated names are not checked + */ +int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) +{ + const struct pci_device_id *id; + struct hnae3_ae_dev *ae_dev; + struct hnae3_client *client; + bool matched; + int ret = 0; + + mutex_lock(&hnae3_common_lock); + + list_add_tail(&ae_algo->node, &hnae3_ae_algo_list); + + /* Check if this algo/ops matches the list of ae_devs */ + list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { + id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); + if (!id) + continue; + + /* ae_dev init should set flag */ + ae_dev->ops = ae_algo->ops; + ret = ae_algo->ops->init_ae_dev(ae_dev); + if (ret) { + dev_err(&ae_dev->pdev->dev, "init ae_dev error.\n"); + continue; + } + + hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1); + + /* check the client list for a match with this ae_dev type and + * initialize the matching client instance + */ + list_for_each_entry(client, &hnae3_client_list, node) { + ret = hnae3_match_n_instantiate(client, ae_dev, true, + &matched); + if (ret) + dev_err(&ae_dev->pdev->dev, + "match and instantiation failed\n"); + if (matched) + break; + } + } + + mutex_unlock(&hnae3_common_lock); + + return ret; +} +EXPORT_SYMBOL(hnae3_register_ae_algo); + +/* hnae3_unregister_ae_algo - unregisters an AE algorithm + * @ae_algo: the AE algorithm to unregister + */ +void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo) +{ + const struct pci_device_id *id; + struct hnae3_ae_dev *ae_dev; + struct hnae3_client *client; + bool matched; + + mutex_lock(&hnae3_common_lock); + /* Check if there are any matched ae_devs */ + list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { + id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); + if (!id) + continue; + + /* check the client list for a match with this ae_dev type and + * uninitialize the matching client instance + */ + list_for_each_entry(client, &hnae3_client_list, node) { + hnae3_match_n_instantiate(client, ae_dev, false, + &matched); + if (matched) + break; + } + + ae_algo->ops->uninit_ae_dev(ae_dev); + hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); + } + + list_del(&ae_algo->node); + mutex_unlock(&hnae3_common_lock); +} +EXPORT_SYMBOL(hnae3_unregister_ae_algo); + +/* hnae3_register_ae_dev - registers an AE device to the hnae3 framework + * @ae_dev: the AE device + * NOTE: duplicated names are not checked + */ +int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) +{ + const struct pci_device_id *id; + struct hnae3_ae_algo *ae_algo; + struct hnae3_client *client; + bool matched; + int ret = 0; + + mutex_lock(&hnae3_common_lock); + list_add_tail(&ae_dev->node, &hnae3_ae_dev_list); + + /* Check if there is a matched ae_algo */ + list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) { + id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); + if (!id) + continue; + + ae_dev->ops = ae_algo->ops; + + if (!ae_dev->ops) { + dev_err(&ae_dev->pdev->dev, "ae_dev ops are null\n"); + goto out_err; + } + 
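A short aside on the shape of this code: hnae3_register_ae_algo() above and hnae3_register_ae_dev() (continued below) are mirror images, so initialization fires from whichever side registers last. A minimal user-space sketch of that rendezvous, with purely illustrative names:

#include <stdbool.h>
#include <stdio.h>

/* one "algo" and one "dev"; registration order must not matter */
struct side {
	bool registered;
};

static bool initialized;

static void try_init(struct side *algo, struct side *dev)
{
	/* mirrors the "walk the other list, init on match" pattern above */
	if (algo->registered && dev->registered && !initialized) {
		initialized = true;
		printf("ae_dev initialized\n");
	}
}

static void register_algo(struct side *algo, struct side *dev)
{
	algo->registered = true;
	try_init(algo, dev);
}

static void register_dev(struct side *algo, struct side *dev)
{
	dev->registered = true;
	try_init(algo, dev);
}

int main(void)
{
	struct side algo = { false }, dev = { false };

	register_dev(&algo, &dev);   /* nothing yet: no algo registered */
	register_algo(&algo, &dev);  /* now both present: init fires */
	return 0;
}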
+ /* ae_dev init should set flag */ + ret = ae_dev->ops->init_ae_dev(ae_dev); + if (ret) { + dev_err(&ae_dev->pdev->dev, "init ae_dev error\n"); + goto out_err; + } + + hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1); + break; + } + + /* check the client list for a match with this ae_dev type and + * initialize the matching client instance + */ + list_for_each_entry(client, &hnae3_client_list, node) { + ret = hnae3_match_n_instantiate(client, ae_dev, true, + &matched); + if (ret) + dev_err(&ae_dev->pdev->dev, + "match and instantiation failed\n"); + if (matched) + break; + } + +out_err: + mutex_unlock(&hnae3_common_lock); + + return ret; +} +EXPORT_SYMBOL(hnae3_register_ae_dev); + +/* hnae3_unregister_ae_dev - unregisters an AE device + * @ae_dev: the AE device to unregister + */ +void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev) +{ + const struct pci_device_id *id; + struct hnae3_ae_algo *ae_algo; + struct hnae3_client *client; + bool matched; + + mutex_lock(&hnae3_common_lock); + /* Check if there is a matched ae_algo */ + list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) { + id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); + if (!id) + continue; + + list_for_each_entry(client, &hnae3_client_list, node) { + hnae3_match_n_instantiate(client, ae_dev, false, + &matched); + if (matched) + break; + } + + ae_algo->ops->uninit_ae_dev(ae_dev); + hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); + } + + list_del(&ae_dev->node); + mutex_unlock(&hnae3_common_lock); +} +EXPORT_SYMBOL(hnae3_unregister_ae_dev); + +MODULE_AUTHOR("Huawei Tech. Co., Ltd."); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("HNAE3 (Hisilicon Network Acceleration Engine) Framework"); diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h new file mode 100644 index 000000000000..b2f28ae81273 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -0,0 +1,444 @@ +/* + * Copyright (c) 2016-2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version.
+ */ + +#ifndef __HNAE3_H +#define __HNAE3_H + +/* Names used in this framework: + * ae handle (handle): + * a set of queues provided by AE + * ring buffer queue (rbq): + * the channel between upper layer and the AE, can do tx and rx + * ring: + * a tx or rx channel within a rbq + * ring description (desc): + * an element in the ring with packet information + * buffer: + * a memory region referred to by desc with the full packet payload + * + * "num" means a static number set as a parameter, "count" means a dynamic + * number set while running + * "cb" means control block + */ + +#include <linux/acpi.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <linux/types.h> + +/* Device IDs */ +#define HNAE3_DEV_ID_GE 0xA220 +#define HNAE3_DEV_ID_25GE 0xA221 +#define HNAE3_DEV_ID_25GE_RDMA 0xA222 +#define HNAE3_DEV_ID_25GE_RDMA_MACSEC 0xA223 +#define HNAE3_DEV_ID_50GE_RDMA 0xA224 +#define HNAE3_DEV_ID_50GE_RDMA_MACSEC 0xA225 +#define HNAE3_DEV_ID_100G_RDMA_MACSEC 0xA226 +#define HNAE3_DEV_ID_100G_VF 0xA22E +#define HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF 0xA22F + +#define HNAE3_CLASS_NAME_SIZE 16 + +#define HNAE3_DEV_INITED_B 0x0 +#define HNAE_DEV_SUPPORT_ROCE_B 0x1 + +#define ring_ptr_move_fw(ring, p) \ + ((ring)->p = ((ring)->p + 1) % (ring)->desc_num) +#define ring_ptr_move_bw(ring, p) \ + ((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num) + +enum hns_desc_type { + DESC_TYPE_SKB, + DESC_TYPE_PAGE, +}; + +struct hnae3_handle; + +struct hnae3_queue { + void __iomem *io_base; + struct hnae3_ae_algo *ae_algo; + struct hnae3_handle *handle; + int tqp_index; /* index in a handle */ + u32 buf_size; /* size for hnae_desc->addr, preset by AE */ + u16 desc_num; /* total number of desc */ +}; + +/* hnae3 loop mode */ +enum hnae3_loop { + HNAE3_MAC_INTER_LOOP_MAC, + HNAE3_MAC_INTER_LOOP_SERDES, + HNAE3_MAC_INTER_LOOP_PHY, + HNAE3_MAC_LOOP_NONE, +}; + +enum hnae3_client_type { + HNAE3_CLIENT_KNIC, + HNAE3_CLIENT_UNIC, + HNAE3_CLIENT_ROCE, +}; + +enum hnae3_dev_type { + HNAE3_DEV_KNIC, + HNAE3_DEV_UNIC, +}; + +/* mac media type */ +enum hnae3_media_type { + HNAE3_MEDIA_TYPE_UNKNOWN, + HNAE3_MEDIA_TYPE_FIBER, + HNAE3_MEDIA_TYPE_COPPER, + HNAE3_MEDIA_TYPE_BACKPLANE, +}; + +struct hnae3_vector_info { + u8 __iomem *io_addr; + int vector; +}; + +#define HNAE3_RING_TYPE_B 0 +#define HNAE3_RING_TYPE_TX 0 +#define HNAE3_RING_TYPE_RX 1 + +struct hnae3_ring_chain_node { + struct hnae3_ring_chain_node *next; + u32 tqp_index; + u32 flag; +}; + +#define HNAE3_IS_TX_RING(node) \ + (((node)->flag & (1 << HNAE3_RING_TYPE_B)) == HNAE3_RING_TYPE_TX) + +struct hnae3_client_ops { + int (*init_instance)(struct hnae3_handle *handle); + void (*uninit_instance)(struct hnae3_handle *handle, bool reset); + void (*link_status_change)(struct hnae3_handle *handle, bool state); +}; + +#define HNAE3_CLIENT_NAME_LENGTH 16 +struct hnae3_client { + char name[HNAE3_CLIENT_NAME_LENGTH]; + u16 version; + unsigned long state; + enum hnae3_client_type type; + const struct hnae3_client_ops *ops; + struct list_head node; +}; + +struct hnae3_ae_dev { + struct pci_dev *pdev; + const struct hnae3_ae_ops *ops; + struct list_head node; + u32 flag; + enum hnae3_dev_type dev_type; + void *priv; +}; + +/* This struct defines the operation on the handle.
+ * + * init_ae_dev(): (mandatory) + * Get PF configuration from pci_dev and initialize PF hardware + * uninit_ae_dev() + * Disable PF device and release PF resources + * register_client() + * Register client to ae_dev + * unregister_client() + * Unregister client from ae_dev + * start() + * Enable the hardware + * stop() + * Disable the hardware + * get_status() + * Get the carrier state of the back channel of the handle, 1 for ok, 0 for + * non-ok + * get_ksettings_an_result() + * Get negotiation status, speed and duplex + * update_speed_duplex_h() + * Update hardware speed and duplex + * get_media_type() + * Get media type of MAC + * adjust_link() + * Adjust link status + * set_loopback() + * Set loopback + * set_promisc_mode() + * Set promisc mode + * set_mtu() + * Set mtu + * get_pauseparam() + * Get tx and rx pause frame usage + * set_pauseparam() + * Set tx and rx pause frame usage + * set_autoneg() + * Set autonegotiation of pause frame usage + * get_autoneg() + * Get autonegotiation of pause frame usage + * get_coalesce_usecs() + * Get usecs to delay a TX interrupt after a packet is sent + * get_rx_max_coalesced_frames() + * Get maximum number of packets to be sent before a TX interrupt. + * set_coalesce_usecs() + * Set usecs to delay a TX interrupt after a packet is sent + * set_coalesce_frames() + * Set maximum number of packets to be sent before a TX interrupt. + * get_mac_addr() + * Get mac address + * set_mac_addr() + * Set mac address + * add_uc_addr() + * Add unicast addr to mac table + * rm_uc_addr() + * Remove unicast addr from mac table + * set_mc_addr() + * Set multicast address + * add_mc_addr() + * Add multicast address to mac table + * rm_mc_addr() + * Remove multicast address from mac table + * update_stats() + * Update old network device statistics + * get_ethtool_stats() + * Get ethtool network device statistics + * get_strings() + * Get a set of strings that describe the requested objects + * get_sset_count() + * Get number of strings that @get_strings will write + * update_led_status() + * Update the led status + * set_led_id() + * Set led id + * get_regs() + * Get regs dump + * get_regs_len() + * Get the len of the regs dump + * get_rss_key_size() + * Get rss key size + * get_rss_indir_size() + * Get rss indirection table size + * get_rss() + * Get rss table + * set_rss() + * Set rss table + * get_tc_size() + * Get tc size of handle + * get_vector() + * Get vector number and vector information + * map_ring_to_vector() + * Map rings to vector + * unmap_ring_from_vector() + * Unmap rings from vector + * add_tunnel_udp() + * Add tunnel information to hardware + * del_tunnel_udp() + * Delete tunnel information from hardware + * reset_queue() + * Reset queue + * get_fw_version() + * Get firmware version + * get_mdix_mode() + * Get media type of phy + * set_vlan_filter() + * Set vlan filter config of ports + * set_vf_vlan_filter() + * Set vlan filter config of vf + */ +struct hnae3_ae_ops { + int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev); + void (*uninit_ae_dev)(struct hnae3_ae_dev *ae_dev); + + int (*init_client_instance)(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev); + void (*uninit_client_instance)(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev); + int (*start)(struct hnae3_handle *handle); + void (*stop)(struct hnae3_handle *handle); + int (*get_status)(struct hnae3_handle *handle); + void (*get_ksettings_an_result)(struct hnae3_handle *handle, + u8 *auto_neg, u32 *speed, u8 *duplex); + + int (*update_speed_duplex_h)(struct hnae3_handle
*handle); + int (*cfg_mac_speed_dup_h)(struct hnae3_handle *handle, int speed, + u8 duplex); + + void (*get_media_type)(struct hnae3_handle *handle, u8 *media_type); + void (*adjust_link)(struct hnae3_handle *handle, int speed, int duplex); + int (*set_loopback)(struct hnae3_handle *handle, + enum hnae3_loop loop_mode, bool en); + + void (*set_promisc_mode)(struct hnae3_handle *handle, u32 en); + int (*set_mtu)(struct hnae3_handle *handle, int new_mtu); + + void (*get_pauseparam)(struct hnae3_handle *handle, + u32 *auto_neg, u32 *rx_en, u32 *tx_en); + int (*set_pauseparam)(struct hnae3_handle *handle, + u32 auto_neg, u32 rx_en, u32 tx_en); + + int (*set_autoneg)(struct hnae3_handle *handle, bool enable); + int (*get_autoneg)(struct hnae3_handle *handle); + + void (*get_coalesce_usecs)(struct hnae3_handle *handle, + u32 *tx_usecs, u32 *rx_usecs); + void (*get_rx_max_coalesced_frames)(struct hnae3_handle *handle, + u32 *tx_frames, u32 *rx_frames); + int (*set_coalesce_usecs)(struct hnae3_handle *handle, u32 timeout); + int (*set_coalesce_frames)(struct hnae3_handle *handle, + u32 coalesce_frames); + void (*get_coalesce_range)(struct hnae3_handle *handle, + u32 *tx_frames_low, u32 *rx_frames_low, + u32 *tx_frames_high, u32 *rx_frames_high, + u32 *tx_usecs_low, u32 *rx_usecs_low, + u32 *tx_usecs_high, u32 *rx_usecs_high); + + void (*get_mac_addr)(struct hnae3_handle *handle, u8 *p); + int (*set_mac_addr)(struct hnae3_handle *handle, void *p); + int (*add_uc_addr)(struct hnae3_handle *handle, + const unsigned char *addr); + int (*rm_uc_addr)(struct hnae3_handle *handle, + const unsigned char *addr); + int (*set_mc_addr)(struct hnae3_handle *handle, void *addr); + int (*add_mc_addr)(struct hnae3_handle *handle, + const unsigned char *addr); + int (*rm_mc_addr)(struct hnae3_handle *handle, + const unsigned char *addr); + + void (*set_tso_stats)(struct hnae3_handle *handle, int enable); + void (*update_stats)(struct hnae3_handle *handle, + struct net_device_stats *net_stats); + void (*get_stats)(struct hnae3_handle *handle, u64 *data); + + void (*get_strings)(struct hnae3_handle *handle, + u32 stringset, u8 *data); + int (*get_sset_count)(struct hnae3_handle *handle, int stringset); + + void (*get_regs)(struct hnae3_handle *handle, void *data); + int (*get_regs_len)(struct hnae3_handle *handle); + + u32 (*get_rss_key_size)(struct hnae3_handle *handle); + u32 (*get_rss_indir_size)(struct hnae3_handle *handle); + int (*get_rss)(struct hnae3_handle *handle, u32 *indir, u8 *key, + u8 *hfunc); + int (*set_rss)(struct hnae3_handle *handle, const u32 *indir, + const u8 *key, const u8 hfunc); + + int (*get_tc_size)(struct hnae3_handle *handle); + + int (*get_vector)(struct hnae3_handle *handle, u16 vector_num, + struct hnae3_vector_info *vector_info); + int (*map_ring_to_vector)(struct hnae3_handle *handle, + int vector_num, + struct hnae3_ring_chain_node *vr_chain); + int (*unmap_ring_from_vector)(struct hnae3_handle *handle, + int vector_num, + struct hnae3_ring_chain_node *vr_chain); + + int (*add_tunnel_udp)(struct hnae3_handle *handle, u16 port_num); + int (*del_tunnel_udp)(struct hnae3_handle *handle, u16 port_num); + + void (*reset_queue)(struct hnae3_handle *handle, u16 queue_id); + u32 (*get_fw_version)(struct hnae3_handle *handle); + void (*get_mdix_mode)(struct hnae3_handle *handle, + u8 *tp_mdix_ctrl, u8 *tp_mdix); + + int (*set_vlan_filter)(struct hnae3_handle *handle, __be16 proto, + u16 vlan_id, bool is_kill); + int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid, + u16 vlan, u8 
qos, __be16 proto); +}; + +struct hnae3_ae_algo { + const struct hnae3_ae_ops *ops; + struct list_head node; + char name[HNAE3_CLASS_NAME_SIZE]; + const struct pci_device_id *pdev_id_table; +}; + +#define HNAE3_INT_NAME_LEN (IFNAMSIZ + 16) +#define HNAE3_ITR_COUNTDOWN_START 100 + +struct hnae3_tc_info { + u16 tqp_offset; /* TQP offset from base TQP */ + u16 tqp_count; /* Total TQPs */ + u8 up; /* user priority */ + u8 tc; /* TC index */ + bool enable; /* If this TC is enable or not */ +}; + +#define HNAE3_MAX_TC 8 +struct hnae3_knic_private_info { + struct net_device *netdev; /* Set by KNIC client when init instance */ + u16 rss_size; /* Allocated RSS queues */ + u16 rx_buf_len; + u16 num_desc; + + u8 num_tc; /* Total number of enabled TCs */ + struct hnae3_tc_info tc_info[HNAE3_MAX_TC]; /* Idx of array is HW TC */ + + u16 num_tqps; /* total number of TQPs in this handle */ + struct hnae3_queue **tqp; /* array base of all TQPs in this instance */ +}; + +struct hnae3_roce_private_info { + struct net_device *netdev; + void __iomem *roce_io_base; + int base_vector; + int num_vectors; +}; + +struct hnae3_unic_private_info { + struct net_device *netdev; + u16 rx_buf_len; + u16 num_desc; + u16 num_tqps; /* total number of tqps in this handle */ + struct hnae3_queue **tqp; /* array base of all TQPs of this instance */ +}; + +#define HNAE3_SUPPORT_MAC_LOOPBACK 1 +#define HNAE3_SUPPORT_PHY_LOOPBACK 2 +#define HNAE3_SUPPORT_SERDES_LOOPBACK 4 + +struct hnae3_handle { + struct hnae3_client *client; + struct pci_dev *pdev; + void *priv; + struct hnae3_ae_algo *ae_algo; /* the class who provides this handle */ + u64 flags; /* Indicate the capabilities for this handle*/ + + union { + struct net_device *netdev; /* first member */ + struct hnae3_knic_private_info kinfo; + struct hnae3_unic_private_info uinfo; + struct hnae3_roce_private_info rinfo; + }; + + u32 numa_node_mask; /* for multi-chip support */ +}; + +#define hnae_set_field(origin, mask, shift, val) \ + do { \ + (origin) &= (~(mask)); \ + (origin) |= ((val) << (shift)) & (mask); \ + } while (0) +#define hnae_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift)) + +#define hnae_set_bit(origin, shift, val) \ + hnae_set_field((origin), (0x1 << (shift)), (shift), (val)) +#define hnae_get_bit(origin, shift) \ + hnae_get_field((origin), (0x1 << (shift)), (shift)) + +int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev); +void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev); + +void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo); +int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo); + +void hnae3_unregister_client(struct hnae3_client *client); +int hnae3_register_client(struct hnae3_client *client); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile new file mode 100644 index 000000000000..162e8a42acd0 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile @@ -0,0 +1,11 @@ +# +# Makefile for the HISILICON network device drivers. 
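A self-contained sketch of the hnae_set_field()/hnae_get_bit() helpers defined in hnae3.h above: they read-modify-write a masked, shifted field inside a flag word, which is how HNAE3_DEV_INITED_B is tracked on ae_dev->flag. The macros are copied from the header; the surrounding test program is illustrative only.

#include <assert.h>
#include <stdio.h>

#define hnae_set_field(origin, mask, shift, val) \
	do { \
		(origin) &= (~(mask)); \
		(origin) |= ((val) << (shift)) & (mask); \
	} while (0)
#define hnae_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))

#define hnae_set_bit(origin, shift, val) \
	hnae_set_field((origin), (0x1 << (shift)), (shift), (val))
#define hnae_get_bit(origin, shift) \
	hnae_get_field((origin), (0x1 << (shift)), (shift))

#define DEV_INITED_B 0x0	/* mirrors HNAE3_DEV_INITED_B */

int main(void)
{
	unsigned int flag = 0;

	hnae_set_bit(flag, DEV_INITED_B, 1);	/* set after init_ae_dev() */
	assert(hnae_get_bit(flag, DEV_INITED_B) == 1);

	hnae_set_bit(flag, DEV_INITED_B, 0);	/* cleared on uninit */
	assert(hnae_get_bit(flag, DEV_INITED_B) == 0);

	printf("flag = 0x%x\n", flag);
	return 0;
}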
+# + +ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3 + +obj-$(CONFIG_HNS3_HCLGE) += hclge.o +hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o + +obj-$(CONFIG_HNS3_ENET) += hns3.o +hns3-objs = hns3_enet.o hns3_ethtool.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c new file mode 100644 index 000000000000..bc869842728f --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -0,0 +1,356 @@ +/* + * Copyright (c) 2016~2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/dma-mapping.h> +#include <linux/slab.h> +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/dma-direction.h> +#include "hclge_cmd.h" +#include "hnae3.h" +#include "hclge_main.h" + +#define hclge_is_csq(ring) ((ring)->flag & HCLGE_TYPE_CSQ) +#define hclge_ring_to_dma_dir(ring) (hclge_is_csq(ring) ? \ + DMA_TO_DEVICE : DMA_FROM_DEVICE) +#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev) + +static int hclge_ring_space(struct hclge_cmq_ring *ring) +{ + int ntu = ring->next_to_use; + int ntc = ring->next_to_clean; + int used = (ntu - ntc + ring->desc_num) % ring->desc_num; + + return ring->desc_num - used - 1; +} + +static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring) +{ + int size = ring->desc_num * sizeof(struct hclge_desc); + + ring->desc = kzalloc(size, GFP_KERNEL); + if (!ring->desc) + return -ENOMEM; + + ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc, + size, DMA_BIDIRECTIONAL); + if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) { + ring->desc_dma_addr = 0; + kfree(ring->desc); + ring->desc = NULL; + return -ENOMEM; + } + + return 0; +} + +static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring) +{ + dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr, + ring->desc_num * sizeof(ring->desc[0]), + DMA_BIDIRECTIONAL); + + ring->desc_dma_addr = 0; + kfree(ring->desc); + ring->desc = NULL; +} + +static int hclge_init_cmd_queue(struct hclge_dev *hdev, int ring_type) +{ + struct hclge_hw *hw = &hdev->hw; + struct hclge_cmq_ring *ring = + (ring_type == HCLGE_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq; + int ret; + + ring->flag = ring_type; + ring->dev = hdev; + + ret = hclge_alloc_cmd_desc(ring); + if (ret) { + dev_err(&hdev->pdev->dev, "descriptor %s alloc error %d\n", + (ring_type == HCLGE_TYPE_CSQ) ? 
"CSQ" : "CRQ", ret); + return ret; + } + + ring->next_to_clean = 0; + ring->next_to_use = 0; + + return 0; +} + +void hclge_cmd_setup_basic_desc(struct hclge_desc *desc, + enum hclge_opcode_type opcode, bool is_read) +{ + memset((void *)desc, 0, sizeof(struct hclge_desc)); + desc->opcode = cpu_to_le16(opcode); + desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN); + + if (is_read) + desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR); + else + desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR); +} + +static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring) +{ + dma_addr_t dma = ring->desc_dma_addr; + struct hclge_dev *hdev = ring->dev; + struct hclge_hw *hw = &hdev->hw; + + if (ring->flag == HCLGE_TYPE_CSQ) { + hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, + (u32)dma); + hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, + (u32)((dma >> 31) >> 1)); + hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, + (ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) | + HCLGE_NIC_CMQ_ENABLE); + hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0); + } else { + hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG, + (u32)dma); + hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG, + (u32)((dma >> 31) >> 1)); + hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG, + (ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) | + HCLGE_NIC_CMQ_ENABLE); + hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0); + } +} + +static void hclge_cmd_init_regs(struct hclge_hw *hw) +{ + hclge_cmd_config_regs(&hw->cmq.csq); + hclge_cmd_config_regs(&hw->cmq.crq); +} + +static int hclge_cmd_csq_clean(struct hclge_hw *hw) +{ + struct hclge_cmq_ring *csq = &hw->cmq.csq; + u16 ntc = csq->next_to_clean; + struct hclge_desc *desc; + int clean = 0; + u32 head; + + desc = &csq->desc[ntc]; + head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG); + + while (head != ntc) { + memset(desc, 0, sizeof(*desc)); + ntc++; + if (ntc == csq->desc_num) + ntc = 0; + desc = &csq->desc[ntc]; + clean++; + } + csq->next_to_clean = ntc; + + return clean; +} + +static int hclge_cmd_csq_done(struct hclge_hw *hw) +{ + u32 head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG); + return head == hw->cmq.csq.next_to_use; +} + +static bool hclge_is_special_opcode(u16 opcode) +{ + u16 spec_opcode[3] = {0x0030, 0x0031, 0x0032}; + int i; + + for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) { + if (spec_opcode[i] == opcode) + return true; + } + + return false; +} + +/** + * hclge_cmd_send - send command to command queue + * @hw: pointer to the hw struct + * @desc: prefilled descriptor for describing the command + * @num : the number of descriptors to be sent + * + * This is the main send command for command queue, it + * sends the queue, cleans the queue, etc + **/ +int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) +{ + struct hclge_dev *hdev = (struct hclge_dev *)hw->back; + struct hclge_desc *desc_to_use; + bool complete = false; + u32 timeout = 0; + int handle = 0; + int retval = 0; + u16 opcode, desc_ret; + int ntc; + + spin_lock_bh(&hw->cmq.csq.lock); + + if (num > hclge_ring_space(&hw->cmq.csq)) { + spin_unlock_bh(&hw->cmq.csq.lock); + return -EBUSY; + } + + /** + * Record the location of desc in the ring for this time + * which will be use for hardware to write back + */ + ntc = hw->cmq.csq.next_to_use; + opcode = desc[0].opcode; + while (handle < num) { + desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use]; + *desc_to_use = desc[handle]; + (hw->cmq.csq.next_to_use)++; + if 
(hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num) + hw->cmq.csq.next_to_use = 0; + handle++; + } + + /* Write to hardware */ + hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, hw->cmq.csq.next_to_use); + + /** + * If the command is sync, wait for the firmware to write back, + * if multiple descriptors are to be sent, use the first one to check + */ + if (HCLGE_SEND_SYNC(desc->flag)) { + do { + if (hclge_cmd_csq_done(hw)) + break; + udelay(1); + timeout++; + } while (timeout < hw->cmq.tx_timeout); + } + + if (hclge_cmd_csq_done(hw)) { + complete = true; + handle = 0; + while (handle < num) { + /* Get the result of hardware write back */ + desc_to_use = &hw->cmq.csq.desc[ntc]; + desc[handle] = *desc_to_use; + pr_debug("Get cmd desc:\n"); + + if (likely(!hclge_is_special_opcode(opcode))) + desc_ret = desc[handle].retval; + else + desc_ret = desc[0].retval; + + if ((enum hclge_cmd_return_status)desc_ret == + HCLGE_CMD_EXEC_SUCCESS) + retval = 0; + else + retval = -EIO; + hw->cmq.last_status = (enum hclge_cmd_status)desc_ret; + ntc++; + handle++; + if (ntc == hw->cmq.csq.desc_num) + ntc = 0; + } + } + + if (!complete) + retval = -EAGAIN; + + /* Clean the command send queue */ + handle = hclge_cmd_csq_clean(hw); + if (handle != num) { + dev_warn(&hdev->pdev->dev, + "cleaned %d, need to clean %d\n", handle, num); + } + + spin_unlock_bh(&hw->cmq.csq.lock); + + return retval; +} + +enum hclge_cmd_status hclge_cmd_query_firmware_version(struct hclge_hw *hw, + u32 *version) +{ + struct hclge_query_version *resp; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1); + resp = (struct hclge_query_version *)desc.data; + + ret = hclge_cmd_send(hw, &desc, 1); + if (!ret) + *version = le32_to_cpu(resp->firmware); + + return ret; +} + +int hclge_cmd_init(struct hclge_dev *hdev) +{ + u32 version; + int ret; + + /* Setup the queue entries for use by the cmd queues */ + hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM; + hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM; + + /* Setup the lock for command queue */ + spin_lock_init(&hdev->hw.cmq.csq.lock); + spin_lock_init(&hdev->hw.cmq.crq.lock); + + /* Setup Tx write back timeout */ + hdev->hw.cmq.tx_timeout = HCLGE_CMDQ_TX_TIMEOUT; + + /* Setup queue rings */ + ret = hclge_init_cmd_queue(hdev, HCLGE_TYPE_CSQ); + if (ret) { + dev_err(&hdev->pdev->dev, + "CSQ ring setup error %d\n", ret); + return ret; + } + + ret = hclge_init_cmd_queue(hdev, HCLGE_TYPE_CRQ); + if (ret) { + dev_err(&hdev->pdev->dev, + "CRQ ring setup error %d\n", ret); + goto err_csq; + } + + hclge_cmd_init_regs(&hdev->hw); + + ret = hclge_cmd_query_firmware_version(&hdev->hw, &version); + if (ret) { + dev_err(&hdev->pdev->dev, + "firmware version query failed %d\n", ret); + return ret; + } + hdev->fw_version = version; + + dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version); + + return 0; +err_csq: + hclge_free_cmd_desc(&hdev->hw.cmq.csq); + return ret; +} + +static void hclge_destroy_queue(struct hclge_cmq_ring *ring) +{ + spin_lock_bh(&ring->lock); + hclge_free_cmd_desc(ring); + spin_unlock_bh(&ring->lock); +} + +void hclge_destroy_cmd_queue(struct hclge_hw *hw) +{ + hclge_destroy_queue(&hw->cmq.csq); + hclge_destroy_queue(&hw->cmq.crq); +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h new file mode 100644 index 000000000000..91ae0135ee50 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -0,0 +1,740 @@ +/* + * Copyright (c) 2016~2017
Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __HCLGE_CMD_H +#define __HCLGE_CMD_H +#include <linux/types.h> +#include <linux/io.h> + +#define HCLGE_CMDQ_TX_TIMEOUT 1000 + +struct hclge_dev; +struct hclge_desc { + __le16 opcode; + +#define HCLGE_CMDQ_RX_INVLD_B 0 +#define HCLGE_CMDQ_RX_OUTVLD_B 1 + + __le16 flag; + __le16 retval; + __le16 rsv; + __le32 data[6]; +}; + +struct hclge_desc_cb { + dma_addr_t dma; + void *va; + u32 length; +}; + +struct hclge_cmq_ring { + dma_addr_t desc_dma_addr; + struct hclge_desc *desc; + struct hclge_desc_cb *desc_cb; + struct hclge_dev *dev; + u32 head; + u32 tail; + + u16 buf_size; + u16 desc_num; + int next_to_use; + int next_to_clean; + u8 flag; + spinlock_t lock; /* Command queue lock */ +}; + +enum hclge_cmd_return_status { + HCLGE_CMD_EXEC_SUCCESS = 0, + HCLGE_CMD_NO_AUTH = 1, + HCLGE_CMD_NOT_EXEC = 2, + HCLGE_CMD_QUEUE_FULL = 3, +}; + +enum hclge_cmd_status { + HCLGE_STATUS_SUCCESS = 0, + HCLGE_ERR_CSQ_FULL = -1, + HCLGE_ERR_CSQ_TIMEOUT = -2, + HCLGE_ERR_CSQ_ERROR = -3, +}; + +struct hclge_cmq { + struct hclge_cmq_ring csq; + struct hclge_cmq_ring crq; + u16 tx_timeout; /* Tx timeout */ + enum hclge_cmd_status last_status; +}; + +#define HCLGE_CMD_FLAG_IN_VALID_SHIFT 0 +#define HCLGE_CMD_FLAG_OUT_VALID_SHIFT 1 +#define HCLGE_CMD_FLAG_NEXT_SHIFT 2 +#define HCLGE_CMD_FLAG_WR_OR_RD_SHIFT 3 +#define HCLGE_CMD_FLAG_NO_INTR_SHIFT 4 +#define HCLGE_CMD_FLAG_ERR_INTR_SHIFT 5 + +#define HCLGE_CMD_FLAG_IN BIT(HCLGE_CMD_FLAG_IN_VALID_SHIFT) +#define HCLGE_CMD_FLAG_OUT BIT(HCLGE_CMD_FLAG_OUT_VALID_SHIFT) +#define HCLGE_CMD_FLAG_NEXT BIT(HCLGE_CMD_FLAG_NEXT_SHIFT) +#define HCLGE_CMD_FLAG_WR BIT(HCLGE_CMD_FLAG_WR_OR_RD_SHIFT) +#define HCLGE_CMD_FLAG_NO_INTR BIT(HCLGE_CMD_FLAG_NO_INTR_SHIFT) +#define HCLGE_CMD_FLAG_ERR_INTR BIT(HCLGE_CMD_FLAG_ERR_INTR_SHIFT) + +enum hclge_opcode_type { + /* Generic command */ + HCLGE_OPC_QUERY_FW_VER = 0x0001, + HCLGE_OPC_CFG_RST_TRIGGER = 0x0020, + HCLGE_OPC_GBL_RST_STATUS = 0x0021, + HCLGE_OPC_QUERY_FUNC_STATUS = 0x0022, + HCLGE_OPC_QUERY_PF_RSRC = 0x0023, + HCLGE_OPC_QUERY_VF_RSRC = 0x0024, + HCLGE_OPC_GET_CFG_PARAM = 0x0025, + + HCLGE_OPC_STATS_64_BIT = 0x0030, + HCLGE_OPC_STATS_32_BIT = 0x0031, + HCLGE_OPC_STATS_MAC = 0x0032, + /* Device management command */ + + /* MAC commond */ + HCLGE_OPC_CONFIG_MAC_MODE = 0x0301, + HCLGE_OPC_CONFIG_AN_MODE = 0x0304, + HCLGE_OPC_QUERY_AN_RESULT = 0x0306, + HCLGE_OPC_QUERY_LINK_STATUS = 0x0307, + HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308, + HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309, + /* MACSEC command */ + + /* PFC/Pause CMD*/ + HCLGE_OPC_CFG_MAC_PAUSE_EN = 0x0701, + HCLGE_OPC_CFG_PFC_PAUSE_EN = 0x0702, + HCLGE_OPC_CFG_MAC_PARA = 0x0703, + HCLGE_OPC_CFG_PFC_PARA = 0x0704, + HCLGE_OPC_QUERY_MAC_TX_PKT_CNT = 0x0705, + HCLGE_OPC_QUERY_MAC_RX_PKT_CNT = 0x0706, + HCLGE_OPC_QUERY_PFC_TX_PKT_CNT = 0x0707, + HCLGE_OPC_QUERY_PFC_RX_PKT_CNT = 0x0708, + HCLGE_OPC_PRI_TO_TC_MAPPING = 0x0709, + HCLGE_OPC_QOS_MAP = 0x070A, + + /* ETS/scheduler commands */ + HCLGE_OPC_TM_PG_TO_PRI_LINK = 0x0804, + HCLGE_OPC_TM_QS_TO_PRI_LINK = 0x0805, + HCLGE_OPC_TM_NQ_TO_QS_LINK = 0x0806, + HCLGE_OPC_TM_RQ_TO_QS_LINK = 0x0807, + HCLGE_OPC_TM_PORT_WEIGHT = 0x0808, + HCLGE_OPC_TM_PG_WEIGHT = 0x0809, + HCLGE_OPC_TM_QS_WEIGHT = 0x080A, + HCLGE_OPC_TM_PRI_WEIGHT = 0x080B, + 
HCLGE_OPC_TM_PRI_C_SHAPPING = 0x080C, + HCLGE_OPC_TM_PRI_P_SHAPPING = 0x080D, + HCLGE_OPC_TM_PG_C_SHAPPING = 0x080E, + HCLGE_OPC_TM_PG_P_SHAPPING = 0x080F, + HCLGE_OPC_TM_PORT_SHAPPING = 0x0810, + HCLGE_OPC_TM_PG_SCH_MODE_CFG = 0x0812, + HCLGE_OPC_TM_PRI_SCH_MODE_CFG = 0x0813, + HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814, + HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815, + + /* Packet buffer allocate command */ + HCLGE_OPC_TX_BUFF_ALLOC = 0x0901, + HCLGE_OPC_RX_PRIV_BUFF_ALLOC = 0x0902, + HCLGE_OPC_RX_PRIV_WL_ALLOC = 0x0903, + HCLGE_OPC_RX_COM_THRD_ALLOC = 0x0904, + HCLGE_OPC_RX_COM_WL_ALLOC = 0x0905, + HCLGE_OPC_RX_GBL_PKT_CNT = 0x0906, + + /* PTP command */ + /* TQP management command */ + HCLGE_OPC_SET_TQP_MAP = 0x0A01, + + /* TQP command */ + HCLGE_OPC_CFG_TX_QUEUE = 0x0B01, + HCLGE_OPC_QUERY_TX_POINTER = 0x0B02, + HCLGE_OPC_QUERY_TX_STATUS = 0x0B03, + HCLGE_OPC_CFG_RX_QUEUE = 0x0B11, + HCLGE_OPC_QUERY_RX_POINTER = 0x0B12, + HCLGE_OPC_QUERY_RX_STATUS = 0x0B13, + HCLGE_OPC_STASH_RX_QUEUE_LRO = 0x0B16, + HCLGE_OPC_CFG_RX_QUEUE_LRO = 0x0B17, + HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20, + HCLGE_OPC_RESET_TQP_QUEUE = 0x0B22, + + /* TSO cmd */ + HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01, + + /* RSS cmd */ + HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01, + HCLGE_OPC_RSS_INDIR_TABLE = 0x0D07, + HCLGE_OPC_RSS_TC_MODE = 0x0D08, + HCLGE_OPC_RSS_INPUT_TUPLE = 0x0D02, + + /* Promisuous mode command */ + HCLGE_OPC_CFG_PROMISC_MODE = 0x0E01, + + /* Interrupts cmd */ + HCLGE_OPC_ADD_RING_TO_VECTOR = 0x1503, + HCLGE_OPC_DEL_RING_TO_VECTOR = 0x1504, + + /* MAC command */ + HCLGE_OPC_MAC_VLAN_ADD = 0x1000, + HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001, + HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002, + HCLGE_OPC_MAC_VLAN_INSERT = 0x1003, + HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010, + HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011, + + /* Multicast linear table cmd */ + HCLGE_OPC_MTA_MAC_MODE_CFG = 0x1020, + HCLGE_OPC_MTA_MAC_FUNC_CFG = 0x1021, + HCLGE_OPC_MTA_TBL_ITEM_CFG = 0x1022, + HCLGE_OPC_MTA_TBL_ITEM_QUERY = 0x1023, + + /* VLAN command */ + HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100, + HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101, + HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102, + + /* MDIO command */ + HCLGE_OPC_MDIO_CONFIG = 0x1900, + + /* QCN command */ + HCLGE_OPC_QCN_MOD_CFG = 0x1A01, + HCLGE_OPC_QCN_GRP_TMPLT_CFG = 0x1A02, + HCLGE_OPC_QCN_SHAPPING_IR_CFG = 0x1A03, + HCLGE_OPC_QCN_SHAPPING_BS_CFG = 0x1A04, + HCLGE_OPC_QCN_QSET_LINK_CFG = 0x1A05, + HCLGE_OPC_QCN_RP_STATUS_GET = 0x1A06, + HCLGE_OPC_QCN_AJUST_INIT = 0x1A07, + HCLGE_OPC_QCN_DFX_CNT_STATUS = 0x1A08, + + /* Mailbox cmd */ + HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000, +}; + +#define HCLGE_TQP_REG_OFFSET 0x80000 +#define HCLGE_TQP_REG_SIZE 0x200 + +#define HCLGE_RCB_INIT_QUERY_TIMEOUT 10 +#define HCLGE_RCB_INIT_FLAG_EN_B 0 +#define HCLGE_RCB_INIT_FLAG_FINI_B 8 +struct hclge_config_rcb_init { + __le16 rcb_init_flag; + u8 rsv[22]; +}; + +struct hclge_tqp_map { + __le16 tqp_id; /* Absolute tqp id for in this pf */ + u8 tqp_vf; /* VF id */ +#define HCLGE_TQP_MAP_TYPE_PF 0 +#define HCLGE_TQP_MAP_TYPE_VF 1 +#define HCLGE_TQP_MAP_TYPE_B 0 +#define HCLGE_TQP_MAP_EN_B 1 + u8 tqp_flag; /* Indicate it's pf or vf tqp */ + __le16 tqp_vid; /* Virtual id in this pf/vf */ + u8 rsv[18]; +}; + +#define HCLGE_VECTOR_ELEMENTS_PER_CMD 11 + +enum hclge_int_type { + HCLGE_INT_TX, + HCLGE_INT_RX, + HCLGE_INT_EVENT, +}; + +struct hclge_ctrl_vector_chain { + u8 int_vector_id; + u8 int_cause_num; +#define HCLGE_INT_TYPE_S 0 +#define HCLGE_INT_TYPE_M 0x3 +#define HCLGE_TQP_ID_S 2 +#define HCLGE_TQP_ID_M (0x3fff << HCLGE_TQP_ID_S) + __le16 
tqp_type_and_id[HCLGE_VECTOR_ELEMENTS_PER_CMD]; +}; + +#define HCLGE_TC_NUM 8 +#define HCLGE_TC0_PRI_BUF_EN_B 15 /* Bit 15 indicate enable or not */ +#define HCLGE_BUF_UNIT_S 7 /* Buf size is united by 128 bytes */ +struct hclge_tx_buff_alloc { + __le16 tx_pkt_buff[HCLGE_TC_NUM]; + u8 tx_buff_rsv[8]; +}; + +struct hclge_rx_priv_buff { + __le16 buf_num[HCLGE_TC_NUM]; + u8 rsv[8]; +}; + +struct hclge_query_version { + __le32 firmware; + __le32 firmware_rsv[5]; +}; + +#define HCLGE_RX_PRIV_EN_B 15 +#define HCLGE_TC_NUM_ONE_DESC 4 +struct hclge_priv_wl { + __le16 high; + __le16 low; +}; + +struct hclge_rx_priv_wl_buf { + struct hclge_priv_wl tc_wl[HCLGE_TC_NUM_ONE_DESC]; +}; + +struct hclge_rx_com_thrd { + struct hclge_priv_wl com_thrd[HCLGE_TC_NUM_ONE_DESC]; +}; + +struct hclge_rx_com_wl { + struct hclge_priv_wl com_wl; +}; + +struct hclge_waterline { + u32 low; + u32 high; +}; + +struct hclge_tc_thrd { + u32 low; + u32 high; +}; + +struct hclge_priv_buf { + struct hclge_waterline wl; /* Waterline for low and high*/ + u32 buf_size; /* TC private buffer size */ + u32 enable; /* Enable TC private buffer or not */ +}; + +#define HCLGE_MAX_TC_NUM 8 +struct hclge_shared_buf { + struct hclge_waterline self; + struct hclge_tc_thrd tc_thrd[HCLGE_MAX_TC_NUM]; + u32 buf_size; +}; + +#define HCLGE_RX_COM_WL_EN_B 15 +struct hclge_rx_com_wl_buf { + __le16 high_wl; + __le16 low_wl; + u8 rsv[20]; +}; + +#define HCLGE_RX_PKT_EN_B 15 +struct hclge_rx_pkt_buf { + __le16 high_pkt; + __le16 low_pkt; + u8 rsv[20]; +}; + +#define HCLGE_PF_STATE_DONE_B 0 +#define HCLGE_PF_STATE_MAIN_B 1 +#define HCLGE_PF_STATE_BOND_B 2 +#define HCLGE_PF_STATE_MAC_N_B 6 +#define HCLGE_PF_MAC_NUM_MASK 0x3 +#define HCLGE_PF_STATE_MAIN BIT(HCLGE_PF_STATE_MAIN_B) +#define HCLGE_PF_STATE_DONE BIT(HCLGE_PF_STATE_DONE_B) +struct hclge_func_status { + __le32 vf_rst_state[4]; + u8 pf_state; + u8 mac_id; + u8 rsv1; + u8 pf_cnt_in_mac; + u8 pf_num; + u8 vf_num; + u8 rsv[2]; +}; + +struct hclge_pf_res { + __le16 tqp_num; + __le16 buf_size; + __le16 msixcap_localid_ba_nic; + __le16 msixcap_localid_ba_rocee; +#define HCLGE_PF_VEC_NUM_S 0 +#define HCLGE_PF_VEC_NUM_M (0xff << HCLGE_PF_VEC_NUM_S) + __le16 pf_intr_vector_number; + __le16 pf_own_fun_number; + __le32 rsv[3]; +}; + +#define HCLGE_CFG_OFFSET_S 0 +#define HCLGE_CFG_OFFSET_M 0xfffff /* Byte (8-10.3) */ +#define HCLGE_CFG_RD_LEN_S 24 +#define HCLGE_CFG_RD_LEN_M (0xf << HCLGE_CFG_RD_LEN_S) +#define HCLGE_CFG_RD_LEN_BYTES 16 +#define HCLGE_CFG_RD_LEN_UNIT 4 + +#define HCLGE_CFG_VMDQ_S 0 +#define HCLGE_CFG_VMDQ_M (0xff << HCLGE_CFG_VMDQ_S) +#define HCLGE_CFG_TC_NUM_S 8 +#define HCLGE_CFG_TC_NUM_M (0xff << HCLGE_CFG_TC_NUM_S) +#define HCLGE_CFG_TQP_DESC_N_S 16 +#define HCLGE_CFG_TQP_DESC_N_M (0xffff << HCLGE_CFG_TQP_DESC_N_S) +#define HCLGE_CFG_PHY_ADDR_S 0 +#define HCLGE_CFG_PHY_ADDR_M (0x1f << HCLGE_CFG_PHY_ADDR_S) +#define HCLGE_CFG_MEDIA_TP_S 8 +#define HCLGE_CFG_MEDIA_TP_M (0xff << HCLGE_CFG_MEDIA_TP_S) +#define HCLGE_CFG_RX_BUF_LEN_S 16 +#define HCLGE_CFG_RX_BUF_LEN_M (0xffff << HCLGE_CFG_RX_BUF_LEN_S) +#define HCLGE_CFG_MAC_ADDR_H_S 0 +#define HCLGE_CFG_MAC_ADDR_H_M (0xffff << HCLGE_CFG_MAC_ADDR_H_S) +#define HCLGE_CFG_DEFAULT_SPEED_S 16 +#define HCLGE_CFG_DEFAULT_SPEED_M (0xff << HCLGE_CFG_DEFAULT_SPEED_S) + +struct hclge_cfg_param { + __le32 offset; + __le32 rsv; + __le32 param[4]; +}; + +#define HCLGE_MAC_MODE 0x0 +#define HCLGE_DESC_NUM 0x40 + +#define HCLGE_ALLOC_VALID_B 0 +struct hclge_vf_num { + u8 alloc_valid; + u8 rsv[23]; +}; + +#define HCLGE_RSS_DEFAULT_OUTPORT_B 4 +#define 
HCLGE_RSS_HASH_KEY_OFFSET_B 4 +#define HCLGE_RSS_HASH_KEY_NUM 16 +struct hclge_rss_config { + u8 hash_config; + u8 rsv[7]; + u8 hash_key[HCLGE_RSS_HASH_KEY_NUM]; +}; + +struct hclge_rss_input_tuple { + u8 ipv4_tcp_en; + u8 ipv4_udp_en; + u8 ipv4_sctp_en; + u8 ipv4_fragment_en; + u8 ipv6_tcp_en; + u8 ipv6_udp_en; + u8 ipv6_sctp_en; + u8 ipv6_fragment_en; + u8 rsv[16]; +}; + +#define HCLGE_RSS_CFG_TBL_SIZE 16 + +struct hclge_rss_indirection_table { + u16 start_table_index; + u16 rss_set_bitmap; + u8 rsv[4]; + u8 rss_result[HCLGE_RSS_CFG_TBL_SIZE]; +}; + +#define HCLGE_RSS_TC_OFFSET_S 0 +#define HCLGE_RSS_TC_OFFSET_M (0x3ff << HCLGE_RSS_TC_OFFSET_S) +#define HCLGE_RSS_TC_SIZE_S 12 +#define HCLGE_RSS_TC_SIZE_M (0x7 << HCLGE_RSS_TC_SIZE_S) +#define HCLGE_RSS_TC_VALID_B 15 +struct hclge_rss_tc_mode { + u16 rss_tc_mode[HCLGE_MAX_TC_NUM]; + u8 rsv[8]; +}; + +#define HCLGE_LINK_STS_B 0 +#define HCLGE_LINK_STATUS BIT(HCLGE_LINK_STS_B) +struct hclge_link_status { + u8 status; + u8 rsv[23]; +}; + +struct hclge_promisc_param { + u8 vf_id; + u8 enable; +}; + +#define HCLGE_PROMISC_EN_B 1 +#define HCLGE_PROMISC_EN_ALL 0x7 +#define HCLGE_PROMISC_EN_UC 0x1 +#define HCLGE_PROMISC_EN_MC 0x2 +#define HCLGE_PROMISC_EN_BC 0x4 +struct hclge_promisc_cfg { + u8 flag; + u8 vf_id; + __le16 rsv0; + u8 rsv1[20]; +}; + +enum hclge_promisc_type { + HCLGE_UNICAST = 1, + HCLGE_MULTICAST = 2, + HCLGE_BROADCAST = 3, +}; + +#define HCLGE_MAC_TX_EN_B 6 +#define HCLGE_MAC_RX_EN_B 7 +#define HCLGE_MAC_PAD_TX_B 11 +#define HCLGE_MAC_PAD_RX_B 12 +#define HCLGE_MAC_1588_TX_B 13 +#define HCLGE_MAC_1588_RX_B 14 +#define HCLGE_MAC_APP_LP_B 15 +#define HCLGE_MAC_LINE_LP_B 16 +#define HCLGE_MAC_FCS_TX_B 17 +#define HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B 18 +#define HCLGE_MAC_RX_FCS_STRIP_B 19 +#define HCLGE_MAC_RX_FCS_B 20 +#define HCLGE_MAC_TX_UNDER_MIN_ERR_B 21 +#define HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B 22 + +struct hclge_config_mac_mode { + __le32 txrx_pad_fcs_loop_en; + u8 rsv[20]; +}; + +#define HCLGE_CFG_SPEED_S 0 +#define HCLGE_CFG_SPEED_M (0x3f << HCLGE_CFG_SPEED_S) + +#define HCLGE_CFG_DUPLEX_B 7 +#define HCLGE_CFG_DUPLEX_M BIT(HCLGE_CFG_DUPLEX_B) + +struct hclge_config_mac_speed_dup { + u8 speed_dup; + +#define HCLGE_CFG_MAC_SPEED_CHANGE_EN_B 0 + u8 mac_change_fec_en; + u8 rsv[22]; +}; + +#define HCLGE_QUERY_SPEED_S 3 +#define HCLGE_QUERY_AN_B 0 +#define HCLGE_QUERY_DUPLEX_B 2 + +#define HCLGE_QUERY_SPEED_M (0x1f << HCLGE_QUERY_SPEED_S) +#define HCLGE_QUERY_AN_M BIT(HCLGE_QUERY_AN_B) +#define HCLGE_QUERY_DUPLEX_M BIT(HCLGE_QUERY_DUPLEX_B) + +struct hclge_query_an_speed_dup { + u8 an_syn_dup_speed; + u8 pause; + u8 rsv[23]; +}; + +#define HCLGE_RING_ID_MASK 0x3ff +#define HCLGE_TQP_ENABLE_B 0 + +#define HCLGE_MAC_CFG_AN_EN_B 0 +#define HCLGE_MAC_CFG_AN_INT_EN_B 1 +#define HCLGE_MAC_CFG_AN_INT_MSK_B 2 +#define HCLGE_MAC_CFG_AN_INT_CLR_B 3 +#define HCLGE_MAC_CFG_AN_RST_B 4 + +#define HCLGE_MAC_CFG_AN_EN BIT(HCLGE_MAC_CFG_AN_EN_B) + +struct hclge_config_auto_neg { + __le32 cfg_an_cmd_flag; + u8 rsv[20]; +}; + +#define HCLGE_MAC_MIN_MTU 64 +#define HCLGE_MAC_MAX_MTU 9728 +#define HCLGE_MAC_UPLINK_PORT 0x100 + +struct hclge_config_max_frm_size { + __le16 max_frm_size; + u8 rsv[22]; +}; + +enum hclge_mac_vlan_tbl_opcode { + HCLGE_MAC_VLAN_ADD, /* Add new or modify mac_vlan */ + HCLGE_MAC_VLAN_UPDATE, /* Modify other fields of this table */ + HCLGE_MAC_VLAN_REMOVE, /* Remove a entry through mac_vlan key */ + HCLGE_MAC_VLAN_LKUP, /* Lookup a entry through mac_vlan key */ +}; + +#define HCLGE_MAC_VLAN_BIT0_EN_B 0x0 +#define 
HCLGE_MAC_VLAN_BIT1_EN_B 0x1 +#define HCLGE_MAC_EPORT_SW_EN_B 0xc +#define HCLGE_MAC_EPORT_TYPE_B 0xb +#define HCLGE_MAC_EPORT_VFID_S 0x3 +#define HCLGE_MAC_EPORT_VFID_M (0xff << HCLGE_MAC_EPORT_VFID_S) +#define HCLGE_MAC_EPORT_PFID_S 0x0 +#define HCLGE_MAC_EPORT_PFID_M (0x7 << HCLGE_MAC_EPORT_PFID_S) +struct hclge_mac_vlan_tbl_entry { + u8 flags; + u8 resp_code; + __le16 vlan_tag; + __le32 mac_addr_hi32; + __le16 mac_addr_lo16; + __le16 rsv1; + u8 entry_type; + u8 mc_mac_en; + __le16 egress_port; + __le16 egress_queue; + u8 rsv2[6]; +}; + +#define HCLGE_CFG_MTA_MAC_SEL_S 0x0 +#define HCLGE_CFG_MTA_MAC_SEL_M (0x3 << HCLGE_CFG_MTA_MAC_SEL_S) +#define HCLGE_CFG_MTA_MAC_EN_B 0x7 +struct hclge_mta_filter_mode { + u8 dmac_sel_en; /* Use lowest 2 bit as sel_mode, bit 7 as enable */ + u8 rsv[23]; +}; + +#define HCLGE_CFG_FUNC_MTA_ACCEPT_B 0x0 +struct hclge_cfg_func_mta_filter { + u8 accept; /* Only used lowest 1 bit */ + u8 function_id; + u8 rsv[22]; +}; + +#define HCLGE_CFG_MTA_ITEM_ACCEPT_B 0x0 +#define HCLGE_CFG_MTA_ITEM_IDX_S 0x0 +#define HCLGE_CFG_MTA_ITEM_IDX_M (0xfff << HCLGE_CFG_MTA_ITEM_IDX_S) +struct hclge_cfg_func_mta_item { + u16 item_idx; /* Only used lowest 12 bit */ + u8 accept; /* Only used lowest 1 bit */ + u8 rsv[21]; +}; + +struct hclge_mac_vlan_add { + __le16 flags; + __le16 mac_addr_hi16; + __le32 mac_addr_lo32; + __le32 mac_addr_msk_hi32; + __le16 mac_addr_msk_lo16; + __le16 vlan_tag; + __le16 ingress_port; + __le16 egress_port; + u8 rsv[4]; +}; + +#define HNS3_MAC_VLAN_CFG_FLAG_BIT 0 +struct hclge_mac_vlan_remove { + __le16 flags; + __le16 mac_addr_hi16; + __le32 mac_addr_lo32; + __le32 mac_addr_msk_hi32; + __le16 mac_addr_msk_lo16; + __le16 vlan_tag; + __le16 ingress_port; + __le16 egress_port; + u8 rsv[4]; +}; + +struct hclge_vlan_filter_ctrl { + u8 vlan_type; + u8 vlan_fe; + u8 rsv[22]; +}; + +struct hclge_vlan_filter_pf_cfg { + u8 vlan_offset; + u8 vlan_cfg; + u8 rsv[2]; + u8 vlan_offset_bitmap[20]; +}; + +struct hclge_vlan_filter_vf_cfg { + u16 vlan_id; + u8 resp_code; + u8 rsv; + u8 vlan_cfg; + u8 rsv1[3]; + u8 vf_bitmap[16]; +}; + +struct hclge_cfg_com_tqp_queue { + __le16 tqp_id; + __le16 stream_id; + u8 enable; + u8 rsv[19]; +}; + +struct hclge_cfg_tx_queue_pointer { + __le16 tqp_id; + __le16 tx_tail; + __le16 tx_head; + __le16 fbd_num; + __le16 ring_offset; + u8 rsv[14]; +}; + +#define HCLGE_TSO_MSS_MIN_S 0 +#define HCLGE_TSO_MSS_MIN_M (0x3FFF << HCLGE_TSO_MSS_MIN_S) + +#define HCLGE_TSO_MSS_MAX_S 16 +#define HCLGE_TSO_MSS_MAX_M (0x3FFF << HCLGE_TSO_MSS_MAX_S) + +struct hclge_cfg_tso_status { + __le16 tso_mss_min; + __le16 tso_mss_max; + u8 rsv[20]; +}; + +#define HCLGE_TSO_MSS_MIN 256 +#define HCLGE_TSO_MSS_MAX 9668 + +#define HCLGE_TQP_RESET_B 0 +struct hclge_reset_tqp_queue { + __le16 tqp_id; + u8 reset_req; + u8 ready_to_reset; + u8 rsv[20]; +}; + +#define HCLGE_DEFAULT_TX_BUF 0x4000 /* 16k bytes */ +#define HCLGE_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */ +#define HCLGE_DEFAULT_DV 0xA000 /* 40k byte */ + +#define HCLGE_TYPE_CRQ 0 +#define HCLGE_TYPE_CSQ 1 +#define HCLGE_NIC_CSQ_BASEADDR_L_REG 0x27000 +#define HCLGE_NIC_CSQ_BASEADDR_H_REG 0x27004 +#define HCLGE_NIC_CSQ_DEPTH_REG 0x27008 +#define HCLGE_NIC_CSQ_TAIL_REG 0x27010 +#define HCLGE_NIC_CSQ_HEAD_REG 0x27014 +#define HCLGE_NIC_CRQ_BASEADDR_L_REG 0x27018 +#define HCLGE_NIC_CRQ_BASEADDR_H_REG 0x2701c +#define HCLGE_NIC_CRQ_DEPTH_REG 0x27020 +#define HCLGE_NIC_CRQ_TAIL_REG 0x27024 +#define HCLGE_NIC_CRQ_HEAD_REG 0x27028 +#define HCLGE_NIC_CMQ_EN_B 16 +#define HCLGE_NIC_CMQ_ENABLE BIT(HCLGE_NIC_CMQ_EN_B) 
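The CSQ/CRQ registers above frame the command rings whose index math appears in hclge_ring_space() and hclge_cmd_csq_clean() earlier in hclge_cmd.c. A self-contained sketch of that modular arithmetic follows; the struct and values are illustrative, not the driver's. One slot is always kept empty so that head == next_to_use unambiguously means "all descriptors consumed".

#include <assert.h>
#include <stdio.h>

struct ring {
	int desc_num;
	int next_to_use;	/* producer index (driver tail) */
	int next_to_clean;	/* consumer index, chases the HW head */
};

static int ring_space(const struct ring *r)
{
	int used = (r->next_to_use - r->next_to_clean + r->desc_num) %
		   r->desc_num;

	return r->desc_num - used - 1;	/* one slot reserved */
}

static int ring_clean(struct ring *r, int hw_head)
{
	int cleaned = 0;

	/* advance next_to_clean until it catches the hardware head */
	while (r->next_to_clean != hw_head) {
		r->next_to_clean = (r->next_to_clean + 1) % r->desc_num;
		cleaned++;
	}
	return cleaned;
}

int main(void)
{
	struct ring r = { .desc_num = 8, .next_to_use = 6, .next_to_clean = 2 };

	assert(ring_space(&r) == 3);	/* 8 slots - 4 used - 1 reserved */
	assert(ring_clean(&r, 6) == 4);	/* HW head caught up to tail */
	assert(ring_space(&r) == 7);
	printf("ok\n");
	return 0;
}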
+#define HCLGE_NIC_CMQ_DESC_NUM 1024 +#define HCLGE_NIC_CMQ_DESC_NUM_S 3 + +int hclge_cmd_init(struct hclge_dev *hdev); +static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value) +{ + writel(value, base + reg); +} + +#define hclge_write_dev(a, reg, value) \ + hclge_write_reg((a)->io_base, (reg), (value)) +#define hclge_read_dev(a, reg) \ + hclge_read_reg((a)->io_base, (reg)) + +static inline u32 hclge_read_reg(u8 __iomem *base, u32 reg) +{ + u8 __iomem *reg_addr = READ_ONCE(base); + + return readl(reg_addr + reg); +} + +#define HCLGE_SEND_SYNC(flag) \ + ((flag) & HCLGE_CMD_FLAG_NO_INTR) + +struct hclge_hw; +int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num); +void hclge_cmd_setup_basic_desc(struct hclge_desc *desc, + enum hclge_opcode_type opcode, bool is_read); + +int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, + struct hclge_promisc_param *param); + +enum hclge_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw, + struct hclge_desc *desc); +enum hclge_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw, + struct hclge_desc *desc); + +void hclge_destroy_cmd_queue(struct hclge_hw *hw); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c new file mode 100644 index 000000000000..6fb7648bb2f2 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -0,0 +1,4265 @@ +/* + * Copyright (c) 2016-2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/acpi.h> +#include <linux/device.h> +#include <linux/etherdevice.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <linux/platform_device.h> + +#include "hclge_cmd.h" +#include "hclge_main.h" +#include "hclge_mdio.h" +#include "hclge_tm.h" +#include "hnae3.h" + +#define HCLGE_NAME "hclge" +#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset)))) +#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f)) +#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f)) +#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f)) + +static int hclge_rss_init_hw(struct hclge_dev *hdev); +static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, + enum hclge_mta_dmac_sel_type mta_mac_sel, + bool enable); +static int hclge_init_vlan_config(struct hclge_dev *hdev); + +static struct hnae3_ae_algo ae_algo; + +static const struct pci_device_id ae_algo_pci_tbl[] = { + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0}, + /* Required last entry */ + {0, } +}; + +static const struct pci_device_id roce_pci_tbl[] = { + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0}, + {PCI_VDEVICE(HUAWEI, 
HNAE3_DEV_ID_100G_RDMA_MACSEC), 0}, + /* Required last entry */ + {0, } +}; + +static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = { + "Mac Loopback test", + "Serdes Loopback test", + "Phy Loopback test" +}; + +static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = { + {"igu_rx_oversize_pkt", + HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)}, + {"igu_rx_undersize_pkt", + HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)}, + {"igu_rx_out_all_pkt", + HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)}, + {"igu_rx_uni_pkt", + HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)}, + {"igu_rx_multi_pkt", + HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)}, + {"igu_rx_broad_pkt", + HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)}, + {"egu_tx_out_all_pkt", + HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)}, + {"egu_tx_uni_pkt", + HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)}, + {"egu_tx_multi_pkt", + HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)}, + {"egu_tx_broad_pkt", + HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)}, + {"ssu_ppp_mac_key_num", + HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)}, + {"ssu_ppp_host_key_num", + HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)}, + {"ppp_ssu_mac_rlt_num", + HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)}, + {"ppp_ssu_host_rlt_num", + HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)}, + {"ssu_tx_in_num", + HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)}, + {"ssu_tx_out_num", + HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)}, + {"ssu_rx_in_num", + HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)}, + {"ssu_rx_out_num", + HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)} +}; + +static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = { + {"igu_rx_err_pkt", + HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)}, + {"igu_rx_no_eof_pkt", + HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)}, + {"igu_rx_no_sof_pkt", + HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)}, + {"egu_tx_1588_pkt", + HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)}, + {"ssu_full_drop_num", + HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)}, + {"ssu_part_drop_num", + HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)}, + {"ppp_key_drop_num", + HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)}, + {"ppp_rlt_drop_num", + HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)}, + {"ssu_key_drop_num", + HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)}, + {"pkt_curr_buf_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)}, + {"qcn_fb_rcv_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)}, + {"qcn_fb_drop_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)}, + {"qcn_fb_invaild_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)}, + {"rx_packet_tc0_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)}, + {"rx_packet_tc1_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)}, + {"rx_packet_tc2_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)}, + {"rx_packet_tc3_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)}, + {"rx_packet_tc4_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)}, + {"rx_packet_tc5_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)}, + {"rx_packet_tc6_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)}, + {"rx_packet_tc7_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)}, + {"rx_packet_tc0_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)}, + {"rx_packet_tc1_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)}, + {"rx_packet_tc2_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)}, + 
{"rx_packet_tc3_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)}, + {"rx_packet_tc4_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)}, + {"rx_packet_tc5_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)}, + {"rx_packet_tc6_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)}, + {"rx_packet_tc7_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)}, + {"tx_packet_tc0_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)}, + {"tx_packet_tc1_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)}, + {"tx_packet_tc2_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)}, + {"tx_packet_tc3_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)}, + {"tx_packet_tc4_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)}, + {"tx_packet_tc5_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)}, + {"tx_packet_tc6_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)}, + {"tx_packet_tc7_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)}, + {"tx_packet_tc0_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)}, + {"tx_packet_tc1_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)}, + {"tx_packet_tc2_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)}, + {"tx_packet_tc3_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)}, + {"tx_packet_tc4_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)}, + {"tx_packet_tc5_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)}, + {"tx_packet_tc6_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)}, + {"tx_packet_tc7_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)}, + {"pkt_curr_buf_tc0_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)}, + {"pkt_curr_buf_tc1_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)}, + {"pkt_curr_buf_tc2_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)}, + {"pkt_curr_buf_tc3_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)}, + {"pkt_curr_buf_tc4_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)}, + {"pkt_curr_buf_tc5_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)}, + {"pkt_curr_buf_tc6_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)}, + {"pkt_curr_buf_tc7_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)}, + {"mb_uncopy_num", + HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)}, + {"lo_pri_unicast_rlt_drop_num", + HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)}, + {"hi_pri_multicast_rlt_drop_num", + HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)}, + {"lo_pri_multicast_rlt_drop_num", + HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)}, + {"rx_oq_drop_pkt_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)}, + {"tx_oq_drop_pkt_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)}, + {"nic_l2_err_drop_pkt_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)}, + {"roc_l2_err_drop_pkt_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)} +}; + +static const struct hclge_comm_stats_str g_mac_stats_string[] = { + {"mac_tx_mac_pause_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)}, + {"mac_rx_mac_pause_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)}, + {"mac_tx_pfc_pri0_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)}, + {"mac_tx_pfc_pri1_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)}, + {"mac_tx_pfc_pri2_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)}, + 
{"mac_tx_pfc_pri3_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)}, + {"mac_tx_pfc_pri4_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)}, + {"mac_tx_pfc_pri5_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)}, + {"mac_tx_pfc_pri6_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)}, + {"mac_tx_pfc_pri7_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)}, + {"mac_rx_pfc_pri0_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)}, + {"mac_rx_pfc_pri1_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)}, + {"mac_rx_pfc_pri2_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)}, + {"mac_rx_pfc_pri3_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)}, + {"mac_rx_pfc_pri4_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)}, + {"mac_rx_pfc_pri5_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)}, + {"mac_rx_pfc_pri6_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)}, + {"mac_rx_pfc_pri7_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)}, + {"mac_tx_total_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)}, + {"mac_tx_total_oct_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)}, + {"mac_tx_good_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)}, + {"mac_tx_bad_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)}, + {"mac_tx_good_oct_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)}, + {"mac_tx_bad_oct_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)}, + {"mac_tx_uni_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)}, + {"mac_tx_multi_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)}, + {"mac_tx_broad_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)}, + {"mac_tx_undersize_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)}, + {"mac_tx_overrsize_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_overrsize_pkt_num)}, + {"mac_tx_64_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)}, + {"mac_tx_65_127_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)}, + {"mac_tx_128_255_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)}, + {"mac_tx_256_511_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)}, + {"mac_tx_512_1023_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)}, + {"mac_tx_1024_1518_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)}, + {"mac_tx_1519_max_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_oct_pkt_num)}, + {"mac_rx_total_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)}, + {"mac_rx_total_oct_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)}, + {"mac_rx_good_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)}, + {"mac_rx_bad_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)}, + {"mac_rx_good_oct_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)}, + {"mac_rx_bad_oct_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)}, + {"mac_rx_uni_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)}, + {"mac_rx_multi_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)}, + {"mac_rx_broad_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)}, + {"mac_rx_undersize_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)}, + {"mac_rx_overrsize_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_overrsize_pkt_num)}, + {"mac_rx_64_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)}, 
+ {"mac_rx_65_127_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)}, + {"mac_rx_128_255_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)}, + {"mac_rx_256_511_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)}, + {"mac_rx_512_1023_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)}, + {"mac_rx_1024_1518_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)}, + {"mac_rx_1519_max_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_oct_pkt_num)}, + + {"mac_trans_fragment_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_trans_fragment_pkt_num)}, + {"mac_trans_undermin_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_trans_undermin_pkt_num)}, + {"mac_trans_jabber_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_trans_jabber_pkt_num)}, + {"mac_trans_err_all_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_trans_err_all_pkt_num)}, + {"mac_trans_from_app_good_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_good_pkt_num)}, + {"mac_trans_from_app_bad_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_bad_pkt_num)}, + {"mac_rcv_fragment_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fragment_pkt_num)}, + {"mac_rcv_undermin_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_undermin_pkt_num)}, + {"mac_rcv_jabber_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_jabber_pkt_num)}, + {"mac_rcv_fcs_err_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fcs_err_pkt_num)}, + {"mac_rcv_send_app_good_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_good_pkt_num)}, + {"mac_rcv_send_app_bad_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_bad_pkt_num)} +}; + +static int hclge_64_bit_update_stats(struct hclge_dev *hdev) +{ +#define HCLGE_64_BIT_CMD_NUM 5 +#define HCLGE_64_BIT_RTN_DATANUM 4 + u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats); + struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM]; + u64 *desc_data; + int i, k, n; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true); + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get 64 bit pkt stats fail, status = %d.\n", ret); + return ret; + } + + for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) { + if (unlikely(i == 0)) { + desc_data = (u64 *)(&desc[i].data[0]); + n = HCLGE_64_BIT_RTN_DATANUM - 1; + } else { + desc_data = (u64 *)(&desc[i]); + n = HCLGE_64_BIT_RTN_DATANUM; + } + for (k = 0; k < n; k++) { + *data++ += cpu_to_le64(*desc_data); + desc_data++; + } + } + + return 0; +} + +static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats) +{ + stats->pkt_curr_buf_cnt = 0; + stats->pkt_curr_buf_tc0_cnt = 0; + stats->pkt_curr_buf_tc1_cnt = 0; + stats->pkt_curr_buf_tc2_cnt = 0; + stats->pkt_curr_buf_tc3_cnt = 0; + stats->pkt_curr_buf_tc4_cnt = 0; + stats->pkt_curr_buf_tc5_cnt = 0; + stats->pkt_curr_buf_tc6_cnt = 0; + stats->pkt_curr_buf_tc7_cnt = 0; +} + +static int hclge_32_bit_update_stats(struct hclge_dev *hdev) +{ +#define HCLGE_32_BIT_CMD_NUM 8 +#define HCLGE_32_BIT_RTN_DATANUM 8 + + struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM]; + struct hclge_32_bit_stats *all_32_bit_stats; + u32 *desc_data; + int i, k, n; + u64 *data; + int ret; + + all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats; + data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt); + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true); + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get 32 bit pkt stats fail, status = %d.\n", ret); + + 
return ret; + } + + hclge_reset_partial_32bit_counter(all_32_bit_stats); + for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) { + if (unlikely(i == 0)) { + all_32_bit_stats->igu_rx_err_pkt += + cpu_to_le32(desc[i].data[0]); + all_32_bit_stats->igu_rx_no_eof_pkt += + cpu_to_le32(desc[i].data[1] & 0xffff); + all_32_bit_stats->igu_rx_no_sof_pkt += + cpu_to_le32((desc[i].data[1] >> 16) & 0xffff); + + desc_data = (u32 *)(&desc[i].data[2]); + n = HCLGE_32_BIT_RTN_DATANUM - 4; + } else { + desc_data = (u32 *)(&desc[i]); + n = HCLGE_32_BIT_RTN_DATANUM; + } + for (k = 0; k < n; k++) { + *data++ += cpu_to_le32(*desc_data); + desc_data++; + } + } + + return 0; +} + +static int hclge_mac_update_stats(struct hclge_dev *hdev) +{ +#define HCLGE_MAC_CMD_NUM 17 +#define HCLGE_RTN_DATA_NUM 4 + + u64 *data = (u64 *)(&hdev->hw_stats.mac_stats); + struct hclge_desc desc[HCLGE_MAC_CMD_NUM]; + u64 *desc_data; + int i, k, n; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true); + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get MAC pkt stats fail, status = %d.\n", ret); + + return ret; + } + + for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) { + if (unlikely(i == 0)) { + desc_data = (u64 *)(&desc[i].data[0]); + n = HCLGE_RTN_DATA_NUM - 2; + } else { + desc_data = (u64 *)(&desc[i]); + n = HCLGE_RTN_DATA_NUM; + } + for (k = 0; k < n; k++) { + *data++ += cpu_to_le64(*desc_data); + desc_data++; + } + } + + return 0; +} + +static int hclge_tqps_update_stats(struct hnae3_handle *handle) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hnae3_queue *queue; + struct hclge_desc desc[1]; + struct hclge_tqp *tqp; + int ret, i; + + for (i = 0; i < kinfo->num_tqps; i++) { + queue = handle->kinfo.tqp[i]; + tqp = container_of(queue, struct hclge_tqp, q); + /* command : HCLGE_OPC_QUERY_RX_STATUS */ + hclge_cmd_setup_basic_desc(&desc[0], + HCLGE_OPC_QUERY_RX_STATUS, + true); + + desc[0].data[0] = (tqp->index & 0x1ff); + ret = hclge_cmd_send(&hdev->hw, desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Query tqp stat fail, status = %d, queue = %d\n", + ret, i); + return ret; + } + tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += + cpu_to_le32(desc[0].data[4]); + } + + for (i = 0; i < kinfo->num_tqps; i++) { + queue = handle->kinfo.tqp[i]; + tqp = container_of(queue, struct hclge_tqp, q); + /* command : HCLGE_OPC_QUERY_TX_STATUS */ + hclge_cmd_setup_basic_desc(&desc[0], + HCLGE_OPC_QUERY_TX_STATUS, + true); + + desc[0].data[0] = (tqp->index & 0x1ff); + ret = hclge_cmd_send(&hdev->hw, desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Query tqp stat fail, status = %d, queue = %d\n", + ret, i); + return ret; + } + tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += + cpu_to_le32(desc[0].data[4]); + } + + return 0; +} + +static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hclge_tqp *tqp; + u64 *buff = data; + int i; + + for (i = 0; i < kinfo->num_tqps; i++) { + tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); + *buff++ = cpu_to_le64(tqp->tqp_stats.rcb_tx_ring_pktnum_rcd); + } + + for (i = 0; i < kinfo->num_tqps; i++) { + tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); + *buff++ = cpu_to_le64(tqp->tqp_stats.rcb_rx_ring_pktnum_rcd); + } + + return buff; +} + +static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset) +{ + struct
hnae3_knic_private_info *kinfo = &handle->kinfo; + + return kinfo->num_tqps * (2); +} + +static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + u8 *buff = data; + int i = 0; + + for (i = 0; i < kinfo->num_tqps; i++) { + struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i], + struct hclge_tqp, q); + snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd", + tqp->index); + buff = buff + ETH_GSTRING_LEN; + } + + for (i = 0; i < kinfo->num_tqps; i++) { + struct hclge_tqp *tqp = container_of(kinfo->tqp[i], + struct hclge_tqp, q); + snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd", + tqp->index); + buff = buff + ETH_GSTRING_LEN; + } + + return buff; +} + +static u64 *hclge_comm_get_stats(void *comm_stats, + const struct hclge_comm_stats_str strs[], + int size, u64 *data) +{ + u64 *buf = data; + u32 i; + + for (i = 0; i < size; i++) + buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset); + + return buf + size; +} + +static u8 *hclge_comm_get_strings(u32 stringset, + const struct hclge_comm_stats_str strs[], + int size, u8 *data) +{ + char *buff = (char *)data; + u32 i; + + if (stringset != ETH_SS_STATS) + return buff; + + for (i = 0; i < size; i++) { + snprintf(buff, ETH_GSTRING_LEN, + strs[i].desc); + buff = buff + ETH_GSTRING_LEN; + } + + return (u8 *)buff; +} + +static void hclge_update_netstat(struct hclge_hw_stats *hw_stats, + struct net_device_stats *net_stats) +{ + net_stats->tx_dropped = 0; + net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num; + net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num; + net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num; + + net_stats->rx_errors = hw_stats->mac_stats.mac_rx_overrsize_pkt_num; + net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num; + net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_err_pkt; + net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt; + net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt; + net_stats->rx_errors += hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num; + + net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num; + net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num; + + net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num; + net_stats->rx_length_errors = + hw_stats->mac_stats.mac_rx_undersize_pkt_num; + net_stats->rx_length_errors += + hw_stats->mac_stats.mac_rx_overrsize_pkt_num; + net_stats->rx_over_errors = + hw_stats->mac_stats.mac_rx_overrsize_pkt_num; +} + +static void hclge_update_stats_for_all(struct hclge_dev *hdev) +{ + struct hnae3_handle *handle; + int status; + + handle = &hdev->vport[0].nic; + if (handle->client) { + status = hclge_tqps_update_stats(handle); + if (status) { + dev_err(&hdev->pdev->dev, + "Update TQPS stats fail, status = %d.\n", + status); + } + } + + status = hclge_mac_update_stats(hdev); + if (status) + dev_err(&hdev->pdev->dev, + "Update MAC stats fail, status = %d.\n", status); + + status = hclge_32_bit_update_stats(hdev); + if (status) + dev_err(&hdev->pdev->dev, + "Update 32 bit stats fail, status = %d.\n", + status); + + hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats); +} + +static void hclge_update_stats(struct hnae3_handle *handle, + struct net_device_stats *net_stats) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_hw_stats *hw_stats = &hdev->hw_stats; + int 
status; + + status = hclge_mac_update_stats(hdev); + if (status) + dev_err(&hdev->pdev->dev, + "Update MAC stats fail, status = %d.\n", + status); + + status = hclge_32_bit_update_stats(hdev); + if (status) + dev_err(&hdev->pdev->dev, + "Update 32 bit stats fail, status = %d.\n", + status); + + status = hclge_64_bit_update_stats(hdev); + if (status) + dev_err(&hdev->pdev->dev, + "Update 64 bit stats fail, status = %d.\n", + status); + + status = hclge_tqps_update_stats(handle); + if (status) + dev_err(&hdev->pdev->dev, + "Update TQPS stats fail, status = %d.\n", + status); + + hclge_update_netstat(hw_stats, net_stats); +} + +static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) +{ +#define HCLGE_LOOPBACK_TEST_FLAGS 0x7 + + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int count = 0; + + /* Loopback test support rules: + * mac: supported only in GE mode + * serdes: supported in all MAC modes, including GE/XGE/LGE/CGE + * phy: supported only when a PHY device exists on the board + */ + if (stringset == ETH_SS_TEST) { + /* clear loopback bit flags at first */ + handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS)); + if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || + hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M || + hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) { + count += 1; + handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK; + } else { + count = -EOPNOTSUPP; + } + } else if (stringset == ETH_SS_STATS) { + count = ARRAY_SIZE(g_mac_stats_string) + + ARRAY_SIZE(g_all_32bit_stats_string) + + ARRAY_SIZE(g_all_64bit_stats_string) + + hclge_tqps_get_sset_count(handle, stringset); + } + + return count; +} + +static void hclge_get_strings(struct hnae3_handle *handle, + u32 stringset, + u8 *data) +{ + u8 *p = (char *)data; + int size; + + if (stringset == ETH_SS_STATS) { + size = ARRAY_SIZE(g_mac_stats_string); + p = hclge_comm_get_strings(stringset, + g_mac_stats_string, + size, + p); + size = ARRAY_SIZE(g_all_32bit_stats_string); + p = hclge_comm_get_strings(stringset, + g_all_32bit_stats_string, + size, + p); + size = ARRAY_SIZE(g_all_64bit_stats_string); + p = hclge_comm_get_strings(stringset, + g_all_64bit_stats_string, + size, + p); + p = hclge_tqps_get_strings(handle, p); + } else if (stringset == ETH_SS_TEST) { + if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) { + memcpy(p, + hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC], + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) { + memcpy(p, + hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES], + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) { + memcpy(p, + hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY], + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + } +} + +static void hclge_get_stats(struct hnae3_handle *handle, u64 *data) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u64 *p; + + p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, + g_mac_stats_string, + ARRAY_SIZE(g_mac_stats_string), + data); + p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats, + g_all_32bit_stats_string, + ARRAY_SIZE(g_all_32bit_stats_string), + p); + p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats, + g_all_64bit_stats_string, + ARRAY_SIZE(g_all_64bit_stats_string), + p); + p = hclge_tqps_get_stats(handle, p); +} + +static int hclge_parse_func_status(struct hclge_dev *hdev, + struct hclge_func_status *status) +{ + if (!(status->pf_state & HCLGE_PF_STATE_DONE))
+ return -EINVAL; + + /* Set the pf to main pf */ + if (status->pf_state & HCLGE_PF_STATE_MAIN) + hdev->flag |= HCLGE_FLAG_MAIN; + else + hdev->flag &= ~HCLGE_FLAG_MAIN; + + hdev->num_req_vfs = status->vf_num / status->pf_num; + return 0; +} + +static int hclge_query_function_status(struct hclge_dev *hdev) +{ + struct hclge_func_status *req; + struct hclge_desc desc; + int timeout = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true); + req = (struct hclge_func_status *)desc.data; + + do { + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "query function status failed %d.\n", + ret); + + return ret; + } + + /* Check pf reset is done */ + if (req->pf_state) + break; + usleep_range(1000, 2000); + } while (timeout++ < 5); + + ret = hclge_parse_func_status(hdev, req); + + return ret; +} + +static int hclge_query_pf_resource(struct hclge_dev *hdev) +{ + struct hclge_pf_res *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "query pf resource failed %d.\n", ret); + return ret; + } + + req = (struct hclge_pf_res *)desc.data; + hdev->num_tqps = __le16_to_cpu(req->tqp_num); + hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; + + if (hnae_get_bit(hdev->ae_dev->flag, HNAE_DEV_SUPPORT_ROCE_B)) { + hdev->num_roce_msix = + hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number), + HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); + + /* PF should have NIC vectors and Roce vectors, + * NIC vectors are queued before Roce vectors. + */ + hdev->num_msi = hdev->num_roce_msix + HCLGE_ROCE_VECTOR_OFFSET; + } else { + hdev->num_msi = + hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number), + HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); + } + + return 0; +} + +static int hclge_parse_speed(int speed_cmd, int *speed) +{ + switch (speed_cmd) { + case 6: + *speed = HCLGE_MAC_SPEED_10M; + break; + case 7: + *speed = HCLGE_MAC_SPEED_100M; + break; + case 0: + *speed = HCLGE_MAC_SPEED_1G; + break; + case 1: + *speed = HCLGE_MAC_SPEED_10G; + break; + case 2: + *speed = HCLGE_MAC_SPEED_25G; + break; + case 3: + *speed = HCLGE_MAC_SPEED_40G; + break; + case 4: + *speed = HCLGE_MAC_SPEED_50G; + break; + case 5: + *speed = HCLGE_MAC_SPEED_100G; + break; + default: + return -EINVAL; + } + + return 0; +} + +static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) +{ + struct hclge_cfg_param *req; + u64 mac_addr_tmp_high; + u64 mac_addr_tmp; + int i; + + req = (struct hclge_cfg_param *)desc[0].data; + + /* get the configuration */ + cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]), + HCLGE_CFG_VMDQ_M, + HCLGE_CFG_VMDQ_S); + cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]), + HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S); + cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]), + HCLGE_CFG_TQP_DESC_N_M, + HCLGE_CFG_TQP_DESC_N_S); + + cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_PHY_ADDR_M, + HCLGE_CFG_PHY_ADDR_S); + cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_MEDIA_TP_M, + HCLGE_CFG_MEDIA_TP_S); + cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_RX_BUF_LEN_M, + HCLGE_CFG_RX_BUF_LEN_S); + /* get mac_address */ + mac_addr_tmp = __le32_to_cpu(req->param[2]); + mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]), + HCLGE_CFG_MAC_ADDR_H_M, + 
HCLGE_CFG_MAC_ADDR_H_S); + + mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1; + + cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]), + HCLGE_CFG_DEFAULT_SPEED_M, + HCLGE_CFG_DEFAULT_SPEED_S); + for (i = 0; i < ETH_ALEN; i++) + cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; + + req = (struct hclge_cfg_param *)desc[1].data; + cfg->numa_node_map = __le32_to_cpu(req->param[0]); +} + +/* hclge_get_cfg: query the static parameter from flash + * @hdev: pointer to struct hclge_dev + * @hcfg: the config structure to be filled out + */ +static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg) +{ + struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM]; + struct hclge_cfg_param *req; + int i, ret; + + for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) { + req = (struct hclge_cfg_param *)desc[i].data; + hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM, + true); + hnae_set_field(req->offset, HCLGE_CFG_OFFSET_M, + HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES); + /* Length must be in units of 4 bytes when sent to hardware */ + hnae_set_field(req->offset, HCLGE_CFG_RD_LEN_M, + HCLGE_CFG_RD_LEN_S, + HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT); + req->offset = cpu_to_le32(req->offset); + } + + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM); + if (ret) { + dev_err(&hdev->pdev->dev, + "get config failed %d.\n", ret); + return ret; + } + + hclge_parse_cfg(hcfg, desc); + return 0; +} + +static int hclge_get_cap(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_query_function_status(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "query function status error %d.\n", ret); + return ret; + } + + /* get pf resource */ + ret = hclge_query_pf_resource(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "query pf resource error %d.\n", ret); + return ret; + } + + return 0; +} + +static int hclge_configure(struct hclge_dev *hdev) +{ + struct hclge_cfg cfg; + int ret, i; + + ret = hclge_get_cfg(hdev, &cfg); + if (ret) { + dev_err(&hdev->pdev->dev, "get config error %d.\n", ret); + return ret; + } + + hdev->num_vmdq_vport = cfg.vmdq_vport_num; + hdev->base_tqp_pid = 0; + hdev->rss_size_max = 1; + hdev->rx_buf_len = cfg.rx_buf_len; + for (i = 0; i < ETH_ALEN; i++) + hdev->hw.mac.mac_addr[i] = cfg.mac_addr[i]; + hdev->hw.mac.media_type = cfg.media_type; + hdev->num_desc = cfg.tqp_desc_num; + hdev->tm_info.num_pg = 1; + hdev->tm_info.num_tc = cfg.tc_num; + hdev->tm_info.hw_pfc_map = 0; + + ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed); + if (ret) { + dev_err(&hdev->pdev->dev, "failed to parse speed, ret = %d.\n", ret); + return ret; + } + + if ((hdev->tm_info.num_tc > HNAE3_MAX_TC) || + (hdev->tm_info.num_tc < 1)) { + dev_warn(&hdev->pdev->dev, "invalid TC num %d, using 1 TC.\n", + hdev->tm_info.num_tc); + hdev->tm_info.num_tc = 1; + } + + /* Non-contiguous TCs are currently not supported */ + for (i = 0; i < cfg.tc_num; i++) + hnae_set_bit(hdev->hw_tc_map, i, 1); + + if (!hdev->num_vmdq_vport && !hdev->num_req_vfs) + hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; + else + hdev->tx_sch_mode = HCLGE_FLAG_VNET_BASE_SCH_MODE; + + return ret; +} + +static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min, + int tso_mss_max) +{ + struct hclge_cfg_tso_status *req; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false); + + req = (struct hclge_cfg_tso_status *)desc.data; + hnae_set_field(req->tso_mss_min, HCLGE_TSO_MSS_MIN_M, + HCLGE_TSO_MSS_MIN_S, tso_mss_min); + hnae_set_field(req->tso_mss_max, HCLGE_TSO_MSS_MIN_M, + HCLGE_TSO_MSS_MIN_S,
tso_mss_max); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_alloc_tqps(struct hclge_dev *hdev) +{ + struct hclge_tqp *tqp; + int i; + + hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, + sizeof(struct hclge_tqp), GFP_KERNEL); + if (!hdev->htqp) + return -ENOMEM; + + tqp = hdev->htqp; + + for (i = 0; i < hdev->num_tqps; i++) { + tqp->dev = &hdev->pdev->dev; + tqp->index = i; + + tqp->q.ae_algo = &ae_algo; + tqp->q.buf_size = hdev->rx_buf_len; + tqp->q.desc_num = hdev->num_desc; + tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET + + i * HCLGE_TQP_REG_SIZE; + + tqp++; + } + + return 0; +} + +static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id, + u16 tqp_pid, u16 tqp_vid, bool is_pf) +{ + struct hclge_tqp_map *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false); + + req = (struct hclge_tqp_map *)desc.data; + req->tqp_id = cpu_to_le16(tqp_pid); + req->tqp_vf = cpu_to_le16(func_id); + req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B | + 1 << HCLGE_TQP_MAP_EN_B; + req->tqp_vid = cpu_to_le16(tqp_vid); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", + ret); + return ret; + } + + return 0; +} + +static int hclge_assign_tqp(struct hclge_vport *vport, + struct hnae3_queue **tqp, u16 num_tqps) +{ + struct hclge_dev *hdev = vport->back; + int i, alloced, func_id, ret; + bool is_pf; + + func_id = vport->vport_id; + is_pf = (vport->vport_id == 0) ? true : false; + + for (i = 0, alloced = 0; i < hdev->num_tqps && + alloced < num_tqps; i++) { + if (!hdev->htqp[i].alloced) { + hdev->htqp[i].q.handle = &vport->nic; + hdev->htqp[i].q.tqp_index = alloced; + tqp[alloced] = &hdev->htqp[i].q; + hdev->htqp[i].alloced = true; + ret = hclge_map_tqps_to_func(hdev, func_id, + hdev->htqp[i].index, + alloced, is_pf); + if (ret) + return ret; + + alloced++; + } + } + vport->alloc_tqps = num_tqps; + + return 0; +} + +static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps) +{ + struct hnae3_handle *nic = &vport->nic; + struct hnae3_knic_private_info *kinfo = &nic->kinfo; + struct hclge_dev *hdev = vport->back; + int i, ret; + + kinfo->num_desc = hdev->num_desc; + kinfo->rx_buf_len = hdev->rx_buf_len; + kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc); + kinfo->rss_size + = min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc); + kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc; + + for (i = 0; i < HNAE3_MAX_TC; i++) { + if (hdev->hw_tc_map & BIT(i)) { + kinfo->tc_info[i].enable = true; + kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size; + kinfo->tc_info[i].tqp_count = kinfo->rss_size; + kinfo->tc_info[i].tc = i; + } else { + /* Set to default queue if TC is disable */ + kinfo->tc_info[i].enable = false; + kinfo->tc_info[i].tqp_offset = 0; + kinfo->tc_info[i].tqp_count = 1; + kinfo->tc_info[i].tc = 0; + } + } + + kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, + sizeof(struct hnae3_queue *), GFP_KERNEL); + if (!kinfo->tqp) + return -ENOMEM; + + ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps); + if (ret) { + dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret); + return -EINVAL; + } + + return 0; +} + +static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps) +{ + /* this would be initialized later */ +} + +static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps) +{ + struct hnae3_handle *nic = &vport->nic; + struct hclge_dev *hdev = vport->back; + int ret; + + 
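The arithmetic in hclge_knic_setup() above partitions a vport's queues evenly across its TCs. A worked example, assuming 16 assigned TQPs, 4 enabled TCs and rss_size_max = 8 (numbers invented for illustration):

	num_tc   = min(16, 4)     = 4
	rss_size = min(8, 16 / 4) = 4	/* queues per TC */
	num_tqps = 4 * 4          = 16	/* TC i owns queues 4*i .. 4*i+3 */

Any TC outside hw_tc_map falls back to tqp_offset 0 with a single queue, so a lookup against a disabled TC still resolves to a valid ring.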
nic->pdev = hdev->pdev; + nic->ae_algo = &ae_algo; + nic->numa_node_mask = hdev->numa_node_mask; + + if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) { + ret = hclge_knic_setup(vport, num_tqps); + if (ret) { + dev_err(&hdev->pdev->dev, "knic setup failed %d\n", + ret); + return ret; + } + } else { + hclge_unic_setup(vport, num_tqps); + } + + return 0; +} + +static int hclge_alloc_vport(struct hclge_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + struct hclge_vport *vport; + u32 tqp_main_vport; + u32 tqp_per_vport; + int num_vport, i; + int ret; + + /* We need to alloc a vport for main NIC of PF */ + num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; + + if (hdev->num_tqps < num_vport) + num_vport = hdev->num_tqps; + + /* Alloc the same number of TQPs for every vport */ + tqp_per_vport = hdev->num_tqps / num_vport; + tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport; + + vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport), + GFP_KERNEL); + if (!vport) + return -ENOMEM; + + hdev->vport = vport; + hdev->num_alloc_vport = num_vport; + +#ifdef CONFIG_PCI_IOV + /* Enable SRIOV */ + if (hdev->num_req_vfs) { + dev_info(&pdev->dev, "active VFs(%d) found, enabling SRIOV\n", + hdev->num_req_vfs); + ret = pci_enable_sriov(hdev->pdev, hdev->num_req_vfs); + if (ret) { + hdev->num_alloc_vfs = 0; + dev_err(&pdev->dev, "SRIOV enable failed %d\n", + ret); + return ret; + } + } + hdev->num_alloc_vfs = hdev->num_req_vfs; +#endif + + for (i = 0; i < num_vport; i++) { + vport->back = hdev; + vport->vport_id = i; + + if (i == 0) + ret = hclge_vport_setup(vport, tqp_main_vport); + else + ret = hclge_vport_setup(vport, tqp_per_vport); + if (ret) { + dev_err(&pdev->dev, + "vport setup failed for vport %d, %d\n", + i, ret); + return ret; + } + + vport++; + } + + return 0; +} + +static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, u16 buf_size) +{ +/* TX buffer size is in units of 128 bytes */ +#define HCLGE_BUF_SIZE_UNIT_SHIFT 7 +#define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15) + struct hclge_tx_buff_alloc *req; + struct hclge_desc desc; + int ret; + u8 i; + + req = (struct hclge_tx_buff_alloc *)desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0); + for (i = 0; i < HCLGE_TC_NUM; i++) + req->tx_pkt_buff[i] = + cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) | + HCLGE_BUF_SIZE_UPDATE_EN_MSK); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n", + ret); + return ret; + } + + return 0; +} + +static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, u32 buf_size) +{ + int ret = hclge_cmd_alloc_tx_buff(hdev, buf_size); + + if (ret) { + dev_err(&hdev->pdev->dev, + "tx buffer alloc failed %d\n", ret); + return ret; + } + + return 0; +} + +static int hclge_get_tc_num(struct hclge_dev *hdev) +{ + int i, cnt = 0; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) + if (hdev->hw_tc_map & BIT(i)) + cnt++; + return cnt; +} + +static int hclge_get_pfc_enable_num(struct hclge_dev *hdev) +{ + int i, cnt = 0; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) + if (hdev->hw_tc_map & BIT(i) && + hdev->tm_info.hw_pfc_map & BIT(i)) + cnt++; + return cnt; +} + +/* Get the number of PFC-enabled TCs, which have a private buffer */ +static int hclge_get_pfc_priv_num(struct hclge_dev *hdev) +{ + struct hclge_priv_buf *priv; + int i, cnt = 0; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + priv = &hdev->priv_buf[i]; + if ((hdev->tm_info.hw_pfc_map & BIT(i)) && + priv->enable) + cnt++; + } + + return cnt; +} + +/* Get the number of PFC-disabled TCs, which have a private buffer */ +static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev) +{ + struct hclge_priv_buf *priv; + int i, cnt = 0; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + priv = &hdev->priv_buf[i]; + if (hdev->hw_tc_map & BIT(i) && + !(hdev->tm_info.hw_pfc_map & BIT(i)) && + priv->enable) + cnt++; + } + + return cnt; +} + +static u32 hclge_get_rx_priv_buff_alloced(struct hclge_dev *hdev) +{ + struct hclge_priv_buf *priv; + u32 rx_priv = 0; + int i; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + priv = &hdev->priv_buf[i]; + if (priv->enable) + rx_priv += priv->buf_size; + } + return rx_priv; +} + +static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, u32 rx_all) +{ + u32 shared_buf_min, shared_buf_tc, shared_std; + int tc_num, pfc_enable_num; + u32 shared_buf; + u32 rx_priv; + int i; + + tc_num = hclge_get_tc_num(hdev); + pfc_enable_num = hclge_get_pfc_enable_num(hdev); + + shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV; + shared_buf_tc = pfc_enable_num * hdev->mps + + (tc_num - pfc_enable_num) * hdev->mps / 2 + + hdev->mps; + shared_std = max_t(u32, shared_buf_min, shared_buf_tc); + + rx_priv = hclge_get_rx_priv_buff_alloced(hdev); + if (rx_all <= rx_priv + shared_std) + return false; + + shared_buf = rx_all - rx_priv; + hdev->s_buf.buf_size = shared_buf; + hdev->s_buf.self.high = shared_buf; + hdev->s_buf.self.low = 2 * hdev->mps; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + if ((hdev->hw_tc_map & BIT(i)) && + (hdev->tm_info.hw_pfc_map & BIT(i))) { + hdev->s_buf.tc_thrd[i].low = hdev->mps; + hdev->s_buf.tc_thrd[i].high = 2 * hdev->mps; + } else { + hdev->s_buf.tc_thrd[i].low = 0; + hdev->s_buf.tc_thrd[i].high = hdev->mps; + } + } + + return true; +} + +/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs + * @hdev: pointer to struct hclge_dev + * @tx_size: the allocated tx buffer for all TCs + * @return: 0: calculation successful, negative: fail + */ +int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size) +{ + u32 rx_all = hdev->pkt_buf_size - tx_size; + int no_pfc_priv_num, pfc_priv_num; + struct hclge_priv_buf *priv; + int i; + + /* step 1, try to alloc private buffer for all enabled tc */ + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + priv = &hdev->priv_buf[i]; + if (hdev->hw_tc_map & BIT(i)) { + priv->enable = 1; + if (hdev->tm_info.hw_pfc_map & BIT(i)) { + priv->wl.low = hdev->mps; + priv->wl.high = priv->wl.low + hdev->mps; + priv->buf_size = priv->wl.high + + HCLGE_DEFAULT_DV; + } else { + priv->wl.low = 0; + priv->wl.high = 2 * hdev->mps; + priv->buf_size = priv->wl.high; + } + } + } + + if (hclge_is_rx_buf_ok(hdev, rx_all)) + return 0; + + /* step 2, try to decrease the buffer size of + * no pfc TC's private buffer + */ + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + priv = &hdev->priv_buf[i]; + + if (hdev->hw_tc_map & BIT(i)) + priv->enable = 1; + + if (hdev->tm_info.hw_pfc_map & BIT(i)) { + priv->wl.low = 128; + priv->wl.high = priv->wl.low + hdev->mps; + priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV; + } else { + priv->wl.low = 0; + priv->wl.high = hdev->mps; + priv->buf_size = priv->wl.high; + } + } + + if (hclge_is_rx_buf_ok(hdev, rx_all)) + return 0; + + /* step 3, try to reduce the number of pfc disabled TCs, + * which have private buffer + */ + /* get the number of PFC-disabled TCs that have a private buffer */ + no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev); + + /* let the last to be cleared first */ + for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { + priv = &hdev->priv_buf[i]; + + if
(hdev->hw_tc_map & BIT(i) && + !(hdev->tm_info.hw_pfc_map & BIT(i))) { + /* Clear the no pfc TC private buffer */ + priv->wl.low = 0; + priv->wl.high = 0; + priv->buf_size = 0; + priv->enable = 0; + no_pfc_priv_num--; + } + + if (hclge_is_rx_buf_ok(hdev, rx_all) || + no_pfc_priv_num == 0) + break; + } + + if (hclge_is_rx_buf_ok(hdev, rx_all)) + return 0; + + /* step 4, try to reduce the number of pfc enabled TCs + * which have private buffer. + */ + pfc_priv_num = hclge_get_pfc_priv_num(hdev); + + /* let the last to be cleared first */ + for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { + priv = &hdev->priv_buf[i]; + + if (hdev->hw_tc_map & BIT(i) && + hdev->tm_info.hw_pfc_map & BIT(i)) { + /* Reduce the number of pfc TC with private buffer */ + priv->wl.low = 0; + priv->enable = 0; + priv->wl.high = 0; + priv->buf_size = 0; + pfc_priv_num--; + } + + if (hclge_is_rx_buf_ok(hdev, rx_all) || + pfc_priv_num == 0) + break; + } + if (hclge_is_rx_buf_ok(hdev, rx_all)) + return 0; + + return -ENOMEM; +} + +static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev) +{ + struct hclge_rx_priv_buff *req; + struct hclge_desc desc; + int ret; + int i; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false); + req = (struct hclge_rx_priv_buff *)desc.data; + + /* Alloc private buffer TCs */ + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + struct hclge_priv_buf *priv = &hdev->priv_buf[i]; + + req->buf_num[i] = + cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S); + req->buf_num[i] |= + cpu_to_le16(true << HCLGE_TC0_PRI_BUF_EN_B); + } + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "rx private buffer alloc cmd failed %d\n", ret); + return ret; + } + + return 0; +} + +#define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0) + +static int hclge_rx_priv_wl_config(struct hclge_dev *hdev) +{ + struct hclge_rx_priv_wl_buf *req; + struct hclge_priv_buf *priv; + struct hclge_desc desc[2]; + int i, j; + int ret; + + for (i = 0; i < 2; i++) { + hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC, + false); + req = (struct hclge_rx_priv_wl_buf *)desc[i].data; + + /* The first descriptor set the NEXT bit to 1 */ + if (i == 0) + desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + else + desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + + for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { + priv = &hdev->priv_buf[i * HCLGE_TC_NUM_ONE_DESC + j]; + req->tc_wl[j].high = + cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S); + req->tc_wl[j].high |= + cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) << + HCLGE_RX_PRIV_EN_B); + req->tc_wl[j].low = + cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S); + req->tc_wl[j].low |= + cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) << + HCLGE_RX_PRIV_EN_B); + } + } + + /* Send 2 descriptor at one time */ + ret = hclge_cmd_send(&hdev->hw, desc, 2); + if (ret) { + dev_err(&hdev->pdev->dev, + "rx private waterline config cmd failed %d\n", + ret); + return ret; + } + return 0; +} + +static int hclge_common_thrd_config(struct hclge_dev *hdev) +{ + struct hclge_shared_buf *s_buf = &hdev->s_buf; + struct hclge_rx_com_thrd *req; + struct hclge_desc desc[2]; + struct hclge_tc_thrd *tc; + int i, j; + int ret; + + for (i = 0; i < 2; i++) { + hclge_cmd_setup_basic_desc(&desc[i], + HCLGE_OPC_RX_COM_THRD_ALLOC, false); + req = (struct hclge_rx_com_thrd *)&desc[i].data; + + /* The first descriptor set the NEXT bit to 1 */ + if (i == 0) + desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + else + desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + + for (j = 0; j < 
HCLGE_TC_NUM_ONE_DESC; j++) { + tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j]; + + req->com_thrd[j].high = + cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S); + req->com_thrd[j].high |= + cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) << + HCLGE_RX_PRIV_EN_B); + req->com_thrd[j].low = + cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S); + req->com_thrd[j].low |= + cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) << + HCLGE_RX_PRIV_EN_B); + } + } + + /* Send 2 descriptors at one time */ + ret = hclge_cmd_send(&hdev->hw, desc, 2); + if (ret) { + dev_err(&hdev->pdev->dev, + "common threshold config cmd failed %d\n", ret); + return ret; + } + return 0; +} + +static int hclge_common_wl_config(struct hclge_dev *hdev) +{ + struct hclge_shared_buf *buf = &hdev->s_buf; + struct hclge_rx_com_wl *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false); + + req = (struct hclge_rx_com_wl *)desc.data; + req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S); + req->com_wl.high |= + cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) << + HCLGE_RX_PRIV_EN_B); + + req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S); + req->com_wl.low |= + cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) << + HCLGE_RX_PRIV_EN_B); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "common waterline config cmd failed %d\n", ret); + return ret; + } + + return 0; +} + +int hclge_buffer_alloc(struct hclge_dev *hdev) +{ + u32 tx_buf_size = HCLGE_DEFAULT_TX_BUF; + int ret; + + hdev->priv_buf = devm_kmalloc_array(&hdev->pdev->dev, HCLGE_MAX_TC_NUM, + sizeof(struct hclge_priv_buf), + GFP_KERNEL | __GFP_ZERO); + if (!hdev->priv_buf) + return -ENOMEM; + + ret = hclge_tx_buffer_alloc(hdev, tx_buf_size); + if (ret) { + dev_err(&hdev->pdev->dev, + "could not alloc tx buffers %d\n", ret); + return ret; + } + + ret = hclge_rx_buffer_calc(hdev, tx_buf_size); + if (ret) { + dev_err(&hdev->pdev->dev, + "could not calc rx priv buffer size for all TCs %d\n", + ret); + return ret; + } + + ret = hclge_rx_priv_buf_alloc(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n", + ret); + return ret; + } + + ret = hclge_rx_priv_wl_config(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "could not configure rx private waterline %d\n", ret); + return ret; + } + + ret = hclge_common_thrd_config(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "could not configure common threshold %d\n", ret); + return ret; + } + + ret = hclge_common_wl_config(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "could not configure common waterline %d\n", ret); + return ret; + } + + return 0; +} + +static int hclge_init_roce_base_info(struct hclge_vport *vport) +{ + struct hnae3_handle *roce = &vport->roce; + struct hnae3_handle *nic = &vport->nic; + + roce->rinfo.num_vectors = vport->back->num_roce_msix; + + if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors || + vport->back->num_msi_left == 0) + return -EINVAL; + + roce->rinfo.base_vector = vport->back->roce_base_vector; + + roce->rinfo.netdev = nic->kinfo.netdev; + roce->rinfo.roce_io_base = vport->back->hw.io_base; + + roce->pdev = nic->pdev; + roce->ae_algo = nic->ae_algo; + roce->numa_node_mask = nic->numa_node_mask; + + return 0; +} + +static int hclge_init_msix(struct hclge_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + int ret, i; + + hdev->msix_entries = devm_kcalloc(&pdev->dev, hdev->num_msi, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!hdev->msix_entries) + return -ENOMEM; 
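hclge_buffer_alloc() above is effectively a fixed-point search: steps 1 through 4 of hclge_rx_buffer_calc() shrink and then drop per-TC private buffers (PFC-disabled TCs first) until hclge_is_rx_buf_ok() accepts the layout. The acceptance test, restated with pfc_num standing for the number of PFC-enabled TCs:

	shared_std = max(2 * mps + HCLGE_DEFAULT_DV,
			 pfc_num * mps + (tc_num - pfc_num) * mps / 2 + mps);
	/* the layout is accepted iff */
	rx_priv_total + shared_std < pkt_buf_size - tx_size;

Everything left over becomes the shared buffer, with per-TC thresholds of one or two maximum-sized frames depending on whether PFC is enabled for that TC.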
+ + hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, + sizeof(u16), GFP_KERNEL); + if (!hdev->vector_status) + return -ENOMEM; + + for (i = 0; i < hdev->num_msi; i++) { + hdev->msix_entries[i].entry = i; + hdev->vector_status[i] = HCLGE_INVALID_VPORT; + } + + hdev->num_msi_left = hdev->num_msi; + hdev->base_msi_vector = hdev->pdev->irq; + hdev->roce_base_vector = hdev->base_msi_vector + + HCLGE_ROCE_VECTOR_OFFSET; + + ret = pci_enable_msix_range(hdev->pdev, hdev->msix_entries, + hdev->num_msi, hdev->num_msi); + if (ret < 0) { + dev_info(&hdev->pdev->dev, + "MSI-X vector alloc failed: %d\n", ret); + return ret; + } + + return 0; +} + +static int hclge_init_msi(struct hclge_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + int vectors; + int i; + + hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, + sizeof(u16), GFP_KERNEL); + if (!hdev->vector_status) + return -ENOMEM; + + for (i = 0; i < hdev->num_msi; i++) + hdev->vector_status[i] = HCLGE_INVALID_VPORT; + + vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, PCI_IRQ_MSI); + if (vectors < 0) { + dev_err(&pdev->dev, "MSI vectors enable failed %d\n", vectors); + return -EINVAL; + } + hdev->num_msi = vectors; + hdev->num_msi_left = vectors; + hdev->base_msi_vector = pdev->irq; + hdev->roce_base_vector = hdev->base_msi_vector + + HCLGE_ROCE_VECTOR_OFFSET; + + return 0; +} + +static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed) +{ + struct hclge_mac *mac = &hdev->hw.mac; + + if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M)) + mac->duplex = (u8)duplex; + else + mac->duplex = HCLGE_MAC_FULL; + + mac->speed = speed; +} + +int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) +{ + struct hclge_config_mac_speed_dup *req; + struct hclge_desc desc; + int ret; + + req = (struct hclge_config_mac_speed_dup *)desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false); + + hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex); + + switch (speed) { + case HCLGE_MAC_SPEED_10M: + hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 6); + break; + case HCLGE_MAC_SPEED_100M: + hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 7); + break; + case HCLGE_MAC_SPEED_1G: + hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 0); + break; + case HCLGE_MAC_SPEED_10G: + hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 1); + break; + case HCLGE_MAC_SPEED_25G: + hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 2); + break; + case HCLGE_MAC_SPEED_40G: + hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 3); + break; + case HCLGE_MAC_SPEED_50G: + hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 4); + break; + case HCLGE_MAC_SPEED_100G: + hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 5); + break; + default: + dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); + return -EINVAL; + } + + hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, + 1); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "mac speed/duplex config cmd failed %d.\n", ret); + return ret; + } + + hclge_check_speed_dup(hdev, duplex, speed); + + return 0; +} + +static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed, + u8 duplex) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return
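The switch above writes the same firmware speed encoding that hclge_parse_speed() decodes earlier in this file; the two tables must stay in sync:

	fw code :   0    1    2    3    4    5    6    7
	speed   :  1G  10G  25G  40G  50G 100G  10M 100M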
hclge_cfg_mac_speed_dup(hdev, speed, duplex); +} + +static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed, + u8 *duplex) +{ + struct hclge_query_an_speed_dup *req; + struct hclge_desc desc; + int speed_tmp; + int ret; + + req = (struct hclge_query_an_speed_dup *)desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "mac speed/autoneg/duplex query cmd failed %d\n", + ret); + return ret; + } + + *duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B); + speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M, + HCLGE_QUERY_SPEED_S); + + ret = hclge_parse_speed(speed_tmp, speed); + if (ret) { + dev_err(&hdev->pdev->dev, + "could not parse speed(=%d), %d\n", speed_tmp, ret); + return -EIO; + } + + return 0; +} + +static int hclge_query_autoneg_result(struct hclge_dev *hdev) +{ + struct hclge_mac *mac = &hdev->hw.mac; + struct hclge_query_an_speed_dup *req; + struct hclge_desc desc; + int ret; + + req = (struct hclge_query_an_speed_dup *)desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "autoneg result query cmd failed %d.\n", ret); + return ret; + } + + mac->autoneg = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_AN_B); + + return 0; +} + +static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) +{ + struct hclge_config_auto_neg *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false); + + req = (struct hclge_config_auto_neg *)desc.data; + hnae_set_bit(req->cfg_an_cmd_flag, HCLGE_MAC_CFG_AN_EN_B, !!enable); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n", + ret); + return ret; + } + + return 0; +} + +static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hclge_set_autoneg_en(hdev, enable); +} + +static int hclge_get_autoneg(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + hclge_query_autoneg_result(hdev); + + return hdev->hw.mac.autoneg; +} + +static int hclge_mac_init(struct hclge_dev *hdev) +{ + struct hclge_mac *mac = &hdev->hw.mac; + int ret; + + ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL); + if (ret) { + dev_err(&hdev->pdev->dev, + "Config mac speed dup fail ret=%d\n", ret); + return ret; + } + + mac->link = 0; + + ret = hclge_mac_mdio_config(hdev); + if (ret) { + dev_warn(&hdev->pdev->dev, + "mdio config fail ret=%d\n", ret); + return ret; + } + + /* Initialize the MTA table work mode */ + hdev->accept_mta_mc = true; + hdev->enable_mta = true; + hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36; + + ret = hclge_set_mta_filter_mode(hdev, + hdev->mta_mac_sel_type, + hdev->enable_mta); + if (ret) { + dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n", + ret); + return ret; + } + + return hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc); +} + +static void hclge_task_schedule(struct hclge_dev *hdev) +{ + if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) && + !test_bit(HCLGE_STATE_REMOVING, &hdev->state) && + !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) + (void)schedule_work(&hdev->service_task); +} + +static int 
hclge_get_mac_link_status(struct hclge_dev *hdev) +{ + struct hclge_link_status *req; + struct hclge_desc desc; + int link_status; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n", + ret); + return ret; + } + + req = (struct hclge_link_status *)desc.data; + link_status = req->status & HCLGE_LINK_STATUS; + + return !!link_status; +} + +static int hclge_get_mac_phy_link(struct hclge_dev *hdev) +{ + int mac_state; + int link_stat; + + mac_state = hclge_get_mac_link_status(hdev); + + if (hdev->hw.mac.phydev) { + if (!genphy_read_status(hdev->hw.mac.phydev)) + link_stat = mac_state & + hdev->hw.mac.phydev->link; + else + link_stat = 0; + + } else { + link_stat = mac_state; + } + + return !!link_stat; +} + +static void hclge_update_link_status(struct hclge_dev *hdev) +{ + struct hnae3_client *client = hdev->nic_client; + struct hnae3_handle *handle; + int state; + int i; + + if (!client) + return; + state = hclge_get_mac_phy_link(hdev); + if (state != hdev->hw.mac.link) { + for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { + handle = &hdev->vport[i].nic; + client->ops->link_status_change(handle, state); + } + hdev->hw.mac.link = state; + } +} + +static int hclge_update_speed_duplex(struct hclge_dev *hdev) +{ + struct hclge_mac mac = hdev->hw.mac; + u8 duplex; + int speed; + int ret; + + /* get the speed and duplex as the autoneg result from the mac cmd + * when no phy device exists. + */ + if (mac.phydev) + return 0; + + /* update mac->autoneg. */ + ret = hclge_query_autoneg_result(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "autoneg result query failed %d\n", ret); + return ret; + } + + if (!mac.autoneg) + return 0; + + ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex); + if (ret) { + dev_err(&hdev->pdev->dev, + "mac autoneg/speed/duplex query failed %d\n", ret); + return ret; + } + + if ((mac.speed != speed) || (mac.duplex != duplex)) { + ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex); + if (ret) { + dev_err(&hdev->pdev->dev, + "mac speed/duplex config failed %d\n", ret); + return ret; + } + } + + return 0; +} + +static int hclge_update_speed_duplex_h(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hclge_update_speed_duplex(hdev); +} + +static int hclge_get_status(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + hclge_update_link_status(hdev); + + return hdev->hw.mac.link; +} + +static void hclge_service_timer(unsigned long data) +{ + struct hclge_dev *hdev = (struct hclge_dev *)data; + (void)mod_timer(&hdev->service_timer, jiffies + HZ); + + hclge_task_schedule(hdev); +} + +static void hclge_service_complete(struct hclge_dev *hdev) +{ + WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)); + + /* Flush memory before next watchdog */ + smp_mb__before_atomic(); + clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); +} + +static void hclge_service_task(struct work_struct *work) +{ + struct hclge_dev *hdev = + container_of(work, struct hclge_dev, service_task); + + hclge_update_speed_duplex(hdev); + hclge_update_link_status(hdev); + hclge_update_stats_for_all(hdev); + hclge_service_complete(hdev); +} + +static void hclge_disable_sriov(struct hclge_dev *hdev) +{ + /* If our VFs are assigned we cannot shut down SR-IOV + * without causing issues, so just leave the hardware + * available but disabled + */ + if (pci_vfs_assigned(hdev->pdev)) { + dev_warn(&hdev->pdev->dev, + "disabling driver while VFs are assigned\n"); + return; + } + + pci_disable_sriov(hdev->pdev); +} + +struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) +{ + /* VF handle has no client */ + if (!handle->client) + return container_of(handle, struct hclge_vport, nic); + else if (handle->client->type == HNAE3_CLIENT_ROCE) + return container_of(handle, struct hclge_vport, roce); + else + return container_of(handle, struct hclge_vport, nic); +} + +static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num, + struct hnae3_vector_info *vector_info) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hnae3_vector_info *vector = vector_info; + struct hclge_dev *hdev = vport->back; + int alloc = 0; + int i, j; + + vector_num = min(hdev->num_msi_left, vector_num); + + for (j = 0; j < vector_num; j++) { + for (i = 1; i < hdev->num_msi; i++) { + if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) { + vector->vector = pci_irq_vector(hdev->pdev, i); + vector->io_addr = hdev->hw.io_base + + HCLGE_VECTOR_REG_BASE + + (i - 1) * HCLGE_VECTOR_REG_OFFSET + + vport->vport_id * + HCLGE_VECTOR_VF_OFFSET; + hdev->vector_status[i] = vport->vport_id; + + vector++; + alloc++; + + break; + } + } + } + hdev->num_msi_left -= alloc; + hdev->num_msi_used += alloc; + + return alloc; +} + +static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) +{ + int i; + + for (i = 0; i < hdev->num_msi; i++) { + if (hdev->msix_entries) { + if (vector == hdev->msix_entries[i].vector) + return i; + } else { + if (vector == (hdev->base_msi_vector + i)) + return i; + } + } + return -EINVAL; +} + +static u32 hclge_get_rss_key_size(struct hnae3_handle *handle) +{ + return HCLGE_RSS_KEY_SIZE; +} + +static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle) +{ + return HCLGE_RSS_IND_TBL_SIZE; +} + +static int hclge_get_rss_algo(struct hclge_dev *hdev) +{ + struct hclge_rss_config *req; + struct hclge_desc desc; + int rss_hash_algo; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, true); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get rss algo fail, status = %d\n", ret); + return ret; + } + + req = (struct hclge_rss_config *)desc.data; + rss_hash_algo = (req->hash_config & HCLGE_RSS_HASH_ALGO_MASK); + + if (rss_hash_algo == HCLGE_RSS_HASH_ALGO_TOEPLITZ) + return ETH_RSS_HASH_TOP; + + return -EINVAL; +} + +static int hclge_set_rss_algo_key(struct hclge_dev *hdev, + const u8 hfunc, const u8 *key) +{ + struct hclge_rss_config *req; + struct hclge_desc desc; + int key_offset; + int key_size; + int ret; + + req = (struct hclge_rss_config *)desc.data; + + for (key_offset = 0; key_offset < 3; key_offset++) { + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, + false); + + req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK); + req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B); + + if (key_offset == 2) + key_size = + HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2; + else + key_size = HCLGE_RSS_HASH_KEY_NUM; + + memcpy(req->hash_key, + key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Configure RSS config fail, status = %d\n", + ret); + return ret; + } + } + return 0; +} + +static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u32 *indir) +{ + struct hclge_rss_indirection_table *req; + struct hclge_desc desc; + int i, j; + int ret; + + req = (struct hclge_rss_indirection_table *)desc.data; + + for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) { + hclge_cmd_setup_basic_desc + (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false); + + req->start_table_index = i * HCLGE_RSS_CFG_TBL_SIZE; + req->rss_set_bitmap = HCLGE_RSS_SET_BITMAP_MSK; + + for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) + req->rss_result[j] = + indir[i * HCLGE_RSS_CFG_TBL_SIZE + j]; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Configure rss indir table fail, status = %d\n", + ret); + return ret; + } + } + return 0; +} + +static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, + u16 *tc_size, u16 *tc_offset) +{ + struct hclge_rss_tc_mode *req; + struct hclge_desc desc; + int ret; + int i; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false); + req = (struct hclge_rss_tc_mode *)desc.data; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + hnae_set_bit(req->rss_tc_mode[i], HCLGE_RSS_TC_VALID_B, + (tc_valid[i] & 0x1)); + hnae_set_field(req->rss_tc_mode[i], HCLGE_RSS_TC_SIZE_M, + HCLGE_RSS_TC_SIZE_S, tc_size[i]); + hnae_set_field(req->rss_tc_mode[i], HCLGE_RSS_TC_OFFSET_M, + HCLGE_RSS_TC_OFFSET_S, tc_offset[i]); + } + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Configure rss tc mode fail, status = %d\n", ret); + return ret; + } + + return 0; +} + +static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) +{ +#define HCLGE_RSS_INPUT_TUPLE_OTHER 0xf +#define HCLGE_RSS_INPUT_TUPLE_SCTP 0x1f + struct hclge_rss_input_tuple *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); + + req = (struct hclge_rss_input_tuple *)desc.data; + req->ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; + req->ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; + req->ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP; + req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; + req->ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; + req->ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; + req->ipv6_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP; + req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Configure rss input fail, status = %d\n", ret); + return ret; + } + + return 0; +} + +static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, + u8 *key, u8 *hfunc) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int i; + + /* Get hash algorithm */ + if (hfunc) + *hfunc = hclge_get_rss_algo(hdev); + + /* Get the RSS Key required by the user */ + if (key) + memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE); + + /* Get indirect table */ + if (indir) + for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) + indir[i] = vport->rss_indirection_tbl[i]; + + return 0; +} + +static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u8 hash_algo; + int ret, i; + + /* Set the RSS Hash Key if specified by the user */ + if (key) { + /* Update the shadow RSS key with the user-specified key */ + memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE); + + if (hfunc == ETH_RSS_HASH_TOP || + hfunc == ETH_RSS_HASH_NO_CHANGE) + hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; + else + return -EINVAL; + ret = hclge_set_rss_algo_key(hdev,
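hclge_set_rss_algo_key() above pushes the hash key to hardware in three descriptors, each repeating the algorithm together with its key_offset. Assuming the usual HCLGE_RSS_KEY_SIZE = 40 and HCLGE_RSS_HASH_KEY_NUM = 16 (both defined in the companion header, not in this hunk), the chunk sizes work out to:

	key_offset 0: 16 bytes
	key_offset 1: 16 bytes
	key_offset 2: 40 - 2 * 16 = 8 bytes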
hash_algo, key); + if (ret) + return ret; + } + + /* Update the shadow RSS table with user specified qids */ + for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) + vport->rss_indirection_tbl[i] = indir[i]; + + /* Update the hardware */ + ret = hclge_set_rss_indir_table(hdev, indir); + return ret; +} + +static int hclge_get_tc_size(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hdev->rss_size_max; +} + +static int hclge_rss_init_hw(struct hclge_dev *hdev) +{ + const u8 hfunc = HCLGE_RSS_HASH_ALGO_TOEPLITZ; + struct hclge_vport *vport = hdev->vport; + u16 tc_offset[HCLGE_MAX_TC_NUM]; + u8 rss_key[HCLGE_RSS_KEY_SIZE]; + u16 tc_valid[HCLGE_MAX_TC_NUM]; + u16 tc_size[HCLGE_MAX_TC_NUM]; + u32 *rss_indir = NULL; + const u8 *key; + int i, ret, j; + + rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); + if (!rss_indir) + return -ENOMEM; + + /* Get default RSS key */ + netdev_rss_key_fill(rss_key, HCLGE_RSS_KEY_SIZE); + + /* Initialize RSS indirect table for each vport */ + for (j = 0; j < hdev->num_vmdq_vport + 1; j++) { + for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) { + vport[j].rss_indirection_tbl[i] = + i % hdev->rss_size_max; + rss_indir[i] = vport[j].rss_indirection_tbl[i]; + } + } + ret = hclge_set_rss_indir_table(hdev, rss_indir); + if (ret) + goto err; + + key = rss_key; + ret = hclge_set_rss_algo_key(hdev, hfunc, key); + if (ret) + goto err; + + ret = hclge_set_rss_input_tuple(hdev); + if (ret) + goto err; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + if (hdev->hw_tc_map & BIT(i)) + tc_valid[i] = 1; + else + tc_valid[i] = 0; + + switch (hdev->rss_size_max) { + case HCLGE_RSS_TC_SIZE_0: + tc_size[i] = 0; + break; + case HCLGE_RSS_TC_SIZE_1: + tc_size[i] = 1; + break; + case HCLGE_RSS_TC_SIZE_2: + tc_size[i] = 2; + break; + case HCLGE_RSS_TC_SIZE_3: + tc_size[i] = 3; + break; + case HCLGE_RSS_TC_SIZE_4: + tc_size[i] = 4; + break; + case HCLGE_RSS_TC_SIZE_5: + tc_size[i] = 5; + break; + case HCLGE_RSS_TC_SIZE_6: + tc_size[i] = 6; + break; + case HCLGE_RSS_TC_SIZE_7: + tc_size[i] = 7; + break; + default: + break; + } + tc_offset[i] = hdev->rss_size_max * i; + } + ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); + +err: + kfree(rss_indir); + + return ret; +} + +int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id, + struct hnae3_ring_chain_node *ring_chain) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_ctrl_vector_chain *req; + struct hnae3_ring_chain_node *node; + struct hclge_desc desc; + int ret; + int i; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ADD_RING_TO_VECTOR, false); + + req = (struct hclge_ctrl_vector_chain *)desc.data; + req->int_vector_id = vector_id; + + i = 0; + for (node = ring_chain; node; node = node->next) { + hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_TYPE_M, + HCLGE_INT_TYPE_S, + hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); + hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M, + HCLGE_TQP_ID_S, node->tqp_index); + req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]); + + if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { + req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Map TQP fail, status is %d.\n", + ret); + return ret; + } + i = 0; + + hclge_cmd_setup_basic_desc(&desc, + HCLGE_OPC_ADD_RING_TO_VECTOR, + false); + req->int_vector_id = vector_id; + } + } + + if (i > 0) { + req->int_cause_num = i; + + ret = 
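/* flush the remaining i < HCLGE_VECTOR_ELEMENTS_PER_CMD mappings; e.g. with (an assumed) four elements per command, chaining five rings sends one full descriptor in the loop above and a tail of one here */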
hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Map TQP fail, status is %d.\n", ret); + return ret; + } + } + + return 0; +} + +int hclge_map_handle_ring_to_vector(struct hnae3_handle *handle, + int vector, + struct hnae3_ring_chain_node *ring_chain) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int vector_id; + + vector_id = hclge_get_vector_index(hdev, vector); + if (vector_id < 0) { + dev_err(&hdev->pdev->dev, + "Get vector index fail. ret =%d\n", vector_id); + return vector_id; + } + + return hclge_map_vport_ring_to_vector(vport, vector_id, ring_chain); +} + +static int hclge_unmap_ring_from_vector( + struct hnae3_handle *handle, int vector, + struct hnae3_ring_chain_node *ring_chain) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_ctrl_vector_chain *req; + struct hnae3_ring_chain_node *node; + struct hclge_desc desc; + int i, vector_id; + int ret; + + vector_id = hclge_get_vector_index(hdev, vector); + if (vector_id < 0) { + dev_err(&handle->pdev->dev, + "Get vector index fail. ret =%d\n", vector_id); + return vector_id; + } + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_DEL_RING_TO_VECTOR, false); + + req = (struct hclge_ctrl_vector_chain *)desc.data; + req->int_vector_id = vector_id; + + i = 0; + for (node = ring_chain; node; node = node->next) { + hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_TYPE_M, + HCLGE_INT_TYPE_S, + hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); + hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M, + HCLGE_TQP_ID_S, node->tqp_index); + + req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]); + + if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { + req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Unmap TQP fail, status is %d.\n", + ret); + return ret; + } + i = 0; + hclge_cmd_setup_basic_desc(&desc, + HCLGE_OPC_DEL_RING_TO_VECTOR, + false); + req->int_vector_id = vector_id; + } + } + + if (i > 0) { + req->int_cause_num = i; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Unmap TQP fail, status is %d.\n", ret); + return ret; + } + } + + return 0; +} + +int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, + struct hclge_promisc_param *param) +{ + struct hclge_promisc_cfg *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false); + + req = (struct hclge_promisc_cfg *)desc.data; + req->vf_id = param->vf_id; + req->flag = (param->enable << HCLGE_PROMISC_EN_B); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Set promisc mode fail, status is %d.\n", ret); + return ret; + } + return 0; +} + +void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, + bool en_mc, bool en_bc, int vport_id) +{ + if (!param) + return; + + memset(param, 0, sizeof(struct hclge_promisc_param)); + if (en_uc) + param->enable = HCLGE_PROMISC_EN_UC; + if (en_mc) + param->enable |= HCLGE_PROMISC_EN_MC; + if (en_bc) + param->enable |= HCLGE_PROMISC_EN_BC; + param->vf_id = vport_id; +} + +static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_promisc_param param; + + hclge_promisc_param_init(&param, en, en, true, vport->vport_id); + 
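/* e.g. hclge_promisc_param_init(&param, true, false, true, 0) sets + * enable to HCLGE_PROMISC_EN_UC | HCLGE_PROMISC_EN_BC for vport 0; + * broadcast is always left on in this path + */ +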
hclge_cmd_set_promisc_mode(hdev, &param); +} + +static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) +{ + struct hclge_desc desc; + struct hclge_config_mac_mode *req = + (struct hclge_config_mac_mode *)desc.data; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); + hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_TX_EN_B, enable); + hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_RX_EN_B, enable); + hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_PAD_TX_B, enable); + hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_PAD_RX_B, enable); + hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_1588_TX_B, 0); + hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_1588_RX_B, 0); + hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_APP_LP_B, 0); + hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_LINE_LP_B, 0); + hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_FCS_TX_B, enable); + hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_RX_FCS_B, enable); + hnae_set_bit(req->txrx_pad_fcs_loop_en, + HCLGE_MAC_RX_FCS_STRIP_B, enable); + hnae_set_bit(req->txrx_pad_fcs_loop_en, + HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable); + hnae_set_bit(req->txrx_pad_fcs_loop_en, + HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable); + hnae_set_bit(req->txrx_pad_fcs_loop_en, + HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "mac enable fail, ret =%d.\n", ret); +} + +static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id, + int stream_id, bool enable) +{ + struct hclge_desc desc; + struct hclge_cfg_com_tqp_queue *req = + (struct hclge_cfg_com_tqp_queue *)desc.data; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); + req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK); + req->stream_id = cpu_to_le16(stream_id); + req->enable |= enable << HCLGE_TQP_ENABLE_B; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "Tqp enable fail, status =%d.\n", ret); + return ret; +} + +static void hclge_reset_tqp_stats(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hnae3_queue *queue; + struct hclge_tqp *tqp; + int i; + + for (i = 0; i < vport->alloc_tqps; i++) { + queue = handle->kinfo.tqp[i]; + tqp = container_of(queue, struct hclge_tqp, q); + memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); + } +} + +static int hclge_ae_start(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int i, queue_id, ret; + + for (i = 0; i < vport->alloc_tqps; i++) { + /* todo clear interrupt */ + /* ring enable */ + queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]); + if (queue_id < 0) { + dev_warn(&hdev->pdev->dev, + "Get invalid queue id, ignore it\n"); + continue; + } + + hclge_tqp_enable(hdev, queue_id, 0, true); + } + /* mac enable */ + hclge_cfg_mac_mode(hdev, true); + clear_bit(HCLGE_STATE_DOWN, &hdev->state); + (void)mod_timer(&hdev->service_timer, jiffies + HZ); + + ret = hclge_mac_start_phy(hdev); + if (ret) + return ret; + + /* reset tqp stats */ + hclge_reset_tqp_stats(handle); + + return 0; +} + +static void hclge_ae_stop(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int i, queue_id; + + for (i = 0; i < vport->alloc_tqps; i++) { + /* Ring disable */ + queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]); + if (queue_id < 0) { + 
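/* as in hclge_ae_start() above, skip an invalid id but still + * disable the remaining rings and the MAC below + */ +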
dev_warn(&hdev->pdev->dev, + "Get invalid queue id, ignore it\n"); + continue; + } + + hclge_tqp_enable(hdev, queue_id, 0, false); + } + /* Mac disable */ + hclge_cfg_mac_mode(hdev, false); + + hclge_mac_stop_phy(hdev); + + /* reset tqp stats */ + hclge_reset_tqp_stats(handle); +} + +static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, + u16 cmdq_resp, u8 resp_code, + enum hclge_mac_vlan_tbl_opcode op) +{ + struct hclge_dev *hdev = vport->back; + int return_status = -EIO; + + if (cmdq_resp) { + dev_err(&hdev->pdev->dev, + "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n", + cmdq_resp); + return -EIO; + } + + if (op == HCLGE_MAC_VLAN_ADD) { + if ((!resp_code) || (resp_code == 1)) { + return_status = 0; + } else if (resp_code == 2) { + return_status = -EIO; + dev_err(&hdev->pdev->dev, + "add mac addr failed for uc_overflow.\n"); + } else if (resp_code == 3) { + return_status = -EIO; + dev_err(&hdev->pdev->dev, + "add mac addr failed for mc_overflow.\n"); + } else { + dev_err(&hdev->pdev->dev, + "add mac addr failed for undefined, code=%d.\n", + resp_code); + } + } else if (op == HCLGE_MAC_VLAN_REMOVE) { + if (!resp_code) { + return_status = 0; + } else if (resp_code == 1) { + return_status = -EIO; + dev_dbg(&hdev->pdev->dev, + "remove mac addr failed for miss.\n"); + } else { + dev_err(&hdev->pdev->dev, + "remove mac addr failed for undefined, code=%d.\n", + resp_code); + } + } else if (op == HCLGE_MAC_VLAN_LKUP) { + if (!resp_code) { + return_status = 0; + } else if (resp_code == 1) { + return_status = -EIO; + dev_dbg(&hdev->pdev->dev, + "lookup mac addr failed for miss.\n"); + } else { + dev_err(&hdev->pdev->dev, + "lookup mac addr failed for undefined, code=%d.\n", + resp_code); + } + } else { + return_status = -EIO; + dev_err(&hdev->pdev->dev, + "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n", + op); + } + + return return_status; +} + +static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) +{ + int word_num; + int bit_num; + + if (vfid > 255 || vfid < 0) + return -EIO; + + if (vfid >= 0 && vfid <= 191) { + word_num = vfid / 32; + bit_num = vfid % 32; + if (clr) + desc[1].data[word_num] &= ~(1 << bit_num); + else + desc[1].data[word_num] |= (1 << bit_num); + } else { + word_num = (vfid - 192) / 32; + bit_num = vfid % 32; + if (clr) + desc[2].data[word_num] &= ~(1 << bit_num); + else + desc[2].data[word_num] |= (1 << bit_num); + } + + return 0; +} + +static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) +{ +#define HCLGE_DESC_NUMBER 3 +#define HCLGE_FUNC_NUMBER_PER_DESC 6 + int i, j; + + for (i = 0; i < HCLGE_DESC_NUMBER; i++) + for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++) + if (desc[i].data[j]) + return false; + + return true; +} + +static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry *new_req, + const u8 *addr) +{ + const unsigned char *mac_addr = addr; + u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) | + (mac_addr[0]) | (mac_addr[1] << 8); + u32 low_val = mac_addr[4] | (mac_addr[5] << 8); + + new_req->mac_addr_hi32 = cpu_to_le32(high_val); + new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); +} + +u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport, + const u8 *addr) +{ + u16 high_val = addr[1] | (addr[0] << 8); + struct hclge_dev *hdev = vport->back; + u32 rsh = 4 - hdev->mta_mac_sel_type; + u16 ret_val = (high_val >> rsh) & 0xfff; + + return ret_val; +} + +static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, + enum hclge_mta_dmac_sel_type mta_mac_sel, + bool 
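/* note on hclge_get_mac_addr_to_mta_index() above: with the default HCLGE_MAC_ADDR_47_36 selector, rsh is 4 and the index is bits 47..36 of the address, e.g. addr 01:00:5e:xx:xx:xx gives ((0x01 << 8 | 0x00) >> 4) & 0xfff = 0x010 */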
enable) +{ + struct hclge_mta_filter_mode *req; + struct hclge_desc desc; + int ret; + + req = (struct hclge_mta_filter_mode *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false); + + hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B, + enable); + hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M, + HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Config mta filter mode failed for cmd_send, ret =%d.\n", + ret); + return ret; + } + + return 0; +} + +int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, + u8 func_id, + bool enable) +{ + struct hclge_cfg_func_mta_filter *req; + struct hclge_desc desc; + int ret; + + req = (struct hclge_cfg_func_mta_filter *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false); + + hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B, + enable); + req->function_id = func_id; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Config func_id enable failed for cmd_send, ret =%d.\n", + ret); + return ret; + } + + return 0; +} + +static int hclge_set_mta_table_item(struct hclge_vport *vport, + u16 idx, + bool enable) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_cfg_func_mta_item *req; + struct hclge_desc desc; + int ret; + + req = (struct hclge_cfg_func_mta_item *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false); + hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable); + + hnae_set_field(req->item_idx, HCLGE_CFG_MTA_ITEM_IDX_M, + HCLGE_CFG_MTA_ITEM_IDX_S, idx); + req->item_idx = cpu_to_le16(req->item_idx); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Config mta table item failed for cmd_send, ret =%d.\n", + ret); + return ret; + } + + return 0; +} + +static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, + struct hclge_mac_vlan_tbl_entry *req) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + u8 resp_code; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false); + + memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry)); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "del mac addr failed for cmd_send, ret =%d.\n", + ret); + return ret; + } + resp_code = (desc.data[0] >> 8) & 0xff; + + return hclge_get_mac_vlan_cmd_status(vport, desc.retval, resp_code, + HCLGE_MAC_VLAN_REMOVE); +} + +static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, + struct hclge_mac_vlan_tbl_entry *req, + struct hclge_desc *desc, + bool is_mc) +{ + struct hclge_dev *hdev = vport->back; + u8 resp_code; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true); + if (is_mc) { + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + memcpy(desc[0].data, + req, + sizeof(struct hclge_mac_vlan_tbl_entry)); + hclge_cmd_setup_basic_desc(&desc[1], + HCLGE_OPC_MAC_VLAN_ADD, + true); + desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[2], + HCLGE_OPC_MAC_VLAN_ADD, + true); + ret = hclge_cmd_send(&hdev->hw, desc, 3); + } else { + memcpy(desc[0].data, + req, + sizeof(struct hclge_mac_vlan_tbl_entry)); + ret = hclge_cmd_send(&hdev->hw, desc, 1); + } + if (ret) { + dev_err(&hdev->pdev->dev, + "lookup mac addr failed for cmd_send, ret =%d.\n", + ret); + return ret; + } + resp_code = (desc[0].data[0] >> 8) & 0xff; + + return 
hclge_get_mac_vlan_cmd_status(vport, desc[0].retval, resp_code, + HCLGE_MAC_VLAN_LKUP); +} + +static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, + struct hclge_mac_vlan_tbl_entry *req, + struct hclge_desc *mc_desc) +{ + struct hclge_dev *hdev = vport->back; + int cfg_status; + u8 resp_code; + int ret; + + if (!mc_desc) { + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, + HCLGE_OPC_MAC_VLAN_ADD, + false); + memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry)); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + resp_code = (desc.data[0] >> 8) & 0xff; + cfg_status = hclge_get_mac_vlan_cmd_status(vport, desc.retval, + resp_code, + HCLGE_MAC_VLAN_ADD); + } else { + mc_desc[0].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR); + mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + mc_desc[1].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR); + mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR); + mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT); + memcpy(mc_desc[0].data, req, + sizeof(struct hclge_mac_vlan_tbl_entry)); + ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); + resp_code = (mc_desc[0].data[0] >> 8) & 0xff; + cfg_status = hclge_get_mac_vlan_cmd_status(vport, + mc_desc[0].retval, + resp_code, + HCLGE_MAC_VLAN_ADD); + } + + if (ret) { + dev_err(&hdev->pdev->dev, + "add mac addr failed for cmd_send, ret =%d.\n", + ret); + return ret; + } + + return cfg_status; +} + +static int hclge_add_uc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_add_uc_addr_common(vport, addr); +} + +int hclge_add_uc_addr_common(struct hclge_vport *vport, + const unsigned char *addr) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_mac_vlan_tbl_entry req; + enum hclge_cmd_status status; + + /* mac addr check */ + if (is_zero_ether_addr(addr) || + is_broadcast_ether_addr(addr) || + is_multicast_ether_addr(addr)) { + dev_err(&hdev->pdev->dev, + "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n", + addr, + is_zero_ether_addr(addr), + is_broadcast_ether_addr(addr), + is_multicast_ether_addr(addr)); + return -EINVAL; + } + + memset(&req, 0, sizeof(req)); + hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0); + hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae_set_bit(req.egress_port, + HCLGE_MAC_EPORT_SW_EN_B, 0); + hnae_set_bit(req.egress_port, + HCLGE_MAC_EPORT_TYPE_B, 0); + hnae_set_field(req.egress_port, HCLGE_MAC_EPORT_VFID_M, + HCLGE_MAC_EPORT_VFID_S, vport->vport_id); + hnae_set_field(req.egress_port, HCLGE_MAC_EPORT_PFID_M, + HCLGE_MAC_EPORT_PFID_S, 0); + req.egress_port = cpu_to_le16(req.egress_port); + + hclge_prepare_mac_addr(&req, addr); + + status = hclge_add_mac_vlan_tbl(vport, &req, NULL); + + return status; +} + +static int hclge_rm_uc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_rm_uc_addr_common(vport, addr); +} + +int hclge_rm_uc_addr_common(struct hclge_vport *vport, + const unsigned char *addr) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_mac_vlan_tbl_entry req; + enum hclge_cmd_status status; + + /* mac addr check */ + if (is_zero_ether_addr(addr) || + is_broadcast_ether_addr(addr) || + is_multicast_ether_addr(addr)) { + dev_dbg(&hdev->pdev->dev, + "Remove mac err! 
invalid mac:%pM.\n", + addr); + return -EINVAL; + } + + memset(&req, 0, sizeof(req)); + hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hclge_prepare_mac_addr(&req, addr); + status = hclge_remove_mac_vlan_tbl(vport, &req); + + return status; +} + +static int hclge_add_mc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_add_mc_addr_common(vport, addr); +} + +int hclge_add_mc_addr_common(struct hclge_vport *vport, + const unsigned char *addr) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_mac_vlan_tbl_entry req; + struct hclge_desc desc[3]; + u16 tbl_idx; + int status; + + /* mac addr check */ + if (!is_multicast_ether_addr(addr)) { + dev_err(&hdev->pdev->dev, + "Add mc mac err! invalid mac:%pM.\n", + addr); + return -EINVAL; + } + memset(&req, 0, sizeof(req)); + hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); + hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hclge_prepare_mac_addr(&req, addr); + status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); + if (!status) { + /* This mac addr exist, update VFID for it */ + hclge_update_desc_vfid(desc, vport->vport_id, false); + status = hclge_add_mac_vlan_tbl(vport, &req, desc); + } else { + /* This mac addr do not exist, add new entry for it */ + memset(desc[0].data, 0, sizeof(desc[0].data)); + memset(desc[1].data, 0, sizeof(desc[0].data)); + memset(desc[2].data, 0, sizeof(desc[0].data)); + hclge_update_desc_vfid(desc, vport->vport_id, false); + status = hclge_add_mac_vlan_tbl(vport, &req, desc); + } + + /* Set MTA table for this MAC address */ + tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr); + status = hclge_set_mta_table_item(vport, tbl_idx, true); + + return status; +} + +static int hclge_rm_mc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_rm_mc_addr_common(vport, addr); +} + +int hclge_rm_mc_addr_common(struct hclge_vport *vport, + const unsigned char *addr) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_mac_vlan_tbl_entry req; + enum hclge_cmd_status status; + struct hclge_desc desc[3]; + u16 tbl_idx; + + /* mac addr check */ + if (!is_multicast_ether_addr(addr)) { + dev_dbg(&hdev->pdev->dev, + "Remove mc mac err! 
invalid mac:%pM.\n", + addr); + return -EINVAL; + } + + memset(&req, 0, sizeof(req)); + hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); + hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hclge_prepare_mac_addr(&req, addr); + status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); + if (!status) { + /* This mac addr exists, remove this handle's VFID for it */ + hclge_update_desc_vfid(desc, vport->vport_id, true); + + if (hclge_is_all_function_id_zero(desc)) + /* All the vfids are zero, so delete this entry */ + status = hclge_remove_mac_vlan_tbl(vport, &req); + else + /* Not all the vfids are zero, just update the vfid */ + status = hclge_add_mac_vlan_tbl(vport, &req, desc); + + } else { + /* This mac addr does not exist, so it can't be deleted */ + dev_err(&hdev->pdev->dev, + "Rm multicast mac addr failed, ret = %d.\n", + status); + return -EIO; + } + + /* Set MTA table for this MAC address */ + tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr); + status = hclge_set_mta_table_item(vport, tbl_idx, false); + + return status; +} + +static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + ether_addr_copy(p, hdev->hw.mac.mac_addr); +} + +static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p) +{ + const unsigned char *new_addr = (const unsigned char *)p; + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + /* mac addr check */ + if (is_zero_ether_addr(new_addr) || + is_broadcast_ether_addr(new_addr) || + is_multicast_ether_addr(new_addr)) { + dev_err(&hdev->pdev->dev, + "Change uc mac err! 
invalid mac:%pM.\n", + new_addr); + return -EINVAL; + } + + hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr); + + if (!hclge_add_uc_addr(handle, new_addr)) { + ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); + return 0; + } + + return -EIO; +} + +static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, + bool filter_en) +{ + struct hclge_vlan_filter_ctrl *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false); + + req = (struct hclge_vlan_filter_ctrl *)desc.data; + req->vlan_type = vlan_type; + req->vlan_fe = filter_en; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n", + ret); + return ret; + } + + return 0; +} + +int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, + bool is_kill, u16 vlan, u8 qos, __be16 proto) +{ +#define HCLGE_MAX_VF_BYTES 16 + struct hclge_vlan_filter_vf_cfg *req0; + struct hclge_vlan_filter_vf_cfg *req1; + struct hclge_desc desc[2]; + u8 vf_byte_val; + u8 vf_byte_off; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], + HCLGE_OPC_VLAN_FILTER_VF_CFG, false); + hclge_cmd_setup_basic_desc(&desc[1], + HCLGE_OPC_VLAN_FILTER_VF_CFG, false); + + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + + vf_byte_off = vfid / 8; + vf_byte_val = 1 << (vfid % 8); + + req0 = (struct hclge_vlan_filter_vf_cfg *)desc[0].data; + req1 = (struct hclge_vlan_filter_vf_cfg *)desc[1].data; + + req0->vlan_id = vlan; + req0->vlan_cfg = is_kill; + + if (vf_byte_off < HCLGE_MAX_VF_BYTES) + req0->vf_bitmap[vf_byte_off] = vf_byte_val; + else + req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val; + + ret = hclge_cmd_send(&hdev->hw, desc, 2); + if (ret) { + dev_err(&hdev->pdev->dev, + "Send vf vlan command fail, ret =%d.\n", + ret); + return ret; + } + + if (!is_kill) { + if (!req0->resp_code || req0->resp_code == 1) + return 0; + + dev_err(&hdev->pdev->dev, + "Add vf vlan filter fail, ret =%d.\n", + req0->resp_code); + } else { + if (!req0->resp_code) + return 0; + + dev_err(&hdev->pdev->dev, + "Kill vf vlan filter fail, ret =%d.\n", + req0->resp_code); + } + + return -EIO; +} + +static int hclge_set_port_vlan_filter(struct hnae3_handle *handle, + __be16 proto, u16 vlan_id, + bool is_kill) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_vlan_filter_pf_cfg *req; + struct hclge_desc desc; + u8 vlan_offset_byte_val; + u8 vlan_offset_byte; + u8 vlan_offset_160; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false); + + vlan_offset_160 = vlan_id / 160; + vlan_offset_byte = (vlan_id % 160) / 8; + vlan_offset_byte_val = 1 << (vlan_id % 8); + + req = (struct hclge_vlan_filter_pf_cfg *)desc.data; + req->vlan_offset = vlan_offset_160; + req->vlan_cfg = is_kill; + req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "port vlan command, send fail, ret =%d.\n", + ret); + return ret; + } + + ret = hclge_set_vf_vlan_common(hdev, 0, is_kill, vlan_id, 0, proto); + if (ret) { + dev_err(&hdev->pdev->dev, + "Set pf vlan filter config fail, ret =%d.\n", + ret); + return -EIO; + } + + return 0; +} + +static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, + u16 vlan, u8 qos, __be16 proto) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if ((vfid >= hdev->num_alloc_vfs) || (vlan > 
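/* vf bitmap sketch for hclge_set_vf_vlan_common() above: vfid 10 yields vf_byte_off 1 and vf_byte_val 0x04; vfids of 128 and up (bytes beyond HCLGE_MAX_VF_BYTES) spill into the second descriptor */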
4095) || (qos > 7)) + return -EINVAL; + if (proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; + + return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto); +} + +static int hclge_init_vlan_config(struct hclge_dev *hdev) +{ +#define HCLGE_VLAN_TYPE_VF_TABLE 0 +#define HCLGE_VLAN_TYPE_PORT_TABLE 1 + int ret; + + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE, + true); + if (ret) + return ret; + + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE, + true); + + return ret; +} + +static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_config_max_frm_size *req; + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + int ret; + + if ((new_mtu < HCLGE_MAC_MIN_MTU) || (new_mtu > HCLGE_MAC_MAX_MTU)) + return -EINVAL; + + hdev->mps = new_mtu; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false); + + req = (struct hclge_config_max_frm_size *)desc.data; + req->max_frm_size = cpu_to_le16(new_mtu); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret); + return ret; + } + + return 0; +} + +static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, + bool enable) +{ + struct hclge_reset_tqp_queue *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false); + + req = (struct hclge_reset_tqp_queue *)desc.data; + req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); + hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Send tqp reset cmd error, status =%d\n", ret); + return ret; + } + + return 0; +} + +static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) +{ + struct hclge_reset_tqp_queue *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true); + + req = (struct hclge_reset_tqp_queue *)desc.data; + req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get reset status error, status =%d\n", ret); + return ret; + } + + return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); +} + +static void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int reset_try_times = 0; + int reset_status; + int ret; + + ret = hclge_tqp_enable(hdev, queue_id, 0, false); + if (ret) { + dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret); + return; + } + + ret = hclge_send_reset_tqp_cmd(hdev, queue_id, true); + if (ret) { + dev_warn(&hdev->pdev->dev, + "Send reset tqp cmd fail, ret = %d\n", ret); + return; + } + + reset_try_times = 0; + while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { + /* Wait for tqp hw reset */ + msleep(20); + reset_status = hclge_get_reset_status(hdev, queue_id); + if (reset_status) + break; + } + + if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { + dev_warn(&hdev->pdev->dev, "Reset TQP fail\n"); + return; + } + + ret = hclge_send_reset_tqp_cmd(hdev, queue_id, false); + if (ret) { + dev_warn(&hdev->pdev->dev, + "Deassert the soft reset fail, ret = %d\n", ret); + return; + } +} + +static u32 hclge_get_fw_version(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev 
*hdev = vport->back; + + return hdev->fw_version; +} + +static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, + u32 *rx_en, u32 *tx_en) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + *auto_neg = hclge_get_autoneg(handle); + + if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { + *rx_en = 0; + *tx_en = 0; + return; + } + + if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) { + *rx_en = 1; + *tx_en = 0; + } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) { + *tx_en = 1; + *rx_en = 0; + } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) { + *rx_en = 1; + *tx_en = 1; + } else { + *rx_en = 0; + *tx_en = 0; + } +} + +static void hclge_get_ksettings_an_result(struct hnae3_handle *handle, + u8 *auto_neg, u32 *speed, u8 *duplex) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if (speed) + *speed = hdev->hw.mac.speed; + if (duplex) + *duplex = hdev->hw.mac.duplex; + if (auto_neg) + *auto_neg = hdev->hw.mac.autoneg; +} + +static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if (media_type) + *media_type = hdev->hw.mac.media_type; +} + +static void hclge_get_mdix_mode(struct hnae3_handle *handle, + u8 *tp_mdix_ctrl, u8 *tp_mdix) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct phy_device *phydev = hdev->hw.mac.phydev; + int mdix_ctrl, mdix, retval, is_resolved; + + if (!phydev) { + *tp_mdix_ctrl = ETH_TP_MDI_INVALID; + *tp_mdix = ETH_TP_MDI_INVALID; + return; + } + + phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX); + + retval = phy_read(phydev, HCLGE_PHY_CSC_REG); + mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, + HCLGE_PHY_MDIX_CTRL_S); + + retval = phy_read(phydev, HCLGE_PHY_CSS_REG); + mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); + is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); + + phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER); + + switch (mdix_ctrl) { + case 0x0: + *tp_mdix_ctrl = ETH_TP_MDI; + break; + case 0x1: + *tp_mdix_ctrl = ETH_TP_MDI_X; + break; + case 0x3: + *tp_mdix_ctrl = ETH_TP_MDI_AUTO; + break; + default: + *tp_mdix_ctrl = ETH_TP_MDI_INVALID; + break; + } + + if (!is_resolved) + *tp_mdix = ETH_TP_MDI_INVALID; + else if (mdix) + *tp_mdix = ETH_TP_MDI_X; + else + *tp_mdix = ETH_TP_MDI; +} + +static int hclge_init_client_instance(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) +{ + struct hclge_dev *hdev = ae_dev->priv; + struct hclge_vport *vport; + int i, ret; + + for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { + vport = &hdev->vport[i]; + + switch (client->type) { + case HNAE3_CLIENT_KNIC: + + hdev->nic_client = client; + vport->nic.client = client; + ret = client->ops->init_instance(&vport->nic); + if (ret) + goto err; + + if (hdev->roce_client && + hnae_get_bit(hdev->ae_dev->flag, + HNAE_DEV_SUPPORT_ROCE_B)) { + struct hnae3_client *rc = hdev->roce_client; + + ret = hclge_init_roce_base_info(vport); + if (ret) + goto err; + + ret = rc->ops->init_instance(&vport->roce); + if (ret) + goto err; + } + + break; + case HNAE3_CLIENT_UNIC: + hdev->nic_client = client; + vport->nic.client = client; + + ret = client->ops->init_instance(&vport->nic); + if (ret) + goto err; + + break; + case HNAE3_CLIENT_ROCE: + if (hnae_get_bit(hdev->ae_dev->flag, + HNAE_DEV_SUPPORT_ROCE_B)) { + hdev->roce_client = client; + 
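/* only the handle is recorded here; hclge_init_roce_base_info() + * below fills the roce vector info before the instance init runs + */ +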
vport->roce.client = client; + } + + if (hdev->roce_client) { + ret = hclge_init_roce_base_info(vport); + if (ret) + goto err; + + ret = client->ops->init_instance(&vport->roce); + if (ret) + goto err; + } + } + } + + return 0; +err: + return ret; +} + +static void hclge_uninit_client_instance(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) +{ + struct hclge_dev *hdev = ae_dev->priv; + struct hclge_vport *vport; + int i; + + for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { + vport = &hdev->vport[i]; + if (hdev->roce_client) + hdev->roce_client->ops->uninit_instance(&vport->roce, + 0); + if (client->type == HNAE3_CLIENT_ROCE) + return; + if (client->ops->uninit_instance) + client->ops->uninit_instance(&vport->nic, 0); + } +} + +static int hclge_pci_init(struct hclge_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + struct hclge_hw *hw; + int ret; + + ret = pci_enable_device(pdev); + if (ret) { + dev_err(&pdev->dev, "failed to enable PCI device\n"); + goto err_no_drvdata; + } + + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (ret) { + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(&pdev->dev, + "can't set consistent PCI DMA"); + goto err_disable_device; + } + dev_warn(&pdev->dev, "set DMA mask to 32 bits\n"); + } + + ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME); + if (ret) { + dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); + goto err_disable_device; + } + + pci_set_master(pdev); + hw = &hdev->hw; + hw->back = hdev; + hw->io_base = pcim_iomap(pdev, 2, 0); + if (!hw->io_base) { + dev_err(&pdev->dev, "Can't map configuration register space\n"); + ret = -ENOMEM; + goto err_clr_master; + } + + return 0; +err_clr_master: + pci_clear_master(pdev); + pci_release_regions(pdev); +err_disable_device: + pci_disable_device(pdev); +err_no_drvdata: + pci_set_drvdata(pdev, NULL); + + return ret; +} + +static void hclge_pci_uninit(struct hclge_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + + if (hdev->flag & HCLGE_FLAG_USE_MSIX) { + pci_disable_msix(pdev); + devm_kfree(&pdev->dev, hdev->msix_entries); + hdev->msix_entries = NULL; + } else { + pci_disable_msi(pdev); + } + + pci_clear_master(pdev); + pci_release_mem_regions(pdev); + pci_disable_device(pdev); +} + +static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) +{ + struct pci_dev *pdev = ae_dev->pdev; + const struct pci_device_id *id; + struct hclge_dev *hdev; + int ret; + + hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); + if (!hdev) { + ret = -ENOMEM; + goto err_hclge_dev; + } + + hdev->flag |= HCLGE_FLAG_USE_MSIX; + hdev->pdev = pdev; + hdev->ae_dev = ae_dev; + ae_dev->priv = hdev; + + id = pci_match_id(roce_pci_tbl, ae_dev->pdev); + if (id) + hnae_set_bit(ae_dev->flag, HNAE_DEV_SUPPORT_ROCE_B, 1); + + ret = hclge_pci_init(hdev); + if (ret) { + dev_err(&pdev->dev, "PCI init failed\n"); + goto err_pci_init; + } + + /* Command queue initialize */ + ret = hclge_cmd_init(hdev); + if (ret) + goto err_cmd_init; + + ret = hclge_get_cap(hdev); + if (ret) { + dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", + ret); + return ret; + } + + ret = hclge_configure(hdev); + if (ret) { + dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); + return ret; + } + + if (hdev->flag & HCLGE_FLAG_USE_MSIX) + ret = hclge_init_msix(hdev); + else + ret = hclge_init_msi(hdev); + if (ret) { + dev_err(&pdev->dev, "Init msix/msi error, ret = %d.\n", ret); + return ret; + } + + ret = hclge_alloc_tqps(hdev); + if (ret) { + dev_err(&pdev->dev, 
"Allocate TQPs error, ret = %d.\n", ret); + return ret; + } + + ret = hclge_alloc_vport(hdev); + if (ret) { + dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret); + return ret; + } + + ret = hclge_mac_init(hdev); + if (ret) { + dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); + return ret; + } + ret = hclge_buffer_alloc(hdev); + if (ret) { + dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret); + return ret; + } + + ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); + if (ret) { + dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); + return ret; + } + + ret = hclge_rss_init_hw(hdev); + if (ret) { + dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); + return ret; + } + + ret = hclge_init_vlan_config(hdev); + if (ret) { + dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); + return ret; + } + + ret = hclge_tm_schd_init(hdev); + if (ret) { + dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); + return ret; + } + + setup_timer(&hdev->service_timer, hclge_service_timer, + (unsigned long)hdev); + INIT_WORK(&hdev->service_task, hclge_service_task); + + set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); + set_bit(HCLGE_STATE_DOWN, &hdev->state); + + pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME); + return 0; + +err_cmd_init: + pci_release_regions(pdev); +err_pci_init: + pci_set_drvdata(pdev, NULL); +err_hclge_dev: + return ret; +} + +static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) +{ + struct hclge_dev *hdev = ae_dev->priv; + struct hclge_mac *mac = &hdev->hw.mac; + + set_bit(HCLGE_STATE_DOWN, &hdev->state); + + if (IS_ENABLED(CONFIG_PCI_IOV)) + hclge_disable_sriov(hdev); + + if (hdev->service_timer.data) + del_timer_sync(&hdev->service_timer); + if (hdev->service_task.func) + cancel_work_sync(&hdev->service_task); + + if (mac->phydev) + mdiobus_unregister(mac->mdio_bus); + + hclge_destroy_cmd_queue(&hdev->hw); + hclge_pci_uninit(hdev); + ae_dev->priv = NULL; +} + +static const struct hnae3_ae_ops hclge_ops = { + .init_ae_dev = hclge_init_ae_dev, + .uninit_ae_dev = hclge_uninit_ae_dev, + .init_client_instance = hclge_init_client_instance, + .uninit_client_instance = hclge_uninit_client_instance, + .map_ring_to_vector = hclge_map_handle_ring_to_vector, + .unmap_ring_from_vector = hclge_unmap_ring_from_vector, + .get_vector = hclge_get_vector, + .set_promisc_mode = hclge_set_promisc_mode, + .start = hclge_ae_start, + .stop = hclge_ae_stop, + .get_status = hclge_get_status, + .get_ksettings_an_result = hclge_get_ksettings_an_result, + .update_speed_duplex_h = hclge_update_speed_duplex_h, + .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h, + .get_media_type = hclge_get_media_type, + .get_rss_key_size = hclge_get_rss_key_size, + .get_rss_indir_size = hclge_get_rss_indir_size, + .get_rss = hclge_get_rss, + .set_rss = hclge_set_rss, + .get_tc_size = hclge_get_tc_size, + .get_mac_addr = hclge_get_mac_addr, + .set_mac_addr = hclge_set_mac_addr, + .add_uc_addr = hclge_add_uc_addr, + .rm_uc_addr = hclge_rm_uc_addr, + .add_mc_addr = hclge_add_mc_addr, + .rm_mc_addr = hclge_rm_mc_addr, + .set_autoneg = hclge_set_autoneg, + .get_autoneg = hclge_get_autoneg, + .get_pauseparam = hclge_get_pauseparam, + .set_mtu = hclge_set_mtu, + .reset_queue = hclge_reset_tqp, + .get_stats = hclge_get_stats, + .update_stats = hclge_update_stats, + .get_strings = hclge_get_strings, + .get_sset_count = hclge_get_sset_count, + .get_fw_version = hclge_get_fw_version, + .get_mdix_mode = hclge_get_mdix_mode, + .set_vlan_filter = 
hclge_set_port_vlan_filter, + .set_vf_vlan_filter = hclge_set_vf_vlan_filter, +}; + +static struct hnae3_ae_algo ae_algo = { + .ops = &hclge_ops, + .name = HCLGE_NAME, + .pdev_id_table = ae_algo_pci_tbl, +}; + +static int hclge_init(void) +{ + pr_info("%s is initializing\n", HCLGE_NAME); + + return hnae3_register_ae_algo(&ae_algo); +} + +static void hclge_exit(void) +{ + hnae3_unregister_ae_algo(&ae_algo); +} +module_init(hclge_init); +module_exit(hclge_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Huawei Tech. Co., Ltd."); +MODULE_DESCRIPTION("HCLGE Driver"); +MODULE_VERSION(HCLGE_MOD_VERSION); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h new file mode 100644 index 000000000000..edb10ad075eb --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -0,0 +1,519 @@ +/* + * Copyright (c) 2016~2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __HCLGE_MAIN_H +#define __HCLGE_MAIN_H +#include <linux/fs.h> +#include <linux/types.h> +#include <linux/phy.h> +#include "hclge_cmd.h" +#include "hnae3.h" + +#define HCLGE_MOD_VERSION "v1.0" +#define HCLGE_DRIVER_NAME "hclge" + +#define HCLGE_INVALID_VPORT 0xffff + +#define HCLGE_ROCE_VECTOR_OFFSET 96 + +#define HCLGE_PF_CFG_BLOCK_SIZE 32 +#define HCLGE_PF_CFG_DESC_NUM \ + (HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES) + +#define HCLGE_VECTOR_REG_BASE 0x20000 + +#define HCLGE_VECTOR_REG_OFFSET 0x4 +#define HCLGE_VECTOR_VF_OFFSET 0x100000 + +#define HCLGE_RSS_IND_TBL_SIZE 512 +#define HCLGE_RSS_SET_BITMAP_MSK 0xffff +#define HCLGE_RSS_KEY_SIZE 40 +#define HCLGE_RSS_HASH_ALGO_TOEPLITZ 0 +#define HCLGE_RSS_HASH_ALGO_SIMPLE 1 +#define HCLGE_RSS_HASH_ALGO_SYMMETRIC 2 +#define HCLGE_RSS_HASH_ALGO_MASK 0xf +#define HCLGE_RSS_CFG_TBL_NUM \ + (HCLGE_RSS_IND_TBL_SIZE / HCLGE_RSS_CFG_TBL_SIZE) + +#define HCLGE_RSS_TC_SIZE_0 1 +#define HCLGE_RSS_TC_SIZE_1 2 +#define HCLGE_RSS_TC_SIZE_2 4 +#define HCLGE_RSS_TC_SIZE_3 8 +#define HCLGE_RSS_TC_SIZE_4 16 +#define HCLGE_RSS_TC_SIZE_5 32 +#define HCLGE_RSS_TC_SIZE_6 64 +#define HCLGE_RSS_TC_SIZE_7 128 + +#define HCLGE_TQP_RESET_TRY_TIMES 10 + +#define HCLGE_PHY_PAGE_MDIX 0 +#define HCLGE_PHY_PAGE_COPPER 0 + +/* Page Selection Reg. 
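(hclge_get_mdix_mode() in hclge_main.c selects HCLGE_PHY_PAGE_MDIX before reading the CSC/CSS registers and restores HCLGE_PHY_PAGE_COPPER afterwards)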
*/ +#define HCLGE_PHY_PAGE_REG 22 + +/* Copper Specific Control Register */ +#define HCLGE_PHY_CSC_REG 16 + +/* Copper Specific Status Register */ +#define HCLGE_PHY_CSS_REG 17 + +#define HCLGE_PHY_MDIX_CTRL_S (5) +#define HCLGE_PHY_MDIX_CTRL_M (3 << HCLGE_PHY_MDIX_CTRL_S) + +#define HCLGE_PHY_MDIX_STATUS_B (6) +#define HCLGE_PHY_SPEED_DUP_RESOLVE_B (11) + +enum HCLGE_DEV_STATE { + HCLGE_STATE_REINITING, + HCLGE_STATE_DOWN, + HCLGE_STATE_DISABLED, + HCLGE_STATE_REMOVING, + HCLGE_STATE_SERVICE_INITED, + HCLGE_STATE_SERVICE_SCHED, + HCLGE_STATE_MBX_HANDLING, + HCLGE_STATE_MBX_IRQ, + HCLGE_STATE_MAX +}; + +#define HCLGE_MPF_ENBALE 1 +struct hclge_caps { + u16 num_tqp; + u16 num_buffer_cell; + u32 flag; + u16 vmdq; +}; + +enum HCLGE_MAC_SPEED { + HCLGE_MAC_SPEED_10M = 10, /* 10 Mbps */ + HCLGE_MAC_SPEED_100M = 100, /* 100 Mbps */ + HCLGE_MAC_SPEED_1G = 1000, /* 1000 Mbps = 1 Gbps */ + HCLGE_MAC_SPEED_10G = 10000, /* 10000 Mbps = 10 Gbps */ + HCLGE_MAC_SPEED_25G = 25000, /* 25000 Mbps = 25 Gbps */ + HCLGE_MAC_SPEED_40G = 40000, /* 40000 Mbps = 40 Gbps */ + HCLGE_MAC_SPEED_50G = 50000, /* 50000 Mbps = 50 Gbps */ + HCLGE_MAC_SPEED_100G = 100000 /* 100000 Mbps = 100 Gbps */ +}; + +enum HCLGE_MAC_DUPLEX { + HCLGE_MAC_HALF, + HCLGE_MAC_FULL +}; + +enum hclge_mta_dmac_sel_type { + HCLGE_MAC_ADDR_47_36, + HCLGE_MAC_ADDR_46_35, + HCLGE_MAC_ADDR_45_34, + HCLGE_MAC_ADDR_44_33, +}; + +struct hclge_mac { + u8 phy_addr; + u8 flag; + u8 media_type; + u8 mac_addr[ETH_ALEN]; + u8 autoneg; + u8 duplex; + u32 speed; + int link; /* store the link status of mac & phy (if the phy exists) */ + struct phy_device *phydev; + struct mii_bus *mdio_bus; + phy_interface_t phy_if; +}; + +struct hclge_hw { + void __iomem *io_base; + struct hclge_mac mac; + int num_vec; + struct hclge_cmq cmq; + struct hclge_caps caps; + void *back; +}; + +/* TQP stats */ +struct hlcge_tqp_stats { + /* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */ + u64 rcb_tx_ring_pktnum_rcd; /* 32bit */ + /* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */ + u64 rcb_rx_ring_pktnum_rcd; /* 32bit */ +}; + +struct hclge_tqp { + struct device *dev; /* Device for DMA mapping */ + struct hnae3_queue q; + struct hlcge_tqp_stats tqp_stats; + u16 index; /* Global index in a NIC controller */ + + bool alloced; +}; + +enum hclge_fc_mode { + HCLGE_FC_NONE, + HCLGE_FC_RX_PAUSE, + HCLGE_FC_TX_PAUSE, + HCLGE_FC_FULL, + HCLGE_FC_PFC, + HCLGE_FC_DEFAULT +}; + +#define HCLGE_PG_NUM 4 +#define HCLGE_SCH_MODE_SP 0 +#define HCLGE_SCH_MODE_DWRR 1 +struct hclge_pg_info { + u8 pg_id; + u8 pg_sch_mode; /* 0: sp; 1: dwrr */ + u8 tc_bit_map; + u32 bw_limit; + u8 tc_dwrr[HNAE3_MAX_TC]; +}; + +struct hclge_tc_info { + u8 tc_id; + u8 tc_sch_mode; /* 0: sp; 1: dwrr */ + u8 up; + u8 pgid; + u32 bw_limit; +}; + +struct hclge_cfg { + u8 vmdq_vport_num; + u8 tc_num; + u16 tqp_desc_num; + u16 rx_buf_len; + u8 phy_addr; + u8 media_type; + u8 mac_addr[ETH_ALEN]; + u8 default_speed; + u32 numa_node_map; +}; + +struct hclge_tm_info { + u8 num_tc; + u8 num_pg; /* It must be 1 if vNET-Base schd */ + u8 pg_dwrr[HCLGE_PG_NUM]; + struct hclge_pg_info pg_info[HCLGE_PG_NUM]; + struct hclge_tc_info tc_info[HNAE3_MAX_TC]; + enum hclge_fc_mode fc_mode; + u8 hw_pfc_map; /* Allow for packet drop or not on this TC */ +}; + +struct hclge_comm_stats_str { + char desc[ETH_GSTRING_LEN]; + unsigned long offset; +}; + +/* all 64bit stats, opcode id: 0x0030 */ +struct hclge_64_bit_stats { + /* query_igu_stat */ + u64 igu_rx_oversize_pkt; + u64 igu_rx_undersize_pkt; + u64 igu_rx_out_all_pkt; + u64 igu_rx_uni_pkt; + 
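/* igu_rx_uni_pkt above and the multi/broad counters below split the + * packets the IGU accepted by destination address type + */ +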
u64 igu_rx_multi_pkt; + u64 igu_rx_broad_pkt; + u64 rsv0; + + /* query_egu_stat */ + u64 egu_tx_out_all_pkt; + u64 egu_tx_uni_pkt; + u64 egu_tx_multi_pkt; + u64 egu_tx_broad_pkt; + + /* ssu_ppp packet stats */ + u64 ssu_ppp_mac_key_num; + u64 ssu_ppp_host_key_num; + u64 ppp_ssu_mac_rlt_num; + u64 ppp_ssu_host_rlt_num; + + /* ssu_tx_in_out_dfx_stats */ + u64 ssu_tx_in_num; + u64 ssu_tx_out_num; + /* ssu_rx_in_out_dfx_stats */ + u64 ssu_rx_in_num; + u64 ssu_rx_out_num; +}; + +/* all 32bit stats, opcode id: 0x0031 */ +struct hclge_32_bit_stats { + u64 igu_rx_err_pkt; + u64 igu_rx_no_eof_pkt; + u64 igu_rx_no_sof_pkt; + u64 egu_tx_1588_pkt; + u64 egu_tx_err_pkt; + u64 ssu_full_drop_num; + u64 ssu_part_drop_num; + u64 ppp_key_drop_num; + u64 ppp_rlt_drop_num; + u64 ssu_key_drop_num; + u64 pkt_curr_buf_cnt; + u64 qcn_fb_rcv_cnt; + u64 qcn_fb_drop_cnt; + u64 qcn_fb_invaild_cnt; + u64 rsv0; + u64 rx_packet_tc0_in_cnt; + u64 rx_packet_tc1_in_cnt; + u64 rx_packet_tc2_in_cnt; + u64 rx_packet_tc3_in_cnt; + u64 rx_packet_tc4_in_cnt; + u64 rx_packet_tc5_in_cnt; + u64 rx_packet_tc6_in_cnt; + u64 rx_packet_tc7_in_cnt; + u64 rx_packet_tc0_out_cnt; + u64 rx_packet_tc1_out_cnt; + u64 rx_packet_tc2_out_cnt; + u64 rx_packet_tc3_out_cnt; + u64 rx_packet_tc4_out_cnt; + u64 rx_packet_tc5_out_cnt; + u64 rx_packet_tc6_out_cnt; + u64 rx_packet_tc7_out_cnt; + + /* Tx packet level statistics */ + u64 tx_packet_tc0_in_cnt; + u64 tx_packet_tc1_in_cnt; + u64 tx_packet_tc2_in_cnt; + u64 tx_packet_tc3_in_cnt; + u64 tx_packet_tc4_in_cnt; + u64 tx_packet_tc5_in_cnt; + u64 tx_packet_tc6_in_cnt; + u64 tx_packet_tc7_in_cnt; + u64 tx_packet_tc0_out_cnt; + u64 tx_packet_tc1_out_cnt; + u64 tx_packet_tc2_out_cnt; + u64 tx_packet_tc3_out_cnt; + u64 tx_packet_tc4_out_cnt; + u64 tx_packet_tc5_out_cnt; + u64 tx_packet_tc6_out_cnt; + u64 tx_packet_tc7_out_cnt; + + /* packet buffer statistics */ + u64 pkt_curr_buf_tc0_cnt; + u64 pkt_curr_buf_tc1_cnt; + u64 pkt_curr_buf_tc2_cnt; + u64 pkt_curr_buf_tc3_cnt; + u64 pkt_curr_buf_tc4_cnt; + u64 pkt_curr_buf_tc5_cnt; + u64 pkt_curr_buf_tc6_cnt; + u64 pkt_curr_buf_tc7_cnt; + + u64 mb_uncopy_num; + u64 lo_pri_unicast_rlt_drop_num; + u64 hi_pri_multicast_rlt_drop_num; + u64 lo_pri_multicast_rlt_drop_num; + u64 rx_oq_drop_pkt_cnt; + u64 tx_oq_drop_pkt_cnt; + u64 nic_l2_err_drop_pkt_cnt; + u64 roc_l2_err_drop_pkt_cnt; +}; + +/* mac stats ,opcode id: 0x0032 */ +struct hclge_mac_stats { + u64 mac_tx_mac_pause_num; + u64 mac_rx_mac_pause_num; + u64 mac_tx_pfc_pri0_pkt_num; + u64 mac_tx_pfc_pri1_pkt_num; + u64 mac_tx_pfc_pri2_pkt_num; + u64 mac_tx_pfc_pri3_pkt_num; + u64 mac_tx_pfc_pri4_pkt_num; + u64 mac_tx_pfc_pri5_pkt_num; + u64 mac_tx_pfc_pri6_pkt_num; + u64 mac_tx_pfc_pri7_pkt_num; + u64 mac_rx_pfc_pri0_pkt_num; + u64 mac_rx_pfc_pri1_pkt_num; + u64 mac_rx_pfc_pri2_pkt_num; + u64 mac_rx_pfc_pri3_pkt_num; + u64 mac_rx_pfc_pri4_pkt_num; + u64 mac_rx_pfc_pri5_pkt_num; + u64 mac_rx_pfc_pri6_pkt_num; + u64 mac_rx_pfc_pri7_pkt_num; + u64 mac_tx_total_pkt_num; + u64 mac_tx_total_oct_num; + u64 mac_tx_good_pkt_num; + u64 mac_tx_bad_pkt_num; + u64 mac_tx_good_oct_num; + u64 mac_tx_bad_oct_num; + u64 mac_tx_uni_pkt_num; + u64 mac_tx_multi_pkt_num; + u64 mac_tx_broad_pkt_num; + u64 mac_tx_undersize_pkt_num; + u64 mac_tx_overrsize_pkt_num; + u64 mac_tx_64_oct_pkt_num; + u64 mac_tx_65_127_oct_pkt_num; + u64 mac_tx_128_255_oct_pkt_num; + u64 mac_tx_256_511_oct_pkt_num; + u64 mac_tx_512_1023_oct_pkt_num; + u64 mac_tx_1024_1518_oct_pkt_num; + u64 mac_tx_1519_max_oct_pkt_num; + u64 mac_rx_total_pkt_num; + u64 
mac_rx_total_oct_num; + u64 mac_rx_good_pkt_num; + u64 mac_rx_bad_pkt_num; + u64 mac_rx_good_oct_num; + u64 mac_rx_bad_oct_num; + u64 mac_rx_uni_pkt_num; + u64 mac_rx_multi_pkt_num; + u64 mac_rx_broad_pkt_num; + u64 mac_rx_undersize_pkt_num; + u64 mac_rx_overrsize_pkt_num; + u64 mac_rx_64_oct_pkt_num; + u64 mac_rx_65_127_oct_pkt_num; + u64 mac_rx_128_255_oct_pkt_num; + u64 mac_rx_256_511_oct_pkt_num; + u64 mac_rx_512_1023_oct_pkt_num; + u64 mac_rx_1024_1518_oct_pkt_num; + u64 mac_rx_1519_max_oct_pkt_num; + + u64 mac_trans_fragment_pkt_num; + u64 mac_trans_undermin_pkt_num; + u64 mac_trans_jabber_pkt_num; + u64 mac_trans_err_all_pkt_num; + u64 mac_trans_from_app_good_pkt_num; + u64 mac_trans_from_app_bad_pkt_num; + u64 mac_rcv_fragment_pkt_num; + u64 mac_rcv_undermin_pkt_num; + u64 mac_rcv_jabber_pkt_num; + u64 mac_rcv_fcs_err_pkt_num; + u64 mac_rcv_send_app_good_pkt_num; + u64 mac_rcv_send_app_bad_pkt_num; +}; + +struct hclge_hw_stats { + struct hclge_mac_stats mac_stats; + struct hclge_64_bit_stats all_64_bit_stats; + struct hclge_32_bit_stats all_32_bit_stats; +}; + +struct hclge_dev { + struct pci_dev *pdev; + struct hnae3_ae_dev *ae_dev; + struct hclge_hw hw; + struct hclge_hw_stats hw_stats; + unsigned long state; + + u32 fw_version; + u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */ + u16 num_tqps; /* Num task queue pairs of this PF */ + u16 num_req_vfs; /* Num VFs requested for this PF */ + + u16 num_roce_msix; /* Num of roce vectors for this PF */ + int roce_base_vector; + + /* Base task tqp physical id of this PF */ + u16 base_tqp_pid; + u16 alloc_rss_size; /* Allocated RSS task queue */ + u16 rss_size_max; /* HW defined max RSS task queue */ + + /* Num of guaranteed filters for this PF */ + u16 fdir_pf_filter_count; + u16 num_alloc_vport; /* Num vports this driver supports */ + u32 numa_node_mask; + u16 rx_buf_len; + u16 num_desc; + u8 hw_tc_map; + u8 tc_num_last_time; + enum hclge_fc_mode fc_mode_last_time; + +#define HCLGE_FLAG_TC_BASE_SCH_MODE 1 +#define HCLGE_FLAG_VNET_BASE_SCH_MODE 2 + u8 tx_sch_mode; + + u8 default_up; + struct hclge_tm_info tm_info; + + u16 num_msi; + u16 num_msi_left; + u16 num_msi_used; + u32 base_msi_vector; + struct msix_entry *msix_entries; + u16 *vector_status; + + u16 pending_udp_bitmap; + + u16 rx_itr_default; + u16 tx_itr_default; + + u16 adminq_work_limit; /* Num of admin receive queue desc to process */ + unsigned long service_timer_period; + unsigned long service_timer_previous; + struct timer_list service_timer; + struct work_struct service_task; + + bool cur_promisc; + int num_alloc_vfs; /* Actual number of VFs allocated */ + + struct hclge_tqp *htqp; + struct hclge_vport *vport; + + struct dentry *hclge_dbgfs; + + struct hnae3_client *nic_client; + struct hnae3_client *roce_client; + +#define HCLGE_FLAG_USE_MSI 0x00000001 +#define HCLGE_FLAG_USE_MSIX 0x00000002 +#define HCLGE_FLAG_MAIN 0x00000004 +#define HCLGE_FLAG_DCB_CAPABLE 0x00000008 +#define HCLGE_FLAG_DCB_ENABLE 0x00000010 + u32 flag; + + u32 pkt_buf_size; /* Total pf buf size for tx/rx */ + u32 mps; /* Max packet size */ + struct hclge_priv_buf *priv_buf; + struct hclge_shared_buf s_buf; + + enum hclge_mta_dmac_sel_type mta_mac_sel_type; + bool enable_mta; /* Mutilcast filter enable */ + bool accept_mta_mc; /* Whether accept mta filter multicast */ +}; + +struct hclge_vport { + u16 alloc_tqps; /* Allocated Tx/Rx queues */ + + u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */ + /* User configured lookup table entries */ + u8 
rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE]; + + u16 qs_offset; + u16 bw_limit; /* VSI BW Limit (0 = disabled) */ + u8 dwrr; + + int vport_id; + struct hclge_dev *back; /* Back reference to associated dev */ + struct hnae3_handle nic; + struct hnae3_handle roce; +}; + +void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, + bool en_mc, bool en_bc, int vport_id); + +int hclge_add_uc_addr_common(struct hclge_vport *vport, + const unsigned char *addr); +int hclge_rm_uc_addr_common(struct hclge_vport *vport, + const unsigned char *addr); +int hclge_add_mc_addr_common(struct hclge_vport *vport, + const unsigned char *addr); +int hclge_rm_mc_addr_common(struct hclge_vport *vport, + const unsigned char *addr); + +int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, + u8 func_id, + bool enable); +struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle); +int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector, + struct hnae3_ring_chain_node *ring_chain); +static inline int hclge_get_queue_id(struct hnae3_queue *queue) +{ + struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q); + + return tqp->index; +} + +int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex); +int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, + bool is_kill, u16 vlan, u8 qos, __be16 proto); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c new file mode 100644 index 000000000000..a2add8bb1945 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -0,0 +1,213 @@ +/* + * Copyright (c) 2016~2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/etherdevice.h> +#include <linux/kernel.h> + +#include "hclge_cmd.h" +#include "hclge_main.h" +#include "hclge_mdio.h" + +enum hclge_mdio_c22_op_seq { + HCLGE_MDIO_C22_WRITE = 1, + HCLGE_MDIO_C22_READ = 2 +}; + +#define HCLGE_MDIO_CTRL_START_B 0 +#define HCLGE_MDIO_CTRL_ST_S 1 +#define HCLGE_MDIO_CTRL_ST_M (0x3 << HCLGE_MDIO_CTRL_ST_S) +#define HCLGE_MDIO_CTRL_OP_S 3 +#define HCLGE_MDIO_CTRL_OP_M (0x3 << HCLGE_MDIO_CTRL_OP_S) + +#define HCLGE_MDIO_PHYID_S 0 +#define HCLGE_MDIO_PHYID_M (0x1f << HCLGE_MDIO_PHYID_S) + +#define HCLGE_MDIO_PHYREG_S 0 +#define HCLGE_MDIO_PHYREG_M (0x1f << HCLGE_MDIO_PHYREG_S) + +#define HCLGE_MDIO_STA_B 0 + +struct hclge_mdio_cfg_cmd { + u8 ctrl_bit; + u8 phyid; + u8 phyad; + u8 rsvd; + __le16 reserve; + __le16 data_wr; + __le16 data_rd; + __le16 sta; +}; + +static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum, + u16 data) +{ + struct hclge_mdio_cfg_cmd *mdio_cmd; + struct hclge_dev *hdev = bus->priv; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, false); + + mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data; + + hnae_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M, + HCLGE_MDIO_PHYID_S, phyid); + hnae_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M, + HCLGE_MDIO_PHYREG_S, regnum); + + hnae_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1); + hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M, + HCLGE_MDIO_CTRL_ST_S, 1); + hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M, + HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_WRITE); + + mdio_cmd->data_wr = cpu_to_le16(data); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "mdio write fail when sending cmd, status is %d.\n", + ret); + return ret; + } + + return 0; +} + +static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum) +{ + struct hclge_mdio_cfg_cmd *mdio_cmd; + struct hclge_dev *hdev = bus->priv; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, true); + + mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data; + + hnae_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M, + HCLGE_MDIO_PHYID_S, phyid); + hnae_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M, + HCLGE_MDIO_PHYREG_S, regnum); + + hnae_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1); + hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M, + HCLGE_MDIO_CTRL_ST_S, 1); + hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M, + HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_READ); + + /* Read out phy data */ + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "mdio read fail when get data, status is %d.\n", + ret); + return ret; + } + + if (hnae_get_bit(le16_to_cpu(mdio_cmd->sta), HCLGE_MDIO_STA_B)) { + dev_err(&hdev->pdev->dev, "mdio read data error\n"); + return -EIO; + } + + return le16_to_cpu(mdio_cmd->data_rd); +} + +int hclge_mac_mdio_config(struct hclge_dev *hdev) +{ + struct hclge_mac *mac = &hdev->hw.mac; + struct phy_device *phydev; + struct mii_bus *mdio_bus; + int ret; + + if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) + return 0; + + mdio_bus = devm_mdiobus_alloc(&hdev->pdev->dev); + if (!mdio_bus) + return -ENOMEM; + + mdio_bus->name = "hisilicon MII bus"; + mdio_bus->read = hclge_mdio_read; + mdio_bus->write = hclge_mdio_write; + snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "mii", + dev_name(&hdev->pdev->dev)); + + mdio_bus->parent = &hdev->pdev->dev; + mdio_bus->priv = hdev; + mdio_bus->phy_mask = ~(1 << 
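The phy_mask being assembled here is worth spelling out: bits that are set in mii_bus->phy_mask are skipped during the bus scan, so ~(1 << phy_addr) probes exactly the one PHY address the firmware reported. The clause-22 fields in the command above are likewise plain 5-bit values; a standalone sketch with invented values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t phy_addr = 5;			/* hypothetical address from firmware */
	uint32_t phy_mask = ~(1u << phy_addr);	/* scan only this address */

	/* clause-22 frames carry 5-bit PHY and register addresses; the
	 * firmware command mirrors that with the 0x1f masks at shift 0
	 */
	uint8_t phyid = phy_addr & 0x1f;	/* HCLGE_MDIO_PHYID_M/_S */
	uint8_t regad = 1 & 0x1f;		/* e.g. the PHY status register */

	printf("phy_mask=%#x phyid=%u reg=%u\n", phy_mask, phyid, regad);
	return 0;
}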
mac->phy_addr); + ret = mdiobus_register(mdio_bus); + if (ret) { + dev_err(mdio_bus->parent, + "Failed to register MDIO bus ret = %#x\n", ret); + return ret; + } + + phydev = mdiobus_get_phy(mdio_bus, mac->phy_addr); + if (!phydev || IS_ERR(phydev)) { + dev_err(mdio_bus->parent, "Failed to get phy device\n"); + mdiobus_unregister(mdio_bus); + return -EIO; + } + + mac->phydev = phydev; + mac->mdio_bus = mdio_bus; + + return 0; +} + +static void hclge_mac_adjust_link(struct net_device *netdev) +{ + struct hnae3_handle *h = *((void **)netdev_priv(netdev)); + struct hclge_vport *vport = hclge_get_vport(h); + struct hclge_dev *hdev = vport->back; + int duplex, speed; + int ret; + + speed = netdev->phydev->speed; + duplex = netdev->phydev->duplex; + + ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex); + if (ret) + netdev_err(netdev, "failed to adjust link.\n"); +} + +int hclge_mac_start_phy(struct hclge_dev *hdev) +{ + struct net_device *netdev = hdev->vport[0].nic.netdev; + struct phy_device *phydev = hdev->hw.mac.phydev; + int ret; + + if (!phydev) + return 0; + + ret = phy_connect_direct(netdev, phydev, + hclge_mac_adjust_link, + PHY_INTERFACE_MODE_SGMII); + if (ret) { + netdev_err(netdev, "phy_connect_direct err.\n"); + return ret; + } + + phy_start(phydev); + + return 0; +} + +void hclge_mac_stop_phy(struct hclge_dev *hdev) +{ + struct net_device *netdev = hdev->vport[0].nic.netdev; + struct phy_device *phydev = netdev->phydev; + + if (!phydev) + return; + + phy_stop(phydev); + phy_disconnect(phydev); +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h new file mode 100644 index 000000000000..c5e91cfb8f2c --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h @@ -0,0 +1,17 @@ +/* + * Copyright (c) 2016-2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __HCLGE_MDIO_H +#define __HCLGE_MDIO_H + +int hclge_mac_mdio_config(struct hclge_dev *hdev); +int hclge_mac_start_phy(struct hclge_dev *hdev); +void hclge_mac_stop_phy(struct hclge_dev *hdev); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c new file mode 100644 index 000000000000..1c577d268f00 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -0,0 +1,1015 @@ +/* + * Copyright (c) 2016~2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/etherdevice.h> + +#include "hclge_cmd.h" +#include "hclge_main.h" +#include "hclge_tm.h" + +enum hclge_shaper_level { + HCLGE_SHAPER_LVL_PRI = 0, + HCLGE_SHAPER_LVL_PG = 1, + HCLGE_SHAPER_LVL_PORT = 2, + HCLGE_SHAPER_LVL_QSET = 3, + HCLGE_SHAPER_LVL_CNT = 4, + HCLGE_SHAPER_LVL_VF = 0, + HCLGE_SHAPER_LVL_PF = 1, +}; + +#define HCLGE_SHAPER_BS_U_DEF 1 +#define HCLGE_SHAPER_BS_S_DEF 4 + +#define HCLGE_ETHER_MAX_RATE 100000 + +/* hclge_shaper_para_calc: calculate ir parameter for the shaper + * @ir: Rate to be config, its unit is Mbps + * @shaper_level: the shaper level. 
eg: port, pg, priority, queueset + * @ir_b: IR_B parameter of IR shaper + * @ir_u: IR_U parameter of IR shaper + * @ir_s: IR_S parameter of IR shaper + * + * the formula: + * + * IR_b * (2 ^ IR_u) * 8 + * IR(Mbps) = ------------------------- * CLOCK(1000Mbps) + * Tick * (2 ^ IR_s) + * + * @return: 0: calculate successful, negative: fail + */ +static int hclge_shaper_para_calc(u32 ir, u8 shaper_level, + u8 *ir_b, u8 *ir_u, u8 *ir_s) +{ + const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = { + 6 * 256, /* Priority level */ + 6 * 32, /* Priority group level */ + 6 * 8, /* Port level */ + 6 * 256 /* Qset level */ + }; + u8 ir_u_calc = 0, ir_s_calc = 0; + u32 ir_calc; + u32 tick; + + /* Calc tick */ + if (shaper_level >= HCLGE_SHAPER_LVL_CNT) + return -EINVAL; + + tick = tick_array[shaper_level]; + + /** + * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0 + * the formula is changed to: + * 126 * 1 * 8 + * ir_calc = ---------------- * 1000 + * tick * 1 + */ + ir_calc = (1008000 + (tick >> 1) - 1) / tick; + + if (ir_calc == ir) { + *ir_b = 126; + *ir_u = 0; + *ir_s = 0; + + return 0; + } else if (ir_calc > ir) { + /* Increasing the denominator to select ir_s value */ + while (ir_calc > ir) { + ir_s_calc++; + ir_calc = 1008000 / (tick * (1 << ir_s_calc)); + } + + if (ir_calc == ir) + *ir_b = 126; + else + *ir_b = (ir * tick * (1 << ir_s_calc) + 4000) / 8000; + } else { + /* Increasing the numerator to select ir_u value */ + u32 numerator; + + while (ir_calc < ir) { + ir_u_calc++; + numerator = 1008000 * (1 << ir_u_calc); + ir_calc = (numerator + (tick >> 1)) / tick; + } + + if (ir_calc == ir) { + *ir_b = 126; + } else { + u32 denominator = (8000 * (1 << --ir_u_calc)); + *ir_b = (ir * tick + (denominator >> 1)) / denominator; + } + } + + *ir_u = ir_u_calc; + *ir_s = ir_s_calc; + + return 0; +}
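Plugging the driver's constants into the formula above makes the magic numbers legible: 126 * 8 * 1000 is the 1008000 constant, and for the priority level Tick is 6 * 256 = 1536, so the ir_b=126, ir_u=0, ir_s=0 starting point is about 656 Mbps. A self-contained check of the search directions (plain C, no kernel headers):

#include <stdint.h>
#include <stdio.h>

/* IR(Mbps) = ir_b * 2^ir_u * 8 * 1000 / (tick * 2^ir_s), per the comment */
static uint32_t shaper_rate(uint32_t ir_b, uint32_t ir_u, uint32_t ir_s,
			    uint32_t tick)
{
	return ir_b * (1u << ir_u) * 8u * 1000u / (tick * (1u << ir_s));
}

int main(void)
{
	uint32_t tick = 6 * 256;	/* priority-level tick */

	printf("%u Mbps\n", shaper_rate(126, 0, 0, tick));	/* 656: baseline */
	printf("%u Mbps\n", shaper_rate(126, 0, 1, tick));	/* 328: ir_s divides down */
	printf("%u Mbps\n", shaper_rate(126, 1, 0, tick));	/* 1312: ir_u scales up */
	return 0;
}

So the routine pins ir_b near 126 and walks ir_s up for rates below the baseline or ir_u up for rates above it, then refines ir_b.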
+ +static int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx) +{ + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false); + + desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) | + (rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0)); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id) +{ + u8 tc; + + for (tc = 0; tc < hdev->tm_info.num_tc; tc++) + if (hdev->tm_info.tc_info[tc].up == pri_id) + break; + + if (tc >= hdev->tm_info.num_tc) + return -EINVAL; + + /** + * the register for priority has four bytes, the first byte includes + * priority0 and priority1, the higher 4 bits stand for priority1 + * while the lower 4 bits stand for priority0, as below: + * first byte: | pri_1 | pri_0 | + * second byte: | pri_3 | pri_2 | + * third byte: | pri_5 | pri_4 | + * fourth byte: | pri_7 | pri_6 | + */ + pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4); + + return 0; +} + +static int hclge_up_to_tc_map(struct hclge_dev *hdev) +{ + struct hclge_desc desc; + u8 *pri = (u8 *)desc.data; + u8 pri_id; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false); + + for (pri_id = 0; pri_id < hdev->tm_info.num_tc; pri_id++) { + ret = hclge_fill_pri_array(hdev, pri, pri_id); + if (ret) + return ret; + } + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev, + u8 pg_id, u8 pri_bit_map) +{ + struct hclge_pg_to_pri_link_cmd *map; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false); + + map = (struct hclge_pg_to_pri_link_cmd *)desc.data; + + map->pg_id = pg_id; + map->pri_bit_map = pri_bit_map; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, + u16 qs_id, u8 pri) +{ + struct hclge_qs_to_pri_link_cmd *map; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false); + + map = (struct hclge_qs_to_pri_link_cmd *)desc.data; + + map->qs_id = cpu_to_le16(qs_id); + map->priority = pri; + map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev, + u8 q_id, u16 qs_id) +{ + struct hclge_nq_to_qs_link_cmd *map; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false); + + map = (struct hclge_nq_to_qs_link_cmd *)desc.data; + + map->nq_id = cpu_to_le16(q_id); + map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id, + u8 dwrr) +{ + struct hclge_pg_weight_cmd *weight; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false); + + weight = (struct hclge_pg_weight_cmd *)desc.data; + + weight->pg_id = pg_id; + weight->dwrr = dwrr; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id, + u8 dwrr) +{ + struct hclge_priority_weight_cmd *weight; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false); + + weight = (struct hclge_priority_weight_cmd *)desc.data; + + weight->pri_id = pri_id; + weight->dwrr = dwrr; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id, + u8 dwrr) +{ + struct hclge_qs_weight_cmd *weight; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false); + + weight = (struct hclge_qs_weight_cmd *)desc.data; + + weight->qs_id = cpu_to_le16(qs_id); + weight->dwrr = dwrr; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +}
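The pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4) packing in hclge_fill_pri_array is easy to sanity-check in isolation. With a made-up priority-to-TC map (the real one is derived from tm_info.tc_info[tc].up), priority 3 mapping to TC 2 must land in the high nibble of byte 1:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical priority -> tc map for 8 priorities */
	const uint8_t tc_of_pri[8] = {0, 0, 1, 2, 2, 3, 3, 3};
	uint8_t pri[4] = {0};
	uint8_t pri_id;

	for (pri_id = 0; pri_id < 8; pri_id++)
		pri[pri_id >> 1] |= tc_of_pri[pri_id] << ((pri_id & 1) * 4);

	/* prints 00 21 32 33: e.g. byte 1 is | pri_3 = 2 | pri_2 = 1 | */
	printf("%02x %02x %02x %02x\n", pri[0], pri[1], pri[2], pri[3]);
	return 0;
}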
+ +static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev, + enum hclge_shap_bucket bucket, u8 pg_id, + u8 ir_b, u8 ir_u, u8 ir_s, u8 bs_b, u8 bs_s) +{ + struct hclge_pg_shapping_cmd *shap_cfg_cmd; + enum hclge_opcode_type opcode; + struct hclge_desc desc; + + opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING : + HCLGE_OPC_TM_PG_C_SHAPPING; + hclge_cmd_setup_basic_desc(&desc, opcode, false); + + shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data; + + shap_cfg_cmd->pg_id = pg_id; + + hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_B, ir_b); + hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_U, ir_u); + hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_S, ir_s); + hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, BS_B, bs_b); + hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, BS_S, bs_s); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev, + enum hclge_shap_bucket bucket, u8 pri_id, + u8 ir_b, u8 ir_u, u8 ir_s, + u8 bs_b, u8 bs_s) +{ + struct hclge_pri_shapping_cmd *shap_cfg_cmd; + enum hclge_opcode_type opcode; + struct hclge_desc desc; + + opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING : + HCLGE_OPC_TM_PRI_C_SHAPPING; + + hclge_cmd_setup_basic_desc(&desc, opcode, false); + + shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data; + + shap_cfg_cmd->pri_id = pri_id; + + hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_B, ir_b); + hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_U, ir_u); + hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_S, ir_s); + hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, BS_B, bs_b); + hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, BS_S, bs_s); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id) +{ + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false); + + if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR) + desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK); + else + desc.data[1] = 0; + + desc.data[0] = cpu_to_le32(pg_id); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id) +{ + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false); + + if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR) + desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK); + else + desc.data[1] = 0; + + desc.data[0] = cpu_to_le32(pri_id); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id) +{ + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false); + + if (hdev->tm_info.tc_info[qs_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR) + desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK); + else + desc.data[1] = 0; + + desc.data[0] = cpu_to_le32(qs_id); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc) +{ + struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING, + false); + + bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data; + + bp_to_qs_map_cmd->tc_id = tc; + + /* Qset and tc have a one to one mapping */ + bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(1 << tc); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +}
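The shapping commands above funnel five small fields into one __le32 via hclge_tm_set_feild(); the layout, taken from the hclge_tm.h added later in this patch, is IR_B in bits 7:0, IR_U in 11:8, IR_S in 15:12, BS_B in 20:16 and BS_S in 25:21. Packing the defaults by hand shows what actually goes on the wire (userspace sketch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ir_b = 126, ir_u = 0, ir_s = 0;	/* ~656 Mbps at priority level */
	uint32_t bs_b = 1, bs_s = 4;	/* the HCLGE_SHAPER_BS_*_DEF values passed above */
	uint32_t para = 0;

	para |= ir_b << 0;		/* HCLGE_TM_SHAP_IR_B_LSH */
	para |= ir_u << 8;		/* HCLGE_TM_SHAP_IR_U_LSH */
	para |= ir_s << 12;		/* HCLGE_TM_SHAP_IR_S_LSH */
	para |= bs_b << 16;		/* HCLGE_TM_SHAP_BS_B_LSH */
	para |= bs_s << 21;		/* HCLGE_TM_SHAP_BS_S_LSH */

	printf("shapping_para = %#010x\n", para);	/* 0x0081007e */
	return 0;
}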
+ +static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_dev *hdev = vport->back; + u8 i; + + vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit; + kinfo->num_tc = + min_t(u16, kinfo->num_tqps, hdev->tm_info.num_tc); + kinfo->rss_size + = min_t(u16, hdev->rss_size_max, + kinfo->num_tqps / kinfo->num_tc); + vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id; + vport->dwrr = 100; /* 100 percent as init */ + + for (i = 0; i < kinfo->num_tc; i++) { + if (hdev->hw_tc_map & BIT(i)) { + kinfo->tc_info[i].enable = true; + kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size; + kinfo->tc_info[i].tqp_count = kinfo->rss_size; + kinfo->tc_info[i].tc = i; + kinfo->tc_info[i].up = hdev->tm_info.tc_info[i].up; + } else { + /* Set to default queue if the TC is disabled */ + kinfo->tc_info[i].enable = false; + kinfo->tc_info[i].tqp_offset = 0; + kinfo->tc_info[i].tqp_count = 1; + kinfo->tc_info[i].tc = 0; + kinfo->tc_info[i].up = 0; + } + } +} + +static void hclge_tm_vport_info_update(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + u32 i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + hclge_tm_vport_tc_info_update(vport); + + vport++; + } +} + +static void hclge_tm_tc_info_init(struct hclge_dev *hdev) +{ + u8 i; + + for (i = 0; i < hdev->tm_info.num_tc; i++) { + hdev->tm_info.tc_info[i].tc_id = i; + hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR; + hdev->tm_info.tc_info[i].up = i; + hdev->tm_info.tc_info[i].pgid = 0; + hdev->tm_info.tc_info[i].bw_limit = + hdev->tm_info.pg_info[0].bw_limit; + } + + hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE; +} + +static void hclge_tm_pg_info_init(struct hclge_dev *hdev) +{ + u8 i; + + for (i = 0; i < hdev->tm_info.num_pg; i++) { + int k; + + hdev->tm_info.pg_dwrr[i] = i ?
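The min_t() pair in hclge_tm_vport_tc_info_update is the whole queue-layout policy: split the vport's TQPs evenly across the enabled TCs, capped by the hardware RSS limit, and lay the TCs out back to back. With hypothetical sizes:

#include <stdio.h>

int main(void)
{
	unsigned int num_tqps = 16, num_tc = 4, rss_size_max = 8;
	unsigned int rss_size = num_tqps / num_tc;	/* 4 per TC */
	unsigned int i;

	if (rss_size > rss_size_max)
		rss_size = rss_size_max;

	for (i = 0; i < num_tc; i++)
		printf("TC%u: tqp_offset=%u tqp_count=%u\n",
		       i, i * rss_size, rss_size);
	return 0;
}

A vport's qset block starts at num_tc * vport_id, so vport 1 with 4 TCs owns qsets 4 through 7.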
0 : 100; + + hdev->tm_info.pg_info[i].pg_id = i; + hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR; + + hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE; + + if (i != 0) + continue; + + hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map; + for (k = 0; k < hdev->tm_info.num_tc; k++) + hdev->tm_info.pg_info[i].tc_dwrr[k] = 100; + } +} + +static int hclge_tm_schd_info_init(struct hclge_dev *hdev) +{ + if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) && + (hdev->tm_info.num_pg != 1)) + return -EINVAL; + + hclge_tm_pg_info_init(hdev); + + hclge_tm_tc_info_init(hdev); + + hclge_tm_vport_info_update(hdev); + + hdev->tm_info.fc_mode = HCLGE_FC_NONE; + hdev->fc_mode_last_time = hdev->tm_info.fc_mode; + + return 0; +} + +static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev) +{ + int ret; + u32 i; + + if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) + return 0; + + for (i = 0; i < hdev->tm_info.num_pg; i++) { + /* Cfg mapping */ + ret = hclge_tm_pg_to_pri_map_cfg( + hdev, i, hdev->tm_info.pg_info[i].tc_bit_map); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev) +{ + u8 ir_u, ir_b, ir_s; + int ret; + u32 i; + + /* Cfg pg schd */ + if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) + return 0; + + /* Pg to pri */ + for (i = 0; i < hdev->tm_info.num_pg; i++) { + /* Calc shaper para */ + ret = hclge_shaper_para_calc( + hdev->tm_info.pg_info[i].bw_limit, + HCLGE_SHAPER_LVL_PG, + &ir_b, &ir_u, &ir_s); + if (ret) + return ret; + + ret = hclge_tm_pg_shapping_cfg(hdev, + HCLGE_TM_SHAP_C_BUCKET, i, + 0, 0, 0, HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + if (ret) + return ret; + + ret = hclge_tm_pg_shapping_cfg(hdev, + HCLGE_TM_SHAP_P_BUCKET, i, + ir_b, ir_u, ir_s, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev) +{ + int ret; + u32 i; + + /* cfg pg schd */ + if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) + return 0; + + /* pg to prio */ + for (i = 0; i < hdev->tm_info.num_pg; i++) { + /* Cfg dwrr */ + ret = hclge_tm_pg_weight_cfg(hdev, i, + hdev->tm_info.pg_dwrr[i]); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev, + struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hnae3_queue **tqp = kinfo->tqp; + struct hnae3_tc_info *v_tc_info; + u32 i, j; + int ret; + + for (i = 0; i < kinfo->num_tc; i++) { + v_tc_info = &kinfo->tc_info[i]; + for (j = 0; j < v_tc_info->tqp_count; j++) { + struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j]; + + ret = hclge_tm_q_to_qs_map_cfg(hdev, + hclge_get_queue_id(q), + vport->qs_offset + i); + if (ret) + return ret; + } + } + + return 0; +} + +static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int ret; + u32 i; + + if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { + /* Cfg qs -> pri mapping, one by one mapping */ + for (i = 0; i < hdev->tm_info.num_tc; i++) { + ret = hclge_tm_qs_to_pri_map_cfg(hdev, i, i); + if (ret) + return ret; + } + } else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) { + int k; + /* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */ + for (k = 0; k < hdev->num_alloc_vport; k++) + for (i = 0; i < HNAE3_MAX_TC; i++) { + ret = hclge_tm_qs_to_pri_map_cfg( + hdev, vport[k].qs_offset + i, k); + if (ret) + return ret; + } + } else { + return -EINVAL; + } + + /* Cfg q -> 
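The branch in hclge_tm_pri_q_qs_cfg encodes the two scheduler modes side by side: TC-based links qset i to priority i for the PF, while vnet-based gives every vport a block of qsets at qs_offset = num_tc * vport_id (the loop above actually walks HNAE3_MAX_TC of them), all linked to a priority equal to the vport id. With num_tc = 4 and three vports the mappings come out as:

#include <stdio.h>

int main(void)
{
	unsigned int num_tc = 4, num_vport = 3, i, k;

	/* TC-based: qset i -> priority i, PF only */
	for (i = 0; i < num_tc; i++)
		printf("tc-based:   qs %u -> pri %u\n", i, i);

	/* vnet-based: each vport's qset block -> priority == vport id */
	for (k = 0; k < num_vport; k++)
		printf("vnet-based: qs %u..%u -> pri %u\n",
		       k * num_tc, k * num_tc + num_tc - 1, k);
	return 0;
}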
qs mapping */ + for (i = 0; i < hdev->num_alloc_vport; i++) { + ret = hclge_vport_q_to_qs_map(hdev, vport); + if (ret) + return ret; + + vport++; + } + + return 0; +} + +static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev) +{ + u8 ir_u, ir_b, ir_s; + int ret; + u32 i; + + for (i = 0; i < hdev->tm_info.num_tc; i++) { + ret = hclge_shaper_para_calc( + hdev->tm_info.tc_info[i].bw_limit, + HCLGE_SHAPER_LVL_PRI, + &ir_b, &ir_u, &ir_s); + if (ret) + return ret; + + ret = hclge_tm_pri_shapping_cfg( + hdev, HCLGE_TM_SHAP_C_BUCKET, i, + 0, 0, 0, HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + if (ret) + return ret; + + ret = hclge_tm_pri_shapping_cfg( + hdev, HCLGE_TM_SHAP_P_BUCKET, i, + ir_b, ir_u, ir_s, HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport) +{ + struct hclge_dev *hdev = vport->back; + u8 ir_u, ir_b, ir_s; + int ret; + + ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF, + &ir_b, &ir_u, &ir_s); + if (ret) + return ret; + + ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, + vport->vport_id, + 0, 0, 0, HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + if (ret) + return ret; + + ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, + vport->vport_id, + ir_b, ir_u, ir_s, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + if (ret) + return ret; + + return 0; +} + +static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_dev *hdev = vport->back; + struct hnae3_tc_info *v_tc_info; + u8 ir_u, ir_b, ir_s; + u32 i; + int ret; + + for (i = 0; i < kinfo->num_tc; i++) { + v_tc_info = &kinfo->tc_info[i]; + ret = hclge_shaper_para_calc( + hdev->tm_info.tc_info[i].bw_limit, + HCLGE_SHAPER_LVL_QSET, + &ir_b, &ir_u, &ir_s); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int ret; + u32 i; + + /* Need config vport shaper */ + for (i = 0; i < hdev->num_alloc_vport; i++) { + ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport); + if (ret) + return ret; + + ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport); + if (ret) + return ret; + + vport++; + } + + return 0; +} + +static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev) +{ + int ret; + + if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { + ret = hclge_tm_pri_tc_base_shaper_cfg(hdev); + if (ret) + return ret; + } else { + ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev) +{ + struct hclge_pg_info *pg_info; + u8 dwrr; + int ret; + u32 i; + + for (i = 0; i < hdev->tm_info.num_tc; i++) { + pg_info = + &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid]; + dwrr = pg_info->tc_dwrr[i]; + + ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr); + if (ret) + return ret; + + ret = hclge_tm_qs_weight_cfg(hdev, i, dwrr); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_dev *hdev = vport->back; + int ret; + u8 i; + + /* Vf dwrr */ + ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr); + if (ret) + return ret; + + /* Qset dwrr */ + for (i = 0; i < kinfo->num_tc; i++) { + ret = hclge_tm_qs_weight_cfg( + hdev, 
vport->qs_offset + i, + hdev->tm_info.pg_info[0].tc_dwrr[i]); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int ret; + u32 i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport); + if (ret) + return ret; + + vport++; + } + + return 0; +} + +static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev) +{ + int ret; + + if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { + ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev); + if (ret) + return ret; + } else { + ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_map_cfg(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_tm_pg_to_pri_map(hdev); + if (ret) + return ret; + + return hclge_tm_pri_q_qs_cfg(hdev); +} + +static int hclge_tm_shaper_cfg(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_tm_pg_shaper_cfg(hdev); + if (ret) + return ret; + + return hclge_tm_pri_shaper_cfg(hdev); +} + +int hclge_tm_dwrr_cfg(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_tm_pg_dwrr_cfg(hdev); + if (ret) + return ret; + + return hclge_tm_pri_dwrr_cfg(hdev); +} + +static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev) +{ + int ret; + u8 i; + + /* Only being config on TC-Based scheduler mode */ + if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) + return 0; + + for (i = 0; i < hdev->tm_info.num_pg; i++) { + ret = hclge_tm_pg_schd_mode_cfg(hdev, i); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_dev *hdev = vport->back; + int ret; + u8 i; + + ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id); + if (ret) + return ret; + + for (i = 0; i < kinfo->num_tc; i++) { + ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int ret; + u8 i; + + if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { + for (i = 0; i < hdev->tm_info.num_tc; i++) { + ret = hclge_tm_pri_schd_mode_cfg(hdev, i); + if (ret) + return ret; + + ret = hclge_tm_qs_schd_mode_cfg(hdev, i); + if (ret) + return ret; + } + } else { + for (i = 0; i < hdev->num_alloc_vport; i++) { + ret = hclge_tm_schd_mode_vnet_base_cfg(vport); + if (ret) + return ret; + + vport++; + } + } + + return 0; +} + +static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_tm_lvl2_schd_mode_cfg(hdev); + if (ret) + return ret; + + return hclge_tm_lvl34_schd_mode_cfg(hdev); +} + +static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev) +{ + int ret; + + /* Cfg tm mapping */ + ret = hclge_tm_map_cfg(hdev); + if (ret) + return ret; + + /* Cfg tm shaper */ + ret = hclge_tm_shaper_cfg(hdev); + if (ret) + return ret; + + /* Cfg dwrr */ + ret = hclge_tm_dwrr_cfg(hdev); + if (ret) + return ret; + + /* Cfg schd mode for each level schd */ + return hclge_tm_schd_mode_hw(hdev); +} + +int hclge_pause_setup_hw(struct hclge_dev *hdev) +{ + bool en = hdev->tm_info.fc_mode != HCLGE_FC_PFC; + int ret; + u8 i; + + ret = hclge_mac_pause_en_cfg(hdev, en, en); + if (ret) + return ret; + + for (i = 0; i < hdev->tm_info.num_tc; i++) { + ret = hclge_tm_qs_bp_cfg(hdev, i); + if (ret) + return ret; + } + + return hclge_up_to_tc_map(hdev); +} + +int 
hclge_tm_init_hw(struct hclge_dev *hdev) +{ + int ret; + + if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) && + (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE)) + return -ENOTSUPP; + + ret = hclge_tm_schd_setup_hw(hdev); + if (ret) + return ret; + + ret = hclge_pause_setup_hw(hdev); + if (ret) + return ret; + + return 0; +} + +int hclge_tm_schd_init(struct hclge_dev *hdev) +{ + int ret = hclge_tm_schd_info_init(hdev); + + if (ret) + return ret; + + return hclge_tm_init_hw(hdev); +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h new file mode 100644 index 000000000000..7e67337dfaf2 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2016~2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __HCLGE_TM_H +#define __HCLGE_TM_H + +#include <linux/types.h> + +/* MAC Pause */ +#define HCLGE_TX_MAC_PAUSE_EN_MSK BIT(0) +#define HCLGE_RX_MAC_PAUSE_EN_MSK BIT(1) + +#define HCLGE_TM_PORT_BASE_MODE_MSK BIT(0) + +/* SP or DWRR */ +#define HCLGE_TM_TX_SCHD_DWRR_MSK BIT(0) +#define HCLGE_TM_TX_SCHD_SP_MSK (0xFE) + +struct hclge_pg_to_pri_link_cmd { + u8 pg_id; + u8 rsvd1[3]; + u8 pri_bit_map; +}; + +struct hclge_qs_to_pri_link_cmd { + __le16 qs_id; + __le16 rsvd; + u8 priority; +#define HCLGE_TM_QS_PRI_LINK_VLD_MSK BIT(0) + u8 link_vld; +}; + +struct hclge_nq_to_qs_link_cmd { + __le16 nq_id; + __le16 rsvd; +#define HCLGE_TM_Q_QS_LINK_VLD_MSK BIT(10) + __le16 qset_id; +}; + +struct hclge_pg_weight_cmd { + u8 pg_id; + u8 dwrr; +}; + +struct hclge_priority_weight_cmd { + u8 pri_id; + u8 dwrr; +}; + +struct hclge_qs_weight_cmd { + __le16 qs_id; + u8 dwrr; +}; + +#define HCLGE_TM_SHAP_IR_B_MSK GENMASK(7, 0) +#define HCLGE_TM_SHAP_IR_B_LSH 0 +#define HCLGE_TM_SHAP_IR_U_MSK GENMASK(11, 8) +#define HCLGE_TM_SHAP_IR_U_LSH 8 +#define HCLGE_TM_SHAP_IR_S_MSK GENMASK(15, 12) +#define HCLGE_TM_SHAP_IR_S_LSH 12 +#define HCLGE_TM_SHAP_BS_B_MSK GENMASK(20, 16) +#define HCLGE_TM_SHAP_BS_B_LSH 16 +#define HCLGE_TM_SHAP_BS_S_MSK GENMASK(25, 21) +#define HCLGE_TM_SHAP_BS_S_LSH 21 + +enum hclge_shap_bucket { + HCLGE_TM_SHAP_C_BUCKET = 0, + HCLGE_TM_SHAP_P_BUCKET, +}; + +struct hclge_pri_shapping_cmd { + u8 pri_id; + u8 rsvd[3]; + __le32 pri_shapping_para; +}; + +struct hclge_pg_shapping_cmd { + u8 pg_id; + u8 rsvd[3]; + __le32 pg_shapping_para; +}; + +struct hclge_bp_to_qs_map_cmd { + u8 tc_id; + u8 rsvd[2]; + u8 qs_group_id; + __le32 qs_bit_map; + u32 rsvd1; +}; + +#define hclge_tm_set_feild(dest, string, val) \ + hnae_set_field((dest), (HCLGE_TM_SHAP_##string##_MSK), \ + (HCLGE_TM_SHAP_##string##_LSH), val) +#define hclge_tm_get_feild(src, string) \ + hnae_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \ + (HCLGE_TM_SHAP_##string##_LSH)) + +int hclge_tm_schd_init(struct hclge_dev *hdev); +int hclge_pause_setup_hw(struct hclge_dev *hdev); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c new file mode 100644 index 000000000000..069ae426aa24 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -0,0 +1,2849 @@ +/* + * Copyright (c) 2016~2017 Hisilicon Limited. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/dma-mapping.h> +#include <linux/etherdevice.h> +#include <linux/interrupt.h> +#include <linux/if_vlan.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/skbuff.h> +#include <linux/sctp.h> +#include <linux/vermagic.h> +#include <net/gre.h> +#include <net/vxlan.h> + +#include "hnae3.h" +#include "hns3_enet.h" + +const char hns3_driver_name[] = "hns3"; +const char hns3_driver_version[] = VERMAGIC_STRING; +static const char hns3_driver_string[] = + "Hisilicon Ethernet Network Driver for Hip08 Family"; +static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation."; +static struct hnae3_client client; + +/* hns3_pci_tbl - PCI Device ID Table + * + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static const struct pci_device_id hns3_pci_tbl[] = { + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0}, + /* required last entry */ + {0, } +}; +MODULE_DEVICE_TABLE(pci, hns3_pci_tbl); + +static irqreturn_t hns3_irq_handle(int irq, void *dev) +{ + struct hns3_enet_tqp_vector *tqp_vector = dev; + + napi_schedule(&tqp_vector->napi); + + return IRQ_HANDLED; +} + +static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv) +{ + struct hns3_enet_tqp_vector *tqp_vectors; + unsigned int i; + + for (i = 0; i < priv->vector_num; i++) { + tqp_vectors = &priv->tqp_vector[i]; + + if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED) + continue; + + /* release the irq resource */ + free_irq(tqp_vectors->vector_irq, tqp_vectors); + tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED; + } +} + +static int hns3_nic_init_irq(struct hns3_nic_priv *priv) +{ + struct hns3_enet_tqp_vector *tqp_vectors; + int txrx_int_idx = 0; + int rx_int_idx = 0; + int tx_int_idx = 0; + unsigned int i; + int ret; + + for (i = 0; i < priv->vector_num; i++) { + tqp_vectors = &priv->tqp_vector[i]; + + if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED) + continue; + + if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) { + snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, + "%s-%s-%d", priv->netdev->name, "TxRx", + txrx_int_idx++); + txrx_int_idx++; + } else if (tqp_vectors->rx_group.ring) { + snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, + "%s-%s-%d", priv->netdev->name, "Rx", + rx_int_idx++); + } else if (tqp_vectors->tx_group.ring) { + snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, + "%s-%s-%d", priv->netdev->name, "Tx", + tx_int_idx++); + } else { + /* Skip this unused q_vector */ + continue; + } + + tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0'; + + ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0, + tqp_vectors->name, + tqp_vectors); + if (ret) { + netdev_err(priv->netdev, "request irq(%d) fail\n", + tqp_vectors->vector_irq); + return ret; + } + + tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED; + } + + return 0; +} + +static 
void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector, + u32 mask_en) +{ + writel(mask_en, tqp_vector->mask_addr); +} + +static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector) +{ + napi_enable(&tqp_vector->napi); + + /* enable vector */ + hns3_mask_vector_irq(tqp_vector, 1); +} + +static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector) +{ + /* disable vector */ + hns3_mask_vector_irq(tqp_vector, 0); + + disable_irq(tqp_vector->vector_irq); + napi_disable(&tqp_vector->napi); +} + +static void hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector *tqp_vector, + u32 gl_value) +{ + /* this defines the configuration for GL (Interrupt Gap Limiter) + * GL defines the inter-interrupt gap. + * GL and RL (Rate Limiter) are 2 ways to achieve interrupt coalescing + */ + writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET); + writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET); + writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL2_OFFSET); +} + +static void hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector *tqp_vector, + u32 rl_value) +{ + /* this defines the configuration for RL (Interrupt Rate Limiter). + * RL defines the rate of interrupts, i.e. the number of interrupts per second. + * GL and RL (Rate Limiter) are 2 ways to achieve interrupt coalescing + */ + writel(rl_value, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET); +} + +static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector) +{ + /* initialize the configuration for interrupt coalescing. + * 1. GL (Interrupt Gap Limiter) + * 2. RL (Interrupt Rate Limiter) + */ + + /* Default: enable interrupt coalescing */ + tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K; + tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K; + hns3_set_vector_coalesc_gl(tqp_vector, HNS3_INT_GL_50K); + /* for now we are disabling Interrupt RL - we + * will re-enable later + */ + hns3_set_vector_coalesc_rl(tqp_vector, 0); + tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW; + tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW; +}
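The GL value is a minimum gap between interrupts, so the HNS3_INT_GL_50K default reads as a target interrupt ceiling. Assuming the GL register counts the gap in 2 us units (an assumption; the encoding is hardware-defined), a value of 10 means a 20 us gap, i.e. at most roughly 50k interrupts per second, which is where the name would come from:

#include <stdio.h>

int main(void)
{
	unsigned int gl_reg = 10;		/* assumed HNS3_INT_GL_50K encoding */
	unsigned int gap_us = gl_reg * 2;	/* assumed 2 us per unit */

	printf("max ~%u interrupts/s\n", 1000000 / gap_us);	/* 50000 */
	return 0;
}

RL would cap the absolute interrupt rate instead of the gap; the init code leaves it at 0 (disabled) and relies on GL alone.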
+ +static int hns3_nic_net_up(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + int i, j; + int ret; + + /* get irq resource for all vectors */ + ret = hns3_nic_init_irq(priv); + if (ret) { + netdev_err(netdev, "hns init irq failed! ret=%d\n", ret); + return ret; + } + + /* enable the vectors */ + for (i = 0; i < priv->vector_num; i++) + hns3_vector_enable(&priv->tqp_vector[i]); + + /* start the ae_dev */ + ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0; + if (ret) + goto out_start_err; + + return 0; + +out_start_err: + for (j = i - 1; j >= 0; j--) + hns3_vector_disable(&priv->tqp_vector[j]); + + hns3_nic_uninit_irq(priv); + + return ret; +} + +static int hns3_nic_net_open(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + int ret; + + netif_carrier_off(netdev); + + ret = netif_set_real_num_tx_queues(netdev, h->kinfo.num_tqps); + if (ret) { + netdev_err(netdev, + "netif_set_real_num_tx_queues fail, ret=%d!\n", + ret); + return ret; + } + + ret = netif_set_real_num_rx_queues(netdev, h->kinfo.num_tqps); + if (ret) { + netdev_err(netdev, + "netif_set_real_num_rx_queues fail, ret=%d!\n", ret); + return ret; + } + + ret = hns3_nic_net_up(netdev); + if (ret) { + netdev_err(netdev, + "hns net up fail, ret=%d!\n", ret); + return ret; + } + + return 0; +} + +static void hns3_nic_net_down(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + const struct hnae3_ae_ops *ops; + int i; + + /* stop ae_dev */ + ops = priv->ae_handle->ae_algo->ops; + if (ops->stop) + ops->stop(priv->ae_handle); + + /* disable vectors */ + for (i = 0; i < priv->vector_num; i++) + hns3_vector_disable(&priv->tqp_vector[i]); + + /* free irq resources */ + hns3_nic_uninit_irq(priv); +} + +static int hns3_nic_net_stop(struct net_device *netdev) +{ + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + + hns3_nic_net_down(netdev); + + return 0; +} + +void hns3_set_multicast_list(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + struct netdev_hw_addr *ha = NULL; + + if (h->ae_algo->ops->set_mc_addr) { + netdev_for_each_mc_addr(ha, netdev) + if (h->ae_algo->ops->set_mc_addr(h, ha->addr)) + netdev_err(netdev, "set multicast fail\n"); + } +} + +static int hns3_nic_uc_sync(struct net_device *netdev, + const unsigned char *addr) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (h->ae_algo->ops->add_uc_addr) + return h->ae_algo->ops->add_uc_addr(h, addr); + + return 0; +} + +static int hns3_nic_uc_unsync(struct net_device *netdev, + const unsigned char *addr) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (h->ae_algo->ops->rm_uc_addr) + return h->ae_algo->ops->rm_uc_addr(h, addr); + + return 0; +} + +static int hns3_nic_mc_sync(struct net_device *netdev, + const unsigned char *addr) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (h->ae_algo->ops->add_mc_addr) + return h->ae_algo->ops->add_mc_addr(h, addr); + + return 0; +} + +static int hns3_nic_mc_unsync(struct net_device *netdev, + const unsigned char *addr) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (h->ae_algo->ops->rm_mc_addr) + return h->ae_algo->ops->rm_mc_addr(h, addr); + + return 0; +} + +void hns3_nic_set_rx_mode(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (h->ae_algo->ops->set_promisc_mode) { + if (netdev->flags & IFF_PROMISC) + h->ae_algo->ops->set_promisc_mode(h, 1); + else + h->ae_algo->ops->set_promisc_mode(h, 0); + } + if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync)) + netdev_err(netdev, "sync uc address fail\n"); + if (netdev->flags & IFF_MULTICAST) + if
(__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync)) + netdev_err(netdev, "sync mc address fail\n"); +} + +static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, + u16 *mss, u32 *type_cs_vlan_tso) +{ + u32 l4_offset, hdr_len; + union l3_hdr_info l3; + union l4_hdr_info l4; + u32 l4_paylen; + int ret; + + if (!skb_is_gso(skb)) + return 0; + + ret = skb_cow_head(skb, 0); + if (ret) + return ret; + + l3.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + /* Software should clear the IPv4's checksum field when tso is + * needed. + */ + if (l3.v4->version == 4) + l3.v4->check = 0; + + /* tunnel packet.*/ + if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | + SKB_GSO_GRE_CSUM | + SKB_GSO_UDP_TUNNEL | + SKB_GSO_UDP_TUNNEL_CSUM)) { + if ((!(skb_shinfo(skb)->gso_type & + SKB_GSO_PARTIAL)) && + (skb_shinfo(skb)->gso_type & + SKB_GSO_UDP_TUNNEL_CSUM)) { + /* Software should clear the udp's checksum + * field when tso is needed. + */ + l4.udp->check = 0; + } + /* reset l3&l4 pointers from outer to inner headers */ + l3.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + + /* Software should clear the IPv4's checksum field when + * tso is needed. + */ + if (l3.v4->version == 4) + l3.v4->check = 0; + } + + /* normal or tunnel packet*/ + l4_offset = l4.hdr - skb->data; + hdr_len = (l4.tcp->doff * 4) + l4_offset; + + /* remove payload length from inner pseudo checksum when tso*/ + l4_paylen = skb->len - l4_offset; + csum_replace_by_diff(&l4.tcp->check, + (__force __wsum)htonl(l4_paylen)); + + /* find the txbd field values */ + *paylen = skb->len - hdr_len; + hnae_set_bit(*type_cs_vlan_tso, + HNS3_TXD_TSO_B, 1); + + /* get MSS for TSO */ + *mss = skb_shinfo(skb)->gso_size; + + return 0; +} + +static void hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, + u8 *il4_proto) +{ + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } l3; + unsigned char *l4_hdr; + unsigned char *exthdr; + u8 l4_proto_tmp; + __be16 frag_off; + + /* find outer header point */ + l3.hdr = skb_network_header(skb); + l4_hdr = skb_inner_transport_header(skb); + + if (skb->protocol == htons(ETH_P_IPV6)) { + exthdr = l3.hdr + sizeof(*l3.v6); + l4_proto_tmp = l3.v6->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto_tmp, &frag_off); + } else if (skb->protocol == htons(ETH_P_IP)) { + l4_proto_tmp = l3.v4->protocol; + } + + *ol4_proto = l4_proto_tmp; + + /* tunnel packet */ + if (!skb->encapsulation) { + *il4_proto = 0; + return; + } + + /* find inner header point */ + l3.hdr = skb_inner_network_header(skb); + l4_hdr = skb_inner_transport_header(skb); + + if (l3.v6->version == 6) { + exthdr = l3.hdr + sizeof(*l3.v6); + l4_proto_tmp = l3.v6->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto_tmp, &frag_off); + } else if (l3.v4->version == 4) { + l4_proto_tmp = l3.v4->protocol; + } + + *il4_proto = l4_proto_tmp; +} + +static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, + u8 il4_proto, u32 *type_cs_vlan_tso, + u32 *ol_type_vlan_len_msec) +{ + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } l3; + union { + struct tcphdr *tcp; + struct udphdr *udp; + struct gre_base_hdr *gre; + unsigned char *hdr; + } l4; + unsigned char *l2_hdr; + u8 l4_proto = ol4_proto; + u32 ol2_len; + u32 ol3_len; + u32 ol4_len; + u32 l2_len; + u32 l3_len; + + l3.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + /* compute L2 header size for 
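The csum_replace_by_diff() call in hns3_set_tso above undoes one specific thing: for CHECKSUM_PARTIAL TCP the stack seeds tcp->check with the pseudo-header sum, which includes the TCP length (header plus the whole payload). Hardware regenerates per-segment lengths during TSO, so the driver folds the total l4_paylen back out. A toy illustration with invented numbers (the kernel helper works on ones'-complement diffs, not plain subtraction):

#include <stdint.h>
#include <stdio.h>

static uint16_t csum_fold(uint32_t sum)	/* fold carries, RFC 1071 style */
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint32_t pseudo_base = 0x1234;		/* addresses + proto, invented */
	uint32_t l4_paylen = 20 + 1400;		/* TCP header + payload bytes */

	/* the stack's seed includes the length; the driver wants it without */
	printf("seeded %#x, after removal %#x\n",
	       csum_fold(pseudo_base + l4_paylen), csum_fold(pseudo_base));
	return 0;
}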
normal packet, defined in 2 Bytes */ + l2_len = l3.hdr - skb->data; + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, + HNS3_TXD_L2LEN_S, l2_len >> 1); + + /* tunnel packet*/ + if (skb->encapsulation) { + /* compute OL2 header size, defined in 2 Bytes */ + ol2_len = l2_len; + hnae_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_L2LEN_M, + HNS3_TXD_L2LEN_S, ol2_len >> 1); + + /* compute OL3 header size, defined in 4 Bytes */ + ol3_len = l4.hdr - l3.hdr; + hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M, + HNS3_TXD_L3LEN_S, ol3_len >> 2); + + /* MAC in UDP, MAC in GRE (0x6558)*/ + if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) { + /* switch MAC header ptr from outer to inner header.*/ + l2_hdr = skb_inner_mac_header(skb); + + /* compute OL4 header size, defined in 4 Bytes. */ + ol4_len = l2_hdr - l4.hdr; + hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, ol4_len >> 2); + + /* switch IP header ptr from outer to inner header */ + l3.hdr = skb_inner_network_header(skb); + + /* compute inner l2 header size, defined in 2 Bytes. */ + l2_len = l3.hdr - l2_hdr; + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, + HNS3_TXD_L2LEN_S, l2_len >> 1); + } else { + /* skb packet types not supported by hardware, + * txbd len fild doesn't be filled. + */ + return; + } + + /* switch L4 header pointer from outer to inner */ + l4.hdr = skb_inner_transport_header(skb); + + l4_proto = il4_proto; + } + + /* compute inner(/normal) L3 header size, defined in 4 Bytes */ + l3_len = l4.hdr - l3.hdr; + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M, + HNS3_TXD_L3LEN_S, l3_len >> 2); + + /* compute inner(/normal) L4 header size, defined in 4 Bytes */ + switch (l4_proto) { + case IPPROTO_TCP: + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, l4.tcp->doff); + break; + case IPPROTO_SCTP: + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2)); + break; + case IPPROTO_UDP: + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2)); + break; + default: + /* skb packet types not supported by hardware, + * txbd len fild doesn't be filled. + */ + return; + } +} + +static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, + u8 il4_proto, u32 *type_cs_vlan_tso, + u32 *ol_type_vlan_len_msec) +{ + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } l3; + u32 l4_proto = ol4_proto; + + l3.hdr = skb_network_header(skb); + + /* define OL3 type and tunnel type(OL4).*/ + if (skb->encapsulation) { + /* define outer network header type.*/ + if (skb->protocol == htons(ETH_P_IP)) { + if (skb_is_gso(skb)) + hnae_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV4_CSUM); + else + hnae_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV4_NO_CSUM); + + } else if (skb->protocol == htons(ETH_P_IPV6)) { + hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M, + HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6); + } + + /* define tunnel type(OL4).*/ + switch (l4_proto) { + case IPPROTO_UDP: + hnae_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_TUNTYPE_M, + HNS3_TXD_TUNTYPE_S, + HNS3_TUN_MAC_IN_UDP); + break; + case IPPROTO_GRE: + hnae_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_TUNTYPE_M, + HNS3_TXD_TUNTYPE_S, + HNS3_TUN_NVGRE); + break; + default: + /* drop the skb tunnel packet if hardware don't support, + * because hardware can't calculate csum when TSO. 
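The shifts in hns3_set_l2l3l4_len encode a unit convention rather than anything subtle: the L2LEN fields count 2-byte units (hence the >> 1) and the L3LEN/L4LEN fields count 4-byte units (hence the >> 2); TCP's doff is already in 4-byte words, which is why it is written unshifted. For the common untagged IPv4/TCP case:

#include <stdio.h>

int main(void)
{
	unsigned int eth = 14, ip4 = 20, tcp = 20;	/* header bytes */

	printf("L2LEN=%u L3LEN=%u L4LEN=%u\n",
	       eth >> 1, ip4 >> 2, tcp >> 2);		/* 7 5 5 */
	return 0;
}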
+ */ + if (skb_is_gso(skb)) + return -EDOM; + + /* the stack computes the IP header already, + * driver calculate l4 checksum when not TSO. + */ + skb_checksum_help(skb); + return 0; + } + + l3.hdr = skb_inner_network_header(skb); + l4_proto = il4_proto; + } + + if (l3.v4->version == 4) { + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, + HNS3_TXD_L3T_S, HNS3_L3T_IPV4); + + /* the stack computes the IP header already, the only time we + * need the hardware to recompute it is in the case of TSO. + */ + if (skb_is_gso(skb)) + hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); + + hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + } else if (l3.v6->version == 6) { + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, + HNS3_TXD_L3T_S, HNS3_L3T_IPV6); + hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + } + + switch (l4_proto) { + case IPPROTO_TCP: + hnae_set_field(*type_cs_vlan_tso, + HNS3_TXD_L4T_M, + HNS3_TXD_L4T_S, + HNS3_L4T_TCP); + break; + case IPPROTO_UDP: + hnae_set_field(*type_cs_vlan_tso, + HNS3_TXD_L4T_M, + HNS3_TXD_L4T_S, + HNS3_L4T_UDP); + break; + case IPPROTO_SCTP: + hnae_set_field(*type_cs_vlan_tso, + HNS3_TXD_L4T_M, + HNS3_TXD_L4T_S, + HNS3_L4T_SCTP); + break; + default: + /* drop the skb tunnel packet if hardware don't support, + * because hardware can't calculate csum when TSO. + */ + if (skb_is_gso(skb)) + return -EDOM; + + /* the stack computes the IP header already, + * driver calculate l4 checksum when not TSO. + */ + skb_checksum_help(skb); + return 0; + } + + return 0; +} + +static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end) +{ + /* Config bd buffer end */ + hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M, + HNS3_TXD_BDTYPE_M, 0); + hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end); + hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1); + hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 1); +} + +static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, + int size, dma_addr_t dma, int frag_end, + enum hns_desc_type type) +{ + struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; + struct hns3_desc *desc = &ring->desc[ring->next_to_use]; + u32 ol_type_vlan_len_msec = 0; + u16 bdtp_fe_sc_vld_ra_ri = 0; + u32 type_cs_vlan_tso = 0; + struct sk_buff *skb; + u32 paylen = 0; + u16 mss = 0; + __be16 protocol; + u8 ol4_proto; + u8 il4_proto; + int ret; + + /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */ + desc_cb->priv = priv; + desc_cb->length = size; + desc_cb->dma = dma; + desc_cb->type = type; + + /* now, fill the descriptor */ + desc->addr = cpu_to_le64(dma); + desc->tx.send_size = cpu_to_le16((u16)size); + hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end); + desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri); + + if (type == DESC_TYPE_SKB) { + skb = (struct sk_buff *)priv; + paylen = cpu_to_le16(skb->len); + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + skb_reset_mac_len(skb); + protocol = skb->protocol; + + /* vlan packet*/ + if (protocol == htons(ETH_P_8021Q)) { + protocol = vlan_get_protocol(skb); + skb->protocol = protocol; + } + hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); + hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto, + &type_cs_vlan_tso, + &ol_type_vlan_len_msec); + ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto, + &type_cs_vlan_tso, + &ol_type_vlan_len_msec); + if (ret) + return ret; + + ret = hns3_set_tso(skb, &paylen, &mss, + &type_cs_vlan_tso); + if (ret) + return ret; + } + + /* Set txbd */ + 
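The base-info word filled in just below carries per-BD control bits: a valid bit, a frag-end bit marking the last BD of a frame, and small fields such as SC. The hnae_set_bit()/hnae_set_field() helpers are plain mask-and-shift; a stand-in version with invented bit positions (the real HNS3_TXD_* values live in hns3_enet.h and may differ):

#include <stdint.h>
#include <stdio.h>

#define SET_FIELD(w, mask, shift, v) \
	((w) = (uint16_t)(((w) & ~(mask)) | (((uint32_t)(v) << (shift)) & (mask))))
#define SET_BIT(w, bit, v)	SET_FIELD(w, 1u << (bit), (bit), (v))

int main(void)
{
	uint16_t bd_info = 0;

	SET_BIT(bd_info, 8, 1);			/* "VLD": descriptor valid */
	SET_BIT(bd_info, 4, 1);			/* "FE": last BD of the frame */
	SET_FIELD(bd_info, 0x3u << 5, 5, 1);	/* "SC" field = 1 */

	printf("bdtp_fe_sc_vld_ra_ri = %#06x\n", bd_info);	/* 0x0130 */
	return 0;
}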
desc->tx.ol_type_vlan_len_msec = + cpu_to_le32(ol_type_vlan_len_msec); + desc->tx.type_cs_vlan_tso_len = + cpu_to_le32(type_cs_vlan_tso); + desc->tx.paylen = cpu_to_le16(paylen); + desc->tx.mss = cpu_to_le16(mss); + } + + /* move ring pointer to next.*/ + ring_ptr_move_fw(ring, next_to_use); + + return 0; +} + +static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv, + int size, dma_addr_t dma, int frag_end, + enum hns_desc_type type) +{ + unsigned int frag_buf_num; + unsigned int k; + int sizeoflast; + int ret; + + frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; + sizeoflast = size % HNS3_MAX_BD_SIZE; + sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE; + + /* When the frag size is bigger than hardware, split this frag */ + for (k = 0; k < frag_buf_num; k++) { + ret = hns3_fill_desc(ring, priv, + (k == frag_buf_num - 1) ? + sizeoflast : HNS3_MAX_BD_SIZE, + dma + HNS3_MAX_BD_SIZE * k, + frag_end && (k == frag_buf_num - 1) ? 1 : 0, + (type == DESC_TYPE_SKB && !k) ? + DESC_TYPE_SKB : DESC_TYPE_PAGE); + if (ret) + return ret; + } + + return 0; +} + +static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum, + struct hns3_enet_ring *ring) +{ + struct sk_buff *skb = *out_skb; + struct skb_frag_struct *frag; + int bdnum_for_frag; + int frag_num; + int buf_num; + int size; + int i; + + size = skb_headlen(skb); + buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; + + frag_num = skb_shinfo(skb)->nr_frags; + for (i = 0; i < frag_num; i++) { + frag = &skb_shinfo(skb)->frags[i]; + size = skb_frag_size(frag); + bdnum_for_frag = + (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; + if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG) + return -ENOMEM; + + buf_num += bdnum_for_frag; + } + + if (buf_num > ring_space(ring)) + return -EBUSY; + + *bnum = buf_num; + return 0; +} + +static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum, + struct hns3_enet_ring *ring) +{ + struct sk_buff *skb = *out_skb; + int buf_num; + + /* No. 
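hns3_fill_desc_tso's splitting arithmetic is easy to check standalone; HNS3_MAX_BD_SIZE is assumed here to be 65535, i.e. the limit of the 16-bit send_size field:

#include <stdio.h>

#define MAX_BD_SIZE 65535	/* assumption for HNS3_MAX_BD_SIZE */

int main(void)
{
	unsigned int size = 150000;	/* one oversized TSO fragment */
	unsigned int bds = (size + MAX_BD_SIZE - 1) / MAX_BD_SIZE;
	unsigned int last = size % MAX_BD_SIZE;

	if (!last)
		last = MAX_BD_SIZE;	/* exact multiple: last BD is full */

	printf("%u BDs, last carries %u bytes\n", bds, last);	/* 3, 18930 */
	return 0;
}

Only the first BD of a head buffer keeps DESC_TYPE_SKB, so TX completion frees the skb exactly once; the remaining BDs are typed as pages.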
of segments (plus a header) */ + buf_num = skb_shinfo(skb)->nr_frags + 1; + + if (buf_num > ring_space(ring)) + return -EBUSY; + + *bnum = buf_num; + + return 0; +} + +static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig) +{ + struct device *dev = ring_to_dev(ring); + unsigned int i; + + for (i = 0; i < ring->desc_num; i++) { + /* check if this is where we started */ + if (ring->next_to_use == next_to_use_orig) + break; + + /* unmap the descriptor dma address */ + if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB) + dma_unmap_single(dev, + ring->desc_cb[ring->next_to_use].dma, + ring->desc_cb[ring->next_to_use].length, + DMA_TO_DEVICE); + else + dma_unmap_page(dev, + ring->desc_cb[ring->next_to_use].dma, + ring->desc_cb[ring->next_to_use].length, + DMA_TO_DEVICE); + + /* rollback one */ + ring_ptr_move_bw(ring, next_to_use); + } +} + +static netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hns3_nic_ring_data *ring_data = + &tx_ring_data(priv, skb->queue_mapping); + struct hns3_enet_ring *ring = ring_data->ring; + struct device *dev = priv->dev; + struct netdev_queue *dev_queue; + struct skb_frag_struct *frag; + int next_to_use_head; + int next_to_use_frag; + dma_addr_t dma; + int buf_num; + int seg_num; + int size; + int ret; + int i; + + /* Prefetch the data used later */ + prefetch(skb->data); + + switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) { + case -EBUSY: + u64_stats_update_begin(&ring->syncp); + ring->stats.tx_busy++; + u64_stats_update_end(&ring->syncp); + + goto out_net_tx_busy; + case -ENOMEM: + u64_stats_update_begin(&ring->syncp); + ring->stats.sw_err_cnt++; + u64_stats_update_end(&ring->syncp); + netdev_err(netdev, "no memory to xmit!\n"); + + goto out_err_tx_ok; + default: + break; + } + + /* No. of segments (plus a header) */ + seg_num = skb_shinfo(skb)->nr_frags + 1; + /* Fill the first part */ + size = skb_headlen(skb); + + next_to_use_head = ring->next_to_use; + + dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma)) { + netdev_err(netdev, "TX head DMA map failed\n"); + ring->stats.sw_err_cnt++; + goto out_err_tx_ok; + } + + ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0, + DESC_TYPE_SKB); + if (ret) + goto head_dma_map_err; + + next_to_use_frag = ring->next_to_use; + /* Fill the fragments */ + for (i = 1; i < seg_num; i++) { + frag = &skb_shinfo(skb)->frags[i - 1]; + size = skb_frag_size(frag); + dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma)) { + netdev_err(netdev, "TX frag(%d) DMA map failed\n", i); + ring->stats.sw_err_cnt++; + goto frag_dma_map_err; + } + ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma, + seg_num - 1 == i ? 
1 : 0, DESC_TYPE_PAGE);
+
+		if (ret)
+			goto frag_dma_map_err;
+	}
+
+	/* All BDs are filled; account the bytes for BQL and ring the doorbell */
+	dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
+	netdev_tx_sent_queue(dev_queue, skb->len);
+
+	wmb(); /* Commit all data before submit */
+
+	hnae_queue_xmit(ring->tqp, buf_num);
+
+	return NETDEV_TX_OK;
+
+frag_dma_map_err:
+	hns_nic_dma_unmap(ring, next_to_use_frag);
+
+head_dma_map_err:
+	hns_nic_dma_unmap(ring, next_to_use_head);
+
+out_err_tx_ok:
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
+
+out_net_tx_busy:
+	netif_stop_subqueue(netdev, ring_data->queue_index);
+	smp_mb(); /* Make the stopped queue visible to the completion path */
+
+	return NETDEV_TX_BUSY;
+}
+
+static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
+{
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+	struct hnae3_handle *h = priv->ae_handle;
+	struct sockaddr *mac_addr = p;
+	int ret;
+
+	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data);
+	if (ret) {
+		netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
+		return ret;
+	}
+
+	ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
+
+	return 0;
+}
+
+static int hns3_nic_set_features(struct net_device *netdev,
+				 netdev_features_t features)
+{
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+	if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
+		priv->ops.fill_desc = hns3_fill_desc_tso;
+		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
+	} else {
+		priv->ops.fill_desc = hns3_fill_desc;
+		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
+	}
+
+	netdev->features = features;
+	return 0;
+}
+
+static void
+hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
+{
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+	int queue_num = priv->ae_handle->kinfo.num_tqps;
+	struct hns3_enet_ring *ring;
+	unsigned int start;
+	unsigned int idx;
+	u64 tx_bytes = 0;
+	u64 rx_bytes = 0;
+	u64 tx_pkts = 0;
+	u64 rx_pkts = 0;
+
+	for (idx = 0; idx < queue_num; idx++) {
+		/* fetch the tx stats */
+		ring = priv->ring_data[idx].ring;
+		do {
+			start = u64_stats_fetch_begin_irq(&ring->syncp);
+			tx_bytes += ring->stats.tx_bytes;
+			tx_pkts += ring->stats.tx_pkts;
+		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+
+		/* fetch the rx stats */
+		ring = priv->ring_data[idx + queue_num].ring;
+		do {
+			start = u64_stats_fetch_begin_irq(&ring->syncp);
+			rx_bytes += ring->stats.rx_bytes;
+			rx_pkts += ring->stats.rx_pkts;
+		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+	}
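+	/* Reader side of the u64_stats seqcount used above: each ring is
+	 * sampled inside a begin/retry bracket so a 32-bit CPU never sees
+	 * a torn 64-bit counter. A minimal sketch of the same pattern
+	 * (illustrative only, not additional driver code):
+	 *
+	 *	unsigned int s;
+	 *	u64 bytes;
+	 *	do {
+	 *		s = u64_stats_fetch_begin_irq(&ring->syncp);
+	 *		bytes = ring->stats.tx_bytes;
+	 *	} while (u64_stats_fetch_retry_irq(&ring->syncp, s));
+	 */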
+
+	stats->tx_bytes = tx_bytes;
+	stats->tx_packets = tx_pkts;
+	stats->rx_bytes = rx_bytes;
+	stats->rx_packets = rx_pkts;
+
+	stats->rx_errors = netdev->stats.rx_errors;
+	stats->multicast = netdev->stats.multicast;
+	stats->rx_length_errors = netdev->stats.rx_length_errors;
+	stats->rx_crc_errors = netdev->stats.rx_crc_errors;
+	stats->rx_missed_errors = netdev->stats.rx_missed_errors;
+
+	stats->tx_errors = netdev->stats.tx_errors;
+	stats->rx_dropped = netdev->stats.rx_dropped;
+	stats->tx_dropped = netdev->stats.tx_dropped;
+	stats->collisions = netdev->stats.collisions;
+	stats->rx_over_errors = netdev->stats.rx_over_errors;
+	stats->rx_frame_errors = netdev->stats.rx_frame_errors;
+	stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
+	stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
+	stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
+	stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
+	stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
+	stats->tx_window_errors = netdev->stats.tx_window_errors;
+	stats->rx_compressed = netdev->stats.rx_compressed;
+	stats->tx_compressed = netdev->stats.tx_compressed;
+}
+
+static void hns3_add_tunnel_port(struct net_device *netdev, u16 port,
+				 enum hns3_udp_tnl_type type)
+{
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+	struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
+	struct hnae3_handle *h = priv->ae_handle;
+
+	if (udp_tnl->used && udp_tnl->dst_port == port) {
+		udp_tnl->used++;
+		return;
+	}
+
+	if (udp_tnl->used) {
+		netdev_warn(netdev,
+			    "UDP tunnel [%d], port [%d] not offloaded, another port is already in use\n",
+			    type, port);
+		return;
+	}
+
+	udp_tnl->dst_port = port;
+	udp_tnl->used = 1;
+	/* TBD send command to hardware to add port */
+	if (h->ae_algo->ops->add_tunnel_udp)
+		h->ae_algo->ops->add_tunnel_udp(h, port);
+}
+
+static void hns3_del_tunnel_port(struct net_device *netdev, u16 port,
+				 enum hns3_udp_tnl_type type)
+{
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+	struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
+	struct hnae3_handle *h = priv->ae_handle;
+
+	if (!udp_tnl->used || udp_tnl->dst_port != port) {
+		netdev_warn(netdev,
+			    "Invalid UDP tunnel port %d\n", port);
+		return;
+	}
+
+	udp_tnl->used--;
+	if (udp_tnl->used)
+		return;
+
+	udp_tnl->dst_port = 0;
+	/* TBD send command to hardware to del port */
+	if (h->ae_algo->ops->del_tunnel_udp)
+		h->ae_algo->ops->del_tunnel_udp(h, port);
+}
+
+/* hns3_nic_udp_tunnel_add - Get notification about UDP tunnel ports
+ * @netdev: This physical port's netdev
+ * @ti: Tunnel information
+ */
+static void hns3_nic_udp_tunnel_add(struct net_device *netdev,
+				    struct udp_tunnel_info *ti)
+{
+	u16 port_n = ntohs(ti->port);
+
+	switch (ti->type) {
+	case UDP_TUNNEL_TYPE_VXLAN:
+		hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
+		break;
+	case UDP_TUNNEL_TYPE_GENEVE:
+		hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
+		break;
+	default:
+		netdev_err(netdev, "unsupported tunnel type %d\n", ti->type);
+		break;
+	}
+}
+
+static void hns3_nic_udp_tunnel_del(struct net_device *netdev,
+				    struct udp_tunnel_info *ti)
+{
+	u16 port_n = ntohs(ti->port);
+
+	switch (ti->type) {
+	case UDP_TUNNEL_TYPE_VXLAN:
+		hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
+		break;
+	case UDP_TUNNEL_TYPE_GENEVE:
+		hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
+		break;
+	default:
+		break;
+	}
+}
+
+static int hns3_setup_tc(struct net_device *netdev, u8 tc)
+{
+	struct hnae3_knic_private_info *kinfo;
+	struct hns3_nic_priv *priv;
+	struct hnae3_handle *h;
+	unsigned int i;
+	int ret;
+
+	/* Validate netdev before it is dereferenced below */
+	if (!netdev || tc > HNAE3_MAX_TC)
+		return -EINVAL;
+
+	priv = netdev_priv(netdev);
+	h = priv->ae_handle;
+	kinfo = &h->kinfo;
+
+	if (kinfo->num_tc == tc)
+		return 0;
+
+	if (!tc) {
+		netdev_reset_tc(netdev);
+		return 0;
+	}
+
+	/* Set num_tc for netdev */
+	ret = netdev_set_num_tc(netdev, tc);
+	if (ret)
+		return ret;
+
+	/* Set per TC queues for the VSI */
+	for (i = 0; i < HNAE3_MAX_TC; i++) {
+		if (kinfo->tc_info[i].enable)
+			netdev_set_tc_queue(netdev,
+					    kinfo->tc_info[i].tc,
+					    kinfo->tc_info[i].tqp_count,
+					    kinfo->tc_info[i].tqp_offset);
+	}
+
+	return 0;
+}
+
+static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
+			     void *type_data)
+{
+	struct tc_mqprio_qopt *mqprio = type_data;
+
+	if (type != TC_SETUP_MQPRIO)
+		return -EOPNOTSUPP;
+
+	return hns3_setup_tc(dev, mqprio->num_tc);
+}
+
+static int hns3_vlan_rx_add_vid(struct net_device *netdev,
+				__be16 proto, u16 vid)
+{
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+	struct
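+	/* Usage sketch for the tunnel-port bookkeeping in
+	 * hns3_add_tunnel_port()/hns3_del_tunnel_port() above (port number
+	 * illustrative; 4789 is the IANA VXLAN port):
+	 *
+	 *	hns3_add_tunnel_port(netdev, 4789, HNS3_UDP_TNL_VXLAN);
+	 *		-> used = 1, port programmed into hardware
+	 *	hns3_add_tunnel_port(netdev, 4789, HNS3_UDP_TNL_VXLAN);
+	 *		-> used = 2, no hardware access
+	 *	hns3_del_tunnel_port(netdev, 4789, HNS3_UDP_TNL_VXLAN);
+	 *		-> used = 1
+	 *	hns3_del_tunnel_port(netdev, 4789, HNS3_UDP_TNL_VXLAN);
+	 *		-> used = 0, port removed from hardware
+	 */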
hnae3_handle *h = priv->ae_handle; + int ret = -EIO; + + if (h->ae_algo->ops->set_vlan_filter) + ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false); + + return ret; +} + +static int hns3_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + int ret = -EIO; + + if (h->ae_algo->ops->set_vlan_filter) + ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true); + + return ret; +} + +static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, + u8 qos, __be16 vlan_proto) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + int ret = -EIO; + + if (h->ae_algo->ops->set_vf_vlan_filter) + ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan, + qos, vlan_proto); + + return ret; +} + +static const struct net_device_ops hns3_nic_netdev_ops = { + .ndo_open = hns3_nic_net_open, + .ndo_stop = hns3_nic_net_stop, + .ndo_start_xmit = hns3_nic_net_xmit, + .ndo_set_mac_address = hns3_nic_net_set_mac_address, + .ndo_set_features = hns3_nic_set_features, + .ndo_get_stats64 = hns3_nic_get_stats64, + .ndo_setup_tc = hns3_nic_setup_tc, + .ndo_set_rx_mode = hns3_nic_set_rx_mode, + .ndo_udp_tunnel_add = hns3_nic_udp_tunnel_add, + .ndo_udp_tunnel_del = hns3_nic_udp_tunnel_del, + .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid, + .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan, +}; + +/* hns3_probe - Device initialization routine + * @pdev: PCI device information struct + * @ent: entry in hns3_pci_tbl + * + * hns3_probe initializes a PF identified by a pci_dev structure. + * The OS initialization, configuring of the PF private structure, + * and a hardware reset occur. 
+ * + * Returns 0 on success, negative on failure + */ +static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct hnae3_ae_dev *ae_dev; + int ret; + + ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), + GFP_KERNEL); + if (!ae_dev) { + ret = -ENOMEM; + return ret; + } + + ae_dev->pdev = pdev; + ae_dev->dev_type = HNAE3_DEV_KNIC; + pci_set_drvdata(pdev, ae_dev); + + return hnae3_register_ae_dev(ae_dev); +} + +/* hns3_remove - Device removal routine + * @pdev: PCI device information struct + */ +static void hns3_remove(struct pci_dev *pdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + + hnae3_unregister_ae_dev(ae_dev); + + devm_kfree(&pdev->dev, ae_dev); + + pci_set_drvdata(pdev, NULL); +} + +static struct pci_driver hns3_driver = { + .name = hns3_driver_name, + .id_table = hns3_pci_tbl, + .probe = hns3_probe, + .remove = hns3_remove, +}; + +/* set default feature to hns3 */ +static void hns3_set_default_feature(struct net_device *netdev) +{ + netdev->priv_flags |= IFF_UNICAST_FLT; + + netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | + NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + + netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; + + netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; + + netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | + NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + + netdev->vlan_features |= + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | + NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | + NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + + netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | + NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; +} + +static int hns3_alloc_buffer(struct hns3_enet_ring *ring, + struct hns3_desc_cb *cb) +{ + unsigned int order = hnae_page_order(ring); + struct page *p; + + p = dev_alloc_pages(order); + if (!p) + return -ENOMEM; + + cb->priv = p; + cb->page_offset = 0; + cb->reuse_flag = 0; + cb->buf = page_address(p); + cb->length = hnae_page_size(ring); + cb->type = DESC_TYPE_PAGE; + + memset(cb->buf, 0, cb->length); + + return 0; +} + +static void hns3_free_buffer(struct hns3_enet_ring *ring, + struct hns3_desc_cb *cb) +{ + if (cb->type == DESC_TYPE_SKB) + dev_kfree_skb_any((struct sk_buff *)cb->priv); + else if (!HNAE3_IS_TX_RING(ring)) + put_page((struct page *)cb->priv); + memset(cb, 0, sizeof(*cb)); +} + +static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) +{ + cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0, + cb->length, ring_to_dma_dir(ring)); + + if (dma_mapping_error(ring_to_dev(ring), cb->dma)) + return -EIO; + + return 0; +} + +static void hns3_unmap_buffer(struct hns3_enet_ring *ring, + struct hns3_desc_cb *cb) +{ + if (cb->type == DESC_TYPE_SKB) + dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, + ring_to_dma_dir(ring)); + else + dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, + ring_to_dma_dir(ring)); +} + +static void 
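+/* Rx buffer lifecycle as implemented by the helpers around here (a
+ * sketch; error handling omitted, names as defined in this file):
+ *
+ *	hns3_alloc_buffer()         dev_alloc_pages(hnae_page_order(ring))
+ *	hns3_map_buffer()           dma_map_page() in the ring's direction
+ *	hns3_alloc_buffer_attach()  desc->addr = cpu_to_le64(cb->dma)
+ *	hns3_buffer_detach()        DMA unmap and clear desc->addr
+ *	hns3_free_buffer()          dev_kfree_skb_any()/put_page() by cb->type
+ */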
hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
+{
+	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
+	ring->desc[i].addr = 0;
+}
+
+static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
+{
+	struct hns3_desc_cb *cb = &ring->desc_cb[i];
+
+	if (!ring->desc_cb[i].dma)
+		return;
+
+	hns3_buffer_detach(ring, i);
+	hns3_free_buffer(ring, cb);
+}
+
+static void hns3_free_buffers(struct hns3_enet_ring *ring)
+{
+	int i;
+
+	for (i = 0; i < ring->desc_num; i++)
+		hns3_free_buffer_detach(ring, i);
+}
+
+/* free desc along with its attached buffer */
+static void hns3_free_desc(struct hns3_enet_ring *ring)
+{
+	hns3_free_buffers(ring);
+
+	dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
+			 ring->desc_num * sizeof(ring->desc[0]),
+			 DMA_BIDIRECTIONAL);
+	ring->desc_dma_addr = 0;
+	kfree(ring->desc);
+	ring->desc = NULL;
+}
+
+static int hns3_alloc_desc(struct hns3_enet_ring *ring)
+{
+	int size = ring->desc_num * sizeof(ring->desc[0]);
+
+	ring->desc = kzalloc(size, GFP_KERNEL);
+	if (!ring->desc)
+		return -ENOMEM;
+
+	ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc,
+					     size, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
+		ring->desc_dma_addr = 0;
+		kfree(ring->desc);
+		ring->desc = NULL;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
+				   struct hns3_desc_cb *cb)
+{
+	int ret;
+
+	ret = hns3_alloc_buffer(ring, cb);
+	if (ret)
+		goto out;
+
+	ret = hns3_map_buffer(ring, cb);
+	if (ret)
+		goto out_with_buf;
+
+	return 0;
+
+out_with_buf:
+	/* only the buffer just allocated needs freeing here */
+	hns3_free_buffer(ring, cb);
+out:
+	return ret;
+}
+
+static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
+{
+	int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
+
+	if (ret)
+		return ret;
+
+	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
+
+	return 0;
+}
+
+/* Allocate memory for the raw packet buffers and map them for DMA */
+static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
+{
+	int i, j, ret;
+
+	for (i = 0; i < ring->desc_num; i++) {
+		ret = hns3_alloc_buffer_attach(ring, i);
+		if (ret)
+			goto out_buffer_fail;
+	}
+
+	return 0;
+
+out_buffer_fail:
+	for (j = i - 1; j >= 0; j--)
+		hns3_free_buffer_detach(ring, j);
+	return ret;
+}
+
+/* detach an in-use buffer and replace it with a reserved one */
+static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
+				struct hns3_desc_cb *res_cb)
+{
+	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
+	ring->desc_cb[i] = *res_cb;
+	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
+}
+
+static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
+{
+	ring->desc_cb[i].reuse_flag = 0;
+	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
+		+ ring->desc_cb[i].page_offset);
+}
+
+static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
+				      int *pkts)
+{
+	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
+
+	(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
+	(*bytes) += desc_cb->length;
+	/* desc_cb will be cleaned after hns3_free_buffer_detach() */
+	hns3_free_buffer_detach(ring, ring->next_to_clean);
+
+	ring_ptr_move_fw(ring, next_to_clean);
+}
+
+static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
+{
+	int u = ring->next_to_use;
+	int c = ring->next_to_clean;
+
+	if (unlikely(h > ring->desc_num))
+		return 0;
+
+	return u > c ? (h > c && h <= u) : (h > c || h <= u);
+}
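+/* Worked example for is_valid_clean_head() with desc_num = 16 (values
+ * illustrative): with next_to_clean c = 4 and next_to_use u = 10, only a
+ * hardware head in 5..10 is plausible, the (h > c && h <= u) arm. After
+ * the ring wraps, say c = 12 and u = 2, both 13..15 and 0..2 are valid,
+ * which the (h > c || h <= u) arm covers.
+ */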
+
+int hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
+{
+	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+	struct netdev_queue *dev_queue;
+	int bytes, pkts;
+	int head;
+
+	head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
+	rmb(); /* Make sure head is ready before touching any other data */
+
+	if (is_ring_empty(ring) || head == ring->next_to_clean)
+		return 0; /* no data to poll */
+
+	if (!is_valid_clean_head(ring, head)) {
+		netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
+			   ring->next_to_use, ring->next_to_clean);
+
+		u64_stats_update_begin(&ring->syncp);
+		ring->stats.io_err_cnt++;
+		u64_stats_update_end(&ring->syncp);
+		return -EIO;
+	}
+
+	bytes = 0;
+	pkts = 0;
+	while (head != ring->next_to_clean && budget) {
+		hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
+		/* Issue prefetch for next Tx descriptor */
+		prefetch(&ring->desc_cb[ring->next_to_clean]);
+		budget--;
+	}
+
+	ring->tqp_vector->tx_group.total_bytes += bytes;
+	ring->tqp_vector->tx_group.total_packets += pkts;
+
+	u64_stats_update_begin(&ring->syncp);
+	ring->stats.tx_bytes += bytes;
+	ring->stats.tx_pkts += pkts;
+	u64_stats_update_end(&ring->syncp);
+
+	dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
+	netdev_tx_completed_queue(dev_queue, pkts, bytes);
+
+	if (unlikely(pkts && netif_carrier_ok(netdev) &&
+		     (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
+		/* Make sure that anybody stopping the queue after this
+		 * sees the new next_to_clean.
+		 */
+		smp_mb();
+		if (netif_tx_queue_stopped(dev_queue)) {
+			netif_tx_wake_queue(dev_queue);
+			ring->stats.restart_queue++;
+		}
+	}
+
+	return !!budget;
+}
+
+static int hns3_desc_unused(struct hns3_enet_ring *ring)
+{
+	int ntc = ring->next_to_clean;
+	int ntu = ring->next_to_use;
+
+	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
+}
+
+static void
+hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
+{
+	struct hns3_desc_cb *desc_cb;
+	struct hns3_desc_cb res_cbs;
+	int i, ret;
+
+	for (i = 0; i < cleand_count; i++) {
+		desc_cb = &ring->desc_cb[ring->next_to_use];
+		if (desc_cb->reuse_flag) {
+			u64_stats_update_begin(&ring->syncp);
+			ring->stats.reuse_pg_cnt++;
+			u64_stats_update_end(&ring->syncp);
+
+			hns3_reuse_buffer(ring, ring->next_to_use);
+		} else {
+			ret = hns3_reserve_buffer_map(ring, &res_cbs);
+			if (ret) {
+				u64_stats_update_begin(&ring->syncp);
+				ring->stats.sw_err_cnt++;
+				u64_stats_update_end(&ring->syncp);
+
+				netdev_err(ring->tqp->handle->kinfo.netdev,
+					   "hnae reserve buffer map failed.\n");
+				break;
+			}
+			hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
+		}
+
+		ring_ptr_move_fw(ring, next_to_use);
+	}
+
+	wmb(); /* Make sure all buffer writes complete before the doorbell */
+	writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
+}
+
+/* hns3_nic_get_headlen - determine size of header for LRO/GRO
+ * @data: pointer to the start of the headers
+ * @max: total length of section to find headers in
+ *
+ * This function is meant to determine the length of headers that will
+ * be recognized by hardware for LRO, GRO, and RSC offloads. The main
+ * motivation of doing this is to only perform one pull for IPv4 TCP
+ * packets so that we can do basic things like calculating the gso_size
+ * based on the average data per packet.
+ */ +static unsigned int hns3_nic_get_headlen(unsigned char *data, u32 flag, + unsigned int max_size) +{ + unsigned char *network; + u8 hlen; + + /* This should never happen, but better safe than sorry */ + if (max_size < ETH_HLEN) + return max_size; + + /* Initialize network frame pointer */ + network = data; + + /* Set first protocol and move network header forward */ + network += ETH_HLEN; + + /* Handle any vlan tag if present */ + if (hnae_get_field(flag, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S) + == HNS3_RX_FLAG_VLAN_PRESENT) { + if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN)) + return max_size; + + network += VLAN_HLEN; + } + + /* Handle L3 protocols */ + if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S) + == HNS3_RX_FLAG_L3ID_IPV4) { + if ((typeof(max_size))(network - data) > + (max_size - sizeof(struct iphdr))) + return max_size; + + /* Access ihl as a u8 to avoid unaligned access on ia64 */ + hlen = (network[0] & 0x0F) << 2; + + /* Verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct iphdr)) + return network - data; + + /* Record next protocol if header is present */ + } else if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S) + == HNS3_RX_FLAG_L3ID_IPV6) { + if ((typeof(max_size))(network - data) > + (max_size - sizeof(struct ipv6hdr))) + return max_size; + + /* Record next protocol */ + hlen = sizeof(struct ipv6hdr); + } else { + return network - data; + } + + /* Relocate pointer to start of L4 header */ + network += hlen; + + /* Finally sort out TCP/UDP */ + if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S) + == HNS3_RX_FLAG_L4ID_TCP) { + if ((typeof(max_size))(network - data) > + (max_size - sizeof(struct tcphdr))) + return max_size; + + /* Access doff as a u8 to avoid unaligned access on ia64 */ + hlen = (network[12] & 0xF0) >> 2; + + /* Verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct tcphdr)) + return network - data; + + network += hlen; + } else if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S) + == HNS3_RX_FLAG_L4ID_UDP) { + if ((typeof(max_size))(network - data) > + (max_size - sizeof(struct udphdr))) + return max_size; + + network += sizeof(struct udphdr); + } + + /* If everything has gone correctly network should be the + * data section of the packet and will be the end of the header. + * If not then it probably represents the end of the last recognized + * header. 
+ */
+	if ((typeof(max_size))(network - data) < max_size)
+		return network - data;
+	else
+		return max_size;
+}
+
+static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
+				struct hns3_enet_ring *ring, int pull_len,
+				struct hns3_desc_cb *desc_cb)
+{
+	struct hns3_desc *desc;
+	int truesize, size;
+	int last_offset;
+	bool twobufs;
+
+	twobufs = ((PAGE_SIZE < 8192) &&
+		   hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
+
+	desc = &ring->desc[ring->next_to_clean];
+	size = le16_to_cpu(desc->rx.size);
+
+	if (twobufs) {
+		truesize = hnae_buf_size(ring);
+	} else {
+		truesize = ALIGN(size, L1_CACHE_BYTES);
+		last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
+	}
+
+	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
+			size - pull_len, truesize - pull_len);
+
+	/* Avoid re-using pages from a remote NUMA node; reuse_flag stays
+	 * unset by default.
+	 */
+	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
+		return;
+
+	if (twobufs) {
+		/* If we are only owner of page we can reuse it */
+		if (likely(page_count(desc_cb->priv) == 1)) {
+			/* Flip page offset to other buffer */
+			desc_cb->page_offset ^= truesize;
+
+			desc_cb->reuse_flag = 1;
+			/* bump ref count on page before it is given */
+			get_page(desc_cb->priv);
+		}
+		return;
+	}
+
+	/* Move offset to the next cache-line-aligned buffer */
+	desc_cb->page_offset += truesize;
+
+	if (desc_cb->page_offset <= last_offset) {
+		desc_cb->reuse_flag = 1;
+		/* Bump ref count on page before it is given */
+		get_page(desc_cb->priv);
+	}
+}
+
+static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
+			     struct hns3_desc *desc)
+{
+	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+	int l3_type, l4_type;
+	u32 bd_base_info;
+	int ol4_type;
+	u32 l234info;
+
+	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+	l234info = le32_to_cpu(desc->rx.l234_info);
+
+	skb->ip_summed = CHECKSUM_NONE;
+
+	skb_checksum_none_assert(skb);
+
+	if (!(netdev->features & NETIF_F_RXCSUM))
+		return;
+
+	/* check if hardware has done checksum */
+	if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
+		return;
+
+	if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) ||
+		     hnae_get_bit(l234info, HNS3_RXD_L4E_B) ||
+		     hnae_get_bit(l234info, HNS3_RXD_OL3E_B) ||
+		     hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) {
+		netdev_err(netdev, "L3/L4 error pkt\n");
+		u64_stats_update_begin(&ring->syncp);
+		ring->stats.l3l4_csum_err++;
+		u64_stats_update_end(&ring->syncp);
+
+		return;
+	}
+
+	l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M,
+				 HNS3_RXD_L3ID_S);
+	l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M,
+				 HNS3_RXD_L4ID_S);
+
+	ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
+	switch (ol4_type) {
+	case HNS3_OL4_TYPE_MAC_IN_UDP:
+	case HNS3_OL4_TYPE_NVGRE:
+		skb->csum_level = 1;
+		/* fall through */
+	case HNS3_OL4_TYPE_NO_TUN:
+		/* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
+		if (l3_type == HNS3_L3_TYPE_IPV4 ||
+		    (l3_type == HNS3_L3_TYPE_IPV6 &&
+		     (l4_type == HNS3_L4_TYPE_UDP ||
+		      l4_type == HNS3_L4_TYPE_TCP ||
+		      l4_type == HNS3_L4_TYPE_SCTP)))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		break;
+	}
+}
+
+static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
+			     struct sk_buff **out_skb, int *out_bnum)
+{
+	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+	struct hns3_desc_cb *desc_cb;
+	struct hns3_desc *desc;
+	struct sk_buff *skb;
+	unsigned char *va;
+	u32 bd_base_info;
+	int pull_len;
+	u32 l234info;
+	int length;
+	int bnum;
+
+	desc = &ring->desc[ring->next_to_clean];
+	desc_cb = &ring->desc_cb[ring->next_to_clean];
+
+	prefetch(desc);
+
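+	/* Buffer-reuse note: in the twobufs case of hns3_nic_reuse_page()
+	 * above, page_offset ^= truesize simply flips between the two
+	 * halves of the page, e.g. 0 -> 2048 -> 0 for a 4K page split into
+	 * two 2048-byte buffers (sizes illustrative).
+	 */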
length = le16_to_cpu(desc->rx.pkt_len); + bd_base_info = le32_to_cpu(desc->rx.bd_base_info); + l234info = le32_to_cpu(desc->rx.l234_info); + + /* Check valid BD */ + if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B)) + return -EFAULT; + + va = (unsigned char *)desc_cb->buf + desc_cb->page_offset; + + /* Prefetch first cache line of first page + * Idea is to cache few bytes of the header of the packet. Our L1 Cache + * line size is 64B so need to prefetch twice to make it 128B. But in + * actual we can have greater size of caches with 128B Level 1 cache + * lines. In such a case, single fetch would suffice to cache in the + * relevant part of the header. + */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + + skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi, + HNS3_RX_HEAD_SIZE); + if (unlikely(!skb)) { + netdev_err(netdev, "alloc rx skb fail\n"); + + u64_stats_update_begin(&ring->syncp); + ring->stats.sw_err_cnt++; + u64_stats_update_end(&ring->syncp); + + return -ENOMEM; + } + + prefetchw(skb->data); + + bnum = 1; + if (length <= HNS3_RX_HEAD_SIZE) { + memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); + + /* We can reuse buffer as-is, just make sure it is local */ + if (likely(page_to_nid(desc_cb->priv) == numa_node_id())) + desc_cb->reuse_flag = 1; + else /* This page cannot be reused so discard it */ + put_page(desc_cb->priv); + + ring_ptr_move_fw(ring, next_to_clean); + } else { + u64_stats_update_begin(&ring->syncp); + ring->stats.seg_pkt_cnt++; + u64_stats_update_end(&ring->syncp); + + pull_len = hns3_nic_get_headlen(va, l234info, + HNS3_RX_HEAD_SIZE); + memcpy(__skb_put(skb, pull_len), va, + ALIGN(pull_len, sizeof(long))); + + hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb); + ring_ptr_move_fw(ring, next_to_clean); + + while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) { + desc = &ring->desc[ring->next_to_clean]; + desc_cb = &ring->desc_cb[ring->next_to_clean]; + bd_base_info = le32_to_cpu(desc->rx.bd_base_info); + hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb); + ring_ptr_move_fw(ring, next_to_clean); + bnum++; + } + } + + *out_bnum = bnum; + + if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) { + netdev_err(netdev, "no valid bd,%016llx,%016llx\n", + ((u64 *)desc)[0], ((u64 *)desc)[1]); + u64_stats_update_begin(&ring->syncp); + ring->stats.non_vld_descs++; + u64_stats_update_end(&ring->syncp); + + dev_kfree_skb_any(skb); + return -EINVAL; + } + + if (unlikely((!desc->rx.pkt_len) || + hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) { + netdev_err(netdev, "truncated pkt\n"); + u64_stats_update_begin(&ring->syncp); + ring->stats.err_pkt_len++; + u64_stats_update_end(&ring->syncp); + + dev_kfree_skb_any(skb); + return -EFAULT; + } + + if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) { + netdev_err(netdev, "L2 error pkt\n"); + u64_stats_update_begin(&ring->syncp); + ring->stats.l2_err++; + u64_stats_update_end(&ring->syncp); + + dev_kfree_skb_any(skb); + return -EFAULT; + } + + u64_stats_update_begin(&ring->syncp); + ring->stats.rx_pkts++; + ring->stats.rx_bytes += skb->len; + u64_stats_update_end(&ring->syncp); + + ring->tqp_vector->rx_group.total_bytes += skb->len; + + hns3_rx_checksum(ring, skb, desc); + return 0; +} + +static int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget) +{ +#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 + struct net_device *netdev = ring->tqp->handle->kinfo.netdev; + int recv_pkts, recv_bds, clean_count, err; + int unused_count = hns3_desc_unused(ring); + struct sk_buff *skb = NULL; 
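+	/* Refill batching: buffers consumed by this poll are handed back
+	 * via hns3_nic_alloc_rx_buffers() in batches of at least
+	 * RCB_NOF_ALLOC_RX_BUFF_ONCE, plus one final flush at "out:", so
+	 * the Rx head doorbell is written far less than once per packet.
+	 */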
+	int num, bnum = 0;
+
+	num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
+	rmb(); /* Make sure num is read before any other data is touched */
+
+	recv_pkts = 0, recv_bds = 0, clean_count = 0;
+	num -= unused_count;
+
+	while (recv_pkts < budget && recv_bds < num) {
+		/* Reuse or realloc buffers */
+		if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
+			hns3_nic_alloc_rx_buffers(ring,
+						  clean_count + unused_count);
+			clean_count = 0;
+			unused_count = hns3_desc_unused(ring);
+		}
+
+		/* Poll one pkt */
+		err = hns3_handle_rx_bd(ring, &skb, &bnum);
+		if (unlikely(!skb)) /* This fault cannot be repaired */
+			goto out;
+
+		recv_bds += bnum;
+		clean_count += bnum;
+		if (unlikely(err)) { /* Skip this erroneous packet */
+			recv_pkts++;
+			continue;
+		}
+
+		/* Hand the packet over to the IP stack */
+		skb->protocol = eth_type_trans(skb, netdev);
+		(void)napi_gro_receive(&ring->tqp_vector->napi, skb);
+
+		recv_pkts++;
+	}
+
+out:
+	/* Make sure all consumed buffers are refilled before returning */
+	if (clean_count + unused_count > 0)
+		hns3_nic_alloc_rx_buffers(ring,
+					  clean_count + unused_count);
+
+	return recv_pkts;
+}
+
+static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
+{
+#define HNS3_RX_ULTRA_PACKET_RATE 40000
+	enum hns3_flow_level_range new_flow_level;
+	struct hns3_enet_tqp_vector *tqp_vector;
+	int packets_per_secs;
+	int bytes_per_usecs;
+	u16 new_int_gl;
+	int usecs;
+
+	if (!ring_group->int_gl)
+		return false;
+
+	if (ring_group->total_packets == 0) {
+		ring_group->int_gl = HNS3_INT_GL_50K;
+		ring_group->flow_level = HNS3_FLOW_LOW;
+		return true;
+	}
+
+	/* Simple throttle rate management:
+	 * 0-10MB/s    lower  (50000 ints/s)
+	 * 10-20MB/s   middle (20000 ints/s)
+	 * 20-1249MB/s high   (18000 ints/s)
+	 * > 40000pps  ultra  (8000 ints/s)
+	 */
+	new_flow_level = ring_group->flow_level;
+	new_int_gl = ring_group->int_gl;
+	tqp_vector = ring_group->ring->tqp_vector;
+	usecs = (ring_group->int_gl << 1);
+	bytes_per_usecs = ring_group->total_bytes / usecs;
+	/* 1000000 microseconds per second */
+	packets_per_secs = ring_group->total_packets * 1000000 / usecs;
+
+	switch (new_flow_level) {
+	case HNS3_FLOW_LOW:
+		if (bytes_per_usecs > 10)
+			new_flow_level = HNS3_FLOW_MID;
+		break;
+	case HNS3_FLOW_MID:
+		if (bytes_per_usecs > 20)
+			new_flow_level = HNS3_FLOW_HIGH;
+		else if (bytes_per_usecs <= 10)
+			new_flow_level = HNS3_FLOW_LOW;
+		break;
+	case HNS3_FLOW_HIGH:
+	case HNS3_FLOW_ULTRA:
+	default:
+		if (bytes_per_usecs <= 20)
+			new_flow_level = HNS3_FLOW_MID;
+		break;
+	}
+
+	if ((packets_per_secs > HNS3_RX_ULTRA_PACKET_RATE) &&
+	    (&tqp_vector->rx_group == ring_group))
+		new_flow_level = HNS3_FLOW_ULTRA;
+
+	switch (new_flow_level) {
+	case HNS3_FLOW_LOW:
+		new_int_gl = HNS3_INT_GL_50K;
+		break;
+	case HNS3_FLOW_MID:
+		new_int_gl = HNS3_INT_GL_20K;
+		break;
+	case HNS3_FLOW_HIGH:
+		new_int_gl = HNS3_INT_GL_18K;
+		break;
+	case HNS3_FLOW_ULTRA:
+		new_int_gl = HNS3_INT_GL_8K;
+		break;
+	default:
+		break;
+	}
+
+	ring_group->total_bytes = 0;
+	ring_group->total_packets = 0;
+	ring_group->flow_level = new_flow_level;
+	if (new_int_gl != ring_group->int_gl) {
+		ring_group->int_gl = new_int_gl;
+		return true;
+	}
+	return false;
+}
+
+static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
+{
+	u16 rx_int_gl, tx_int_gl;
+	bool rx, tx;
+
+	rx = hns3_get_new_int_gl(&tqp_vector->rx_group);
+	tx = hns3_get_new_int_gl(&tqp_vector->tx_group);
+	rx_int_gl = tqp_vector->rx_group.int_gl;
+	tx_int_gl = tqp_vector->tx_group.int_gl;
+	if (rx && tx) {
+		if (rx_int_gl > tx_int_gl) {
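+			/* GL arithmetic sketch (illustrative numbers): in
+			 * hns3_get_new_int_gl() above, int_gl is treated as
+			 * 2us units (usecs = int_gl << 1), so HNS3_INT_GL_20K
+			 * (0x19 = 25) spans usecs = 50; 2000 bytes in that
+			 * window gives bytes_per_usecs = 40, which moves a
+			 * HNS3_FLOW_MID group up to HNS3_FLOW_HIGH.
+			 */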
tqp_vector->tx_group.int_gl = rx_int_gl; + tqp_vector->tx_group.flow_level = + tqp_vector->rx_group.flow_level; + hns3_set_vector_coalesc_gl(tqp_vector, rx_int_gl); + } else { + tqp_vector->rx_group.int_gl = tx_int_gl; + tqp_vector->rx_group.flow_level = + tqp_vector->tx_group.flow_level; + hns3_set_vector_coalesc_gl(tqp_vector, tx_int_gl); + } + } +} + +static int hns3_nic_common_poll(struct napi_struct *napi, int budget) +{ + struct hns3_enet_ring *ring; + int rx_pkt_total = 0; + + struct hns3_enet_tqp_vector *tqp_vector = + container_of(napi, struct hns3_enet_tqp_vector, napi); + bool clean_complete = true; + int rx_budget; + + /* Since the actual Tx work is minimal, we can give the Tx a larger + * budget and be more aggressive about cleaning up the Tx descriptors. + */ + hns3_for_each_ring(ring, tqp_vector->tx_group) { + if (!hns3_clean_tx_ring(ring, budget)) + clean_complete = false; + } + + /* make sure rx ring budget not smaller than 1 */ + rx_budget = max(budget / tqp_vector->num_tqps, 1); + + hns3_for_each_ring(ring, tqp_vector->rx_group) { + int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget); + + if (rx_cleaned >= rx_budget) + clean_complete = false; + + rx_pkt_total += rx_cleaned; + } + + tqp_vector->rx_group.total_packets += rx_pkt_total; + + if (!clean_complete) + return budget; + + napi_complete(napi); + hns3_update_new_int_gl(tqp_vector); + hns3_mask_vector_irq(tqp_vector, 1); + + return rx_pkt_total; +} + +static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, + struct hnae3_ring_chain_node *head) +{ + struct pci_dev *pdev = tqp_vector->handle->pdev; + struct hnae3_ring_chain_node *cur_chain = head; + struct hnae3_ring_chain_node *chain; + struct hns3_enet_ring *tx_ring; + struct hns3_enet_ring *rx_ring; + + tx_ring = tqp_vector->tx_group.ring; + if (tx_ring) { + cur_chain->tqp_index = tx_ring->tqp->tqp_index; + hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_TX); + + cur_chain->next = NULL; + + while (tx_ring->next) { + tx_ring = tx_ring->next; + + chain = devm_kzalloc(&pdev->dev, sizeof(*chain), + GFP_KERNEL); + if (!chain) + return -ENOMEM; + + cur_chain->next = chain; + chain->tqp_index = tx_ring->tqp->tqp_index; + hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_TX); + + cur_chain = chain; + } + } + + rx_ring = tqp_vector->rx_group.ring; + if (!tx_ring && rx_ring) { + cur_chain->next = NULL; + cur_chain->tqp_index = rx_ring->tqp->tqp_index; + hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_RX); + + rx_ring = rx_ring->next; + } + + while (rx_ring) { + chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); + if (!chain) + return -ENOMEM; + + cur_chain->next = chain; + chain->tqp_index = rx_ring->tqp->tqp_index; + hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_RX); + cur_chain = chain; + + rx_ring = rx_ring->next; + } + + return 0; +} + +static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, + struct hnae3_ring_chain_node *head) +{ + struct pci_dev *pdev = tqp_vector->handle->pdev; + struct hnae3_ring_chain_node *chain_tmp, *chain; + + chain = head->next; + + while (chain) { + chain_tmp = chain->next; + devm_kfree(&pdev->dev, chain); + chain = chain_tmp; + } +} + +static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group, + struct hns3_enet_ring *ring) +{ + ring->next = group->ring; + group->ring = ring; + + group->count++; +} + +static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) +{ + struct hnae3_ring_chain_node 
vector_ring_chain; + struct hnae3_handle *h = priv->ae_handle; + struct hns3_enet_tqp_vector *tqp_vector; + struct hnae3_vector_info *vector; + struct pci_dev *pdev = h->pdev; + u16 tqp_num = h->kinfo.num_tqps; + u16 vector_num; + int ret = 0; + u16 i; + + /* RSS size, cpu online and vector_num should be the same */ + /* Should consider 2p/4p later */ + vector_num = min_t(u16, num_online_cpus(), tqp_num); + vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector), + GFP_KERNEL); + if (!vector) + return -ENOMEM; + + vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector); + + priv->vector_num = vector_num; + priv->tqp_vector = (struct hns3_enet_tqp_vector *) + devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector), + GFP_KERNEL); + if (!priv->tqp_vector) + return -ENOMEM; + + for (i = 0; i < tqp_num; i++) { + u16 vector_i = i % vector_num; + + tqp_vector = &priv->tqp_vector[vector_i]; + + hns3_add_ring_to_group(&tqp_vector->tx_group, + priv->ring_data[i].ring); + + hns3_add_ring_to_group(&tqp_vector->rx_group, + priv->ring_data[i + tqp_num].ring); + + tqp_vector->idx = vector_i; + tqp_vector->mask_addr = vector[vector_i].io_addr; + tqp_vector->vector_irq = vector[vector_i].vector; + tqp_vector->num_tqps++; + + priv->ring_data[i].ring->tqp_vector = tqp_vector; + priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector; + } + + for (i = 0; i < vector_num; i++) { + tqp_vector = &priv->tqp_vector[i]; + + tqp_vector->rx_group.total_bytes = 0; + tqp_vector->rx_group.total_packets = 0; + tqp_vector->tx_group.total_bytes = 0; + tqp_vector->tx_group.total_packets = 0; + hns3_vector_gl_rl_init(tqp_vector); + tqp_vector->handle = h; + + ret = hns3_get_vector_ring_chain(tqp_vector, + &vector_ring_chain); + if (ret) + goto out; + + ret = h->ae_algo->ops->map_ring_to_vector(h, + tqp_vector->vector_irq, &vector_ring_chain); + if (ret) + goto out; + + hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); + + netif_napi_add(priv->netdev, &tqp_vector->napi, + hns3_nic_common_poll, NAPI_POLL_WEIGHT); + } + +out: + devm_kfree(&pdev->dev, vector); + return ret; +} + +static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) +{ + struct hnae3_ring_chain_node vector_ring_chain; + struct hnae3_handle *h = priv->ae_handle; + struct hns3_enet_tqp_vector *tqp_vector; + struct pci_dev *pdev = h->pdev; + int i, ret; + + for (i = 0; i < priv->vector_num; i++) { + tqp_vector = &priv->tqp_vector[i]; + + ret = hns3_get_vector_ring_chain(tqp_vector, + &vector_ring_chain); + if (ret) + return ret; + + ret = h->ae_algo->ops->unmap_ring_from_vector(h, + tqp_vector->vector_irq, &vector_ring_chain); + if (ret) + return ret; + + hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); + + if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) { + (void)irq_set_affinity_hint( + priv->tqp_vector[i].vector_irq, + NULL); + devm_free_irq(&pdev->dev, + priv->tqp_vector[i].vector_irq, + &priv->tqp_vector[i]); + } + + priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED; + + netif_napi_del(&priv->tqp_vector[i].napi); + } + + devm_kfree(&pdev->dev, priv->tqp_vector); + + return 0; +} + +static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, + int ring_type) +{ + struct hns3_nic_ring_data *ring_data = priv->ring_data; + int queue_num = priv->ae_handle->kinfo.num_tqps; + struct pci_dev *pdev = priv->ae_handle->pdev; + struct hns3_enet_ring *ring; + + ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL); + if (!ring) + return -ENOMEM; + + if (ring_type 
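+	/* ring_data layout used throughout: slot q->tqp_index holds the Tx
+	 * ring and slot q->tqp_index + num_tqps the matching Rx ring, so
+	 * with e.g. 4 TQPs (illustrative) Tx rings live in slots 0..3 and
+	 * Rx rings in slots 4..7.
+	 */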
== HNAE3_RING_TYPE_TX) { + ring_data[q->tqp_index].ring = ring; + ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET; + } else { + ring_data[q->tqp_index + queue_num].ring = ring; + ring->io_base = q->io_base; + } + + hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); + + ring_data[q->tqp_index].queue_index = q->tqp_index; + + ring->tqp = q; + ring->desc = NULL; + ring->desc_cb = NULL; + ring->dev = priv->dev; + ring->desc_dma_addr = 0; + ring->buf_size = q->buf_size; + ring->desc_num = q->desc_num; + ring->next_to_use = 0; + ring->next_to_clean = 0; + + return 0; +} + +static int hns3_queue_to_ring(struct hnae3_queue *tqp, + struct hns3_nic_priv *priv) +{ + int ret; + + ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX); + if (ret) + return ret; + + ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX); + if (ret) + return ret; + + return 0; +} + +static int hns3_get_ring_config(struct hns3_nic_priv *priv) +{ + struct hnae3_handle *h = priv->ae_handle; + struct pci_dev *pdev = h->pdev; + int i, ret; + + priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps * + sizeof(*priv->ring_data) * 2, + GFP_KERNEL); + if (!priv->ring_data) + return -ENOMEM; + + for (i = 0; i < h->kinfo.num_tqps; i++) { + ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv); + if (ret) + goto err; + } + + return 0; +err: + devm_kfree(&pdev->dev, priv->ring_data); + return ret; +} + +static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) +{ + int ret; + + if (ring->desc_num <= 0 || ring->buf_size <= 0) + return -EINVAL; + + ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]), + GFP_KERNEL); + if (!ring->desc_cb) { + ret = -ENOMEM; + goto out; + } + + ret = hns3_alloc_desc(ring); + if (ret) + goto out_with_desc_cb; + + if (!HNAE3_IS_TX_RING(ring)) { + ret = hns3_alloc_ring_buffers(ring); + if (ret) + goto out_with_desc; + } + + return 0; + +out_with_desc: + hns3_free_desc(ring); +out_with_desc_cb: + kfree(ring->desc_cb); + ring->desc_cb = NULL; +out: + return ret; +} + +static void hns3_fini_ring(struct hns3_enet_ring *ring) +{ + hns3_free_desc(ring); + kfree(ring->desc_cb); + ring->desc_cb = NULL; + ring->next_to_clean = 0; + ring->next_to_use = 0; +} + +int hns3_buf_size2type(u32 buf_size) +{ + int bd_size_type; + + switch (buf_size) { + case 512: + bd_size_type = HNS3_BD_SIZE_512_TYPE; + break; + case 1024: + bd_size_type = HNS3_BD_SIZE_1024_TYPE; + break; + case 2048: + bd_size_type = HNS3_BD_SIZE_2048_TYPE; + break; + case 4096: + bd_size_type = HNS3_BD_SIZE_4096_TYPE; + break; + default: + bd_size_type = HNS3_BD_SIZE_2048_TYPE; + } + + return bd_size_type; +} + +static void hns3_init_ring_hw(struct hns3_enet_ring *ring) +{ + dma_addr_t dma = ring->desc_dma_addr; + struct hnae3_queue *q = ring->tqp; + + if (!HNAE3_IS_TX_RING(ring)) { + hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, + (u32)dma); + hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG, + (u32)((dma >> 31) >> 1)); + + hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG, + hns3_buf_size2type(ring->buf_size)); + hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG, + ring->desc_num / 8 - 1); + + } else { + hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG, + (u32)dma); + hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG, + (u32)((dma >> 31) >> 1)); + + hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG, + hns3_buf_size2type(ring->buf_size)); + hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG, + ring->desc_num / 8 - 1); + } +} + +static int hns3_init_all_ring(struct hns3_nic_priv *priv) +{ + struct hnae3_handle *h = 
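+	/* Note on hns3_init_ring_hw() above: the 64-bit descriptor base is
+	 * written as (u32)dma (low half) and (u32)((dma >> 31) >> 1) (high
+	 * half); the two-step shift stays well-defined when dma_addr_t is
+	 * only 32 bits wide, where a direct >> 32 would be undefined.
+	 */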
priv->ae_handle;
+	int ring_num = h->kinfo.num_tqps * 2;
+	int i, j;
+	int ret;
+
+	for (i = 0; i < ring_num; i++) {
+		ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
+		if (ret) {
+			dev_err(priv->dev,
+				"Alloc ring memory fail! ret=%d\n", ret);
+			goto out_when_alloc_ring_memory;
+		}
+
+		hns3_init_ring_hw(priv->ring_data[i].ring);
+
+		u64_stats_init(&priv->ring_data[i].ring->syncp);
+	}
+
+	return 0;
+
+out_when_alloc_ring_memory:
+	for (j = i - 1; j >= 0; j--)
+		hns3_fini_ring(priv->ring_data[j].ring);
+
+	return -ENOMEM;
+}
+
+static int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
+{
+	struct hnae3_handle *h = priv->ae_handle;
+	int i;
+
+	for (i = 0; i < h->kinfo.num_tqps; i++) {
+		if (h->ae_algo->ops->reset_queue)
+			h->ae_algo->ops->reset_queue(h, i);
+
+		hns3_fini_ring(priv->ring_data[i].ring);
+		hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
+	}
+
+	return 0;
+}
+
+/* Set mac addr if it is configured, otherwise leave it to the AE driver */
+static void hns3_init_mac_addr(struct net_device *netdev)
+{
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+	struct hnae3_handle *h = priv->ae_handle;
+	u8 mac_addr_temp[ETH_ALEN];
+
+	if (h->ae_algo->ops->get_mac_addr) {
+		h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
+		ether_addr_copy(netdev->dev_addr, mac_addr_temp);
+	}
+
+	/* Check if the MAC address is valid, if not get a random one */
+	if (!is_valid_ether_addr(netdev->dev_addr)) {
+		eth_hw_addr_random(netdev);
+		dev_warn(priv->dev, "using random MAC address %pM\n",
+			 netdev->dev_addr);
+		/* Also copy this new MAC address into hdev */
+		if (h->ae_algo->ops->set_mac_addr)
+			h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr);
+	}
+}
+
+static void hns3_nic_set_priv_ops(struct net_device *netdev)
+{
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+	if ((netdev->features & NETIF_F_TSO) ||
+	    (netdev->features & NETIF_F_TSO6)) {
+		priv->ops.fill_desc = hns3_fill_desc_tso;
+		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
+	} else {
+		priv->ops.fill_desc = hns3_fill_desc;
+		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
+	}
+}
+
+static int hns3_client_init(struct hnae3_handle *handle)
+{
+	struct pci_dev *pdev = handle->pdev;
+	struct hns3_nic_priv *priv;
+	struct net_device *netdev;
+	int ret;
+
+	netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv),
+				   handle->kinfo.num_tqps);
+	if (!netdev)
+		return -ENOMEM;
+
+	priv = netdev_priv(netdev);
+	priv->dev = &pdev->dev;
+	priv->netdev = netdev;
+	priv->ae_handle = handle;
+
+	handle->kinfo.netdev = netdev;
+	handle->priv = (void *)priv;
+
+	hns3_init_mac_addr(netdev);
+
+	hns3_set_default_feature(netdev);
+
+	netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
+	netdev->priv_flags |= IFF_UNICAST_FLT;
+	netdev->netdev_ops = &hns3_nic_netdev_ops;
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+	hns3_ethtool_set_ops(netdev);
+	hns3_nic_set_priv_ops(netdev);
+
+	/* Carrier off reporting is important to ethtool even BEFORE open */
+	netif_carrier_off(netdev);
+
+	ret = hns3_get_ring_config(priv);
+	if (ret)
+		goto out_get_ring_cfg;
+
+	ret = hns3_nic_init_vector_data(priv);
+	if (ret)
+		goto out_init_vector_data;
+
+	ret = hns3_init_all_ring(priv);
+	if (ret)
+		goto out_init_ring_data;
+
+	ret = register_netdev(netdev);
+	if (ret) {
+		dev_err(priv->dev, "probe register netdev fail!\n");
+		goto out_reg_netdev_fail;
+	}
+
+	return ret;
+
+out_reg_netdev_fail:
+out_init_ring_data:
+	(void)hns3_nic_uninit_vector_data(priv);
+	priv->ring_data = NULL;
+out_init_vector_data: +out_get_ring_cfg: + priv->ae_handle = NULL; + free_netdev(netdev); + return ret; +} + +static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) +{ + struct net_device *netdev = handle->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(netdev); + int ret; + + if (netdev->reg_state != NETREG_UNINITIALIZED) + unregister_netdev(netdev); + + ret = hns3_nic_uninit_vector_data(priv); + if (ret) + netdev_err(netdev, "uninit vector error\n"); + + ret = hns3_uninit_all_ring(priv); + if (ret) + netdev_err(netdev, "uninit ring error\n"); + + priv->ring_data = NULL; + + free_netdev(netdev); +} + +static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup) +{ + struct net_device *netdev = handle->kinfo.netdev; + + if (!netdev) + return; + + if (linkup) { + netif_carrier_on(netdev); + netif_tx_wake_all_queues(netdev); + netdev_info(netdev, "link up\n"); + } else { + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + netdev_info(netdev, "link down\n"); + } +} + +const struct hnae3_client_ops client_ops = { + .init_instance = hns3_client_init, + .uninit_instance = hns3_client_uninit, + .link_status_change = hns3_link_status_change, +}; + +/* hns3_init_module - Driver registration routine + * hns3_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + */ +static int __init hns3_init_module(void) +{ + int ret; + + pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string); + pr_info("%s: %s\n", hns3_driver_name, hns3_copyright); + + client.type = HNAE3_CLIENT_KNIC; + snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s", + hns3_driver_name); + + client.ops = &client_ops; + + ret = hnae3_register_client(&client); + if (ret) + return ret; + + ret = pci_register_driver(&hns3_driver); + if (ret) + hnae3_unregister_client(&client); + + return ret; +} +module_init(hns3_init_module); + +/* hns3_exit_module - Driver exit cleanup routine + * hns3_exit_module is called just before the driver is removed + * from memory. + */ +static void __exit hns3_exit_module(void) +{ + pci_unregister_driver(&hns3_driver); + hnae3_unregister_client(&client); +} +module_exit(hns3_exit_module); + +MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver"); +MODULE_AUTHOR("Huawei Tech. Co., Ltd."); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("pci:hns-nic"); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h new file mode 100644 index 000000000000..a6e8f15a4669 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h @@ -0,0 +1,592 @@ +/* + * Copyright (c) 2016 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __HNS3_ENET_H +#define __HNS3_ENET_H + +#include "hnae3.h" + +extern const char hns3_driver_version[]; + +enum hns3_nic_state { + HNS3_NIC_STATE_TESTING, + HNS3_NIC_STATE_RESETTING, + HNS3_NIC_STATE_REINITING, + HNS3_NIC_STATE_DOWN, + HNS3_NIC_STATE_DISABLED, + HNS3_NIC_STATE_REMOVING, + HNS3_NIC_STATE_SERVICE_INITED, + HNS3_NIC_STATE_SERVICE_SCHED, + HNS3_NIC_STATE2_RESET_REQUESTED, + HNS3_NIC_STATE_MAX +}; + +#define HNS3_RING_RX_RING_BASEADDR_L_REG 0x00000 +#define HNS3_RING_RX_RING_BASEADDR_H_REG 0x00004 +#define HNS3_RING_RX_RING_BD_NUM_REG 0x00008 +#define HNS3_RING_RX_RING_BD_LEN_REG 0x0000C +#define HNS3_RING_RX_RING_TAIL_REG 0x00018 +#define HNS3_RING_RX_RING_HEAD_REG 0x0001C +#define HNS3_RING_RX_RING_FBDNUM_REG 0x00020 +#define HNS3_RING_RX_RING_PKTNUM_RECORD_REG 0x0002C + +#define HNS3_RING_TX_RING_BASEADDR_L_REG 0x00040 +#define HNS3_RING_TX_RING_BASEADDR_H_REG 0x00044 +#define HNS3_RING_TX_RING_BD_NUM_REG 0x00048 +#define HNS3_RING_TX_RING_BD_LEN_REG 0x0004C +#define HNS3_RING_TX_RING_TAIL_REG 0x00058 +#define HNS3_RING_TX_RING_HEAD_REG 0x0005C +#define HNS3_RING_TX_RING_FBDNUM_REG 0x00060 +#define HNS3_RING_TX_RING_OFFSET_REG 0x00064 +#define HNS3_RING_TX_RING_PKTNUM_RECORD_REG 0x0006C + +#define HNS3_RING_PREFETCH_EN_REG 0x0007C +#define HNS3_RING_CFG_VF_NUM_REG 0x00080 +#define HNS3_RING_ASID_REG 0x0008C +#define HNS3_RING_RX_VM_REG 0x00090 +#define HNS3_RING_T0_BE_RST 0x00094 +#define HNS3_RING_COULD_BE_RST 0x00098 +#define HNS3_RING_WRR_WEIGHT_REG 0x0009c + +#define HNS3_RING_INTMSK_RXWL_REG 0x000A0 +#define HNS3_RING_INTSTS_RX_RING_REG 0x000A4 +#define HNS3_RX_RING_INT_STS_REG 0x000A8 +#define HNS3_RING_INTMSK_TXWL_REG 0x000AC +#define HNS3_RING_INTSTS_TX_RING_REG 0x000B0 +#define HNS3_TX_RING_INT_STS_REG 0x000B4 +#define HNS3_RING_INTMSK_RX_OVERTIME_REG 0x000B8 +#define HNS3_RING_INTSTS_RX_OVERTIME_REG 0x000BC +#define HNS3_RING_INTMSK_TX_OVERTIME_REG 0x000C4 +#define HNS3_RING_INTSTS_TX_OVERTIME_REG 0x000C8 + +#define HNS3_RING_MB_CTRL_REG 0x00100 +#define HNS3_RING_MB_DATA_BASE_REG 0x00200 + +#define HNS3_TX_REG_OFFSET 0x40 + +#define HNS3_RX_HEAD_SIZE 256 + +#define HNS3_TX_TIMEOUT (5 * HZ) +#define HNS3_RING_NAME_LEN 16 +#define HNS3_BUFFER_SIZE_2048 2048 +#define HNS3_RING_MAX_PENDING 32768 + +#define HNS3_BD_SIZE_512_TYPE 0 +#define HNS3_BD_SIZE_1024_TYPE 1 +#define HNS3_BD_SIZE_2048_TYPE 2 +#define HNS3_BD_SIZE_4096_TYPE 3 + +#define HNS3_RX_FLAG_VLAN_PRESENT 0x1 +#define HNS3_RX_FLAG_L3ID_IPV4 0x0 +#define HNS3_RX_FLAG_L3ID_IPV6 0x1 +#define HNS3_RX_FLAG_L4ID_UDP 0x0 +#define HNS3_RX_FLAG_L4ID_TCP 0x1 + +#define HNS3_RXD_DMAC_S 0 +#define HNS3_RXD_DMAC_M (0x3 << HNS3_RXD_DMAC_S) +#define HNS3_RXD_VLAN_S 2 +#define HNS3_RXD_VLAN_M (0x3 << HNS3_RXD_VLAN_S) +#define HNS3_RXD_L3ID_S 4 +#define HNS3_RXD_L3ID_M (0xf << HNS3_RXD_L3ID_S) +#define HNS3_RXD_L4ID_S 8 +#define HNS3_RXD_L4ID_M (0xf << HNS3_RXD_L4ID_S) +#define HNS3_RXD_FRAG_B 12 +#define HNS3_RXD_L2E_B 16 +#define HNS3_RXD_L3E_B 17 +#define HNS3_RXD_L4E_B 18 +#define HNS3_RXD_TRUNCAT_B 19 +#define HNS3_RXD_HOI_B 20 +#define HNS3_RXD_DOI_B 21 +#define HNS3_RXD_OL3E_B 22 +#define HNS3_RXD_OL4E_B 23 + +#define HNS3_RXD_ODMAC_S 0 +#define HNS3_RXD_ODMAC_M (0x3 << HNS3_RXD_ODMAC_S) +#define HNS3_RXD_OVLAN_S 2 +#define HNS3_RXD_OVLAN_M (0x3 << HNS3_RXD_OVLAN_S) +#define HNS3_RXD_OL3ID_S 4 +#define HNS3_RXD_OL3ID_M (0xf << HNS3_RXD_OL3ID_S) +#define HNS3_RXD_OL4ID_S 8 +#define HNS3_RXD_OL4ID_M (0xf << HNS3_RXD_OL4ID_S) +#define HNS3_RXD_FBHI_S 12 +#define HNS3_RXD_FBHI_M (0x3 << HNS3_RXD_FBHI_S) 
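+/* Field-extraction sketch using the masks above (assumes the
+ * hnae_get_field() helper from hnae3.h that is used elsewhere in this
+ * driver):
+ *
+ *	l234info = le32_to_cpu(desc->rx.l234_info);
+ *	l3id = hnae_get_field(l234info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
+ *
+ * l3id then matches enum hns3_pkt_l3type defined below, e.g.
+ * HNS3_L3_TYPE_IPV4 for plain IPv4 frames.
+ */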
+#define HNS3_RXD_FBLI_S 14 +#define HNS3_RXD_FBLI_M (0x3 << HNS3_RXD_FBLI_S) + +#define HNS3_RXD_BDTYPE_S 0 +#define HNS3_RXD_BDTYPE_M (0xf << HNS3_RXD_BDTYPE_S) +#define HNS3_RXD_VLD_B 4 +#define HNS3_RXD_UDP0_B 5 +#define HNS3_RXD_EXTEND_B 7 +#define HNS3_RXD_FE_B 8 +#define HNS3_RXD_LUM_B 9 +#define HNS3_RXD_CRCP_B 10 +#define HNS3_RXD_L3L4P_B 11 +#define HNS3_RXD_TSIND_S 12 +#define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S) +#define HNS3_RXD_LKBK_B 15 +#define HNS3_RXD_HDL_S 16 +#define HNS3_RXD_HDL_M (0x7ff << HNS3_RXD_HDL_S) +#define HNS3_RXD_HSIND_B 31 + +#define HNS3_TXD_L3T_S 0 +#define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S) +#define HNS3_TXD_L4T_S 2 +#define HNS3_TXD_L4T_M (0x3 << HNS3_TXD_L4T_S) +#define HNS3_TXD_L3CS_B 4 +#define HNS3_TXD_L4CS_B 5 +#define HNS3_TXD_VLAN_B 6 +#define HNS3_TXD_TSO_B 7 + +#define HNS3_TXD_L2LEN_S 8 +#define HNS3_TXD_L2LEN_M (0xff << HNS3_TXD_L2LEN_S) +#define HNS3_TXD_L3LEN_S 16 +#define HNS3_TXD_L3LEN_M (0xff << HNS3_TXD_L3LEN_S) +#define HNS3_TXD_L4LEN_S 24 +#define HNS3_TXD_L4LEN_M (0xff << HNS3_TXD_L4LEN_S) + +#define HNS3_TXD_OL3T_S 0 +#define HNS3_TXD_OL3T_M (0x3 << HNS3_TXD_OL3T_S) +#define HNS3_TXD_OVLAN_B 2 +#define HNS3_TXD_MACSEC_B 3 +#define HNS3_TXD_TUNTYPE_S 4 +#define HNS3_TXD_TUNTYPE_M (0xf << HNS3_TXD_TUNTYPE_S) + +#define HNS3_TXD_BDTYPE_S 0 +#define HNS3_TXD_BDTYPE_M (0xf << HNS3_TXD_BDTYPE_S) +#define HNS3_TXD_FE_B 4 +#define HNS3_TXD_SC_S 5 +#define HNS3_TXD_SC_M (0x3 << HNS3_TXD_SC_S) +#define HNS3_TXD_EXTEND_B 7 +#define HNS3_TXD_VLD_B 8 +#define HNS3_TXD_RI_B 9 +#define HNS3_TXD_RA_B 10 +#define HNS3_TXD_TSYN_B 11 +#define HNS3_TXD_DECTTL_S 12 +#define HNS3_TXD_DECTTL_M (0xf << HNS3_TXD_DECTTL_S) + +#define HNS3_TXD_MSS_S 0 +#define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S) + +#define HNS3_VECTOR_TX_IRQ BIT_ULL(0) +#define HNS3_VECTOR_RX_IRQ BIT_ULL(1) + +#define HNS3_VECTOR_NOT_INITED 0 +#define HNS3_VECTOR_INITED 1 + +#define HNS3_MAX_BD_SIZE 65535 +#define HNS3_MAX_BD_PER_FRAG 8 +#define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS + +#define HNS3_VECTOR_GL0_OFFSET 0x100 +#define HNS3_VECTOR_GL1_OFFSET 0x200 +#define HNS3_VECTOR_GL2_OFFSET 0x300 +#define HNS3_VECTOR_RL_OFFSET 0x900 +#define HNS3_VECTOR_RL_EN_B 6 + +enum hns3_pkt_l3t_type { + HNS3_L3T_NONE, + HNS3_L3T_IPV6, + HNS3_L3T_IPV4, + HNS3_L3T_RESERVED +}; + +enum hns3_pkt_l4t_type { + HNS3_L4T_UNKNOWN, + HNS3_L4T_TCP, + HNS3_L4T_UDP, + HNS3_L4T_SCTP +}; + +enum hns3_pkt_ol3t_type { + HNS3_OL3T_NONE, + HNS3_OL3T_IPV6, + HNS3_OL3T_IPV4_NO_CSUM, + HNS3_OL3T_IPV4_CSUM +}; + +enum hns3_pkt_tun_type { + HNS3_TUN_NONE, + HNS3_TUN_MAC_IN_UDP, + HNS3_TUN_NVGRE, + HNS3_TUN_OTHER +}; + +/* hardware spec ring buffer format */ +struct __packed hns3_desc { + __le64 addr; + union { + struct { + __le16 vlan_tag; + __le16 send_size; + union { + __le32 type_cs_vlan_tso_len; + struct { + __u8 type_cs_vlan_tso; + __u8 l2_len; + __u8 l3_len; + __u8 l4_len; + }; + }; + __le16 outer_vlan_tag; + __le16 tv; + + union { + __le32 ol_type_vlan_len_msec; + struct { + __u8 ol_type_vlan_msec; + __u8 ol2_len; + __u8 ol3_len; + __u8 ol4_len; + }; + }; + + __le32 paylen; + __le16 bdtp_fe_sc_vld_ra_ri; + __le16 mss; + } tx; + + struct { + __le32 l234_info; + __le16 pkt_len; + __le16 size; + + __le32 rss_hash; + __le16 fd_id; + __le16 vlan_tag; + + union { + __le32 ol_info; + struct { + __le16 o_dm_vlan_id_fb; + __le16 ot_vlan_tag; + }; + }; + + __le32 bd_base_info; + } rx; + }; +}; + +struct hns3_desc_cb { + dma_addr_t dma; /* dma address of this desc */ + void *buf; /* cpu addr for a desc */ + + /* priv 
data for the desc, e.g. an skb when used with the ip stack */
+	void *priv;
+	u16 page_offset;
+	u16 reuse_flag;
+
+	u16 length;     /* length of the buffer */
+
+	/* desc type, used by the ring user to mark the type of the priv data */
+	u16 type;
+};
+
+enum hns3_pkt_l3type {
+	HNS3_L3_TYPE_IPV4,
+	HNS3_L3_TYPE_IPV6,
+	HNS3_L3_TYPE_ARP,
+	HNS3_L3_TYPE_RARP,
+	HNS3_L3_TYPE_IPV4_OPT,
+	HNS3_L3_TYPE_IPV6_EXT,
+	HNS3_L3_TYPE_LLDP,
+	HNS3_L3_TYPE_BPDU,
+	HNS3_L3_TYPE_MAC_PAUSE,
+	HNS3_L3_TYPE_PFC_PAUSE, /* 0x9 */
+
+	/* reserved for 0xA~0xB */
+
+	HNS3_L3_TYPE_CNM = 0xc,
+
+	/* reserved for 0xD~0xE */
+
+	HNS3_L3_TYPE_PARSE_FAIL = 0xf /* must be last */
+};
+
+enum hns3_pkt_l4type {
+	HNS3_L4_TYPE_UDP,
+	HNS3_L4_TYPE_TCP,
+	HNS3_L4_TYPE_GRE,
+	HNS3_L4_TYPE_SCTP,
+	HNS3_L4_TYPE_IGMP,
+	HNS3_L4_TYPE_ICMP,
+
+	/* reserved for 0x6~0xE */
+
+	HNS3_L4_TYPE_PARSE_FAIL = 0xf /* must be last */
+};
+
+enum hns3_pkt_ol3type {
+	HNS3_OL3_TYPE_IPV4 = 0,
+	HNS3_OL3_TYPE_IPV6,
+	/* reserved for 0x2~0x3 */
+	HNS3_OL3_TYPE_IPV4_OPT = 4,
+	HNS3_OL3_TYPE_IPV6_EXT,
+
+	/* reserved for 0x6~0xE */
+
+	HNS3_OL3_TYPE_PARSE_FAIL = 0xf /* must be last */
+};
+
+enum hns3_pkt_ol4type {
+	HNS3_OL4_TYPE_NO_TUN,
+	HNS3_OL4_TYPE_MAC_IN_UDP,
+	HNS3_OL4_TYPE_NVGRE,
+	HNS3_OL4_TYPE_UNKNOWN
+};
+
+struct ring_stats {
+	u64 io_err_cnt;
+	u64 sw_err_cnt;
+	u64 seg_pkt_cnt;
+	union {
+		struct {
+			u64 tx_pkts;
+			u64 tx_bytes;
+			u64 tx_err_cnt;
+			u64 restart_queue;
+			u64 tx_busy;
+		};
+		struct {
+			u64 rx_pkts;
+			u64 rx_bytes;
+			u64 rx_err_cnt;
+			u64 reuse_pg_cnt;
+			u64 err_pkt_len;
+			u64 non_vld_descs;
+			u64 err_bd_num;
+			u64 l2_err;
+			u64 l3l4_csum_err;
+		};
+	};
+};
+
+struct hns3_enet_ring {
+	u8 __iomem *io_base; /* base io address for the ring */
+	struct hns3_desc *desc; /* dma map address space */
+	struct hns3_desc_cb *desc_cb;
+	struct hns3_enet_ring *next;
+	struct hns3_enet_tqp_vector *tqp_vector;
+	struct hnae3_queue *tqp;
+	char ring_name[HNS3_RING_NAME_LEN];
+	struct device *dev; /* will be used for DMA mapping of descriptors */
+
+	/* statistic */
+	struct ring_stats stats;
+	struct u64_stats_sync syncp;
+
+	dma_addr_t desc_dma_addr;
+	u32 buf_size;  /* size for hnae_desc->addr, preset by AE */
+	u16 desc_num;  /* total number of desc */
+	u16 max_desc_num_per_pkt;
+	u16 max_raw_data_sz_per_desc;
+	u16 max_pkt_size;
+	int next_to_use;    /* idx of next spare desc */
+
+	/* idx of the latest sent desc, the ring is empty when equal to
+	 * next_to_use
+	 */
+	int next_to_clean;
+
+	u32 flag;          /* ring attribute */
+	int irq_init_flag;
+
+	int numa_node;
+	cpumask_t affinity_mask;
+};
+
+struct hns_queue;
+
+struct hns3_nic_ring_data {
+	struct hns3_enet_ring *ring;
+	struct napi_struct napi;
+	int queue_index;
+	int (*poll_one)(struct hns3_nic_ring_data *, int, void *);
+	void (*ex_process)(struct hns3_nic_ring_data *, struct sk_buff *);
+	void (*fini_process)(struct hns3_nic_ring_data *);
+};
+
+struct hns3_nic_ops {
+	int (*fill_desc)(struct hns3_enet_ring *ring, void *priv,
+			 int size, dma_addr_t dma, int frag_end,
+			 enum hns_desc_type type);
+	int (*maybe_stop_tx)(struct sk_buff **out_skb,
+			     int *bnum, struct hns3_enet_ring *ring);
+	void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum);
+};
+
+enum hns3_flow_level_range {
+	HNS3_FLOW_LOW = 0,
+	HNS3_FLOW_MID = 1,
+	HNS3_FLOW_HIGH = 2,
+	HNS3_FLOW_ULTRA = 3,
+};
+
+enum hns3_link_mode_bits {
+	HNS3_LM_FIBRE_BIT = BIT(0),
+	HNS3_LM_AUTONEG_BIT = BIT(1),
+	HNS3_LM_TP_BIT = BIT(2),
+	HNS3_LM_PAUSE_BIT = BIT(3),
+	HNS3_LM_BACKPLANE_BIT = BIT(4),
+	HNS3_LM_10BASET_HALF_BIT = BIT(5),
+	HNS3_LM_10BASET_FULL_BIT = BIT(6),
+	HNS3_LM_100BASET_HALF_BIT = BIT(7),
+	HNS3_LM_100BASET_FULL_BIT = BIT(8),
+	HNS3_LM_1000BASET_FULL_BIT = BIT(9),
+	HNS3_LM_10000BASEKR_FULL_BIT = BIT(10),
+	HNS3_LM_25000BASEKR_FULL_BIT = BIT(11),
+	HNS3_LM_40000BASELR4_FULL_BIT = BIT(12),
+	HNS3_LM_50000BASEKR2_FULL_BIT = BIT(13),
+	HNS3_LM_100000BASEKR4_FULL_BIT = BIT(14),
+	HNS3_LM_COUNT = 15
+};
+
+#define HNS3_INT_GL_50K		0x000A
+#define HNS3_INT_GL_20K		0x0019
+#define HNS3_INT_GL_18K		0x001B
+#define HNS3_INT_GL_8K		0x003E
+
+struct hns3_enet_ring_group {
+	/* array of pointers to rings */
+	struct hns3_enet_ring *ring;
+	u64 total_bytes;	/* total bytes processed this group */
+	u64 total_packets;	/* total packets processed this group */
+	u16 count;
+	enum hns3_flow_level_range flow_level;
+	u16 int_gl;
+};
+
+struct hns3_enet_tqp_vector {
+	struct hnae3_handle *handle;
+	u8 __iomem *mask_addr;
+	int vector_irq;
+	int irq_init_flag;
+
+	u16 idx;	/* index in the TQP vector array per handle. */
+
+	struct napi_struct napi;
+
+	struct hns3_enet_ring_group rx_group;
+	struct hns3_enet_ring_group tx_group;
+
+	u16 num_tqps;	/* total number of tqps in TQP vector */
+
+	cpumask_t affinity_mask;
+	char name[HNAE3_INT_NAME_LEN];
+
+	/* when this reaches zero, the interrupt coalescing parameters
+	 * should be adjusted
+	 */
+	u8 int_adapt_down;
+} ____cacheline_internodealigned_in_smp;
+
+enum hns3_udp_tnl_type {
+	HNS3_UDP_TNL_VXLAN,
+	HNS3_UDP_TNL_GENEVE,
+	HNS3_UDP_TNL_MAX,
+};
+
+struct hns3_udp_tunnel {
+	u16 dst_port;
+	int used;
+};
+
+struct hns3_nic_priv {
+	struct hnae3_handle *ae_handle;
+	u32 enet_ver;
+	u32 port_id;
+	struct net_device *netdev;
+	struct device *dev;
+	struct hns3_nic_ops ops;
+
+	/* the cb for nic to manage the ring buffer; the first half of the
+	 * array is for tx_ring and the second half for rx_ring
+	 */
+	struct hns3_nic_ring_data *ring_data;
+	struct hns3_enet_tqp_vector *tqp_vector;
+	u16 vector_num;
+
+	/* The most recently read link state */
+	int link;
+	u64 tx_timeout_count;
+
+	unsigned long state;
+
+	struct timer_list service_timer;
+
+	struct work_struct service_task;
+
+	struct notifier_block notifier_block;
+	/* Vxlan/Geneve information */
+	struct hns3_udp_tunnel udp_tnl[HNS3_UDP_TNL_MAX];
+};
+
+union l3_hdr_info {
+	struct iphdr *v4;
+	struct ipv6hdr *v6;
+	unsigned char *hdr;
+};
+
+union l4_hdr_info {
+	struct tcphdr *tcp;
+	struct udphdr *udp;
+	unsigned char *hdr;
+};
+
+/* the distance between [begin, end) in a ring buffer
+ * note: one slot between begin and end is always left unused
+ */
+static inline int ring_dist(struct hns3_enet_ring *ring, int begin, int end)
+{
+	return (end - begin + ring->desc_num) % ring->desc_num;
+}
+
+static inline int ring_space(struct hns3_enet_ring *ring)
+{
+	return ring->desc_num -
+		ring_dist(ring, ring->next_to_clean, ring->next_to_use) - 1;
+}
+
+static inline int is_ring_empty(struct hns3_enet_ring *ring)
+{
+	return ring->next_to_use == ring->next_to_clean;
+}
+
+static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
+{
+	u8 __iomem *reg_addr = READ_ONCE(base);
+
+	writel(value, reg_addr + reg);
+}
+
+#define hns3_write_dev(a, reg, value) \
+	hns3_write_reg((a)->io_base, (reg), (value))
+
+#define hnae_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \
+		(tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG)
+
+#define ring_to_dev(ring) (&(ring)->tqp->handle->pdev->dev)
+
+#define ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? \
+		DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
+#define tx_ring_data(priv, idx) ((priv)->ring_data[idx])
+
+#define hnae_buf_size(_ring) ((_ring)->buf_size)
+#define hnae_page_order(_ring) (get_order(hnae_buf_size(_ring)))
+#define hnae_page_size(_ring) (PAGE_SIZE << hnae_page_order(_ring))
+
+/* iterator for handling rings in ring group */
+#define hns3_for_each_ring(pos, head) \
+	for (pos = (head).ring; pos; pos = pos->next)
+
+void hns3_ethtool_set_ops(struct net_device *netdev);
+
+int hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c
new file mode 100644
index 000000000000..0ad65e47c77e
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c
@@ -0,0 +1,482 @@
+/*
+ * Copyright (c) 2016~2017 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/string.h>
+
+#include "hns3_enet.h"
+
+struct hns3_stats {
+	char stats_string[ETH_GSTRING_LEN];
+	int stats_size;
+	int stats_offset;
+};
+
+/* tqp related stats */
+#define HNS3_TQP_STAT(_string, _member) {			\
+	.stats_string = _string,				\
+	.stats_size = FIELD_SIZEOF(struct ring_stats, _member),	\
+	.stats_offset = offsetof(struct hns3_enet_ring, stats) +\
+			offsetof(struct ring_stats, _member),	\
+}
+
+static const struct hns3_stats hns3_txq_stats[] = {
+	/* Tx per-queue statistics */
+	HNS3_TQP_STAT("tx_io_err_cnt", io_err_cnt),
+	HNS3_TQP_STAT("tx_sw_err_cnt", sw_err_cnt),
+	HNS3_TQP_STAT("tx_seg_pkt_cnt", seg_pkt_cnt),
+	HNS3_TQP_STAT("tx_pkts", tx_pkts),
+	HNS3_TQP_STAT("tx_bytes", tx_bytes),
+	HNS3_TQP_STAT("tx_err_cnt", tx_err_cnt),
+	HNS3_TQP_STAT("tx_restart_queue", restart_queue),
+	HNS3_TQP_STAT("tx_busy", tx_busy),
+};
+
+#define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats)
+
+static const struct hns3_stats hns3_rxq_stats[] = {
+	/* Rx per-queue statistics */
+	HNS3_TQP_STAT("rx_io_err_cnt", io_err_cnt),
+	HNS3_TQP_STAT("rx_sw_err_cnt", sw_err_cnt),
+	HNS3_TQP_STAT("rx_seg_pkt_cnt", seg_pkt_cnt),
+	HNS3_TQP_STAT("rx_pkts", rx_pkts),
+	HNS3_TQP_STAT("rx_bytes", rx_bytes),
+	HNS3_TQP_STAT("rx_err_cnt", rx_err_cnt),
+	HNS3_TQP_STAT("rx_reuse_pg_cnt", reuse_pg_cnt),
+	HNS3_TQP_STAT("rx_err_pkt_len", err_pkt_len),
+	HNS3_TQP_STAT("rx_non_vld_descs", non_vld_descs),
+	HNS3_TQP_STAT("rx_err_bd_num", err_bd_num),
+	HNS3_TQP_STAT("rx_l2_err", l2_err),
+	HNS3_TQP_STAT("rx_l3l4_csum_err", l3l4_csum_err),
+};
+
+#define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats)
+
+#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT)
+
+struct hns3_link_mode_mapping {
+	u32 hns3_link_mode;
+	u32 ethtool_link_mode;
+};
+
+static const struct hns3_link_mode_mapping hns3_lm_map[] = {
+	{HNS3_LM_FIBRE_BIT, ETHTOOL_LINK_MODE_FIBRE_BIT},
+	{HNS3_LM_AUTONEG_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT},
+	{HNS3_LM_TP_BIT, ETHTOOL_LINK_MODE_TP_BIT},
+	{HNS3_LM_PAUSE_BIT, ETHTOOL_LINK_MODE_Pause_BIT},
+	{HNS3_LM_BACKPLANE_BIT, ETHTOOL_LINK_MODE_Backplane_BIT},
+	{HNS3_LM_10BASET_HALF_BIT, ETHTOOL_LINK_MODE_10baseT_Half_BIT},
+	{HNS3_LM_10BASET_FULL_BIT, ETHTOOL_LINK_MODE_10baseT_Full_BIT},
+	{HNS3_LM_100BASET_HALF_BIT, ETHTOOL_LINK_MODE_100baseT_Half_BIT},
+	{HNS3_LM_100BASET_FULL_BIT, ETHTOOL_LINK_MODE_100baseT_Full_BIT},
+	{HNS3_LM_1000BASET_FULL_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT},
+};
+
+static void hns3_driv_to_eth_caps(u32 caps, struct ethtool_link_ksettings *cmd,
+				  bool is_advertised)
+{
+	int i;
+
+	if (is_advertised)
+		ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+	else
+		ethtool_link_ksettings_zero_link_mode(cmd, supported);
+
+	for (i = 0; i < ARRAY_SIZE(hns3_lm_map); i++) {
+		if (!(caps & hns3_lm_map[i].hns3_link_mode))
+			continue;
+
+		if (is_advertised)
+			__set_bit(hns3_lm_map[i].ethtool_link_mode,
+				  cmd->link_modes.advertising);
+		else
+			__set_bit(hns3_lm_map[i].ethtool_link_mode,
+				  cmd->link_modes.supported);
+	}
+}
+
+static int hns3_get_sset_count(struct net_device *netdev, int stringset)
+{
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+	struct hnae3_handle *h = priv->ae_handle;
+	const struct hnae3_ae_ops *ops = h->ae_algo->ops;
+
+	if (!ops->get_sset_count)
+		return -EOPNOTSUPP;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		return ((HNS3_TQP_STATS_COUNT * h->kinfo.num_tqps) +
+			ops->get_sset_count(h, stringset));
+
+	case ETH_SS_TEST:
+		return ops->get_sset_count(h, stringset);
+	}
+
+	return 0;
+}
+
+static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
+				 u32 stat_count, u32 num_tqps)
+{
+#define MAX_PREFIX_SIZE (8 + 4)
+	u32 size_left;
+	u32 i, j;
+	u32 n1;
+
+	for (i = 0; i < num_tqps; i++) {
+		for (j = 0; j < stat_count; j++) {
+			data[ETH_GSTRING_LEN - 1] = '\0';
+
+			/* first, prepend the prefix string */
+			n1 = snprintf(data, MAX_PREFIX_SIZE, "rcb_q%d_", i);
+			n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1);
+			size_left = (ETH_GSTRING_LEN - 1) - n1;
+
+			/* now, concatenate the stats string to it */
+			strncat(data, stats[j].stats_string, size_left);
+			data += ETH_GSTRING_LEN;
+		}
+	}
+
+	return data;
+}
+
+static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data)
+{
+	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+
+	/* get strings for Tx */
+	data = hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT,
+				   kinfo->num_tqps);
+
+	/* get strings for Rx */
+	data = hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT,
+				   kinfo->num_tqps);
+
+	return data;
+}
+
+static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+	struct hnae3_handle *h = priv->ae_handle;
+	const struct hnae3_ae_ops *ops = h->ae_algo->ops;
+	char *buff = (char *)data;
+
+	if (!ops->get_strings)
+		return;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		buff = hns3_get_strings_tqps(h, buff);
+		ops->get_strings(h, stringset, (u8 *)buff);
+		break;
+	case ETH_SS_TEST:
+		ops->get_strings(h, stringset, data);
+		break;
+	}
+}
+
+static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
+{
+	struct hns3_nic_priv *nic_priv = (struct hns3_nic_priv *)handle->priv;
+	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+	struct hns3_enet_ring *ring;
+	u8 *stat;
+	u32 i, j;
+
+	/* get stats for Tx */
+	for (i = 0; i < kinfo->num_tqps; i++) {
+		ring = nic_priv->ring_data[i].ring;
+		for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) {
+			stat = (u8 *)ring + hns3_txq_stats[j].stats_offset;
+			*data++ = *(u64 *)stat;
+		}
+	}
+
+	/* get stats for Rx */
+	for (i = 0; i < kinfo->num_tqps; i++) {
+		ring = nic_priv->ring_data[i + kinfo->num_tqps].ring;
+		for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) {
+			stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset;
+			*data++ = *(u64 *)stat;
+		}
+	}
+
+	return data;
+}
+
+/**
+ * hns3_get_stats - get detailed statistics.
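+ *
+ * The output layout matches hns3_get_strings(): per-queue Tx ring stats
+ * first, then per-queue Rx ring stats, then the MAC and other hardware
+ * stats supplied by the AE layer.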
+ * @netdev: net device
+ * @stats: statistics info.
+ * @data: statistics data.
+ */
+static void hns3_get_stats(struct net_device *netdev,
+			   struct ethtool_stats *stats, u64 *data)
+{
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+	struct hnae3_handle *h = priv->ae_handle;
+	u64 *p = data;
+
+	if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) {
+		netdev_err(netdev, "could not get any statistics\n");
+		return;
+	}
+
+	h->ae_algo->ops->update_stats(h, &netdev->stats);
+
+	/* get per-queue stats */
+	p = hns3_get_stats_tqps(h, p);
+
+	/* get MAC & other misc hardware stats */
+	h->ae_algo->ops->get_stats(h, p);
+}
+
+static void hns3_get_drvinfo(struct net_device *netdev,
+			     struct ethtool_drvinfo *drvinfo)
+{
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+	struct hnae3_handle *h = priv->ae_handle;
+
+	strncpy(drvinfo->version, hns3_driver_version,
+		sizeof(drvinfo->version));
+	drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';
+
+	strncpy(drvinfo->driver, h->pdev->driver->name,
+		sizeof(drvinfo->driver));
+	drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0';
+
+	strncpy(drvinfo->bus_info, pci_name(h->pdev),
+		sizeof(drvinfo->bus_info));
+	drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0';
+
+	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
+		 priv->ae_handle->ae_algo->ops->get_fw_version(h));
+}
+
+static u32 hns3_get_link(struct net_device *netdev)
+{
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+	struct hnae3_handle *h;
+
+	h = priv->ae_handle;
+
+	if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_status)
+		return h->ae_algo->ops->get_status(h);
+	else
+		return 0;
+}
+
+static void hns3_get_ringparam(struct net_device *netdev,
+			       struct ethtool_ringparam *param)
+{
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+	int queue_num = priv->ae_handle->kinfo.num_tqps;
+
+	param->tx_max_pending = HNS3_RING_MAX_PENDING;
+	param->rx_max_pending = HNS3_RING_MAX_PENDING;
+
+	param->tx_pending = priv->ring_data[0].ring->desc_num;
+	param->rx_pending = priv->ring_data[queue_num].ring->desc_num;
+}
+
+static void hns3_get_pauseparam(struct net_device *netdev,
+				struct ethtool_pauseparam *param)
+{
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+	struct hnae3_handle *h = priv->ae_handle;
+
+	if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_pauseparam)
+		h->ae_algo->ops->get_pauseparam(h, &param->autoneg,
+			&param->rx_pause, &param->tx_pause);
+}
+
+static int hns3_get_link_ksettings(struct net_device *netdev,
+				   struct ethtool_link_ksettings *cmd)
+{
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+	struct hnae3_handle *h = priv->ae_handle;
+	u32 supported_caps;
+	u32 advertised_caps;
+	u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN;
+	u8 link_stat;
+	u8 auto_neg;
+	u8 duplex;
+	u32 speed;
+
+	if (!h->ae_algo || !h->ae_algo->ops)
+		return -EOPNOTSUPP;
+
+	/* 1. auto_neg, speed and duplex from the AE handle */
+	if (h->ae_algo->ops->get_ksettings_an_result) {
+		h->ae_algo->ops->get_ksettings_an_result(h, &auto_neg,
+							 &speed, &duplex);
+		cmd->base.autoneg = auto_neg;
+		cmd->base.speed = speed;
+		cmd->base.duplex = duplex;
+
+		link_stat = hns3_get_link(netdev);
+		if (!link_stat) {
+			cmd->base.speed = (u32)SPEED_UNKNOWN;
+			cmd->base.duplex = DUPLEX_UNKNOWN;
+		}
+	}
+
+	/* 2. media_type from the BIOS parameter block */
+	if (h->ae_algo->ops->get_media_type)
+		h->ae_algo->ops->get_media_type(h, &media_type);
+
+	switch (media_type) {
+	case HNAE3_MEDIA_TYPE_FIBER:
+		cmd->base.port = PORT_FIBRE;
+		supported_caps = HNS3_LM_FIBRE_BIT | HNS3_LM_AUTONEG_BIT |
+			HNS3_LM_PAUSE_BIT |
HNS3_LM_1000BASET_FULL_BIT; + + advertised_caps = supported_caps; + break; + case HNAE3_MEDIA_TYPE_COPPER: + cmd->base.port = PORT_TP; + supported_caps = HNS3_LM_TP_BIT | HNS3_LM_AUTONEG_BIT | + HNS3_LM_PAUSE_BIT | HNS3_LM_1000BASET_FULL_BIT | + HNS3_LM_100BASET_FULL_BIT | HNS3_LM_100BASET_HALF_BIT | + HNS3_LM_10BASET_FULL_BIT | HNS3_LM_10BASET_HALF_BIT; + advertised_caps = supported_caps; + break; + case HNAE3_MEDIA_TYPE_BACKPLANE: + cmd->base.port = PORT_NONE; + supported_caps = HNS3_LM_BACKPLANE_BIT | HNS3_LM_PAUSE_BIT | + HNS3_LM_AUTONEG_BIT | HNS3_LM_1000BASET_FULL_BIT | + HNS3_LM_100BASET_FULL_BIT | HNS3_LM_100BASET_HALF_BIT | + HNS3_LM_10BASET_FULL_BIT | HNS3_LM_10BASET_HALF_BIT; + + advertised_caps = supported_caps; + break; + case HNAE3_MEDIA_TYPE_UNKNOWN: + default: + cmd->base.port = PORT_OTHER; + supported_caps = 0; + advertised_caps = 0; + break; + } + + /* now, map driver link modes to ethtool link modes */ + hns3_driv_to_eth_caps(supported_caps, cmd, false); + hns3_driv_to_eth_caps(advertised_caps, cmd, true); + + /* 3.mdix_ctrl&mdix get from phy reg */ + if (h->ae_algo->ops->get_mdix_mode) + h->ae_algo->ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl, + &cmd->base.eth_tp_mdix); + /* 4.mdio_support */ + cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22; + + return 0; +} + +static u32 hns3_get_rss_key_size(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (!h->ae_algo || !h->ae_algo->ops || + !h->ae_algo->ops->get_rss_key_size) + return -EOPNOTSUPP; + + return h->ae_algo->ops->get_rss_key_size(h); +} + +static u32 hns3_get_rss_indir_size(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (!h->ae_algo || !h->ae_algo->ops || + !h->ae_algo->ops->get_rss_indir_size) + return -EOPNOTSUPP; + + return h->ae_algo->ops->get_rss_indir_size(h); +} + +static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss) + return -EOPNOTSUPP; + + return h->ae_algo->ops->get_rss(h, indir, key, hfunc); +} + +static int hns3_set_rss(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss) + return -EOPNOTSUPP; + + /* currently we only support Toeplitz hash */ + if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP)) { + netdev_err(netdev, + "hash func not supported (only Toeplitz hash)\n"); + return -EOPNOTSUPP; + } + if (!indir) { + netdev_err(netdev, + "set rss failed for indir is empty\n"); + return -EOPNOTSUPP; + } + + return h->ae_algo->ops->set_rss(h, indir, key, hfunc); +} + +static int hns3_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_tc_size) + return -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = h->ae_algo->ops->get_tc_size(h); + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static const struct ethtool_ops hns3_ethtool_ops = { + .get_drvinfo = hns3_get_drvinfo, + .get_link = hns3_get_link, + 
.get_ringparam = hns3_get_ringparam, + .get_pauseparam = hns3_get_pauseparam, + .get_strings = hns3_get_strings, + .get_ethtool_stats = hns3_get_stats, + .get_sset_count = hns3_get_sset_count, + .get_rxnfc = hns3_get_rxnfc, + .get_rxfh_key_size = hns3_get_rss_key_size, + .get_rxfh_indir_size = hns3_get_rss_indir_size, + .get_rxfh = hns3_get_rss, + .set_rxfh = hns3_set_rss, + .get_link_ksettings = hns3_get_link_ksettings, +}; + +void hns3_ethtool_set_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &hns3_ethtool_ops; +} diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index b9d310f20bcc..4878b7169e0f 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -3102,8 +3102,7 @@ static int ehea_setup_ports(struct ehea_adapter *adapter) dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no", NULL); if (!dn_log_port_id) { - pr_err("bad device node: eth_dn name=%s\n", - eth_dn->full_name); + pr_err("bad device node: eth_dn name=%pOF\n", eth_dn); continue; } @@ -3425,7 +3424,7 @@ static int ehea_probe_adapter(struct platform_device *dev) if (!adapter->handle) { dev_err(&dev->dev, "failed getting handle for adapter" - " '%s'\n", dev->dev.of_node->full_name); + " '%pOF'\n", dev->dev.of_node); ret = -ENODEV; goto out_free_ad; } diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 259e69a52ec5..95135d20458f 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -133,8 +133,7 @@ static inline void emac_report_timeout_error(struct emac_instance *dev, EMAC_FTR_440EP_PHY_CLK_FIX)) DBG(dev, "%s" NL, error); else if (net_ratelimit()) - printk(KERN_ERR "%s: %s\n", dev->ofdev->dev.of_node->full_name, - error); + printk(KERN_ERR "%pOF: %s\n", dev->ofdev->dev.of_node, error); } /* EMAC PHY clock workaround: @@ -2258,8 +2257,8 @@ static void emac_ethtool_get_drvinfo(struct net_device *ndev, strlcpy(info->driver, "ibm_emac", sizeof(info->driver)); strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %s", - dev->cell_index, dev->ofdev->dev.of_node->full_name); + snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %pOF", + dev->cell_index, dev->ofdev->dev.of_node); } static const struct ethtool_ops emac_ethtool_ops = { @@ -2431,8 +2430,8 @@ static int emac_read_uint_prop(struct device_node *np, const char *name, const u32 *prop = of_get_property(np, name, &len); if (prop == NULL || len < sizeof(u32)) { if (fatal) - printk(KERN_ERR "%s: missing %s property\n", - np->full_name, name); + printk(KERN_ERR "%pOF: missing %s property\n", + np, name); return -ENODEV; } *val = *prop; @@ -2768,7 +2767,7 @@ static int emac_init_phy(struct emac_instance *dev) #endif mutex_unlock(&emac_phy_map_lock); if (i == 0x20) { - printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name); + printk(KERN_WARNING "%pOF: can't find PHY!\n", np); return -ENXIO; } @@ -2894,8 +2893,8 @@ static int emac_init_config(struct emac_instance *dev) #ifdef CONFIG_IBM_EMAC_NO_FLOW_CTRL dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x; #else - printk(KERN_ERR "%s: Flow control not disabled!\n", - np->full_name); + printk(KERN_ERR "%pOF: Flow control not disabled!\n", + np); return -ENXIO; #endif } @@ -2918,8 +2917,7 @@ static int emac_init_config(struct emac_instance *dev) #ifdef CONFIG_IBM_EMAC_TAH dev->features |= EMAC_FTR_HAS_TAH; #else - printk(KERN_ERR "%s: TAH support not 
enabled !\n", - np->full_name); + printk(KERN_ERR "%pOF: TAH support not enabled !\n", np); return -ENXIO; #endif } @@ -2928,8 +2926,7 @@ static int emac_init_config(struct emac_instance *dev) #ifdef CONFIG_IBM_EMAC_ZMII dev->features |= EMAC_FTR_HAS_ZMII; #else - printk(KERN_ERR "%s: ZMII support not enabled !\n", - np->full_name); + printk(KERN_ERR "%pOF: ZMII support not enabled !\n", np); return -ENXIO; #endif } @@ -2938,8 +2935,7 @@ static int emac_init_config(struct emac_instance *dev) #ifdef CONFIG_IBM_EMAC_RGMII dev->features |= EMAC_FTR_HAS_RGMII; #else - printk(KERN_ERR "%s: RGMII support not enabled !\n", - np->full_name); + printk(KERN_ERR "%pOF: RGMII support not enabled !\n", np); return -ENXIO; #endif } @@ -2947,8 +2943,8 @@ static int emac_init_config(struct emac_instance *dev) /* Read MAC-address */ p = of_get_property(np, "local-mac-address", NULL); if (p == NULL) { - printk(KERN_ERR "%s: Can't find local-mac-address property\n", - np->full_name); + printk(KERN_ERR "%pOF: Can't find local-mac-address property\n", + np); return -ENXIO; } memcpy(dev->ndev->dev_addr, p, ETH_ALEN); @@ -3043,23 +3039,21 @@ static int emac_probe(struct platform_device *ofdev) dev->emac_irq = irq_of_parse_and_map(np, 0); dev->wol_irq = irq_of_parse_and_map(np, 1); if (!dev->emac_irq) { - printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name); + printk(KERN_ERR "%pOF: Can't map main interrupt\n", np); goto err_free; } ndev->irq = dev->emac_irq; /* Map EMAC regs */ if (of_address_to_resource(np, 0, &dev->rsrc_regs)) { - printk(KERN_ERR "%s: Can't get registers address\n", - np->full_name); + printk(KERN_ERR "%pOF: Can't get registers address\n", np); goto err_irq_unmap; } // TODO : request_mem_region dev->emacp = ioremap(dev->rsrc_regs.start, resource_size(&dev->rsrc_regs)); if (dev->emacp == NULL) { - printk(KERN_ERR "%s: Can't map device registers!\n", - np->full_name); + printk(KERN_ERR "%pOF: Can't map device registers!\n", np); err = -ENOMEM; goto err_irq_unmap; } @@ -3068,8 +3062,7 @@ static int emac_probe(struct platform_device *ofdev) err = emac_wait_deps(dev); if (err) { printk(KERN_ERR - "%s: Timeout waiting for dependent devices\n", - np->full_name); + "%pOF: Timeout waiting for dependent devices\n", np); /* display more info about what's missing ? 
*/ goto err_reg_unmap; } @@ -3084,8 +3077,8 @@ static int emac_probe(struct platform_device *ofdev) dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan); err = mal_register_commac(dev->mal, &dev->commac); if (err) { - printk(KERN_ERR "%s: failed to register with mal %s!\n", - np->full_name, dev->mal_dev->dev.of_node->full_name); + printk(KERN_ERR "%pOF: failed to register with mal %pOF!\n", + np, dev->mal_dev->dev.of_node); goto err_rel_deps; } dev->rx_skb_size = emac_rx_skb_size(ndev->mtu); @@ -3161,8 +3154,8 @@ static int emac_probe(struct platform_device *ofdev) err = register_netdev(ndev); if (err) { - printk(KERN_ERR "%s: failed to register net device (%d)!\n", - np->full_name, err); + printk(KERN_ERR "%pOF: failed to register net device (%d)!\n", + np, err); goto err_detach_tah; } @@ -3176,8 +3169,8 @@ static int emac_probe(struct platform_device *ofdev) wake_up_all(&emac_probe_wait); - printk(KERN_INFO "%s: EMAC-%d %s, MAC %pM\n", - ndev->name, dev->cell_index, np->full_name, ndev->dev_addr); + printk(KERN_INFO "%s: EMAC-%d %pOF, MAC %pM\n", + ndev->name, dev->cell_index, np, ndev->dev_addr); if (dev->phy_mode == PHY_MODE_SGMII) printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name); diff --git a/drivers/net/ethernet/ibm/emac/debug.h b/drivers/net/ethernet/ibm/emac/debug.h index 5bdfc174a07e..9d06d3be3161 100644 --- a/drivers/net/ethernet/ibm/emac/debug.h +++ b/drivers/net/ethernet/ibm/emac/debug.h @@ -31,7 +31,7 @@ #endif #define EMAC_DBG(d, name, fmt, arg...) \ - printk(KERN_DEBUG #name "%s: " fmt, d->ofdev->dev.of_node->full_name, ## arg) + printk(KERN_DEBUG #name "%pOF: " fmt, d->ofdev->dev.of_node, ## arg) #if DBG_LEVEL > 0 # define DBG(d,f,x...) EMAC_DBG(d, emac, f, ##x) diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c index 91b1a558f37d..2c74baa2398a 100644 --- a/drivers/net/ethernet/ibm/emac/mal.c +++ b/drivers/net/ethernet/ibm/emac/mal.c @@ -579,8 +579,8 @@ static int mal_probe(struct platform_device *ofdev) mal->features |= (MAL_FTR_CLEAR_ICINTSTAT | MAL_FTR_COMMON_ERR_INT); #else - printk(KERN_ERR "%s: Support for 405EZ not enabled!\n", - ofdev->dev.of_node->full_name); + printk(KERN_ERR "%pOF: Support for 405EZ not enabled!\n", + ofdev->dev.of_node); err = -ENODEV; goto fail; #endif @@ -687,8 +687,8 @@ static int mal_probe(struct platform_device *ofdev) mal_enable_eob_irq(mal); printk(KERN_INFO - "MAL v%d %s, %d TX channels, %d RX channels\n", - mal->version, ofdev->dev.of_node->full_name, + "MAL v%d %pOF, %d TX channels, %d RX channels\n", + mal->version, ofdev->dev.of_node, mal->num_tx_chans, mal->num_rx_chans); /* Advertise this instance to the rest of the world */ diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c index 206ccbbae7bb..c4a1ac38bba8 100644 --- a/drivers/net/ethernet/ibm/emac/rgmii.c +++ b/drivers/net/ethernet/ibm/emac/rgmii.c @@ -104,8 +104,8 @@ int rgmii_attach(struct platform_device *ofdev, int input, int mode) /* Check if we need to attach to a RGMII */ if (input < 0 || !rgmii_valid_mode(mode)) { - printk(KERN_ERR "%s: unsupported settings !\n", - ofdev->dev.of_node->full_name); + printk(KERN_ERR "%pOF: unsupported settings !\n", + ofdev->dev.of_node); return -ENODEV; } @@ -114,8 +114,8 @@ int rgmii_attach(struct platform_device *ofdev, int input, int mode) /* Enable this input */ out_be32(&p->fer, in_be32(&p->fer) | rgmii_mode_mask(mode, input)); - printk(KERN_NOTICE "%s: input %d in %s mode\n", - ofdev->dev.of_node->full_name, input, rgmii_mode_name(mode)); + 
printk(KERN_NOTICE "%pOF: input %d in %s mode\n",
+	       ofdev->dev.of_node, input, rgmii_mode_name(mode));
 
 	++dev->users;
 
@@ -249,8 +249,7 @@ static int rgmii_probe(struct platform_device *ofdev)
 
 	rc = -ENXIO;
 	if (of_address_to_resource(np, 0, &regs)) {
-		printk(KERN_ERR "%s: Can't get registers address\n",
-		       np->full_name);
+		printk(KERN_ERR "%pOF: Can't get registers address\n", np);
 		goto err_free;
 	}
 
@@ -258,8 +257,7 @@ static int rgmii_probe(struct platform_device *ofdev)
 	dev->base = (struct rgmii_regs __iomem *)ioremap(regs.start,
 						 sizeof(struct rgmii_regs));
 	if (dev->base == NULL) {
-		printk(KERN_ERR "%s: Can't map device registers!\n",
-		       np->full_name);
+		printk(KERN_ERR "%pOF: Can't map device registers!\n", np);
 		goto err_free;
 	}
 
@@ -278,8 +276,8 @@ static int rgmii_probe(struct platform_device *ofdev)
 	out_be32(&dev->base->fer, 0);
 
 	printk(KERN_INFO
-	       "RGMII %s initialized with%s MDIO support\n",
-	       ofdev->dev.of_node->full_name,
+	       "RGMII %pOF initialized with%s MDIO support\n",
+	       ofdev->dev.of_node,
 	       (dev->flags & EMAC_RGMII_FLAG_HAS_MDIO) ? "" : "out");
 
 	wmb();
diff --git a/drivers/net/ethernet/ibm/emac/tah.c b/drivers/net/ethernet/ibm/emac/tah.c
index 32cb6c9007c5..9912456dca48 100644
--- a/drivers/net/ethernet/ibm/emac/tah.c
+++ b/drivers/net/ethernet/ibm/emac/tah.c
@@ -58,8 +58,7 @@ void tah_reset(struct platform_device *ofdev)
 		--n;
 
 	if (unlikely(!n))
-		printk(KERN_ERR "%s: reset timeout\n",
-		       ofdev->dev.of_node->full_name);
+		printk(KERN_ERR "%pOF: reset timeout\n", ofdev->dev.of_node);
 
 	/* 10KB TAH TX FIFO accommodates the max MTU of 9000 */
 	out_be32(&p->mr,
@@ -105,8 +104,7 @@ static int tah_probe(struct platform_device *ofdev)
 
 	rc = -ENXIO;
 	if (of_address_to_resource(np, 0, &regs)) {
-		printk(KERN_ERR "%s: Can't get registers address\n",
-		       np->full_name);
+		printk(KERN_ERR "%pOF: Can't get registers address\n", np);
 		goto err_free;
 	}
 
@@ -114,8 +112,7 @@ static int tah_probe(struct platform_device *ofdev)
 	dev->base = (struct tah_regs __iomem *)ioremap(regs.start,
 					       sizeof(struct tah_regs));
 	if (dev->base == NULL) {
-		printk(KERN_ERR "%s: Can't map device registers!\n",
-		       np->full_name);
+		printk(KERN_ERR "%pOF: Can't map device registers!\n", np);
 		goto err_free;
 	}
 
@@ -124,8 +121,7 @@ static int tah_probe(struct platform_device *ofdev)
 	/* Initialize TAH and enable IPv4 checksum verification, no TSO yet */
 	tah_reset(ofdev);
 
-	printk(KERN_INFO
-	       "TAH %s initialized\n", ofdev->dev.of_node->full_name);
+	printk(KERN_INFO "TAH %pOF initialized\n", ofdev->dev.of_node);
 	wmb();
 
 	return 0;
diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c
index 8727b865ea02..89c42d362292 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.c
+++ b/drivers/net/ethernet/ibm/emac/zmii.c
@@ -121,15 +121,15 @@ int zmii_attach(struct platform_device *ofdev, int input, int *mode)
 	} else
 		dev->mode = *mode;
 
-	printk(KERN_NOTICE "%s: bridge in %s mode\n",
-	       ofdev->dev.of_node->full_name,
+	printk(KERN_NOTICE "%pOF: bridge in %s mode\n",
+	       ofdev->dev.of_node,
 	       zmii_mode_name(dev->mode));
 	} else {
 		/* All inputs must use the same mode */
 		if (*mode != PHY_MODE_NA && *mode != dev->mode) {
 			printk(KERN_ERR
-			       "%s: invalid mode %d specified for input %d\n",
-			       ofdev->dev.of_node->full_name, *mode, input);
+			       "%pOF: invalid mode %d specified for input %d\n",
+			       ofdev->dev.of_node, *mode, input);
 			mutex_unlock(&dev->lock);
 			return -EINVAL;
 		}
@@ -250,8 +250,7 @@ static int zmii_probe(struct platform_device *ofdev)
 
 	rc = -ENXIO;
 	if (of_address_to_resource(np, 0, &regs)) {
-		printk(KERN_ERR "%s: Can't get registers
address\n", - np->full_name); + printk(KERN_ERR "%pOF: Can't get registers address\n", np); goto err_free; } @@ -259,8 +258,7 @@ static int zmii_probe(struct platform_device *ofdev) dev->base = (struct zmii_regs __iomem *)ioremap(regs.start, sizeof(struct zmii_regs)); if (dev->base == NULL) { - printk(KERN_ERR "%s: Can't map device registers!\n", - np->full_name); + printk(KERN_ERR "%pOF: Can't map device registers!\n", np); goto err_free; } @@ -270,8 +268,7 @@ static int zmii_probe(struct platform_device *ofdev) /* Disable all inputs by default */ out_be32(&dev->base->fer, 0); - printk(KERN_INFO - "ZMII %s initialized\n", ofdev->dev.of_node->full_name); + printk(KERN_INFO "ZMII %pOF initialized\n", ofdev->dev.of_node); wmb(); platform_set_drvdata(ofdev, dev); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index c45e8e3b82d3..5ac873173b2e 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -347,6 +347,31 @@ static void replenish_pools(struct ibmvnic_adapter *adapter) } } +static void release_stats_buffers(struct ibmvnic_adapter *adapter) +{ + kfree(adapter->tx_stats_buffers); + kfree(adapter->rx_stats_buffers); +} + +static int init_stats_buffers(struct ibmvnic_adapter *adapter) +{ + adapter->tx_stats_buffers = + kcalloc(adapter->req_tx_queues, + sizeof(struct ibmvnic_tx_queue_stats), + GFP_KERNEL); + if (!adapter->tx_stats_buffers) + return -ENOMEM; + + adapter->rx_stats_buffers = + kcalloc(adapter->req_rx_queues, + sizeof(struct ibmvnic_rx_queue_stats), + GFP_KERNEL); + if (!adapter->rx_stats_buffers) + return -ENOMEM; + + return 0; +} + static void release_stats_token(struct ibmvnic_adapter *adapter) { struct device *dev = &adapter->vdev->dev; @@ -374,6 +399,7 @@ static int init_stats_token(struct ibmvnic_adapter *adapter) } adapter->stats_token = stok; + netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok); return 0; } @@ -387,6 +413,8 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter) for (i = 0; i < rx_scrqs; i++) { rx_pool = &adapter->rx_pool[i]; + netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i); + rc = reset_long_term_buff(adapter, &rx_pool->long_term_buff); if (rc) return rc; @@ -419,6 +447,8 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter) for (i = 0; i < rx_scrqs; i++) { rx_pool = &adapter->rx_pool[i]; + netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i); + kfree(rx_pool->free_map); free_long_term_buff(adapter, &rx_pool->long_term_buff); @@ -465,7 +495,7 @@ static int init_rx_pools(struct net_device *netdev) rx_pool = &adapter->rx_pool[i]; netdev_dbg(adapter->netdev, - "Initializing rx_pool %d, %lld buffs, %lld bytes each\n", + "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n", i, adapter->req_rx_add_entries_per_subcrq, be64_to_cpu(size_array[i])); @@ -515,6 +545,8 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter) tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); for (i = 0; i < tx_scrqs; i++) { + netdev_dbg(adapter->netdev, "Re-setting tx_pool[%d]\n", i); + tx_pool = &adapter->tx_pool[i]; rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff); @@ -545,6 +577,7 @@ static void release_tx_pools(struct ibmvnic_adapter *adapter) tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); for (i = 0; i < tx_scrqs; i++) { + netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i); tx_pool = &adapter->tx_pool[i]; kfree(tx_pool->tx_buff); free_long_term_buff(adapter, &tx_pool->long_term_buff); @@ 
-571,6 +604,11 @@ static int init_tx_pools(struct net_device *netdev) for (i = 0; i < tx_subcrqs; i++) { tx_pool = &adapter->tx_pool[i]; + + netdev_dbg(adapter->netdev, + "Initializing tx_pool[%d], %lld buffs\n", + i, adapter->req_tx_entries_per_subcrq); + tx_pool->tx_buff = kcalloc(adapter->req_tx_entries_per_subcrq, sizeof(struct ibmvnic_tx_buff), GFP_KERNEL); @@ -641,8 +679,10 @@ static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter) if (!adapter->napi_enabled) return; - for (i = 0; i < adapter->req_rx_queues; i++) + for (i = 0; i < adapter->req_rx_queues; i++) { + netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i); napi_disable(&adapter->napi[i]); + } adapter->napi_enabled = false; } @@ -700,12 +740,16 @@ static void release_resources(struct ibmvnic_adapter *adapter) release_rx_pools(adapter); release_stats_token(adapter); + release_stats_buffers(adapter); release_error_buffers(adapter); if (adapter->napi) { for (i = 0; i < adapter->req_rx_queues; i++) { - if (&adapter->napi[i]) + if (&adapter->napi[i]) { + netdev_dbg(adapter->netdev, + "Releasing napi[%d]\n", i); netif_napi_del(&adapter->napi[i]); + } } } } @@ -718,7 +762,8 @@ static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state) bool resend; int rc; - netdev_err(netdev, "setting link state %d\n", link_state); + netdev_dbg(netdev, "setting link state %d\n", link_state); + memset(&crq, 0, sizeof(crq)); crq.logical_link_state.first = IBMVNIC_CRQ_CMD; crq.logical_link_state.cmd = LOGICAL_LINK_STATE; @@ -755,6 +800,9 @@ static int set_real_num_queues(struct net_device *netdev) struct ibmvnic_adapter *adapter = netdev_priv(netdev); int rc; + netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n", + adapter->req_tx_queues, adapter->req_rx_queues); + rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues); if (rc) { netdev_err(netdev, "failed to set the number of tx queues\n"); @@ -777,6 +825,10 @@ static int init_resources(struct ibmvnic_adapter *adapter) if (rc) return rc; + rc = init_stats_buffers(adapter); + if (rc) + return rc; + rc = init_stats_token(adapter); if (rc) return rc; @@ -788,6 +840,7 @@ static int init_resources(struct ibmvnic_adapter *adapter) return -ENOMEM; for (i = 0; i < adapter->req_rx_queues; i++) { + netdev_dbg(netdev, "Adding napi[%d]\n", i); netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll, NAPI_POLL_WEIGHT); } @@ -816,6 +869,7 @@ static int __ibmvnic_open(struct net_device *netdev) * set the logical link state to up */ for (i = 0; i < adapter->req_rx_queues; i++) { + netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i); if (prev_state == VNIC_CLOSED) enable_irq(adapter->rx_scrq[i]->irq); else @@ -823,6 +877,7 @@ static int __ibmvnic_open(struct net_device *netdev) } for (i = 0; i < adapter->req_tx_queues; i++) { + netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i); if (prev_state == VNIC_CLOSED) enable_irq(adapter->tx_scrq[i]->irq); else @@ -896,6 +951,7 @@ static void clean_tx_pools(struct ibmvnic_adapter *adapter) if (!tx_pool) continue; + netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i); for (j = 0; j < tx_entries; j++) { if (tx_pool->tx_buff[j].skb) { dev_kfree_skb_any(tx_pool->tx_buff[j].skb); @@ -923,8 +979,11 @@ static int __ibmvnic_close(struct net_device *netdev) if (adapter->tx_scrq) { for (i = 0; i < adapter->req_tx_queues; i++) - if (adapter->tx_scrq[i]->irq) + if (adapter->tx_scrq[i]->irq) { + netdev_dbg(adapter->netdev, + "Disabling tx_scrq[%d] irq\n", i); disable_irq(adapter->tx_scrq[i]->irq); + } } rc = set_link_state(adapter, 
IBMVNIC_LOGICAL_LNK_DN); @@ -943,8 +1002,11 @@ static int __ibmvnic_close(struct net_device *netdev) break; } - if (adapter->rx_scrq[i]->irq) + if (adapter->rx_scrq[i]->irq) { + netdev_dbg(adapter->netdev, + "Disabling rx_scrq[%d] irq\n", i); disable_irq(adapter->rx_scrq[i]->irq); + } } } @@ -1259,6 +1321,9 @@ out: netdev->stats.tx_packets += tx_packets; adapter->tx_send_failed += tx_send_failed; adapter->tx_map_failed += tx_map_failed; + adapter->tx_stats_buffers[queue_num].packets += tx_packets; + adapter->tx_stats_buffers[queue_num].bytes += tx_bytes; + adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped; return ret; } @@ -1334,6 +1399,9 @@ static int do_reset(struct ibmvnic_adapter *adapter, struct net_device *netdev = adapter->netdev; int i, rc; + netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n", + rwi->reset_reason); + netif_carrier_off(netdev); adapter->reset_reason = rwi->reset_reason; @@ -1458,6 +1526,7 @@ static void __ibmvnic_reset(struct work_struct *work) } if (rc) { + netdev_dbg(adapter->netdev, "Reset failed\n"); free_all_rwi(adapter); mutex_unlock(&adapter->reset_lock); return; @@ -1491,7 +1560,7 @@ static void ibmvnic_reset(struct ibmvnic_adapter *adapter, list_for_each(entry, &adapter->rwi_list) { tmp = list_entry(entry, struct ibmvnic_rwi, list); if (tmp->reset_reason == reason) { - netdev_err(netdev, "Matching reset found, skipping\n"); + netdev_dbg(netdev, "Skipping matching reset\n"); mutex_unlock(&adapter->rwi_lock); return; } @@ -1507,6 +1576,8 @@ static void ibmvnic_reset(struct ibmvnic_adapter *adapter, rwi->reset_reason = reason; list_add_tail(&rwi->list, &adapter->rwi_list); mutex_unlock(&adapter->rwi_lock); + + netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason); schedule_work(&adapter->ibmvnic_reset); } @@ -1560,7 +1631,8 @@ restart_poll: rx_comp.correlator); /* do error checking */ if (next->rx_comp.rc) { - netdev_err(netdev, "rx error %x\n", next->rx_comp.rc); + netdev_dbg(netdev, "rx buffer returned with rc %x\n", + be16_to_cpu(next->rx_comp.rc)); /* free the entry */ next->rx_comp.first = 0; remove_buff_from_pool(adapter, rx_buff); @@ -1599,6 +1671,8 @@ restart_poll: napi_gro_receive(napi, skb); /* send it up */ netdev->stats.rx_packets++; netdev->stats.rx_bytes += length; + adapter->rx_stats_buffers[scrq_num].packets++; + adapter->rx_stats_buffers[scrq_num].bytes += length; frames_processed++; } @@ -1708,18 +1782,36 @@ static u32 ibmvnic_get_link(struct net_device *netdev) static void ibmvnic_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { - ring->rx_max_pending = 0; - ring->tx_max_pending = 0; + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq; + ring->tx_max_pending = adapter->max_tx_entries_per_subcrq; ring->rx_mini_max_pending = 0; ring->rx_jumbo_max_pending = 0; - ring->rx_pending = 0; - ring->tx_pending = 0; + ring->rx_pending = adapter->req_rx_add_entries_per_subcrq; + ring->tx_pending = adapter->req_tx_entries_per_subcrq; ring->rx_mini_pending = 0; ring->rx_jumbo_pending = 0; } +static void ibmvnic_get_channels(struct net_device *netdev, + struct ethtool_channels *channels) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + + channels->max_rx = adapter->max_rx_queues; + channels->max_tx = adapter->max_tx_queues; + channels->max_other = 0; + channels->max_combined = 0; + channels->rx_count = adapter->req_rx_queues; + channels->tx_count = adapter->req_tx_queues; + channels->other_count = 0; + 
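/* ibmvnic exposes dedicated rx/tx queues only, never combined channels */ +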
channels->combined_count = 0; +} + static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) { + struct ibmvnic_adapter *adapter = netdev_priv(dev); int i; if (stringset != ETH_SS_STATS) @@ -1727,13 +1819,39 @@ static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN) memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN); + + for (i = 0; i < adapter->req_tx_queues; i++) { + snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i); + data += ETH_GSTRING_LEN; + + snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i); + data += ETH_GSTRING_LEN; + + snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i); + data += ETH_GSTRING_LEN; + } + + for (i = 0; i < adapter->req_rx_queues; i++) { + snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i); + data += ETH_GSTRING_LEN; + + snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i); + data += ETH_GSTRING_LEN; + + snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i); + data += ETH_GSTRING_LEN; + } } static int ibmvnic_get_sset_count(struct net_device *dev, int sset) { + struct ibmvnic_adapter *adapter = netdev_priv(dev); + switch (sset) { case ETH_SS_STATS: - return ARRAY_SIZE(ibmvnic_stats); + return ARRAY_SIZE(ibmvnic_stats) + + adapter->req_tx_queues * NUM_TX_STATS + + adapter->req_rx_queues * NUM_RX_STATS; default: return -EOPNOTSUPP; } @@ -1744,7 +1862,7 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev, { struct ibmvnic_adapter *adapter = netdev_priv(dev); union ibmvnic_crq crq; - int i; + int i, j; memset(&crq, 0, sizeof(crq)); crq.request_statistics.first = IBMVNIC_CRQ_CMD; @@ -1759,7 +1877,26 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev, wait_for_completion(&adapter->stats_done); for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++) - data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset); + data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter, + ibmvnic_stats[i].offset)); + + for (j = 0; j < adapter->req_tx_queues; j++) { + data[i] = adapter->tx_stats_buffers[j].packets; + i++; + data[i] = adapter->tx_stats_buffers[j].bytes; + i++; + data[i] = adapter->tx_stats_buffers[j].dropped_packets; + i++; + } + + for (j = 0; j < adapter->req_rx_queues; j++) { + data[i] = adapter->rx_stats_buffers[j].packets; + i++; + data[i] = adapter->rx_stats_buffers[j].bytes; + i++; + data[i] = adapter->rx_stats_buffers[j].interrupts; + i++; + } } static const struct ethtool_ops ibmvnic_ethtool_ops = { @@ -1768,6 +1905,7 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = { .set_msglevel = ibmvnic_set_msglevel, .get_link = ibmvnic_get_link, .get_ringparam = ibmvnic_get_ringparam, + .get_channels = ibmvnic_get_channels, .get_strings = ibmvnic_get_strings, .get_sset_count = ibmvnic_get_sset_count, .get_ethtool_stats = ibmvnic_get_ethtool_stats, @@ -1800,12 +1938,14 @@ static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter) int i, rc; for (i = 0; i < adapter->req_tx_queues; i++) { + netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i); rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]); if (rc) return rc; } for (i = 0; i < adapter->req_rx_queues; i++) { + netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i); rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]); if (rc) return rc; @@ -1909,6 +2049,8 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter) if (!adapter->tx_scrq[i]) continue; + netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n", + i); if (adapter->tx_scrq[i]->irq) { 
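+			/* unhook the irq handler before the queue itself is freed */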
free_irq(adapter->tx_scrq[i]->irq, adapter->tx_scrq[i]); @@ -1928,6 +2070,8 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter) if (!adapter->rx_scrq[i]) continue; + netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n", + i); if (adapter->rx_scrq[i]->irq) { free_irq(adapter->rx_scrq[i]->irq, adapter->rx_scrq[i]); @@ -2064,6 +2208,8 @@ static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance) struct ibmvnic_sub_crq_queue *scrq = instance; struct ibmvnic_adapter *adapter = scrq->adapter; + adapter->rx_stats_buffers[scrq->scrq_num].interrupts++; + if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) { disable_scrq_irq(adapter, scrq); __napi_schedule(&adapter->napi[scrq->scrq_num]); @@ -2080,6 +2226,8 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter) int rc = 0; for (i = 0; i < adapter->req_tx_queues; i++) { + netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n", + i); scrq = adapter->tx_scrq[i]; scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); @@ -2101,6 +2249,8 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter) } for (i = 0; i < adapter->req_rx_queues; i++) { + netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n", + i); scrq = adapter->rx_scrq[i]; scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); if (!scrq->irq) { @@ -3739,31 +3889,35 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) do { rc = ibmvnic_init(adapter); - if (rc && rc != EAGAIN) { - free_netdev(netdev); - return rc; - } + if (rc && rc != EAGAIN) + goto ibmvnic_init_fail; } while (rc == EAGAIN); netdev->mtu = adapter->req_mtu - ETH_HLEN; rc = device_create_file(&dev->dev, &dev_attr_failover); - if (rc) { - free_netdev(netdev); - return rc; - } + if (rc) + goto ibmvnic_init_fail; rc = register_netdev(netdev); if (rc) { dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc); - device_remove_file(&dev->dev, &dev_attr_failover); - free_netdev(netdev); - return rc; + goto ibmvnic_register_fail; } dev_info(&dev->dev, "ibmvnic registered\n"); adapter->state = VNIC_PROBED; return 0; + +ibmvnic_register_fail: + device_remove_file(&dev->dev, &dev_attr_failover); + +ibmvnic_init_fail: + release_sub_crqs(adapter); + release_crq_queue(adapter); + free_netdev(netdev); + + return rc; } static int ibmvnic_remove(struct vio_dev *dev) @@ -3859,15 +4013,11 @@ static int ibmvnic_resume(struct device *dev) { struct net_device *netdev = dev_get_drvdata(dev); struct ibmvnic_adapter *adapter = netdev_priv(netdev); - int i; if (adapter->state != VNIC_OPEN) return 0; - /* kick the interrupt handlers just in case we lost an interrupt */ - for (i = 0; i < adapter->req_rx_queues; i++) - ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq, - adapter->rx_scrq[i]); + tasklet_schedule(&adapter->tasklet); return 0; } diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 8eff6e15f4bb..d02257ccc377 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -166,6 +166,20 @@ struct ibmvnic_statistics { u8 reserved[72]; } __packed __aligned(8); +#define NUM_TX_STATS 3 +struct ibmvnic_tx_queue_stats { + u64 packets; + u64 bytes; + u64 dropped_packets; +}; + +#define NUM_RX_STATS 3 +struct ibmvnic_rx_queue_stats { + u64 packets; + u64 bytes; + u64 interrupts; +}; + struct ibmvnic_acl_buffer { __be32 len; __be32 version; @@ -956,6 +970,9 @@ struct ibmvnic_adapter { int tx_send_failed; int tx_map_failed; + struct ibmvnic_tx_queue_stats *tx_stats_buffers; + struct ibmvnic_rx_queue_stats 
*rx_stats_buffers; + int phys_link_state; int logical_link_state; diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index 66bd5060a65b..d803b1a12349 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -100,6 +100,10 @@ struct e1000_hw; #define E1000_DEV_ID_PCH_CNP_I219_V6 0x15BE #define E1000_DEV_ID_PCH_CNP_I219_LM7 0x15BB #define E1000_DEV_ID_PCH_CNP_I219_V7 0x15BC +#define E1000_DEV_ID_PCH_ICP_I219_LM8 0x15DF +#define E1000_DEV_ID_PCH_ICP_I219_V8 0x15E0 +#define E1000_DEV_ID_PCH_ICP_I219_LM9 0x15E1 +#define E1000_DEV_ID_PCH_ICP_I219_V9 0x15E2 #define E1000_REVISION_4 4 diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 68ea8b4555ab..d6d4ed7acf03 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -2437,6 +2437,8 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) if (hw->phy.revision < 2) { e1000e_phy_sw_reset(hw); ret_val = e1e_wphy(hw, MII_BMCR, 0x3140); + if (ret_val) + return ret_val; } } diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 2dcb5463d9b8..327dfe5bedc0 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -7544,6 +7544,10 @@ static const struct pci_device_id e1000_pci_tbl[] = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_V6), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_LM7), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_V7), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_LM8), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_V8), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_LM9), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_V9), board_pch_cnp }, { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ }; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 5e37387c7082..e69d49d91d67 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1265,15 +1265,17 @@ err_queueing_scheme: return err; } -static int __fm10k_setup_tc(struct net_device *dev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *tc) +static int __fm10k_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) { - if (tc->type != TC_SETUP_MQPRIO) - return -EINVAL; + struct tc_mqprio_qopt *mqprio = type_data; + + if (type != TC_SETUP_MQPRIO) + return -EOPNOTSUPP; - tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - return fm10k_setup_tc(dev, tc->mqprio->num_tc); + return fm10k_setup_tc(dev, mqprio->num_tc); } static void fm10k_assign_l2_accel(struct fm10k_intfc *interface, diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 9692a5294fa3..1d29152256fe 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1091,7 +1091,7 @@ static void i40e_get_regs(struct net_device *netdev, struct ethtool_regs *regs, struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; u32 *reg_buf = p; - int i, j, ri; + unsigned int i, j, ri; u32 reg; /* Tell ethtool which driver-version-specific regs output we have. 
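The int-to-unsigned conversions in these i40e ethtool hunks keep the loop indices the same signedness as the unsigned queue and register counts they are compared against. A minimal, self-contained sketch of the pitfall being avoided (illustrative userspace C, not driver code; the names are invented for the example):

	#include <stdio.h>

	int main(void)
	{
		unsigned int num_queues = 4;	/* stand-in for an unsigned hardware count */
		int i = -1;			/* a signed index gone negative */

		/* In "i < num_queues" the usual arithmetic conversions turn
		 * the signed -1 into UINT_MAX, so the comparison is false and
		 * the "in range" branch is silently skipped.
		 */
		if (i < num_queues)
			printf("in range\n");
		else
			printf("wrapped to %u\n", (unsigned int)i);
		return 0;
	}

Keeping index and bound the same signedness, as the hunks above do, makes such comparisons behave as written.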
@@ -1550,9 +1550,9 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, struct i40e_ring *tx_ring, *rx_ring; struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; + unsigned int j; int i = 0; char *p; - int j; struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi); unsigned int start; @@ -1637,7 +1637,7 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; char *p = (char *)data; - int i; + unsigned int i; switch (stringset) { case ETH_SS_TEST: diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 2db93d3f6d23..a7e5a76703e7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -4773,7 +4773,7 @@ static void i40e_detect_recover_hung(struct i40e_pf *pf) { struct net_device *netdev; struct i40e_vsi *vsi; - int i; + unsigned int i; /* Only for LAN VSI */ vsi = pf->vsi[pf->lan_vsi]; @@ -5656,16 +5656,17 @@ exit: return ret; } -static int __i40e_setup_tc(struct net_device *netdev, u32 handle, - u32 chain_index, __be16 proto, - struct tc_to_netdev *tc) +static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type, + void *type_data) { - if (tc->type != TC_SETUP_MQPRIO) - return -EINVAL; + struct tc_mqprio_qopt *mqprio = type_data; + + if (type != TC_SETUP_MQPRIO) + return -EOPNOTSUPP; - tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - return i40e_setup_tc(netdev, tc->mqprio->num_tc); + return i40e_setup_tc(netdev, mqprio->num_tc); } /** @@ -7520,6 +7521,18 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) i40e_flush(hw); } +static const char *i40e_tunnel_name(struct i40e_udp_port_config *port) +{ + switch (port->type) { + case UDP_TUNNEL_TYPE_VXLAN: + return "vxlan"; + case UDP_TUNNEL_TYPE_GENEVE: + return "geneve"; + default: + return "unknown"; + } +} + /** * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters * @pf: board private structure @@ -7565,14 +7578,14 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) ret = i40e_aq_del_udp_tunnel(hw, i, NULL); if (ret) { - dev_dbg(&pf->pdev->dev, - "%s %s port %d, index %d failed, err %s aq_err %s\n", - pf->udp_ports[i].type ? "vxlan" : "geneve", - port ? "add" : "delete", - port, i, - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, - pf->hw.aq.asq_last_status)); + dev_info(&pf->pdev->dev, + "%s %s port %d, index %d failed, err %s aq_err %s\n", + i40e_tunnel_name(&pf->udp_ports[i]), + port ? "add" : "delete", + port, i, + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); pf->udp_ports[i].port = 0; } } @@ -9589,6 +9602,7 @@ static int i40e_xdp(struct net_device *dev, return i40e_xdp_setup(vsi, xdp->prog); case XDP_QUERY_PROG: xdp->prog_attached = i40e_enabled_xdp_vsi(vsi); + xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0; return 0; default: return -EINVAL; @@ -12089,7 +12103,10 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state) wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? 
I40E_PFPM_WUFC_MAG_MASK : 0)); i40e_stop_misc_vector(pf); - + if (pf->msix_entries) { + synchronize_irq(pf->msix_entries[0].vector); + free_irq(pf->msix_entries[0].vector, pf); + } retval = pci_save_state(pdev); if (retval) return retval; @@ -12129,6 +12146,15 @@ static int i40e_resume(struct pci_dev *pdev) /* handling the reset will rebuild the device state */ if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) { clear_bit(__I40E_DOWN, pf->state); + if (pf->msix_entries) { + err = request_irq(pf->msix_entries[0].vector, + i40e_intr, 0, pf->int_name, pf); + if (err) { + dev_err(&pf->pdev->dev, + "request_irq for %s failed: %d\n", + pf->int_name, err); + } + } i40e_reset_and_rebuild(pf, false, false); } @@ -12168,12 +12194,14 @@ static int __init i40e_init_module(void) i40e_driver_string, i40e_driver_version_str); pr_info("%s: %s\n", i40e_driver_name, i40e_copyright); - /* we will see if single thread per module is enough for now, - * it can't be any worse than using the system workqueue which - * was already single threaded + /* There is no need to throttle the number of active tasks because + * each device limits its own task using a state bit for scheduling + * the service task, and the device tasks do not interfere with each + * other, so we don't set a max task limit. We must set WQ_MEM_RECLAIM + * since we need to be able to guarantee forward progress even under + * memory pressure. */ - i40e_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, - i40e_driver_name); + i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name); if (!i40e_wq) { pr_err("%s: Failed to create workqueue\n", i40e_driver_name); return -ENOMEM; diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 800bd55d0159..6fdecd70dcbc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -134,8 +134,25 @@ i40e_i40e_acquire_nvm_exit: **/ void i40e_release_nvm(struct i40e_hw *hw) { - if (!hw->nvm.blank_nvm_mode) - i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); + i40e_status ret_code = I40E_SUCCESS; + u32 total_delay = 0; + + if (hw->nvm.blank_nvm_mode) + return; + + ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); + + /* there are some rare cases when trying to release the resource + * results in an admin Q timeout, so handle them correctly + */ + while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) && + (total_delay < hw->aq.asq_cmd_timeout)) { + usleep_range(1000, 2000); + ret_code = i40e_aq_release_resource(hw, + I40E_NVM_RESOURCE_ID, + 0, NULL); + total_delay++; + } } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 1a0be835fa06..0129ed3b78ec 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -158,13 +158,12 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) { struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); - struct timespec64 now, then; + struct timespec64 now; - then = ns_to_timespec64(delta); mutex_lock(&pf->tmreg_lock); i40e_ptp_read(pf, &now); - now = timespec64_add(now, then); + timespec64_add_ns(&now, delta); i40e_ptp_write(pf, (const struct timespec64 *)&now); mutex_unlock(&pf->tmreg_lock); diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 2194960d5855..8a969d8f0790 100644 --- 
a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -860,7 +860,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, total_bytes); -#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) +#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2)) if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { /* Make sure that anybody stopping the queue after this @@ -2065,7 +2065,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); bool failure = false, xdp_xmit = false; - while (likely(total_rx_packets < budget)) { + while (likely(total_rx_packets < (unsigned int)budget)) { struct i40e_rx_buffer *rx_buffer; union i40e_rx_desc *rx_desc; struct xdp_buff xdp; @@ -2198,7 +2198,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) rx_ring->q_vector->rx.total_bytes += total_rx_bytes; /* guarantee a trip back through this routine if there was a failure */ - return failure ? budget : total_rx_packets; + return failure ? budget : (int)total_rx_packets; } static u32 i40e_buildreg_itr(const int type, const u16 itr) @@ -2453,9 +2453,15 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, hlen = (hdr.network[0] & 0x0F) << 2; l4_proto = hdr.ipv4->protocol; } else { - hlen = hdr.network - skb->data; - l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL); - hlen -= hdr.network - skb->data; + /* find the start of the innermost ipv6 header */ + unsigned int inner_hlen = hdr.network - skb->data; + unsigned int h_offset = inner_hlen; + + /* this function updates h_offset to the end of the header */ + l4_proto = + ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL); + /* hlen will contain our best estimate of the tcp header */ + hlen = h_offset - inner_hlen; } if (l4_proto != IPPROTO_TCP) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index ecbe40ea8ffe..979110d59f67 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1567,7 +1567,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) dev_err(&pf->pdev->dev, "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n", vf->vf_id); - ret = I40E_ERR_PARAM; + aq_ret = I40E_ERR_PARAM; goto err; } vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING; @@ -1741,16 +1741,14 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, NULL); } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) { hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { - aq_ret = 0; - if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) { - aq_ret = - i40e_aq_set_vsi_uc_promisc_on_vlan(hw, - vsi->seid, - alluni, - f->vlan, - NULL); - aq_err = pf->hw.aq.asq_last_status; - } + if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID) + continue; + aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, + vsi->seid, + alluni, + f->vlan, + NULL); + aq_err = pf->hw.aq.asq_last_status; if (aq_ret) dev_err(&pf->pdev->dev, "Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n", @@ -2764,7 +2762,6 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) spin_unlock_bh(&vsi->mac_filter_hash_lock); - dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id); /* program mac filter 
*/ if (i40e_sync_vsi_filters(vsi)) { dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); @@ -2772,7 +2769,16 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) goto error_param; } ether_addr_copy(vf->default_lan_addr.addr, mac); - vf->pf_set_mac = true; + + if (is_zero_ether_addr(mac)) { + vf->pf_set_mac = false; + dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id); + } else { + vf->pf_set_mac = true; + dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", + mac, vf_id); + } + /* Force the VF driver stop so it has to reload with new MAC address */ i40e_vc_disable_vf(pf, vf); dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n"); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h index 5e314fd3c016..a90737786c34 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h @@ -54,7 +54,7 @@ struct i40e_dma_mem { void *va; dma_addr_t pa; u32 size; -} __packed; +}; #define i40e_allocate_dma_mem(h, m, unused, s, a) \ i40evf_allocate_dma_mem_d(h, m, s, a) @@ -63,7 +63,7 @@ struct i40e_dma_mem { struct i40e_virt_mem { void *va; u32 size; -} __packed; +}; #define i40e_allocate_virt_mem(h, m, s) i40evf_allocate_virt_mem_d(h, m, s) #define i40e_free_virt_mem(h, m) i40evf_free_virt_mem_d(h, m) diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 12b02e530503..d91676ccf125 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -275,7 +275,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, total_bytes); -#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) +#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2)) if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { /* Make sure that anybody stopping the queue after this @@ -1299,7 +1299,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); bool failure = false; - while (likely(total_rx_packets < budget)) { + while (likely(total_rx_packets < (unsigned int)budget)) { struct i40e_rx_buffer *rx_buffer; union i40e_rx_desc *rx_desc; unsigned int size; @@ -1406,7 +1406,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) rx_ring->q_vector->rx.total_bytes += total_rx_bytes; /* guarantee a trip back through this routine if there was a failure */ - return failure ? budget : total_rx_packets; + return failure ? 
budget : (int)total_rx_packets; } static u32 i40e_buildreg_itr(const int type, const u16 itr) diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index 6cc92089fecb..7901cc85cbe5 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -39,6 +39,17 @@ #include <linux/tcp.h> #include <linux/sctp.h> #include <linux/ipv6.h> +#include <linux/kernel.h> +#include <linux/bitops.h> +#include <linux/timer.h> +#include <linux/workqueue.h> +#include <linux/delay.h> +#include <linux/gfp.h> +#include <linux/skbuff.h> +#include <linux/dma-mapping.h> +#include <linux/etherdevice.h> +#include <linux/socket.h> +#include <linux/jiffies.h> #include <net/ip6_checksum.h> #include <net/udp.h> diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index 9bb2cc7dd4e4..76fd89c1dbb2 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -165,7 +165,7 @@ static void i40evf_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct i40evf_adapter *adapter = netdev_priv(netdev); - int i, j; + unsigned int i, j; char *p; for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) { @@ -197,7 +197,7 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data) int i; if (sset == ETH_SS_STATS) { - for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) { + for (i = 0; i < (int)I40EVF_GLOBAL_STATS_LEN; i++) { memcpy(p, i40evf_gstrings_stats[i].stat_string, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 7c213a347909..93536b9fc629 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -1957,8 +1957,8 @@ static void i40evf_adminq_task(struct work_struct *work) container_of(work, struct i40evf_adapter, adminq_task); struct i40e_hw *hw = &adapter->hw; struct i40e_arq_event_info event; - struct virtchnl_msg *v_msg; - i40e_status ret; + enum virtchnl_ops v_op; + i40e_status ret, v_ret; u32 val, oldval; u16 pending; @@ -1970,15 +1970,15 @@ static void i40evf_adminq_task(struct work_struct *work) if (!event.msg_buf) goto out; - v_msg = (struct virtchnl_msg *)&event.desc; do { ret = i40evf_clean_arq_element(hw, &event, &pending); - if (ret || !v_msg->v_opcode) + v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); + v_ret = (i40e_status)le32_to_cpu(event.desc.cookie_low); + + if (ret || !v_op) break; /* No event to process or error cleaning ARQ */ - i40evf_virtchnl_completion(adapter, v_msg->v_opcode, - (i40e_status)v_msg->v_retval, - event.msg_buf, + i40evf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf, event.msg_len); if (pending != 0) memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE); diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 4a50870e0fa7..c37cc8bccf47 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -340,6 +340,9 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; break; + case BCM54616_E_PHY_ID: + phy->type = e1000_phy_bcm54616; + break; default: ret_val = -E1000_ERR_PHY; goto out; @@ -1659,6 +1662,9 @@ static s32 
igb_setup_copper_link_82575(struct e1000_hw *hw) case e1000_phy_82580: ret_val = igb_copper_link_setup_82580(hw); break; + case e1000_phy_bcm54616: + ret_val = 0; + break; default: ret_val = -E1000_ERR_PHY; break; diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index d8517779439b..1de82f247312 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -889,6 +889,7 @@ #define I210_I_PHY_ID 0x01410C00 #define M88E1543_E_PHY_ID 0x01410EA0 #define M88E1512_E_PHY_ID 0x01410DD0 +#define BCM54616_E_PHY_ID 0x03625D10 /* M88E1000 Specific Registers */ #define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h index 2fb2213cd562..6c9485ab4b57 100644 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -128,6 +128,7 @@ enum e1000_phy_type { e1000_phy_ife, e1000_phy_82580, e1000_phy_i210, + e1000_phy_bcm54616, }; enum e1000_bus_type { @@ -491,13 +492,16 @@ struct e1000_fc_info { struct e1000_mbx_operations { s32 (*init_params)(struct e1000_hw *hw); - s32 (*read)(struct e1000_hw *, u32 *, u16, u16); - s32 (*write)(struct e1000_hw *, u32 *, u16, u16); - s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16); - s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16); - s32 (*check_for_msg)(struct e1000_hw *, u16); - s32 (*check_for_ack)(struct e1000_hw *, u16); - s32 (*check_for_rst)(struct e1000_hw *, u16); + s32 (*read)(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id, + bool unlock); + s32 (*write)(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id); + s32 (*read_posted)(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id); + s32 (*write_posted)(struct e1000_hw *hw, u32 *msg, u16 size, + u16 mbx_id); + s32 (*check_for_msg)(struct e1000_hw *hw, u16 mbx_id); + s32 (*check_for_ack)(struct e1000_hw *hw, u16 mbx_id); + s32 (*check_for_rst)(struct e1000_hw *hw, u16 mbx_id); + s32 (*unlock)(struct e1000_hw *hw, u16 mbx_id); }; struct e1000_mbx_stats { diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c index 00e263f0c030..bffd58f7b2a1 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.c +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c @@ -32,7 +32,8 @@ * * returns SUCCESS if it successfully read message from buffer **/ -s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id, + bool unlock) { struct e1000_mbx_info *mbx = &hw->mbx; s32 ret_val = -E1000_ERR_MBX; @@ -42,7 +43,7 @@ s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) size = mbx->size; if (mbx->ops.read) - ret_val = mbx->ops.read(hw, msg, size, mbx_id); + ret_val = mbx->ops.read(hw, msg, size, mbx_id, unlock); return ret_val; } @@ -125,6 +126,24 @@ s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id) } /** + * igb_unlock_mbx - unlock the mailbox + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the mailbox was unlocked or else ERR_MBX + **/ +s32 igb_unlock_mbx(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (mbx->ops.unlock) + ret_val = mbx->ops.unlock(hw, mbx_id); + + return ret_val; +} + +/** * igb_poll_for_msg - Wait for message notification * @hw: pointer to the HW structure * @mbx_id: id of mailbox 
to write @@ -204,7 +223,7 @@ static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, ret_val = igb_poll_for_msg(hw, mbx_id); if (!ret_val) - ret_val = mbx->ops.read(hw, msg, size, mbx_id); + ret_val = mbx->ops.read(hw, msg, size, mbx_id, true); out: return ret_val; } @@ -341,6 +360,26 @@ static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) } /** + * igb_release_mbx_lock_pf - release mailbox lock + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * return SUCCESS if we released the mailbox lock + **/ +static s32 igb_release_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) +{ + u32 p2v_mailbox; + + /* drop PF lock of mailbox, if set */ + p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); + if (p2v_mailbox & E1000_P2VMAILBOX_PFU) + wr32(E1000_P2VMAILBOX(vf_number), + p2v_mailbox & ~E1000_P2VMAILBOX_PFU); + + return 0; +} + +/** * igb_write_mbx_pf - Places a message in the mailbox * @hw: pointer to the HW structure * @msg: The message buffer @@ -385,13 +424,14 @@ out_no_write: * @msg: The message buffer * @size: Length of buffer * @vf_number: the VF index + * @unlock: unlock the mailbox when done? * * This function copies a message from the mailbox buffer to the caller's * memory buffer. The presumption is that the caller knows that there was * a message due to a VF request so no polling for message is needed. **/ static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, - u16 vf_number) + u16 vf_number, bool unlock) { s32 ret_val; u16 i; @@ -405,8 +445,12 @@ static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, for (i = 0; i < size; i++) msg[i] = array_rd32(E1000_VMBMEM(vf_number), i); - /* Acknowledge the message and release buffer */ - wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK); + /* Acknowledge the message and release mailbox lock (or not) */ + if (unlock) + wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK); + else + wr32(E1000_P2VMAILBOX(vf_number), + E1000_P2VMAILBOX_ACK | E1000_P2VMAILBOX_PFU); /* update stats */ hw->mbx.stats.msgs_rx++; @@ -437,6 +481,7 @@ s32 igb_init_mbx_params_pf(struct e1000_hw *hw) mbx->ops.check_for_msg = igb_check_for_msg_pf; mbx->ops.check_for_ack = igb_check_for_ack_pf; mbx->ops.check_for_rst = igb_check_for_rst_pf; + mbx->ops.unlock = igb_release_mbx_lock_pf; mbx->stats.msgs_tx = 0; mbx->stats.msgs_rx = 0; diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h index 3e7fed73df15..a62b08e1572e 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.h +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h @@ -67,11 +67,13 @@ #define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ -s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16); -s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16); -s32 igb_check_for_msg(struct e1000_hw *, u16); -s32 igb_check_for_ack(struct e1000_hw *, u16); -s32 igb_check_for_rst(struct e1000_hw *, u16); -s32 igb_init_mbx_params_pf(struct e1000_hw *); +s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id, + bool unlock); +s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id); +s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id); +s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id); +s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id); +s32 igb_unlock_mbx(struct e1000_hw *hw, u16 mbx_id); +s32 igb_init_mbx_params_pf(struct e1000_hw *hw); #endif /* _E1000_MBX_H_ */ diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c 
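The e1000_mbx changes above separate acknowledging a VF message from releasing the mailbox lock: igb_read_mbx() gains an unlock parameter, and a PF that reads with unlock=false keeps the E1000_P2VMAILBOX_PFU bit set until it either writes its reply or calls the new igb_unlock_mbx(). A minimal user-space model of that hand-off, assuming only the flag semantics visible in the hunks above (the register variable and pf_* helpers here are illustrative stand-ins, not driver code):

#include <stdbool.h>
#include <stdio.h>

#define PFU 0x1	/* "PF owns the buffer" bit, like E1000_P2VMAILBOX_PFU */
#define ACK 0x2	/* message acknowledged, like E1000_P2VMAILBOX_ACK */

static unsigned int p2v_mailbox;	/* toy stand-in for the P2VMAILBOX register */

/* like igb_read_mbx_pf(): ack the message, optionally keep the lock */
static void pf_read_msg(bool unlock)
{
	p2v_mailbox = unlock ? ACK : (ACK | PFU);
}

/* like writing the reply: in this toy model the reply drops the lock */
static void pf_write_reply(void)
{
	p2v_mailbox &= ~PFU;
}

/* like the new igb_unlock_mbx(): error paths drop the lock without replying */
static void pf_unlock(void)
{
	p2v_mailbox &= ~PFU;
}

int main(void)
{
	pf_read_msg(false);
	printf("locked after read:  %s\n", (p2v_mailbox & PFU) ? "yes" : "no");
	pf_write_reply();
	printf("locked after reply: %s\n", (p2v_mailbox & PFU) ? "yes" : "no");
	pf_unlock();	/* no-op here; shown for the error path */
	return 0;
}

This is the invariant the igb_main.c hunks that follow enforce: every early return in igb_rcv_msg_from_vf() now goes through an unlock label (or a reply write) instead of leaving the mailbox locked.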
index ec62410b035a..fd4a46b03cc8 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1791,6 +1791,8 @@ void igb_down(struct igb_adapter *adapter) wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); /* flush and sleep below */ + igb_nfc_filter_exit(adapter); + netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); @@ -3317,8 +3319,6 @@ static int __igb_close(struct net_device *netdev, bool suspending) igb_down(adapter); igb_free_irq(adapter); - igb_nfc_filter_exit(adapter); - igb_free_all_tx_resources(adapter); igb_free_all_rx_resources(adapter); @@ -5380,7 +5380,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); - if (!test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS, + if (adapter->tstamp_config.tx_type & HWTSTAMP_TX_ON && + !test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state)) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; tx_flags |= IGB_TX_FLAGS_TSTAMP; @@ -5745,8 +5746,6 @@ static void igb_tsync_interrupt(struct igb_adapter *adapter) event.type = PTP_CLOCK_PPS; if (adapter->ptp_caps.pps) ptp_clock_event(adapter->ptp_clock, &event); - else - dev_err(&adapter->pdev->dev, "unexpected SYS WRAP"); ack |= TSINTR_SYS_WRAP; } @@ -6676,32 +6675,33 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) struct vf_data_storage *vf_data = &adapter->vf_data[vf]; s32 retval; - retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf); + retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf, false); if (retval) { /* if receive failed revoke VF CTS stats and restart init */ dev_err(&pdev->dev, "Error receiving message from VF\n"); vf_data->flags &= ~IGB_VF_FLAG_CTS; if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) - return; + goto unlock; goto out; } /* this is a message we already processed, do nothing */ if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK)) - return; + goto unlock; /* until the vf completes a reset it should not be * allowed to start any configuration. 
*/ if (msgbuf[0] == E1000_VF_RESET) { + /* unlocks mailbox */ igb_vf_reset_msg(adapter, vf); return; } if (!(vf_data->flags & IGB_VF_FLAG_CTS)) { if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) - return; + goto unlock; retval = -1; goto out; } @@ -6742,7 +6742,12 @@ out: else msgbuf[0] |= E1000_VT_MSGTYPE_ACK; + /* unlocks mailbox */ igb_write_mbx(hw, msgbuf, 1, vf); + return; + +unlock: + igb_unlock_mbx(hw, vf); } static void igb_msg_task(struct igb_adapter *adapter) diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c index 34faa113a8a0..a127688e83e6 100644 --- a/drivers/net/ethernet/intel/igbvf/ethtool.c +++ b/drivers/net/ethernet/intel/igbvf/ethtool.c @@ -296,8 +296,12 @@ static int igbvf_link_test(struct igbvf_adapter *adapter, u64 *data) struct e1000_hw *hw = &adapter->hw; *data = 0; + spin_lock_bh(&hw->mbx_lock); + hw->mac.ops.check_for_link(hw); + spin_unlock_bh(&hw->mbx_lock); + if (!(er32(STATUS) & E1000_STATUS_LU)) *data = 1; diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c index 01752f44ace2..c9a441632e9f 100644 --- a/drivers/net/ethernet/intel/igbvf/mbx.c +++ b/drivers/net/ethernet/intel/igbvf/mbx.c @@ -264,6 +264,8 @@ static s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size) s32 err; u16 i; + WARN_ON_ONCE(!spin_is_locked(&hw->mbx_lock)); + /* lock the mailbox to prevent pf/vf race condition */ err = e1000_obtain_mbx_lock_vf(hw); if (err) @@ -300,6 +302,8 @@ static s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size) s32 err; u16 i; + WARN_ON_ONCE(!spin_is_locked(&hw->mbx_lock)); + /* lock the mailbox to prevent pf/vf race condition */ err = e1000_obtain_mbx_lock_vf(hw); if (err) diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 1b9cbbe88f6f..1ed556911b14 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -1235,7 +1235,12 @@ static void igbvf_set_rlpml(struct igbvf_adapter *adapter) struct e1000_hw *hw = &adapter->hw; max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE; + + spin_lock_bh(&hw->mbx_lock); + e1000_rlpml_set_vf(hw, max_frame_size); + + spin_unlock_bh(&hw->mbx_lock); } static int igbvf_vlan_rx_add_vid(struct net_device *netdev, @@ -1244,10 +1249,16 @@ static int igbvf_vlan_rx_add_vid(struct net_device *netdev, struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + spin_lock_bh(&hw->mbx_lock); + if (hw->mac.ops.set_vfta(hw, vid, true)) { dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid); + spin_unlock_bh(&hw->mbx_lock); return -EINVAL; } + + spin_unlock_bh(&hw->mbx_lock); + set_bit(vid, adapter->active_vlans); return 0; } @@ -1258,11 +1269,17 @@ static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + spin_lock_bh(&hw->mbx_lock); + if (hw->mac.ops.set_vfta(hw, vid, false)) { dev_err(&adapter->pdev->dev, "Failed to remove vlan id %d\n", vid); + spin_unlock_bh(&hw->mbx_lock); return -EINVAL; } + + spin_unlock_bh(&hw->mbx_lock); + clear_bit(vid, adapter->active_vlans); return 0; } @@ -1428,7 +1445,11 @@ static void igbvf_set_multi(struct net_device *netdev) netdev_for_each_mc_addr(ha, netdev) memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); + spin_lock_bh(&hw->mbx_lock); + hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0); + + spin_unlock_bh(&hw->mbx_lock); kfree(mta_list); } @@ 
-1449,16 +1470,24 @@ static int igbvf_set_uni(struct net_device *netdev) return -ENOSPC; } + spin_lock_bh(&hw->mbx_lock); + /* Clear all unicast MAC filters */ hw->mac.ops.set_uc_addr(hw, E1000_VF_MAC_FILTER_CLR, NULL); + spin_unlock_bh(&hw->mbx_lock); + if (!netdev_uc_empty(netdev)) { struct netdev_hw_addr *ha; /* Add MAC filters one by one */ netdev_for_each_uc_addr(ha, netdev) { + spin_lock_bh(&hw->mbx_lock); + hw->mac.ops.set_uc_addr(hw, E1000_VF_MAC_FILTER_ADD, ha->addr); + + spin_unlock_bh(&hw->mbx_lock); udelay(200); } } @@ -1503,12 +1532,16 @@ static void igbvf_reset(struct igbvf_adapter *adapter) struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; + spin_lock_bh(&hw->mbx_lock); + /* Allow time for pending master requests to run */ if (mac->ops.reset_hw(hw)) dev_err(&adapter->pdev->dev, "PF still resetting\n"); mac->ops.init_hw(hw); + spin_unlock_bh(&hw->mbx_lock); + if (is_valid_ether_addr(adapter->hw.mac.addr)) { memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); @@ -1643,6 +1676,7 @@ static int igbvf_sw_init(struct igbvf_adapter *adapter) igbvf_irq_disable(adapter); spin_lock_init(&adapter->stats_lock); + spin_lock_init(&adapter->hw.mbx_lock); set_bit(__IGBVF_DOWN, &adapter->state); return 0; @@ -1786,8 +1820,12 @@ static int igbvf_set_mac(struct net_device *netdev, void *p) memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + spin_lock_bh(&hw->mbx_lock); + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + spin_unlock_bh(&hw->mbx_lock); + if (!ether_addr_equal(addr->sa_data, hw->mac.addr)) return -EADDRNOTAVAIL; @@ -1858,7 +1896,12 @@ static bool igbvf_has_link(struct igbvf_adapter *adapter) if (test_bit(__IGBVF_DOWN, &adapter->state)) return false; + spin_lock_bh(&hw->mbx_lock); + ret_val = hw->mac.ops.check_for_link(hw); + + spin_unlock_bh(&hw->mbx_lock); + link_active = !hw->mac.get_link_status; /* if check for link returns error we will need to reset */ @@ -2808,6 +2851,8 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; + spin_lock_bh(&hw->mbx_lock); + /*reset the controller to put the device in a known good state */ err = hw->mac.ops.reset_hw(hw); if (err) { @@ -2824,6 +2869,8 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->addr_len); } + spin_unlock_bh(&hw->mbx_lock); + if (!is_valid_ether_addr(netdev->dev_addr)) { dev_info(&pdev->dev, "Assigning random MAC address.\n"); eth_hw_addr_random(netdev); diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c index 528be116184e..9577ccf4b26a 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.c +++ b/drivers/net/ethernet/intel/igbvf/vf.c @@ -149,7 +149,7 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw) msgbuf[0] = E1000_VF_RESET; mbx->ops.write_posted(hw, msgbuf, 1); - msleep(10); + mdelay(10); /* set our "perm_addr" based on info provided by PF */ ret_val = mbx->ops.read_posted(hw, msgbuf, 3); @@ -230,6 +230,7 @@ static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u16 *hash_list = (u16 *)&msgbuf[1]; u32 hash_value; u32 cnt, i; + s32 ret_val; /* Each entry in the list uses 1 16 bit word. 
We have 30 * 16 bit words available in our HW msg buffer (minus 1 for the @@ -250,7 +251,9 @@ static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, mc_addr_list += ETH_ALEN; } - mbx->ops.write_posted(hw, msgbuf, E1000_VFMAILBOX_SIZE); + ret_val = mbx->ops.write_posted(hw, msgbuf, E1000_VFMAILBOX_SIZE); + if (!ret_val) + mbx->ops.read_posted(hw, msgbuf, 1); } /** @@ -293,11 +296,14 @@ void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size) { struct e1000_mbx_info *mbx = &hw->mbx; u32 msgbuf[2]; + s32 ret_val; msgbuf[0] = E1000_VF_SET_LPE; msgbuf[1] = max_size; - mbx->ops.write_posted(hw, msgbuf, 2); + ret_val = mbx->ops.write_posted(hw, msgbuf, 2); + if (!ret_val) + mbx->ops.read_posted(hw, msgbuf, 1); } /** diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h index 4cf78b0dec50..d213eefb6169 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.h +++ b/drivers/net/ethernet/intel/igbvf/vf.h @@ -245,6 +245,7 @@ struct e1000_hw { struct e1000_mac_info mac; struct e1000_mbx_info mbx; + spinlock_t mbx_lock; /* serializes mailbox ops */ union { struct e1000_dev_spec_vf vf; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 4e35e7017f3d..2c19070d2a0b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -79,16 +79,28 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) switch (hw->phy.media_type) { case ixgbe_media_type_fiber: - hw->mac.ops.check_link(hw, &speed, &link_up, false); - /* if link is down, assume supported */ - if (link_up) - supported = speed == IXGBE_LINK_SPEED_1GB_FULL ? + /* flow control autoneg black list */ + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_SFP: + case IXGBE_DEV_ID_X550EM_A_SFP_N: + supported = false; + break; + default: + hw->mac.ops.check_link(hw, &speed, &link_up, false); + /* if link is down, assume supported */ + if (link_up) + supported = speed == IXGBE_LINK_SPEED_1GB_FULL ? 
true : false; - else - supported = true; + else + supported = true; + } + break; case ixgbe_media_type_backplane: - supported = true; + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI) + supported = false; + else + supported = true; break; case ixgbe_media_type_copper: /* only some copper devices support flow control autoneg */ @@ -111,6 +123,10 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) break; } + if (!supported) + hw_dbg(hw, "Device %x does not support flow control autoneg\n", + hw->device_id); + return supported; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index b45fdc98033d..f1bfae0c41d0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -1018,8 +1018,12 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx]; struct ixgbe_ring *ring; - ixgbe_for_each_ring(ring, q_vector->tx) - adapter->tx_ring[ring->queue_index] = NULL; + ixgbe_for_each_ring(ring, q_vector->tx) { + if (ring_is_xdp(ring)) + adapter->xdp_ring[ring->queue_index] = NULL; + else + adapter->tx_ring[ring->queue_index] = NULL; + } ixgbe_for_each_ring(ring, q_vector->rx) adapter->rx_ring[ring->queue_index] = NULL; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index f1dbdf26d8e1..c6b132476de4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -386,7 +386,7 @@ u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg) if (ixgbe_removed(reg_addr)) return IXGBE_FAILED_READ_REG; if (unlikely(hw->phy.nw_mng_if_sel & - IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M)) { + IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE)) { struct ixgbe_adapter *adapter; int i; @@ -2214,7 +2214,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring, struct xdp_buff *xdp) { - int result = IXGBE_XDP_PASS; + int err, result = IXGBE_XDP_PASS; struct bpf_prog *xdp_prog; u32 act; @@ -2231,6 +2231,13 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, case XDP_TX: result = ixgbe_xmit_xdp_ring(adapter, xdp); break; + case XDP_REDIRECT: + err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); + if (!err) + result = IXGBE_XDP_TX; + else + result = IXGBE_XDP_CONSUMED; + break; default: bpf_warn_invalid_xdp_action(act); /* fallthrough */ @@ -2408,6 +2415,8 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, */ wmb(); writel(ring->next_to_use, ring->tail); + + xdp_do_flush_map(); } u64_stats_update_begin(&rx_ring->syncp); @@ -5810,6 +5819,9 @@ void ixgbe_down(struct ixgbe_adapter *adapter) usleep_range(10000, 20000); + /* synchronize_sched() needed for pending XDP buffers to drain */ + if (adapter->xdp_ring[0]) + synchronize_sched(); netif_tx_stop_all_queues(netdev); /* call carrier off first to avoid false dev_watchdog timeouts */ @@ -8839,7 +8851,6 @@ static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter, } static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter, - __be16 protocol, struct tc_cls_u32_offload *cls) { u32 uhtid = TC_U32_USERHTID(cls->hnode.handle); @@ -8941,7 +8952,7 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter, LIST_HEAD(actions); int err; - if (tc_no_actions(exts)) + if (!tcf_exts_has_actions(exts)) return -EINVAL; tcf_exts_to_list(exts, &actions); @@ -9025,9 +9036,9 @@ static int ixgbe_clsu32_build_input(struct 
ixgbe_fdir_filter *input, } static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, - __be16 protocol, struct tc_cls_u32_offload *cls) { + __be16 protocol = cls->common.protocol; u32 loc = cls->knode.handle & 0xfffff; struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_mat_field *field_ptr; @@ -9214,41 +9225,49 @@ free_jump: return err; } -static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *tc) +static int ixgbe_setup_tc_cls_u32(struct net_device *dev, + struct tc_cls_u32_offload *cls_u32) { struct ixgbe_adapter *adapter = netdev_priv(dev); - if (chain_index) + if (TC_H_MAJ(cls_u32->common.handle) != TC_H_MAJ(TC_H_INGRESS) || + cls_u32->common.chain_index) return -EOPNOTSUPP; - if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) && - tc->type == TC_SETUP_CLSU32) { - switch (tc->cls_u32->command) { - case TC_CLSU32_NEW_KNODE: - case TC_CLSU32_REPLACE_KNODE: - return ixgbe_configure_clsu32(adapter, - proto, tc->cls_u32); - case TC_CLSU32_DELETE_KNODE: - return ixgbe_delete_clsu32(adapter, tc->cls_u32); - case TC_CLSU32_NEW_HNODE: - case TC_CLSU32_REPLACE_HNODE: - return ixgbe_configure_clsu32_add_hnode(adapter, proto, - tc->cls_u32); - case TC_CLSU32_DELETE_HNODE: - return ixgbe_configure_clsu32_del_hnode(adapter, - tc->cls_u32); - default: - return -EINVAL; - } + switch (cls_u32->command) { + case TC_CLSU32_NEW_KNODE: + case TC_CLSU32_REPLACE_KNODE: + return ixgbe_configure_clsu32(adapter, cls_u32); + case TC_CLSU32_DELETE_KNODE: + return ixgbe_delete_clsu32(adapter, cls_u32); + case TC_CLSU32_NEW_HNODE: + case TC_CLSU32_REPLACE_HNODE: + return ixgbe_configure_clsu32_add_hnode(adapter, cls_u32); + case TC_CLSU32_DELETE_HNODE: + return ixgbe_configure_clsu32_del_hnode(adapter, cls_u32); + default: + return -EOPNOTSUPP; } +} - if (tc->type != TC_SETUP_MQPRIO) - return -EINVAL; - - tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; +static int ixgbe_setup_tc_mqprio(struct net_device *dev, + struct tc_mqprio_qopt *mqprio) +{ + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + return ixgbe_setup_tc(dev, mqprio->num_tc); +} - return ixgbe_setup_tc(dev, tc->mqprio->num_tc); +static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + switch (type) { + case TC_SETUP_CLSU32: + return ixgbe_setup_tc_cls_u32(dev, type_data); + case TC_SETUP_MQPRIO: + return ixgbe_setup_tc_mqprio(dev, type_data); + default: + return -EOPNOTSUPP; + } } #ifdef CONFIG_PCI_IOV @@ -9823,6 +9842,53 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_xdp *xdp) } } +static int ixgbe_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_ring *ring; + int err; + + if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state))) + return -EINVAL; + + /* During program transitions it's possible adapter->xdp_prog is assigned + * but ring has not been configured yet. In this case simply abort xmit. + */ + ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL; + if (unlikely(!ring)) + return -EINVAL; + + err = ixgbe_xmit_xdp_ring(adapter, xdp); + if (err != IXGBE_XDP_TX) + return -ENOMEM; + + return 0; +} + +static void ixgbe_xdp_flush(struct net_device *dev) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_ring *ring; + + /* It's possible the device went down between xdp xmit and flush so + * we need to ensure device is still up. + */ + if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state))) + return; + + ring = adapter->xdp_prog ?
adapter->xdp_ring[smp_processor_id()] : NULL; + if (unlikely(!ring)) + return; + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. + */ + wmb(); + writel(ring->next_to_use, ring->tail); + + return; +} + static const struct net_device_ops ixgbe_netdev_ops = { .ndo_open = ixgbe_open, .ndo_stop = ixgbe_close, @@ -9869,6 +9935,8 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port, .ndo_features_check = ixgbe_features_check, .ndo_xdp = ixgbe_xdp, + .ndo_xdp_xmit = ixgbe_xdp_xmit, + .ndo_xdp_flush = ixgbe_xdp_flush, }; /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 0760bd7eeb01..112d24c6c9ce 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -679,8 +679,9 @@ update_vlvfb: static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, int vf, int index, unsigned char *mac_addr) { - struct list_head *pos; struct vf_macvlans *entry; + struct list_head *pos; + int retval = 0; if (index <= 1) { list_for_each(pos, &adapter->vf_mvs.l) { @@ -721,13 +722,15 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, if (!entry || !entry->free) return -ENOSPC; + retval = ixgbe_add_mac_filter(adapter, mac_addr, vf); + if (retval < 0) + return retval; + entry->free = false; entry->is_macvlan = true; entry->vf = vf; memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN); - ixgbe_add_mac_filter(adapter, mac_addr, vf); - return 0; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 9c2460c5ef1b..ffa0ee5cd0f5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -3778,8 +3778,8 @@ struct ixgbe_info { #define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_1G BIT(19) #define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G BIT(20) #define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10G BIT(21) -#define IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M BIT(23) -#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24) +#define IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE BIT(25) +#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24) /* X552 only */ #define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3 #define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD \ (0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 72d84a065e34..19fbb2f28ea4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -1555,9 +1555,14 @@ static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw) **/ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) { + struct ixgbe_mac_info *mac = &hw->mac; s32 status; u32 reg_val; + /* iXFI is only supported with X552 */ + if (mac->type != ixgbe_mac_X550EM_x) + return IXGBE_ERR_LINK_SETUP; + /* Disable AN and force speed to 10G Serial. */ status = ixgbe_read_iosf_sb_reg_x550(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), @@ -1874,8 +1879,10 @@ static s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw, else force_speed = IXGBE_LINK_SPEED_1GB_FULL; - /* If internal link mode is XFI, then setup XFI internal link. */ - if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { + /* If X552 and internal link mode is XFI, then setup XFI internal link. 
+ */ + if (hw->mac.type == ixgbe_mac_X550EM_x && + !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { status = ixgbe_setup_ixfi_x550em(hw, &force_speed); if (status) @@ -2404,17 +2411,30 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc); /* Enable link status change alarm */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, - MDIO_MMD_AN, ®); - if (status) - return status; - reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN; + /* Enable the LASI interrupts on X552 devices to receive notifications + * of the link configurations of the external PHY and correspondingly + * support the configuration of the internal iXFI link, since iXFI does + * not support auto-negotiation. This is not required for X553 devices + * having KR support, which performs auto-negotiations and which is used + * as the internal link to the external PHY. Hence adding a check here + * to avoid enabling LASI interrupts for X553 devices. + */ + if (hw->mac.type != ixgbe_mac_x550em_a) { + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, + MDIO_MMD_AN, ®); + if (status) + return status; - status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, - MDIO_MMD_AN, reg); - if (status) - return status; + reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN; + + status = hw->phy.ops.write_reg(hw, + IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, + MDIO_MMD_AN, reg); + if (status) + return status; + } /* Enable high temperature failure and global fault alarms */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, @@ -2615,7 +2635,8 @@ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw) if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) return IXGBE_ERR_CONFIG; - if (hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE) { + if (!(hw->mac.type == ixgbe_mac_X550EM_x && + !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE))) { speed = IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL; return ixgbe_setup_kr_speed_x550em(hw, speed); @@ -2822,7 +2843,7 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw) { bool pause, asm_dir; u32 reg_val; - s32 rc; + s32 rc = 0; /* Validate the requested mode */ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { @@ -2865,32 +2886,37 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw) return IXGBE_ERR_CONFIG; } - if (hw->device_id != IXGBE_DEV_ID_X550EM_X_KR && - hw->device_id != IXGBE_DEV_ID_X550EM_A_KR && - hw->device_id != IXGBE_DEV_ID_X550EM_A_KR_L) - return 0; - - rc = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, - ®_val); - if (rc) - return rc; - - reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | - IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); - if (pause) - reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; - if (asm_dir) - reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; - rc = hw->mac.ops.write_iosf_sb_reg(hw, - IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, - reg_val); - - /* This device does not fully support AN. 
*/ - hw->fc.disable_fc_autoneg = true; + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: + rc = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, + ®_val); + if (rc) + return rc; + reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | + IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); + if (pause) + reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; + if (asm_dir) + reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; + rc = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, + reg_val); + + /* This device does not fully support AN. */ + hw->fc.disable_fc_autoneg = true; + break; + case IXGBE_DEV_ID_X550EM_X_XFI: + hw->fc.disable_fc_autoneg = true; + break; + default: + break; + } return rc; } diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 48d21c1e09f2..39bc8fbbdd65 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -120,6 +120,9 @@ #define MVPP2_TXQ_DESC_ADDR_REG 0x2084 #define MVPP2_TXQ_DESC_SIZE_REG 0x2088 #define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0 +#define MVPP2_TXQ_THRESH_REG 0x2094 +#define MVPP2_TXQ_THRESH_OFFSET 16 +#define MVPP2_TXQ_THRESH_MASK 0x3fff #define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090 #define MVPP2_TXQ_INDEX_REG 0x2098 #define MVPP2_TXQ_PREF_BUF_REG 0x209c @@ -183,9 +186,12 @@ #define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3 /* Interrupt Cause and Mask registers */ +#define MVPP2_ISR_TX_THRESHOLD_REG(port) (0x5140 + 4 * (port)) +#define MVPP2_MAX_ISR_TX_THRESHOLD 0xfffff0 + #define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq)) #define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0 -#define MVPP21_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq)) +#define MVPP21_ISR_RXQ_GROUP_REG(port) (0x5400 + 4 * (port)) #define MVPP22_ISR_RXQ_GROUP_INDEX_REG 0x5400 #define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf @@ -206,6 +212,7 @@ #define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port)) #define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff #define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000 +#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET 16 #define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24) #define MVPP2_CAUSE_FCS_ERR_MASK BIT(25) #define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26) @@ -372,6 +379,7 @@ /* Coalescing */ #define MVPP2_TXDONE_COAL_PKTS_THRESH 15 #define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL +#define MVPP2_TXDONE_COAL_USEC 1000 #define MVPP2_RX_COAL_PKTS 32 #define MVPP2_RX_COAL_USEC 100 @@ -685,7 +693,8 @@ enum mvpp2_prs_l3_cast { #define MVPP21_ADDR_SPACE_SZ 0 #define MVPP22_ADDR_SPACE_SZ SZ_64K -#define MVPP2_MAX_CPUS 4 +#define MVPP2_MAX_THREADS 8 +#define MVPP2_MAX_QVECS MVPP2_MAX_THREADS enum mvpp2_bm_type { MVPP2_BM_FREE, @@ -701,11 +710,12 @@ struct mvpp2 { void __iomem *lms_base; void __iomem *iface_base; - /* On PPv2.2, each CPU can access the base register through a - * separate address space, each 64 KB apart from each - * other. + /* On PPv2.2, each "software thread" can access the base + * register through a separate address space, each 64 KB apart + * from each other. Typically, such address spaces will be + * used per CPU. 
*/ - void __iomem *cpu_base[MVPP2_MAX_CPUS]; + void __iomem *swth_base[MVPP2_MAX_THREADS]; /* Common clocks */ struct clk *pp_clk; @@ -752,6 +762,18 @@ struct mvpp2_port_pcpu { struct tasklet_struct tx_done_tasklet; }; +struct mvpp2_queue_vector { + int irq; + struct napi_struct napi; + enum { MVPP2_QUEUE_VECTOR_SHARED, MVPP2_QUEUE_VECTOR_PRIVATE } type; + int sw_thread_id; + u16 sw_thread_mask; + int first_rxq; + int nrxqs; + u32 pending_cause_rx; + struct mvpp2_port *port; +}; + struct mvpp2_port { u8 id; @@ -760,22 +782,19 @@ struct mvpp2_port { */ int gop_id; - int irq; - struct mvpp2 *priv; /* Per-port registers' base address */ void __iomem *base; struct mvpp2_rx_queue **rxqs; + unsigned int nrxqs; struct mvpp2_tx_queue **txqs; + unsigned int ntxqs; struct net_device *dev; int pkt_size; - u32 pending_cause_rx; - struct napi_struct napi; - /* Per-CPU port control */ struct mvpp2_port_pcpu __percpu *pcpu; @@ -797,6 +816,12 @@ struct mvpp2_port { /* Index of first port's physical RXQ */ u8 first_rxq; + + struct mvpp2_queue_vector qvecs[MVPP2_MAX_QVECS]; + unsigned int nqvecs; + bool has_tx_irqs; + + u32 tx_time_coal; }; /* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the @@ -1062,12 +1087,14 @@ struct mvpp2_bm_pool { u32 port_map; }; -/* Static declaractions */ +/* Queue modes */ +#define MVPP2_QDIST_SINGLE_MODE 0 +#define MVPP2_QDIST_MULTI_MODE 1 -/* Number of RXQs used by single port */ -static int rxq_number = MVPP2_DEFAULT_RXQ; -/* Number of TXQs used by single port */ -static int txq_number = MVPP2_MAX_TXQ; +static int queue_mode = MVPP2_QDIST_SINGLE_MODE; + +module_param(queue_mode, int, 0444); +MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)"); #define MVPP2_DRIVER_NAME "mvpp2" #define MVPP2_DRIVER_VERSION "1.0" @@ -1076,12 +1103,12 @@ static int txq_number = MVPP2_MAX_TXQ; static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data) { - writel(data, priv->cpu_base[0] + offset); + writel(data, priv->swth_base[0] + offset); } static u32 mvpp2_read(struct mvpp2 *priv, u32 offset) { - return readl(priv->cpu_base[0] + offset); + return readl(priv->swth_base[0] + offset); } /* These accessors should be used to access: @@ -1123,13 +1150,13 @@ static u32 mvpp2_read(struct mvpp2 *priv, u32 offset) static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu, u32 offset, u32 data) { - writel(data, priv->cpu_base[cpu] + offset); + writel(data, priv->swth_base[cpu] + offset); } static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, u32 offset) { - return readl(priv->cpu_base[cpu] + offset); + return readl(priv->swth_base[cpu] + offset); } static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port, @@ -4070,7 +4097,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) port->pool_long->port_map |= (1 << port->id); - for (rxq = 0; rxq < rxq_number; rxq++) + for (rxq = 0; rxq < port->nrxqs; rxq++) mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id); } @@ -4084,7 +4111,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) port->pool_short->port_map |= (1 << port->id); - for (rxq = 0; rxq < rxq_number; rxq++) + for (rxq = 0; rxq < port->nrxqs; rxq++) mvpp2_rxq_short_pool_set(port, rxq, port->pool_short->id); } @@ -4125,22 +4152,40 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu) static inline void mvpp2_interrupts_enable(struct mvpp2_port *port) { - int cpu, cpu_mask = 0; + int i, sw_thread_mask = 0; + + for (i = 0; i < port->nqvecs; i++) + sw_thread_mask |= port->qvecs[i].sw_thread_mask; - 
for_each_present_cpu(cpu) - cpu_mask |= 1 << cpu; mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), - MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask)); + MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask)); } static inline void mvpp2_interrupts_disable(struct mvpp2_port *port) { - int cpu, cpu_mask = 0; + int i, sw_thread_mask = 0; + + for (i = 0; i < port->nqvecs; i++) + sw_thread_mask |= port->qvecs[i].sw_thread_mask; + + mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), + MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask)); +} + +static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec) +{ + struct mvpp2_port *port = qvec->port; + + mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), + MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask)); +} + +static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec) +{ + struct mvpp2_port *port = qvec->port; - for_each_present_cpu(cpu) - cpu_mask |= 1 << cpu; mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), - MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask)); + MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask)); } /* Mask the current CPU's Rx/Tx interrupts @@ -4162,11 +4207,40 @@ static void mvpp2_interrupts_mask(void *arg) static void mvpp2_interrupts_unmask(void *arg) { struct mvpp2_port *port = arg; + u32 val; + + val = MVPP2_CAUSE_MISC_SUM_MASK | + MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + if (port->has_tx_irqs) + val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; mvpp2_percpu_write(port->priv, smp_processor_id(), - MVPP2_ISR_RX_TX_MASK_REG(port->id), - (MVPP2_CAUSE_MISC_SUM_MASK | - MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK)); + MVPP2_ISR_RX_TX_MASK_REG(port->id), val); +} + +static void +mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask) +{ + u32 val; + int i; + + if (port->priv->hw_version != MVPP22) + return; + + if (mask) + val = 0; + else + val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + + for (i = 0; i < port->nqvecs; i++) { + struct mvpp2_queue_vector *v = port->qvecs + i; + + if (v->type != MVPP2_QUEUE_VECTOR_SHARED) + continue; + + mvpp2_percpu_write(port->priv, v->sw_thread_id, + MVPP2_ISR_RX_TX_MASK_REG(port->id), val); + } } /* Port configuration routines */ @@ -4376,7 +4450,7 @@ static void mvpp2_defaults_set(struct mvpp2_port *port) MVPP2_RX_LOW_LATENCY_PKT_SIZE(256)); /* Enable Rx cache snoop */ - for (lrxq = 0; lrxq < rxq_number; lrxq++) { + for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { queue = port->rxqs[lrxq]->id; val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); val |= MVPP2_SNOOP_PKT_SIZE_MASK | @@ -4394,7 +4468,7 @@ static void mvpp2_ingress_enable(struct mvpp2_port *port) u32 val; int lrxq, queue; - for (lrxq = 0; lrxq < rxq_number; lrxq++) { + for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { queue = port->rxqs[lrxq]->id; val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); val &= ~MVPP2_RXQ_DISABLE_MASK; @@ -4407,7 +4481,7 @@ static void mvpp2_ingress_disable(struct mvpp2_port *port) u32 val; int lrxq, queue; - for (lrxq = 0; lrxq < rxq_number; lrxq++) { + for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { queue = port->rxqs[lrxq]->id; val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); val |= MVPP2_RXQ_DISABLE_MASK; @@ -4426,7 +4500,7 @@ static void mvpp2_egress_enable(struct mvpp2_port *port) /* Enable all initialized TXs. 
*/ qmap = 0; - for (queue = 0; queue < txq_number; queue++) { + for (queue = 0; queue < port->ntxqs; queue++) { struct mvpp2_tx_queue *txq = port->txqs[queue]; if (txq->descs) @@ -4712,7 +4786,7 @@ static void mvpp2_txq_sent_counter_clear(void *arg) struct mvpp2_port *port = arg; int queue; - for (queue = 0; queue < txq_number; queue++) { + for (queue = 0; queue < port->ntxqs; queue++) { int id = port->txqs[queue]->id; mvpp2_percpu_read(port->priv, smp_processor_id(), @@ -4753,7 +4827,7 @@ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); } - for (txq = 0; txq < txq_number; txq++) { + for (txq = 0; txq < port->ntxqs; txq++) { val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq)); size = val & MVPP2_TXQ_TOKEN_SIZE_MAX; @@ -4787,6 +4861,23 @@ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, put_cpu(); } +/* For some reason in the LSP this is done on each CPU. Why ? */ +static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, + struct mvpp2_tx_queue *txq) +{ + int cpu = get_cpu(); + u32 val; + + if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK) + txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK; + + val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val); + + put_cpu(); +} + static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) { u64 tmp = (u64)clk_hz * usec; @@ -4823,6 +4914,22 @@ static void mvpp2_rx_time_coal_set(struct mvpp2_port *port, mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val); } +static void mvpp2_tx_time_coal_set(struct mvpp2_port *port) +{ + unsigned long freq = port->priv->tclk; + u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); + + if (val > MVPP2_MAX_ISR_TX_THRESHOLD) { + port->tx_time_coal = + mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq); + + /* re-evaluate to get actual register value */ + val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); + } + + mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val); +} + /* Free Tx queue skbuffs */ static void mvpp2_txq_bufs_free(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, @@ -4881,7 +4988,8 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, netif_tx_wake_queue(nq); } -static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause) +static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause, + int cpu) { struct mvpp2_tx_queue *txq; struct mvpp2_txq_pcpu *txq_pcpu; @@ -4892,7 +5000,7 @@ static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause) if (!txq) break; - txq_pcpu = this_cpu_ptr(txq->pcpu); + txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); if (txq_pcpu->count) { mvpp2_txq_done(port, txq, txq_pcpu); @@ -5229,7 +5337,7 @@ static void mvpp2_cleanup_txqs(struct mvpp2_port *port) val |= MVPP2_TX_PORT_FLUSH_MASK(port->id); mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); - for (queue = 0; queue < txq_number; queue++) { + for (queue = 0; queue < port->ntxqs; queue++) { txq = port->txqs[queue]; mvpp2_txq_clean(port, txq); mvpp2_txq_deinit(port, txq); @@ -5246,7 +5354,7 @@ static void mvpp2_cleanup_rxqs(struct mvpp2_port *port) { int queue; - for (queue = 0; queue < rxq_number; queue++) + for (queue = 0; queue < port->nrxqs; queue++) mvpp2_rxq_deinit(port, port->rxqs[queue]); } @@ -5255,7 +5363,7 @@ static int mvpp2_setup_rxqs(struct mvpp2_port *port) { int queue, err; - for (queue = 0; 
queue < rxq_number; queue++) { + for (queue = 0; queue < port->nrxqs; queue++) { err = mvpp2_rxq_init(port, port->rxqs[queue]); if (err) goto err_cleanup; @@ -5273,13 +5381,21 @@ static int mvpp2_setup_txqs(struct mvpp2_port *port) struct mvpp2_tx_queue *txq; int queue, err; - for (queue = 0; queue < txq_number; queue++) { + for (queue = 0; queue < port->ntxqs; queue++) { txq = port->txqs[queue]; err = mvpp2_txq_init(port, txq); if (err) goto err_cleanup; } + if (port->has_tx_irqs) { + mvpp2_tx_time_coal_set(port); + for (queue = 0; queue < port->ntxqs; queue++) { + txq = port->txqs[queue]; + mvpp2_tx_pkts_coal_set(port, txq); + } + } + on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); return 0; @@ -5291,11 +5407,11 @@ err_cleanup: /* The callback for per-port interrupt */ static irqreturn_t mvpp2_isr(int irq, void *dev_id) { - struct mvpp2_port *port = (struct mvpp2_port *)dev_id; + struct mvpp2_queue_vector *qv = dev_id; - mvpp2_interrupts_disable(port); + mvpp2_qvec_interrupt_disable(qv); - napi_schedule(&port->napi); + napi_schedule(&qv->napi); return IRQ_HANDLED; } @@ -5385,8 +5501,8 @@ static void mvpp2_tx_proc_cb(unsigned long data) port_pcpu->timer_scheduled = false; /* Process all the Tx queues */ - cause = (1 << txq_number) - 1; - tx_todo = mvpp2_tx_done(port, cause); + cause = (1 << port->ntxqs) - 1; + tx_todo = mvpp2_tx_done(port, cause, smp_processor_id()); /* Set the timer in case not all the packets were processed */ if (tx_todo) @@ -5498,8 +5614,8 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb) } /* Main rx processing */ -static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, - struct mvpp2_rx_queue *rxq) +static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, + int rx_todo, struct mvpp2_rx_queue *rxq) { struct net_device *dev = port->dev; int rx_received; @@ -5577,7 +5693,7 @@ err_drop_frame: skb->protocol = eth_type_trans(skb, dev); mvpp2_rx_csum(port, rx_status, skb); - napi_gro_receive(&port->napi, skb); + napi_gro_receive(napi, skb); } if (rcvd_pkts) { @@ -5762,7 +5878,8 @@ out: mvpp2_txq_done(port, txq, txq_pcpu); /* Set the timer in case not all frags were processed */ - if (txq_pcpu->count <= frags && txq_pcpu->count > 0) { + if (!port->has_tx_irqs && txq_pcpu->count <= frags && + txq_pcpu->count > 0) { struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); mvpp2_timer_set(port_pcpu); @@ -5783,11 +5900,14 @@ static inline void mvpp2_cause_error(struct net_device *dev, int cause) static int mvpp2_poll(struct napi_struct *napi, int budget) { - u32 cause_rx_tx, cause_rx, cause_misc; + u32 cause_rx_tx, cause_rx, cause_tx, cause_misc; int rx_done = 0; struct mvpp2_port *port = netdev_priv(napi->dev); + struct mvpp2_queue_vector *qv; int cpu = smp_processor_id(); + qv = container_of(napi, struct mvpp2_queue_vector, napi); + /* Rx/Tx cause register * * Bits 0-15: each bit indicates received packets on the Rx queue @@ -5798,11 +5918,10 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) * * Each CPU has its own Rx/Tx cause register */ - cause_rx_tx = mvpp2_percpu_read(port->priv, cpu, + cause_rx_tx = mvpp2_percpu_read(port->priv, qv->sw_thread_id, MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); - cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; - cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; + cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; if (cause_misc) { mvpp2_cause_error(port->dev, cause_misc); @@ -5813,10 +5932,16 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) cause_rx_tx & 
~MVPP2_CAUSE_MISC_SUM_MASK); } - cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; + if (cause_tx) { + cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET; + mvpp2_tx_done(port, cause_tx, qv->sw_thread_id); + } /* Process RX packets */ - cause_rx |= port->pending_cause_rx; + cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + cause_rx <<= qv->first_rxq; + cause_rx |= qv->pending_cause_rx; while (cause_rx && budget > 0) { int count; struct mvpp2_rx_queue *rxq; @@ -5825,7 +5950,7 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) if (!rxq) break; - count = mvpp2_rx(port, budget, rxq); + count = mvpp2_rx(port, napi, budget, rxq); rx_done += count; budget -= count; if (budget > 0) { @@ -5841,9 +5966,9 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) cause_rx = 0; napi_complete_done(napi, rx_done); - mvpp2_interrupts_enable(port); + mvpp2_qvec_interrupt_enable(qv); } - port->pending_cause_rx = cause_rx; + qv->pending_cause_rx = cause_rx; return rx_done; } @@ -5851,11 +5976,13 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) static void mvpp2_start_dev(struct mvpp2_port *port) { struct net_device *ndev = port->dev; + int i; mvpp2_gmac_max_rx_size_set(port); mvpp2_txp_max_tx_size_set(port); - napi_enable(&port->napi); + for (i = 0; i < port->nqvecs; i++) + napi_enable(&port->qvecs[i].napi); /* Enable interrupts on all CPUs */ mvpp2_interrupts_enable(port); @@ -5869,6 +5996,7 @@ static void mvpp2_start_dev(struct mvpp2_port *port) static void mvpp2_stop_dev(struct mvpp2_port *port) { struct net_device *ndev = port->dev; + int i; /* Stop new packets from arriving to RXQs */ mvpp2_ingress_disable(port); @@ -5878,7 +6006,8 @@ static void mvpp2_stop_dev(struct mvpp2_port *port) /* Disable interrupts on all CPUs */ mvpp2_interrupts_disable(port); - napi_disable(&port->napi); + for (i = 0; i < port->nqvecs; i++) + napi_disable(&port->qvecs[i].napi); netif_carrier_off(port->dev); netif_tx_stop_all_queues(port->dev); @@ -5964,6 +6093,46 @@ static void mvpp2_phy_disconnect(struct mvpp2_port *port) phy_disconnect(ndev->phydev); } +static int mvpp2_irqs_init(struct mvpp2_port *port) +{ + int err, i; + + for (i = 0; i < port->nqvecs; i++) { + struct mvpp2_queue_vector *qv = port->qvecs + i; + + err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv); + if (err) + goto err; + + if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) + irq_set_affinity_hint(qv->irq, + cpumask_of(qv->sw_thread_id)); + } + + return 0; +err: + for (i = 0; i < port->nqvecs; i++) { + struct mvpp2_queue_vector *qv = port->qvecs + i; + + irq_set_affinity_hint(qv->irq, NULL); + free_irq(qv->irq, qv); + } + + return err; +} + +static void mvpp2_irqs_deinit(struct mvpp2_port *port) +{ + int i; + + for (i = 0; i < port->nqvecs; i++) { + struct mvpp2_queue_vector *qv = port->qvecs + i; + + irq_set_affinity_hint(qv->irq, NULL); + free_irq(qv->irq, qv); + } +} + static int mvpp2_open(struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); @@ -6006,9 +6175,9 @@ static int mvpp2_open(struct net_device *dev) goto err_cleanup_rxqs; } - err = request_irq(port->irq, mvpp2_isr, 0, dev->name, port); + err = mvpp2_irqs_init(port); if (err) { - netdev_err(port->dev, "cannot request IRQ %d\n", port->irq); + netdev_err(port->dev, "cannot init IRQs\n"); goto err_cleanup_txqs; } @@ -6021,13 +6190,14 @@ static int mvpp2_open(struct net_device *dev) /* Unmask interrupts on all CPUs */ on_each_cpu(mvpp2_interrupts_unmask, 
port, 1); + mvpp2_shared_interrupt_mask_unmask(port, false); mvpp2_start_dev(port); return 0; err_free_irq: - free_irq(port->irq, port); + mvpp2_irqs_deinit(port); err_cleanup_txqs: mvpp2_cleanup_txqs(port); err_cleanup_rxqs: @@ -6046,14 +6216,17 @@ static int mvpp2_stop(struct net_device *dev) /* Mask interrupts on all CPUs */ on_each_cpu(mvpp2_interrupts_mask, port, 1); + mvpp2_shared_interrupt_mask_unmask(port, true); - free_irq(port->irq, port); - for_each_present_cpu(cpu) { - port_pcpu = per_cpu_ptr(port->pcpu, cpu); + mvpp2_irqs_deinit(port); + if (!port->has_tx_irqs) { + for_each_present_cpu(cpu) { + port_pcpu = per_cpu_ptr(port->pcpu, cpu); - hrtimer_cancel(&port_pcpu->tx_done_timer); - port_pcpu->timer_scheduled = false; - tasklet_kill(&port_pcpu->tx_done_tasklet); + hrtimer_cancel(&port_pcpu->tx_done_timer); + port_pcpu->timer_scheduled = false; + tasklet_kill(&port_pcpu->tx_done_tasklet); + } } mvpp2_cleanup_rxqs(port); mvpp2_cleanup_txqs(port); @@ -6228,7 +6401,7 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev, struct mvpp2_port *port = netdev_priv(dev); int queue; - for (queue = 0; queue < rxq_number; queue++) { + for (queue = 0; queue < port->nrxqs; queue++) { struct mvpp2_rx_queue *rxq = port->rxqs[queue]; rxq->time_coal = c->rx_coalesce_usecs; @@ -6237,10 +6410,18 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev, mvpp2_rx_time_coal_set(port, rxq); } - for (queue = 0; queue < txq_number; queue++) { + if (port->has_tx_irqs) { + port->tx_time_coal = c->tx_coalesce_usecs; + mvpp2_tx_time_coal_set(port); + } + + for (queue = 0; queue < port->ntxqs; queue++) { struct mvpp2_tx_queue *txq = port->txqs[queue]; txq->done_pkts_coal = c->tx_max_coalesced_frames; + + if (port->has_tx_irqs) + mvpp2_tx_pkts_coal_set(port, txq); } return 0; @@ -6365,6 +6546,129 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = { .set_link_ksettings = phy_ethtool_set_link_ksettings, }; +/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that + * had a single IRQ defined per-port. 
+ */ +static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port, + struct device_node *port_node) +{ + struct mvpp2_queue_vector *v = &port->qvecs[0]; + + v->first_rxq = 0; + v->nrxqs = port->nrxqs; + v->type = MVPP2_QUEUE_VECTOR_SHARED; + v->sw_thread_id = 0; + v->sw_thread_mask = *cpumask_bits(cpu_online_mask); + v->port = port; + v->irq = irq_of_parse_and_map(port_node, 0); + if (v->irq <= 0) + return -EINVAL; + netif_napi_add(port->dev, &v->napi, mvpp2_poll, + NAPI_POLL_WEIGHT); + + port->nqvecs = 1; + + return 0; +} + +static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port, + struct device_node *port_node) +{ + struct mvpp2_queue_vector *v; + int i, ret; + + port->nqvecs = num_possible_cpus(); + if (queue_mode == MVPP2_QDIST_SINGLE_MODE) + port->nqvecs += 1; + + for (i = 0; i < port->nqvecs; i++) { + char irqname[16]; + + v = port->qvecs + i; + + v->port = port; + v->type = MVPP2_QUEUE_VECTOR_PRIVATE; + v->sw_thread_id = i; + v->sw_thread_mask = BIT(i); + + snprintf(irqname, sizeof(irqname), "tx-cpu%d", i); + + if (queue_mode == MVPP2_QDIST_MULTI_MODE) { + v->first_rxq = i * MVPP2_DEFAULT_RXQ; + v->nrxqs = MVPP2_DEFAULT_RXQ; + } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE && + i == (port->nqvecs - 1)) { + v->first_rxq = 0; + v->nrxqs = port->nrxqs; + v->type = MVPP2_QUEUE_VECTOR_SHARED; + strncpy(irqname, "rx-shared", sizeof(irqname)); + } + + v->irq = of_irq_get_byname(port_node, irqname); + if (v->irq <= 0) { + ret = -EINVAL; + goto err; + } + + netif_napi_add(port->dev, &v->napi, mvpp2_poll, + NAPI_POLL_WEIGHT); + } + + return 0; + +err: + for (i = 0; i < port->nqvecs; i++) + irq_dispose_mapping(port->qvecs[i].irq); + return ret; +} + +static int mvpp2_queue_vectors_init(struct mvpp2_port *port, + struct device_node *port_node) +{ + if (port->has_tx_irqs) + return mvpp2_multi_queue_vectors_init(port, port_node); + else + return mvpp2_simple_queue_vectors_init(port, port_node); +} + +static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port) +{ + int i; + + for (i = 0; i < port->nqvecs; i++) + irq_dispose_mapping(port->qvecs[i].irq); +} + +/* Configure Rx queue group interrupt for this port */ +static void mvpp2_rx_irqs_setup(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + u32 val; + int i; + + if (priv->hw_version == MVPP21) { + mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id), + port->nrxqs); + return; + } + + /* Handle the more complicated PPv2.2 case */ + for (i = 0; i < port->nqvecs; i++) { + struct mvpp2_queue_vector *qv = port->qvecs + i; + + if (!qv->nrxqs) + continue; + + val = qv->sw_thread_id; + val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET; + mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val); + + val = qv->first_rxq; + val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET; + mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val); + } +} + /* Initialize port HW */ static int mvpp2_port_init(struct mvpp2_port *port) { @@ -6373,15 +6677,22 @@ static int mvpp2_port_init(struct mvpp2_port *port) struct mvpp2_txq_pcpu *txq_pcpu; int queue, cpu, err; - if (port->first_rxq + rxq_number > + /* Checks for hardware constraints */ + if (port->first_rxq + port->nrxqs > MVPP2_MAX_PORTS * priv->max_port_rxqs) return -EINVAL; + if (port->nrxqs % 4 || (port->nrxqs > priv->max_port_rxqs) || + (port->ntxqs > MVPP2_MAX_TXQ)) + return -EINVAL; + /* Disable port */ mvpp2_egress_disable(port); mvpp2_port_disable(port); - port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs), + port->tx_time_coal = 
MVPP2_TXDONE_COAL_USEC; + + port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs), GFP_KERNEL); if (!port->txqs) return -ENOMEM; @@ -6389,7 +6700,7 @@ static int mvpp2_port_init(struct mvpp2_port *port) /* Associate physical Tx queues to this port and initialize. * The mapping is predefined. */ - for (queue = 0; queue < txq_number; queue++) { + for (queue = 0; queue < port->ntxqs; queue++) { int queue_phy_id = mvpp2_txq_phys(port->id, queue); struct mvpp2_tx_queue *txq; @@ -6416,7 +6727,7 @@ static int mvpp2_port_init(struct mvpp2_port *port) port->txqs[queue] = txq; } - port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs), + port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs), GFP_KERNEL); if (!port->rxqs) { err = -ENOMEM; @@ -6424,7 +6735,7 @@ static int mvpp2_port_init(struct mvpp2_port *port) } /* Allocate and initialize Rx queue for this port */ - for (queue = 0; queue < rxq_number; queue++) { + for (queue = 0; queue < port->nrxqs; queue++) { struct mvpp2_rx_queue *rxq; /* Map physical Rx queue to port's logical Rx queue */ @@ -6441,22 +6752,10 @@ static int mvpp2_port_init(struct mvpp2_port *port) port->rxqs[queue] = rxq; } - /* Configure Rx queue group interrupt for this port */ - if (priv->hw_version == MVPP21) { - mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id), - rxq_number); - } else { - u32 val; - - val = (port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET); - mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val); - - val = (rxq_number << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET); - mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val); - } + mvpp2_rx_irqs_setup(port); /* Create Rx descriptor rings */ - for (queue = 0; queue < rxq_number; queue++) { + for (queue = 0; queue < port->nrxqs; queue++) { struct mvpp2_rx_queue *rxq = port->rxqs[queue]; rxq->size = port->rx_ring_size; @@ -6484,7 +6783,7 @@ static int mvpp2_port_init(struct mvpp2_port *port) return 0; err_free_percpu: - for (queue = 0; queue < txq_number; queue++) { + for (queue = 0; queue < port->ntxqs; queue++) { if (!port->txqs[queue]) continue; free_percpu(port->txqs[queue]->pcpu); @@ -6492,6 +6791,30 @@ err_free_percpu: return err; } +/* Checks if the port DT description has the TX interrupts + * described. On PPv2.1, there are no such interrupts. On PPv2.2, + * they are available, but we need to keep support for old DTs. 
+ */ +static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv, + struct device_node *port_node) +{ + char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", + "tx-cpu2", "tx-cpu3" }; + int ret, i; + + if (priv->hw_version == MVPP21) + return false; + + for (i = 0; i < 5; i++) { + ret = of_property_match_string(port_node, "interrupt-names", + irqs[i]); + if (ret < 0) + return false; + } + + return true; +} + /* Ports initialization */ static int mvpp2_port_probe(struct platform_device *pdev, struct device_node *port_node, @@ -6505,12 +6828,25 @@ static int mvpp2_port_probe(struct platform_device *pdev, const char *dt_mac_addr; const char *mac_from; char hw_mac_addr[ETH_ALEN]; + unsigned int ntxqs, nrxqs; + bool has_tx_irqs; u32 id; int features; int phy_mode; int err, i, cpu; - dev = alloc_etherdev_mqs(sizeof(*port), txq_number, rxq_number); + has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node); + + if (!has_tx_irqs) + queue_mode = MVPP2_QDIST_SINGLE_MODE; + + ntxqs = MVPP2_MAX_TXQ; + if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE) + nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus(); + else + nrxqs = MVPP2_DEFAULT_RXQ; + + dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs); if (!dev) return -ENOMEM; @@ -6540,20 +6876,22 @@ static int mvpp2_port_probe(struct platform_device *pdev, dev->ethtool_ops = &mvpp2_eth_tool_ops; port = netdev_priv(dev); + port->dev = dev; + port->ntxqs = ntxqs; + port->nrxqs = nrxqs; + port->priv = priv; + port->has_tx_irqs = has_tx_irqs; - port->irq = irq_of_parse_and_map(port_node, 0); - if (port->irq <= 0) { - err = -EINVAL; + err = mvpp2_queue_vectors_init(port, port_node); + if (err) goto err_free_netdev; - } if (of_property_read_bool(port_node, "marvell,loopback")) port->flags |= MVPP2_F_LOOPBACK; - port->priv = priv; port->id = id; if (priv->hw_version == MVPP21) - port->first_rxq = port->id * rxq_number; + port->first_rxq = port->id * port->nrxqs; else port->first_rxq = port->id * priv->max_port_rxqs; @@ -6565,14 +6903,14 @@ static int mvpp2_port_probe(struct platform_device *pdev, port->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(port->base)) { err = PTR_ERR(port->base); - goto err_free_irq; + goto err_deinit_qvecs; } } else { if (of_property_read_u32(port_node, "gop-port-id", &port->gop_id)) { err = -EINVAL; dev_err(&pdev->dev, "missing gop-port-id value\n"); - goto err_free_irq; + goto err_deinit_qvecs; } port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id); @@ -6582,7 +6920,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats); if (!port->stats) { err = -ENOMEM; - goto err_free_irq; + goto err_deinit_qvecs; } dt_mac_addr = of_get_mac_address(port_node); @@ -6603,7 +6941,6 @@ static int mvpp2_port_probe(struct platform_device *pdev, port->tx_ring_size = MVPP2_MAX_TXD; port->rx_ring_size = MVPP2_MAX_RXD; - port->dev = dev; SET_NETDEV_DEV(dev, &pdev->dev); err = mvpp2_port_init(port); @@ -6626,19 +6963,21 @@ static int mvpp2_port_probe(struct platform_device *pdev, goto err_free_txq_pcpu; } - for_each_present_cpu(cpu) { - port_pcpu = per_cpu_ptr(port->pcpu, cpu); + if (!port->has_tx_irqs) { + for_each_present_cpu(cpu) { + port_pcpu = per_cpu_ptr(port->pcpu, cpu); - hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC, - HRTIMER_MODE_REL_PINNED); - port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb; - port_pcpu->timer_scheduled = false; + hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL_PINNED); + 
port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb; + port_pcpu->timer_scheduled = false; - tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb, - (unsigned long)dev); + tasklet_init(&port_pcpu->tx_done_tasklet, + mvpp2_tx_proc_cb, + (unsigned long)dev); + } } - netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT); features = NETIF_F_SG | NETIF_F_IP_CSUM; dev->features = features | NETIF_F_RXCSUM; dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO; @@ -6662,12 +7001,12 @@ static int mvpp2_port_probe(struct platform_device *pdev, err_free_port_pcpu: free_percpu(port->pcpu); err_free_txq_pcpu: - for (i = 0; i < txq_number; i++) + for (i = 0; i < port->ntxqs; i++) free_percpu(port->txqs[i]->pcpu); err_free_stats: free_percpu(port->stats); -err_free_irq: - irq_dispose_mapping(port->irq); +err_deinit_qvecs: + mvpp2_queue_vectors_deinit(port); err_free_netdev: of_node_put(phy_node); free_netdev(dev); @@ -6683,9 +7022,9 @@ static void mvpp2_port_remove(struct mvpp2_port *port) of_node_put(port->phy_node); free_percpu(port->pcpu); free_percpu(port->stats); - for (i = 0; i < txq_number; i++) + for (i = 0; i < port->ntxqs; i++) free_percpu(port->txqs[i]->pcpu); - irq_dispose_mapping(port->irq); + mvpp2_queue_vectors_deinit(port); free_netdev(port->dev); } @@ -6800,13 +7139,6 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv) int err, i; u32 val; - /* Checks for hardware constraints */ - if (rxq_number % 4 || (rxq_number > priv->max_port_rxqs) || - (txq_number > MVPP2_MAX_TXQ)) { - dev_err(&pdev->dev, "invalid queue size parameter\n"); - return -EINVAL; - } - /* MBUS windows configuration */ dram_target_info = mv_mbus_dram_info(); if (dram_target_info) @@ -6845,23 +7177,6 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv) /* Rx Fifo Init */ mvpp2_rx_fifo_init(priv); - /* Reset Rx queue group interrupt configuration */ - for (i = 0; i < MVPP2_MAX_PORTS; i++) { - if (priv->hw_version == MVPP21) { - mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(i), - rxq_number); - continue; - } else { - u32 val; - - val = (i << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET); - mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val); - - val = (rxq_number << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET); - mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val); - } - } - if (priv->hw_version == MVPP21) writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT, priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG); @@ -6892,7 +7207,7 @@ static int mvpp2_probe(struct platform_device *pdev) struct mvpp2 *priv; struct resource *res; void __iomem *base; - int port_count, cpu; + int port_count, i; int err; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); @@ -6919,12 +7234,12 @@ static int mvpp2_probe(struct platform_device *pdev) return PTR_ERR(priv->iface_base); } - for_each_present_cpu(cpu) { + for (i = 0; i < MVPP2_MAX_THREADS; i++) { u32 addr_space_sz; addr_space_sz = (priv->hw_version == MVPP21 ? 
MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ); - priv->cpu_base[cpu] = base + cpu * addr_space_sz; + priv->swth_base[i] = base + i * addr_space_sz; } if (priv->hw_version == MVPP21) diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig index 698bb89aa901..f9149d2a4694 100644 --- a/drivers/net/ethernet/mediatek/Kconfig +++ b/drivers/net/ethernet/mediatek/Kconfig @@ -7,11 +7,11 @@ config NET_VENDOR_MEDIATEK if NET_VENDOR_MEDIATEK config NET_MEDIATEK_SOC - tristate "MediaTek MT7623 Gigabit ethernet support" - depends on NET_VENDOR_MEDIATEK && (MACH_MT7623 || MACH_MT2701) + tristate "MediaTek SoC Gigabit Ethernet support" + depends on NET_VENDOR_MEDIATEK select PHYLIB ---help--- This driver supports the gigabit ethernet MACs in the - MediaTek MT2701/MT7623 chipset family. + MediaTek SoC family. endif #NET_VENDOR_MEDIATEK diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index e588a0cdb074..5e81a7263654 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -53,7 +53,8 @@ static const struct mtk_ethtool_stats { }; static const char * const mtk_clks_source_name[] = { - "ethif", "esw", "gp1", "gp2", "trgpll" + "ethif", "esw", "gp0", "gp1", "gp2", "trgpll", "sgmii_tx250m", + "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb", "sgmii_ck", "eth2pll" }; void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg) @@ -163,6 +164,47 @@ static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed) mtk_w32(eth, val, TRGMII_TCK_CTRL); } +static void mtk_gmac_sgmii_hw_setup(struct mtk_eth *eth, int mac_id) +{ + u32 val; + + /* Setup the link timer and QPHY power up inside SGMIISYS */ + regmap_write(eth->sgmiisys, SGMSYS_PCS_LINK_TIMER, + SGMII_LINK_TIMER_DEFAULT); + + regmap_read(eth->sgmiisys, SGMSYS_SGMII_MODE, &val); + val |= SGMII_REMOTE_FAULT_DIS; + regmap_write(eth->sgmiisys, SGMSYS_SGMII_MODE, val); + + regmap_read(eth->sgmiisys, SGMSYS_PCS_CONTROL_1, &val); + val |= SGMII_AN_RESTART; + regmap_write(eth->sgmiisys, SGMSYS_PCS_CONTROL_1, val); + + regmap_read(eth->sgmiisys, SGMSYS_QPHY_PWR_STATE_CTRL, &val); + val &= ~SGMII_PHYA_PWD; + regmap_write(eth->sgmiisys, SGMSYS_QPHY_PWR_STATE_CTRL, val); + + /* Determine MUX for which GMAC uses the SGMII interface */ + if (MTK_HAS_CAPS(eth->soc->caps, MTK_DUAL_GMAC_SHARED_SGMII)) { + regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); + val &= ~SYSCFG0_SGMII_MASK; + val |= !mac_id ? 
SYSCFG0_SGMII_GMAC1 : SYSCFG0_SGMII_GMAC2; + regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val); + + dev_info(eth->dev, "setup shared sgmii for gmac=%d\n", + mac_id); + } + + /* Setup the GMAC1 going through SGMII path when SoC also support + * ESW on GMAC1 + */ + if (MTK_HAS_CAPS(eth->soc->caps, MTK_GMAC1_ESW | MTK_GMAC1_SGMII) && + !mac_id) { + mtk_w32(eth, 0, MTK_MAC_MISC); + dev_info(eth->dev, "setup gmac1 going through sgmii"); + } +} + static void mtk_phy_link_adjust(struct net_device *dev) { struct mtk_mac *mac = netdev_priv(dev); @@ -185,7 +227,8 @@ static void mtk_phy_link_adjust(struct net_device *dev) break; }; - if (mac->id == 0 && !mac->trgmii) + if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) && + !mac->id && !mac->trgmii) mtk_gmac0_rgmii_adjust(mac->hw, dev->phydev->speed); if (dev->phydev->link) @@ -269,6 +312,7 @@ static int mtk_phy_connect(struct net_device *dev) if (!np) return -ENODEV; + mac->ge_mode = 0; switch (of_get_phy_mode(np)) { case PHY_INTERFACE_MODE_TRGMII: mac->trgmii = true; @@ -276,7 +320,10 @@ static int mtk_phy_connect(struct net_device *dev) case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII: - mac->ge_mode = 0; + break; + case PHY_INTERFACE_MODE_SGMII: + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) + mtk_gmac_sgmii_hw_setup(eth, mac->id); break; case PHY_INTERFACE_MODE_MII: mac->ge_mode = 1; @@ -1032,7 +1079,6 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget) unsigned int done[MTK_MAX_DEVS]; unsigned int bytes[MTK_MAX_DEVS]; u32 cpu, dma; - static int condition; int total = 0, i; memset(done, 0, sizeof(done)); @@ -1056,10 +1102,8 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget) mac = 1; skb = tx_buf->skb; - if (!skb) { - condition = 1; + if (!skb) break; - } if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) { bytes[mac] += skb->len; @@ -1241,9 +1285,19 @@ static void mtk_tx_clean(struct mtk_eth *eth) static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) { - struct mtk_rx_ring *ring = ð->rx_ring[ring_no]; + struct mtk_rx_ring *ring; int rx_data_len, rx_dma_size; int i; + u32 offset = 0; + + if (rx_flag == MTK_RX_FLAGS_QDMA) { + if (ring_no) + return -EINVAL; + ring = ð->rx_ring_qdma; + offset = 0x1000; + } else { + ring = ð->rx_ring[ring_no]; + } if (rx_flag == MTK_RX_FLAGS_HWLRO) { rx_data_len = MTK_MAX_LRO_RX_LENGTH; @@ -1293,17 +1347,16 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) */ wmb(); - mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no)); - mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no)); - mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); - mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX); + mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no) + offset); + mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no) + offset); + mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset); + mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX + offset); return 0; } -static void mtk_rx_clean(struct mtk_eth *eth, int ring_no) +static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring) { - struct mtk_rx_ring *ring = ð->rx_ring[ring_no]; int i; if (ring->data && ring->dma) { @@ -1629,6 +1682,10 @@ static int mtk_dma_init(struct mtk_eth *eth) if (err) return err; + err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA); + if (err) + return err; + err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL); if (err) return err; @@ -1668,12 +1725,13 @@ static void mtk_dma_free(struct mtk_eth *eth) eth->phy_scratch_ring = 
0; } mtk_tx_clean(eth); - mtk_rx_clean(eth, 0); + mtk_rx_clean(eth, ð->rx_ring[0]); + mtk_rx_clean(eth, ð->rx_ring_qdma); if (eth->hwlro) { mtk_hwlro_rx_uninit(eth); for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) - mtk_rx_clean(eth, i); + mtk_rx_clean(eth, ð->rx_ring[i]); } kfree(eth->scratch_head); @@ -1740,7 +1798,9 @@ static int mtk_start_dma(struct mtk_eth *eth) mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | - MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO, + MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO | + MTK_RX_DMA_EN | MTK_RX_2B_OFFSET | + MTK_RX_BT_32DWORDS, MTK_QDMA_GLO_CFG); mtk_w32(eth, @@ -1837,9 +1897,36 @@ static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits) mdelay(10); } +static void mtk_clk_disable(struct mtk_eth *eth) +{ + int clk; + + for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--) + clk_disable_unprepare(eth->clks[clk]); +} + +static int mtk_clk_enable(struct mtk_eth *eth) +{ + int clk, ret; + + for (clk = 0; clk < MTK_CLK_MAX ; clk++) { + ret = clk_prepare_enable(eth->clks[clk]); + if (ret) + goto err_disable_clks; + } + + return 0; + +err_disable_clks: + while (--clk >= 0) + clk_disable_unprepare(eth->clks[clk]); + + return ret; +} + static int mtk_hw_init(struct mtk_eth *eth) { - int i, val; + int i, val, ret; if (test_and_set_bit(MTK_HW_INIT, ð->state)) return 0; @@ -1847,10 +1934,10 @@ static int mtk_hw_init(struct mtk_eth *eth) pm_runtime_enable(eth->dev); pm_runtime_get_sync(eth->dev); - clk_prepare_enable(eth->clks[MTK_CLK_ETHIF]); - clk_prepare_enable(eth->clks[MTK_CLK_ESW]); - clk_prepare_enable(eth->clks[MTK_CLK_GP1]); - clk_prepare_enable(eth->clks[MTK_CLK_GP2]); + ret = mtk_clk_enable(eth); + if (ret) + goto err_disable_pm; + ethsys_reset(eth, RSTCTRL_FE); ethsys_reset(eth, RSTCTRL_PPE); @@ -1918,6 +2005,12 @@ static int mtk_hw_init(struct mtk_eth *eth) } return 0; + +err_disable_pm: + pm_runtime_put_sync(eth->dev); + pm_runtime_disable(eth->dev); + + return ret; } static int mtk_hw_deinit(struct mtk_eth *eth) @@ -1925,10 +2018,7 @@ static int mtk_hw_deinit(struct mtk_eth *eth) if (!test_and_clear_bit(MTK_HW_INIT, ð->state)) return 0; - clk_disable_unprepare(eth->clks[MTK_CLK_GP2]); - clk_disable_unprepare(eth->clks[MTK_CLK_GP1]); - clk_disable_unprepare(eth->clks[MTK_CLK_ESW]); - clk_disable_unprepare(eth->clks[MTK_CLK_ETHIF]); + mtk_clk_disable(eth); pm_runtime_put_sync(eth->dev); pm_runtime_disable(eth->dev); @@ -2395,6 +2485,7 @@ static int mtk_get_chip_id(struct mtk_eth *eth, u32 *chip_id) static bool mtk_is_hwlro_supported(struct mtk_eth *eth) { switch (eth->chip_id) { + case MT7622_ETH: case MT7623_ETH: return true; } @@ -2406,6 +2497,7 @@ static int mtk_probe(struct platform_device *pdev) { struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); struct device_node *mac_np; + const struct of_device_id *match; struct mtk_eth *eth; int err; int i; @@ -2414,6 +2506,9 @@ static int mtk_probe(struct platform_device *pdev) if (!eth) return -ENOMEM; + match = of_match_device(of_mtk_match, &pdev->dev); + eth->soc = (struct mtk_soc_data *)match->data; + eth->dev = &pdev->dev; eth->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(eth->base)) @@ -2430,6 +2525,16 @@ static int mtk_probe(struct platform_device *pdev) return PTR_ERR(eth->ethsys); } + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) { + eth->sgmiisys = + syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "mediatek,sgmiisys"); + if (IS_ERR(eth->sgmiisys)) { + dev_err(&pdev->dev, "no sgmiisys regmap found\n"); + return PTR_ERR(eth->sgmiisys); + } + } + eth->pctl = 
syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "mediatek,pctl"); if (IS_ERR(eth->pctl)) { @@ -2450,7 +2555,12 @@ static int mtk_probe(struct platform_device *pdev) if (IS_ERR(eth->clks[i])) { if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) return -EPROBE_DEFER; - return -ENODEV; + if (eth->soc->required_clks & BIT(i)) { + dev_err(&pdev->dev, "clock %s not found\n", + mtk_clks_source_name[i]); + return -EINVAL; + } + eth->clks[i] = NULL; } } @@ -2553,8 +2663,25 @@ static int mtk_remove(struct platform_device *pdev) return 0; } +static const struct mtk_soc_data mt2701_data = { + .caps = MTK_GMAC1_TRGMII, + .required_clks = MT7623_CLKS_BITMAP +}; + +static const struct mtk_soc_data mt7622_data = { + .caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW, + .required_clks = MT7622_CLKS_BITMAP +}; + +static const struct mtk_soc_data mt7623_data = { + .caps = MTK_GMAC1_TRGMII, + .required_clks = MT7623_CLKS_BITMAP +}; + const struct of_device_id of_mtk_match[] = { - { .compatible = "mediatek,mt2701-eth" }, + { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data}, + { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data}, + { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data}, {}, }; MODULE_DEVICE_TABLE(of, of_mtk_match); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 5868a09f623a..3d3c24a28112 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -302,6 +302,9 @@ #define PHY_IAC_REG_SHIFT 25 #define PHY_IAC_TIMEOUT HZ +#define MTK_MAC_MISC 0x1000c +#define MTK_MUX_TO_ESW BIT(0) + /* Mac control registers */ #define MTK_MAC_MCR(x) (0x10100 + (x * 0x100)) #define MAC_MCR_MAX_RX_1536 BIT(24) @@ -357,11 +360,15 @@ #define ETHSYS_CHIPID0_3 0x0 #define ETHSYS_CHIPID4_7 0x4 #define MT7623_ETH 7623 +#define MT7622_ETH 7622 /* ethernet subsystem config register */ #define ETHSYS_SYSCFG0 0x14 #define SYSCFG0_GE_MASK 0x3 #define SYSCFG0_GE_MODE(x, y) (x << (12 + (y * 2))) +#define SYSCFG0_SGMII_MASK (3 << 8) +#define SYSCFG0_SGMII_GMAC1 ((2 << 8) & GENMASK(9, 8)) +#define SYSCFG0_SGMII_GMAC2 ((3 << 8) & GENMASK(9, 8)) /* ethernet subsystem clock register */ #define ETHSYS_CLKCFG0 0x2c @@ -372,6 +379,23 @@ #define RSTCTRL_FE BIT(6) #define RSTCTRL_PPE BIT(31) +/* SGMII subsystem config registers */ +/* Register to auto-negotiation restart */ +#define SGMSYS_PCS_CONTROL_1 0x0 +#define SGMII_AN_RESTART BIT(9) + +/* Register to programmable link timer, the unit in 2 * 8ns */ +#define SGMSYS_PCS_LINK_TIMER 0x18 +#define SGMII_LINK_TIMER_DEFAULT (0x186a0 & GENMASK(19, 0)) + +/* Register to control remote fault */ +#define SGMSYS_SGMII_MODE 0x20 +#define SGMII_REMOTE_FAULT_DIS BIT(8) + +/* Register to power up QPHY */ +#define SGMSYS_QPHY_PWR_STATE_CTRL 0xe8 +#define SGMII_PHYA_PWD BIT(4) + struct mtk_rx_dma { unsigned int rxd1; unsigned int rxd2; @@ -437,12 +461,31 @@ enum mtk_tx_flags { enum mtk_clks_map { MTK_CLK_ETHIF, MTK_CLK_ESW, + MTK_CLK_GP0, MTK_CLK_GP1, MTK_CLK_GP2, MTK_CLK_TRGPLL, + MTK_CLK_SGMII_TX_250M, + MTK_CLK_SGMII_RX_250M, + MTK_CLK_SGMII_CDR_REF, + MTK_CLK_SGMII_CDR_FB, + MTK_CLK_SGMII_CK, + MTK_CLK_ETH2PLL, MTK_CLK_MAX }; +#define MT7623_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \ + BIT(MTK_CLK_GP1) | BIT(MTK_CLK_GP2) | \ + BIT(MTK_CLK_TRGPLL)) +#define MT7622_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \ + BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \ + BIT(MTK_CLK_GP2) | \ + BIT(MTK_CLK_SGMII_TX_250M) | \ + BIT(MTK_CLK_SGMII_RX_250M) | \ + 
BIT(MTK_CLK_SGMII_CDR_REF) | \ + BIT(MTK_CLK_SGMII_CDR_FB) | \ + BIT(MTK_CLK_SGMII_CK) | \ + BIT(MTK_CLK_ETH2PLL)) enum mtk_dev_state { MTK_HW_INIT, MTK_RESETTING @@ -489,6 +532,7 @@ struct mtk_tx_ring { enum mtk_rx_flags { MTK_RX_FLAGS_NORMAL = 0, MTK_RX_FLAGS_HWLRO, + MTK_RX_FLAGS_QDMA, }; /* struct mtk_rx_ring - This struct holds info describing a RX ring @@ -511,6 +555,28 @@ struct mtk_rx_ring { u32 crx_idx_reg; }; +#define MTK_TRGMII BIT(0) +#define MTK_GMAC1_TRGMII (BIT(1) | MTK_TRGMII) +#define MTK_ESW BIT(4) +#define MTK_GMAC1_ESW (BIT(5) | MTK_ESW) +#define MTK_SGMII BIT(8) +#define MTK_GMAC1_SGMII (BIT(9) | MTK_SGMII) +#define MTK_GMAC2_SGMII (BIT(10) | MTK_SGMII) +#define MTK_DUAL_GMAC_SHARED_SGMII (BIT(11) | MTK_GMAC1_SGMII | \ + MTK_GMAC2_SGMII) +#define MTK_HAS_CAPS(caps, _x) (((caps) & (_x)) == (_x)) + +/* struct mtk_soc_data - This is the structure holding all differences + * among various platforms + * @caps: Flags showing the extra capabilities for the SoC + * @required_clks: Flags showing the bitmap of required clocks on + * the target SoC + */ +struct mtk_soc_data { + u32 caps; + u32 required_clks; +}; + /* currently no SoC has more than 2 macs */ #define MTK_MAX_DEVS 2 @@ -529,11 +595,14 @@ struct mtk_rx_ring { * @msg_enable: Ethtool msg level * @ethsys: The register map pointing at the range used to setup * MII modes + * @sgmiisys: The register map pointing at the range used to setup * SGMII modes * @pctl: The register map pointing at the range used to setup * GMAC port drive/slew values * @dma_refcnt: track how many netdevs are using the DMA engine - * @tx_ring: Pointer to the memore holding info about the TX ring - * @rx_ring: Pointer to the memore holding info about the RX ring + * @tx_ring: Pointer to the memory holding info about the TX ring + * @rx_ring: Pointer to the memory holding info about the RX ring + * @rx_ring_qdma: Pointer to the memory holding info about the QDMA RX ring * @tx_napi: The TX NAPI struct * @rx_napi: The RX NAPI struct * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring @@ -542,7 +611,8 @@ struct mtk_rx_ring { * @clks: clock array for all clocks required * @mii_bus: If there is a bus we need to create an instance for it * @pending_work: The workqueue used to reset the dma ring - * @state Initialization and runtime state of the device. 
+ * @state: Initialization and runtime state of the device + * @soc: Holding specific data among various SoCs */ struct mtk_eth { @@ -558,12 +628,14 @@ struct mtk_eth { u32 msg_enable; unsigned long sysclk; struct regmap *ethsys; + struct regmap *sgmiisys; struct regmap *pctl; u32 chip_id; bool hwlro; atomic_t dma_refcnt; struct mtk_tx_ring tx_ring; struct mtk_rx_ring rx_ring[MTK_MAX_RX_RING_NUM]; + struct mtk_rx_ring rx_ring_qdma; struct napi_struct tx_napi; struct napi_struct rx_napi; struct mtk_tx_dma *scratch_ring; @@ -574,6 +646,8 @@ struct mtk_eth { struct mii_bus *mii_bus; struct work_struct pending_work; unsigned long state; + + const struct mtk_soc_data *soc; }; /* struct mtk_mac - the structure that holds the info about the MACs of the diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c index 85fe17e4dcfb..87d1f4d2a77b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c @@ -208,12 +208,10 @@ int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) cq->moder_cnt, cq->moder_time); } -int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) +void mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) { mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map, &priv->mdev->uar_lock); - - return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index 2b0cbca4beb5..686e18de9a97 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -147,7 +147,7 @@ void mlx4_en_update_loopback_state(struct net_device *dev, mutex_unlock(&priv->mdev->state_lock); } -static int mlx4_en_get_profile(struct mlx4_en_dev *mdev) +static void mlx4_en_get_profile(struct mlx4_en_dev *mdev) { struct mlx4_en_profile *params = &mdev->profile; int i; @@ -176,8 +176,6 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev) params->prof[i].rss_rings = 0; params->prof[i].inline_thold = inline_thold; } - - return 0; } static void *mlx4_en_get_netdev(struct mlx4_dev *dev, void *ctx, u8 port) @@ -309,10 +307,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev) } /* Build device profile according to supplied module parameters */ - if (mlx4_en_get_profile(mdev)) { - mlx4_err(mdev, "Bad module parameters, aborting\n"); - goto err_mr; - } + mlx4_en_get_profile(mdev); /* Configure which ports to start according to module parameters */ mdev->port_cnt = 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 3a291fc1780a..6e67ca7aa7f5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -130,19 +130,20 @@ out: return err; } -static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle, - u32 chain_index, __be16 proto, - struct tc_to_netdev *tc) +static int __mlx4_en_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) { - if (tc->type != TC_SETUP_MQPRIO) - return -EINVAL; + struct tc_mqprio_qopt *mqprio = type_data; + + if (type != TC_SETUP_MQPRIO) - return -EOPNOTSUPP; - if (tc->mqprio->num_tc && tc->mqprio->num_tc != MLX4_EN_NUM_UP_HIGH) + if (mqprio->num_tc && mqprio->num_tc != MLX4_EN_NUM_UP_HIGH) return -EINVAL; - tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - return mlx4_en_alloc_tx_queue_per_tc(dev, 
mqprio->num_tc); } #ifdef CONFIG_RFS_ACCEL diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index d350b2158104..fdb3ad0cbe54 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -685,7 +685,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, int cq_idx); void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); -int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); +void mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); void mlx4_en_tx_irq(struct mlx4_cq *mcq); u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 5aee05992f27..fdaef00465d7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -34,6 +34,27 @@ config MLX5_CORE_EN ---help--- Ethernet support in Mellanox Technologies ConnectX-4 NIC. +config MLX5_MPFS + bool "Mellanox Technologies MLX5 MPFS support" + depends on MLX5_CORE_EN + default y + ---help--- + Mellanox Technologies Ethernet Multi-Physical Function Switch (MPFS) + support in ConnectX NIC. MPFS is required when multi-PF configuration + is enabled, to allow passing user-configured unicast MAC addresses to the + requesting PF. + +config MLX5_ESWITCH + bool "Mellanox Technologies MLX5 SRIOV E-Switch support" + depends on MLX5_CORE_EN + default y + ---help--- + Mellanox Technologies Ethernet SRIOV E-Switch support in ConnectX NIC. + E-Switch provides internal SRIOV packet steering and switching for the + enabled VFs and PF in two available modes: + Legacy SRIOV mode (L2 mac vlan steering based). + Switchdev mode (eswitch offloads). 
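The new MLX5_MPFS option above is consumed by the en_fs.c hunk further down: when a unicast L2 flow rule is installed, the driver also tries to register the MAC in the multi-physical-function switch (mlx5_mpfs_add_mac()) and records in the hash node (hn->mpfs) whether that registration succeeded, so teardown only deletes the MPFS entry if one was actually added. A minimal standalone sketch of that success-tracking pattern follows; the types and helpers are invented stand-ins, not the driver's API.

/* Track whether an optional secondary registration succeeded, so that
 * teardown only undoes what setup actually did (the hn->mpfs pattern
 * from mlx5e_execute_l2_action()). All names here are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

struct l2_hash_node {
	unsigned char addr[6];
	bool mpfs;			/* did the MPFS-style add succeed? */
};

static int mpfs_add_mac(const unsigned char *addr)	/* stand-in */
{
	(void)addr;
	return 0;			/* 0 on success, -errno on failure */
}

static int mpfs_del_mac(const unsigned char *addr)	/* stand-in */
{
	(void)addr;
	return 0;
}

static void l2_add(struct l2_hash_node *hn)
{
	/* ... the primary flow rule would be installed here ... */
	hn->mpfs = (mpfs_add_mac(hn->addr) == 0);
}

static void l2_del(struct l2_hash_node *hn)
{
	if (hn->mpfs && mpfs_del_mac(hn->addr))
		fprintf(stderr, "failed to delete MAC from MPFS\n");
	/* ... the primary flow rule would be removed here ... */
}

int main(void)
{
	struct l2_hash_node hn = { { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }, false };

	l2_add(&hn);
	l2_del(&hn);
	return 0;
}

This bracketing keeps the two resources independent: a failed MPFS add degrades to local-only steering rather than failing the whole L2 rule, which matches the hunk below that only warns on l2_err.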
+ config MLX5_CORE_EN_DCB bool "Data Center Bridging (DCB) Support" default y diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 9d17e4e76d3a..22ed657d263a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -11,10 +11,13 @@ mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \ fpga/ipsec.o -mlx5_core-$(CONFIG_MLX5_CORE_EN) += eswitch.o eswitch_offloads.o \ - en_main.o en_common.o en_fs.o en_ethtool.o en_tx.o \ - en_rx.o en_rx_am.o en_txrx.o en_clock.o vxlan.o \ - en_tc.o en_arfs.o en_rep.o en_fs_ethtool.o en_selftest.o +mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ + en_tx.o en_rx.o en_rx_am.o en_txrx.o en_clock.o vxlan.o \ + en_arfs.o en_fs_ethtool.o en_selftest.o + +mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o + +mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o en_rep.o en_tc.o mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index a62f4b6a21a5..ff60cf7342ca 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -45,11 +45,70 @@ struct mlx5_device_context { unsigned long state; }; +struct mlx5_delayed_event { + struct list_head list; + struct mlx5_core_dev *dev; + enum mlx5_dev_event event; + unsigned long param; +}; + enum { MLX5_INTERFACE_ADDED, MLX5_INTERFACE_ATTACHED, }; +static void add_delayed_event(struct mlx5_priv *priv, + struct mlx5_core_dev *dev, + enum mlx5_dev_event event, + unsigned long param) +{ + struct mlx5_delayed_event *delayed_event; + + delayed_event = kzalloc(sizeof(*delayed_event), GFP_ATOMIC); + if (!delayed_event) { + mlx5_core_err(dev, "event %d is missed\n", event); + return; + } + + mlx5_core_dbg(dev, "Accumulating event %d\n", event); + delayed_event->dev = dev; + delayed_event->event = event; + delayed_event->param = param; + list_add_tail(&delayed_event->list, &priv->waiting_events_list); +} + +static void fire_delayed_event_locked(struct mlx5_device_context *dev_ctx, + struct mlx5_core_dev *dev, + struct mlx5_priv *priv) +{ + struct mlx5_delayed_event *de; + struct mlx5_delayed_event *n; + + /* stop delaying events */ + priv->is_accum_events = false; + + /* fire all accumulated events before new event comes */ + list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) { + dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param); + list_del(&de->list); + kfree(de); + } +} + +static void cleanup_delayed_evets(struct mlx5_priv *priv) +{ + struct mlx5_delayed_event *de; + struct mlx5_delayed_event *n; + + spin_lock_irq(&priv->ctx_lock); + priv->is_accum_events = false; + list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) { + list_del(&de->list); + kfree(de); + } + spin_unlock_irq(&priv->ctx_lock); +} + void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) { struct mlx5_device_context *dev_ctx; @@ -63,6 +122,12 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) return; dev_ctx->intf = intf; + /* accumulating events that can come after mlx5_ib calls to + * ib_register_device, till adding that interface to the events list. 
+ */ + + priv->is_accum_events = true; + dev_ctx->context = intf->add(dev); set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); if (intf->attach) @@ -71,6 +136,9 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) if (dev_ctx->context) { spin_lock_irq(&priv->ctx_lock); list_add_tail(&dev_ctx->list, &priv->ctx_list); + + fire_delayed_event_locked(dev_ctx, dev, priv); + #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING if (dev_ctx->intf->pfault) { if (priv->pfault) { @@ -84,6 +152,8 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) spin_unlock_irq(&priv->ctx_lock); } else { kfree(dev_ctx); + /* delete all accumulated events */ + cleanup_delayed_evets(priv); } } @@ -341,6 +411,9 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, spin_lock_irqsave(&priv->ctx_lock, flags); + if (priv->is_accum_events) + add_delayed_event(priv, dev, event, param); + list_for_each_entry(dev_ctx, &priv->ctx_list, list) if (dev_ctx->intf->event) dev_ctx->intf->event(dev, dev_ctx->context, event, param); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index dfccb5305e9c..eecbc6d4f51f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -36,6 +36,7 @@ #include <linux/tcp.h> #include <linux/mlx5/fs.h> #include "en.h" +#include "lib/mpfs.h" static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, struct mlx5e_l2_rule *ai, int type); @@ -65,6 +66,7 @@ struct mlx5e_l2_hash_node { struct hlist_node hlist; u8 action; struct mlx5e_l2_rule ai; + bool mpfs; }; static inline int mlx5e_hash_l2(u8 *addr) @@ -362,17 +364,30 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv) static void mlx5e_execute_l2_action(struct mlx5e_priv *priv, struct mlx5e_l2_hash_node *hn) { - switch (hn->action) { + u8 action = hn->action; + int l2_err = 0; + + switch (action) { case MLX5E_ACTION_ADD: mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH); + if (!is_multicast_ether_addr(hn->ai.addr)) { + l2_err = mlx5_mpfs_add_mac(priv->mdev, hn->ai.addr); + hn->mpfs = !l2_err; + } hn->action = MLX5E_ACTION_NONE; break; case MLX5E_ACTION_DEL: + if (!is_multicast_ether_addr(hn->ai.addr) && hn->mpfs) + l2_err = mlx5_mpfs_del_mac(priv->mdev, hn->ai.addr); mlx5e_del_l2_flow_rule(priv, &hn->ai); mlx5e_del_l2_from_hash(hn); break; } + + if (l2_err) + netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n", + action == MLX5E_ACTION_ADD ? 
"add" : "del", hn->ai.addr, l2_err); } static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 57f31fa478ce..981f8415b546 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2581,12 +2581,6 @@ static void mlx5e_build_channels_tx_maps(struct mlx5e_priv *priv) } } -static bool mlx5e_is_eswitch_vport_mngr(struct mlx5_core_dev *mdev) -{ - return (MLX5_CAP_GEN(mdev, vport_group_manager) && - MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH); -} - void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) { int num_txqs = priv->channels.num * priv->channels.params.num_tc; @@ -2600,7 +2594,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) mlx5e_activate_channels(&priv->channels); netif_tx_start_all_queues(priv->netdev); - if (mlx5e_is_eswitch_vport_mngr(priv->mdev)) + if (MLX5_VPORT_MANAGER(priv->mdev)) mlx5e_add_sqs_fwd_rules(priv); mlx5e_wait_channels_min_rx_wqes(&priv->channels); @@ -2611,7 +2605,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv) { mlx5e_redirect_rqts_to_drop(priv); - if (mlx5e_is_eswitch_vport_mngr(priv->mdev)) + if (MLX5_VPORT_MANAGER(priv->mdev)) mlx5e_remove_sqs_fwd_rules(priv); /* FIXME: This is a W/A only for tx timeout watch dog false alarm when @@ -2998,12 +2992,16 @@ static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd) return 0; } -static int mlx5e_setup_tc(struct net_device *netdev, u8 tc) +static int mlx5e_setup_tc_mqprio(struct net_device *netdev, + struct tc_mqprio_qopt *mqprio) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_channels new_channels = {}; + u8 tc = mqprio->num_tc; int err = 0; + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + if (tc && tc != MLX5E_MAX_NUM_TC) return -EINVAL; @@ -3027,39 +3025,42 @@ out: return err; } -static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle, - u32 chain_index, __be16 proto, - struct tc_to_netdev *tc) +#ifdef CONFIG_MLX5_ESWITCH +static int mlx5e_setup_tc_cls_flower(struct net_device *dev, + struct tc_cls_flower_offload *cls_flower) { struct mlx5e_priv *priv = netdev_priv(dev); - if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS)) - goto mqprio; + if (TC_H_MAJ(cls_flower->common.handle) != TC_H_MAJ(TC_H_INGRESS) || + cls_flower->common.chain_index) + return -EOPNOTSUPP; - if (chain_index) + switch (cls_flower->command) { + case TC_CLSFLOWER_REPLACE: + return mlx5e_configure_flower(priv, cls_flower); + case TC_CLSFLOWER_DESTROY: + return mlx5e_delete_flower(priv, cls_flower); + case TC_CLSFLOWER_STATS: + return mlx5e_stats_flower(priv, cls_flower); + default: return -EOPNOTSUPP; + } +} +#endif - switch (tc->type) { +static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + switch (type) { +#ifdef CONFIG_MLX5_ESWITCH case TC_SETUP_CLSFLOWER: - switch (tc->cls_flower->command) { - case TC_CLSFLOWER_REPLACE: - return mlx5e_configure_flower(priv, proto, tc->cls_flower); - case TC_CLSFLOWER_DESTROY: - return mlx5e_delete_flower(priv, tc->cls_flower); - case TC_CLSFLOWER_STATS: - return mlx5e_stats_flower(priv, tc->cls_flower); - } + return mlx5e_setup_tc_cls_flower(dev, type_data); +#endif + case TC_SETUP_MQPRIO: + return mlx5e_setup_tc_mqprio(dev, type_data); default: return -EOPNOTSUPP; } - -mqprio: - if (tc->type != TC_SETUP_MQPRIO) - return -EINVAL; - - tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - - return 
mlx5e_setup_tc(dev, tc->mqprio->num_tc); } static void @@ -3356,6 +3357,7 @@ static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) } } +#ifdef CONFIG_MLX5_ESWITCH static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac) { struct mlx5e_priv *priv = netdev_priv(dev); @@ -3458,6 +3460,7 @@ static int mlx5e_get_vf_stats(struct net_device *dev, return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1, vf_stats); } +#endif static void mlx5e_add_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti) @@ -3691,11 +3694,11 @@ static void mlx5e_netpoll(struct net_device *dev) } #endif -static const struct net_device_ops mlx5e_netdev_ops_basic = { +static const struct net_device_ops mlx5e_netdev_ops = { .ndo_open = mlx5e_open, .ndo_stop = mlx5e_close, .ndo_start_xmit = mlx5e_xmit, - .ndo_setup_tc = mlx5e_ndo_setup_tc, + .ndo_setup_tc = mlx5e_setup_tc, .ndo_select_queue = mlx5e_select_queue, .ndo_get_stats64 = mlx5e_get_stats, .ndo_set_rx_mode = mlx5e_set_rx_mode, @@ -3706,6 +3709,9 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = { .ndo_change_mtu = mlx5e_change_mtu, .ndo_do_ioctl = mlx5e_ioctl, .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, + .ndo_udp_tunnel_add = mlx5e_add_vxlan_port, + .ndo_udp_tunnel_del = mlx5e_del_vxlan_port, + .ndo_features_check = mlx5e_features_check, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx5e_rx_flow_steer, #endif @@ -3714,29 +3720,8 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = mlx5e_netpoll, #endif -}; - -static const struct net_device_ops mlx5e_netdev_ops_sriov = { - .ndo_open = mlx5e_open, - .ndo_stop = mlx5e_close, - .ndo_start_xmit = mlx5e_xmit, - .ndo_setup_tc = mlx5e_ndo_setup_tc, - .ndo_select_queue = mlx5e_select_queue, - .ndo_get_stats64 = mlx5e_get_stats, - .ndo_set_rx_mode = mlx5e_set_rx_mode, - .ndo_set_mac_address = mlx5e_set_mac, - .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid, - .ndo_set_features = mlx5e_set_features, - .ndo_change_mtu = mlx5e_change_mtu, - .ndo_do_ioctl = mlx5e_ioctl, - .ndo_udp_tunnel_add = mlx5e_add_vxlan_port, - .ndo_udp_tunnel_del = mlx5e_del_vxlan_port, - .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, - .ndo_features_check = mlx5e_features_check, -#ifdef CONFIG_RFS_ACCEL - .ndo_rx_flow_steer = mlx5e_rx_flow_steer, -#endif +#ifdef CONFIG_MLX5_ESWITCH + /* SRIOV E-Switch NDOs */ .ndo_set_vf_mac = mlx5e_set_vf_mac, .ndo_set_vf_vlan = mlx5e_set_vf_vlan, .ndo_set_vf_spoofchk = mlx5e_set_vf_spoofchk, @@ -3745,13 +3730,9 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = { .ndo_get_vf_config = mlx5e_get_vf_config, .ndo_set_vf_link_state = mlx5e_set_vf_link_state, .ndo_get_vf_stats = mlx5e_get_vf_stats, - .ndo_tx_timeout = mlx5e_tx_timeout, - .ndo_xdp = mlx5e_xdp, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = mlx5e_netpoll, -#endif .ndo_has_offload_stats = mlx5e_has_offload_stats, .ndo_get_offload_stats = mlx5e_get_offload_stats, +#endif }; static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) @@ -3981,9 +3962,11 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev) } } +#if IS_ENABLED(CONFIG_NET_SWITCHDEV) && IS_ENABLED(CONFIG_MLX5_ESWITCH) static const struct switchdev_ops mlx5e_switchdev_ops = { .switchdev_port_attr_get = mlx5e_attr_get, }; +#endif static void mlx5e_build_nic_netdev(struct net_device *netdev) { @@ -3994,15 +3977,12 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) 
SET_NETDEV_DEV(netdev, &mdev->pdev->dev); - if (MLX5_CAP_GEN(mdev, vport_group_manager)) { - netdev->netdev_ops = &mlx5e_netdev_ops_sriov; + netdev->netdev_ops = &mlx5e_netdev_ops; + #ifdef CONFIG_MLX5_CORE_EN_DCB - if (MLX5_CAP_GEN(mdev, qos)) - netdev->dcbnl_ops = &mlx5e_dcbnl_ops; + if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos)) + netdev->dcbnl_ops = &mlx5e_dcbnl_ops; #endif - } else { - netdev->netdev_ops = &mlx5e_netdev_ops_basic; - } netdev->watchdog_timeo = 15 * HZ; @@ -4074,8 +4054,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) mlx5e_set_netdev_dev_addr(netdev); -#ifdef CONFIG_NET_SWITCHDEV - if (MLX5_CAP_GEN(mdev, vport_group_manager)) +#if IS_ENABLED(CONFIG_NET_SWITCHDEV) && IS_ENABLED(CONFIG_MLX5_ESWITCH) + if (MLX5_VPORT_MANAGER(mdev)) netdev->switchdev_ops = &mlx5e_switchdev_ops; #endif @@ -4217,7 +4197,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) mlx5e_enable_async_events(priv); - if (MLX5_CAP_GEN(mdev, vport_group_manager)) + if (MLX5_VPORT_MANAGER(priv->mdev)) mlx5e_register_vport_reps(priv); if (netdev->reg_state != NETREG_REGISTERED) @@ -4251,7 +4231,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv) queue_work(priv->wq, &priv->set_rx_mode_work); - if (MLX5_CAP_GEN(mdev, vport_group_manager)) + if (MLX5_VPORT_MANAGER(priv->mdev)) mlx5e_unregister_vport_reps(priv); mlx5e_disable_async_events(priv); @@ -4424,32 +4404,29 @@ static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv) static void *mlx5e_add(struct mlx5_core_dev *mdev) { - struct mlx5_eswitch *esw = mdev->priv.eswitch; - int total_vfs = MLX5_TOTAL_VPORTS(mdev); - struct mlx5e_rep_priv *rpriv = NULL; + struct net_device *netdev; + void *rpriv = NULL; void *priv; - int vport; int err; - struct net_device *netdev; err = mlx5e_check_required_hca_cap(mdev); if (err) return NULL; - if (MLX5_CAP_GEN(mdev, vport_group_manager)) { - rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL); +#ifdef CONFIG_MLX5_ESWITCH + if (MLX5_VPORT_MANAGER(mdev)) { + rpriv = mlx5e_alloc_nic_rep_priv(mdev); if (!rpriv) { - mlx5_core_warn(mdev, - "Not creating net device, Failed to alloc rep priv data\n"); + mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n"); return NULL; } - rpriv->rep = &esw->offloads.vport_reps[0]; } +#endif netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, rpriv); if (!netdev) { mlx5_core_err(mdev, "mlx5e_create_netdev failed\n"); - goto err_unregister_reps; + goto err_free_rpriv; } priv = netdev_priv(netdev); @@ -4470,14 +4447,9 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev) err_detach: mlx5e_detach(mdev, priv); - err_destroy_netdev: mlx5e_destroy_netdev(priv); - -err_unregister_reps: - for (vport = 1; vport < total_vfs; vport++) - mlx5_eswitch_unregister_vport_rep(esw, vport); - +err_free_rpriv: kfree(rpriv); return NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 45e60be9c277..f3c494a4ecdf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -651,37 +651,42 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev, return 0; } -static int mlx5e_rep_ndo_setup_tc(struct net_device *dev, u32 handle, - u32 chain_index, __be16 proto, - struct tc_to_netdev *tc) +static int +mlx5e_rep_setup_tc_cls_flower(struct net_device *dev, + struct tc_cls_flower_offload *cls_flower) { struct mlx5e_priv *priv = netdev_priv(dev); - if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS)) + if 
(TC_H_MAJ(cls_flower->common.handle) != TC_H_MAJ(TC_H_INGRESS) || + cls_flower->common.chain_index) return -EOPNOTSUPP; - if (tc->egress_dev) { + if (cls_flower->egress_dev) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct net_device *uplink_dev = mlx5_eswitch_get_uplink_netdev(esw); - return uplink_dev->netdev_ops->ndo_setup_tc(uplink_dev, handle, - chain_index, - proto, tc); + dev = mlx5_eswitch_get_uplink_netdev(esw); + return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, + cls_flower); } - if (chain_index) + switch (cls_flower->command) { + case TC_CLSFLOWER_REPLACE: + return mlx5e_configure_flower(priv, cls_flower); + case TC_CLSFLOWER_DESTROY: + return mlx5e_delete_flower(priv, cls_flower); + case TC_CLSFLOWER_STATS: + return mlx5e_stats_flower(priv, cls_flower); + default: return -EOPNOTSUPP; + } +} - switch (tc->type) { +static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + switch (type) { case TC_SETUP_CLSFLOWER: - switch (tc->cls_flower->command) { - case TC_CLSFLOWER_REPLACE: - return mlx5e_configure_flower(priv, proto, tc->cls_flower); - case TC_CLSFLOWER_DESTROY: - return mlx5e_delete_flower(priv, tc->cls_flower); - case TC_CLSFLOWER_STATS: - return mlx5e_stats_flower(priv, tc->cls_flower); - } + return mlx5e_rep_setup_tc_cls_flower(dev, type_data); default: return -EOPNOTSUPP; } @@ -773,7 +778,7 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = { .ndo_stop = mlx5e_rep_close, .ndo_start_xmit = mlx5e_xmit, .ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name, - .ndo_setup_tc = mlx5e_rep_ndo_setup_tc, + .ndo_setup_tc = mlx5e_rep_setup_tc, .ndo_get_stats64 = mlx5e_rep_get_stats, .ndo_has_offload_stats = mlx5e_has_offload_stats, .ndo_get_offload_stats = mlx5e_get_offload_stats, @@ -1099,3 +1104,16 @@ void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv) mlx5e_rep_unregister_vf_vports(priv); /* VFs vports */ mlx5_eswitch_unregister_vport_rep(esw, 0); /* UPLINK PF*/ } + +void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev) +{ + struct mlx5_eswitch *esw = mdev->priv.eswitch; + struct mlx5e_rep_priv *rpriv; + + rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL); + if (!rpriv) + return NULL; + + rpriv->rep = &esw->offloads.vport_reps[0]; + return rpriv; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h index a0a1a7a1d6c0..5659ed9f51e6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h @@ -38,6 +38,7 @@ #include "eswitch.h" #include "en.h" +#ifdef CONFIG_MLX5_ESWITCH struct mlx5e_neigh_update_table { struct rhashtable neigh_ht; /* Save the neigh hash entries in a list in addition to the hash table @@ -123,6 +124,7 @@ struct mlx5e_encap_entry { int encap_size; }; +void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev); void mlx5e_register_vport_reps(struct mlx5e_priv *priv); void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv); bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv); @@ -141,5 +143,12 @@ void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e); void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv); +#else /* CONFIG_MLX5_ESWITCH */ +static inline void mlx5e_register_vport_reps(struct mlx5e_priv *priv) {} +static inline void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv) {} +static inline bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { return false; } +static inline int mlx5e_add_sqs_fwd_rules(struct 
mlx5e_priv *priv) { return 0; } +static inline void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) {} +#endif #endif /* __MLX5E_REP_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 325b2c8c1c6d..8e224bcbc6a6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -857,6 +857,7 @@ wq_ll_pop: &wqe->next.next_wqe_index); } +#ifdef CONFIG_MLX5_ESWITCH void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { struct net_device *netdev = rq->netdev; @@ -901,6 +902,7 @@ wq_ll_pop: mlx5_wq_ll_pop(&rq->wq, wqe_counter_be, &wqe->next.next_wqe_index); } +#endif static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 3c536f560dd2..3b10d3df7627 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1326,7 +1326,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, LIST_HEAD(actions); int err; - if (tc_no_actions(exts)) + if (!tcf_exts_has_actions(exts)) return -EINVAL; attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; @@ -1839,7 +1839,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, bool encap = false; int err = 0; - if (tc_no_actions(exts)) + if (!tcf_exts_has_actions(exts)) return -EINVAL; memset(attr, 0, sizeof(*attr)); @@ -1939,7 +1939,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, return err; } -int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, +int mlx5e_configure_flower(struct mlx5e_priv *priv, struct tc_cls_flower_offload *f) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index ecbe30d808ae..c14c263a739b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -33,12 +33,15 @@ #ifndef __MLX5_EN_TC_H__ #define __MLX5_EN_TC_H__ +#include <net/pkt_cls.h> + #define MLX5E_TC_FLOW_ID_MASK 0x0000ffff +#ifdef CONFIG_MLX5_ESWITCH int mlx5e_tc_init(struct mlx5e_priv *priv); void mlx5e_tc_cleanup(struct mlx5e_priv *priv); -int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, +int mlx5e_configure_flower(struct mlx5e_priv *priv, struct tc_cls_flower_offload *f); int mlx5e_delete_flower(struct mlx5e_priv *priv, struct tc_cls_flower_offload *f); @@ -60,4 +63,10 @@ static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv) return atomic_read(&priv->fs.tc.ht.nelems); } +#else /* CONFIG_MLX5_ESWITCH */ +static inline int mlx5e_tc_init(struct mlx5e_priv *priv) { return 0; } +static inline void mlx5e_tc_cleanup(struct mlx5e_priv *priv) {} +static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv) { return 0; } +#endif + #endif /* __MLX5_EN_TC_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 52b9a64cd3a2..de704ff5619a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -36,9 +36,7 @@ #include <linux/mlx5/cmd.h> #include "mlx5_core.h" #include "fpga/core.h" -#ifdef CONFIG_MLX5_CORE_EN #include "eswitch.h" -#endif enum { MLX5_EQE_SIZE = sizeof(struct mlx5_eqe), @@ -467,11 +465,9 @@ static irqreturn_t 
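The en_tc.c hunks above swap the removed tc_no_actions() helper for the inverted tcf_exts_has_actions() test before walking a rule's action list. A toy sketch of that guard-then-iterate shape, with invented types (struct action, parse_actions), not the TC API itself:

#include <errno.h>
#include <stdio.h>

struct action { int id; };

/* Analog of parse_tc_*_actions(): refuse a rule with no actions up
 * front, only then walk the list. */
static int parse_actions(const struct action *acts, int n)
{
	if (n == 0)
		return -EINVAL;   /* the !tcf_exts_has_actions() case */
	for (int i = 0; i < n; i++)
		printf("action %d\n", acts[i].id);
	return 0;
}

int main(void)
{
	struct action a[] = { { 1 }, { 2 } };

	printf("%d\n", parse_actions(a, 2));   /* 0 */
	printf("%d\n", parse_actions(a, 0));   /* -EINVAL */
	return 0;
}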
mlx5_eq_int(int irq, void *eq_ptr) } break; -#ifdef CONFIG_MLX5_CORE_EN case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE: mlx5_eswitch_vport_event(dev->priv.eswitch, eqe); break; -#endif case MLX5_EVENT_TYPE_PORT_MODULE_EVENT: mlx5_port_module_event(dev, eqe); @@ -688,9 +684,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) u64 async_event_mask = MLX5_ASYNC_EVENT_MASK; int err; - if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH && - MLX5_CAP_GEN(dev, vport_group_manager) && - mlx5_core_is_pf(dev)) + if (MLX5_VPORT_MANAGER(dev)) async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE); if (MLX5_CAP_GEN(dev, port_module_event)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 8b18cc9ec026..fd51f0ea8df9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -46,19 +46,13 @@ enum { MLX5_ACTION_DEL = 2, }; -/* E-Switch UC L2 table hash node */ -struct esw_uc_addr { - struct l2addr_node node; - u32 table_index; - u32 vport; -}; - /* Vport UC/MC hash node */ struct vport_addr { struct l2addr_node node; u8 action; u32 vport; - struct mlx5_flow_handle *flow_rule; /* SRIOV only */ + struct mlx5_flow_handle *flow_rule; + bool mpfs; /* UC MAC was added to MPFs */ /* A flag indicating that mac was added due to mc promiscuous vport */ bool mc_promisc; }; @@ -154,81 +148,6 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport, return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in)); } -/* HW L2 Table (MPFS) management */ -static int set_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index, - u8 *mac, u8 vlan_valid, u16 vlan) -{ - u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)] = {0}; - u8 *in_mac_addr; - - MLX5_SET(set_l2_table_entry_in, in, opcode, - MLX5_CMD_OP_SET_L2_TABLE_ENTRY); - MLX5_SET(set_l2_table_entry_in, in, table_index, index); - MLX5_SET(set_l2_table_entry_in, in, vlan_valid, vlan_valid); - MLX5_SET(set_l2_table_entry_in, in, vlan, vlan); - - in_mac_addr = MLX5_ADDR_OF(set_l2_table_entry_in, in, mac_address); - ether_addr_copy(&in_mac_addr[2], mac); - - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); -} - -static int del_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index) -{ - u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)] = {0}; - - MLX5_SET(delete_l2_table_entry_in, in, opcode, - MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY); - MLX5_SET(delete_l2_table_entry_in, in, table_index, index); - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); -} - -static int alloc_l2_table_index(struct mlx5_l2_table *l2_table, u32 *ix) -{ - int err = 0; - - *ix = find_first_zero_bit(l2_table->bitmap, l2_table->size); - if (*ix >= l2_table->size) - err = -ENOSPC; - else - __set_bit(*ix, l2_table->bitmap); - - return err; -} - -static void free_l2_table_index(struct mlx5_l2_table *l2_table, u32 ix) -{ - __clear_bit(ix, l2_table->bitmap); -} - -static int set_l2_table_entry(struct mlx5_core_dev *dev, u8 *mac, - u8 vlan_valid, u16 vlan, - u32 *index) -{ - struct mlx5_l2_table *l2_table = &dev->priv.eswitch->l2_table; - int err; - - err = alloc_l2_table_index(l2_table, index); - if (err) - return err; - - err = set_l2_table_entry_cmd(dev, *index, mac, vlan_valid, vlan); - if (err) - free_l2_table_index(l2_table, *index); - - return err; -} - -static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index) 
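The index allocator being removed from eswitch.c here (and reintroduced in lib/mpfs.c below) is a plain first-fit bitmap: find_first_zero_bit picks a slot, __set_bit claims it, __clear_bit releases it. A self-contained userspace equivalent over a 64-entry table (size and names are illustrative):

#include <errno.h>
#include <stdio.h>

#define TABLE_SIZE 64   /* the driver sizes this from log_max_l2_table */

static unsigned long long bitmap;   /* one bit per table slot */

static int alloc_index(unsigned *ix)
{
	for (unsigned i = 0; i < TABLE_SIZE; i++) {
		if (!(bitmap & (1ull << i))) {
			bitmap |= 1ull << i;   /* first free slot wins */
			*ix = i;
			return 0;
		}
	}
	return -ENOSPC;   /* table full */
}

static void free_index(unsigned ix)
{
	bitmap &= ~(1ull << ix);
}

int main(void)
{
	unsigned ix;

	if (!alloc_index(&ix)) {
		printf("got slot %u\n", ix);
		free_index(ix);
	}
	return 0;
}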
-{ - struct mlx5_l2_table *l2_table = &dev->priv.eswitch->l2_table; - - del_l2_table_entry_cmd(dev, index); - free_l2_table_index(l2_table, index); -} - /* E-Switch FDB */ static struct mlx5_flow_handle * __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, @@ -455,65 +374,60 @@ typedef int (*vport_addr_action)(struct mlx5_eswitch *esw, static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) { - struct hlist_head *hash = esw->l2_table.l2_hash; - struct esw_uc_addr *esw_uc; u8 *mac = vaddr->node.addr; u32 vport = vaddr->vport; int err; - esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr); - if (esw_uc) { + /* Skip mlx5_mpfs_add_mac for PFs, + * it is already done by the PF netdev in mlx5e_execute_l2_action + */ + if (!vport) + goto fdb_add; + + err = mlx5_mpfs_add_mac(esw->dev, mac); + if (err) { esw_warn(esw->dev, - "Failed to set L2 mac(%pM) for vport(%d), mac is already in use by vport(%d)\n", - mac, vport, esw_uc->vport); - return -EEXIST; + "Failed to add L2 table mac(%pM) for vport(%d), err(%d)\n", + mac, vport, err); + return err; } + vaddr->mpfs = true; - esw_uc = l2addr_hash_add(hash, mac, struct esw_uc_addr, GFP_KERNEL); - if (!esw_uc) - return -ENOMEM; - esw_uc->vport = vport; - - err = set_l2_table_entry(esw->dev, mac, 0, 0, &esw_uc->table_index); - if (err) - goto abort; - +fdb_add: /* SRIOV is enabled: Forward UC MAC to vport */ if (esw->fdb_table.fdb && esw->mode == SRIOV_LEGACY) vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport); - esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM index:%d fr(%p)\n", - vport, mac, esw_uc->table_index, vaddr->flow_rule); - return err; -abort: - l2addr_hash_del(esw_uc); + esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n", + vport, mac, vaddr->flow_rule); + return err; } static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) { - struct hlist_head *hash = esw->l2_table.l2_hash; - struct esw_uc_addr *esw_uc; u8 *mac = vaddr->node.addr; u32 vport = vaddr->vport; + int err = 0; - esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr); - if (!esw_uc || esw_uc->vport != vport) { - esw_debug(esw->dev, - "MAC(%pM) doesn't belong to vport (%d)\n", - mac, vport); - return -EINVAL; - } - esw_debug(esw->dev, "\tDELETE UC MAC: vport[%d] %pM index:%d fr(%p)\n", - vport, mac, esw_uc->table_index, vaddr->flow_rule); + /* Skip mlx5_mpfs_del_mac for PFs, + * it is already done by the PF netdev in mlx5e_execute_l2_action + */ + if (!vport || !vaddr->mpfs) + goto fdb_del; - del_l2_table_entry(esw->dev, esw_uc->table_index); + err = mlx5_mpfs_del_mac(esw->dev, mac); + if (err) + esw_warn(esw->dev, + "Failed to del L2 table mac(%pM) for vport(%d), err(%d)\n", + mac, vport, err); + vaddr->mpfs = false; +fdb_del: if (vaddr->flow_rule) mlx5_del_flow_rules(vaddr->flow_rule); vaddr->flow_rule = NULL; - l2addr_hash_del(esw_uc); return 0; } @@ -1611,13 +1525,14 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) } /* Public E-Switch API */ +#define ESW_ALLOWED(esw) ((esw) && MLX5_VPORT_MANAGER((esw)->dev)) + int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { int err; int i, enabled_events; - if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || - MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + if (!ESW_ALLOWED(esw)) return 0; if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) || @@ -1634,7 +1549,6 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) esw_info(esw->dev, "E-Switch enable 
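esw_add_uc_addr()/esw_del_uc_addr() above now delegate the HW L2 table to mlx5_mpfs_add_mac()/mlx5_mpfs_del_mac(), skip vport 0 because the PF netdev programs MPFS itself, and record a successful add in vaddr->mpfs so the delete path only undoes what actually happened. A compact sketch of that add/del symmetry (hw_table_add/hw_table_del are stand-ins):

#include <stdio.h>

struct vport_addr { int mpfs; };   /* mirrors the new bool in the hunk above */

static int hw_table_add(void) { return 0; }   /* stand-in for mlx5_mpfs_add_mac */
static void hw_table_del(void) {}             /* stand-in for mlx5_mpfs_del_mac */

static int uc_addr_add(struct vport_addr *v, int vport)
{
	/* vport 0 (the PF) already programmed the HW table from its own
	 * netdev path, so only VF vports add the entry here. */
	if (vport == 0)
		return 0;
	if (hw_table_add())
		return -1;
	v->mpfs = 1;   /* record exactly what must be undone later */
	return 0;
}

static void uc_addr_del(struct vport_addr *v, int vport)
{
	if (vport && v->mpfs) {
		hw_table_del();
		v->mpfs = 0;
	}
}

int main(void)
{
	struct vport_addr v = { 0 };

	uc_addr_add(&v, 1);
	printf("mpfs=%d\n", v.mpfs);
	uc_addr_del(&v, 1);
	printf("mpfs=%d\n", v.mpfs);
	return 0;
}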
SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode); esw->mode = mode; - esw_disable_vport(esw, 0); if (mode == SRIOV_LEGACY) err = esw_create_legacy_fdb_table(esw, nvfs + 1); @@ -1647,7 +1561,11 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) if (err) esw_warn(esw->dev, "Failed to create eswitch TSAR"); - enabled_events = (mode == SRIOV_LEGACY) ? SRIOV_VPORT_EVENTS : UC_ADDR_CHANGE; + /* Don't enable vport events when in SRIOV_OFFLOADS mode, since: + * 1. L2 table (MPFS) is programmed by PF/VF representors netdevs set_rx_mode + * 2. FDB/Eswitch is programmed by user space tools + */ + enabled_events = (mode == SRIOV_LEGACY) ? SRIOV_VPORT_EVENTS : 0; for (i = 0; i <= nvfs; i++) esw_enable_vport(esw, i, enabled_events); @@ -1656,7 +1574,6 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) return 0; abort: - esw_enable_vport(esw, 0, UC_ADDR_CHANGE); esw->mode = SRIOV_NONE; return err; } @@ -1667,9 +1584,7 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) int nvports; int i; - if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || - MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH || - esw->mode == SRIOV_NONE) + if (!ESW_ALLOWED(esw) || esw->mode == SRIOV_NONE) return; esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n", @@ -1692,44 +1607,21 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) esw_offloads_cleanup(esw, nvports); esw->mode = SRIOV_NONE; - /* VPORT 0 (PF) must be enabled back with non-sriov configuration */ - esw_enable_vport(esw, 0, UC_ADDR_CHANGE); -} - -void mlx5_eswitch_attach(struct mlx5_eswitch *esw) -{ - if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || - MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) - return; - - esw_enable_vport(esw, 0, UC_ADDR_CHANGE); - /* VF Vports will be enabled when SRIOV is enabled */ -} - -void mlx5_eswitch_detach(struct mlx5_eswitch *esw) -{ - if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || - MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) - return; - - esw_disable_vport(esw, 0); } int mlx5_eswitch_init(struct mlx5_core_dev *dev) { - int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table); int total_vports = MLX5_TOTAL_VPORTS(dev); struct mlx5_eswitch *esw; int vport_num; int err; - if (!MLX5_CAP_GEN(dev, vport_group_manager) || - MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + if (!MLX5_VPORT_MANAGER(dev)) return 0; esw_info(dev, - "Total vports %d, l2 table size(%d), per vport: max uc(%d) max mc(%d)\n", - total_vports, l2_table_size, + "Total vports %d, per vport: max uc(%d) max mc(%d)\n", + total_vports, MLX5_MAX_UC_PER_VPORT(dev), MLX5_MAX_MC_PER_VPORT(dev)); @@ -1739,14 +1631,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) esw->dev = dev; - esw->l2_table.bitmap = kcalloc(BITS_TO_LONGS(l2_table_size), - sizeof(uintptr_t), GFP_KERNEL); - if (!esw->l2_table.bitmap) { - err = -ENOMEM; - goto abort; - } - esw->l2_table.size = l2_table_size; - esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq"); if (!esw->work_queue) { err = -ENOMEM; @@ -1797,7 +1681,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) abort: if (esw->work_queue) destroy_workqueue(esw->work_queue); - kfree(esw->l2_table.bitmap); kfree(esw->vports); kfree(esw->offloads.vport_reps); kfree(esw); @@ -1806,15 +1689,13 @@ abort: void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) { - if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || - MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + 
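mlx5_eswitch_enable_sriov() above keeps the kernel's usual unwind style: each setup step that fails jumps to a label that undoes only the steps already completed, in reverse order. A minimal standalone rendering of the shape (init_fdb/init_acls are invented stand-ins, the second failing on purpose):

#include <stdio.h>

static int init_fdb(void)  { return 0; }
static int init_acls(void) { return -1; }   /* fails, to exercise the unwind */
static void cleanup_fdb(void) { puts("fdb cleaned up"); }

/* Same shape as the abort: label in the hunk above: failures fall
 * through the labels so teardown runs in reverse setup order. */
static int enable(void)
{
	if (init_fdb())
		goto err_fdb;
	if (init_acls())
		goto err_acls;
	return 0;

err_acls:
	cleanup_fdb();
err_fdb:
	return -1;
}

int main(void)
{
	printf("enable -> %d\n", enable());
	return 0;
}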
if (!esw || !MLX5_VPORT_MANAGER(esw->dev)) return; esw_info(esw->dev, "cleanup\n"); esw->dev->priv.eswitch = NULL; destroy_workqueue(esw->work_queue); - kfree(esw->l2_table.bitmap); kfree(esw->offloads.vport_reps); kfree(esw->vports); kfree(esw); @@ -1838,8 +1719,6 @@ void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe) } /* Vport Administration */ -#define ESW_ALLOWED(esw) \ - (esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev)) #define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports) int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 834a33050969..565c8b7a399a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -37,6 +37,15 @@ #include <linux/if_link.h> #include <net/devlink.h> #include <linux/mlx5/device.h> +#include "lib/mpfs.h" + +enum { + SRIOV_NONE, + SRIOV_LEGACY, + SRIOV_OFFLOADS +}; + +#ifdef CONFIG_MLX5_ESWITCH #define MLX5_MAX_UC_PER_VPORT(dev) \ (1 << MLX5_CAP_GEN(dev, log_max_current_uc_list)) @@ -44,9 +53,6 @@ #define MLX5_MAX_MC_PER_VPORT(dev) \ (1 << MLX5_CAP_GEN(dev, log_max_current_mc_list)) -#define MLX5_L2_ADDR_HASH_SIZE (BIT(BITS_PER_BYTE)) -#define MLX5_L2_ADDR_HASH(addr) (addr[5]) - #define FDB_UPLINK_VPORT 0xffff #define MLX5_MIN_BW_SHARE 1 @@ -54,48 +60,6 @@ #define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \ min_t(u32, max_t(u32, (rate) / (divider), MLX5_MIN_BW_SHARE), limit) -/* L2 -mac address based- hash helpers */ -struct l2addr_node { - struct hlist_node hlist; - u8 addr[ETH_ALEN]; -}; - -#define for_each_l2hash_node(hn, tmp, hash, i) \ - for (i = 0; i < MLX5_L2_ADDR_HASH_SIZE; i++) \ - hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist) - -#define l2addr_hash_find(hash, mac, type) ({ \ - int ix = MLX5_L2_ADDR_HASH(mac); \ - bool found = false; \ - type *ptr = NULL; \ - \ - hlist_for_each_entry(ptr, &hash[ix], node.hlist) \ - if (ether_addr_equal(ptr->node.addr, mac)) {\ - found = true; \ - break; \ - } \ - if (!found) \ - ptr = NULL; \ - ptr; \ -}) - -#define l2addr_hash_add(hash, mac, type, gfp) ({ \ - int ix = MLX5_L2_ADDR_HASH(mac); \ - type *ptr = NULL; \ - \ - ptr = kzalloc(sizeof(type), gfp); \ - if (ptr) { \ - ether_addr_copy(ptr->node.addr, mac); \ - hlist_add_head(&ptr->node.hlist, &hash[ix]);\ - } \ - ptr; \ -}) - -#define l2addr_hash_del(ptr) ({ \ - hlist_del(&ptr->node.hlist); \ - kfree(ptr); \ -}) - struct vport_ingress { struct mlx5_flow_table *acl; struct mlx5_flow_group *allow_untagged_spoofchk_grp; @@ -150,12 +114,6 @@ struct mlx5_vport { u16 enabled_events; }; -struct mlx5_l2_table { - struct hlist_head l2_hash[MLX5_L2_ADDR_HASH_SIZE]; - u32 size; - unsigned long *bitmap; -}; - struct mlx5_eswitch_fdb { void *fdb; union { @@ -175,12 +133,6 @@ struct mlx5_eswitch_fdb { }; }; -enum { - SRIOV_NONE, - SRIOV_LEGACY, - SRIOV_OFFLOADS -}; - struct mlx5_esw_sq { struct mlx5_flow_handle *send_to_vport_rule; struct list_head list; @@ -222,7 +174,6 @@ struct esw_mc_addr { /* SRIOV only */ struct mlx5_eswitch { struct mlx5_core_dev *dev; - struct mlx5_l2_table l2_table; struct mlx5_eswitch_fdb fdb_table; struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE]; struct workqueue_struct *work_queue; @@ -250,8 +201,6 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports); /* E-Switch API */ int mlx5_eswitch_init(struct mlx5_core_dev *dev); void mlx5_eswitch_cleanup(struct mlx5_eswitch 
*esw); -void mlx5_eswitch_attach(struct mlx5_eswitch *esw); -void mlx5_eswitch_detach(struct mlx5_eswitch *esw); void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe); int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode); void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw); @@ -345,4 +294,13 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, #define esw_debug(dev, format, ...) \ mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__) +#else /* CONFIG_MLX5_ESWITCH */ +/* eswitch API stubs */ +static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } +static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {} +static inline void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe) {} +static inline int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { return 0; } +static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) {} +#endif /* CONFIG_MLX5_ESWITCH */ + #endif /* __MLX5_ESWITCH_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 95b64025ce36..e7c186b58579 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -433,6 +433,8 @@ static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw) struct mlx5_flow_table *fdb = NULL; int esw_size, err = 0; u32 flags = 0; + u32 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) | + MLX5_CAP_GEN(dev, max_flow_counter_15_0); root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); if (!root_ns) { @@ -443,9 +445,9 @@ static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw) esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n", MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size), - MLX5_CAP_GEN(dev, max_flow_counter), ESW_OFFLOADS_NUM_GROUPS); + max_flow_counter, ESW_OFFLOADS_NUM_GROUPS); - esw_size = min_t(int, MLX5_CAP_GEN(dev, max_flow_counter) * ESW_OFFLOADS_NUM_GROUPS, + esw_size = min_t(int, max_flow_counter * ESW_OFFLOADS_NUM_GROUPS, 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index e750f07793b8..16b32f31d691 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -359,7 +359,7 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev, return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } -int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id) +int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id) { u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0}; u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0}; @@ -374,7 +374,7 @@ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id) return err; } -int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id) +int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id) { u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0}; u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)] = {0}; @@ -385,7 +385,7 @@ int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id) return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } -int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id, +int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, 
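The eswitch_offloads.c and fs_cmd.c hunks above widen flow-counter ids from u16 to u32 because newer firmware reports the limit as two 16-bit capability fields that must be stitched together. The composition itself, runnable with example values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Two 16-bit capability fields, exactly as the hunk composes them. */
	uint16_t max_flow_counter_31_16 = 0x0001;   /* example values */
	uint16_t max_flow_counter_15_0  = 0x8000;

	uint32_t max_flow_counter =
		((uint32_t)max_flow_counter_31_16 << 16) | max_flow_counter_15_0;

	printf("max_flow_counter = %u\n", max_flow_counter);   /* 98304 */
	return 0;
}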
u32 id, u64 *packets, u64 *bytes) { u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) + @@ -409,14 +409,14 @@ int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id, } struct mlx5_cmd_fc_bulk { - u16 id; + u32 id; int num; int outlen; u32 out[0]; }; struct mlx5_cmd_fc_bulk * -mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num) +mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num) { struct mlx5_cmd_fc_bulk *b; int outlen = @@ -453,7 +453,7 @@ mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b) } void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev, - struct mlx5_cmd_fc_bulk *b, u16 id, + struct mlx5_cmd_fc_bulk *b, u32 id, u64 *packets, u64 *bytes) { int index = id - b->id; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h index 0f98a7cf4877..c6d7bdf255b6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h @@ -74,20 +74,20 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, u32 underlay_qpn); -int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id); -int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id); -int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id, +int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id); +int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id); +int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id, u64 *packets, u64 *bytes); struct mlx5_cmd_fc_bulk; struct mlx5_cmd_fc_bulk * -mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num); +mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num); void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b); int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b); void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev, - struct mlx5_cmd_fc_bulk *b, u16 id, + struct mlx5_cmd_fc_bulk *b, u32 id, u64 *packets, u64 *bytes); #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 990acee6fb09..9fb5a333df52 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -136,7 +136,7 @@ struct mlx5_fc { u64 lastpackets; u64 lastbytes; - u16 id; + u32 id; bool deleted; bool aging; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c index 6507d8acc54d..89d1f8650033 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c @@ -38,6 +38,8 @@ #include "fs_cmd.h" #define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000) +/* Max number of counters to query in bulk read is 32K */ +#define MLX5_SW_MAX_COUNTERS_BULK BIT(15) /* locking scheme: * @@ -90,16 +92,21 @@ static void mlx5_fc_stats_insert(struct rb_root *root, struct mlx5_fc *counter) rb_insert_color(&counter->node, root); } +/* The function returns the last node that was queried so the caller + * function can continue calling it till all counters are queried. 
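fs_counters.c above caps a bulk read at the smaller of the new 32K software ceiling and the device's log_max_flow_counter_bulk capability, and rounds the first counter id down to a multiple of 4 as the firmware requires. Both computations, standalone (the capability value is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define SW_MAX_COUNTERS_BULK (1 << 15)   /* 32K, the new software ceiling */

int main(void)
{
	int log_max_flow_counter_bulk = 17;   /* hypothetical device cap */
	int hw_max = 1 << log_max_flow_counter_bulk;
	int max_bulk = hw_max < SW_MAX_COUNTERS_BULK ? hw_max
						     : SW_MAX_COUNTERS_BULK;

	/* Bulk queries must start on a 4-aligned counter id. */
	uint32_t first_id  = 0x1002;
	uint32_t afirst_id = first_id & ~0x3u;

	printf("bulk=%d first=0x%x aligned=0x%x\n",
	       max_bulk, (unsigned)first_id, (unsigned)afirst_id);
	return 0;
}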
+ */ static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev, struct mlx5_fc *first, - u16 last_id) + u32 last_id) { struct mlx5_cmd_fc_bulk *b; struct rb_node *node = NULL; - u16 afirst_id; + u32 afirst_id; int num; int err; - int max_bulk = 1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk); + + int max_bulk = min_t(int, MLX5_SW_MAX_COUNTERS_BULK, + (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk))); /* first id must be aligned to 4 when using bulk query */ afirst_id = first->id & ~0x3; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c new file mode 100644 index 000000000000..7cb67122e8b5 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c @@ -0,0 +1,201 @@ +/* + * Copyright (c) 2017, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/etherdevice.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/mlx5_ifc.h> +#include "mlx5_core.h" +#include "lib/mpfs.h" + +/* HW L2 Table (MPFS) management */ +static int set_l2table_entry_cmd(struct mlx5_core_dev *dev, u32 index, u8 *mac) +{ + u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)] = {0}; + u8 *in_mac_addr; + + MLX5_SET(set_l2_table_entry_in, in, opcode, MLX5_CMD_OP_SET_L2_TABLE_ENTRY); + MLX5_SET(set_l2_table_entry_in, in, table_index, index); + + in_mac_addr = MLX5_ADDR_OF(set_l2_table_entry_in, in, mac_address); + ether_addr_copy(&in_mac_addr[2], mac); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} + +static int del_l2table_entry_cmd(struct mlx5_core_dev *dev, u32 index) +{ + u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)] = {0}; + + MLX5_SET(delete_l2_table_entry_in, in, opcode, MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY); + MLX5_SET(delete_l2_table_entry_in, in, table_index, index); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} + +/* UC L2 table hash node */ +struct l2table_node { + struct l2addr_node node; + u32 index; /* index in HW l2 table */ +}; + +struct mlx5_mpfs { + struct hlist_head hash[MLX5_L2_ADDR_HASH_SIZE]; + struct mutex lock; /* Synchronize l2 table access */ + u32 size; + unsigned long *bitmap; +}; + +static int alloc_l2table_index(struct mlx5_mpfs *l2table, u32 *ix) +{ + int err = 0; + + *ix = find_first_zero_bit(l2table->bitmap, l2table->size); + if (*ix >= l2table->size) + err = -ENOSPC; + else + __set_bit(*ix, l2table->bitmap); + + return err; +} + +static void free_l2table_index(struct mlx5_mpfs *l2table, u32 ix) +{ + __clear_bit(ix, l2table->bitmap); +} + +int mlx5_mpfs_init(struct mlx5_core_dev *dev) +{ + int l2table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table); + struct mlx5_mpfs *mpfs; + + if (!MLX5_VPORT_MANAGER(dev)) + return 0; + + mpfs = kzalloc(sizeof(*mpfs), GFP_KERNEL); + if (!mpfs) + return -ENOMEM; + + mutex_init(&mpfs->lock); + mpfs->size = l2table_size; + mpfs->bitmap = kcalloc(BITS_TO_LONGS(l2table_size), + sizeof(uintptr_t), GFP_KERNEL); + if (!mpfs->bitmap) { + kfree(mpfs); + return -ENOMEM; + } + + dev->priv.mpfs = mpfs; + return 0; +} + +void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev) +{ + struct mlx5_mpfs *mpfs = dev->priv.mpfs; + + if (!MLX5_VPORT_MANAGER(dev)) + return; + + WARN_ON(!hlist_empty(mpfs->hash)); + kfree(mpfs->bitmap); + kfree(mpfs); +} + +int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) +{ + struct mlx5_mpfs *mpfs = dev->priv.mpfs; + struct l2table_node *l2addr; + u32 index; + int err; + + if (!MLX5_VPORT_MANAGER(dev)) + return 0; + + mutex_lock(&mpfs->lock); + + l2addr = l2addr_hash_find(mpfs->hash, mac, struct l2table_node); + if (l2addr) { + err = -EEXIST; + goto abort; + } + + err = alloc_l2table_index(mpfs, &index); + if (err) + goto abort; + + l2addr = l2addr_hash_add(mpfs->hash, mac, struct l2table_node, GFP_KERNEL); + if (!l2addr) { + free_l2table_index(mpfs, index); + err = -ENOMEM; + goto abort; + } + + l2addr->index = index; + err = set_l2table_entry_cmd(dev, index, mac); + if (err) { + l2addr_hash_del(l2addr); + free_l2table_index(mpfs, index); + } + + mlx5_core_dbg(dev, "MPFS mac added %pM, index (%d)\n", mac, index); +abort: + mutex_unlock(&mpfs->lock); + return err; +} + +int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) +{ + struct mlx5_mpfs *mpfs = dev->priv.mpfs; + struct l2table_node *l2addr; + int err = 
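mlx5_mpfs_add_mac() above serializes on a mutex, rejects duplicates with -EEXIST before consuming a table slot, and only then allocates an index and programs the entry. A toy userspace analog with a fixed 8-slot array (names and sizes invented; build with -lpthread):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define SLOTS 8
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char table[SLOTS][6];
static int used[SLOTS];

static int mpfs_add(const unsigned char *mac)
{
	int err = -ENOSPC, free_ix = -1;

	pthread_mutex_lock(&lock);
	for (int i = 0; i < SLOTS; i++) {
		if (used[i] && !memcmp(table[i], mac, 6)) {
			err = -EEXIST;   /* duplicate: reject before using a slot */
			goto out;
		}
		if (!used[i] && free_ix < 0)
			free_ix = i;
	}
	if (free_ix >= 0) {
		memcpy(table[free_ix], mac, 6);
		used[free_ix] = 1;
		err = 0;
	}
out:
	pthread_mutex_unlock(&lock);
	return err;
}

int main(void)
{
	unsigned char mac[6] = { 0, 1, 2, 3, 4, 5 };
	int r1 = mpfs_add(mac);
	int r2 = mpfs_add(mac);

	printf("%d %d\n", r1, r2);   /* 0, then -EEXIST */
	return 0;
}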
0; + u32 index; + + if (!MLX5_VPORT_MANAGER(dev)) + return 0; + + mutex_lock(&mpfs->lock); + + l2addr = l2addr_hash_find(mpfs->hash, mac, struct l2table_node); + if (!l2addr) { + err = -ENOENT; + goto unlock; + } + + index = l2addr->index; + del_l2table_entry_cmd(dev, index); + l2addr_hash_del(l2addr); + free_l2table_index(mpfs, index); + mlx5_core_dbg(dev, "MPFS mac deleted %pM, index (%d)\n", mac, index); +unlock: + mutex_unlock(&mpfs->lock); + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h new file mode 100644 index 000000000000..4a7b2c3203a7 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __MLX5_MPFS_H__ +#define __MLX5_MPFS_H__ + +#include <linux/if_ether.h> +#include <linux/mlx5/device.h> + +/* L2 -mac address based- hash helpers */ +#define MLX5_L2_ADDR_HASH_SIZE (BIT(BITS_PER_BYTE)) +#define MLX5_L2_ADDR_HASH(addr) (addr[5]) + +struct l2addr_node { + struct hlist_node hlist; + u8 addr[ETH_ALEN]; +}; + +#define for_each_l2hash_node(hn, tmp, hash, i) \ + for (i = 0; i < MLX5_L2_ADDR_HASH_SIZE; i++) \ + hlist_for_each_entry_safe(hn, tmp, &(hash)[i], hlist) + +#define l2addr_hash_find(hash, mac, type) ({ \ + int ix = MLX5_L2_ADDR_HASH(mac); \ + bool found = false; \ + type *ptr = NULL; \ + \ + hlist_for_each_entry(ptr, &(hash)[ix], node.hlist) \ + if (ether_addr_equal(ptr->node.addr, mac)) {\ + found = true; \ + break; \ + } \ + if (!found) \ + ptr = NULL; \ + ptr; \ +}) + +#define l2addr_hash_add(hash, mac, type, gfp) ({ \ + int ix = MLX5_L2_ADDR_HASH(mac); \ + type *ptr = NULL; \ + \ + ptr = kzalloc(sizeof(type), gfp); \ + if (ptr) { \ + ether_addr_copy(ptr->node.addr, mac); \ + hlist_add_head(&ptr->node.hlist, &(hash)[ix]);\ + } \ + ptr; \ +}) + +#define l2addr_hash_del(ptr) ({ \ + hlist_del(&(ptr)->node.hlist); \ + kfree(ptr); \ +}) + +#ifdef CONFIG_MLX5_MPFS +int mlx5_mpfs_init(struct mlx5_core_dev *dev); +void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev); +int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac); +int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac); +#else /* #ifndef CONFIG_MLX5_MPFS */ +static inline int mlx5_mpfs_init(struct mlx5_core_dev *dev) { return 0; } +static inline void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev) {} +static inline int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; } +static inline int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; } +#endif +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index c065132b956d..6dbd637b4e66 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -53,9 +53,8 @@ #include <net/devlink.h> #include "mlx5_core.h" #include "fs_core.h" -#ifdef CONFIG_MLX5_CORE_EN +#include "lib/mpfs.h" #include "eswitch.h" -#endif #include "lib/mlx5.h" #include "fpga/core.h" #include "accel/ipsec.h" @@ -946,13 +945,17 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv) goto err_tables_cleanup; } -#ifdef CONFIG_MLX5_CORE_EN + err = mlx5_mpfs_init(dev); + if (err) { + dev_err(&pdev->dev, "Failed to init l2 table %d\n", err); + goto err_rl_cleanup; + } + err = mlx5_eswitch_init(dev); if (err) { dev_err(&pdev->dev, "Failed to init eswitch %d\n", err); - goto err_rl_cleanup; + goto err_mpfs_cleanup; } -#endif err = mlx5_sriov_init(dev); if (err) { @@ -971,13 +974,11 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv) err_sriov_cleanup: mlx5_sriov_cleanup(dev); err_eswitch_cleanup: -#ifdef CONFIG_MLX5_CORE_EN mlx5_eswitch_cleanup(dev->priv.eswitch); - +err_mpfs_cleanup: + mlx5_mpfs_cleanup(dev); err_rl_cleanup: -#endif mlx5_cleanup_rl_table(dev); - err_tables_cleanup: mlx5_cleanup_mkey_table(dev); mlx5_cleanup_srq_table(dev); @@ -995,9 +996,8 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev) { mlx5_fpga_cleanup(dev); mlx5_sriov_cleanup(dev); -#ifdef CONFIG_MLX5_CORE_EN mlx5_eswitch_cleanup(dev->priv.eswitch); -#endif + mlx5_mpfs_cleanup(dev); mlx5_cleanup_rl_table(dev); mlx5_cleanup_reserved_gids(dev); mlx5_cleanup_mkey_table(dev); @@ -1155,10 +1155,6 @@ static int 
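The l2addr_hash_* macros now exported by lib/mpfs.h bucket entries by the last MAC byte and embed the list link inside each caller-defined node type. The same idea in plain C, with an invented struct node and open-coded singly linked buckets:

#include <stdio.h>
#include <string.h>

#define HASH_SIZE 256   /* one bucket per possible last byte */

struct node { struct node *next; unsigned char addr[6]; };
static struct node *hash[HASH_SIZE];

static struct node *find(const unsigned char *mac)
{
	for (struct node *n = hash[mac[5]]; n; n = n->next)
		if (!memcmp(n->addr, mac, 6))
			return n;
	return NULL;
}

static void add(struct node *n, const unsigned char *mac)
{
	memcpy(n->addr, mac, 6);
	n->next = hash[mac[5]];   /* push onto the bucket's head */
	hash[mac[5]] = n;
}

int main(void)
{
	static struct node a;
	unsigned char mac[6] = { 0, 0, 0, 0, 0, 42 };

	add(&a, mac);
	printf("%s\n", find(mac) ? "hit" : "miss");
	return 0;
}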
mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, goto err_fs; } -#ifdef CONFIG_MLX5_CORE_EN - mlx5_eswitch_attach(dev->priv.eswitch); -#endif - err = mlx5_sriov_attach(dev); if (err) { dev_err(&pdev->dev, "sriov init failed %d\n", err); @@ -1202,9 +1198,6 @@ err_fpga_start: mlx5_sriov_detach(dev); err_sriov: -#ifdef CONFIG_MLX5_CORE_EN - mlx5_eswitch_detach(dev->priv.eswitch); -#endif mlx5_cleanup_fs(dev); err_fs: @@ -1279,9 +1272,6 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, mlx5_fpga_device_stop(dev); mlx5_sriov_detach(dev); -#ifdef CONFIG_MLX5_CORE_EN - mlx5_eswitch_detach(dev->priv.eswitch); -#endif mlx5_cleanup_fs(dev); mlx5_irq_clear_affinity_hints(dev); free_comp_eqs(dev); @@ -1313,7 +1303,7 @@ struct mlx5_core_event_handler { }; static const struct devlink_ops mlx5_devlink_ops = { -#ifdef CONFIG_MLX5_CORE_EN +#ifdef CONFIG_MLX5_ESWITCH .eswitch_mode_set = mlx5_devlink_eswitch_mode_set, .eswitch_mode_get = mlx5_devlink_eswitch_mode_get, .eswitch_inline_mode_set = mlx5_devlink_eswitch_inline_mode_set, @@ -1353,6 +1343,9 @@ static int init_one(struct pci_dev *pdev, mutex_init(&dev->pci_status_mutex); mutex_init(&dev->intf_state_mutex); + INIT_LIST_HEAD(&priv->waiting_events_list); + priv->is_accum_events = false; + #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING err = init_srcu_struct(&priv->pfault_srcu); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 6a263e8d883a..d8da9240a00b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -43,6 +43,10 @@ #define DRIVER_VERSION "5.0-0" #define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs(mdev->pdev)) +#define MLX5_VPORT_MANAGER(mdev) \ + (MLX5_CAP_GEN(mdev, vport_group_manager) && \ + (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \ + mlx5_core_is_pf(mdev)) extern uint mlx5_core_debug_mask; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index bf99d40e30b4..5e7ffc9fad78 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c @@ -33,9 +33,7 @@ #include <linux/pci.h> #include <linux/mlx5/driver.h> #include "mlx5_core.h" -#ifdef CONFIG_MLX5_CORE_EN #include "eswitch.h" -#endif bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev) { @@ -57,14 +55,12 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs) return -EBUSY; } -#ifdef CONFIG_MLX5_CORE_EN err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY); if (err) { mlx5_core_warn(dev, "failed to enable eswitch SRIOV (%d)\n", err); return err; } -#endif for (vf = 0; vf < num_vfs; vf++) { err = mlx5_core_enable_hca(dev, vf + 1); @@ -88,11 +84,7 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev) int vf; if (!sriov->enabled_vfs) -#ifdef CONFIG_MLX5_CORE_EN - goto disable_sriov_resources; -#else - return; -#endif + goto out; for (vf = 0; vf < sriov->num_vfs; vf++) { if (!sriov->vfs_ctx[vf].enabled) @@ -106,10 +98,8 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev) sriov->enabled_vfs--; } -#ifdef CONFIG_MLX5_CORE_EN -disable_sriov_resources: +out: mlx5_eswitch_disable_sriov(dev->priv.eswitch); -#endif if (mlx5_wait_for_vf_pages(dev)) mlx5_core_warn(dev, "timeout reclaiming VFs pages\n"); diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig 
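The mlx5_core.h hunk above collapses the thrice-repeated "vport_group_manager, Ethernet port type, and PF" test into one MLX5_VPORT_MANAGER() predicate, which is what the earlier eq.c and eswitch.c hunks switch to. The shape of such a predicate macro, on a toy struct:

#include <stdio.h>

struct core_dev { int vport_group_manager, port_type_eth, is_pf; };

/* One predicate instead of repeating the three-clause capability test
 * at every call site, mirroring the new MLX5_VPORT_MANAGER(). */
#define VPORT_MANAGER(d) \
	((d)->vport_group_manager && (d)->port_type_eth && (d)->is_pf)

int main(void)
{
	struct core_dev pf = { 1, 1, 1 };
	struct core_dev vf = { 1, 1, 0 };

	printf("pf: %d, vf: %d\n", VPORT_MANAGER(&pf), VPORT_MANAGER(&vf));
	return 0;
}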
b/drivers/net/ethernet/mellanox/mlxsw/Kconfig index 695adff89d71..d56eea310509 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@ -75,6 +75,7 @@ config MLXSW_SPECTRUM depends on MLXSW_CORE && MLXSW_PCI && NET_SWITCHDEV && VLAN_8021Q depends on PSAMPLE || PSAMPLE=n depends on BRIDGE || BRIDGE=n + depends on IPV6 || IPV6=n select PARMAN select MLXFW default m diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index affe84eb4bff..9d5e7cf288be 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -667,7 +667,7 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core, int err; dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n", - trans->tid, reg->id, mlxsw_reg_id_str(reg->id), + tid, reg->id, mlxsw_reg_id_str(reg->id), mlxsw_core_reg_access_type_str(type)); skb = mlxsw_emad_alloc(mlxsw_core, reg->len); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h index 9807ef814e42..f6963b0b4a55 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h @@ -57,6 +57,9 @@ enum mlxsw_afk_element { MLXSW_AFK_ELEMENT_VID, MLXSW_AFK_ELEMENT_PCP, MLXSW_AFK_ELEMENT_TCP_FLAGS, + MLXSW_AFK_ELEMENT_IP_TTL_, + MLXSW_AFK_ELEMENT_IP_ECN, + MLXSW_AFK_ELEMENT_IP_DSCP, MLXSW_AFK_ELEMENT_MAX, }; @@ -104,6 +107,9 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = { MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12), MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3), MLXSW_AFK_ELEMENT_INFO_U32(TCP_FLAGS, 0x10, 23, 9), + MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x14, 0, 8), + MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x14, 9, 2), + MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x14, 11, 6), MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32), MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32), MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8), diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 1bd34d9a7b9e..7e8ba546c3a4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -3679,15 +3679,17 @@ enum mlxsw_reg_htgt_trap_group { MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP, MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP, MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP, - MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4, + MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP, MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF, MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP, - MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS, + MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS, MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP, MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE, MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME, MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP, MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT, + MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD, + MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND, }; /* reg_htgt_trap_group @@ -3952,10 +3954,12 @@ MLXSW_ITEM32(reg, rgcr, pcp_rw, 0x18, 16, 2); */ MLXSW_ITEM32(reg, rgcr, activity_dis, 0x20, 0, 8); -static inline void mlxsw_reg_rgcr_pack(char *payload, bool ipv4_en) +static inline void mlxsw_reg_rgcr_pack(char *payload, bool ipv4_en, + bool ipv6_en) { MLXSW_REG_ZERO(rgcr, payload); mlxsw_reg_rgcr_ipv4_en_set(payload, ipv4_en); + mlxsw_reg_rgcr_ipv6_en_set(payload, ipv6_en); } /* RITR - Router Interface Table Register @@ -3988,16 +3992,16 @@ MLXSW_ITEM32(reg, ritr, ipv4, 0x00, 29, 1); MLXSW_ITEM32(reg, ritr, ipv6, 
0x00, 28, 1); enum mlxsw_reg_ritr_if_type { + /* VLAN interface. */ MLXSW_REG_RITR_VLAN_IF, + /* FID interface. */ MLXSW_REG_RITR_FID_IF, + /* Sub-port interface. */ MLXSW_REG_RITR_SP_IF, }; /* reg_ritr_type - * Router interface type. - * 0 - VLAN interface. - * 1 - FID interface. - * 2 - Sub-port interface. + * Router interface type as per enum mlxsw_reg_ritr_if_type. * Access: RW */ MLXSW_ITEM32(reg, ritr, type, 0x00, 23, 3); @@ -4203,10 +4207,12 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable, MLXSW_REG_ZERO(ritr, payload); mlxsw_reg_ritr_enable_set(payload, enable); mlxsw_reg_ritr_ipv4_set(payload, 1); + mlxsw_reg_ritr_ipv6_set(payload, 1); mlxsw_reg_ritr_type_set(payload, type); mlxsw_reg_ritr_op_set(payload, op); mlxsw_reg_ritr_rif_set(payload, rif); mlxsw_reg_ritr_ipv4_fe_set(payload, 1); + mlxsw_reg_ritr_ipv6_fe_set(payload, 1); mlxsw_reg_ritr_lb_en_set(payload, 1); mlxsw_reg_ritr_virtual_router_set(payload, vr_id); mlxsw_reg_ritr_mtu_set(payload, mtu); @@ -4712,12 +4718,13 @@ MLXSW_ITEM32(reg, ralue, prefix_len, 0x08, 0, 8); /* reg_ralue_dip* * The prefix of the route or of the marker that the object of the LPM * is compared with. The most significant bits of the dip are the prefix. - * The list significant bits must be '0' if the prefix_len is smaller + * The least significant bits must be '0' if the prefix_len is smaller * than 128 for IPv6 or smaller than 32 for IPv4. * IPv4 address uses bits dip[31:0] and bits dip[127:32] are reserved. * Access: Index */ MLXSW_ITEM32(reg, ralue, dip4, 0x18, 0, 32); +MLXSW_ITEM_BUF(reg, ralue, dip6, 0x0C, 16); enum mlxsw_reg_ralue_entry_type { MLXSW_REG_RALUE_ENTRY_TYPE_MARKER_ENTRY = 1, @@ -4806,7 +4813,7 @@ MLXSW_ITEM32(reg, ralue, ecmp_size, 0x28, 0, 13); */ MLXSW_ITEM32(reg, ralue, local_erif, 0x24, 0, 16); -/* reg_ralue_v +/* reg_ralue_ip2me_v * Valid bit for the tunnel_ptr field. * If valid = 0 then trap to CPU as IP2ME trap ID. * If valid = 1 and the packet format allows NVE or IPinIP tunnel @@ -4816,15 +4823,15 @@ MLXSW_ITEM32(reg, ralue, local_erif, 0x24, 0, 16); * Only relevant in case of IP2ME action. * Access: RW */ -MLXSW_ITEM32(reg, ralue, v, 0x24, 31, 1); +MLXSW_ITEM32(reg, ralue, ip2me_v, 0x24, 31, 1); -/* reg_ralue_tunnel_ptr +/* reg_ralue_ip2me_tunnel_ptr * Tunnel Pointer for NVE or IPinIP tunnel decapsulation. * For Spectrum, pointer to KVD Linear. * Only relevant in case of IP2ME action. 
* Access: RW */ -MLXSW_ITEM32(reg, ralue, tunnel_ptr, 0x24, 0, 24); +MLXSW_ITEM32(reg, ralue, ip2me_tunnel_ptr, 0x24, 0, 24); static inline void mlxsw_reg_ralue_pack(char *payload, enum mlxsw_reg_ralxx_protocol protocol, @@ -4851,6 +4858,16 @@ static inline void mlxsw_reg_ralue_pack4(char *payload, mlxsw_reg_ralue_dip4_set(payload, dip); } +static inline void mlxsw_reg_ralue_pack6(char *payload, + enum mlxsw_reg_ralxx_protocol protocol, + enum mlxsw_reg_ralue_op op, + u16 virtual_router, u8 prefix_len, + const void *dip) +{ + mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len); + mlxsw_reg_ralue_dip6_memcpy_to(payload, dip); +} + static inline void mlxsw_reg_ralue_act_remote_pack(char *payload, enum mlxsw_reg_ralue_trap_action trap_action, @@ -4954,6 +4971,7 @@ MLXSW_ITEM32(reg, rauht, rif, 0x00, 0, 16); * Access: Index */ MLXSW_ITEM32(reg, rauht, dip4, 0x1C, 0x0, 32); +MLXSW_ITEM_BUF(reg, rauht, dip6, 0x10, 16); enum mlxsw_reg_rauht_trap_action { MLXSW_REG_RAUHT_TRAP_ACTION_NOP, @@ -5018,6 +5036,15 @@ static inline void mlxsw_reg_rauht_pack4(char *payload, mlxsw_reg_rauht_dip4_set(payload, dip); } +static inline void mlxsw_reg_rauht_pack6(char *payload, + enum mlxsw_reg_rauht_op op, u16 rif, + const char *mac, const char *dip) +{ + mlxsw_reg_rauht_pack(payload, op, rif, mac); + mlxsw_reg_rauht_type_set(payload, MLXSW_REG_RAUHT_TYPE_IPV6); + mlxsw_reg_rauht_dip6_memcpy_to(payload, dip); +} + /* RALEU - Router Algorithmic LPM ECMP Update Register * --------------------------------------------------- * The register enables updating the ECMP section in the action for multiple @@ -5216,6 +5243,30 @@ MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_rif, MLXSW_REG_RAUHTD_BASE_LEN, 0, MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_dip, MLXSW_REG_RAUHTD_BASE_LEN, 0, 32, MLXSW_REG_RAUHTD_IPV4_ENT_LEN, 0x04, false); +#define MLXSW_REG_RAUHTD_IPV6_ENT_LEN 0x20 + +/* reg_rauhtd_ipv6_ent_a + * Activity. Set for new entries. Set if a packet lookup has hit on the + * specific entry. + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv6_ent_a, MLXSW_REG_RAUHTD_BASE_LEN, 16, 1, + MLXSW_REG_RAUHTD_IPV6_ENT_LEN, 0x00, false); + +/* reg_rauhtd_ipv6_ent_rif + * Router interface. + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv6_ent_rif, MLXSW_REG_RAUHTD_BASE_LEN, 0, + 16, MLXSW_REG_RAUHTD_IPV6_ENT_LEN, 0x00, false); + +/* reg_rauhtd_ipv6_ent_dip + * Destination IPv6 address. + * Access: RO + */ +MLXSW_ITEM_BUF_INDEXED(reg, rauhtd, ipv6_ent_dip, MLXSW_REG_RAUHTD_BASE_LEN, + 16, MLXSW_REG_RAUHTD_IPV6_ENT_LEN, 0x10); + static inline void mlxsw_reg_rauhtd_ent_ipv4_unpack(char *payload, int ent_index, u16 *p_rif, u32 *p_dip) @@ -5224,6 +5275,14 @@ static inline void mlxsw_reg_rauhtd_ent_ipv4_unpack(char *payload, *p_dip = mlxsw_reg_rauhtd_ipv4_ent_dip_get(payload, ent_index); } +static inline void mlxsw_reg_rauhtd_ent_ipv6_unpack(char *payload, + int rec_index, u16 *p_rif, + char *p_dip) +{ + *p_rif = mlxsw_reg_rauhtd_ipv6_ent_rif_get(payload, rec_index); + mlxsw_reg_rauhtd_ipv6_ent_dip_memcpy_from(payload, rec_index, p_dip); +} + /* MFCR - Management Fan Control Register * -------------------------------------- * This register controls the settings of the Fan Speed PWM mechanism. 
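mlxsw_reg_rauht_pack6() and mlxsw_reg_ralue_pack6() in the hunks around here reuse the family-agnostic pack function and then write only the type and destination address, mirroring the existing pack4 variants. A standalone sketch of that shared-core packing (struct rauht_rec is a made-up layout, not the register format):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rauht_rec {
	uint8_t mac[6];
	int type;          /* 4 or 6 */
	uint8_t dip[16];   /* IPv4 uses 4 bytes, IPv6 all 16 */
};

static void pack_common(struct rauht_rec *r, const uint8_t *mac)
{
	memset(r, 0, sizeof(*r));
	memcpy(r->mac, mac, sizeof(r->mac));
}

static void pack4(struct rauht_rec *r, const uint8_t *mac, uint32_t dip)
{
	pack_common(r, mac);
	r->type = 4;
	memcpy(r->dip, &dip, sizeof(dip));
}

static void pack6(struct rauht_rec *r, const uint8_t *mac, const uint8_t *dip)
{
	pack_common(r, mac);
	r->type = 6;
	memcpy(r->dip, dip, 16);
}

int main(void)
{
	uint8_t mac[6] = { 0, 1, 2, 3, 4, 5 };
	uint8_t dip6[16] = { 0x20, 0x01 };   /* 2001:: */
	struct rauht_rec r4, r6;

	pack4(&r4, mac, 0x0a000001);   /* 10.0.0.1, host order here */
	pack6(&r6, mac, dip6);
	printf("types: %d %d\n", r4.type, r6.type);
	return 0;
}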
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 60bf8f27cc00..eb7c4549f464 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -58,6 +58,7 @@ #include <net/tc_act/tc_mirred.h> #include <net/netevent.h> #include <net/tc_act/tc_sample.h> +#include <net/addrconf.h> #include "spectrum.h" #include "pci.h" @@ -1616,16 +1617,16 @@ mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port) } static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, - __be16 protocol, - struct tc_cls_matchall_offload *cls, + struct tc_cls_matchall_offload *f, bool ingress) { struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; + __be16 protocol = f->common.protocol; const struct tc_action *a; LIST_HEAD(actions); int err; - if (!tc_single_action(cls->exts)) { + if (!tcf_exts_has_one_action(f->exts)) { netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n"); return -EOPNOTSUPP; } @@ -1633,9 +1634,9 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL); if (!mall_tc_entry) return -ENOMEM; - mall_tc_entry->cookie = cls->cookie; + mall_tc_entry->cookie = f->cookie; - tcf_exts_to_list(cls->exts, &actions); + tcf_exts_to_list(f->exts, &actions); a = list_first_entry(&actions, struct tc_action, list); if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) { @@ -1647,7 +1648,7 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, mirror, a, ingress); } else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) { mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE; - err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, cls, + err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f, a, ingress); } else { err = -EOPNOTSUPP; @@ -1665,12 +1666,12 @@ err_add_action: } static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, - struct tc_cls_matchall_offload *cls) + struct tc_cls_matchall_offload *f) { struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port, - cls->cookie); + f->cookie); if (!mall_tc_entry) { netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n"); return; @@ -1692,49 +1693,61 @@ static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, kfree(mall_tc_entry); } -static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle, - u32 chain_index, __be16 proto, - struct tc_to_netdev *tc) +static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, + struct tc_cls_matchall_offload *f) { - struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); - bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS); + bool ingress = TC_H_MAJ(f->common.handle) == TC_H_MAJ(TC_H_INGRESS); - if (chain_index) + if (f->common.chain_index) return -EOPNOTSUPP; - switch (tc->type) { - case TC_SETUP_MATCHALL: - switch (tc->cls_mall->command) { - case TC_CLSMATCHALL_REPLACE: - return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, - proto, - tc->cls_mall, - ingress); - case TC_CLSMATCHALL_DESTROY: - mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, - tc->cls_mall); - return 0; - default: - return -EOPNOTSUPP; - } - case TC_SETUP_CLSFLOWER: - switch (tc->cls_flower->command) { - case TC_CLSFLOWER_REPLACE: - return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, - proto, tc->cls_flower); - case 
TC_CLSFLOWER_DESTROY: - mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, - tc->cls_flower); - return 0; - case TC_CLSFLOWER_STATS: - return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress, - tc->cls_flower); - default: - return -EOPNOTSUPP; - } + switch (f->command) { + case TC_CLSMATCHALL_REPLACE: + return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f, + ingress); + case TC_CLSMATCHALL_DESTROY: + mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int +mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port, + struct tc_cls_flower_offload *f) +{ + bool ingress = TC_H_MAJ(f->common.handle) == TC_H_MAJ(TC_H_INGRESS); + + if (f->common.chain_index) + return -EOPNOTSUPP; + + switch (f->command) { + case TC_CLSFLOWER_REPLACE: + return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, f); + case TC_CLSFLOWER_DESTROY: + mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, f); + return 0; + case TC_CLSFLOWER_STATS: + return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress, f); + default: + return -EOPNOTSUPP; } +} + +static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); - return -EOPNOTSUPP; + switch (type) { + case TC_SETUP_CLSMATCHALL: + return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data); + case TC_SETUP_CLSFLOWER: + return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, type_data); + default: + return -EOPNOTSUPP; + } } static const struct net_device_ops mlxsw_sp_port_netdev_ops = { @@ -3333,15 +3346,47 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false), MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false), MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false), + MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD, + false), + MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD, + false), + MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD, + false), + MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD, + false), /* L3 traps */ - MLXSW_SP_RXL_NO_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false), - MLXSW_SP_RXL_NO_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false), - MLXSW_SP_RXL_NO_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false), - MLXSW_SP_RXL_MARK(OSPF, TRAP_TO_CPU, OSPF, false), - MLXSW_SP_RXL_NO_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false), - MLXSW_SP_RXL_NO_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false), - MLXSW_SP_RXL_NO_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false), - MLXSW_SP_RXL_NO_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false), + MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false), + MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP, + false), + MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP, + false), + MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false), + MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false), + MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false), + MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false), + 
MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false), + MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false), + MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND, + false), + MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND, + false), + MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND, + false), + MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND, + false), + MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false), + MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, + false), + MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false), + MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false), + MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false), /* PKT Sample trap */ MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU, false, SP_IP2ME, DISCARD), @@ -3376,15 +3421,17 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) burst_size = 7; break; case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD: rate = 16 * 1024; burst_size = 10; break; - case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: - case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS: case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: rate = 1024; burst_size = 7; break; @@ -3433,21 +3480,23 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) priority = 5; tc = 5; break; - case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: priority = 4; tc = 4; break; case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD: priority = 3; tc = 3; break; case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: priority = 2; tc = 2; break; - case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS: case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: priority = 1; @@ -4357,6 +4406,10 @@ static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = { .priority = 10, /* Must be called before FIB notifier block */ }; +static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = { + .notifier_call = mlxsw_sp_inet6addr_event, +}; + static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = { .notifier_call = mlxsw_sp_router_netevent_event, }; @@ -4377,6 +4430,7 @@ static int __init mlxsw_sp_module_init(void) register_netdevice_notifier(&mlxsw_sp_netdevice_nb); register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); + register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); register_netevent_notifier(&mlxsw_sp_router_netevent_nb); err = mlxsw_core_driver_register(&mlxsw_sp_driver); @@ -4393,6 +4447,7 @@ err_pci_driver_register: mlxsw_core_driver_unregister(&mlxsw_sp_driver); err_core_driver_register: unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb); + unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb); return err; @@ -4403,6 +4458,7 @@ static void __exit 
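mlxsw_sp_module_init() in the hunk above registers the new inet6addr notifier alongside the existing ones before the drivers, and the error and exit paths unregister everything in reverse order (the inetaddr block's comment also pins its priority ahead of the FIB notifier). The registration/teardown shape, standalone (all functions are stand-ins):

#include <stdio.h>

static void register_inetaddr(void)    { puts("inetaddr registered"); }
static void register_inet6addr(void)   { puts("inet6addr registered"); }
static void unregister_inet6addr(void) { puts("inet6addr unregistered"); }
static void unregister_inetaddr(void)  { puts("inetaddr unregistered"); }
static int  driver_register(void)      { return -1; }   /* fails on purpose */

/* Notifiers go in before the driver so no early event is missed; the
 * error path peels them off in reverse registration order. */
static int module_init(void)
{
	register_inetaddr();
	register_inet6addr();
	if (driver_register())
		goto err;
	return 0;
err:
	unregister_inet6addr();
	unregister_inetaddr();
	return -1;
}

int main(void)
{
	printf("init -> %d\n", module_init());
	return 0;
}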
mlxsw_sp_module_exit(void) mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver); mlxsw_core_driver_unregister(&mlxsw_sp_driver); unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb); + unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 5ef98d4d0ab6..8452d1db2f3f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -384,6 +384,8 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused, int mlxsw_sp_netdevice_router_port_event(struct net_device *dev); int mlxsw_sp_inetaddr_event(struct notifier_block *unused, unsigned long event, void *ptr); +int mlxsw_sp_inet6addr_event(struct notifier_block *unused, + unsigned long event, void *ptr); int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, struct netdev_notifier_changeupper_info *info); void @@ -506,7 +508,7 @@ extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops; /* spectrum_flower.c */ int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, - __be16 protocol, struct tc_cls_flower_offload *f); + struct tc_cls_flower_offload *f); void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, struct tc_cls_flower_offload *f); int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 01a1501b56ca..508b5fcacd77 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -369,7 +369,7 @@ int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp, local_port = mlxsw_sp_port->local_port; in_port = false; } else { - /* If out_dev is NULL, the called wants to + /* If out_dev is NULL, the caller wants to * set forward to ingress port. 
*/ local_port = 0; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h index 85d5001a5818..fb8031828454 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h @@ -70,6 +70,9 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = { static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = { MLXSW_AFK_ELEMENT_INST_U32(SRC_IP4, 0x00, 0, 32), + MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 4, 2), + MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 24, 8), + MLXSW_AFK_ELEMENT_INST_U32(IP_DSCP, 0x08, 0, 6), MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x08, 8, 9), /* TCP_CONTROL+TCP_ECN */ }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index 61a10f166f97..bc5173f1b5c1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -984,6 +984,9 @@ static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = { MLXSW_AFK_ELEMENT_VID, MLXSW_AFK_ELEMENT_PCP, MLXSW_AFK_ELEMENT_TCP_FLAGS, + MLXSW_AFK_ELEMENT_IP_TTL_, + MLXSW_AFK_ELEMENT_IP_ECN, + MLXSW_AFK_ELEMENT_IP_DSCP, }; static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c index 6afbe9ec64e2..bbd238e50f05 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c @@ -109,7 +109,6 @@ static const int mlxsw_sp_sfgc_uc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = { static const int mlxsw_sp_sfgc_bc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = { [MLXSW_REG_SFGC_TYPE_BROADCAST] = 1, - [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1, [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP] = 1, [MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL] = 1, [MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST] = 1, @@ -117,6 +116,7 @@ static const int mlxsw_sp_sfgc_bc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = { static const int mlxsw_sp_sfgc_mc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = { [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4] = 1, + [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1, }; static const int *mlxsw_sp_packet_type_sfgc_types[] = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 21bb2bf62d3e..95428b41c50f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -53,7 +53,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, LIST_HEAD(actions); int err; - if (tc_no_actions(exts)) + if (!tcf_exts_has_actions(exts)) return 0; /* Count action is inserted first */ @@ -212,11 +212,46 @@ static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp, return 0; } +static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + struct tc_cls_flower_offload *f, + u16 n_proto) +{ + struct flow_dissector_key_ip *key, *mask; + + if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) + return 0; + + if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) { + dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n"); + return -EINVAL; + } + + key = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IP, + f->key); + mask 
= skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IP, + f->mask);
+ mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_, + key->ttl, mask->ttl);
+
+ mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN, + key->tos & 0x3, mask->tos & 0x3);
+
+ mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP, + key->tos >> 2, mask->tos >> 2);
+
+ return 0;
+}
+
 static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, struct net_device *dev, struct mlxsw_sp_acl_rule_info *rulei, struct tc_cls_flower_offload *f) { + u16 n_proto_mask = 0; + u16 n_proto_key = 0; u16 addr_type = 0; u8 ip_proto = 0; int err; @@ -229,12 +264,13 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | BIT(FLOW_DISSECTOR_KEY_PORTS) | BIT(FLOW_DISSECTOR_KEY_TCP) | + BIT(FLOW_DISSECTOR_KEY_IP) | BIT(FLOW_DISSECTOR_KEY_VLAN))) { dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n"); return -EOPNOTSUPP; } - mlxsw_sp_acl_rulei_priority(rulei, f->prio); + mlxsw_sp_acl_rulei_priority(rulei, f->common.prio); if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { struct flow_dissector_key_control *key = @@ -253,8 +289,8 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, skb_flow_dissector_target(f->dissector, FLOW_DISSECTOR_KEY_BASIC, f->mask); - u16 n_proto_key = ntohs(key->n_proto); - u16 n_proto_mask = ntohs(mask->n_proto); + n_proto_key = ntohs(key->n_proto); + n_proto_mask = ntohs(mask->n_proto); if (n_proto_key == ETH_P_ALL) { n_proto_key = 0; @@ -324,11 +360,15 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, if (err) return err; + err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f, n_proto_key & n_proto_mask); + if (err) + return err; + return mlxsw_sp_flower_parse_actions(mlxsw_sp, dev, rulei, f->exts); } int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, - __be16 protocol, struct tc_cls_flower_offload *f) + struct tc_cls_flower_offload *f) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct net_device *dev = mlxsw_sp_port->dev; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 4b2e0fd7d51e..93b6da88e79c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -43,12 +43,19 @@ #include <linux/inetdevice.h> #include <linux/netdevice.h> #include <linux/if_bridge.h> +#include <linux/socket.h> +#include <linux/route.h> #include <net/netevent.h> #include <net/neighbour.h> #include <net/arp.h> #include <net/ip_fib.h> +#include <net/ip6_fib.h> #include <net/fib_rules.h> #include <net/l3mdev.h> +#include <net/addrconf.h> +#include <net/ndisc.h> +#include <net/ipv6.h> +#include <net/fib_notifier.h> #include "spectrum.h" #include "core.h" @@ -304,7 +311,7 @@ static struct mlxsw_sp_rif * mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, const struct net_device *dev); -#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE) +#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1) struct mlxsw_sp_prefix_usage { DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT); @@ -384,21 +391,31 @@ struct mlxsw_sp_fib_node { struct mlxsw_sp_fib_key key; }; -struct mlxsw_sp_fib_entry_params { +struct mlxsw_sp_fib_entry { + struct list_head list; + struct mlxsw_sp_fib_node *fib_node; + enum mlxsw_sp_fib_entry_type type; + struct list_head nexthop_group_node; + struct mlxsw_sp_nexthop_group *nh_group; +}; + 
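Context for the FLOW_DISSECTOR_KEY_IP handling added above: the flower key carries the raw IPv4 TOS / IPv6 traffic-class octet, and mlxsw_sp_flower_parse_ip() splits it into the two low-order ECN bits and the six high-order DSCP bits before programming the IP_ECN and IP_DSCP flex-key elements (both declared in spectrum_acl_flex_keys.h earlier in this diff). The standalone C sketch below is illustrative only, not part of the patch; it just demonstrates the bit split.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t tos = 0xb8;		/* DSCP 46 (EF) in the high six bits, ECN 0 */
	uint8_t ecn = tos & 0x3;	/* low two bits, as in the IP_ECN keymask */
	uint8_t dscp = tos >> 2;	/* high six bits, as in the IP_DSCP keymask */

	printf("tos=0x%02x -> dscp=%u ecn=%u\n", tos, dscp, ecn);
	return 0;
}

Running it prints "tos=0xb8 -> dscp=46 ecn=0", i.e. the EF code point with no congestion marking.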
+struct mlxsw_sp_fib4_entry { + struct mlxsw_sp_fib_entry common; u32 tb_id; u32 prio; u8 tos; u8 type; }; -struct mlxsw_sp_fib_entry { +struct mlxsw_sp_fib6_entry { + struct mlxsw_sp_fib_entry common; + struct list_head rt6_list; + unsigned int nrt6; +}; + +struct mlxsw_sp_rt6 { struct list_head list; - struct mlxsw_sp_fib_node *fib_node; - enum mlxsw_sp_fib_entry_type type; - struct list_head nexthop_group_node; - struct mlxsw_sp_nexthop_group *nh_group; - struct mlxsw_sp_fib_entry_params params; - bool offloaded; + struct rt6_info *rt; }; enum mlxsw_sp_l3proto { @@ -428,6 +445,7 @@ struct mlxsw_sp_vr { u32 tb_id; /* kernel fib table id */ unsigned int rif_count; struct mlxsw_sp_fib *fib4; + struct mlxsw_sp_fib *fib6; }; static const struct rhashtable_params mlxsw_sp_fib_ht_params; @@ -625,7 +643,7 @@ static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp) static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr) { - return !!vr->fib4; + return !!vr->fib4 || !!vr->fib6; } static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp) @@ -694,7 +712,7 @@ static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr, case MLXSW_SP_L3_PROTO_IPV4: return vr->fib4; case MLXSW_SP_L3_PROTO_IPV6: - BUG_ON(1); + return vr->fib6; } return NULL; } @@ -703,6 +721,7 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp, u32 tb_id) { struct mlxsw_sp_vr *vr; + int err; vr = mlxsw_sp_vr_find_unused(mlxsw_sp); if (!vr) @@ -710,12 +729,24 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp, vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4); if (IS_ERR(vr->fib4)) return ERR_CAST(vr->fib4); + vr->fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6); + if (IS_ERR(vr->fib6)) { + err = PTR_ERR(vr->fib6); + goto err_fib6_create; + } vr->tb_id = tb_id; return vr; + +err_fib6_create: + mlxsw_sp_fib_destroy(vr->fib4); + vr->fib4 = NULL; + return ERR_PTR(err); } static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr) { + mlxsw_sp_fib_destroy(vr->fib6); + vr->fib6 = NULL; mlxsw_sp_fib_destroy(vr->fib4); vr->fib4 = NULL; } @@ -773,7 +804,8 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id) static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr) { - if (!vr->rif_count && list_empty(&vr->fib4->node_list)) + if (!vr->rif_count && list_empty(&vr->fib4->node_list) && + list_empty(&vr->fib6->node_list)) mlxsw_sp_vr_destroy(vr); } @@ -929,8 +961,15 @@ mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n) static void mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp) { - unsigned long interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME); + unsigned long interval; +#if IS_ENABLED(CONFIG_IPV6) + interval = min_t(unsigned long, + NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME), + NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME)); +#else + interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME); +#endif mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval); } @@ -965,6 +1004,44 @@ static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp, neigh_release(n); } +#if IS_ENABLED(CONFIG_IPV6) +static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp, + char *rauhtd_pl, + int rec_index) +{ + struct net_device *dev; + struct neighbour *n; + struct in6_addr dip; + u16 rif; + + mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif, + (char *) &dip); + + if (!mlxsw_sp->router->rifs[rif]) { + dev_err_ratelimited(mlxsw_sp->bus_info->dev, 
"Incorrect RIF in neighbour entry\n"); + return; + } + + dev = mlxsw_sp->router->rifs[rif]->dev; + n = neigh_lookup(&nd_tbl, &dip, dev); + if (!n) { + netdev_err(dev, "Failed to find matching neighbour for IP=%pI6c\n", + &dip); + return; + } + + netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip); + neigh_event_send(n, NULL); + neigh_release(n); +} +#else +static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp, + char *rauhtd_pl, + int rec_index) +{ +} +#endif + static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp, char *rauhtd_pl, int rec_index) @@ -988,6 +1065,15 @@ static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp, } +static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp, + char *rauhtd_pl, + int rec_index) +{ + /* One record contains one entry. */ + mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl, + rec_index); +} + static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp, char *rauhtd_pl, int rec_index) { @@ -997,7 +1083,8 @@ static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp, rec_index); break; case MLXSW_REG_RAUHTD_TYPE_IPV6: - WARN_ON_ONCE(1); + mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl, + rec_index); break; } } @@ -1022,22 +1109,20 @@ static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl) return false; } -static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp) +static int +__mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp, + char *rauhtd_pl, + enum mlxsw_reg_rauhtd_type type) { - char *rauhtd_pl; - u8 num_rec; - int i, err; - - rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL); - if (!rauhtd_pl) - return -ENOMEM; + int i, num_rec; + int err; /* Make sure the neighbour's netdev isn't removed in the * process. */ rtnl_lock(); do { - mlxsw_reg_rauhtd_pack(rauhtd_pl, MLXSW_REG_RAUHTD_TYPE_IPV4); + mlxsw_reg_rauhtd_pack(rauhtd_pl, type); err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd), rauhtd_pl); if (err) { @@ -1051,6 +1136,27 @@ static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp) } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl)); rtnl_unlock(); + return err; +} + +static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp) +{ + enum mlxsw_reg_rauhtd_type type; + char *rauhtd_pl; + int err; + + rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL); + if (!rauhtd_pl) + return -ENOMEM; + + type = MLXSW_REG_RAUHTD_TYPE_IPV4; + err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type); + if (err) + goto out; + + type = MLXSW_REG_RAUHTD_TYPE_IPV6; + err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type); +out: kfree(rauhtd_pl); return err; } @@ -1147,6 +1253,32 @@ mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp, } static void +mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_neigh_entry *neigh_entry, + enum mlxsw_reg_rauht_op op) +{ + struct neighbour *n = neigh_entry->key.n; + char rauht_pl[MLXSW_REG_RAUHT_LEN]; + const char *dip = n->primary_key; + + mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha, + dip); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl); +} + +static bool mlxsw_sp_neigh_ipv6_ignore(struct neighbour *n) +{ + /* Packets with a link-local destination address are trapped + * after LPM lookup and never reach the neighbour table, so + * there is no need to program such neighbours to the device. 
+ */ + if (ipv6_addr_type((struct in6_addr *) &n->primary_key) & + IPV6_ADDR_LINKLOCAL) + return true; + return false; +} + +static void mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_neigh_entry *neigh_entry, bool adding) @@ -1154,11 +1286,17 @@ mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp, if (!adding && !neigh_entry->connected) return; neigh_entry->connected = adding; - if (neigh_entry->key.n->tbl == &arp_tbl) + if (neigh_entry->key.n->tbl->family == AF_INET) { mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry, mlxsw_sp_rauht_op(adding)); - else + } else if (neigh_entry->key.n->tbl->family == AF_INET6) { + if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry->key.n)) + return; + mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry, + mlxsw_sp_rauht_op(adding)); + } else { WARN_ON_ONCE(1); + } } struct mlxsw_sp_neigh_event_work { @@ -1227,7 +1365,8 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused, p = ptr; /* We don't care about changes in the default table. */ - if (!p->dev || p->tbl != &arp_tbl) + if (!p->dev || (p->tbl->family != AF_INET && + p->tbl->family != AF_INET6)) return NOTIFY_DONE; /* We are in atomic context and can't take RTNL mutex, @@ -1246,7 +1385,7 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused, case NETEVENT_NEIGH_UPDATE: n = ptr; - if (n->tbl != &arp_tbl) + if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6) return NOTIFY_DONE; mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev); @@ -1307,25 +1446,16 @@ static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp) rhashtable_destroy(&mlxsw_sp->router->neigh_ht); } -static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp, - const struct mlxsw_sp_rif *rif) -{ - char rauht_pl[MLXSW_REG_RAUHT_LEN]; - - mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL, - rif->rif_index, rif->addr); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl); -} - static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_rif *rif) { struct mlxsw_sp_neigh_entry *neigh_entry, *tmp; - mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif); list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list, - rif_list_node) + rif_list_node) { + mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false); mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry); + } } struct mlxsw_sp_nexthop_key { @@ -1340,6 +1470,7 @@ struct mlxsw_sp_nexthop { */ struct rhash_head ht_node; struct mlxsw_sp_nexthop_key key; + unsigned char gw_addr[sizeof(struct in6_addr)]; struct mlxsw_sp_rif *rif; u8 should_offload:1, /* set indicates this neigh is connected and * should be put to KVD linear area of this group. 
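A note on the link-local skip above: mlxsw_sp_neigh_ipv6_ignore() relies on ipv6_addr_type() reporting IPV6_ADDR_LINKLOCAL, which for unicast addresses reduces to an fe80::/10 prefix test. The short userspace C sketch below reproduces that test; it is illustrative only, and ipv6_is_link_local() is a made-up helper name, not a kernel or driver symbol.

#include <stdio.h>
#include <arpa/inet.h>

/* fe80::/10 check: the first ten bits are 1111 1110 10 */
static int ipv6_is_link_local(const struct in6_addr *a)
{
	return a->s6_addr[0] == 0xfe && (a->s6_addr[1] & 0xc0) == 0x80;
}

int main(void)
{
	static const char *samples[] = { "fe80::1", "2001:db8::1", "ff02::1" };
	struct in6_addr a;
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		if (inet_pton(AF_INET6, samples[i], &a) != 1)
			continue;
		printf("%-12s link-local: %s\n", samples[i],
		       ipv6_is_link_local(&a) ? "yes" : "no");
	}
	return 0;
}

Only fe80::1 reports "yes"; ff02::1 is multicast and is handled separately by the IPV6_ADDR_MULTICAST check in the route path added later in this patch.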
@@ -1360,6 +1491,7 @@ struct mlxsw_sp_nexthop_group_key { struct mlxsw_sp_nexthop_group { struct rhash_head ht_node; struct list_head fib_list; /* list of fib entries that use this group */ + struct neigh_table *neigh_tbl; struct mlxsw_sp_nexthop_group_key key; u8 adj_index_valid:1, gateway:1; /* routes using the group use a gateway */ @@ -1535,6 +1667,24 @@ mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp, } static void +mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op, int err); + +static void +mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp) +{ + enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE; + struct mlxsw_sp_fib_entry *fib_entry; + + list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) { + if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node, + fib_entry)) + continue; + mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0); + } +} + +static void mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group *nh_grp) { @@ -1556,7 +1706,7 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, for (i = 0; i < nh_grp->count; i++) { nh = &nh_grp->nexthops[i]; - if (nh->should_offload ^ nh->offloaded) { + if (nh->should_offload != nh->offloaded) { offload_change = true; if (nh->should_offload) nh->update = 1; @@ -1621,6 +1771,10 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n"); goto set_trap; } + + /* Offload state within the group changed, so update the flags. */ + mlxsw_sp_nexthop_fib_entries_refresh(nh_grp); + return; set_trap: @@ -1640,9 +1794,9 @@ set_trap: static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh, bool removing) { - if (!removing && !nh->should_offload) + if (!removing) nh->should_offload = 1; - else if (removing && nh->offloaded) + else if (nh->offloaded) nh->should_offload = 0; nh->update = 1; } @@ -1684,7 +1838,6 @@ static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop *nh) { struct mlxsw_sp_neigh_entry *neigh_entry; - struct fib_nh *fib_nh = nh->key.fib_nh; struct neighbour *n; u8 nud_state, dead; int err; @@ -1693,13 +1846,14 @@ static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp, return 0; /* Take a reference of neigh here ensuring that neigh would - * not be detructed before the nexthop entry is finished. + * not be destructed before the nexthop entry is finished. * The reference is taken either in neigh_lookup() or * in neigh_create() in case n is not found. 
*/ - n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev); + n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev); if (!n) { - n = neigh_create(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev); + n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr, + nh->rif->dev); if (IS_ERR(n)) return PTR_ERR(n); neigh_event_send(n, NULL); @@ -1761,10 +1915,10 @@ static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp, neigh_release(n); } -static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_nexthop_group *nh_grp, - struct mlxsw_sp_nexthop *nh, - struct fib_nh *fib_nh) +static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp, + struct mlxsw_sp_nexthop *nh, + struct fib_nh *fib_nh) { struct net_device *dev = fib_nh->nh_dev; struct in_device *in_dev; @@ -1773,6 +1927,7 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, nh->nh_grp = nh_grp; nh->key.fib_nh = fib_nh; + memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw)); err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh); if (err) return err; @@ -1802,16 +1957,16 @@ err_nexthop_neigh_init: return err; } -static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_nexthop *nh) +static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh) { mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh); mlxsw_sp_nexthop_rif_fini(nh); mlxsw_sp_nexthop_remove(mlxsw_sp, nh); } -static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp, - unsigned long event, struct fib_nh *fib_nh) +static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp, + unsigned long event, struct fib_nh *fib_nh) { struct mlxsw_sp_nexthop_key key; struct mlxsw_sp_nexthop *nh; @@ -1856,7 +2011,7 @@ static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, } static struct mlxsw_sp_nexthop_group * -mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) +mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) { struct mlxsw_sp_nexthop_group *nh_grp; struct mlxsw_sp_nexthop *nh; @@ -1871,6 +2026,8 @@ mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) if (!nh_grp) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&nh_grp->fib_list); + nh_grp->neigh_tbl = &arp_tbl; + nh_grp->gateway = fi->fib_nh->nh_scope == RT_SCOPE_LINK; nh_grp->count = fi->fib_nhs; nh_grp->key.fi = fi; @@ -1878,9 +2035,9 @@ mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) for (i = 0; i < nh_grp->count; i++) { nh = &nh_grp->nexthops[i]; fib_nh = &fi->fib_nh[i]; - err = mlxsw_sp_nexthop_init(mlxsw_sp, nh_grp, nh, fib_nh); + err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh); if (err) - goto err_nexthop_init; + goto err_nexthop4_init; } err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp); if (err) @@ -1889,10 +2046,10 @@ mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) return nh_grp; err_nexthop_group_insert: -err_nexthop_init: +err_nexthop4_init: for (i--; i >= 0; i--) { nh = &nh_grp->nexthops[i]; - mlxsw_sp_nexthop_fini(mlxsw_sp, nh); + mlxsw_sp_nexthop4_fini(mlxsw_sp, nh); } fib_info_put(nh_grp->key.fi); kfree(nh_grp); @@ -1900,8 +2057,8 @@ err_nexthop_init: } static void -mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_nexthop_group *nh_grp) +mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp) { struct mlxsw_sp_nexthop *nh; int i; @@ -1909,7 +2066,7 @@ 
mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp); for (i = 0; i < nh_grp->count; i++) { nh = &nh_grp->nexthops[i]; - mlxsw_sp_nexthop_fini(mlxsw_sp, nh); + mlxsw_sp_nexthop4_fini(mlxsw_sp, nh); } mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); WARN_ON_ONCE(nh_grp->adj_index_valid); @@ -1917,9 +2074,9 @@ mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp, kfree(nh_grp); } -static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry, - struct fib_info *fi) +static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + struct fib_info *fi) { struct mlxsw_sp_nexthop_group_key key; struct mlxsw_sp_nexthop_group *nh_grp; @@ -1927,7 +2084,7 @@ static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp, key.fi = fi; nh_grp = mlxsw_sp_nexthop_group_lookup(mlxsw_sp, key); if (!nh_grp) { - nh_grp = mlxsw_sp_nexthop_group_create(mlxsw_sp, fi); + nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi); if (IS_ERR(nh_grp)) return PTR_ERR(nh_grp); } @@ -1936,15 +2093,25 @@ static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp, return 0; } -static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry) +static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry) { struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group; list_del(&fib_entry->nexthop_group_node); if (!list_empty(&nh_grp->fib_list)) return; - mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp); + mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp); +} + +static bool +mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry) +{ + struct mlxsw_sp_fib4_entry *fib4_entry; + + fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry, + common); + return !fib4_entry->tos; } static bool @@ -1952,8 +2119,14 @@ mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry) { struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group; - if (fib_entry->params.tos) - return false; + switch (fib_entry->fib_node->fib->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + if (!mlxsw_sp_fib4_entry_should_offload(fib_entry)) + return false; + break; + case MLXSW_SP_L3_PROTO_IPV6: + break; + } switch (fib_entry->type) { case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE: @@ -1965,16 +2138,111 @@ mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry) } } -static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) +static struct mlxsw_sp_nexthop * +mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp, + const struct mlxsw_sp_rt6 *mlxsw_sp_rt6) +{ + int i; + + for (i = 0; i < nh_grp->count; i++) { + struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; + struct rt6_info *rt = mlxsw_sp_rt6->rt; + + if (nh->rif && nh->rif->dev == rt->dst.dev && + ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr, + &rt->rt6i_gateway)) + return nh; + continue; + } + + return NULL; +} + +static void +mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) +{ + struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group; + int i; + + if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) { + nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD; + return; + } + + for (i = 0; i < nh_grp->count; i++) { + struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; + + if (nh->offloaded) + nh->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD; + else + 
nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD; + } +} + +static void +mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry) +{ + struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group; + int i; + + for (i = 0; i < nh_grp->count; i++) { + struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; + + nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD; + } +} + +static void +mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) { - fib_entry->offloaded = true; + struct mlxsw_sp_fib6_entry *fib6_entry; + struct mlxsw_sp_rt6 *mlxsw_sp_rt6; + + fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry, + common); + + if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) { + list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6, + list)->rt->rt6i_flags |= RTF_OFFLOAD; + return; + } + + list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) { + struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group; + struct mlxsw_sp_nexthop *nh; + + nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6); + if (nh && nh->offloaded) + mlxsw_sp_rt6->rt->rt6i_flags |= RTF_OFFLOAD; + else + mlxsw_sp_rt6->rt->rt6i_flags &= ~RTF_OFFLOAD; + } +} +static void +mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry) +{ + struct mlxsw_sp_fib6_entry *fib6_entry; + struct mlxsw_sp_rt6 *mlxsw_sp_rt6; + + fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry, + common); + list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) { + struct rt6_info *rt = mlxsw_sp_rt6->rt; + + rt->rt6i_flags &= ~RTF_OFFLOAD; + } +} + +static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) +{ switch (fib_entry->fib_node->fib->proto) { case MLXSW_SP_L3_PROTO_IPV4: - fib_info_offload_inc(fib_entry->nh_group->key.fi); + mlxsw_sp_fib4_entry_offload_set(fib_entry); break; case MLXSW_SP_L3_PROTO_IPV6: - WARN_ON_ONCE(1); + mlxsw_sp_fib6_entry_offload_set(fib_entry); + break; } } @@ -1983,13 +2251,12 @@ mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry) { switch (fib_entry->fib_node->fib->proto) { case MLXSW_SP_L3_PROTO_IPV4: - fib_info_offload_dec(fib_entry->nh_group->key.fi); + mlxsw_sp_fib4_entry_offload_unset(fib_entry); break; case MLXSW_SP_L3_PROTO_IPV6: - WARN_ON_ONCE(1); + mlxsw_sp_fib6_entry_offload_unset(fib_entry); + break; } - - fib_entry->offloaded = false; } static void @@ -1998,17 +2265,13 @@ mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry, { switch (op) { case MLXSW_REG_RALUE_OP_WRITE_DELETE: - if (!fib_entry->offloaded) - return; return mlxsw_sp_fib_entry_offload_unset(fib_entry); case MLXSW_REG_RALUE_OP_WRITE_WRITE: if (err) return; - if (mlxsw_sp_fib_entry_should_offload(fib_entry) && - !fib_entry->offloaded) + if (mlxsw_sp_fib_entry_should_offload(fib_entry)) mlxsw_sp_fib_entry_offload_set(fib_entry); - else if (!mlxsw_sp_fib_entry_should_offload(fib_entry) && - fib_entry->offloaded) + else if (!mlxsw_sp_fib_entry_should_offload(fib_entry)) mlxsw_sp_fib_entry_offload_unset(fib_entry); return; default: @@ -2016,13 +2279,37 @@ mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry, } } -static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry, - enum mlxsw_reg_ralue_op op) +static void +mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl, + const struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op) { - char ralue_pl[MLXSW_REG_RALUE_LEN]; struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib; - u32 *p_dip = (u32 *) 
fib_entry->fib_node->key.addr; + enum mlxsw_reg_ralxx_protocol proto; + u32 *p_dip; + + proto = (enum mlxsw_reg_ralxx_protocol) fib->proto; + + switch (fib->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + p_dip = (u32 *) fib_entry->fib_node->key.addr; + mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id, + fib_entry->fib_node->key.prefix_len, + *p_dip); + break; + case MLXSW_SP_L3_PROTO_IPV6: + mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id, + fib_entry->fib_node->key.prefix_len, + fib_entry->fib_node->key.addr); + break; + } +} + +static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op) +{ + char ralue_pl[MLXSW_REG_RALUE_LEN]; enum mlxsw_reg_ralue_trap_action trap_action; u16 trap_id = 0; u32 adjacency_index = 0; @@ -2041,24 +2328,19 @@ static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp, trap_id = MLXSW_TRAP_ID_RTR_INGRESS0; } - mlxsw_reg_ralue_pack4(ralue_pl, - (enum mlxsw_reg_ralxx_protocol) fib->proto, op, - fib->vr->id, fib_entry->fib_node->key.prefix_len, - *p_dip); + mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op); mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id, adjacency_index, ecmp_size); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); } -static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry, - enum mlxsw_reg_ralue_op op) +static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op) { struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif; - struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib; enum mlxsw_reg_ralue_trap_action trap_action; char ralue_pl[MLXSW_REG_RALUE_LEN]; - u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr; u16 trap_id = 0; u16 rif_index = 0; @@ -2070,42 +2352,34 @@ static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp, trap_id = MLXSW_TRAP_ID_RTR_INGRESS0; } - mlxsw_reg_ralue_pack4(ralue_pl, - (enum mlxsw_reg_ralxx_protocol) fib->proto, op, - fib->vr->id, fib_entry->fib_node->key.prefix_len, - *p_dip); + mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op); mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, rif_index); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); } -static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry, - enum mlxsw_reg_ralue_op op) +static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op) { - struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib; char ralue_pl[MLXSW_REG_RALUE_LEN]; - u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr; - mlxsw_reg_ralue_pack4(ralue_pl, - (enum mlxsw_reg_ralxx_protocol) fib->proto, op, - fib->vr->id, fib_entry->fib_node->key.prefix_len, - *p_dip); + mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op); mlxsw_reg_ralue_act_ip2me_pack(ralue_pl); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); } -static int mlxsw_sp_fib_entry_op4(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry, - enum mlxsw_reg_ralue_op op) +static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op) { switch (fib_entry->type) { case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE: - return mlxsw_sp_fib_entry_op4_remote(mlxsw_sp, fib_entry, op); + return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op); case 
MLXSW_SP_FIB_ENTRY_TYPE_LOCAL: - return mlxsw_sp_fib_entry_op4_local(mlxsw_sp, fib_entry, op); + return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op); case MLXSW_SP_FIB_ENTRY_TYPE_TRAP: - return mlxsw_sp_fib_entry_op4_trap(mlxsw_sp, fib_entry, op); + return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op); } return -EINVAL; } @@ -2114,16 +2388,10 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry, enum mlxsw_reg_ralue_op op) { - int err = -EINVAL; + int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op); - switch (fib_entry->fib_node->fib->proto) { - case MLXSW_SP_L3_PROTO_IPV4: - err = mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op); - break; - case MLXSW_SP_L3_PROTO_IPV6: - return err; - } mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err); + return err; } @@ -2173,72 +2441,80 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp, } } -static struct mlxsw_sp_fib_entry * +static struct mlxsw_sp_fib4_entry * mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_node *fib_node, const struct fib_entry_notifier_info *fen_info) { + struct mlxsw_sp_fib4_entry *fib4_entry; struct mlxsw_sp_fib_entry *fib_entry; int err; - fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL); - if (!fib_entry) { - err = -ENOMEM; - goto err_fib_entry_alloc; - } + fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL); + if (!fib4_entry) + return ERR_PTR(-ENOMEM); + fib_entry = &fib4_entry->common; err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry); if (err) goto err_fib4_entry_type_set; - err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fen_info->fi); + err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi); if (err) - goto err_nexthop_group_get; + goto err_nexthop4_group_get; - fib_entry->params.prio = fen_info->fi->fib_priority; - fib_entry->params.tb_id = fen_info->tb_id; - fib_entry->params.type = fen_info->type; - fib_entry->params.tos = fen_info->tos; + fib4_entry->prio = fen_info->fi->fib_priority; + fib4_entry->tb_id = fen_info->tb_id; + fib4_entry->type = fen_info->type; + fib4_entry->tos = fen_info->tos; fib_entry->fib_node = fib_node; - return fib_entry; + return fib4_entry; -err_nexthop_group_get: +err_nexthop4_group_get: err_fib4_entry_type_set: - kfree(fib_entry); -err_fib_entry_alloc: + kfree(fib4_entry); return ERR_PTR(err); } static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry) + struct mlxsw_sp_fib4_entry *fib4_entry) { - mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry); - kfree(fib_entry); + mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common); + kfree(fib4_entry); } static struct mlxsw_sp_fib_node * -mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp, - const struct fib_entry_notifier_info *fen_info); +mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr, + size_t addr_len, unsigned char prefix_len); -static struct mlxsw_sp_fib_entry * +static struct mlxsw_sp_fib4_entry * mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp, const struct fib_entry_notifier_info *fen_info) { - struct mlxsw_sp_fib_entry *fib_entry; + struct mlxsw_sp_fib4_entry *fib4_entry; struct mlxsw_sp_fib_node *fib_node; + struct mlxsw_sp_fib *fib; + struct mlxsw_sp_vr *vr; - fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info); - if (IS_ERR(fib_node)) + vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id); + if (!vr) + return NULL; + fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4); + + fib_node = mlxsw_sp_fib_node_lookup(fib, 
&fen_info->dst, + sizeof(fen_info->dst), + fen_info->dst_len); + if (!fib_node) return NULL; - list_for_each_entry(fib_entry, &fib_node->entry_list, list) { - if (fib_entry->params.tb_id == fen_info->tb_id && - fib_entry->params.tos == fen_info->tos && - fib_entry->params.type == fen_info->type && - fib_entry->nh_group->key.fi == fen_info->fi) { - return fib_entry; + list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) { + if (fib4_entry->tb_id == fen_info->tb_id && + fib4_entry->tos == fen_info->tos && + fib4_entry->type == fen_info->type && + fib4_entry->common.nh_group->key.fi == fen_info->fi) { + return fib4_entry; } } @@ -2395,28 +2671,25 @@ static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp, } static struct mlxsw_sp_fib_node * -mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp, - const struct fib_entry_notifier_info *fen_info) +mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr, + size_t addr_len, unsigned char prefix_len, + enum mlxsw_sp_l3proto proto) { struct mlxsw_sp_fib_node *fib_node; struct mlxsw_sp_fib *fib; struct mlxsw_sp_vr *vr; int err; - vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->tb_id); + vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id); if (IS_ERR(vr)) return ERR_CAST(vr); - fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4); + fib = mlxsw_sp_vr_fib(vr, proto); - fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst, - sizeof(fen_info->dst), - fen_info->dst_len); + fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len); if (fib_node) return fib_node; - fib_node = mlxsw_sp_fib_node_create(fib, &fen_info->dst, - sizeof(fen_info->dst), - fen_info->dst_len); + fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len); if (!fib_node) { err = -ENOMEM; goto err_fib_node_create; @@ -2435,8 +2708,8 @@ err_fib_node_create: return ERR_PTR(err); } -static void mlxsw_sp_fib4_node_put(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_node *fib_node) +static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_node *fib_node) { struct mlxsw_sp_vr *vr = fib_node->fib->vr; @@ -2447,95 +2720,100 @@ static void mlxsw_sp_fib4_node_put(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_vr_put(vr); } -static struct mlxsw_sp_fib_entry * +static struct mlxsw_sp_fib4_entry * mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node, - const struct mlxsw_sp_fib_entry_params *params) + const struct mlxsw_sp_fib4_entry *new4_entry) { - struct mlxsw_sp_fib_entry *fib_entry; + struct mlxsw_sp_fib4_entry *fib4_entry; - list_for_each_entry(fib_entry, &fib_node->entry_list, list) { - if (fib_entry->params.tb_id > params->tb_id) + list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) { + if (fib4_entry->tb_id > new4_entry->tb_id) continue; - if (fib_entry->params.tb_id != params->tb_id) + if (fib4_entry->tb_id != new4_entry->tb_id) break; - if (fib_entry->params.tos > params->tos) + if (fib4_entry->tos > new4_entry->tos) continue; - if (fib_entry->params.prio >= params->prio || - fib_entry->params.tos < params->tos) - return fib_entry; + if (fib4_entry->prio >= new4_entry->prio || + fib4_entry->tos < new4_entry->tos) + return fib4_entry; } return NULL; } -static int mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib_entry *fib_entry, - struct mlxsw_sp_fib_entry *new_entry) +static int +mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry, + struct mlxsw_sp_fib4_entry *new4_entry) { struct mlxsw_sp_fib_node *fib_node; - if (WARN_ON(!fib_entry)) + if (WARN_ON(!fib4_entry)) return -EINVAL; - 
fib_node = fib_entry->fib_node; - list_for_each_entry_from(fib_entry, &fib_node->entry_list, list) { - if (fib_entry->params.tb_id != new_entry->params.tb_id || - fib_entry->params.tos != new_entry->params.tos || - fib_entry->params.prio != new_entry->params.prio) + fib_node = fib4_entry->common.fib_node; + list_for_each_entry_from(fib4_entry, &fib_node->entry_list, + common.list) { + if (fib4_entry->tb_id != new4_entry->tb_id || + fib4_entry->tos != new4_entry->tos || + fib4_entry->prio != new4_entry->prio) break; } - list_add_tail(&new_entry->list, &fib_entry->list); + list_add_tail(&new4_entry->common.list, &fib4_entry->common.list); return 0; } static int -mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib_node *fib_node, - struct mlxsw_sp_fib_entry *new_entry, +mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib4_entry *new4_entry, bool replace, bool append) { - struct mlxsw_sp_fib_entry *fib_entry; + struct mlxsw_sp_fib_node *fib_node = new4_entry->common.fib_node; + struct mlxsw_sp_fib4_entry *fib4_entry; - fib_entry = mlxsw_sp_fib4_node_entry_find(fib_node, &new_entry->params); + fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry); if (append) - return mlxsw_sp_fib4_node_list_append(fib_entry, new_entry); - if (replace && WARN_ON(!fib_entry)) + return mlxsw_sp_fib4_node_list_append(fib4_entry, new4_entry); + if (replace && WARN_ON(!fib4_entry)) return -EINVAL; /* Insert new entry before replaced one, so that we can later * remove the second. */ - if (fib_entry) { - list_add_tail(&new_entry->list, &fib_entry->list); + if (fib4_entry) { + list_add_tail(&new4_entry->common.list, + &fib4_entry->common.list); } else { - struct mlxsw_sp_fib_entry *last; + struct mlxsw_sp_fib4_entry *last; - list_for_each_entry(last, &fib_node->entry_list, list) { - if (new_entry->params.tb_id > last->params.tb_id) + list_for_each_entry(last, &fib_node->entry_list, common.list) { + if (new4_entry->tb_id > last->tb_id) break; - fib_entry = last; + fib4_entry = last; } - if (fib_entry) - list_add(&new_entry->list, &fib_entry->list); + if (fib4_entry) + list_add(&new4_entry->common.list, + &fib4_entry->common.list); else - list_add(&new_entry->list, &fib_node->entry_list); + list_add(&new4_entry->common.list, + &fib_node->entry_list); } return 0; } static void -mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib_entry *fib_entry) +mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry) { - list_del(&fib_entry->list); + list_del(&fib4_entry->common.list); } -static int -mlxsw_sp_fib4_node_entry_add(struct mlxsw_sp *mlxsw_sp, - const struct mlxsw_sp_fib_node *fib_node, - struct mlxsw_sp_fib_entry *fib_entry) +static int mlxsw_sp_fib_node_entry_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry) { + struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node; + if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry)) return 0; @@ -2552,11 +2830,11 @@ mlxsw_sp_fib4_node_entry_add(struct mlxsw_sp *mlxsw_sp, return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry); } -static void -mlxsw_sp_fib4_node_entry_del(struct mlxsw_sp *mlxsw_sp, - const struct mlxsw_sp_fib_node *fib_node, - struct mlxsw_sp_fib_entry *fib_entry) +static void mlxsw_sp_fib_node_entry_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry) { + struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node; + if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry)) return; @@ -2574,54 +2852,50 @@ mlxsw_sp_fib4_node_entry_del(struct mlxsw_sp *mlxsw_sp, } static int 
mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry, + struct mlxsw_sp_fib4_entry *fib4_entry, bool replace, bool append) { - struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node; int err; - err = mlxsw_sp_fib4_node_list_insert(fib_node, fib_entry, replace, - append); + err = mlxsw_sp_fib4_node_list_insert(fib4_entry, replace, append); if (err) return err; - err = mlxsw_sp_fib4_node_entry_add(mlxsw_sp, fib_node, fib_entry); + err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib4_entry->common); if (err) - goto err_fib4_node_entry_add; + goto err_fib_node_entry_add; return 0; -err_fib4_node_entry_add: - mlxsw_sp_fib4_node_list_remove(fib_entry); +err_fib_node_entry_add: + mlxsw_sp_fib4_node_list_remove(fib4_entry); return err; } static void mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry) + struct mlxsw_sp_fib4_entry *fib4_entry) { - struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node; - - mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, fib_entry); - mlxsw_sp_fib4_node_list_remove(fib_entry); + mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib4_entry->common); + mlxsw_sp_fib4_node_list_remove(fib4_entry); } static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry, + struct mlxsw_sp_fib4_entry *fib4_entry, bool replace) { - struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node; - struct mlxsw_sp_fib_entry *replaced; + struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node; + struct mlxsw_sp_fib4_entry *replaced; if (!replace) return; /* We inserted the new entry before replaced one */ - replaced = list_next_entry(fib_entry, list); + replaced = list_next_entry(fib4_entry, common.list); mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced); mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced); - mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node); + mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); } static int @@ -2629,76 +2903,723 @@ mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp, const struct fib_entry_notifier_info *fen_info, bool replace, bool append) { - struct mlxsw_sp_fib_entry *fib_entry; + struct mlxsw_sp_fib4_entry *fib4_entry; struct mlxsw_sp_fib_node *fib_node; int err; if (mlxsw_sp->router->aborted) return 0; - fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info); + fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id, + &fen_info->dst, sizeof(fen_info->dst), + fen_info->dst_len, + MLXSW_SP_L3_PROTO_IPV4); if (IS_ERR(fib_node)) { dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n"); return PTR_ERR(fib_node); } - fib_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info); - if (IS_ERR(fib_entry)) { + fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info); + if (IS_ERR(fib4_entry)) { dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n"); - err = PTR_ERR(fib_entry); + err = PTR_ERR(fib4_entry); goto err_fib4_entry_create; } - err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib_entry, replace, + err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, replace, append); if (err) { dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n"); goto err_fib4_node_entry_link; } - mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib_entry, replace); + mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, replace); return 0; err_fib4_node_entry_link: - mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry); + mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry); err_fib4_entry_create: - mlxsw_sp_fib4_node_put(mlxsw_sp, 
fib_node); + mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); return err; } static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp, struct fib_entry_notifier_info *fen_info) { + struct mlxsw_sp_fib4_entry *fib4_entry; + struct mlxsw_sp_fib_node *fib_node; + + if (mlxsw_sp->router->aborted) + return; + + fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info); + if (WARN_ON(!fib4_entry)) + return; + fib_node = fib4_entry->common.fib_node; + + mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry); + mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry); + mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); +} + +static bool mlxsw_sp_fib6_rt_should_ignore(const struct rt6_info *rt) +{ + /* Packets with link-local destination IP arriving to the router + * are trapped to the CPU, so no need to program specific routes + * for them. + */ + if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LINKLOCAL) + return true; + + /* Multicast routes aren't supported, so ignore them. Neighbour + * Discovery packets are specifically trapped. + */ + if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_MULTICAST) + return true; + + /* Cloned routes are irrelevant in the forwarding path. */ + if (rt->rt6i_flags & RTF_CACHE) + return true; + + return false; +} + +static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct rt6_info *rt) +{ + struct mlxsw_sp_rt6 *mlxsw_sp_rt6; + + mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL); + if (!mlxsw_sp_rt6) + return ERR_PTR(-ENOMEM); + + /* In case of route replace, replaced route is deleted with + * no notification. Take reference to prevent accessing freed + * memory. + */ + mlxsw_sp_rt6->rt = rt; + rt6_hold(rt); + + return mlxsw_sp_rt6; +} + +#if IS_ENABLED(CONFIG_IPV6) +static void mlxsw_sp_rt6_release(struct rt6_info *rt) +{ + rt6_release(rt); +} +#else +static void mlxsw_sp_rt6_release(struct rt6_info *rt) +{ +} +#endif + +static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6) +{ + mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt); + kfree(mlxsw_sp_rt6); +} + +static bool mlxsw_sp_fib6_rt_can_mp(const struct rt6_info *rt) +{ + /* RTF_CACHE routes are ignored */ + return (rt->rt6i_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY; +} + +static struct rt6_info * +mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry) +{ + return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6, + list)->rt; +} + +static struct mlxsw_sp_fib6_entry * +mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node, + const struct rt6_info *nrt, bool replace) +{ + struct mlxsw_sp_fib6_entry *fib6_entry; + + if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace) + return NULL; + + list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { + struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry); + + /* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same + * virtual router. 
+ */ + if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id) + continue; + if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id) + break; + if (rt->rt6i_metric < nrt->rt6i_metric) + continue; + if (rt->rt6i_metric == nrt->rt6i_metric && + mlxsw_sp_fib6_rt_can_mp(rt)) + return fib6_entry; + if (rt->rt6i_metric > nrt->rt6i_metric) + break; + } + + return NULL; +} + +static struct mlxsw_sp_rt6 * +mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry, + const struct rt6_info *rt) +{ + struct mlxsw_sp_rt6 *mlxsw_sp_rt6; + + list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) { + if (mlxsw_sp_rt6->rt == rt) + return mlxsw_sp_rt6; + } + + return NULL; +} + +static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp, + struct mlxsw_sp_nexthop *nh, + const struct rt6_info *rt) +{ + struct net_device *dev = rt->dst.dev; + struct mlxsw_sp_rif *rif; + int err; + + nh->nh_grp = nh_grp; + memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr)); + + if (!dev) + return 0; + + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); + if (!rif) + return 0; + mlxsw_sp_nexthop_rif_init(nh, rif); + + err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh); + if (err) + goto err_nexthop_neigh_init; + + return 0; + +err_nexthop_neigh_init: + mlxsw_sp_nexthop_rif_fini(nh); + return err; +} + +static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh) +{ + mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh); + mlxsw_sp_nexthop_rif_fini(nh); +} + +static struct mlxsw_sp_nexthop_group * +mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib6_entry *fib6_entry) +{ + struct mlxsw_sp_nexthop_group *nh_grp; + struct mlxsw_sp_rt6 *mlxsw_sp_rt6; + struct mlxsw_sp_nexthop *nh; + size_t alloc_size; + int i = 0; + int err; + + alloc_size = sizeof(*nh_grp) + + fib6_entry->nrt6 * sizeof(struct mlxsw_sp_nexthop); + nh_grp = kzalloc(alloc_size, GFP_KERNEL); + if (!nh_grp) + return ERR_PTR(-ENOMEM); + INIT_LIST_HEAD(&nh_grp->fib_list); +#if IS_ENABLED(CONFIG_IPV6) + nh_grp->neigh_tbl = &nd_tbl; +#endif + mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list, + struct mlxsw_sp_rt6, list); + nh_grp->gateway = !!(mlxsw_sp_rt6->rt->rt6i_flags & RTF_GATEWAY); + nh_grp->count = fib6_entry->nrt6; + for (i = 0; i < nh_grp->count; i++) { + struct rt6_info *rt = mlxsw_sp_rt6->rt; + + nh = &nh_grp->nexthops[i]; + err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt); + if (err) + goto err_nexthop6_init; + mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list); + } + mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); + return nh_grp; + +err_nexthop6_init: + for (i--; i >= 0; i--) { + nh = &nh_grp->nexthops[i]; + mlxsw_sp_nexthop6_fini(mlxsw_sp, nh); + } + kfree(nh_grp); + return ERR_PTR(err); +} + +static void +mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp) +{ + struct mlxsw_sp_nexthop *nh; + int i = nh_grp->count; + + for (i--; i >= 0; i--) { + nh = &nh_grp->nexthops[i]; + mlxsw_sp_nexthop6_fini(mlxsw_sp, nh); + } + mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); + WARN_ON(nh_grp->adj_index_valid); + kfree(nh_grp); +} + +static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib6_entry *fib6_entry) +{ + struct mlxsw_sp_nexthop_group *nh_grp; + + /* For now, don't consolidate nexthop groups */ + nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry); + if (IS_ERR(nh_grp)) + return PTR_ERR(nh_grp); + + 
list_add_tail(&fib6_entry->common.nexthop_group_node, + &nh_grp->fib_list); + fib6_entry->common.nh_group = nh_grp; + + return 0; +} + +static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry) +{ + struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group; + + list_del(&fib_entry->nexthop_group_node); + if (!list_empty(&nh_grp->fib_list)) + return; + mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp); +} + +static int +mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib6_entry *fib6_entry) +{ + struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group; + int err; + + fib6_entry->common.nh_group = NULL; + list_del(&fib6_entry->common.nexthop_group_node); + + err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry); + if (err) + goto err_nexthop6_group_get; + + /* In case this entry is offloaded, then the adjacency index + * currently associated with it in the device's table is that + * of the old group. Start using the new one instead. + */ + err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common); + if (err) + goto err_fib_node_entry_add; + + if (list_empty(&old_nh_grp->fib_list)) + mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp); + + return 0; + +err_fib_node_entry_add: + mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common); +err_nexthop6_group_get: + list_add_tail(&fib6_entry->common.nexthop_group_node, + &old_nh_grp->fib_list); + fib6_entry->common.nh_group = old_nh_grp; + return err; +} + +static int +mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib6_entry *fib6_entry, + struct rt6_info *rt) +{ + struct mlxsw_sp_rt6 *mlxsw_sp_rt6; + int err; + + mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt); + if (IS_ERR(mlxsw_sp_rt6)) + return PTR_ERR(mlxsw_sp_rt6); + + list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list); + fib6_entry->nrt6++; + + err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry); + if (err) + goto err_nexthop6_group_update; + + return 0; + +err_nexthop6_group_update: + fib6_entry->nrt6--; + list_del(&mlxsw_sp_rt6->list); + mlxsw_sp_rt6_destroy(mlxsw_sp_rt6); + return err; +} + +static void +mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib6_entry *fib6_entry, + struct rt6_info *rt) +{ + struct mlxsw_sp_rt6 *mlxsw_sp_rt6; + + mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt); + if (WARN_ON(!mlxsw_sp_rt6)) + return; + + fib6_entry->nrt6--; + list_del(&mlxsw_sp_rt6->list); + mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry); + mlxsw_sp_rt6_destroy(mlxsw_sp_rt6); +} + +static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp_fib_entry *fib_entry, + const struct rt6_info *rt) +{ + /* Packets hitting RTF_REJECT routes need to be discarded by the + * stack. We can rely on their destination device not having a + * RIF (it's the loopback device) and can thus use action type + * local, which will cause them to be trapped with a lower + * priority than packets that need to be locally received. 
+ */ + if (rt->rt6i_flags & RTF_LOCAL) + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; + else if (rt->rt6i_flags & RTF_REJECT) + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL; + else if (rt->rt6i_flags & RTF_GATEWAY) + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE; + else + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL; +} + +static void +mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry) +{ + struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp; + + list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list, + list) { + fib6_entry->nrt6--; + list_del(&mlxsw_sp_rt6->list); + mlxsw_sp_rt6_destroy(mlxsw_sp_rt6); + } +} + +static struct mlxsw_sp_fib6_entry * +mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_node *fib_node, + struct rt6_info *rt) +{ + struct mlxsw_sp_fib6_entry *fib6_entry; struct mlxsw_sp_fib_entry *fib_entry; + struct mlxsw_sp_rt6 *mlxsw_sp_rt6; + int err; + + fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL); + if (!fib6_entry) + return ERR_PTR(-ENOMEM); + fib_entry = &fib6_entry->common; + + mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt); + if (IS_ERR(mlxsw_sp_rt6)) { + err = PTR_ERR(mlxsw_sp_rt6); + goto err_rt6_create; + } + + mlxsw_sp_fib6_entry_type_set(fib_entry, mlxsw_sp_rt6->rt); + + INIT_LIST_HEAD(&fib6_entry->rt6_list); + list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list); + fib6_entry->nrt6 = 1; + err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry); + if (err) + goto err_nexthop6_group_get; + + fib_entry->fib_node = fib_node; + + return fib6_entry; + +err_nexthop6_group_get: + list_del(&mlxsw_sp_rt6->list); + mlxsw_sp_rt6_destroy(mlxsw_sp_rt6); +err_rt6_create: + kfree(fib6_entry); + return ERR_PTR(err); +} + +static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib6_entry *fib6_entry) +{ + mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common); + mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry); + WARN_ON(fib6_entry->nrt6); + kfree(fib6_entry); +} + +static struct mlxsw_sp_fib6_entry * +mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node, + const struct rt6_info *nrt, bool replace) +{ + struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL; + + list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { + struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry); + + if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id) + continue; + if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id) + break; + if (replace && rt->rt6i_metric == nrt->rt6i_metric) { + if (mlxsw_sp_fib6_rt_can_mp(rt) == + mlxsw_sp_fib6_rt_can_mp(nrt)) + return fib6_entry; + if (mlxsw_sp_fib6_rt_can_mp(nrt)) + fallback = fallback ?: fib6_entry; + } + if (rt->rt6i_metric > nrt->rt6i_metric) + return fallback ?: fib6_entry; + } + + return fallback; +} + +static int +mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry, + bool replace) +{ + struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node; + struct rt6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry); + struct mlxsw_sp_fib6_entry *fib6_entry; + + fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, replace); + + if (replace && WARN_ON(!fib6_entry)) + return -EINVAL; + + if (fib6_entry) { + list_add_tail(&new6_entry->common.list, + &fib6_entry->common.list); + } else { + struct mlxsw_sp_fib6_entry *last; + + list_for_each_entry(last, &fib_node->entry_list, common.list) { + struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(last); + + if (nrt->rt6i_table->tb6_id > rt->rt6i_table->tb6_id) + break; 
+ fib6_entry = last; + } + + if (fib6_entry) + list_add(&new6_entry->common.list, + &fib6_entry->common.list); + else + list_add(&new6_entry->common.list, + &fib_node->entry_list); + } + + return 0; +} + +static void +mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry) +{ + list_del(&fib6_entry->common.list); +} + +static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib6_entry *fib6_entry, + bool replace) +{ + int err; + + err = mlxsw_sp_fib6_node_list_insert(fib6_entry, replace); + if (err) + return err; + + err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common); + if (err) + goto err_fib_node_entry_add; + + return 0; + +err_fib_node_entry_add: + mlxsw_sp_fib6_node_list_remove(fib6_entry); + return err; +} + +static void +mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib6_entry *fib6_entry) +{ + mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib6_entry->common); + mlxsw_sp_fib6_node_list_remove(fib6_entry); +} + +static struct mlxsw_sp_fib6_entry * +mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp, + const struct rt6_info *rt) +{ + struct mlxsw_sp_fib6_entry *fib6_entry; + struct mlxsw_sp_fib_node *fib_node; + struct mlxsw_sp_fib *fib; + struct mlxsw_sp_vr *vr; + + vr = mlxsw_sp_vr_find(mlxsw_sp, rt->rt6i_table->tb6_id); + if (!vr) + return NULL; + fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6); + + fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->rt6i_dst.addr, + sizeof(rt->rt6i_dst.addr), + rt->rt6i_dst.plen); + if (!fib_node) + return NULL; + + list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { + struct rt6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry); + + if (rt->rt6i_table->tb6_id == iter_rt->rt6i_table->tb6_id && + rt->rt6i_metric == iter_rt->rt6i_metric && + mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt)) + return fib6_entry; + } + + return NULL; +} + +static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib6_entry *fib6_entry, + bool replace) +{ + struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node; + struct mlxsw_sp_fib6_entry *replaced; + + if (!replace) + return; + + replaced = list_next_entry(fib6_entry, common.list); + + mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, replaced); + mlxsw_sp_fib6_entry_destroy(mlxsw_sp, replaced); + mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); +} + +static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp, + struct rt6_info *rt, bool replace) +{ + struct mlxsw_sp_fib6_entry *fib6_entry; + struct mlxsw_sp_fib_node *fib_node; + int err; + + if (mlxsw_sp->router->aborted) + return 0; + + if (rt->rt6i_src.plen) + return -EINVAL; + + if (mlxsw_sp_fib6_rt_should_ignore(rt)) + return 0; + + fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->rt6i_table->tb6_id, + &rt->rt6i_dst.addr, + sizeof(rt->rt6i_dst.addr), + rt->rt6i_dst.plen, + MLXSW_SP_L3_PROTO_IPV6); + if (IS_ERR(fib_node)) + return PTR_ERR(fib_node); + + /* Before creating a new entry, try to append route to an existing + * multipath entry. 
+ */ + fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace); + if (fib6_entry) { + err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt); + if (err) + goto err_fib6_entry_nexthop_add; + return 0; + } + + fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt); + if (IS_ERR(fib6_entry)) { + err = PTR_ERR(fib6_entry); + goto err_fib6_entry_create; + } + + err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, replace); + if (err) + goto err_fib6_node_entry_link; + + mlxsw_sp_fib6_entry_replace(mlxsw_sp, fib6_entry, replace); + + return 0; + +err_fib6_node_entry_link: + mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry); +err_fib6_entry_create: +err_fib6_entry_nexthop_add: + mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); + return err; +} + +static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp, + struct rt6_info *rt) +{ + struct mlxsw_sp_fib6_entry *fib6_entry; struct mlxsw_sp_fib_node *fib_node; if (mlxsw_sp->router->aborted) return; - fib_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info); - if (WARN_ON(!fib_entry)) + if (mlxsw_sp_fib6_rt_should_ignore(rt)) + return; + + fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt); + if (WARN_ON(!fib6_entry)) return; - fib_node = fib_entry->fib_node; - mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry); - mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry); - mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node); + /* If route is part of a multipath entry, but not the last one + * removed, then only reduce its nexthop group. + */ + if (!list_is_singular(&fib6_entry->rt6_list)) { + mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt); + return; + } + + fib_node = fib6_entry->common.fib_node; + + mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry); + mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry); + mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); } -static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) +static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_reg_ralxx_protocol proto, + u8 tree_id) { char ralta_pl[MLXSW_REG_RALTA_LEN]; char ralst_pl[MLXSW_REG_RALST_LEN]; int i, err; - mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4, - MLXSW_SP_LPM_TREE_MIN); + mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl); if (err) return err; - mlxsw_reg_ralst_pack(ralst_pl, 0xff, MLXSW_SP_LPM_TREE_MIN); + mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl); if (err) return err; @@ -2711,17 +3632,14 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) if (!mlxsw_sp_vr_is_used(vr)) continue; - mlxsw_reg_raltb_pack(raltb_pl, vr->id, - MLXSW_REG_RALXX_PROTOCOL_IPV4, - MLXSW_SP_LPM_TREE_MIN); + mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl); if (err) return err; - mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4, - MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0, - 0); + mlxsw_reg_ralue_pack(ralue_pl, proto, + MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0); mlxsw_reg_ralue_act_ip2me_pack(ralue_pl); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); @@ -2732,17 +3650,33 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) return 0; } +static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) +{ + enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4; + int err; + + err = 
__mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto, + MLXSW_SP_LPM_TREE_MIN); + if (err) + return err; + + proto = MLXSW_REG_RALXX_PROTOCOL_IPV6; + return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto, + MLXSW_SP_LPM_TREE_MIN + 1); +} + static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_node *fib_node) { - struct mlxsw_sp_fib_entry *fib_entry, *tmp; + struct mlxsw_sp_fib4_entry *fib4_entry, *tmp; - list_for_each_entry_safe(fib_entry, tmp, &fib_node->entry_list, list) { - bool do_break = &tmp->list == &fib_node->entry_list; + list_for_each_entry_safe(fib4_entry, tmp, &fib_node->entry_list, + common.list) { + bool do_break = &tmp->common.list == &fib_node->entry_list; - mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry); - mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry); - mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node); + mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry); + mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry); + mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); /* Break when entry list is empty and node was freed. * Otherwise, we'll access freed memory in the next * iteration. @@ -2752,6 +3686,23 @@ static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp, } } +static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_node *fib_node) +{ + struct mlxsw_sp_fib6_entry *fib6_entry, *tmp; + + list_for_each_entry_safe(fib6_entry, tmp, &fib_node->entry_list, + common.list) { + bool do_break = &tmp->common.list == &fib_node->entry_list; + + mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry); + mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry); + mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); + if (do_break) + break; + } +} + static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_node *fib_node) { @@ -2760,7 +3711,7 @@ static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node); break; case MLXSW_SP_L3_PROTO_IPV6: - WARN_ON_ONCE(1); + mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node); break; } } @@ -2791,10 +3742,17 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp) if (!mlxsw_sp_vr_is_used(vr)) continue; mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4); + + /* If virtual router was only used for IPv4, then it's no + * longer used. 
+ */ + if (!mlxsw_sp_vr_is_used(vr)) + continue; + mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6); } } -static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp) +static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp) { int err; @@ -2811,6 +3769,7 @@ static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp) struct mlxsw_sp_fib_event_work { struct work_struct work; union { + struct fib6_entry_notifier_info fen6_info; struct fib_entry_notifier_info fen_info; struct fib_rule_notifier_info fr_info; struct fib_nh_notifier_info fnh_info; @@ -2819,7 +3778,7 @@ struct mlxsw_sp_fib_event_work { unsigned long event; }; -static void mlxsw_sp_router_fib_event_work(struct work_struct *work) +static void mlxsw_sp_router_fib4_event_work(struct work_struct *work) { struct mlxsw_sp_fib_event_work *fib_work = container_of(work, struct mlxsw_sp_fib_event_work, work); @@ -2839,7 +3798,7 @@ static void mlxsw_sp_router_fib_event_work(struct work_struct *work) err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info, replace, append); if (err) - mlxsw_sp_router_fib4_abort(mlxsw_sp); + mlxsw_sp_router_fib_abort(mlxsw_sp); fib_info_put(fib_work->fen_info.fi); break; case FIB_EVENT_ENTRY_DEL: @@ -2850,13 +3809,13 @@ static void mlxsw_sp_router_fib_event_work(struct work_struct *work) case FIB_EVENT_RULE_DEL: rule = fib_work->fr_info.rule; if (!fib4_rule_default(rule) && !rule->l3mdev) - mlxsw_sp_router_fib4_abort(mlxsw_sp); + mlxsw_sp_router_fib_abort(mlxsw_sp); fib_rule_put(rule); break; case FIB_EVENT_NH_ADD: /* fall through */ case FIB_EVENT_NH_DEL: - mlxsw_sp_nexthop_event(mlxsw_sp, fib_work->event, - fib_work->fnh_info.fib_nh); + mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event, + fib_work->fnh_info.fib_nh); fib_info_put(fib_work->fnh_info.fib_nh->nh_parent); break; } @@ -2864,6 +3823,87 @@ static void mlxsw_sp_router_fib_event_work(struct work_struct *work) kfree(fib_work); } +static void mlxsw_sp_router_fib6_event_work(struct work_struct *work) +{ + struct mlxsw_sp_fib_event_work *fib_work = + container_of(work, struct mlxsw_sp_fib_event_work, work); + struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp; + struct fib_rule *rule; + bool replace; + int err; + + rtnl_lock(); + switch (fib_work->event) { + case FIB_EVENT_ENTRY_REPLACE: /* fall through */ + case FIB_EVENT_ENTRY_ADD: + replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE; + err = mlxsw_sp_router_fib6_add(mlxsw_sp, + fib_work->fen6_info.rt, replace); + if (err) + mlxsw_sp_router_fib_abort(mlxsw_sp); + mlxsw_sp_rt6_release(fib_work->fen6_info.rt); + break; + case FIB_EVENT_ENTRY_DEL: + mlxsw_sp_router_fib6_del(mlxsw_sp, fib_work->fen6_info.rt); + mlxsw_sp_rt6_release(fib_work->fen6_info.rt); + break; + case FIB_EVENT_RULE_ADD: /* fall through */ + case FIB_EVENT_RULE_DEL: + rule = fib_work->fr_info.rule; + if (!fib6_rule_default(rule) && !rule->l3mdev) + mlxsw_sp_router_fib_abort(mlxsw_sp); + fib_rule_put(rule); + break; + } + rtnl_unlock(); + kfree(fib_work); +} + +static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work, + struct fib_notifier_info *info) +{ + switch (fib_work->event) { + case FIB_EVENT_ENTRY_REPLACE: /* fall through */ + case FIB_EVENT_ENTRY_APPEND: /* fall through */ + case FIB_EVENT_ENTRY_ADD: /* fall through */ + case FIB_EVENT_ENTRY_DEL: + memcpy(&fib_work->fen_info, info, sizeof(fib_work->fen_info)); + /* Take reference on fib_info to prevent it from being + * freed while work is queued. Release it afterwards.
+ */ + fib_info_hold(fib_work->fen_info.fi); + break; + case FIB_EVENT_RULE_ADD: /* fall through */ + case FIB_EVENT_RULE_DEL: + memcpy(&fib_work->fr_info, info, sizeof(fib_work->fr_info)); + fib_rule_get(fib_work->fr_info.rule); + break; + case FIB_EVENT_NH_ADD: /* fall through */ + case FIB_EVENT_NH_DEL: + memcpy(&fib_work->fnh_info, info, sizeof(fib_work->fnh_info)); + fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent); + break; + } +} + +static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work, + struct fib_notifier_info *info) +{ + switch (fib_work->event) { + case FIB_EVENT_ENTRY_REPLACE: /* fall through */ + case FIB_EVENT_ENTRY_ADD: /* fall through */ + case FIB_EVENT_ENTRY_DEL: + memcpy(&fib_work->fen6_info, info, sizeof(fib_work->fen6_info)); + rt6_hold(fib_work->fen6_info.rt); + break; + case FIB_EVENT_RULE_ADD: /* fall through */ + case FIB_EVENT_RULE_DEL: + memcpy(&fib_work->fr_info, info, sizeof(fib_work->fr_info)); + fib_rule_get(fib_work->fr_info.rule); + break; + } +} + /* Called with rcu_read_lock() */ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, unsigned long event, void *ptr) @@ -2879,31 +3919,18 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, if (WARN_ON(!fib_work)) return NOTIFY_BAD; - INIT_WORK(&fib_work->work, mlxsw_sp_router_fib_event_work); router = container_of(nb, struct mlxsw_sp_router, fib_nb); fib_work->mlxsw_sp = router->mlxsw_sp; fib_work->event = event; - switch (event) { - case FIB_EVENT_ENTRY_REPLACE: /* fall through */ - case FIB_EVENT_ENTRY_APPEND: /* fall through */ - case FIB_EVENT_ENTRY_ADD: /* fall through */ - case FIB_EVENT_ENTRY_DEL: - memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info)); - /* Take reference on fib_info to prevent it from being - * freed while work is queued. Release it afterwards.
- */ - fib_info_hold(fib_work->fen_info.fi); - break; - case FIB_EVENT_RULE_ADD: /* fall through */ - case FIB_EVENT_RULE_DEL: - memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info)); - fib_rule_get(fib_work->fr_info.rule); + switch (info->family) { + case AF_INET: + INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work); + mlxsw_sp_router_fib4_event(fib_work, info); break; - case FIB_EVENT_NH_ADD: /* fall through */ - case FIB_EVENT_NH_DEL: - memcpy(&fib_work->fnh_info, ptr, sizeof(fib_work->fnh_info)); - fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent); + case AF_INET6: + INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work); + mlxsw_sp_router_fib6_event(fib_work, info); break; } @@ -2948,17 +3975,28 @@ static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif); } -static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, - const struct in_device *in_dev, - unsigned long event) +static bool +mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev, + unsigned long event) { + struct inet6_dev *inet6_dev; + bool addr_list_empty = true; + struct in_device *idev; + switch (event) { case NETDEV_UP: - if (!rif) - return true; - return false; + return rif == NULL; case NETDEV_DOWN: - if (rif && !in_dev->ifa_list && + idev = __in_dev_get_rtnl(dev); + if (idev && idev->ifa_list) + addr_list_empty = false; + + inet6_dev = __in6_dev_get(dev); + if (addr_list_empty && inet6_dev && + !list_empty(&inet6_dev->addr_list)) + addr_list_empty = false; + + if (rif && addr_list_empty && !netif_is_l3_slave(rif->dev)) return true; /* It is possible we already removed the RIF ourselves @@ -3356,7 +4394,7 @@ int mlxsw_sp_inetaddr_event(struct notifier_block *unused, goto out; rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); - if (!mlxsw_sp_rif_should_config(rif, ifa->ifa_dev, event)) + if (!mlxsw_sp_rif_should_config(rif, dev, event)) goto out; err = __mlxsw_sp_inetaddr_event(dev, event); @@ -3364,6 +4402,61 @@ out: return notifier_from_errno(err); } +struct mlxsw_sp_inet6addr_event_work { + struct work_struct work; + struct net_device *dev; + unsigned long event; +}; + +static void mlxsw_sp_inet6addr_event_work(struct work_struct *work) +{ + struct mlxsw_sp_inet6addr_event_work *inet6addr_work = + container_of(work, struct mlxsw_sp_inet6addr_event_work, work); + struct net_device *dev = inet6addr_work->dev; + unsigned long event = inet6addr_work->event; + struct mlxsw_sp *mlxsw_sp; + struct mlxsw_sp_rif *rif; + + rtnl_lock(); + mlxsw_sp = mlxsw_sp_lower_get(dev); + if (!mlxsw_sp) + goto out; + + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); + if (!mlxsw_sp_rif_should_config(rif, dev, event)) + goto out; + + __mlxsw_sp_inetaddr_event(dev, event); +out: + rtnl_unlock(); + dev_put(dev); + kfree(inet6addr_work); +} + +/* Called with rcu_read_lock() */ +int mlxsw_sp_inet6addr_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr; + struct mlxsw_sp_inet6addr_event_work *inet6addr_work; + struct net_device *dev = if6->idev->dev; + + if (!mlxsw_sp_port_dev_lower_find_rcu(dev)) + return NOTIFY_DONE; + + inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC); + if (!inet6addr_work) + return NOTIFY_BAD; + + INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work); + inet6addr_work->dev = dev; + inet6addr_work->event = event; + dev_hold(dev); + mlxsw_core_schedule_work(&inet6addr_work->work); + + return NOTIFY_DONE; +} + static int 
mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index, const char *mac, int mtu) { @@ -3565,6 +4658,11 @@ static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif) if (err) return err; + err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, + mlxsw_sp_router_port(mlxsw_sp), true); + if (err) + goto err_fid_mc_flood_set; + err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, mlxsw_sp_router_port(mlxsw_sp), true); if (err) @@ -3573,6 +4671,9 @@ static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif) return 0; err_fid_bc_flood_set: + mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, + mlxsw_sp_router_port(mlxsw_sp), false); +err_fid_mc_flood_set: mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false); return err; } @@ -3584,6 +4685,8 @@ static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif) mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, mlxsw_sp_router_port(mlxsw_sp), false); + mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, + mlxsw_sp_router_port(mlxsw_sp), false); mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false); } @@ -3614,6 +4717,11 @@ static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif) if (err) return err; + err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, + mlxsw_sp_router_port(mlxsw_sp), true); + if (err) + goto err_fid_mc_flood_set; + err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, mlxsw_sp_router_port(mlxsw_sp), true); if (err) @@ -3622,6 +4730,9 @@ static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif) return 0; err_fid_bc_flood_set: + mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, + mlxsw_sp_router_port(mlxsw_sp), false); +err_fid_mc_flood_set: mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false); return err; } @@ -3633,6 +4744,8 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif) mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, mlxsw_sp_router_port(mlxsw_sp), false); + mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, + mlxsw_sp_router_port(mlxsw_sp), false); mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false); } @@ -3704,7 +4817,7 @@ static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) return -EIO; max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); - mlxsw_reg_rgcr_pack(rgcr_pl, true); + mlxsw_reg_rgcr_pack(rgcr_pl, true, true); mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl); if (err) @@ -3716,7 +4829,7 @@ static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) { char rgcr_pl[MLXSW_REG_RGCR_LEN]; - mlxsw_reg_rgcr_pack(rgcr_pl, false); + mlxsw_reg_rgcr_pack(rgcr_pl, false, false); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 12b5ed58f3eb..61652396bf75 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -61,11 +61,32 @@ enum { MLXSW_TRAP_ID_MTUERROR = 0x52, MLXSW_TRAP_ID_TTLERROR = 0x53, MLXSW_TRAP_ID_LBERROR = 0x54, - MLXSW_TRAP_ID_OSPF = 0x55, + MLXSW_TRAP_ID_IPV4_OSPF = 0x55, MLXSW_TRAP_ID_IP2ME = 0x5F, + MLXSW_TRAP_ID_IPV6_UNSPECIFIED_ADDRESS = 0x60, + MLXSW_TRAP_ID_IPV6_LINK_LOCAL_DEST = 0x61, + MLXSW_TRAP_ID_IPV6_LINK_LOCAL_SRC = 0x62, + MLXSW_TRAP_ID_IPV6_ALL_NODES_LINK = 0x63, + MLXSW_TRAP_ID_IPV6_OSPF = 0x64, + MLXSW_TRAP_ID_IPV6_MLDV12_LISTENER_QUERY = 0x65, + 
MLXSW_TRAP_ID_IPV6_MLDV1_LISTENER_REPORT = 0x66, + MLXSW_TRAP_ID_IPV6_MLDV1_LISTENER_DONE = 0x67, + MLXSW_TRAP_ID_IPV6_MLDV2_LISTENER_REPORT = 0x68, + MLXSW_TRAP_ID_IPV6_DHCP = 0x69, + MLXSW_TRAP_ID_IPV6_ALL_ROUTERS_LINK = 0x6F, MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70, - MLXSW_TRAP_ID_BGP_IPV4 = 0x88, + MLXSW_TRAP_ID_IPV4_BGP = 0x88, + MLXSW_TRAP_ID_IPV6_BGP = 0x89, + MLXSW_TRAP_ID_L3_IPV6_ROUTER_SOLICITATION = 0x8A, + MLXSW_TRAP_ID_L3_IPV6_ROUTER_ADVERTISMENT = 0x8B, + MLXSW_TRAP_ID_L3_IPV6_NEIGHBOR_SOLICITATION = 0x8C, + MLXSW_TRAP_ID_L3_IPV6_NEIGHBOR_ADVERTISMENT = 0x8D, + MLXSW_TRAP_ID_L3_IPV6_REDIRECTION = 0x8E, MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90, + MLXSW_TRAP_ID_IPV6_MC_LINK_LOCAL_DEST = 0x91, + MLXSW_TRAP_ID_HOST_MISS_IPV6 = 0x92, + MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6, + MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7, MLXSW_TRAP_ID_ACL0 = 0x1C0, MLXSW_TRAP_ID_MAX = 0x1FF diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index c0d7d5eec7e7..2e4effa9fe45 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -161,7 +161,7 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev) priv->rx_head = 0; - /* reset the MAC controller TX/RX desciptor base address */ + /* reset the MAC controller TX/RX descriptor base address */ writel(priv->tx_base, priv->base + REG_TXR_BASE_ADDRESS); writel(priv->rx_base, priv->base + REG_RXR_BASE_ADDRESS); } @@ -269,9 +269,8 @@ rx_next: priv->rx_head = rx_head; } - if (rx < budget) { + if (rx < budget) napi_complete_done(napi, rx); - } priv->reg_imr |= RPKT_FINISH_M; writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK); @@ -289,8 +288,8 @@ static int moxart_tx_queue_space(struct net_device *ndev) static void moxart_tx_finished(struct net_device *ndev) { struct moxart_mac_priv_t *priv = netdev_priv(ndev); - unsigned tx_head = priv->tx_head; - unsigned tx_tail = priv->tx_tail; + unsigned int tx_head = priv->tx_head; + unsigned int tx_tail = priv->tx_tail; while (tx_tail != tx_head) { dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail], @@ -312,7 +311,7 @@ static void moxart_tx_finished(struct net_device *ndev) static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id) { - struct net_device *ndev = (struct net_device *) dev_id; + struct net_device *ndev = (struct net_device *)dev_id; struct moxart_mac_priv_t *priv = netdev_priv(ndev); unsigned int ists = readl(priv->base + REG_INTERRUPT_STATUS); @@ -495,7 +494,7 @@ static int moxart_mac_probe(struct platform_device *pdev) priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE * TX_DESC_NUM, &priv->tx_base, GFP_DMA | GFP_KERNEL); - if (priv->tx_desc_base == NULL) { + if (!priv->tx_desc_base) { ret = -ENOMEM; goto init_fail; } @@ -503,7 +502,7 @@ static int moxart_mac_probe(struct platform_device *pdev) priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE * RX_DESC_NUM, &priv->rx_base, GFP_DMA | GFP_KERNEL); - if (priv->rx_desc_base == NULL) { + if (!priv->rx_desc_base) { ret = -ENOMEM; goto init_fail; } diff --git a/drivers/net/ethernet/moxa/moxart_ether.h b/drivers/net/ethernet/moxa/moxart_ether.h index 686b8957d5cf..bee608b547d1 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.h +++ b/drivers/net/ethernet/moxa/moxart_ether.h @@ -55,17 +55,17 @@ #define RX_DESC2_ADDRESS_VIRT 4 #define TX_DESC_NUM 64 -#define TX_DESC_NUM_MASK (TX_DESC_NUM-1) +#define TX_DESC_NUM_MASK (TX_DESC_NUM - 1) #define TX_NEXT(N) (((N) + 1) & (TX_DESC_NUM_MASK)) #define TX_BUF_SIZE 1600 -#define TX_BUF_SIZE_MAX 
(TX_DESC1_BUF_SIZE_MASK+1) +#define TX_BUF_SIZE_MAX (TX_DESC1_BUF_SIZE_MASK + 1) #define TX_WAKE_THRESHOLD 16 #define RX_DESC_NUM 64 -#define RX_DESC_NUM_MASK (RX_DESC_NUM-1) +#define RX_DESC_NUM_MASK (RX_DESC_NUM - 1) #define RX_NEXT(N) (((N) + 1) & (RX_DESC_NUM_MASK)) #define RX_BUF_SIZE 1600 -#define RX_BUF_SIZE_MAX (RX_DESC1_BUF_SIZE_MASK+1) +#define RX_BUF_SIZE_MAX (RX_DESC1_BUF_SIZE_MASK + 1) #define REG_INTERRUPT_STATUS 0 #define REG_INTERRUPT_MASK 4 diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index fd2ec36c6fa1..462eda926b1c 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -42,8 +42,6 @@ * aggregated as a single large packet * napi: This parameter used to enable/disable NAPI (polling Rx) * Possible values '1' for enable and '0' for disable. Default is '1' - * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO) - * Possible values '1' for enable and '0' for disable. Default is '0' * vlan_tag_strip: This can be used to enable or disable vlan stripping. * Possible values '1' for enable , '0' for disable. * Default is '2' - which means disable in promisc mode @@ -453,7 +451,6 @@ S2IO_PARM_INT(lro_max_pkts, 0xFFFF); S2IO_PARM_INT(indicate_max_pkts, 0); S2IO_PARM_INT(napi, 1); -S2IO_PARM_INT(ufo, 0); S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC); static unsigned int tx_fifo_len[MAX_TX_FIFOS] = @@ -4128,32 +4125,6 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev) } frg_len = skb_headlen(skb); - if (offload_type == SKB_GSO_UDP) { - int ufo_size; - - ufo_size = s2io_udp_mss(skb); - ufo_size &= ~7; - txdp->Control_1 |= TXD_UFO_EN; - txdp->Control_1 |= TXD_UFO_MSS(ufo_size); - txdp->Control_1 |= TXD_BUFFER0_SIZE(8); -#ifdef __BIG_ENDIAN - /* both variants do cpu_to_be64(be32_to_cpu(...)) */ - fifo->ufo_in_band_v[put_off] = - (__force u64)skb_shinfo(skb)->ip6_frag_id; -#else - fifo->ufo_in_band_v[put_off] = - (__force u64)skb_shinfo(skb)->ip6_frag_id << 32; -#endif - txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v; - txdp->Buffer_Pointer = pci_map_single(sp->pdev, - fifo->ufo_in_band_v, - sizeof(u64), - PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer)) - goto pci_map_failed; - txdp++; - } - txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE); if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer)) @@ -4161,8 +4132,6 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev) txdp->Host_Control = (unsigned long)skb; txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len); - if (offload_type == SKB_GSO_UDP) - txdp->Control_1 |= TXD_UFO_EN; frg_cnt = skb_shinfo(skb)->nr_frags; /* For fragmented SKB. 
*/ @@ -4177,14 +4146,9 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev) skb_frag_size(frag), DMA_TO_DEVICE); txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag)); - if (offload_type == SKB_GSO_UDP) - txdp->Control_1 |= TXD_UFO_EN; } txdp->Control_1 |= TXD_GATHER_CODE_LAST; - if (offload_type == SKB_GSO_UDP) - frg_cnt++; /* as Txd0 was used for inband header */ - tx_fifo = mac_control->tx_FIFO_start[queue]; val64 = fifo->list_info[put_off].list_phy_addr; writeq(val64, &tx_fifo->TxDL_Pointer); @@ -7910,11 +7874,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) NETIF_F_RXCSUM | NETIF_F_LRO; dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; - if (sp->device_type & XFRAME_II_DEVICE) { - dev->hw_features |= NETIF_F_UFO; - if (ufo) - dev->features |= NETIF_F_UFO; - } if (sp->high_dma_flag == true) dev->features |= NETIF_F_HIGHDMA; dev->watchdog_timeo = WATCH_DOG_TIMEOUT; @@ -8147,10 +8106,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n", dev->name); - if (ufo) - DBG_PRINT(ERR_DBG, - "%s: UDP Fragmentation Offload(UFO) enabled\n", - dev->name); /* Initialize device name */ snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name, sp->product_name); diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 8e57fda6b8b5..239dfbe8a0a1 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1238,6 +1238,16 @@ static int jge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return wrp_cmp_imm(nfp_prog, meta, BR_BHS, false); } +static int jlt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return wrp_cmp_imm(nfp_prog, meta, BR_BLO, false); +} + +static int jle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true); +} + static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; @@ -1325,6 +1335,16 @@ static int jge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return wrp_cmp_reg(nfp_prog, meta, BR_BHS, false); } +static int jlt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return wrp_cmp_reg(nfp_prog, meta, BR_BLO, false); +} + +static int jle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true); +} + static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE); @@ -1383,11 +1403,15 @@ static const instr_cb_t instr_cb[256] = { [BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm, [BPF_JMP | BPF_JGT | BPF_K] = jgt_imm, [BPF_JMP | BPF_JGE | BPF_K] = jge_imm, + [BPF_JMP | BPF_JLT | BPF_K] = jlt_imm, + [BPF_JMP | BPF_JLE | BPF_K] = jle_imm, [BPF_JMP | BPF_JSET | BPF_K] = jset_imm, [BPF_JMP | BPF_JNE | BPF_K] = jne_imm, [BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg, [BPF_JMP | BPF_JGT | BPF_X] = jgt_reg, [BPF_JMP | BPF_JGE | BPF_X] = jge_reg, + [BPF_JMP | BPF_JLT | BPF_X] = jlt_reg, + [BPF_JMP | BPF_JLE | BPF_X] = jle_reg, [BPF_JMP | BPF_JSET | BPF_X] = jset_reg, [BPF_JMP | BPF_JNE | BPF_X] = jne_reg, [BPF_JMP | BPF_EXIT] = goto_out, diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index afbdf5fd4e4f..f981f60ec306 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ 
b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -121,23 +121,21 @@ static void nfp_bpf_vnic_clean(struct nfp_app *app, struct nfp_net *nn) } static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev, - u32 handle, __be16 proto, struct tc_to_netdev *tc) + enum tc_setup_type type, void *type_data) { + struct tc_cls_bpf_offload *cls_bpf = type_data; struct nfp_net *nn = netdev_priv(netdev); - if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS)) - return -EOPNOTSUPP; - if (proto != htons(ETH_P_ALL)) + if (type != TC_SETUP_CLSBPF || !nfp_net_ebpf_capable(nn) || + TC_H_MAJ(cls_bpf->common.handle) != TC_H_MAJ(TC_H_INGRESS) || + cls_bpf->common.protocol != htons(ETH_P_ALL) || + cls_bpf->common.chain_index) return -EOPNOTSUPP; - if (tc->type == TC_SETUP_CLSBPF && nfp_net_ebpf_capable(nn)) { - if (!nn->dp.bpf_offload_xdp) - return nfp_net_bpf_offload(nn, tc->cls_bpf); - else - return -EBUSY; - } + if (nn->dp.bpf_offload_xdp) + return -EBUSY; - return -EINVAL; + return nfp_net_bpf_offload(nn, cls_bpf); } static bool nfp_bpf_tc_busy(struct nfp_app *app, struct nfp_net *nn) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index 78d80a364edb..a88bb5bc0082 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -115,14 +115,14 @@ nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf) /* TC direct action */ if (cls_bpf->exts_integrated) { - if (tc_no_actions(cls_bpf->exts)) + if (!tcf_exts_has_actions(cls_bpf->exts)) return NN_ACT_DIRECT; return -EOPNOTSUPP; } /* TC legacy mode */ - if (!tc_single_action(cls_bpf->exts)) + if (!tcf_exts_has_one_action(cls_bpf->exts)) return -EOPNOTSUPP; tcf_exts_to_list(cls_bpf->exts, &actions); diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index d696ba46f70a..5b783a91b115 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -79,28 +79,32 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog, const struct bpf_verifier_env *env) { const struct bpf_reg_state *reg0 = &env->cur_state.regs[0]; + u64 imm; if (nfp_prog->act == NN_ACT_XDP) return 0; - if (reg0->type != CONST_IMM) { - pr_info("unsupported exit state: %d, imm: %llx\n", - reg0->type, reg0->imm); + if (!(reg0->type == SCALAR_VALUE && tnum_is_const(reg0->var_off))) { + char tn_buf[48]; + + tnum_strn(tn_buf, sizeof(tn_buf), reg0->var_off); + pr_info("unsupported exit state: %d, var_off: %s\n", + reg0->type, tn_buf); return -EINVAL; } - if (nfp_prog->act != NN_ACT_DIRECT && - reg0->imm != 0 && (reg0->imm & ~0U) != ~0U) { + imm = reg0->var_off.value; + if (nfp_prog->act != NN_ACT_DIRECT && imm != 0 && (imm & ~0U) != ~0U) { pr_info("unsupported exit state: %d, imm: %llx\n", - reg0->type, reg0->imm); + reg0->type, imm); return -EINVAL; } - if (nfp_prog->act == NN_ACT_DIRECT && reg0->imm <= TC_ACT_REDIRECT && - reg0->imm != TC_ACT_SHOT && reg0->imm != TC_ACT_STOLEN && - reg0->imm != TC_ACT_QUEUED) { + if (nfp_prog->act == NN_ACT_DIRECT && imm <= TC_ACT_REDIRECT && + imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN && + imm != TC_ACT_QUEUED) { pr_info("unsupported exit state: %d, imm: %llx\n", - reg0->type, reg0->imm); + reg0->type, imm); return -EINVAL; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 9e64c048e83f..71e4f4f4e9ba 100644 --- 
a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -38,8 +38,8 @@ #include <linux/hashtable.h> #include <linux/time64.h> #include <linux/types.h> +#include <net/pkt_cls.h> -struct tc_to_netdev; struct net_device; struct nfp_app; @@ -135,7 +135,7 @@ int nfp_flower_metadata_init(struct nfp_app *app); void nfp_flower_metadata_cleanup(struct nfp_app *app); int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev, - u32 handle, __be16 proto, struct tc_to_netdev *tc); + enum tc_setup_type type, void *type_data); int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, struct nfp_fl_key_ls *key_ls, struct net_device *netdev, diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 4ad10bd5e139..01767c7376d5 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -385,16 +385,15 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev, } int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev, - u32 handle, __be16 proto, struct tc_to_netdev *tc) + enum tc_setup_type type, void *type_data) { - if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS)) - return -EOPNOTSUPP; + struct tc_cls_flower_offload *cls_flower = type_data; - if (!eth_proto_is_802_3(proto)) + if (type != TC_SETUP_CLSFLOWER || + TC_H_MAJ(cls_flower->common.handle) != TC_H_MAJ(TC_H_INGRESS) || + !eth_proto_is_802_3(cls_flower->common.protocol) || + cls_flower->common.chain_index) return -EOPNOTSUPP; - if (tc->type != TC_SETUP_CLSFLOWER) - return -EINVAL; - - return nfp_flower_repr_offload(app, netdev, tc->cls_flower); + return nfp_flower_repr_offload(app, netdev, cls_flower); } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h index 5d714e10d9a9..f34e8778fae2 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h @@ -42,7 +42,6 @@ struct bpf_prog; struct net_device; struct pci_dev; struct sk_buff; -struct tc_to_netdev; struct sk_buff; struct nfp_app; struct nfp_cpp; @@ -109,7 +108,7 @@ struct nfp_app_type { void (*ctrl_msg_rx)(struct nfp_app *app, struct sk_buff *skb); int (*setup_tc)(struct nfp_app *app, struct net_device *netdev, - u32 handle, __be16 proto, struct tc_to_netdev *tc); + enum tc_setup_type type, void *type_data); bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn); int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn, struct bpf_prog *prog); @@ -238,12 +237,11 @@ static inline bool nfp_app_tc_busy(struct nfp_app *app, struct nfp_net *nn) static inline int nfp_app_setup_tc(struct nfp_app *app, struct net_device *netdev, - u32 handle, __be16 proto, - struct tc_to_netdev *tc) + enum tc_setup_type type, void *type_data) { if (!app || !app->type->setup_tc) return -EOPNOTSUPP; - return app->type->setup_tc(app, netdev, handle, proto, tc); + return app->type->setup_tc(app, netdev, type, type_data); } static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index d67969d3e484..dd769eceb33d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -174,6 +174,21 @@ static int nfp_pcie_sriov_configure(struct pci_dev *pdev, int num_vfs) return nfp_pcie_sriov_enable(pdev, num_vfs); } 
+static const struct firmware * +nfp_net_fw_request(struct pci_dev *pdev, struct nfp_pf *pf, const char *name) +{ + const struct firmware *fw = NULL; + int err; + + err = request_firmware_direct(&fw, name, &pdev->dev); + nfp_info(pf->cpp, " %s: %s\n", + name, err ? "not found" : "found, loading..."); + if (err) + return NULL; + + return fw; +} + /** * nfp_net_fw_find() - Find the correct firmware image for netdev mode * @pdev: PCI Device structure @@ -184,13 +199,32 @@ static int nfp_pcie_sriov_configure(struct pci_dev *pdev, int num_vfs) static const struct firmware * nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf) { - const struct firmware *fw = NULL; struct nfp_eth_table_port *port; + const struct firmware *fw; const char *fw_model; char fw_name[256]; - int spc, err = 0; - int i, j; + const u8 *serial; + u16 interface; + int spc, i, j; + nfp_info(pf->cpp, "Looking for firmware file in order of priority:\n"); + + /* First try to find a firmware image specific for this device */ + interface = nfp_cpp_interface(pf->cpp); + nfp_cpp_serial(pf->cpp, &serial); + sprintf(fw_name, "netronome/serial-%pMF-%02hhx-%02hhx.nffw", + serial, interface >> 8, interface & 0xff); + fw = nfp_net_fw_request(pdev, pf, fw_name); + if (fw) + return fw; + + /* Then try the PCI name */ + sprintf(fw_name, "netronome/pci-%s.nffw", pci_name(pdev)); + fw = nfp_net_fw_request(pdev, pf, fw_name); + if (fw) + return fw; + + /* Finally try the card type and media */ if (!pf->eth_tbl) { dev_err(&pdev->dev, "Error: can't identify media config\n"); return NULL; @@ -223,13 +257,7 @@ nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf) if (spc <= 0) return NULL; - err = request_firmware(&fw, fw_name, &pdev->dev); - if (err) - return NULL; - - dev_info(&pdev->dev, "Loading FW image: %s\n", fw_name); - - return fw; + return nfp_net_fw_request(pdev, pf, fw_name); } /** diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 4631ca8b8eb2..4a990033c4d5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -2660,6 +2660,7 @@ static int nfp_net_netdev_close(struct net_device *netdev) /* Step 2: Tell NFP */ nfp_net_clear_config_and_disable(nn); + nfp_port_configure(netdev, false); /* Step 3: Free resources */ @@ -2777,16 +2778,21 @@ static int nfp_net_netdev_open(struct net_device *netdev) goto err_free_all; /* Step 2: Configure the NFP + * - Ifup the physical interface if it exists * - Enable rings from 0 to tx_rings/rx_rings - 1. 
* - Write MAC address (in case it changed) * - Set the MTU * - Set the Freelist buffer size * - Enable the FW */ - err = nfp_net_set_config_and_enable(nn); + err = nfp_port_configure(netdev, true); if (err) goto err_free_all; + err = nfp_net_set_config_and_enable(nn); + if (err) + goto err_port_disable; + /* Step 3: Enable for kernel * - put some freelist descriptors on each RX ring * - enable NAPI on each ring @@ -2797,6 +2803,8 @@ static int nfp_net_netdev_open(struct net_device *netdev) return 0; +err_port_disable: + nfp_port_configure(netdev, false); err_free_all: nfp_net_close_free_all(nn); return err; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index 5797dbf2b507..d5e2361f0e86 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -704,7 +704,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf) if (!pf->rtbl) { nfp_err(pf->cpp, "No %s, giving up.\n", pf->fw_loaded ? "symbol table" : "firmware found"); - return -EPROBE_DEFER; + return -EINVAL; } mutex_lock(&pf->lock); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index 8ec5474f4b18..47daad30756c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -239,15 +239,34 @@ static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev) static int nfp_repr_stop(struct net_device *netdev) { struct nfp_repr *repr = netdev_priv(netdev); + int err; + + err = nfp_app_repr_stop(repr->app, repr); + if (err) + return err; - return nfp_app_repr_stop(repr->app, repr); + nfp_port_configure(netdev, false); + return 0; } static int nfp_repr_open(struct net_device *netdev) { struct nfp_repr *repr = netdev_priv(netdev); + int err; + + err = nfp_port_configure(netdev, true); + if (err) + return err; + + err = nfp_app_repr_open(repr->app, repr); + if (err) + goto err_port_disable; - return nfp_app_repr_open(repr->app, repr); + return 0; + +err_port_disable: + nfp_port_configure(netdev, false); + return err; } const struct net_device_ops nfp_repr_netdev_ops = { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c index e42644dbb865..0cf65e57addb 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c @@ -88,19 +88,16 @@ const struct switchdev_ops nfp_port_switchdev_ops = { .switchdev_port_attr_get = nfp_port_attr_get, }; -int nfp_port_setup_tc(struct net_device *netdev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *tc) +int nfp_port_setup_tc(struct net_device *netdev, enum tc_setup_type type, + void *type_data) { struct nfp_port *port; - if (chain_index) - return -EOPNOTSUPP; - port = nfp_port_from_netdev(netdev); if (!port) return -EOPNOTSUPP; - return nfp_app_setup_tc(port->app, netdev, handle, proto, tc); + return nfp_app_setup_tc(port->app, netdev, type, type_data); } struct nfp_port * @@ -181,6 +178,33 @@ nfp_port_get_phys_port_name(struct net_device *netdev, char *name, size_t len) return 0; } +/** + * nfp_port_configure() - helper to set the interface configured bit + * @netdev: net_device instance + * @configed: Desired state + * + * Helper to set the ifup/ifdown state on the PHY only if there is a physical + * interface associated with the netdev. 
+ * + * Return: + * 0 - configuration successful (or no change); + * -ERRNO - configuration failed. + */ +int nfp_port_configure(struct net_device *netdev, bool configed) +{ + struct nfp_eth_table_port *eth_port; + struct nfp_port *port; + int err; + + port = nfp_port_from_netdev(netdev); + eth_port = __nfp_port_get_eth_port(port); + if (!eth_port) + return 0; + + err = nfp_eth_set_configured(port->app->cpp, eth_port->index, configed); + return err < 0 && err != -EOPNOTSUPP ? err : 0; +} + int nfp_port_init_phy_port(struct nfp_pf *pf, struct nfp_app *app, struct nfp_port *port, unsigned int id) { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h index a33d22e18f94..c88e376dcf0f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h @@ -36,7 +36,6 @@ #include <net/devlink.h> -struct tc_to_netdev; struct net_device; struct nfp_app; struct nfp_pf; @@ -109,8 +108,8 @@ struct nfp_port { extern const struct switchdev_ops nfp_port_switchdev_ops; -int nfp_port_setup_tc(struct net_device *netdev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *tc); +int nfp_port_setup_tc(struct net_device *netdev, enum tc_setup_type type, + void *type_data); struct nfp_port *nfp_port_from_netdev(struct net_device *netdev); struct nfp_port * @@ -120,6 +119,7 @@ struct nfp_eth_table_port *nfp_port_get_eth_port(struct nfp_port *port); int nfp_port_get_phys_port_name(struct net_device *netdev, char *name, size_t len); +int nfp_port_configure(struct net_device *netdev, bool configed); struct nfp_port * nfp_port_alloc(struct nfp_app *app, enum nfp_port_type type, diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c index c2bc36e8649f..f6f7c085f8e0 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c @@ -391,7 +391,10 @@ int nfp_eth_config_commit_end(struct nfp_nsp *nsp) * Enable or disable PHY module (this usually means setting the TX lanes * disable bits). * - * Return: 0 or -ERRNO. + * Return: + * 0 - configuration successful; + * 1 - no changes were needed; + * -ERRNO - configuration failed. */ int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable) { @@ -427,7 +430,10 @@ int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable) * * Set the ifup/ifdown state on the PHY. * - * Return: 0 or -ERRNO. + * Return: + * 0 - configuration successful; + * 1 - no changes were needed; + * -ERRNO - configuration failed. */ int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx, bool configed) { @@ -439,6 +445,14 @@ int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx, bool configed) if (IS_ERR(nsp)) return PTR_ERR(nsp); + /* Older ABI versions did support this feature, however this has only + * been reliable since ABI 20. + */ + if (nfp_nsp_get_abi_ver_minor(nsp) < 20) { + nfp_eth_config_cleanup_end(nsp); + return -EOPNOTSUPP; + } + entries = nfp_nsp_config_entries(nsp); /* Check if we are already in requested state */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 6c87bed13bd2..58a689fb04db 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1684,6 +1684,8 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) "Load request was sent. 
Load code: 0x%x\n", load_code); + qed_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt); + qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt); p_hwfn->first_on_engine = (load_code == @@ -2472,6 +2474,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities; u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg; + struct qed_mcp_link_capabilities *p_caps; struct qed_mcp_link_params *link; /* Read global nvm_cfg address */ @@ -2534,6 +2537,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) /* Read default link configuration */ link = &p_hwfn->mcp_info->link_input; + p_caps = &p_hwfn->mcp_info->link_capabilities; port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); link_temp = qed_rd(p_hwfn, p_ptt, @@ -2588,10 +2592,45 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX); link->loopback_mode = 0; - DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, - "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n", - link->speed.forced_speed, link->speed.advertised_speeds, - link->speed.autoneg, link->pause.autoneg); + if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) { + link_temp = qed_rd(p_hwfn, p_ptt, port_cfg_addr + + offsetof(struct nvm_cfg1_port, ext_phy)); + link_temp &= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK; + link_temp >>= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET; + p_caps->default_eee = QED_MCP_EEE_ENABLED; + link->eee.enable = true; + switch (link_temp) { + case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED: + p_caps->default_eee = QED_MCP_EEE_DISABLED; + link->eee.enable = false; + break; + case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED: + p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_BALANCED_TIME; + break; + case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE: + p_caps->eee_lpi_timer = + EEE_TX_TIMER_USEC_AGGRESSIVE_TIME; + break; + case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY: + p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_LATENCY_TIME; + break; + } + + link->eee.tx_lpi_timer = p_caps->eee_lpi_timer; + link->eee.tx_lpi_enable = link->eee.enable; + link->eee.adv_caps = QED_EEE_1G_ADV | QED_EEE_10G_ADV; + } else { + p_caps->default_eee = QED_MCP_EEE_UNSUPPORTED; + } + + DP_VERBOSE(p_hwfn, + NETIF_MSG_LINK, + "Read default link: Speed 0x%08x, Adv. 
Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x EEE: %02x [%08x usec]\n", + link->speed.forced_speed, + link->speed.advertised_speeds, + link->speed.autoneg, + link->pause.autoneg, + p_caps->default_eee, p_caps->eee_lpi_timer); /* Read Multi-function information from shmem */ addr = MCP_REG_SCRATCH + nvm_cfg1_offset + @@ -2751,6 +2790,27 @@ static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) qed_hw_info_port_num_ah(p_hwfn, p_ptt); } +static void qed_get_eee_caps(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_mcp_link_capabilities *p_caps; + u32 eee_status; + + p_caps = &p_hwfn->mcp_info->link_capabilities; + if (p_caps->default_eee == QED_MCP_EEE_UNSUPPORTED) + return; + + p_caps->eee_speed_caps = 0; + eee_status = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, eee_status)); + eee_status = (eee_status & EEE_SUPPORTED_SPEED_MASK) >> + EEE_SUPPORTED_SPEED_OFFSET; + + if (eee_status & EEE_1G_SUPPORTED) + p_caps->eee_speed_caps |= QED_EEE_1G_ADV; + if (eee_status & EEE_10G_SUPPORTED) + p_caps->eee_speed_caps |= QED_EEE_10G_ADV; +} + static int qed_get_hw_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -2767,6 +2827,8 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, qed_hw_info_port_num(p_hwfn, p_ptt); + qed_mcp_get_capabilities(p_hwfn, p_ptt); + qed_hw_get_nvm_info(p_hwfn, p_ptt); rc = qed_int_igu_read_cam(p_hwfn, p_ptt); @@ -2785,6 +2847,8 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, p_hwfn->mcp_info->func_info.ovlan; qed_mcp_cmd_port_init(p_hwfn, p_ptt); + + qed_get_eee_caps(p_hwfn, p_ptt); } if (qed_mcp_is_init(p_hwfn)) { @@ -3630,7 +3694,7 @@ static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, } p_coal_timeset = p_eth_qzone; - memset(p_coal_timeset, 0, eth_qzone_size); + memset(p_eth_qzone, 0, eth_qzone_size); SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset); SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1); qed_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size); @@ -3638,12 +3702,46 @@ static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, return 0; } -int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u16 coalesce, u16 qid, u16 sb_id) +int qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle) +{ + struct qed_queue_cid *p_cid = p_handle; + struct qed_hwfn *p_hwfn; + struct qed_ptt *p_ptt; + int rc = 0; + + p_hwfn = p_cid->p_owner; + + if (IS_VF(p_hwfn->cdev)) + return qed_vf_pf_set_coalesce(p_hwfn, rx_coal, tx_coal, p_cid); + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EAGAIN; + + if (rx_coal) { + rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid); + if (rc) + goto out; + p_hwfn->cdev->rx_coalesce_usecs = rx_coal; + } + + if (tx_coal) { + rc = qed_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid); + if (rc) + goto out; + p_hwfn->cdev->tx_coalesce_usecs = tx_coal; + } +out: + qed_ptt_release(p_hwfn, p_ptt); + return rc; +} + +int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 coalesce, struct qed_queue_cid *p_cid) { struct ustorm_eth_queue_zone eth_qzone; u8 timeset, timer_res; - u16 fw_qid = 0; u32 address; int rc; @@ -3660,32 +3758,29 @@ int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, } timeset = (u8)(coalesce >> timer_res); - rc = qed_fw_l2_queue(p_hwfn, qid, &fw_qid); - if (rc) - return rc; - - rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, false); + rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, + 
p_cid->sb_igu_id, false); if (rc) goto out; - address = BAR0_MAP_REG_USDM_RAM + USTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid); + address = BAR0_MAP_REG_USDM_RAM + + USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone, sizeof(struct ustorm_eth_queue_zone), timeset); if (rc) goto out; - p_hwfn->cdev->rx_coalesce_usecs = coalesce; out: return rc; } -int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u16 coalesce, u16 qid, u16 sb_id) +int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 coalesce, struct qed_queue_cid *p_cid) { struct xstorm_eth_queue_zone eth_qzone; u8 timeset, timer_res; - u16 fw_qid = 0; u32 address; int rc; @@ -3702,22 +3797,16 @@ int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, } timeset = (u8)(coalesce >> timer_res); - rc = qed_fw_l2_queue(p_hwfn, qid, &fw_qid); - if (rc) - return rc; - - rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, true); + rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, + p_cid->sb_igu_id, true); if (rc) goto out; - address = BAR0_MAP_REG_XSDM_RAM + XSTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid); + address = BAR0_MAP_REG_XSDM_RAM + + XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone, sizeof(struct xstorm_eth_queue_zone), timeset); - if (rc) - goto out; - - p_hwfn->cdev->tx_coalesce_usecs = coalesce; out: return rc; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index 1f1df1bf127c..defdda1ffaa2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -443,38 +443,35 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 id, bool is_vf); /** - * @brief qed_set_rxq_coalesce - Configure coalesce parameters for an Rx queue - * The fact that we can configure coalescing to up to 511, but on varying - * accuracy [the bigger the value the less accurate] up to a mistake of 3usec - * for the highest values. + * @brief qed_get_queue_coalesce - Retrieve coalesce value for a given queue. * * @param p_hwfn - * @param p_ptt - * @param coalesce - Coalesce value in micro seconds. - * @param qid - Queue index. - * @param qid - SB Id + * @param p_coal - store coalesce value read from the hardware. + * @param p_handle * * @return int - */ -int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u16 coalesce, u16 qid, u16 sb_id); + **/ +int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle); /** - * @brief qed_set_txq_coalesce - Configure coalesce parameters for a Tx queue - * While the API allows setting coalescing per-qid, all tx queues sharing a - * SB should be in same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff] - * otherwise configuration would break. + * @brief qed_set_queue_coalesce - Configure coalesce parameters for Rx and + * Tx queue. Coalescing can be configured up to 511 usec, but with decreasing + * accuracy the bigger the value gets [up to an error of 3usec for the + * highest values]. + * While the API allows setting coalescing per-qid, all queues sharing a SB + * should be in the same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff], + * otherwise the configuration would break. + - * @param p_hwfn - * @param p_ptt - * @param coalesce - Coalesce value in micro seconds. - * @param qid - Queue index.
- * @param qid - SB Id + * + * @param rx_coal - Rx Coalesce value in micro seconds. + * @param tx_coal - TX Coalesce value in micro seconds. + * @param p_handle * * @return int - */ -int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u16 coalesce, u16 qid, u16 sb_id); + **/ +int +qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle); + const char *qed_hw_get_resc_name(enum qed_resources res_id); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 31fb0bffa098..3427fe7049b5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -10825,6 +10825,17 @@ struct eth_phy_cfg { #define ETH_LOOPBACK_EXT (3) #define ETH_LOOPBACK_MAC (4) + u32 eee_cfg; +#define EEE_CFG_EEE_ENABLED BIT(0) +#define EEE_CFG_TX_LPI BIT(1) +#define EEE_CFG_ADV_SPEED_1G BIT(2) +#define EEE_CFG_ADV_SPEED_10G BIT(3) +#define EEE_TX_TIMER_USEC_MASK (0xfffffff0) +#define EEE_TX_TIMER_USEC_OFFSET 4 +#define EEE_TX_TIMER_USEC_BALANCED_TIME (0xa00) +#define EEE_TX_TIMER_USEC_AGGRESSIVE_TIME (0x100) +#define EEE_TX_TIMER_USEC_LATENCY_TIME (0x6000) + u32 feature_config_flags; #define ETH_EEE_MODE_ADV_LPI (1 << 0) }; @@ -11242,6 +11253,25 @@ struct public_port { u32 wol_pkt_len; u32 wol_pkt_details; struct dcb_dscp_map dcb_dscp_map; + + u32 eee_status; +#define EEE_ACTIVE_BIT BIT(0) +#define EEE_LD_ADV_STATUS_MASK 0x000000f0 +#define EEE_LD_ADV_STATUS_OFFSET 4 +#define EEE_1G_ADV BIT(1) +#define EEE_10G_ADV BIT(2) +#define EEE_LP_ADV_STATUS_MASK 0x00000f00 +#define EEE_LP_ADV_STATUS_OFFSET 8 +#define EEE_SUPPORTED_SPEED_MASK 0x0000f000 +#define EEE_SUPPORTED_SPEED_OFFSET 12 +#define EEE_1G_SUPPORTED BIT(1) +#define EEE_10G_SUPPORTED BIT(2) + + u32 eee_remote; +#define EEE_REMOTE_TW_TX_MASK 0x0000ffff +#define EEE_REMOTE_TW_TX_OFFSET 0 +#define EEE_REMOTE_TW_RX_MASK 0xffff0000 +#define EEE_REMOTE_TW_RX_OFFSET 16 }; struct public_func { @@ -11570,6 +11600,9 @@ struct public_drv_mb { #define DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL 0x002b0000 #define DRV_MSG_CODE_OS_WOL 0x002e0000 +#define DRV_MSG_CODE_FEATURE_SUPPORT 0x00300000 +#define DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT 0x00310000 + #define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff u32 drv_mb_param; @@ -11653,6 +11686,10 @@ struct public_drv_mb { #define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT 8 #define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000FF00 +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000FFFF +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0 +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002 + u32 fw_mb_header; #define FW_MSG_CODE_MASK 0xffff0000 #define FW_MSG_CODE_UNSUPPORTED 0x00000000 @@ -11696,6 +11733,9 @@ struct public_drv_mb { #define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2 #define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3 +/* get MFW feature support response */ +#define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002 + #define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0) u32 drv_pulse_mb; @@ -11891,7 +11931,16 @@ struct nvm_cfg1_port { #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4 u32 phy_cfg; u32 mgmt_traffic; + u32 ext_phy; + /* EEE power saving mode */ +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK 0x00FF0000 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET 16 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED 0x0 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED 0x1 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE 0x2 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY 0x3 + u32 mba_cfg1; u32 mba_cfg2; 
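The accuracy caveat in the qed_set_queue_coalesce() comment above falls out of the TIMESET encoding used by qed_set_rxq_coalesce()/qed_set_txq_coalesce(): the requested microsecond value is right-shifted by the per-range timer resolution so it fits the u8 TIMESET field, and the read path in qed_get_rxq_coalesce()/qed_get_txq_coalesce() shifts it back. A minimal standalone sketch of that round trip (userspace C, not driver code; the range-to-resolution mapping is assumed from the 0-0x7f, 0x80-0xff and 0x100-0x1ff ranges named in the comment):

#include <stdio.h>
#include <stdint.h>

/* Assumed mapping of the requested usec range to a shift ("timer_res") */
static uint8_t pick_timer_res(uint16_t coal)
{
	if (coal <= 0x7f)
		return 0;
	if (coal <= 0xff)
		return 1;
	return 2;		/* 0x100-0x1ff */
}

int main(void)
{
	uint16_t req[] = { 24, 201, 511 };

	for (int i = 0; i < 3; i++) {
		uint8_t res = pick_timer_res(req[i]);
		uint8_t timeset = (uint8_t)(req[i] >> res);	/* written to HW */
		uint16_t readback = (uint16_t)timeset << res;	/* read back */

		/* 511 comes back as 508: the 3 usec worst-case error */
		printf("req=%3u res=%u timeset=%3u readback=%3u err=%u\n",
		       req[i], res, timeset, readback, req[i] - readback);
	}
	return 0;
}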
u32 vf_cfg; diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 0ba5ec8a9814..9a1645852015 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -2047,6 +2047,106 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, return qed_spq_post(p_hwfn, p_ent, NULL); } +int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_queue_cid *p_cid, u16 *p_rx_coal) +{ + u32 coalesce, address, is_valid; + struct cau_sb_entry sb_entry; + u8 timer_res; + int rc; + + rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + + p_cid->sb_igu_id * sizeof(u64), + (u64)(uintptr_t)&sb_entry, 2, 0); + if (rc) { + DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); + return rc; + } + + timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0); + + address = BAR0_MAP_REG_USDM_RAM + + USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); + coalesce = qed_rd(p_hwfn, p_ptt, address); + + is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID); + if (!is_valid) + return -EINVAL; + + coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET); + *p_rx_coal = (u16)(coalesce << timer_res); + + return 0; +} + +int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_queue_cid *p_cid, u16 *p_tx_coal) +{ + u32 coalesce, address, is_valid; + struct cau_sb_entry sb_entry; + u8 timer_res; + int rc; + + rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + + p_cid->sb_igu_id * sizeof(u64), + (u64)(uintptr_t)&sb_entry, 2, 0); + if (rc) { + DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); + return rc; + } + + timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1); + + address = BAR0_MAP_REG_XSDM_RAM + + XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); + coalesce = qed_rd(p_hwfn, p_ptt, address); + + is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID); + if (!is_valid) + return -EINVAL; + + coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET); + *p_tx_coal = (u16)(coalesce << timer_res); + + return 0; +} + +int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *p_coal, void *handle) +{ + struct qed_queue_cid *p_cid = handle; + struct qed_ptt *p_ptt; + int rc = 0; + + if (IS_VF(p_hwfn->cdev)) { + rc = qed_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid); + if (rc) + DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n"); + + return rc; + } + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EAGAIN; + + if (p_cid->b_is_rx) { + rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal); + if (rc) + goto out; + } else { + rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal); + if (rc) + goto out; + } + +out: + qed_ptt_release(p_hwfn, p_ptt); + + return rc; +} + static int qed_fill_eth_dev_info(struct qed_dev *cdev, struct qed_dev_eth_info *info) { @@ -2696,6 +2796,20 @@ static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie, return rc; } +static int qed_get_coalesce(struct qed_dev *cdev, u16 *coal, void *handle) +{ + struct qed_queue_cid *p_cid = handle; + struct qed_hwfn *p_hwfn; + int rc; + + p_hwfn = p_cid->p_owner; + rc = qed_get_queue_coalesce(p_hwfn, coal, handle); + if (rc) + DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n"); + + return rc; +} + static int qed_fp_cqe_completion(struct qed_dev *dev, u8 rss_id, struct eth_slow_path_rx_cqe *cqe) { @@ -2739,6 +2853,7 @@ static const struct qed_eth_ops qed_eth_ops_pass = { .tunn_config = &qed_tunn_configure, .ntuple_filter_config =
&qed_ntuple_arfs_filter_config, .configure_arfs_searcher = &qed_configure_arfs_searcher, + .get_coalesce = &qed_get_coalesce, }; const struct qed_eth_ops *qed_get_eth_ops(void) diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index f8f09aadced7..cc1f248551c9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -400,4 +400,20 @@ qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, u8 qed_mcast_bin_from_mac(u8 *mac); -#endif /* _QED_L2_H */ +int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 coalesce, struct qed_queue_cid *p_cid); + +int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 coalesce, struct qed_queue_cid *p_cid); + +int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_queue_cid *p_cid, u16 *p_hw_coal); + +int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_queue_cid *p_cid, u16 *p_hw_coal); + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index b11399606990..27832885a87f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -954,9 +954,7 @@ static int qed_slowpath_start(struct qed_dev *cdev, struct qed_tunnel_info tunn_info; const u8 *data = NULL; struct qed_hwfn *hwfn; -#ifdef CONFIG_RFS_ACCEL struct qed_ptt *p_ptt; -#endif int rc = -EINVAL; if (qed_iov_wq_start(cdev)) goto err; @@ -972,7 +970,6 @@ static int qed_slowpath_start(struct qed_dev *cdev, goto err; } -#ifdef CONFIG_RFS_ACCEL if (cdev->num_hwfns == 1) { p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); if (p_ptt) { @@ -983,7 +980,6 @@ static int qed_slowpath_start(struct qed_dev *cdev, goto err; } } -#endif } cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS; @@ -1091,12 +1087,10 @@ err: if (IS_PF(cdev)) release_firmware(cdev->firmware); -#ifdef CONFIG_RFS_ACCEL if (IS_PF(cdev) && (cdev->num_hwfns == 1) && QED_LEADING_HWFN(cdev)->p_arfs_ptt) qed_ptt_release(QED_LEADING_HWFN(cdev), QED_LEADING_HWFN(cdev)->p_arfs_ptt); -#endif qed_iov_wq_stop(cdev, false); @@ -1111,11 +1105,9 @@ static int qed_slowpath_stop(struct qed_dev *cdev) qed_ll2_dealloc_if(cdev); if (IS_PF(cdev)) { -#ifdef CONFIG_RFS_ACCEL if (cdev->num_hwfns == 1) qed_ptt_release(QED_LEADING_HWFN(cdev), QED_LEADING_HWFN(cdev)->p_arfs_ptt); -#endif qed_free_stream_mem(cdev); if (IS_QED_ETH_IF(cdev)) qed_sriov_disable(cdev, true); @@ -1305,6 +1297,10 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) } } + if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG) + memcpy(&link_params->eee, &params->eee, + sizeof(link_params->eee)); + rc = qed_mcp_set_link(hwfn, ptt, params->link_up); qed_ptt_release(hwfn, ptt); @@ -1491,6 +1487,21 @@ static void qed_fill_link(struct qed_hwfn *hwfn, if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE || link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE) if_link->lp_caps |= QED_LM_Asym_Pause_BIT; + + if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) { + if_link->eee_supported = false; + } else { + if_link->eee_supported = true; + if_link->eee_active = link.eee_active; + if_link->sup_caps = link_caps.eee_speed_caps; + /* MFW clears adv_caps on eee disable; use configured value */ + if_link->eee.adv_caps = link.eee_adv_caps ?
link.eee_adv_caps : + params.eee.adv_caps; + if_link->eee.lp_adv_caps = link.eee_lp_adv_caps; + if_link->eee.enable = params.eee.enable; + if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable; + if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer; + } } static void qed_get_current_link(struct qed_dev *cdev, @@ -1557,36 +1568,10 @@ static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type, return rc; } -static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal) -{ - *rx_coal = cdev->rx_coalesce_usecs; - *tx_coal = cdev->tx_coalesce_usecs; -} - static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, - u16 qid, u16 sb_id) + void *handle) { - struct qed_hwfn *hwfn; - struct qed_ptt *ptt; - int hwfn_index; - int status = 0; - - hwfn_index = qid % cdev->num_hwfns; - hwfn = &cdev->hwfns[hwfn_index]; - ptt = qed_ptt_acquire(hwfn); - if (!ptt) - return -EAGAIN; - - status = qed_set_rxq_coalesce(hwfn, ptt, rx_coal, - qid / cdev->num_hwfns, sb_id); - if (status) - goto out; - status = qed_set_txq_coalesce(hwfn, ptt, tx_coal, - qid / cdev->num_hwfns, sb_id); -out: - qed_ptt_release(hwfn, ptt); - - return status; + return qed_set_queue_coalesce(rx_coal, tx_coal, handle); } static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) @@ -1735,7 +1720,6 @@ const struct qed_common_ops qed_common_ops_pass = { .chain_alloc = &qed_chain_alloc, .chain_free = &qed_chain_free, .nvm_get_image = &qed_nvm_get_image, - .get_coalesce = &qed_get_coalesce, .set_coalesce = &qed_set_coalesce, .set_led = &qed_set_led, .update_drv_state = &qed_update_drv_state, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 3eb241657368..376485d99357 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1097,6 +1097,31 @@ static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn, DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n"); } +static void qed_mcp_read_eee_config(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_mcp_link_state *p_link) +{ + u32 eee_status, val; + + p_link->eee_adv_caps = 0; + p_link->eee_lp_adv_caps = 0; + eee_status = qed_rd(p_hwfn, + p_ptt, + p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, eee_status)); + p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT); + val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET; + if (val & EEE_1G_ADV) + p_link->eee_adv_caps |= QED_EEE_1G_ADV; + if (val & EEE_10G_ADV) + p_link->eee_adv_caps |= QED_EEE_10G_ADV; + val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET; + if (val & EEE_1G_ADV) + p_link->eee_lp_adv_caps |= QED_EEE_1G_ADV; + if (val & EEE_10G_ADV) + p_link->eee_lp_adv_caps |= QED_EEE_10G_ADV; +} + static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_reset) { @@ -1228,6 +1253,9 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT); + if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) + qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link); + qed_link_update(p_hwfn); out: spin_unlock_bh(&p_hwfn->mcp_info->link_lock); @@ -1251,6 +1279,19 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) phy_cfg.pause |= (params->pause.forced_tx) ? 
ETH_PAUSE_TX : 0; phy_cfg.adv_speed = params->speed.advertised_speeds; phy_cfg.loopback_mode = params->loopback_mode; + if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) { + if (params->eee.enable) + phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED; + if (params->eee.tx_lpi_enable) + phy_cfg.eee_cfg |= EEE_CFG_TX_LPI; + if (params->eee.adv_caps & QED_EEE_1G_ADV) + phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G; + if (params->eee.adv_caps & QED_EEE_10G_ADV) + phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G; + phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer << + EEE_TX_TIMER_USEC_OFFSET) & + EEE_TX_TIMER_USEC_MASK; + } p_hwfn->b_drv_link_init = b_up; @@ -2822,3 +2863,28 @@ void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock, p_unlock->resource = resource; } } + +int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 mcp_resp; + int rc; + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT, + 0, &mcp_resp, &p_hwfn->mcp_info->capabilities); + if (!rc) + DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_PROBE), + "MFW supported features: %08x\n", + p_hwfn->mcp_info->capabilities); + + return rc; +} + +int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 mcp_resp, mcp_param, features; + + features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE; + + return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT, + features, &mcp_resp, &mcp_param); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index af03b3651411..c7ec2395d1ce 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -53,15 +53,25 @@ struct qed_mcp_link_pause_params { bool forced_tx; }; +enum qed_mcp_eee_mode { + QED_MCP_EEE_DISABLED, + QED_MCP_EEE_ENABLED, + QED_MCP_EEE_UNSUPPORTED +}; + struct qed_mcp_link_params { - struct qed_mcp_link_speed_params speed; - struct qed_mcp_link_pause_params pause; - u32 loopback_mode; + struct qed_mcp_link_speed_params speed; + struct qed_mcp_link_pause_params pause; + u32 loopback_mode; + struct qed_link_eee_params eee; }; struct qed_mcp_link_capabilities { u32 speed_capabilities; bool default_speed_autoneg; + enum qed_mcp_eee_mode default_eee; + u32 eee_lpi_timer; + u8 eee_speed_caps; }; struct qed_mcp_link_state { @@ -102,6 +112,9 @@ struct qed_mcp_link_state { u8 partner_adv_pause; bool sfp_tx_fault; + bool eee_active; + u8 eee_adv_caps; + u8 eee_lp_adv_caps; }; struct qed_mcp_function_info { @@ -546,6 +559,9 @@ struct qed_mcp_info { u8 *mfw_mb_shadow; u16 mfw_mb_length; u32 mcp_hist; + + /* Capabilities negotiated with the MFW */ + u32 capabilities; }; struct qed_mcp_mb_params { @@ -925,5 +941,20 @@ void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock, struct qed_resc_unlock_params *p_unlock, enum qed_resc_lock resource, bool b_is_permanent); +/** + * @brief Learn the set of features supported by the MFW; to be done during + * early init + * + * @param p_hwfn + * @param p_ptt + */ +int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +/** + * @brief Inform the MFW of the set of features supported by the driver. + * Should be done as part of the LOAD_REQ flow.
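Both MCP helpers above revolve around one negotiated bitmask: qed_mcp_set_capabilities() advertises the driver's features to the management firmware, qed_mcp_get_capabilities() caches the MFW's answer in mcp_info->capabilities, and qed_mcp_set_link() tests that cache before writing eee_cfg. A tiny sketch of the gating pattern (standalone C; only the FW_MB_PARAM_FEATURE_SUPPORT_EEE value mirrors a name from the hunks above, the rest is illustrative):

#include <stdint.h>
#include <stdbool.h>

#define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002u

struct mcp_info {
	uint32_t capabilities;	/* cached answer from the MFW */
};

/* EEE link configuration is attempted only when the MFW acked the bit */
static bool can_configure_eee(const struct mcp_info *mcp)
{
	return (mcp->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) != 0;
}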
+ * + * @param p_hwfn + * @param p_ptt + */ +int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 2cfd3bd9a031..3f40b1de7957 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -3400,6 +3400,157 @@ static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn, length, status); } +static void qed_iov_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *p_vf) +{ + struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; + struct pfvf_read_coal_resp_tlv *p_resp; + struct vfpf_read_coal_req_tlv *req; + u8 status = PFVF_STATUS_FAILURE; + struct qed_vf_queue *p_queue; + struct qed_queue_cid *p_cid; + u16 coal = 0, qid, i; + bool b_is_rx; + int rc = 0; + + mbx->offset = (u8 *)mbx->reply_virt; + req = &mbx->req_virt->read_coal_req; + + qid = req->qid; + b_is_rx = req->is_rx ? true : false; + + if (b_is_rx) { + if (!qed_iov_validate_rxq(p_hwfn, p_vf, qid, + QED_IOV_VALIDATE_Q_ENABLE)) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d]: Invalid Rx queue_id = %d\n", + p_vf->abs_vf_id, qid); + goto send_resp; + } + + p_cid = qed_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]); + rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal); + if (rc) + goto send_resp; + } else { + if (!qed_iov_validate_txq(p_hwfn, p_vf, qid, + QED_IOV_VALIDATE_Q_ENABLE)) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d]: Invalid Tx queue_id = %d\n", + p_vf->abs_vf_id, qid); + goto send_resp; + } + for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { + p_queue = &p_vf->vf_queues[qid]; + if ((!p_queue->cids[i].p_cid) || + (!p_queue->cids[i].b_is_tx)) + continue; + + p_cid = p_queue->cids[i].p_cid; + + rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, &coal); + if (rc) + goto send_resp; + break; + } + } + + status = PFVF_STATUS_SUCCESS; + +send_resp: + p_resp = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_COALESCE_READ, + sizeof(*p_resp)); + p_resp->coal = coal; + + qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status); +} + +static void qed_iov_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; + struct vfpf_update_coalesce *req; + u8 status = PFVF_STATUS_FAILURE; + struct qed_queue_cid *p_cid; + u16 rx_coal, tx_coal; + int rc = 0, i; + u16 qid; + + req = &mbx->req_virt->update_coalesce; + + rx_coal = req->rx_coal; + tx_coal = req->tx_coal; + qid = req->qid; + + if (!qed_iov_validate_rxq(p_hwfn, vf, qid, + QED_IOV_VALIDATE_Q_ENABLE) && rx_coal) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d]: Invalid Rx queue_id = %d\n", + vf->abs_vf_id, qid); + goto out; + } + + if (!qed_iov_validate_txq(p_hwfn, vf, qid, + QED_IOV_VALIDATE_Q_ENABLE) && tx_coal) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d]: Invalid Tx queue_id = %d\n", + vf->abs_vf_id, qid); + goto out; + } + + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n", + vf->abs_vf_id, rx_coal, tx_coal, qid); + + if (rx_coal) { + p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]); + + rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid); + if (rc) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d]: Unable to set rx queue = %d coalesce\n", + vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid); + goto out; + } + vf->rx_coal = 
rx_coal; + } + + if (tx_coal) { + struct qed_vf_queue *p_queue = &vf->vf_queues[qid]; + + for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { + if (!p_queue->cids[i].p_cid) + continue; + + if (!p_queue->cids[i].b_is_tx) + continue; + + rc = qed_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, + p_queue->cids[i].p_cid); + + if (rc) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d]: Unable to set tx queue coalesce\n", + vf->abs_vf_id); + goto out; + } + } + vf->tx_coal = tx_coal; + } + + status = PFVF_STATUS_SUCCESS; +out: + qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE, + sizeof(struct pfvf_def_resp_tlv), status); +} static int qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) @@ -3725,6 +3876,12 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, case CHANNEL_TLV_UPDATE_TUNN_PARAM: qed_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf); break; + case CHANNEL_TLV_COALESCE_UPDATE: + qed_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_COALESCE_READ: + qed_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf); + break; } } else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index c2e44bce398c..3955929ba892 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -217,6 +217,9 @@ struct qed_vf_info { u8 num_rxqs; u8 num_txqs; + u16 rx_coal; + u16 tx_coal; + u8 num_sbs; u8 num_mac_filters; diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 1926d1ed439f..91b5e9f02a62 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -1343,6 +1343,81 @@ exit: return rc; } +int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn, + u16 *p_coal, struct qed_queue_cid *p_cid) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_read_coal_resp_tlv *resp; + struct vfpf_read_coal_req_tlv *req; + int rc; + + /* clear mailbox and prep header tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_READ, sizeof(*req)); + req->qid = p_cid->rel.queue_id; + req->is_rx = p_cid->b_is_rx ? 
1 : 0; + + qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + resp = &p_iov->pf2vf_reply->read_coal_resp; + + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + goto exit; + + *p_coal = resp->coal; +exit: + qed_vf_pf_req_end(p_hwfn, rc); + + return rc; +} + +int +qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, + u16 rx_coal, u16 tx_coal, struct qed_queue_cid *p_cid) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_update_coalesce *req; + struct pfvf_def_resp_tlv *resp; + int rc; + + /* clear mailbox and prep header tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_UPDATE, sizeof(*req)); + + req->rx_coal = rx_coal; + req->tx_coal = tx_coal; + req->qid = p_cid->rel.queue_id; + + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "Setting coalesce rx_coal = %d, tx_coal = %d at queue = %d\n", + rx_coal, tx_coal, req->qid); + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->default_resp; + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + goto exit; + + if (rx_coal) + p_hwfn->cdev->rx_coalesce_usecs = rx_coal; + + if (tx_coal) + p_hwfn->cdev->tx_coalesce_usecs = tx_coal; + +exit: + qed_vf_pf_req_end(p_hwfn, rc); + return rc; +} + u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index 34d9b882a780..97d44dfb38ca 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -497,6 +497,27 @@ struct tlv_buffer_size { u8 tlv_buffer[TLV_BUFFER_SIZE]; }; +struct vfpf_update_coalesce { + struct vfpf_first_tlv first_tlv; + u16 rx_coal; + u16 tx_coal; + u16 qid; + u8 padding[2]; +}; + +struct vfpf_read_coal_req_tlv { + struct vfpf_first_tlv first_tlv; + u16 qid; + u8 is_rx; + u8 padding[5]; +}; + +struct pfvf_read_coal_resp_tlv { + struct pfvf_tlv hdr; + u16 coal; + u8 padding[6]; +}; + union vfpf_tlvs { struct vfpf_first_tlv first_tlv; struct vfpf_acquire_tlv acquire; @@ -509,7 +530,8 @@ union vfpf_tlvs { struct vfpf_vport_update_tlv vport_update; struct vfpf_ucast_filter_tlv ucast_filter; struct vfpf_update_tunn_param_tlv tunn_param_update; - struct channel_list_end_tlv list_end; + struct vfpf_update_coalesce update_coalesce; + struct vfpf_read_coal_req_tlv read_coal_req; struct tlv_buffer_size tlv_buf_size; }; @@ -519,6 +541,7 @@ union pfvf_tlvs { struct tlv_buffer_size tlv_buf_size; struct pfvf_start_queue_resp_tlv queue_start; struct pfvf_update_tunn_param_tlv tunn_param_resp; + struct pfvf_read_coal_resp_tlv read_coal_resp; }; enum qed_bulletin_bit { @@ -624,8 +647,9 @@ enum { CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN, CHANNEL_TLV_VPORT_UPDATE_SGE_TPA, CHANNEL_TLV_UPDATE_TUNN_PARAM, - CHANNEL_TLV_RESERVED, + CHANNEL_TLV_COALESCE_UPDATE, CHANNEL_TLV_QID, + CHANNEL_TLV_COALESCE_READ, CHANNEL_TLV_MAX, /* Required for iterating over vport-update tlvs. @@ -677,6 +701,31 @@ struct qed_vf_iov { bool b_doorbell_bar; }; +/** + * @brief VF - Set Rx/Tx coalesce per VF's relative queue. + * Coalesce value '0' will omit the configuration. 
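A hypothetical caller of the qed_vf_pf_set_coalesce() helper implemented above, relying on the zero-value convention its comment describes; p_hwfn and p_cid are assumed to come from the caller's context:

	/* Update only Rx coalescing to 48 usec; 0 leaves Tx untouched */
	rc = qed_vf_pf_set_coalesce(p_hwfn, 48, 0, p_cid);
	if (rc)
		DP_NOTICE(p_hwfn, "Rx coalesce update through PF failed\n");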
+ * + * @param p_hwfn + * @param rx_coal - coalesce value in microseconds for the Rx queue + * @param tx_coal - coalesce value in microseconds for the Tx queue + * @param p_cid - queue cid + * + **/ +int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, + u16 rx_coal, + u16 tx_coal, struct qed_queue_cid *p_cid); + +/** + * @brief VF - Get coalesce per VF's relative queue. + * + * @param p_hwfn + * @param p_coal - coalesce value in microseconds for the VF queues. + * @param p_cid - queue cid + * + **/ +int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn, + u16 *p_coal, struct qed_queue_cid *p_cid); + #ifdef CONFIG_QED_SRIOV /** * @brief Read the VF bulletin and act on it if needed diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 4dfb238221f9..adb700512baa 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -160,6 +160,8 @@ struct qede_rdma_dev { struct qede_ptp; +#define QEDE_RFS_MAX_FLTR 256 + struct qede_dev { struct qed_dev *cdev; struct net_device *ndev; @@ -241,9 +243,7 @@ struct qede_dev { u16 vxlan_dst_port; u16 geneve_dst_port; -#ifdef CONFIG_RFS_ACCEL struct qede_arfs *arfs; -#endif bool wol_enabled; struct qede_rdma_dev rdma_info; @@ -447,16 +447,21 @@ struct qede_fastpath { #ifdef CONFIG_RFS_ACCEL int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, u16 rxq_index, u32 flow_id); +#define QEDE_SP_ARFS_CONFIG 4 +#define QEDE_SP_TASK_POLL_DELAY (5 * HZ) +#endif + void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr); void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev); void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc); void qede_free_arfs(struct qede_dev *edev); int qede_alloc_arfs(struct qede_dev *edev); - -#define QEDE_SP_ARFS_CONFIG 4 -#define QEDE_SP_TASK_POLL_DELAY (5 * HZ) -#define QEDE_RFS_MAX_FLTR 256 -#endif +int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info); +int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info); +int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd); +int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info, + u32 *rule_locs); +int qede_get_arfs_filter_count(struct qede_dev *edev); struct qede_reload_args { void (*func)(struct qede_dev *edev, struct qede_reload_args *args); diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 6a03d3e66cff..dae741270022 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -702,24 +702,62 @@ static u32 qede_get_link(struct net_device *dev) static int qede_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) { + void *rx_handle = NULL, *tx_handle = NULL; struct qede_dev *edev = netdev_priv(dev); - u16 rxc, txc; + u16 rx_coal, tx_coal, i; + int rc = 0; + struct qede_fastpath *fp; + + rx_coal = QED_DEFAULT_RX_USECS; + tx_coal = QED_DEFAULT_TX_USECS; memset(coal, 0, sizeof(struct ethtool_coalesce)); - edev->ops->common->get_coalesce(edev->cdev, &rxc, &txc); - coal->rx_coalesce_usecs = rxc; - coal->tx_coalesce_usecs = txc; + __qede_lock(edev); + if (edev->state == QEDE_STATE_OPEN) { + for_each_queue(i) { + fp = &edev->fp_array[i]; - return 0; + if (fp->type & QEDE_FASTPATH_RX) { + rx_handle = fp->rxq->handle; + break; + } + } + + rc = edev->ops->get_coalesce(edev->cdev, &rx_coal, rx_handle); + if (rc) { + DP_INFO(edev, "Read Rx coalesce error\n"); + goto out; + } + +
for_each_queue(i) { + fp = &edev->fp_array[i]; + if (fp->type & QEDE_FASTPATH_TX) { + tx_handle = fp->txq->handle; + break; + } + } + + rc = edev->ops->get_coalesce(edev->cdev, &tx_coal, tx_handle); + if (rc) + DP_INFO(edev, "Read Tx coalesce error\n"); + } + +out: + __qede_unlock(edev); + + coal->rx_coalesce_usecs = rx_coal; + coal->tx_coalesce_usecs = tx_coal; + + return rc; } static int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) { struct qede_dev *edev = netdev_priv(dev); + struct qede_fastpath *fp; int i, rc = 0; - u16 rxc, txc, sb_id; + u16 rxc, txc; if (!netif_running(dev)) { DP_INFO(edev, "Interface is down\n"); @@ -730,21 +768,36 @@ static int qede_set_coalesce(struct net_device *dev, coal->tx_coalesce_usecs > QED_COALESCE_MAX) { DP_INFO(edev, "Can't support requested %s coalesce value [max supported value %d]\n", - coal->rx_coalesce_usecs > QED_COALESCE_MAX ? "rx" - : "tx", - QED_COALESCE_MAX); + coal->rx_coalesce_usecs > QED_COALESCE_MAX ? "rx" : + "tx", QED_COALESCE_MAX); return -EINVAL; } rxc = (u16)coal->rx_coalesce_usecs; txc = (u16)coal->tx_coalesce_usecs; for_each_queue(i) { - sb_id = edev->fp_array[i].sb_info->igu_sb_id; - rc = edev->ops->common->set_coalesce(edev->cdev, rxc, txc, - (u16)i, sb_id); - if (rc) { - DP_INFO(edev, "Set coalesce error, rc = %d\n", rc); - return rc; + fp = &edev->fp_array[i]; + + if (edev->fp_array[i].type & QEDE_FASTPATH_RX) { + rc = edev->ops->common->set_coalesce(edev->cdev, + rxc, 0, + fp->rxq->handle); + if (rc) { + DP_INFO(edev, + "Set RX coalesce error, rc = %d\n", rc); + return rc; + } + } + + if (edev->fp_array[i].type & QEDE_FASTPATH_TX) { + rc = edev->ops->common->set_coalesce(edev->cdev, + 0, txc, + fp->txq->handle); + if (rc) { + DP_INFO(edev, + "Set TX coalesce error, rc = %d\n", rc); + return rc; + } } } @@ -1045,20 +1098,34 @@ static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info) } static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, - u32 *rules __always_unused) + u32 *rule_locs) { struct qede_dev *edev = netdev_priv(dev); + int rc = 0; switch (info->cmd) { case ETHTOOL_GRXRINGS: info->data = QEDE_RSS_COUNT(edev); - return 0; + break; case ETHTOOL_GRXFH: - return qede_get_rss_flags(edev, info); + rc = qede_get_rss_flags(edev, info); + break; + case ETHTOOL_GRXCLSRLCNT: + info->rule_cnt = qede_get_arfs_filter_count(edev); + info->data = QEDE_RFS_MAX_FLTR; + break; + case ETHTOOL_GRXCLSRULE: + rc = qede_get_cls_rule_entry(edev, info); + break; + case ETHTOOL_GRXCLSRLALL: + rc = qede_get_cls_rule_all(edev, info, rule_locs); + break; default: DP_ERR(edev, "Command parameters not supported\n"); - return -EOPNOTSUPP; + rc = -EOPNOTSUPP; } + + return rc; } static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info) @@ -1168,14 +1235,24 @@ static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info) static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info) { struct qede_dev *edev = netdev_priv(dev); + int rc; switch (info->cmd) { case ETHTOOL_SRXFH: - return qede_set_rss_flags(edev, info); + rc = qede_set_rss_flags(edev, info); + break; + case ETHTOOL_SRXCLSRLINS: + rc = qede_add_cls_rule(edev, info); + break; + case ETHTOOL_SRXCLSRLDEL: + rc = qede_del_cls_rule(edev, info); + break; default: DP_INFO(edev, "Command parameters not supported\n"); - return -EOPNOTSUPP; + rc = -EOPNOTSUPP; } + + return rc; } static u32 qede_get_rxfh_indir_size(struct net_device *dev) @@ -1607,6 +1684,87 @@ static int 
qede_get_tunable(struct net_device *dev, return 0; } +static int qede_get_eee(struct net_device *dev, struct ethtool_eee *edata) +{ + struct qede_dev *edev = netdev_priv(dev); + struct qed_link_output current_link; + + memset(&current_link, 0, sizeof(current_link)); + edev->ops->common->get_link(edev->cdev, &current_link); + + if (!current_link.eee_supported) { + DP_INFO(edev, "EEE is not supported\n"); + return -EOPNOTSUPP; + } + + if (current_link.eee.adv_caps & QED_EEE_1G_ADV) + edata->advertised = ADVERTISED_1000baseT_Full; + if (current_link.eee.adv_caps & QED_EEE_10G_ADV) + edata->advertised |= ADVERTISED_10000baseT_Full; + if (current_link.sup_caps & QED_EEE_1G_ADV) + edata->supported = ADVERTISED_1000baseT_Full; + if (current_link.sup_caps & QED_EEE_10G_ADV) + edata->supported |= ADVERTISED_10000baseT_Full; + if (current_link.eee.lp_adv_caps & QED_EEE_1G_ADV) + edata->lp_advertised = ADVERTISED_1000baseT_Full; + if (current_link.eee.lp_adv_caps & QED_EEE_10G_ADV) + edata->lp_advertised |= ADVERTISED_10000baseT_Full; + + edata->tx_lpi_timer = current_link.eee.tx_lpi_timer; + edata->eee_enabled = current_link.eee.enable; + edata->tx_lpi_enabled = current_link.eee.tx_lpi_enable; + edata->eee_active = current_link.eee_active; + + return 0; +} + +static int qede_set_eee(struct net_device *dev, struct ethtool_eee *edata) +{ + struct qede_dev *edev = netdev_priv(dev); + struct qed_link_output current_link; + struct qed_link_params params; + + if (!edev->ops->common->can_link_change(edev->cdev)) { + DP_INFO(edev, "Link settings are not allowed to be changed\n"); + return -EOPNOTSUPP; + } + + memset(&current_link, 0, sizeof(current_link)); + edev->ops->common->get_link(edev->cdev, &current_link); + + if (!current_link.eee_supported) { + DP_INFO(edev, "EEE is not supported\n"); + return -EOPNOTSUPP; + } + + memset(&params, 0, sizeof(params)); + params.override_flags |= QED_LINK_OVERRIDE_EEE_CONFIG; + + if (!(edata->advertised & (ADVERTISED_1000baseT_Full | + ADVERTISED_10000baseT_Full)) || + ((edata->advertised & (ADVERTISED_1000baseT_Full | + ADVERTISED_10000baseT_Full)) != + edata->advertised)) { + DP_VERBOSE(edev, QED_MSG_DEBUG, + "Invalid advertised capabilities %d\n", + edata->advertised); + return -EINVAL; + } + + if (edata->advertised & ADVERTISED_1000baseT_Full) + params.eee.adv_caps = QED_EEE_1G_ADV; + if (edata->advertised & ADVERTISED_10000baseT_Full) + params.eee.adv_caps |= QED_EEE_10G_ADV; + params.eee.enable = edata->eee_enabled; + params.eee.tx_lpi_enable = edata->tx_lpi_enabled; + params.eee.tx_lpi_timer = edata->tx_lpi_timer; + + params.link_up = true; + edev->ops->common->set_link(edev->cdev, &params); + + return 0; +} + static const struct ethtool_ops qede_ethtool_ops = { .get_link_ksettings = qede_get_link_ksettings, .set_link_ksettings = qede_set_link_ksettings, @@ -1640,6 +1798,9 @@ static const struct ethtool_ops qede_ethtool_ops = { .get_channels = qede_get_channels, .set_channels = qede_set_channels, .self_test = qede_self_test, + .get_eee = qede_get_eee, + .set_eee = qede_set_eee, + .get_tunable = qede_get_tunable, .set_tunable = qede_set_tunable, }; @@ -1650,6 +1811,8 @@ static const struct ethtool_ops qede_vf_ethtool_ops = { .get_msglevel = qede_get_msglevel, .set_msglevel = qede_set_msglevel, .get_link = qede_get_link, + .get_coalesce = qede_get_coalesce, + .set_coalesce = qede_set_coalesce, .get_ringparam = qede_get_ringparam, .set_ringparam = qede_set_ringparam, .get_strings = qede_get_strings, diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c
b/drivers/net/ethernet/qlogic/qede/qede_filter.c index f939db5bac5f..f79e36e4060a 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -38,7 +38,6 @@ #include <linux/qed/qed_if.h> #include "qede.h" -#ifdef CONFIG_RFS_ACCEL struct qede_arfs_tuple { union { __be32 src_ipv4; @@ -76,10 +75,12 @@ struct qede_arfs_fltr_node { u16 next_rxq_id; bool filter_op; bool used; + u8 fw_rc; struct hlist_node node; }; struct qede_arfs { +#define QEDE_ARFS_BUCKET_HEAD(edev, idx) (&(edev)->arfs->arfs_hl_head[idx]) #define QEDE_ARFS_POLL_COUNT 100 #define QEDE_RFS_FLW_BITSHIFT (4) #define QEDE_RFS_FLW_MASK ((1 << QEDE_RFS_FLW_BITSHIFT) - 1) @@ -121,11 +122,56 @@ qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr) kfree(fltr); } +static int +qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev, + struct qede_arfs_fltr_node *fltr, + u16 bucket_idx) +{ + fltr->mapping = dma_map_single(&edev->pdev->dev, fltr->data, + fltr->buf_len, DMA_TO_DEVICE); + if (dma_mapping_error(&edev->pdev->dev, fltr->mapping)) { + DP_NOTICE(edev, "Failed to map DMA memory for rule\n"); + qede_free_arfs_filter(edev, fltr); + return -ENOMEM; + } + + INIT_HLIST_NODE(&fltr->node); + hlist_add_head(&fltr->node, + QEDE_ARFS_BUCKET_HEAD(edev, bucket_idx)); + edev->arfs->filter_count++; + + if (edev->arfs->filter_count == 1 && !edev->arfs->enable) { + edev->ops->configure_arfs_searcher(edev->cdev, true); + edev->arfs->enable = true; + } + + return 0; +} + +static void +qede_dequeue_fltr_and_config_searcher(struct qede_dev *edev, + struct qede_arfs_fltr_node *fltr) +{ + hlist_del(&fltr->node); + dma_unmap_single(&edev->pdev->dev, fltr->mapping, + fltr->buf_len, DMA_TO_DEVICE); + + qede_free_arfs_filter(edev, fltr); + edev->arfs->filter_count--; + + if (!edev->arfs->filter_count && edev->arfs->enable) { + edev->arfs->enable = false; + edev->ops->configure_arfs_searcher(edev->cdev, false); + } +} + void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc) { struct qede_arfs_fltr_node *fltr = filter; struct qede_dev *edev = dev; + fltr->fw_rc = fw_rc; + if (fw_rc) { DP_NOTICE(edev, "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n", @@ -185,18 +231,17 @@ void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr) if ((!test_bit(QEDE_FLTR_VALID, &fltr->state) && !fltr->used) || free_fltr) { - hlist_del(&fltr->node); - dma_unmap_single(&edev->pdev->dev, - fltr->mapping, - fltr->buf_len, DMA_TO_DEVICE); - qede_free_arfs_filter(edev, fltr); - edev->arfs->filter_count--; + qede_dequeue_fltr_and_config_searcher(edev, + fltr); } else { - if ((rps_may_expire_flow(edev->ndev, - fltr->rxq_id, - fltr->flow_id, - fltr->sw_id) || del) && - !free_fltr) + bool flow_exp = false; +#ifdef CONFIG_RFS_ACCEL + flow_exp = rps_may_expire_flow(edev->ndev, + fltr->rxq_id, + fltr->flow_id, + fltr->sw_id); +#endif + if ((flow_exp || del) && !free_fltr) qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id, false); @@ -213,10 +258,12 @@ void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr) edev->arfs->enable = false; edev->ops->configure_arfs_searcher(edev->cdev, false); } +#ifdef CONFIG_RFS_ACCEL } else { set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags); schedule_delayed_work(&edev->sp_task, QEDE_SP_TASK_POLL_DELAY); +#endif } spin_unlock_bh(&edev->arfs->arfs_list_lock); @@ -258,25 +305,26 @@ int qede_alloc_arfs(struct qede_dev *edev) spin_lock_init(&edev->arfs->arfs_list_lock); for (i = 0; i <= 
QEDE_RFS_FLW_MASK; i++) - INIT_HLIST_HEAD(&edev->arfs->arfs_hl_head[i]); + INIT_HLIST_HEAD(QEDE_ARFS_BUCKET_HEAD(edev, i)); - edev->ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(QEDE_RSS_COUNT(edev)); - if (!edev->ndev->rx_cpu_rmap) { + edev->arfs->arfs_fltr_bmap = vzalloc(BITS_TO_LONGS(QEDE_RFS_MAX_FLTR) * + sizeof(long)); + if (!edev->arfs->arfs_fltr_bmap) { vfree(edev->arfs); edev->arfs = NULL; return -ENOMEM; } - edev->arfs->arfs_fltr_bmap = vzalloc(BITS_TO_LONGS(QEDE_RFS_MAX_FLTR) * - sizeof(long)); - if (!edev->arfs->arfs_fltr_bmap) { - free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap); - edev->ndev->rx_cpu_rmap = NULL; +#ifdef CONFIG_RFS_ACCEL + edev->ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(QEDE_RSS_COUNT(edev)); + if (!edev->ndev->rx_cpu_rmap) { + vfree(edev->arfs->arfs_fltr_bmap); + edev->arfs->arfs_fltr_bmap = NULL; vfree(edev->arfs); edev->arfs = NULL; return -ENOMEM; } - +#endif return 0; } @@ -285,16 +333,19 @@ void qede_free_arfs(struct qede_dev *edev) if (!edev->arfs) return; +#ifdef CONFIG_RFS_ACCEL if (edev->ndev->rx_cpu_rmap) free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap); edev->ndev->rx_cpu_rmap = NULL; +#endif vfree(edev->arfs->arfs_fltr_bmap); edev->arfs->arfs_fltr_bmap = NULL; vfree(edev->arfs); edev->arfs = NULL; } +#ifdef CONFIG_RFS_ACCEL static bool qede_compare_ip_addr(struct qede_arfs_fltr_node *tpos, const struct sk_buff *skb) { @@ -394,9 +445,8 @@ int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, spin_lock_bh(&edev->arfs->arfs_list_lock); - n = qede_arfs_htbl_key_search(&edev->arfs->arfs_hl_head[tbl_idx], + n = qede_arfs_htbl_key_search(QEDE_ARFS_BUCKET_HEAD(edev, tbl_idx), skb, ports[0], ports[1], ip_proto); - if (n) { /* Filter match */ n->next_rxq_id = rxq_index; @@ -448,23 +498,9 @@ int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, n->tuple.ip_proto = ip_proto; memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb)); - n->mapping = dma_map_single(&edev->pdev->dev, n->data, - n->buf_len, DMA_TO_DEVICE); - if (dma_mapping_error(&edev->pdev->dev, n->mapping)) { - DP_NOTICE(edev, "Failed to map DMA memory for arfs\n"); - qede_free_arfs_filter(edev, n); - rc = -ENOMEM; + rc = qede_enqueue_fltr_and_config_searcher(edev, n, tbl_idx); + if (rc) goto ret_unlock; - } - - INIT_HLIST_NODE(&n->node); - hlist_add_head(&n->node, &edev->arfs->arfs_hl_head[tbl_idx]); - edev->arfs->filter_count++; - - if (edev->arfs->filter_count == 1 && !edev->arfs->enable) { - edev->ops->configure_arfs_searcher(edev->cdev, true); - edev->arfs->enable = true; - } qede_configure_arfs_fltr(edev, n, n->rxq_id, true); @@ -472,6 +508,7 @@ int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags); schedule_delayed_work(&edev->sp_task, 0); + return n->sw_id; ret_unlock: @@ -1263,3 +1300,371 @@ void qede_config_rx_mode(struct net_device *ndev) out: kfree(uc_macs); } + +static struct qede_arfs_fltr_node * +qede_get_arfs_fltr_by_loc(struct hlist_head *head, u32 location) +{ + struct qede_arfs_fltr_node *fltr; + + hlist_for_each_entry(fltr, head, node) + if (location == fltr->sw_id) + return fltr; + + return NULL; +} + +static bool +qede_compare_user_flow_ips(struct qede_arfs_fltr_node *tpos, + struct ethtool_rx_flow_spec *fsp, + __be16 proto) +{ + if (proto == htons(ETH_P_IP)) { + struct ethtool_tcpip4_spec *ip; + + ip = &fsp->h_u.tcp_ip4_spec; + + if (tpos->tuple.src_ipv4 == ip->ip4src && + tpos->tuple.dst_ipv4 == ip->ip4dst) + return true; + else + return false; + } else { + struct ethtool_tcpip6_spec *ip6; + 
struct in6_addr *src; + + ip6 = &fsp->h_u.tcp_ip6_spec; + src = &tpos->tuple.src_ipv6; + + if (!memcmp(src, &ip6->ip6src, sizeof(struct in6_addr)) && + !memcmp(&tpos->tuple.dst_ipv6, &ip6->ip6dst, + sizeof(struct in6_addr))) + return true; + else + return false; + } + return false; +} + +int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info, + u32 *rule_locs) +{ + struct qede_arfs_fltr_node *fltr; + struct hlist_head *head; + int cnt = 0, rc = 0; + + info->data = QEDE_RFS_MAX_FLTR; + + __qede_lock(edev); + + if (!edev->arfs) { + rc = -EPERM; + goto unlock; + } + + head = QEDE_ARFS_BUCKET_HEAD(edev, 0); + + hlist_for_each_entry(fltr, head, node) { + if (cnt == info->rule_cnt) { + rc = -EMSGSIZE; + goto unlock; + } + + rule_locs[cnt] = fltr->sw_id; + cnt++; + } + + info->rule_cnt = cnt; + +unlock: + __qede_unlock(edev); + return rc; +} + +int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = &cmd->fs; + struct qede_arfs_fltr_node *fltr = NULL; + int rc = 0; + + cmd->data = QEDE_RFS_MAX_FLTR; + + __qede_lock(edev); + + if (!edev->arfs) { + rc = -EPERM; + goto unlock; + } + + fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0), + fsp->location); + if (!fltr) { + DP_NOTICE(edev, "Rule not found - location=0x%x\n", + fsp->location); + rc = -EINVAL; + goto unlock; + } + + if (fltr->tuple.eth_proto == htons(ETH_P_IP)) { + if (fltr->tuple.ip_proto == IPPROTO_TCP) + fsp->flow_type = TCP_V4_FLOW; + else + fsp->flow_type = UDP_V4_FLOW; + + fsp->h_u.tcp_ip4_spec.psrc = fltr->tuple.src_port; + fsp->h_u.tcp_ip4_spec.pdst = fltr->tuple.dst_port; + fsp->h_u.tcp_ip4_spec.ip4src = fltr->tuple.src_ipv4; + fsp->h_u.tcp_ip4_spec.ip4dst = fltr->tuple.dst_ipv4; + } else { + if (fltr->tuple.ip_proto == IPPROTO_TCP) + fsp->flow_type = TCP_V6_FLOW; + else + fsp->flow_type = UDP_V6_FLOW; + fsp->h_u.tcp_ip6_spec.psrc = fltr->tuple.src_port; + fsp->h_u.tcp_ip6_spec.pdst = fltr->tuple.dst_port; + memcpy(&fsp->h_u.tcp_ip6_spec.ip6src, + &fltr->tuple.src_ipv6, sizeof(struct in6_addr)); + memcpy(&fsp->h_u.tcp_ip6_spec.ip6dst, + &fltr->tuple.dst_ipv6, sizeof(struct in6_addr)); + } + + fsp->ring_cookie = fltr->rxq_id; + +unlock: + __qede_unlock(edev); + return rc; +} + +static int +qede_validate_and_check_flow_exist(struct qede_dev *edev, + struct ethtool_rx_flow_spec *fsp, + int *min_hlen) +{ + __be16 src_port = 0x0, dst_port = 0x0; + struct qede_arfs_fltr_node *fltr; + struct hlist_node *temp; + struct hlist_head *head; + __be16 eth_proto; + u8 ip_proto; + + if (fsp->location >= QEDE_RFS_MAX_FLTR || + fsp->ring_cookie >= QEDE_RSS_COUNT(edev)) + return -EINVAL; + + if (fsp->flow_type == TCP_V4_FLOW) { + *min_hlen += sizeof(struct iphdr) + + sizeof(struct tcphdr); + eth_proto = htons(ETH_P_IP); + ip_proto = IPPROTO_TCP; + } else if (fsp->flow_type == UDP_V4_FLOW) { + *min_hlen += sizeof(struct iphdr) + + sizeof(struct udphdr); + eth_proto = htons(ETH_P_IP); + ip_proto = IPPROTO_UDP; + } else if (fsp->flow_type == TCP_V6_FLOW) { + *min_hlen += sizeof(struct ipv6hdr) + + sizeof(struct tcphdr); + eth_proto = htons(ETH_P_IPV6); + ip_proto = IPPROTO_TCP; + } else if (fsp->flow_type == UDP_V6_FLOW) { + *min_hlen += sizeof(struct ipv6hdr) + + sizeof(struct udphdr); + eth_proto = htons(ETH_P_IPV6); + ip_proto = IPPROTO_UDP; + } else { + DP_NOTICE(edev, "Unsupported flow type = 0x%x\n", + fsp->flow_type); + return -EPROTONOSUPPORT; + } + + if (eth_proto == htons(ETH_P_IP)) { + src_port = fsp->h_u.tcp_ip4_spec.psrc; + dst_port = 
fsp->h_u.tcp_ip4_spec.pdst; + } else { + src_port = fsp->h_u.tcp_ip6_spec.psrc; + dst_port = fsp->h_u.tcp_ip6_spec.pdst; + } + + head = QEDE_ARFS_BUCKET_HEAD(edev, 0); + hlist_for_each_entry_safe(fltr, temp, head, node) { + if ((fltr->tuple.ip_proto == ip_proto && + fltr->tuple.eth_proto == eth_proto && + qede_compare_user_flow_ips(fltr, fsp, eth_proto) && + fltr->tuple.src_port == src_port && + fltr->tuple.dst_port == dst_port) || + fltr->sw_id == fsp->location) + return -EEXIST; + } + + return 0; +} + +static int +qede_poll_arfs_filter_config(struct qede_dev *edev, + struct qede_arfs_fltr_node *fltr) +{ + int count = QEDE_ARFS_POLL_COUNT; + + while (fltr->used && count) { + msleep(20); + count--; + } + + if (count == 0 || fltr->fw_rc) { + qede_dequeue_fltr_and_config_searcher(edev, fltr); + return -EIO; + } + + return fltr->fw_rc; +} + +int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info) +{ + struct ethtool_rx_flow_spec *fsp = &info->fs; + struct qede_arfs_fltr_node *n; + int min_hlen = ETH_HLEN, rc; + struct ethhdr *eth; + struct iphdr *ip; + __be16 *ports; + + __qede_lock(edev); + + if (!edev->arfs) { + rc = -EPERM; + goto unlock; + } + + rc = qede_validate_and_check_flow_exist(edev, fsp, &min_hlen); + if (rc) + goto unlock; + + n = kzalloc(sizeof(*n), GFP_KERNEL); + if (!n) { + rc = -ENOMEM; + goto unlock; + } + + n->data = kzalloc(min_hlen, GFP_KERNEL); + if (!n->data) { + kfree(n); + rc = -ENOMEM; + goto unlock; + } + + n->sw_id = fsp->location; + set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap); + n->buf_len = min_hlen; + n->rxq_id = fsp->ring_cookie; + n->next_rxq_id = n->rxq_id; + eth = (struct ethhdr *)n->data; + + if (info->fs.flow_type == TCP_V4_FLOW || + info->fs.flow_type == UDP_V4_FLOW) { + ports = (__be16 *)(n->data + ETH_HLEN + + sizeof(struct iphdr)); + eth->h_proto = htons(ETH_P_IP); + n->tuple.eth_proto = htons(ETH_P_IP); + n->tuple.src_ipv4 = info->fs.h_u.tcp_ip4_spec.ip4src; + n->tuple.dst_ipv4 = info->fs.h_u.tcp_ip4_spec.ip4dst; + n->tuple.src_port = info->fs.h_u.tcp_ip4_spec.psrc; + n->tuple.dst_port = info->fs.h_u.tcp_ip4_spec.pdst; + ports[0] = n->tuple.src_port; + ports[1] = n->tuple.dst_port; + ip = (struct iphdr *)(n->data + ETH_HLEN); + ip->saddr = info->fs.h_u.tcp_ip4_spec.ip4src; + ip->daddr = info->fs.h_u.tcp_ip4_spec.ip4dst; + ip->version = 0x4; + ip->ihl = 0x5; + + if (info->fs.flow_type == TCP_V4_FLOW) { + n->tuple.ip_proto = IPPROTO_TCP; + ip->protocol = IPPROTO_TCP; + } else { + n->tuple.ip_proto = IPPROTO_UDP; + ip->protocol = IPPROTO_UDP; + } + ip->tot_len = cpu_to_be16(min_hlen - ETH_HLEN); + } else { + struct ipv6hdr *ip6; + + ip6 = (struct ipv6hdr *)(n->data + ETH_HLEN); + ports = (__be16 *)(n->data + ETH_HLEN + + sizeof(struct ipv6hdr)); + eth->h_proto = htons(ETH_P_IPV6); + n->tuple.eth_proto = htons(ETH_P_IPV6); + memcpy(&n->tuple.src_ipv6, &info->fs.h_u.tcp_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&n->tuple.dst_ipv6, &info->fs.h_u.tcp_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + n->tuple.src_port = info->fs.h_u.tcp_ip6_spec.psrc; + n->tuple.dst_port = info->fs.h_u.tcp_ip6_spec.pdst; + ports[0] = n->tuple.src_port; + ports[1] = n->tuple.dst_port; + memcpy(&ip6->saddr, &n->tuple.src_ipv6, + sizeof(struct in6_addr)); + memcpy(&ip6->daddr, &n->tuple.dst_ipv6, + sizeof(struct in6_addr)); + ip6->version = 0x6; + + if (info->fs.flow_type == TCP_V6_FLOW) { + n->tuple.ip_proto = IPPROTO_TCP; + ip6->nexthdr = NEXTHDR_TCP; + ip6->payload_len = cpu_to_be16(sizeof(struct tcphdr)); + } else { + n->tuple.ip_proto = 
IPPROTO_UDP; + ip6->nexthdr = NEXTHDR_UDP; + ip6->payload_len = cpu_to_be16(sizeof(struct udphdr)); + } + } + + rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0); + if (rc) + goto unlock; + + qede_configure_arfs_fltr(edev, n, n->rxq_id, true); + rc = qede_poll_arfs_filter_config(edev, n); +unlock: + __qede_unlock(edev); + return rc; +} + +int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info) +{ + struct ethtool_rx_flow_spec *fsp = &info->fs; + struct qede_arfs_fltr_node *fltr = NULL; + int rc = -EPERM; + + __qede_lock(edev); + if (!edev->arfs) + goto unlock; + + fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0), + fsp->location); + if (!fltr) + goto unlock; + + qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id, false); + + rc = qede_poll_arfs_filter_config(edev, fltr); + if (rc == 0) + qede_dequeue_fltr_and_config_searcher(edev, fltr); + +unlock: + __qede_unlock(edev); + return rc; +} + +int qede_get_arfs_filter_count(struct qede_dev *edev) +{ + int count = 0; + + __qede_lock(edev); + + if (!edev->arfs) + goto unlock; + + count = edev->arfs->filter_count; + +unlock: + __qede_unlock(edev); + return count; +} diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 06ca13dd9ddb..e5ee9f274a71 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -873,9 +873,7 @@ static void qede_update_pf_params(struct qed_dev *cdev) */ pf_params.eth_pf_params.num_vf_cons = 48; -#ifdef CONFIG_RFS_ACCEL pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR; -#endif qed_ops->common->update_pf_params(cdev, &pf_params); } @@ -1984,12 +1982,12 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode, qede_vlan_mark_nonconfigured(edev); edev->ops->fastpath_stop(edev->cdev); -#ifdef CONFIG_RFS_ACCEL + if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) { qede_poll_for_freeing_arfs_filters(edev); qede_free_arfs(edev); } -#endif + /* Release the interrupts */ qede_sync_free_irqs(edev); edev->ops->common->set_fp_int(edev->cdev, 0); @@ -2041,13 +2039,12 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, if (rc) goto err2; -#ifdef CONFIG_RFS_ACCEL if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) { rc = qede_alloc_arfs(edev); if (rc) DP_NOTICE(edev, "aRFS memory allocation failed\n"); } -#endif + qede_napi_add_enable(edev); DP_INFO(edev, "Napi added and enabled\n"); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c index 0844b7c75767..afa10a163da1 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c @@ -1285,7 +1285,7 @@ flash_temp: int qlcnic_dump_fw(struct qlcnic_adapter *adapter) { struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; - static const struct qlcnic_dump_operations *fw_dump_ops; + const struct qlcnic_dump_operations *fw_dump_ops; struct qlcnic_83xx_dump_template_hdr *hdr_83xx; u32 entry_offset, dump, no_entries, buf_offset = 0; int i, k, ops_cnt, ops_index, dump_size = 0; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 73027a6c06c7..82fcb83ea3c8 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -1248,7 +1248,7 @@ static const struct bin_attribute bin_attr_pm_config = { .write = qlcnic_sysfs_write_pm_config, }; 
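The ETHTOOL_SRXCLSRLINS path serviced by qede_add_cls_rule() above is driven from userspace through the standard SIOCETHTOOL ioctl. A minimal sketch inserting a TCPv4 rule that steers matching flows to Rx queue 2 (interface name, addresses, ports and indices are placeholders):

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;
	nfc.fs.flow_type = TCP_V4_FLOW;
	nfc.fs.h_u.tcp_ip4_spec.ip4src = inet_addr("192.0.2.1");
	nfc.fs.h_u.tcp_ip4_spec.ip4dst = inet_addr("192.0.2.2");
	nfc.fs.h_u.tcp_ip4_spec.psrc = htons(6000);
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(6001);
	nfc.fs.ring_cookie = 2;		/* Rx queue to steer to */
	nfc.fs.location = 0;		/* becomes the rule's sw_id */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr))
		perror("ETHTOOL_SRXCLSRLINS");
	return 0;
}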
-static struct bin_attribute bin_attr_flash = { +static const struct bin_attribute bin_attr_flash = { .attr = {.name = "flash", .mode = (S_IRUGO | S_IWUSR)}, .size = 0, .read = qlcnic_83xx_sysfs_flash_read_handler, diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index 0525bd696d5d..96a27b00c90e 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -991,6 +991,7 @@ struct ravb_private { struct net_device *ndev; struct platform_device *pdev; void __iomem *addr; + struct clk *clk; struct mdiobb_ctrl mdiobb; u32 num_rx_ring[NUM_RX_QUEUE]; u32 num_tx_ring[NUM_TX_QUEUE]; @@ -1033,6 +1034,7 @@ struct ravb_private { unsigned no_avb_link:1; unsigned avb_link_active_low:1; + unsigned wol_enabled:1; }; static inline u32 ravb_read(struct net_device *ndev, enum ravb_reg reg) diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 5931e859876c..fdf30bfa403b 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -680,6 +680,9 @@ static void ravb_emac_interrupt_unlocked(struct net_device *ndev) ecsr = ravb_read(ndev, ECSR); ravb_write(ndev, ecsr, ECSR); /* clear interrupt */ + + if (ecsr & ECSR_MPD) + pm_wakeup_event(&priv->pdev->dev, 0); if (ecsr & ECSR_ICD) ndev->stats.tx_carrier_errors++; if (ecsr & ECSR_LCHNG) { @@ -1330,6 +1333,33 @@ static int ravb_get_ts_info(struct net_device *ndev, return 0; } +static void ravb_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct ravb_private *priv = netdev_priv(ndev); + + wol->supported = 0; + wol->wolopts = 0; + + if (priv->clk) { + wol->supported = WAKE_MAGIC; + wol->wolopts = priv->wol_enabled ? WAKE_MAGIC : 0; + } +} + +static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct ravb_private *priv = netdev_priv(ndev); + + if (!priv->clk || wol->wolopts & ~WAKE_MAGIC) + return -EOPNOTSUPP; + + priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC); + + device_set_wakeup_enable(&priv->pdev->dev, priv->wol_enabled); + + return 0; +} + static const struct ethtool_ops ravb_ethtool_ops = { .nway_reset = ravb_nway_reset, .get_msglevel = ravb_get_msglevel, @@ -1343,6 +1373,8 @@ static const struct ethtool_ops ravb_ethtool_ops = { .get_ts_info = ravb_get_ts_info, .get_link_ksettings = ravb_get_link_ksettings, .set_link_ksettings = ravb_set_link_ksettings, + .get_wol = ravb_get_wol, + .set_wol = ravb_set_wol, }; static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler, @@ -2041,6 +2073,11 @@ static int ravb_probe(struct platform_device *pdev) priv->chip_id = chip_id; + /* Get clock, if not found that's OK but Wake-On-Lan is unavailable */ + priv->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(priv->clk)) + priv->clk = NULL; + /* Set function */ ndev->netdev_ops = &ravb_netdev_ops; ndev->ethtool_ops = &ravb_ethtool_ops; @@ -2107,6 +2144,9 @@ static int ravb_probe(struct platform_device *pdev) if (error) goto out_napi_del; + if (priv->clk) + device_set_wakeup_capable(&pdev->dev, 1); + /* Print device information */ netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n", (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); @@ -2160,15 +2200,66 @@ static int ravb_remove(struct platform_device *pdev) return 0; } +static int ravb_wol_setup(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + + /* Disable interrupts by clearing the interrupt masks. 
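+	 * Only the EMAC interrupt stays armed: ECSIPR is written with just
+	 * ECSIPR_MPDIP below, so a received magic packet raises ECSR_MPD
+	 * and the interrupt handler hunk above can signal pm_wakeup_event()
+	 * while everything else is masked.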
*/ + ravb_write(ndev, 0, RIC0); + ravb_write(ndev, 0, RIC2); + ravb_write(ndev, 0, TIC); + + /* Only allow ECI interrupts */ + synchronize_irq(priv->emac_irq); + napi_disable(&priv->napi[RAVB_NC]); + napi_disable(&priv->napi[RAVB_BE]); + ravb_write(ndev, ECSIPR_MPDIP, ECSIPR); + + /* Enable MagicPacket */ + ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE); + + /* Increased clock usage so device won't be suspended */ + clk_enable(priv->clk); + + return enable_irq_wake(priv->emac_irq); +} + +static int ravb_wol_restore(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + int ret; + + napi_enable(&priv->napi[RAVB_NC]); + napi_enable(&priv->napi[RAVB_BE]); + + /* Disable MagicPacket */ + ravb_modify(ndev, ECMR, ECMR_MPDE, 0); + + ret = ravb_close(ndev); + if (ret < 0) + return ret; + + /* Restore clock usage count */ + clk_disable(priv->clk); + + return disable_irq_wake(priv->emac_irq); +} + static int __maybe_unused ravb_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); - int ret = 0; + struct ravb_private *priv = netdev_priv(ndev); + int ret; - if (netif_running(ndev)) { - netif_device_detach(ndev); + if (!netif_running(ndev)) + return 0; + + netif_device_detach(ndev); + + if (priv->wol_enabled) + ret = ravb_wol_setup(ndev); + else ret = ravb_close(ndev); - } return ret; } @@ -2179,6 +2270,33 @@ static int __maybe_unused ravb_resume(struct device *dev) struct ravb_private *priv = netdev_priv(ndev); int ret = 0; + if (priv->wol_enabled) { + /* Reduce the usecount of the clock to zero and then + * restore it to its original value. This is done to force + * the clock to be re-enabled which is a workaround + * for renesas-cpg-mssr driver which do not enable clocks + * when resuming from PSCI suspend/resume. + * + * Without this workaround the driver fails to communicate + * with the hardware if WoL was enabled when the system + * entered PSCI suspend. This is due to that if WoL is enabled + * we explicitly keep the clock from being turned off when + * suspending, but in PSCI sleep power is cut so the clock + * is disabled anyhow, the clock driver is not aware of this + * so the clock is not turned back on when resuming. + * + * TODO: once the renesas-cpg-mssr suspend/resume is working + * this clock dance should be removed. + */ + clk_disable(priv->clk); + clk_disable(priv->clk); + clk_enable(priv->clk); + clk_enable(priv->clk); + + /* Set reset mode to rearm the WoL logic */ + ravb_write(ndev, CCC_OPC_RESET, CCC); + } + /* All register have been reset to default values. * Restore all registers which where setup at probe time and * reopen device if it was running before system suspended. 
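 */

/* Aside (not from the patch): the "clock dance" above works out as
 * follows, assuming the enable count is 2 on entry -- one reference
 * taken by clk_enable() in ravb_wol_setup(), one held by the driver's
 * normal clock management:
 *
 *	clk_disable(priv->clk);		// 2 -> 1
 *	clk_disable(priv->clk);		// 1 -> 0, gate may really close
 *	clk_enable(priv->clk);		// 0 -> 1, CPG reprograms the gate
 *	clk_enable(priv->clk);		// 1 -> 2, original count restored
 *
 * Only the 0 -> 1 transition reaches the clock controller, and that is
 * what forces the re-enable after PSCI suspend has cut power.
 */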
@@ -2202,6 +2320,11 @@ static int __maybe_unused ravb_resume(struct device *dev) ravb_write(ndev, priv->desc_bat_dma, DBAT); if (netif_running(ndev)) { + if (priv->wol_enabled) { + ret = ravb_wol_restore(ndev); + if (ret) + return ret; + } ret = ravb_open(ndev); if (ret < 0) return ret; diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index b1e5c07099fa..fc8f8bdf6579 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -34,6 +34,7 @@ #include <net/netevent.h> #include <net/arp.h> #include <net/fib_rules.h> +#include <net/fib_notifier.h> #include <linux/io-64-nonatomic-lo-hi.h> #include <generated/utsrelease.h> @@ -2191,6 +2192,10 @@ static int rocker_router_fib_event(struct notifier_block *nb, { struct rocker *rocker = container_of(nb, struct rocker, fib_nb); struct rocker_fib_event_work *fib_work; + struct fib_notifier_info *info = ptr; + + if (info->family != AF_INET) + return NOTIFY_DONE; fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC); if (WARN_ON(!fib_work)) diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c index 600e30e8f0be..da4e26b53a52 100644 --- a/drivers/net/ethernet/rocker/rocker_ofdpa.c +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c @@ -2761,7 +2761,7 @@ static int ofdpa_fib4_add(struct rocker *rocker, fen_info->tb_id, 0); if (err) return err; - fib_info_offload_inc(fen_info->fi); + fen_info->fi->fib_nh->nh_flags |= RTNH_F_OFFLOAD; return 0; } @@ -2776,7 +2776,7 @@ static int ofdpa_fib4_del(struct rocker *rocker, ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker); if (!ofdpa_port) return 0; - fib_info_offload_dec(fen_info->fi); + fen_info->fi->fib_nh->nh_flags &= ~RTNH_F_OFFLOAD; return ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst), fen_info->dst_len, fen_info->fi, fen_info->tb_id, OFDPA_OP_FLAG_REMOVE); @@ -2803,7 +2803,7 @@ static void ofdpa_fib4_abort(struct rocker *rocker) rocker); if (!ofdpa_port) continue; - fib_info_offload_dec(flow_entry->fi); + flow_entry->fi->fib_nh->nh_flags &= ~RTNH_F_OFFLOAD; ofdpa_flow_tbl_del(ofdpa_port, OFDPA_OP_FLAG_REMOVE, flow_entry); } diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index fcea9371ab7f..d407adf59610 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -32,8 +32,8 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev); netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); -int efx_setup_tc(struct net_device *net_dev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *tc); +int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, + void *type_data); unsigned int efx_tx_max_skb_descs(struct efx_nic *efx); extern unsigned int efx_piobuf_size; extern bool efx_separate_tx_channels; diff --git a/drivers/net/ethernet/sfc/falcon/efx.h b/drivers/net/ethernet/sfc/falcon/efx.h index e5a7a40cc8b6..4f3bb30661ea 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.h +++ b/drivers/net/ethernet/sfc/falcon/efx.h @@ -32,8 +32,8 @@ netdev_tx_t ef4_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev); netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb); void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index); -int ef4_setup_tc(struct net_device *net_dev, u32 handle, u32 chain_index, - __be16 proto, struct 
tc_to_netdev *tc); +int ef4_setup_tc(struct net_device *net_dev, enum tc_setup_type type, + void *type_data); unsigned int ef4_tx_max_skb_descs(struct ef4_nic *efx); extern bool ef4_separate_tx_channels; diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c index f1520a404ac6..6a75f4140a4b 100644 --- a/drivers/net/ethernet/sfc/falcon/tx.c +++ b/drivers/net/ethernet/sfc/falcon/tx.c @@ -425,24 +425,25 @@ void ef4_init_tx_queue_core_txq(struct ef4_tx_queue *tx_queue) efx->n_tx_channels : 0)); } -int ef4_setup_tc(struct net_device *net_dev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *ntc) +int ef4_setup_tc(struct net_device *net_dev, enum tc_setup_type type, + void *type_data) { struct ef4_nic *efx = netdev_priv(net_dev); + struct tc_mqprio_qopt *mqprio = type_data; struct ef4_channel *channel; struct ef4_tx_queue *tx_queue; unsigned tc, num_tc; int rc; - if (ntc->type != TC_SETUP_MQPRIO) - return -EINVAL; + if (type != TC_SETUP_MQPRIO) + return -EOPNOTSUPP; - num_tc = ntc->mqprio->num_tc; + num_tc = mqprio->num_tc; if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0 || num_tc > EF4_MAX_TX_TC) return -EINVAL; - ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; if (num_tc == net_dev->num_tc) return 0; diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c index c905971c5f3a..d3f96a8f743b 100644 --- a/drivers/net/ethernet/sfc/mcdi_port.c +++ b/drivers/net/ethernet/sfc/mcdi_port.c @@ -746,59 +746,171 @@ static const char *efx_mcdi_phy_test_name(struct efx_nic *efx, return NULL; } -#define SFP_PAGE_SIZE 128 -#define SFP_NUM_PAGES 2 -static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx, - struct ethtool_eeprom *ee, u8 *data) +#define SFP_PAGE_SIZE 128 +#define SFF_DIAG_TYPE_OFFSET 92 +#define SFF_DIAG_ADDR_CHANGE BIT(2) +#define SFF_8079_NUM_PAGES 2 +#define SFF_8472_NUM_PAGES 4 +#define SFF_8436_NUM_PAGES 5 +#define SFF_DMT_LEVEL_OFFSET 94 + +/** efx_mcdi_phy_get_module_eeprom_page() - Get a single page of module eeprom + * @efx: NIC context + * @page: EEPROM page number + * @data: Destination data pointer + * @offset: Offset in page to copy from in to data + * @space: Space available in data + * + * Return: + * >=0 - amount of data copied + * <0 - error + */ +static int efx_mcdi_phy_get_module_eeprom_page(struct efx_nic *efx, + unsigned int page, + u8 *data, ssize_t offset, + ssize_t space) { MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX); MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN); size_t outlen; - int rc; unsigned int payload_len; - unsigned int space_remaining = ee->len; - unsigned int page; - unsigned int page_off; unsigned int to_copy; - u8 *user_data = data; + int rc; - BUILD_BUG_ON(SFP_PAGE_SIZE * SFP_NUM_PAGES != ETH_MODULE_SFF_8079_LEN); + if (offset > SFP_PAGE_SIZE) + return -EINVAL; - page_off = ee->offset % SFP_PAGE_SIZE; - page = ee->offset / SFP_PAGE_SIZE; + to_copy = min(space, SFP_PAGE_SIZE - offset); - while (space_remaining && (page < SFP_NUM_PAGES)) { - MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, page); + MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, page); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_PHY_MEDIA_INFO, + inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), + &outlen); - rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_MEDIA_INFO, - inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), - &outlen); - if (rc) - return rc; + if (rc) + return rc; + + if (outlen < (MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST + + SFP_PAGE_SIZE)) + 
return -EIO; + + payload_len = MCDI_DWORD(outbuf, GET_PHY_MEDIA_INFO_OUT_DATALEN); + if (payload_len != SFP_PAGE_SIZE) + return -EIO; - if (outlen < (MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST + - SFP_PAGE_SIZE)) - return -EIO; + memcpy(data, MCDI_PTR(outbuf, GET_PHY_MEDIA_INFO_OUT_DATA) + offset, + to_copy); - payload_len = MCDI_DWORD(outbuf, - GET_PHY_MEDIA_INFO_OUT_DATALEN); - if (payload_len != SFP_PAGE_SIZE) - return -EIO; + return to_copy; +} - /* Copy as much as we can into data */ - payload_len -= page_off; - to_copy = (space_remaining < payload_len) ? - space_remaining : payload_len; +static int efx_mcdi_phy_get_module_eeprom_byte(struct efx_nic *efx, + unsigned int page, + u8 byte) +{ + int rc; + u8 data; - memcpy(user_data, - MCDI_PTR(outbuf, GET_PHY_MEDIA_INFO_OUT_DATA) + page_off, - to_copy); + rc = efx_mcdi_phy_get_module_eeprom_page(efx, page, &data, byte, 1); + if (rc == 1) + return data; + + return rc; +} + +static int efx_mcdi_phy_diag_type(struct efx_nic *efx) +{ + /* Page zero of the EEPROM includes the diagnostic type at byte 92. */ + return efx_mcdi_phy_get_module_eeprom_byte(efx, 0, + SFF_DIAG_TYPE_OFFSET); +} - space_remaining -= to_copy; - user_data += to_copy; - page_off = 0; - page++; +static int efx_mcdi_phy_sff_8472_level(struct efx_nic *efx) +{ + /* Page zero of the EEPROM includes the DMT level at byte 94. */ + return efx_mcdi_phy_get_module_eeprom_byte(efx, 0, + SFF_DMT_LEVEL_OFFSET); +} + +static u32 efx_mcdi_phy_module_type(struct efx_nic *efx) +{ + struct efx_mcdi_phy_data *phy_data = efx->phy_data; + + if (phy_data->media != MC_CMD_MEDIA_QSFP_PLUS) + return phy_data->media; + + /* A QSFP+ NIC may actually have an SFP+ module attached. + * The ID is page 0, byte 0. + */ + switch (efx_mcdi_phy_get_module_eeprom_byte(efx, 0, 0)) { + case 0x3: + return MC_CMD_MEDIA_SFP_PLUS; + case 0xc: + case 0xd: + return MC_CMD_MEDIA_QSFP_PLUS; + default: + return 0; + } +} + +static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx, + struct ethtool_eeprom *ee, u8 *data) +{ + int rc; + ssize_t space_remaining = ee->len; + unsigned int page_off; + bool ignore_missing; + int num_pages; + int page; + + switch (efx_mcdi_phy_module_type(efx)) { + case MC_CMD_MEDIA_SFP_PLUS: + num_pages = efx_mcdi_phy_sff_8472_level(efx) > 0 ? + SFF_8472_NUM_PAGES : SFF_8079_NUM_PAGES; + page = 0; + ignore_missing = false; + break; + case MC_CMD_MEDIA_QSFP_PLUS: + num_pages = SFF_8436_NUM_PAGES; + page = -1; /* We obtain the lower page by asking for -1. */ + ignore_missing = true; /* Ignore missing pages after page 0. 
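+			 * SFF-8436 modules need not implement every upper
+			 * page; the loop below zero-fills a page the firmware
+			 * reports missing rather than failing the whole read.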
*/ + break; + default: + return -EOPNOTSUPP; + } + + page_off = ee->offset % SFP_PAGE_SIZE; + page += ee->offset / SFP_PAGE_SIZE; + + while (space_remaining && (page < num_pages)) { + rc = efx_mcdi_phy_get_module_eeprom_page(efx, page, + data, page_off, + space_remaining); + + if (rc > 0) { + space_remaining -= rc; + data += rc; + page_off = 0; + page++; + } else if (rc == 0) { + space_remaining = 0; + } else if (ignore_missing && (page > 0)) { + int intended_size = SFP_PAGE_SIZE - page_off; + + space_remaining -= intended_size; + if (space_remaining < 0) { + space_remaining = 0; + } else { + memset(data, 0, intended_size); + data += intended_size; + page_off = 0; + page++; + rc = 0; + } + } else { + return rc; + } } return 0; @@ -807,16 +919,42 @@ static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx, static int efx_mcdi_phy_get_module_info(struct efx_nic *efx, struct ethtool_modinfo *modinfo) { - struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + int sff_8472_level; + int diag_type; - switch (phy_cfg->media) { + switch (efx_mcdi_phy_module_type(efx)) { case MC_CMD_MEDIA_SFP_PLUS: - modinfo->type = ETH_MODULE_SFF_8079; - modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; - return 0; + sff_8472_level = efx_mcdi_phy_sff_8472_level(efx); + + /* If we can't read the diagnostics level we have none. */ + if (sff_8472_level < 0) + return -EOPNOTSUPP; + + /* Check if this module requires the (unsupported) address + * change operation. + */ + diag_type = efx_mcdi_phy_diag_type(efx); + + if ((sff_8472_level == 0) || + (diag_type & SFF_DIAG_ADDR_CHANGE)) { + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } + break; + + case MC_CMD_MEDIA_QSFP_PLUS: + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + break; + default: return -EOPNOTSUPP; } + + return 0; } static const struct efx_phy_operations efx_mcdi_phy_ops = { diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 02d41eb4a8e9..32bf1fecf864 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -653,24 +653,25 @@ void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) efx->n_tx_channels : 0)); } -int efx_setup_tc(struct net_device *net_dev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *ntc) +int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, + void *type_data) { struct efx_nic *efx = netdev_priv(net_dev); + struct tc_mqprio_qopt *mqprio = type_data; struct efx_channel *channel; struct efx_tx_queue *tx_queue; unsigned tc, num_tc; int rc; - if (ntc->type != TC_SETUP_MQPRIO) - return -EINVAL; + if (type != TC_SETUP_MQPRIO) + return -EOPNOTSUPP; - num_tc = ntc->mqprio->num_tc; + num_tc = mqprio->num_tc; if (num_tc > EFX_MAX_TX_TC) return -EINVAL; - ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; if (num_tc == net_dev->num_tc) return 0; diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 85c0e41f8021..97035766c291 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -45,6 +45,15 @@ config DWMAC_GENERIC platform specific code to function or is using platform data for setup. 
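(Aside, not what the patch merged: the new dwmac-anarion.c below stores
the control block as a uintptr_t and casts it back for each access. A
sketch of the more conventional shape, which keeps the __iomem
annotation so sparse can still type-check the MMIO accesses:)

struct anarion_gmac {
	void __iomem *ctl_block;	/* keep the cookie, don't launder it */
	u32 phy_intf_sel;
};

static u32 gmac_read_reg(struct anarion_gmac *gmac, u8 reg)
{
	return readl(gmac->ctl_block + reg);
}

static void gmac_write_reg(struct anarion_gmac *gmac, u8 reg, u32 val)
{
	writel(val, gmac->ctl_block + reg);
}

(anarion_config_dt() would then assign the devm_ioremap_resource()
result directly, with no cast.)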
+config DWMAC_ANARION + tristate "Adaptrum Anarion GMAC support" + default ARC + depends on OF && (ARC || COMPILE_TEST) + help + Support for Adaptrum Anarion GMAC Ethernet controller. + + This selects the Anarion SoC glue layer support for the stmmac driver. + config DWMAC_IPQ806X tristate "QCA IPQ806x DWMAC support" default ARCH_QCOM diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index fd4937a7fcab..238307fadcdb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -7,6 +7,7 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ # Ordering matters. Generic driver must be last. obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o +obj-$(CONFIG_DWMAC_ANARION) += dwmac-anarion.o obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c new file mode 100644 index 000000000000..85ce80c600c7 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c @@ -0,0 +1,152 @@ +/* + * Adaptrum Anarion DWMAC glue layer + * + * Copyright (C) 2017, Adaptrum, Inc. + * (Written by Alexandru Gagniuc <alex.g at adaptrum.com> for Adaptrum, Inc.) + * Licensed under the GPLv2 or (at your option) any later version. + */ + +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_net.h> +#include <linux/stmmac.h> + +#include "stmmac.h" +#include "stmmac_platform.h" + +#define GMAC_RESET_CONTROL_REG 0 +#define GMAC_SW_CONFIG_REG 4 +#define GMAC_CONFIG_INTF_SEL_MASK (0x7 << 0) +#define GMAC_CONFIG_INTF_RGMII (0x1 << 0) + +struct anarion_gmac { + uintptr_t ctl_block; + uint32_t phy_intf_sel; +}; + +static uint32_t gmac_read_reg(struct anarion_gmac *gmac, uint8_t reg) +{ + return readl((void *)(gmac->ctl_block + reg)); +}; + +static void gmac_write_reg(struct anarion_gmac *gmac, uint8_t reg, uint32_t val) +{ + writel(val, (void *)(gmac->ctl_block + reg)); +} + +static int anarion_gmac_init(struct platform_device *pdev, void *priv) +{ + uint32_t sw_config; + struct anarion_gmac *gmac = priv; + + /* Reset logic, configure interface mode, then release reset. SIMPLE! 
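+ * The core is held in reset while GMAC_CONFIG_INTF_SEL_MASK is
+ * rewritten, so the RGMII selection is already latched by the time
+ * GMAC_RESET_CONTROL_REG is cleared and the core comes back up.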
*/ + gmac_write_reg(gmac, GMAC_RESET_CONTROL_REG, 1); + + sw_config = gmac_read_reg(gmac, GMAC_SW_CONFIG_REG); + sw_config &= ~GMAC_CONFIG_INTF_SEL_MASK; + sw_config |= (gmac->phy_intf_sel & GMAC_CONFIG_INTF_SEL_MASK); + gmac_write_reg(gmac, GMAC_SW_CONFIG_REG, sw_config); + + gmac_write_reg(gmac, GMAC_RESET_CONTROL_REG, 0); + + return 0; +} + +static void anarion_gmac_exit(struct platform_device *pdev, void *priv) +{ + struct anarion_gmac *gmac = priv; + + gmac_write_reg(gmac, GMAC_RESET_CONTROL_REG, 1); +} + +static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev) +{ + int phy_mode; + struct resource *res; + void __iomem *ctl_block; + struct anarion_gmac *gmac; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + ctl_block = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ctl_block)) { + dev_err(&pdev->dev, "Cannot get reset region (%ld)!\n", + PTR_ERR(ctl_block)); + return ctl_block; + } + + gmac = devm_kzalloc(&pdev->dev, sizeof(*gmac), GFP_KERNEL); + if (!gmac) + return ERR_PTR(-ENOMEM); + + gmac->ctl_block = (uintptr_t)ctl_block; + + phy_mode = of_get_phy_mode(pdev->dev.of_node); + switch (phy_mode) { + case PHY_INTERFACE_MODE_RGMII: /* Fall through */ + case PHY_INTERFACE_MODE_RGMII_ID /* Fall through */: + case PHY_INTERFACE_MODE_RGMII_RXID: /* Fall through */ + case PHY_INTERFACE_MODE_RGMII_TXID: + gmac->phy_intf_sel = GMAC_CONFIG_INTF_RGMII; + break; + default: + dev_err(&pdev->dev, "Unsupported phy-mode (%d)\n", + phy_mode); + return ERR_PTR(-ENOTSUPP); + } + + return gmac; +} + +static int anarion_dwmac_probe(struct platform_device *pdev) +{ + int ret; + struct anarion_gmac *gmac; + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; + + ret = stmmac_get_platform_resources(pdev, &stmmac_res); + if (ret) + return ret; + + gmac = anarion_config_dt(pdev); + if (IS_ERR(gmac)) + return PTR_ERR(gmac); + + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + + plat_dat->init = anarion_gmac_init; + plat_dat->exit = anarion_gmac_exit; + anarion_gmac_init(pdev, gmac); + plat_dat->bsp_priv = gmac; + + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + if (ret) { + stmmac_remove_config_dt(pdev, plat_dat); + return ret; + } + + return 0; +} + +static const struct of_device_id anarion_dwmac_match[] = { + { .compatible = "adaptrum,anarion-gmac" }, + { } +}; +MODULE_DEVICE_TABLE(of, anarion_dwmac_match); + +static struct platform_driver anarion_dwmac_driver = { + .probe = anarion_dwmac_probe, + .remove = stmmac_pltfr_remove, + .driver = { + .name = "anarion-dwmac", + .pm = &stmmac_pltfr_pm_ops, + .of_match_table = anarion_dwmac_match, + }, +}; +module_platform_driver(anarion_dwmac_driver); + +MODULE_DESCRIPTION("Adaptrum Anarion DWMAC specific glue layer"); +MODULE_AUTHOR("Alexandru Gagniuc <mr.nuke.me@gmail.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c index 8603e397097e..5b56c24b6ed2 100644 --- a/drivers/net/ethernet/sun/ldmvsw.c +++ b/drivers/net/ethernet/sun/ldmvsw.c @@ -248,7 +248,7 @@ static struct net_device *vsw_alloc_netdev(u8 hwaddr[], dev->ethtool_ops = &vsw_ethtool_ops; dev->watchdog_timeo = VSW_TX_TIMEOUT; - dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG; + dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG; dev->features = dev->hw_features; /* MTU range: 68 - 65535 */ diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 4bb04aaf9650..6a4e8e1bbd90 100644 --- 
a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -9221,8 +9221,7 @@ static int niu_get_of_props(struct niu *np) phy_type = of_get_property(dp, "phy-type", &prop_len); if (!phy_type) { - netdev_err(dev, "%s: OF node lacks phy-type property\n", - dp->full_name); + netdev_err(dev, "%pOF: OF node lacks phy-type property\n", dp); return -EINVAL; } @@ -9232,26 +9231,25 @@ static int niu_get_of_props(struct niu *np) strcpy(np->vpd.phy_type, phy_type); if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { - netdev_err(dev, "%s: Illegal phy string [%s]\n", - dp->full_name, np->vpd.phy_type); + netdev_err(dev, "%pOF: Illegal phy string [%s]\n", + dp, np->vpd.phy_type); return -EINVAL; } mac_addr = of_get_property(dp, "local-mac-address", &prop_len); if (!mac_addr) { - netdev_err(dev, "%s: OF node lacks local-mac-address property\n", - dp->full_name); + netdev_err(dev, "%pOF: OF node lacks local-mac-address property\n", + dp); return -EINVAL; } if (prop_len != dev->addr_len) { - netdev_err(dev, "%s: OF MAC address prop len (%d) is wrong\n", - dp->full_name, prop_len); + netdev_err(dev, "%pOF: OF MAC address prop len (%d) is wrong\n", + dp, prop_len); } memcpy(dev->dev_addr, mac_addr, dev->addr_len); if (!is_valid_ether_addr(&dev->dev_addr[0])) { - netdev_err(dev, "%s: OF MAC address is invalid\n", - dp->full_name); - netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->dev_addr); + netdev_err(dev, "%pOF: OF MAC address is invalid\n", dp); + netdev_err(dev, "%pOF: [ %pM ]\n", dp, dev->dev_addr); return -EINVAL; } @@ -10027,8 +10025,8 @@ static int niu_of_probe(struct platform_device *op) reg = of_get_property(op->dev.of_node, "reg", NULL); if (!reg) { - dev_err(&op->dev, "%s: No 'reg' property, aborting\n", - op->dev.of_node->full_name); + dev_err(&op->dev, "%pOF: No 'reg' property, aborting\n", + op->dev.of_node); return -ENODEV; } diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 75b167e3fe98..0b95105f7060 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -312,7 +312,7 @@ static struct vnet *vnet_new(const u64 *local_mac, dev->watchdog_timeo = VNET_TX_TIMEOUT; dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE | - NETIF_F_IP_CSUM | NETIF_F_SG; + NETIF_F_HW_CSUM | NETIF_F_SG; dev->features = dev->hw_features; /* MTU range: 68 - 65535 */ diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c index 9e86833249d4..ecf456c7b6d1 100644 --- a/drivers/net/ethernet/sun/sunvnet_common.c +++ b/drivers/net/ethernet/sun/sunvnet_common.c @@ -303,7 +303,7 @@ static struct sk_buff *alloc_and_align_skb(struct net_device *dev, return skb; } -static inline void vnet_fullcsum(struct sk_buff *skb) +static inline void vnet_fullcsum_ipv4(struct sk_buff *skb) { struct iphdr *iph = ip_hdr(skb); int offset = skb_transport_offset(skb); @@ -335,6 +335,40 @@ static inline void vnet_fullcsum(struct sk_buff *skb) } } +#if IS_ENABLED(CONFIG_IPV6) +static inline void vnet_fullcsum_ipv6(struct sk_buff *skb) +{ + struct ipv6hdr *ip6h = ipv6_hdr(skb); + int offset = skb_transport_offset(skb); + + if (skb->protocol != htons(ETH_P_IPV6)) + return; + if (ip6h->nexthdr != IPPROTO_TCP && + ip6h->nexthdr != IPPROTO_UDP) + return; + skb->ip_summed = CHECKSUM_NONE; + skb->csum_level = 1; + skb->csum = 0; + if (ip6h->nexthdr == IPPROTO_TCP) { + struct tcphdr *ptcp = tcp_hdr(skb); + + ptcp->check = 0; + skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); + ptcp->check = 
csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, + skb->len - offset, IPPROTO_TCP, + skb->csum); + } else if (ip6h->nexthdr == IPPROTO_UDP) { + struct udphdr *pudp = udp_hdr(skb); + + pudp->check = 0; + skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); + pudp->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, + skb->len - offset, IPPROTO_UDP, + skb->csum); + } +} +#endif + static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc) { struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port); @@ -394,9 +428,14 @@ static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc) struct iphdr *iph = ip_hdr(skb); int ihl = iph->ihl * 4; - skb_reset_transport_header(skb); skb_set_transport_header(skb, ihl); - vnet_fullcsum(skb); + vnet_fullcsum_ipv4(skb); +#if IS_ENABLED(CONFIG_IPV6) + } else if (skb->protocol == htons(ETH_P_IPV6)) { + skb_set_transport_header(skb, + sizeof(struct ipv6hdr)); + vnet_fullcsum_ipv6(skb); +#endif } } if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) { @@ -1115,24 +1154,47 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies) if (skb->ip_summed == CHECKSUM_PARTIAL) start = skb_checksum_start_offset(skb); if (start) { - struct iphdr *iph = ip_hdr(nskb); int offset = start + nskb->csum_offset; + /* copy the headers, no csum here */ if (skb_copy_bits(skb, 0, nskb->data, start)) { dev_kfree_skb(nskb); dev_kfree_skb(skb); return NULL; } + + /* copy the rest, with csum calculation */ *(__sum16 *)(skb->data + offset) = 0; csum = skb_copy_and_csum_bits(skb, start, nskb->data + start, skb->len - start, 0); - if (iph->protocol == IPPROTO_TCP || - iph->protocol == IPPROTO_UDP) { - csum = csum_tcpudp_magic(iph->saddr, iph->daddr, - skb->len - start, - iph->protocol, csum); + + /* add in the header checksums */ + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(nskb); + + if (iph->protocol == IPPROTO_TCP || + iph->protocol == IPPROTO_UDP) { + csum = csum_tcpudp_magic(iph->saddr, + iph->daddr, + skb->len - start, + iph->protocol, + csum); + } + } else if (skb->protocol == htons(ETH_P_IPV6)) { + struct ipv6hdr *ip6h = ipv6_hdr(nskb); + + if (ip6h->nexthdr == IPPROTO_TCP || + ip6h->nexthdr == IPPROTO_UDP) { + csum = csum_ipv6_magic(&ip6h->saddr, + &ip6h->daddr, + skb->len - start, + ip6h->nexthdr, + csum); + } } + + /* save the final result */ *(__sum16 *)(nskb->data + offset) = csum; nskb->ip_summed = CHECKSUM_NONE; @@ -1318,8 +1380,14 @@ int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev, if (unlikely(!skb)) goto out_dropped; - if (skb->ip_summed == CHECKSUM_PARTIAL) - vnet_fullcsum(skb); + if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (skb->protocol == htons(ETH_P_IP)) + vnet_fullcsum_ipv4(skb); +#if IS_ENABLED(CONFIG_IPV6) + else if (skb->protocol == htons(ETH_P_IPV6)) + vnet_fullcsum_ipv6(skb); +#endif + } dr = &port->vio.drings[VIO_DRIVER_TX_RING]; i = skb_get_queue_mapping(skb); diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c index 3b91257683bc..e1b55b8fb8e0 100644 --- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c @@ -17,6 +17,7 @@ #include <linux/netdevice.h> #include <linux/tcp.h> +#include <linux/interrupt.h> #include "dwc-xlgmac.h" #include "dwc-xlgmac-reg.h" diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index badd0a8caeb9..c8776dbf1a55 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1321,8 
+1321,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) phy = of_phy_connect(priv->ndev, slave->data->phy_node, &cpsw_adjust_link, 0, slave->data->phy_if); if (!phy) { - dev_err(priv->dev, "phy \"%s\" not found on slave %d\n", - slave->data->phy_node->full_name, + dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n", + slave->data->phy_node, slave->slave_num); return; } @@ -2670,8 +2670,8 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, parp = of_get_property(slave_node, "phy_id", &lenp); if (slave_data->phy_node) { dev_dbg(&pdev->dev, - "slave[%d] using phy-handle=\"%s\"\n", - i, slave_data->phy_node->full_name); + "slave[%d] using phy-handle=\"%pOF\"\n", + i, slave_data->phy_node); } else if (of_phy_is_fixed_link(slave_node)) { /* In the case of a fixed PHY, the DT node associated * to the PHY is the Ethernet MAC DT node. diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 64d5527feb2a..4bb561856af5 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1480,8 +1480,8 @@ static int emac_dev_open(struct net_device *ndev) phydev = of_phy_connect(ndev, priv->phy_node, &emac_adjust_link, 0, 0); if (!phydev) { - dev_err(emac_dev, "could not connect to phy %s\n", - priv->phy_node->full_name); + dev_err(emac_dev, "could not connect to phy %pOF\n", + priv->phy_node); ret = -ENODEV; goto err; } diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 9d52c3a78621..eb96a6913235 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1877,20 +1877,21 @@ static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb, return 0; } -static int netcp_setup_tc(struct net_device *dev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *tc) +static int netcp_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) { + struct tc_mqprio_qopt *mqprio = type_data; u8 num_tc; int i; /* setup tc must be called under rtnl lock */ ASSERT_RTNL(); - if (tc->type != TC_SETUP_MQPRIO) - return -EINVAL; + if (type != TC_SETUP_MQPRIO) + return -EOPNOTSUPP; - tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - num_tc = tc->mqprio->num_tc; + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + num_tc = mqprio->num_tc; /* Sanity-check the number of traffic classes requested */ if ((dev->real_num_tx_queues <= 1) || diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index d73da8afe08e..60abc9250f56 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1089,7 +1089,7 @@ static int temac_of_probe(struct platform_device *op) lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0); if (lp->phy_node) - dev_dbg(lp->dev, "using PHY node %s (%p)\n", np->full_name, np); + dev_dbg(lp->dev, "using PHY node %pOF (%p)\n", np, np); /* Add the device attributes */ rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group); diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h index af27f7d1cbf3..5ef626331f85 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet.h +++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h @@ -389,7 +389,7 @@ struct axidma_bd { * @dma_err_tasklet: Tasklet structure to process Axi DMA errors * @tx_irq: Axidma TX IRQ number * @rx_irq: Axidma RX IRQ number - * @phy_type: Phy type to identify between 
MII/GMII/RGMII/SGMII/1000 Base-X + * @phy_mode: Phy type to identify between MII/GMII/RGMII/SGMII/1000 Base-X * @options: AxiEthernet option word * @last_link: Phy link state in which the PHY was negotiated earlier * @features: Stores the extended features supported by the axienet hw @@ -432,7 +432,7 @@ struct axienet_local { int tx_irq; int rx_irq; - u32 phy_type; + phy_interface_t phy_mode; u32 options; /* Current options word */ u32 last_link; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 33c595f4691d..e74e1e897864 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -531,11 +531,11 @@ static void axienet_adjust_link(struct net_device *ndev) link_state = phy->speed | (phy->duplex << 1) | phy->link; if (lp->last_link != link_state) { if ((phy->speed == SPEED_10) || (phy->speed == SPEED_100)) { - if (lp->phy_type == XAE_PHY_TYPE_1000BASE_X) + if (lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) setspeed = 0; } else { if ((phy->speed == SPEED_1000) && - (lp->phy_type == XAE_PHY_TYPE_MII)) + (lp->phy_mode == PHY_INTERFACE_MODE_MII)) setspeed = 0; } @@ -935,15 +935,8 @@ static int axienet_open(struct net_device *ndev) return ret; if (lp->phy_node) { - if (lp->phy_type == XAE_PHY_TYPE_GMII) { - phydev = of_phy_connect(lp->ndev, lp->phy_node, - axienet_adjust_link, 0, - PHY_INTERFACE_MODE_GMII); - } else if (lp->phy_type == XAE_PHY_TYPE_RGMII_2_0) { - phydev = of_phy_connect(lp->ndev, lp->phy_node, - axienet_adjust_link, 0, - PHY_INTERFACE_MODE_RGMII_ID); - } + phydev = of_phy_connect(lp->ndev, lp->phy_node, + axienet_adjust_link, 0, lp->phy_mode); if (!phydev) dev_err(lp->dev, "of_phy_connect() failed\n"); @@ -1539,7 +1532,38 @@ static int axienet_probe(struct platform_device *pdev) * the device-tree and accordingly set flags. 
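 * (The hunk below keeps both bindings: the deprecated "xlnx,phy-type"
 * values are mapped onto standard phy_interface_t constants with an
 * upgrade warning, and only when that property is absent does probe
 * fall back to the generic "phy-mode" binding via of_get_phy_mode().)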
*/ of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem); - of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &lp->phy_type); + + /* Start with the proprietary, and broken phy_type */ + ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value); + if (!ret) { + netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode"); + switch (value) { + case XAE_PHY_TYPE_MII: + lp->phy_mode = PHY_INTERFACE_MODE_MII; + break; + case XAE_PHY_TYPE_GMII: + lp->phy_mode = PHY_INTERFACE_MODE_GMII; + break; + case XAE_PHY_TYPE_RGMII_2_0: + lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID; + break; + case XAE_PHY_TYPE_SGMII: + lp->phy_mode = PHY_INTERFACE_MODE_SGMII; + break; + case XAE_PHY_TYPE_1000BASE_X: + lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX; + break; + default: + ret = -EINVAL; + goto free_netdev; + } + } else { + lp->phy_mode = of_get_phy_mode(pdev->dev.of_node); + if (lp->phy_mode < 0) { + ret = -EINVAL; + goto free_netdev; + } + } /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0); diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 2bbda71818ad..8b8565dd2afb 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -715,6 +715,7 @@ free_dst: static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, struct net_device *dev, + struct geneve_sock *gs4, struct flowi4 *fl4, const struct ip_tunnel_info *info) { @@ -724,7 +725,7 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, struct rtable *rt = NULL; __u8 tos; - if (!rcu_dereference(geneve->sock4)) + if (!gs4) return ERR_PTR(-EIO); memset(fl4, 0, sizeof(*fl4)); @@ -764,6 +765,7 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, #if IS_ENABLED(CONFIG_IPV6) static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, struct net_device *dev, + struct geneve_sock *gs6, struct flowi6 *fl6, const struct ip_tunnel_info *info) { @@ -771,10 +773,8 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, struct geneve_dev *geneve = netdev_priv(dev); struct dst_entry *dst = NULL; struct dst_cache *dst_cache; - struct geneve_sock *gs6; __u8 prio; - gs6 = rcu_dereference(geneve->sock6); if (!gs6) return ERR_PTR(-EIO); @@ -827,7 +827,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 df; int err; - rt = geneve_get_v4_rt(skb, dev, &fl4, info); + rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info); if (IS_ERR(rt)) return PTR_ERR(rt); @@ -866,7 +866,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 sport; int err; - dst = geneve_get_v6_dst(skb, dev, &fl6, info); + dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info); if (IS_ERR(dst)) return PTR_ERR(dst); @@ -951,8 +951,9 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) if (ip_tunnel_info_af(info) == AF_INET) { struct rtable *rt; struct flowi4 fl4; + struct geneve_sock *gs4 = rcu_dereference(geneve->sock4); - rt = geneve_get_v4_rt(skb, dev, &fl4, info); + rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info); if (IS_ERR(rt)) return PTR_ERR(rt); @@ -962,8 +963,9 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) } else if (ip_tunnel_info_af(info) == AF_INET6) { struct dst_entry *dst; struct flowi6 fl6; + struct geneve_sock *gs6 = rcu_dereference(geneve->sock6); - dst = geneve_get_v6_dst(skb, dev, &fl6, info); + dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info); if (IS_ERR(dst)) return PTR_ERR(dst); @@ 
-1014,16 +1016,22 @@ static struct device_type geneve_type = { * supply the listening GENEVE udp ports. Callers are expected * to implement the ndo_udp_tunnel_add. */ -static void geneve_push_rx_ports(struct net_device *dev) +static void geneve_offload_rx_ports(struct net_device *dev, bool push) { struct net *net = dev_net(dev); struct geneve_net *gn = net_generic(net, geneve_net_id); struct geneve_sock *gs; rcu_read_lock(); - list_for_each_entry_rcu(gs, &gn->sock_list, list) - udp_tunnel_push_rx_port(dev, gs->sock, - UDP_TUNNEL_TYPE_GENEVE); + list_for_each_entry_rcu(gs, &gn->sock_list, list) { + if (push) { + udp_tunnel_push_rx_port(dev, gs->sock, + UDP_TUNNEL_TYPE_GENEVE); + } else { + udp_tunnel_drop_rx_port(dev, gs->sock, + UDP_TUNNEL_TYPE_GENEVE); + } + } rcu_read_unlock(); } @@ -1140,6 +1148,15 @@ static bool is_tnl_info_zero(const struct ip_tunnel_info *info) return true; } +static bool geneve_dst_addr_equal(struct ip_tunnel_info *a, + struct ip_tunnel_info *b) +{ + if (ip_tunnel_info_af(a) == AF_INET) + return a->key.u.ipv4.dst == b->key.u.ipv4.dst; + else + return ipv6_addr_equal(&a->key.u.ipv6.dst, &b->key.u.ipv6.dst); +} + static int geneve_configure(struct net *net, struct net_device *dev, const struct ip_tunnel_info *info, bool metadata, bool ipv6_rx_csum) @@ -1197,24 +1214,22 @@ static void init_tnl_info(struct ip_tunnel_info *info, __u16 dst_port) info->key.tp_dst = htons(dst_port); } -static int geneve_newlink(struct net *net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], - struct netlink_ext_ack *extack) +static int geneve_nl2info(struct net_device *dev, struct nlattr *tb[], + struct nlattr *data[], struct ip_tunnel_info *info, + bool *metadata, bool *use_udp6_rx_checksums, + bool changelink) { - bool use_udp6_rx_checksums = false; - struct ip_tunnel_info info; - bool metadata = false; - - init_tnl_info(&info, GENEVE_UDP_PORT); - if (data[IFLA_GENEVE_REMOTE] && data[IFLA_GENEVE_REMOTE6]) return -EINVAL; if (data[IFLA_GENEVE_REMOTE]) { - info.key.u.ipv4.dst = + if (changelink && (ip_tunnel_info_af(info) == AF_INET6)) + return -EOPNOTSUPP; + + info->key.u.ipv4.dst = nla_get_in_addr(data[IFLA_GENEVE_REMOTE]); - if (IN_MULTICAST(ntohl(info.key.u.ipv4.dst))) { + if (IN_MULTICAST(ntohl(info->key.u.ipv4.dst))) { netdev_dbg(dev, "multicast remote is unsupported\n"); return -EINVAL; } @@ -1222,21 +1237,24 @@ static int geneve_newlink(struct net *net, struct net_device *dev, if (data[IFLA_GENEVE_REMOTE6]) { #if IS_ENABLED(CONFIG_IPV6) - info.mode = IP_TUNNEL_INFO_IPV6; - info.key.u.ipv6.dst = + if (changelink && (ip_tunnel_info_af(info) == AF_INET)) + return -EOPNOTSUPP; + + info->mode = IP_TUNNEL_INFO_IPV6; + info->key.u.ipv6.dst = nla_get_in6_addr(data[IFLA_GENEVE_REMOTE6]); - if (ipv6_addr_type(&info.key.u.ipv6.dst) & + if (ipv6_addr_type(&info->key.u.ipv6.dst) & IPV6_ADDR_LINKLOCAL) { netdev_dbg(dev, "link-local remote is unsupported\n"); return -EINVAL; } - if (ipv6_addr_is_multicast(&info.key.u.ipv6.dst)) { + if (ipv6_addr_is_multicast(&info->key.u.ipv6.dst)) { netdev_dbg(dev, "multicast remote is unsupported\n"); return -EINVAL; } - info.key.tun_flags |= TUNNEL_CSUM; - use_udp6_rx_checksums = true; + info->key.tun_flags |= TUNNEL_CSUM; + *use_udp6_rx_checksums = true; #else return -EPFNOSUPPORT; #endif @@ -1245,48 +1263,169 @@ static int geneve_newlink(struct net *net, struct net_device *dev, if (data[IFLA_GENEVE_ID]) { __u32 vni; __u8 tvni[3]; + __be64 tunid; vni = nla_get_u32(data[IFLA_GENEVE_ID]); tvni[0] = (vni & 0x00ff0000) >> 16; tvni[1] = (vni 
& 0x0000ff00) >> 8; tvni[2] = vni & 0x000000ff; - info.key.tun_id = vni_to_tunnel_id(tvni); + tunid = vni_to_tunnel_id(tvni); + if (changelink && (tunid != info->key.tun_id)) + return -EOPNOTSUPP; + info->key.tun_id = tunid; } + if (data[IFLA_GENEVE_TTL]) - info.key.ttl = nla_get_u8(data[IFLA_GENEVE_TTL]); + info->key.ttl = nla_get_u8(data[IFLA_GENEVE_TTL]); if (data[IFLA_GENEVE_TOS]) - info.key.tos = nla_get_u8(data[IFLA_GENEVE_TOS]); + info->key.tos = nla_get_u8(data[IFLA_GENEVE_TOS]); if (data[IFLA_GENEVE_LABEL]) { - info.key.label = nla_get_be32(data[IFLA_GENEVE_LABEL]) & + info->key.label = nla_get_be32(data[IFLA_GENEVE_LABEL]) & IPV6_FLOWLABEL_MASK; - if (info.key.label && (!(info.mode & IP_TUNNEL_INFO_IPV6))) + if (info->key.label && (!(info->mode & IP_TUNNEL_INFO_IPV6))) return -EINVAL; } - if (data[IFLA_GENEVE_PORT]) - info.key.tp_dst = nla_get_be16(data[IFLA_GENEVE_PORT]); + if (data[IFLA_GENEVE_PORT]) { + if (changelink) + return -EOPNOTSUPP; + info->key.tp_dst = nla_get_be16(data[IFLA_GENEVE_PORT]); + } - if (data[IFLA_GENEVE_COLLECT_METADATA]) - metadata = true; + if (data[IFLA_GENEVE_COLLECT_METADATA]) { + if (changelink) + return -EOPNOTSUPP; + *metadata = true; + } - if (data[IFLA_GENEVE_UDP_CSUM] && - nla_get_u8(data[IFLA_GENEVE_UDP_CSUM])) - info.key.tun_flags |= TUNNEL_CSUM; + if (data[IFLA_GENEVE_UDP_CSUM]) { + if (changelink) + return -EOPNOTSUPP; + if (nla_get_u8(data[IFLA_GENEVE_UDP_CSUM])) + info->key.tun_flags |= TUNNEL_CSUM; + } + + if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]) { + if (changelink) + return -EOPNOTSUPP; + if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX])) + info->key.tun_flags &= ~TUNNEL_CSUM; + } + + if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]) { + if (changelink) + return -EOPNOTSUPP; + if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX])) + *use_udp6_rx_checksums = false; + } + + return 0; +} - if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX] && - nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX])) - info.key.tun_flags &= ~TUNNEL_CSUM; +static int geneve_newlink(struct net *net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + bool use_udp6_rx_checksums = false; + struct ip_tunnel_info info; + bool metadata = false; + int err; - if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX] && - nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX])) - use_udp6_rx_checksums = false; + init_tnl_info(&info, GENEVE_UDP_PORT); + err = geneve_nl2info(dev, tb, data, &info, &metadata, + &use_udp6_rx_checksums, false); + if (err) + return err; return geneve_configure(net, dev, &info, metadata, use_udp6_rx_checksums); } +/* Quiesces the geneve device data path for both TX and RX. + * + * On transmit geneve checks for non-NULL geneve_sock before it proceeds. + * So, if we set that socket to NULL under RCU and wait for synchronize_net() + * to complete for the existing set of in-flight packets to be transmitted, + * then we would have quiesced the transmit data path. All the future packets + * will get dropped until we unquiesce the data path. + * + * On receive geneve dereference the geneve_sock stashed in the socket. So, + * if we set that to NULL under RCU and wait for synchronize_net() to + * complete, then we would have quiesced the receive data path. 
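+ * (The non-NULL check referred to above is the "if (!gs4)" test added
+ * to geneve_get_v4_rt() and the matching "if (!gs6)" test in
+ * geneve_get_v6_dst() earlier in this patch.)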
+ */ +static void geneve_quiesce(struct geneve_dev *geneve, struct geneve_sock **gs4, + struct geneve_sock **gs6) +{ + *gs4 = rtnl_dereference(geneve->sock4); + rcu_assign_pointer(geneve->sock4, NULL); + if (*gs4) + rcu_assign_sk_user_data((*gs4)->sock->sk, NULL); +#if IS_ENABLED(CONFIG_IPV6) + *gs6 = rtnl_dereference(geneve->sock6); + rcu_assign_pointer(geneve->sock6, NULL); + if (*gs6) + rcu_assign_sk_user_data((*gs6)->sock->sk, NULL); +#else + *gs6 = NULL; +#endif + synchronize_net(); +} + +/* Resumes the geneve device data path for both TX and RX. */ +static void geneve_unquiesce(struct geneve_dev *geneve, struct geneve_sock *gs4, + struct geneve_sock __maybe_unused *gs6) +{ + rcu_assign_pointer(geneve->sock4, gs4); + if (gs4) + rcu_assign_sk_user_data(gs4->sock->sk, gs4); +#if IS_ENABLED(CONFIG_IPV6) + rcu_assign_pointer(geneve->sock6, gs6); + if (gs6) + rcu_assign_sk_user_data(gs6->sock->sk, gs6); +#endif + synchronize_net(); +} + +static int geneve_changelink(struct net_device *dev, struct nlattr *tb[], + struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + struct geneve_dev *geneve = netdev_priv(dev); + struct geneve_sock *gs4, *gs6; + struct ip_tunnel_info info; + bool metadata; + bool use_udp6_rx_checksums; + int err; + + /* If the geneve device is configured for metadata (or externally + * controlled, for example, OVS), then nothing can be changed. + */ + if (geneve->collect_md) + return -EOPNOTSUPP; + + /* Start with the existing info. */ + memcpy(&info, &geneve->info, sizeof(info)); + metadata = geneve->collect_md; + use_udp6_rx_checksums = geneve->use_udp6_rx_checksums; + err = geneve_nl2info(dev, tb, data, &info, &metadata, + &use_udp6_rx_checksums, true); + if (err) + return err; + + if (!geneve_dst_addr_equal(&geneve->info, &info)) + dst_cache_reset(&info.dst_cache); + + geneve_quiesce(geneve, &gs4, &gs6); + geneve->info = info; + geneve->collect_md = metadata; + geneve->use_udp6_rx_checksums = use_udp6_rx_checksums; + geneve_unquiesce(geneve, gs4, gs6); + + return 0; +} + static void geneve_dellink(struct net_device *dev, struct list_head *head) { struct geneve_dev *geneve = netdev_priv(dev); @@ -1375,6 +1514,7 @@ static struct rtnl_link_ops geneve_link_ops __read_mostly = { .setup = geneve_setup, .validate = geneve_validate, .newlink = geneve_newlink, + .changelink = geneve_changelink, .dellink = geneve_dellink, .get_size = geneve_get_size, .fill_info = geneve_fill_info, @@ -1426,8 +1566,14 @@ static int geneve_netdevice_event(struct notifier_block *unused, { struct net_device *dev = netdev_notifier_info_to_dev(ptr); - if (event == NETDEV_UDP_TUNNEL_PUSH_INFO) - geneve_push_rx_ports(dev); + if (event == NETDEV_UDP_TUNNEL_PUSH_INFO || + event == NETDEV_UDP_TUNNEL_DROP_INFO) { + geneve_offload_rx_ports(dev, event == NETDEV_UDP_TUNNEL_PUSH_INFO); + } else if (event == NETDEV_UNREGISTER) { + geneve_offload_rx_ports(dev, false); + } else if (event == NETDEV_REGISTER) { + geneve_offload_rx_ports(dev, true); + } return NOTIFY_DONE; } diff --git a/drivers/net/hamradio/baycom_par.c b/drivers/net/hamradio/baycom_par.c index 92b13b39f426..e1783832d304 100644 --- a/drivers/net/hamradio/baycom_par.c +++ b/drivers/net/hamradio/baycom_par.c @@ -386,7 +386,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, /* --------------------------------------------------------------------- */ -static struct hdlcdrv_ops par96_ops = { +static const struct hdlcdrv_ops par96_ops = { .drvname = bc_drvname, .drvinfo = bc_drvinfo, .open = par96_open, diff --git 
a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c index d9a646acca20..190f66c88479 100644 --- a/drivers/net/hamradio/baycom_ser_fdx.c +++ b/drivers/net/hamradio/baycom_ser_fdx.c @@ -508,7 +508,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, /* --------------------------------------------------------------------- */ -static struct hdlcdrv_ops ser12_ops = { +static const struct hdlcdrv_ops ser12_ops = { .drvname = bc_drvname, .drvinfo = bc_drvinfo, .open = ser12_open, diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c index f1c8a9ff3891..3c823c648cf5 100644 --- a/drivers/net/hamradio/baycom_ser_hdx.c +++ b/drivers/net/hamradio/baycom_ser_hdx.c @@ -542,7 +542,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, /* --------------------------------------------------------------------- */ -static struct hdlcdrv_ops ser12_ops = { +static const struct hdlcdrv_ops ser12_ops = { .drvname = bc_drvname, .drvinfo = bc_drvinfo, .open = ser12_open, diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c index dec6b76bc0fb..cde41200f40a 100644 --- a/drivers/net/hamradio/dmascc.c +++ b/drivers/net/hamradio/dmascc.c @@ -581,7 +581,7 @@ static int __init setup_adapter(int card_base, int type, int n) priv->param.dma = -1; INIT_WORK(&priv->rx_work, rx_bh); dev->ml_priv = priv; - sprintf(dev->name, "dmascc%i", 2 * n + i); + snprintf(dev->name, sizeof(dev->name), "dmascc%i", 2 * n + i); dev->base_addr = card_base; dev->irq = irq; dev->netdev_ops = &scc_netdev_ops; diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 12cc64bfcff8..98b25f6900c8 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -147,7 +147,6 @@ struct hv_netvsc_packet { struct netvsc_device_info { unsigned char mac_adr[ETH_ALEN]; int ring_size; - u32 max_num_vrss_chns; u32 num_chn; }; @@ -183,13 +182,16 @@ struct rndis_device { /* Interface */ struct rndis_message; struct netvsc_device; -int netvsc_device_add(struct hv_device *device, - const struct netvsc_device_info *info); +struct net_device_context; + +struct netvsc_device *netvsc_device_add(struct hv_device *device, + const struct netvsc_device_info *info); +int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx); void netvsc_device_remove(struct hv_device *device); -int netvsc_send(struct hv_device *device, +int netvsc_send(struct net_device_context *ndc, struct hv_netvsc_packet *packet, struct rndis_message *rndis_msg, - struct hv_page_buffer **page_buffer, + struct hv_page_buffer *page_buffer, struct sk_buff *skb); void netvsc_linkstatus_callback(struct hv_device *device_obj, struct rndis_message *resp); @@ -200,10 +202,11 @@ int netvsc_recv_callback(struct net_device *net, const struct ndis_pkt_8021q_info *vlan); void netvsc_channel_cb(void *context); int netvsc_poll(struct napi_struct *napi, int budget); +bool rndis_filter_opened(const struct netvsc_device *nvdev); int rndis_filter_open(struct netvsc_device *nvdev); int rndis_filter_close(struct netvsc_device *nvdev); -int rndis_filter_device_add(struct hv_device *dev, - struct netvsc_device_info *info); +struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, + struct netvsc_device_info *info); void rndis_filter_update(struct netvsc_device *nvdev); void rndis_filter_device_remove(struct hv_device *dev, struct netvsc_device *nvdev); @@ -215,7 +218,8 @@ int rndis_filter_receive(struct net_device *ndev, struct 
vmbus_channel *channel, void *data, u32 buflen); -int rndis_filter_set_device_mac(struct net_device *ndev, char *mac); +int rndis_filter_set_device_mac(struct netvsc_device *ndev, + const char *mac); void netvsc_switch_datapath(struct net_device *nv_dev, bool vf); @@ -654,13 +658,10 @@ struct recv_comp_data { u32 status; }; -/* Netvsc Receive Slots Max */ -#define NETVSC_RECVSLOT_MAX (NETVSC_RECEIVE_BUFFER_SIZE / ETH_DATA_LEN + 1) - struct multi_recv_comp { - void *buf; /* queued receive completions */ - u32 first; /* first data entry */ - u32 next; /* next entry for writing */ + struct recv_comp_data *slots; + u32 first; /* first data entry */ + u32 next; /* next entry for writing */ }; struct netvsc_stats { @@ -679,6 +680,15 @@ struct netvsc_ethtool_stats { unsigned long tx_busy; }; +struct netvsc_vf_pcpu_stats { + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + struct u64_stats_sync syncp; + u32 tx_dropped; +}; + struct netvsc_reconfig { struct list_head list; u32 event; @@ -712,18 +722,19 @@ struct net_device_context { /* State to manage the associated VF interface. */ struct net_device __rcu *vf_netdev; + struct netvsc_vf_pcpu_stats __percpu *vf_stats; + struct work_struct vf_takeover; /* 1: allocated, serial number is valid. 0: not allocated */ u32 vf_alloc; /* Serial number of the VF to team with */ u32 vf_serial; - - bool datapath; /* 0 - synthetic, 1 - VF nic */ }; /* Per channel data */ struct netvsc_channel { struct vmbus_channel *channel; + struct netvsc_device *net_device; const struct vmpacket_descriptor *desc; struct napi_struct napi; struct multi_send_data msd; @@ -746,7 +757,7 @@ struct netvsc_device { u32 recv_buf_size; u32 recv_buf_gpadl_handle; u32 recv_section_cnt; - struct nvsp_1_receive_buffer_section *recv_section; + u32 recv_completion_cnt; /* Send buffer allocated by us */ void *send_buf; @@ -775,8 +786,6 @@ struct netvsc_device { u32 max_pkt; /* max number of pkt in one send, e.g. 8 */ u32 pkt_align; /* alignment bytes, e.g. 
8 */ - atomic_t num_outstanding_recvs; - atomic_t open_cnt; struct netvsc_channel chan_table[VRSS_CHANNEL_MAX]; @@ -784,18 +793,6 @@ struct netvsc_device { struct rcu_head rcu; }; -static inline struct netvsc_device * -net_device_to_netvsc_device(struct net_device *ndev) -{ - return ((struct net_device_context *)netdev_priv(ndev))->nvdev; -} - -static inline struct netvsc_device * -hv_device_to_netvsc_device(struct hv_device *device) -{ - return net_device_to_netvsc_device(hv_get_drvdata(device)); -} - /* NdisInitialize message */ struct rndis_initialize_request { u32 req_id; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index d18c3326a1f7..bffaf93d3cb0 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -29,6 +29,9 @@ #include <linux/netdevice.h> #include <linux/if_ether.h> #include <linux/vmalloc.h> +#include <linux/rtnetlink.h> +#include <linux/prefetch.h> + #include <asm/sync_bitops.h> #include "hyperv_net.h" @@ -41,7 +44,7 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf) { struct net_device_context *net_device_ctx = netdev_priv(ndev); struct hv_device *dev = net_device_ctx->device_ctx; - struct netvsc_device *nv_dev = net_device_ctx->nvdev; + struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev); struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt; memset(init_pkt, 0, sizeof(struct nvsp_message)); @@ -57,8 +60,6 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf) sizeof(struct nvsp_message), (unsigned long)init_pkt, VM_PKT_DATA_INBAND, 0); - - net_device_ctx->datapath = vf; } static struct netvsc_device *alloc_net_device(void) @@ -69,9 +70,6 @@ static struct netvsc_device *alloc_net_device(void) if (!net_device) return NULL; - net_device->chan_table[0].mrc.buf - = vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data)); - init_waitqueue_head(&net_device->wait_drain); net_device->destroy = false; atomic_set(&net_device->open_cnt, 0); @@ -90,7 +88,7 @@ static void free_netvsc_device(struct rcu_head *head) int i; for (i = 0; i < VRSS_CHANNEL_MAX; i++) - vfree(nvdev->chan_table[i].mrc.buf); + vfree(nvdev->chan_table[i].mrc.slots); kfree(nvdev); } @@ -104,7 +102,8 @@ static void netvsc_destroy_buf(struct hv_device *device) { struct nvsp_message *revoke_packet; struct net_device *ndev = hv_get_drvdata(device); - struct netvsc_device *net_device = net_device_to_netvsc_device(ndev); + struct net_device_context *ndc = netdev_priv(ndev); + struct netvsc_device *net_device = rtnl_dereference(ndc->nvdev); int ret; /* @@ -168,12 +167,6 @@ static void netvsc_destroy_buf(struct hv_device *device) net_device->recv_buf = NULL; } - if (net_device->recv_section) { - net_device->recv_section_cnt = 0; - kfree(net_device->recv_section); - net_device->recv_section = NULL; - } - /* Deal with the send buffer we may have setup. * If we got a send section size, it means we received a * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (ie sent @@ -236,11 +229,26 @@ static void netvsc_destroy_buf(struct hv_device *device) kfree(net_device->send_section_map); } +int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx) +{ + struct netvsc_channel *nvchan = &net_device->chan_table[q_idx]; + int node = cpu_to_node(nvchan->channel->target_cpu); + size_t size; + + size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data); + nvchan->mrc.slots = vzalloc_node(size, node); + if (!nvchan->mrc.slots) + nvchan->mrc.slots = vzalloc(size); + + return nvchan->mrc.slots ? 
0 : -ENOMEM; +} + static int netvsc_init_buf(struct hv_device *device, struct netvsc_device *net_device) { int ret = 0; struct nvsp_message *init_packet; + struct nvsp_1_message_send_receive_buffer_complete *resp; struct net_device *ndev; size_t map_words; int node; @@ -297,43 +305,41 @@ static int netvsc_init_buf(struct hv_device *device, wait_for_completion(&net_device->channel_init_wait); /* Check the response */ - if (init_packet->msg.v1_msg. - send_recv_buf_complete.status != NVSP_STAT_SUCCESS) { - netdev_err(ndev, "Unable to complete receive buffer " - "initialization with NetVsp - status %d\n", - init_packet->msg.v1_msg. - send_recv_buf_complete.status); + resp = &init_packet->msg.v1_msg.send_recv_buf_complete; + if (resp->status != NVSP_STAT_SUCCESS) { + netdev_err(ndev, + "Unable to complete receive buffer initialization with NetVsp - status %d\n", + resp->status); ret = -EINVAL; goto cleanup; } /* Parse the response */ + netdev_dbg(ndev, "Receive sections: %u sub_allocs: size %u count: %u\n", + resp->num_sections, resp->sections[0].sub_alloc_size, + resp->sections[0].num_sub_allocs); - net_device->recv_section_cnt = init_packet->msg. - v1_msg.send_recv_buf_complete.num_sections; - - net_device->recv_section = kmemdup( - init_packet->msg.v1_msg.send_recv_buf_complete.sections, - net_device->recv_section_cnt * - sizeof(struct nvsp_1_receive_buffer_section), - GFP_KERNEL); - if (net_device->recv_section == NULL) { - ret = -EINVAL; - goto cleanup; - } + net_device->recv_section_cnt = resp->num_sections; /* * For 1st release, there should only be 1 section that represents the * entire receive buffer */ if (net_device->recv_section_cnt != 1 || - net_device->recv_section->offset != 0) { + resp->sections[0].offset != 0) { ret = -EINVAL; goto cleanup; } - /* Now setup the send buffer. - */ + /* Setup receive completion ring */ + net_device->recv_completion_cnt + = round_up(resp->sections[0].num_sub_allocs + 1, + PAGE_SIZE / sizeof(u64)); + ret = netvsc_alloc_recv_comp_ring(net_device, 0); + if (ret) + goto cleanup; + + /* Now setup the send buffer. 
*/ net_device->send_buf = vzalloc_node(net_device->send_buf_size, node); if (!net_device->send_buf) net_device->send_buf = vzalloc(net_device->send_buf_size); @@ -550,7 +556,8 @@ void netvsc_device_remove(struct hv_device *device) { struct net_device *ndev = hv_get_drvdata(device); struct net_device_context *net_device_ctx = netdev_priv(ndev); - struct netvsc_device *net_device = net_device_ctx->nvdev; + struct netvsc_device *net_device + = rtnl_dereference(net_device_ctx->nvdev); int i; netvsc_disconnect_vsp(device); @@ -693,7 +700,7 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device, u32 pend_size, struct hv_netvsc_packet *packet, struct rndis_message *rndis_msg, - struct hv_page_buffer **pb, + struct hv_page_buffer *pb, struct sk_buff *skb) { char *start = net_device->send_buf; @@ -714,9 +721,9 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device, } for (i = 0; i < page_count; i++) { - char *src = phys_to_virt((*pb)[i].pfn << PAGE_SHIFT); - u32 offset = (*pb)[i].offset; - u32 len = (*pb)[i].len; + char *src = phys_to_virt(pb[i].pfn << PAGE_SHIFT); + u32 offset = pb[i].offset; + u32 len = pb[i].len; memcpy(dest, (src + offset), len); msg_size += len; @@ -735,36 +742,32 @@ static inline int netvsc_send_pkt( struct hv_device *device, struct hv_netvsc_packet *packet, struct netvsc_device *net_device, - struct hv_page_buffer **pb, + struct hv_page_buffer *pb, struct sk_buff *skb) { struct nvsp_message nvmsg; - struct netvsc_channel *nvchan - = &net_device->chan_table[packet->q_idx]; + struct nvsp_1_message_send_rndis_packet * const rpkt = + &nvmsg.msg.v1_msg.send_rndis_pkt; + struct netvsc_channel * const nvchan = + &net_device->chan_table[packet->q_idx]; struct vmbus_channel *out_channel = nvchan->channel; struct net_device *ndev = hv_get_drvdata(device); struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx); u64 req_id; int ret; - struct hv_page_buffer *pgbuf; u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound); nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT; - if (skb != NULL) { - /* 0 is RMC_DATA; */ - nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 0; - } else { - /* 1 is RMC_CONTROL; */ - nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 1; - } + if (skb) + rpkt->channel_type = 0; /* 0 is RMC_DATA */ + else + rpkt->channel_type = 1; /* 1 is RMC_CONTROL */ - nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_index = - packet->send_buf_index; + rpkt->send_buf_section_index = packet->send_buf_index; if (packet->send_buf_index == NETVSC_INVALID_INDEX) - nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0; + rpkt->send_buf_section_size = 0; else - nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size = - packet->total_data_buflen; + rpkt->send_buf_section_size = packet->total_data_buflen; req_id = (ulong)skb; @@ -772,11 +775,11 @@ static inline int netvsc_send_pkt( return -ENODEV; if (packet->page_buf_cnt) { - pgbuf = packet->cp_partial ? 
(*pb) + - packet->rmsg_pgcnt : (*pb); + if (packet->cp_partial) + pb += packet->rmsg_pgcnt; + ret = vmbus_sendpacket_pagebuffer_ctl(out_channel, - pgbuf, - packet->page_buf_cnt, + pb, packet->page_buf_cnt, &nvmsg, sizeof(struct nvsp_message), req_id, @@ -801,8 +804,10 @@ static inline int netvsc_send_pkt( ret = -ENOSPC; } } else { - netdev_err(ndev, "Unable to send packet %p ret %d\n", - packet, ret); + netdev_err(ndev, + "Unable to send packet pages %u len %u, ret %d\n", + packet->page_buf_cnt, packet->total_data_buflen, + ret); } return ret; @@ -820,13 +825,16 @@ static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send, msdp->count = 0; } -int netvsc_send(struct hv_device *device, +/* RCU already held by caller */ +int netvsc_send(struct net_device_context *ndev_ctx, struct hv_netvsc_packet *packet, struct rndis_message *rndis_msg, - struct hv_page_buffer **pb, + struct hv_page_buffer *pb, struct sk_buff *skb) { - struct netvsc_device *net_device = hv_device_to_netvsc_device(device); + struct netvsc_device *net_device + = rcu_dereference_bh(ndev_ctx->nvdev); + struct hv_device *device = ndev_ctx->device_ctx; int ret = 0; struct netvsc_channel *nvchan; u32 pktlen = packet->total_data_buflen, msd_len = 0; @@ -838,7 +846,7 @@ int netvsc_send(struct hv_device *device, bool xmit_more = (skb != NULL) ? skb->xmit_more : false; /* If device is rescinded, return error and packet will get dropped. */ - if (unlikely(net_device->destroy)) + if (unlikely(!net_device || net_device->destroy)) return -ENODEV; /* We may race with netvsc_connect_vsp()/netvsc_init_buf() and get @@ -943,130 +951,94 @@ send_now: return ret; } -static int netvsc_send_recv_completion(struct vmbus_channel *channel, - u64 transaction_id, u32 status) +/* Send pending recv completions */ +static int send_recv_completions(struct netvsc_channel *nvchan) { - struct nvsp_message recvcompMessage; + struct netvsc_device *nvdev = nvchan->net_device; + struct multi_recv_comp *mrc = &nvchan->mrc; + struct recv_comp_msg { + struct nvsp_message_header hdr; + u32 status; + } __packed; + struct recv_comp_msg msg = { + .hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE, + }; int ret; - recvcompMessage.hdr.msg_type = - NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE; - - recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status = status; - - /* Send the completion */ - ret = vmbus_sendpacket(channel, &recvcompMessage, - sizeof(struct nvsp_message_header) + sizeof(u32), - transaction_id, VM_PKT_COMP, 0); + while (mrc->first != mrc->next) { + const struct recv_comp_data *rcd + = mrc->slots + mrc->first; - return ret; -} + msg.status = rcd->status; + ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg), + rcd->tid, VM_PKT_COMP, 0); + if (unlikely(ret)) + return ret; -static inline void count_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx, - u32 *filled, u32 *avail) -{ - struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc; - u32 first = mrc->first; - u32 next = mrc->next; - - *filled = (first > next) ? 
NETVSC_RECVSLOT_MAX - first + next : - next - first; - - *avail = NETVSC_RECVSLOT_MAX - *filled - 1; -} - -/* Read the first filled slot, no change to index */ -static inline struct recv_comp_data *read_recv_comp_slot(struct netvsc_device - *nvdev, u16 q_idx) -{ - struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc; - u32 filled, avail; - - if (unlikely(!mrc->buf)) - return NULL; + if (++mrc->first == nvdev->recv_completion_cnt) + mrc->first = 0; + } - count_recv_comp_slot(nvdev, q_idx, &filled, &avail); - if (!filled) - return NULL; + /* receive completion ring has been emptied */ + if (unlikely(nvdev->destroy)) + wake_up(&nvdev->wait_drain); - return mrc->buf + mrc->first * sizeof(struct recv_comp_data); + return 0; } -/* Put the first filled slot back to available pool */ -static inline void put_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx) +/* Count how many receive completions are outstanding */ +static void recv_comp_slot_avail(const struct netvsc_device *nvdev, + const struct multi_recv_comp *mrc, + u32 *filled, u32 *avail) { - struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc; - int num_recv; + u32 count = nvdev->recv_completion_cnt; - mrc->first = (mrc->first + 1) % NETVSC_RECVSLOT_MAX; - - num_recv = atomic_dec_return(&nvdev->num_outstanding_recvs); + if (mrc->next >= mrc->first) + *filled = mrc->next - mrc->first; + else + *filled = (count - mrc->first) + mrc->next; - if (nvdev->destroy && num_recv == 0) - wake_up(&nvdev->wait_drain); + *avail = count - *filled - 1; } -/* Check and send pending recv completions */ -static void netvsc_chk_recv_comp(struct netvsc_device *nvdev, - struct vmbus_channel *channel, u16 q_idx) +/* Add receive complete to ring to send to host. */ +static void enq_receive_complete(struct net_device *ndev, + struct netvsc_device *nvdev, u16 q_idx, + u64 tid, u32 status) { + struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx]; + struct multi_recv_comp *mrc = &nvchan->mrc; struct recv_comp_data *rcd; - int ret; - - while (true) { - rcd = read_recv_comp_slot(nvdev, q_idx); - if (!rcd) - break; + u32 filled, avail; - ret = netvsc_send_recv_completion(channel, rcd->tid, - rcd->status); - if (ret) - break; + recv_comp_slot_avail(nvdev, mrc, &filled, &avail); - put_recv_comp_slot(nvdev, q_idx); + if (unlikely(filled > NAPI_POLL_WEIGHT)) { + send_recv_completions(nvchan); + recv_comp_slot_avail(nvdev, mrc, &filled, &avail); } -} - -#define NETVSC_RCD_WATERMARK 80 - -/* Get next available slot */ -static inline struct recv_comp_data *get_recv_comp_slot( - struct netvsc_device *nvdev, struct vmbus_channel *channel, u16 q_idx) -{ - struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc; - u32 filled, avail, next; - struct recv_comp_data *rcd; - - if (unlikely(!nvdev->recv_section)) - return NULL; - - if (unlikely(!mrc->buf)) - return NULL; - - if (atomic_read(&nvdev->num_outstanding_recvs) > - nvdev->recv_section->num_sub_allocs * NETVSC_RCD_WATERMARK / 100) - netvsc_chk_recv_comp(nvdev, channel, q_idx); - count_recv_comp_slot(nvdev, q_idx, &filled, &avail); - if (!avail) - return NULL; - - next = mrc->next; - rcd = mrc->buf + next * sizeof(struct recv_comp_data); - mrc->next = (next + 1) % NETVSC_RECVSLOT_MAX; + if (unlikely(!avail)) { + netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n", + q_idx, tid); + return; + } - atomic_inc(&nvdev->num_outstanding_recvs); + rcd = mrc->slots + mrc->next; + rcd->tid = tid; + rcd->status = status; - return rcd; + if (++mrc->next == nvdev->recv_completion_cnt) + mrc->next = 0; } static 
int netvsc_receive(struct net_device *ndev, - struct netvsc_device *net_device, - struct net_device_context *net_device_ctx, - struct hv_device *device, - struct vmbus_channel *channel, - const struct vmpacket_descriptor *desc, - struct nvsp_message *nvsp) + struct netvsc_device *net_device, + struct net_device_context *net_device_ctx, + struct hv_device *device, + struct vmbus_channel *channel, + const struct vmpacket_descriptor *desc, + struct nvsp_message *nvsp) { const struct vmtransfer_page_packet_header *vmxferpage_packet = container_of(desc, const struct vmtransfer_page_packet_header, d); @@ -1075,7 +1047,6 @@ static int netvsc_receive(struct net_device *ndev, u32 status = NVSP_STAT_SUCCESS; int i; int count = 0; - int ret; /* Make sure this is a valid nvsp packet */ if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) { @@ -1106,25 +1077,9 @@ static int netvsc_receive(struct net_device *ndev, channel, data, buflen); } - if (net_device->chan_table[q_idx].mrc.buf) { - struct recv_comp_data *rcd; + enq_receive_complete(ndev, net_device, q_idx, + vmxferpage_packet->d.trans_id, status); - rcd = get_recv_comp_slot(net_device, channel, q_idx); - if (rcd) { - rcd->tid = vmxferpage_packet->d.trans_id; - rcd->status = status; - } else { - netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n", - q_idx, vmxferpage_packet->d.trans_id); - } - } else { - ret = netvsc_send_recv_completion(channel, - vmxferpage_packet->d.trans_id, - status); - if (ret) - netdev_err(ndev, "Recv_comp q:%hd, tid:%llx, err:%d\n", - q_idx, vmxferpage_packet->d.trans_id, ret); - } return count; } @@ -1220,11 +1175,10 @@ int netvsc_poll(struct napi_struct *napi, int budget) { struct netvsc_channel *nvchan = container_of(napi, struct netvsc_channel, napi); + struct netvsc_device *net_device = nvchan->net_device; struct vmbus_channel *channel = nvchan->channel; struct hv_device *device = netvsc_channel_to_device(channel); - u16 q_idx = channel->offermsg.offer.sub_channel_index; struct net_device *ndev = hv_get_drvdata(device); - struct netvsc_device *net_device = net_device_to_netvsc_device(ndev); int work_done = 0; /* If starting a new interval */ @@ -1237,17 +1191,23 @@ int netvsc_poll(struct napi_struct *napi, int budget) nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc); } - /* If receive ring was exhausted * and not doing busy poll + /* if ring is empty, signal host */ + if (!nvchan->desc) + hv_pkt_iter_close(channel); + + /* If send of pending receive completions succeeded + * and did not exhaust NAPI budget this time + * and not doing busy poll * then re-enable host interrupts - * and reschedule if ring is not empty. 
*/ - if (work_done < budget && + if (send_recv_completions(nvchan) == 0 && + work_done < budget && napi_complete_done(napi, work_done) && - hv_end_read(&channel->inbound) != 0) + hv_end_read(&channel->inbound)) { + hv_begin_read(&channel->inbound); napi_reschedule(napi); - - netvsc_chk_recv_comp(net_device, channel, q_idx); + } /* Driver may overshoot since multiple packets per descriptor */ return min(work_done, budget); @@ -1259,10 +1219,15 @@ int netvsc_poll(struct napi_struct *napi, int budget) void netvsc_channel_cb(void *context) { struct netvsc_channel *nvchan = context; + struct vmbus_channel *channel = nvchan->channel; + struct hv_ring_buffer_info *rbi = &channel->inbound; + + /* preload first vmpacket descriptor */ + prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index); if (napi_schedule_prep(&nvchan->napi)) { /* disable interrupts from host */ - hv_begin_read(&nvchan->channel->inbound); + hv_begin_read(rbi); __napi_schedule(&nvchan->napi); } @@ -1272,8 +1237,8 @@ void netvsc_channel_cb(void *context) * netvsc_device_add - Callback when the device belonging to this * driver is added */ -int netvsc_device_add(struct hv_device *device, - const struct netvsc_device_info *device_info) +struct netvsc_device *netvsc_device_add(struct hv_device *device, + const struct netvsc_device_info *device_info) { int i, ret = 0; int ring_size = device_info->ring_size; @@ -1283,7 +1248,7 @@ int netvsc_device_add(struct hv_device *device, net_device = alloc_net_device(); if (!net_device) - return -ENOMEM; + return ERR_PTR(-ENOMEM); net_device->ring_size = ring_size; @@ -1303,6 +1268,7 @@ int netvsc_device_add(struct hv_device *device, struct netvsc_channel *nvchan = &net_device->chan_table[i]; nvchan->channel = device->channel; + nvchan->net_device = net_device; u64_stats_init(&nvchan->tx_stats.syncp); u64_stats_init(&nvchan->rx_stats.syncp); } @@ -1341,10 +1307,11 @@ int netvsc_device_add(struct hv_device *device, goto close; } - return ret; + return net_device; close: - netif_napi_del(&net_device->chan_table[0].napi); + RCU_INIT_POINTER(net_device_ctx->nvdev, NULL); + napi_disable(&net_device->chan_table[0].napi); /* Now, we can close the channel safely */ vmbus_close(device->channel); @@ -1352,6 +1319,5 @@ close: cleanup: free_netvsc_device(&net_device->rcu); - return ret; - + return ERR_PTR(ret); } diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 0d78727f1a14..eb0023f55fe1 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -33,6 +33,9 @@ #include <linux/if_vlan.h> #include <linux/in.h> #include <linux/slab.h> +#include <linux/rtnetlink.h> +#include <linux/netpoll.h> + #include <net/arp.h> #include <net/route.h> #include <net/sock.h> @@ -69,7 +72,8 @@ static void netvsc_set_multicast_list(struct net_device *net) static int netvsc_open(struct net_device *net) { struct net_device_context *ndev_ctx = netdev_priv(net); - struct netvsc_device *nvdev = ndev_ctx->nvdev; + struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); + struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev); struct rndis_device *rdev; int ret = 0; @@ -85,15 +89,29 @@ static int netvsc_open(struct net_device *net) netif_tx_wake_all_queues(net); rdev = nvdev->extension; - if (!rdev->link_state && !ndev_ctx->datapath) + + if (!rdev->link_state) netif_carrier_on(net); - return ret; + if (vf_netdev) { + /* Setting synthetic device up transparently sets + * slave as up. 
If open fails, then the slave will + * still be offline (and not used). + */ + ret = dev_open(vf_netdev); + if (ret) + netdev_warn(net, + "unable to open slave: %s: %d\n", + vf_netdev->name, ret); + } + return 0; } static int netvsc_close(struct net_device *net) { struct net_device_context *net_device_ctx = netdev_priv(net); + struct net_device *vf_netdev + = rtnl_dereference(net_device_ctx->vf_netdev); struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); int ret; u32 aread, i, msec = 10, retry = 0, retry_max = 20; @@ -139,6 +157,9 @@ static int netvsc_close(struct net_device *net) ret = -ETIMEDOUT; } + if (vf_netdev) + dev_close(vf_netdev); + return ret; } @@ -222,13 +243,11 @@ static inline int netvsc_get_tx_queue(struct net_device *ndev, * * TODO support XPS - but get_xps_queue not exported */ -static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) +static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb) { - unsigned int num_tx_queues = ndev->real_num_tx_queues; int q_idx = sk_tx_queue_get(skb->sk); - if (q_idx < 0 || skb->ooo_okay) { + if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) { /* If forwarding a packet, we use the recorded queue when * available for better cache locality. */ @@ -238,12 +257,33 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, q_idx = netvsc_get_tx_queue(ndev, skb, q_idx); } - while (unlikely(q_idx >= num_tx_queues)) - q_idx -= num_tx_queues; - return q_idx; } +static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, + void *accel_priv, + select_queue_fallback_t fallback) +{ + struct net_device_context *ndc = netdev_priv(ndev); + struct net_device *vf_netdev; + u16 txq; + + rcu_read_lock(); + vf_netdev = rcu_dereference(ndc->vf_netdev); + if (vf_netdev) { + txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0; + qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping; + } else { + txq = netvsc_pick_tx(ndev, skb); + } + rcu_read_unlock(); + + while (unlikely(txq >= ndev->real_num_tx_queues)) + txq -= ndev->real_num_tx_queues; + + return txq; +} + static u32 fill_pg_buf(struct page *page, u32 offset, u32 len, struct hv_page_buffer *pb) { @@ -280,9 +320,8 @@ static u32 fill_pg_buf(struct page *page, u32 offset, u32 len, static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb, struct hv_netvsc_packet *packet, - struct hv_page_buffer **page_buf) + struct hv_page_buffer *pb) { - struct hv_page_buffer *pb = *page_buf; u32 slots_used = 0; char *data = skb->data; int frags = skb_shinfo(skb)->nr_frags; @@ -359,13 +398,40 @@ static u32 net_checksum_info(struct sk_buff *skb) if (ip6->nexthdr == IPPROTO_TCP) return TRANSPORT_INFO_IPV6_TCP; - else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP) + else if (ip6->nexthdr == IPPROTO_UDP) return TRANSPORT_INFO_IPV6_UDP; } return TRANSPORT_INFO_NOT_IP; } +/* Send skb on the slave VF device. 
*/ +static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev, + struct sk_buff *skb) +{ + struct net_device_context *ndev_ctx = netdev_priv(net); + unsigned int len = skb->len; + int rc; + + skb->dev = vf_netdev; + skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping; + + rc = dev_queue_xmit(skb); + if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) { + struct netvsc_vf_pcpu_stats *pcpu_stats + = this_cpu_ptr(ndev_ctx->vf_stats); + + u64_stats_update_begin(&pcpu_stats->syncp); + pcpu_stats->tx_packets++; + pcpu_stats->tx_bytes += len; + u64_stats_update_end(&pcpu_stats->syncp); + } else { + this_cpu_inc(ndev_ctx->vf_stats->tx_dropped); + } + + return rc; +} + static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) { struct net_device_context *net_device_ctx = netdev_priv(net); @@ -374,11 +440,19 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) unsigned int num_data_pgs; struct rndis_message *rndis_msg; struct rndis_packet *rndis_pkt; + struct net_device *vf_netdev; u32 rndis_msg_size; struct rndis_per_packet_info *ppi; u32 hash; - struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT]; - struct hv_page_buffer *pb = page_buf; + struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT]; + + /* if VF is present and up then redirect packets + * already called with rcu_read_lock_bh + */ + vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev); + if (vf_netdev && netif_running(vf_netdev) && + !netpoll_tx_running(net)) + return netvsc_vf_xmit(net, vf_netdev, skb); /* We will atmost need two pages to describe the rndis * header. We can only transmit MAX_PAGE_BUFFER_COUNT number @@ -524,12 +598,12 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) rndis_msg->msg_len += rndis_msg_size; packet->total_data_buflen = rndis_msg->msg_len; packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size, - skb, packet, &pb); + skb, packet, pb); /* timestamp packet in software */ skb_tx_timestamp(skb); - ret = netvsc_send(net_device_ctx->device_ctx, packet, - rndis_msg, &pb, skb); + + ret = netvsc_send(net_device_ctx, packet, rndis_msg, pb, skb); if (likely(ret == 0)) return NETDEV_TX_OK; @@ -658,29 +732,18 @@ int netvsc_recv_callback(struct net_device *net, struct netvsc_device *net_device; u16 q_idx = channel->offermsg.offer.sub_channel_index; struct netvsc_channel *nvchan; - struct net_device *vf_netdev; struct sk_buff *skb; struct netvsc_stats *rx_stats; if (net->reg_state != NETREG_REGISTERED) return NVSP_STAT_FAIL; - /* - * If necessary, inject this packet into the VF interface. - * On Hyper-V, multicast and brodcast packets are only delivered - * to the synthetic interface (after subjecting these to - * policy filters on the host). Deliver these via the VF - * interface in the guest. - */ rcu_read_lock(); net_device = rcu_dereference(net_device_ctx->nvdev); if (unlikely(!net_device)) goto drop; nvchan = &net_device->chan_table[q_idx]; - vf_netdev = rcu_dereference(net_device_ctx->vf_netdev); - if (vf_netdev && (vf_netdev->flags & IFF_UP)) - net = vf_netdev; /* Allocate a skb - TODO direct I/O to pages? 
*/ skb = netvsc_alloc_recv_skb(net, &nvchan->napi, @@ -692,8 +755,7 @@ drop: return NVSP_STAT_FAIL; } - if (net != vf_netdev) - skb_record_rx_queue(skb, q_idx); + skb_record_rx_queue(skb, q_idx); /* * Even if injecting the packet, record the statistics @@ -736,39 +798,16 @@ static void netvsc_get_channels(struct net_device *net, } } -static int netvsc_set_queues(struct net_device *net, struct hv_device *dev, - u32 num_chn) -{ - struct netvsc_device_info device_info; - int ret; - - memset(&device_info, 0, sizeof(device_info)); - device_info.num_chn = num_chn; - device_info.ring_size = ring_size; - device_info.max_num_vrss_chns = num_chn; - - ret = rndis_filter_device_add(dev, &device_info); - if (ret) - return ret; - - ret = netif_set_real_num_tx_queues(net, num_chn); - if (ret) - return ret; - - ret = netif_set_real_num_rx_queues(net, num_chn); - - return ret; -} - static int netvsc_set_channels(struct net_device *net, struct ethtool_channels *channels) { struct net_device_context *net_device_ctx = netdev_priv(net); struct hv_device *dev = net_device_ctx->device_ctx; struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); - unsigned int count = channels->combined_count; - bool was_running; - int ret; + unsigned int orig, count = channels->combined_count; + struct netvsc_device_info device_info; + bool was_opened; + int ret = 0; /* We do not support separate count for rx, tx, or other */ if (count == 0 || @@ -787,25 +826,32 @@ static int netvsc_set_channels(struct net_device *net, if (count > nvdev->max_chn) return -EINVAL; - was_running = netif_running(net); - if (was_running) { - ret = netvsc_close(net); - if (ret) - return ret; - } + orig = nvdev->num_chn; + was_opened = rndis_filter_opened(nvdev); + if (was_opened) + rndis_filter_close(nvdev); rndis_filter_device_remove(dev, nvdev); - ret = netvsc_set_queues(net, dev, count); - if (ret == 0) - nvdev->num_chn = count; - else - netvsc_set_queues(net, dev, nvdev->num_chn); + memset(&device_info, 0, sizeof(device_info)); + device_info.num_chn = count; + device_info.ring_size = ring_size; - if (was_running) - ret = netvsc_open(net); + nvdev = rndis_filter_device_add(dev, &device_info); + if (!IS_ERR(nvdev)) { + netif_set_real_num_tx_queues(net, nvdev->num_chn); + netif_set_real_num_rx_queues(net, nvdev->num_chn); + } else { + ret = PTR_ERR(nvdev); + device_info.num_chn = orig; + rndis_filter_device_add(dev, &device_info); + } + + if (was_opened) + rndis_filter_open(nvdev); /* We may have missed link change notifications */ + net_device_ctx->last_reconfig = 0; schedule_delayed_work(&net_device_ctx->dwork, 0); return ret; @@ -869,41 +915,53 @@ static int netvsc_set_link_ksettings(struct net_device *dev, static int netvsc_change_mtu(struct net_device *ndev, int mtu) { struct net_device_context *ndevctx = netdev_priv(ndev); + struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev); struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); struct hv_device *hdev = ndevctx->device_ctx; + int orig_mtu = ndev->mtu; struct netvsc_device_info device_info; - bool was_running; + bool was_opened; int ret = 0; if (!nvdev || nvdev->destroy) return -ENODEV; - was_running = netif_running(ndev); - if (was_running) { - ret = netvsc_close(ndev); + /* Change MTU of underlying VF netdev first. 
*/ + if (vf_netdev) { + ret = dev_set_mtu(vf_netdev, mtu); if (ret) return ret; } + netif_device_detach(ndev); + was_opened = rndis_filter_opened(nvdev); + if (was_opened) + rndis_filter_close(nvdev); + memset(&device_info, 0, sizeof(device_info)); device_info.ring_size = ring_size; device_info.num_chn = nvdev->num_chn; - device_info.max_num_vrss_chns = nvdev->num_chn; rndis_filter_device_remove(hdev, nvdev); - /* 'nvdev' has been freed in rndis_filter_device_remove() -> - * netvsc_device_remove () -> free_netvsc_device(). - * We mustn't access it before it's re-created in - * rndis_filter_device_add() -> netvsc_device_add(). - */ - ndev->mtu = mtu; - rndis_filter_device_add(hdev, &device_info); + nvdev = rndis_filter_device_add(hdev, &device_info); + if (IS_ERR(nvdev)) { + ret = PTR_ERR(nvdev); - if (was_running) - ret = netvsc_open(ndev); + /* Attempt rollback to original MTU */ + ndev->mtu = orig_mtu; + rndis_filter_device_add(hdev, &device_info); + + if (vf_netdev) + dev_set_mtu(vf_netdev, orig_mtu); + } + + if (was_opened) + rndis_filter_open(nvdev); + + netif_device_attach(ndev); /* We may have missed link change notifications */ schedule_delayed_work(&ndevctx->dwork, 0); @@ -911,16 +969,56 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) return ret; } +static void netvsc_get_vf_stats(struct net_device *net, + struct netvsc_vf_pcpu_stats *tot) +{ + struct net_device_context *ndev_ctx = netdev_priv(net); + int i; + + memset(tot, 0, sizeof(*tot)); + + for_each_possible_cpu(i) { + const struct netvsc_vf_pcpu_stats *stats + = per_cpu_ptr(ndev_ctx->vf_stats, i); + u64 rx_packets, rx_bytes, tx_packets, tx_bytes; + unsigned int start; + + do { + start = u64_stats_fetch_begin_irq(&stats->syncp); + rx_packets = stats->rx_packets; + tx_packets = stats->tx_packets; + rx_bytes = stats->rx_bytes; + tx_bytes = stats->tx_bytes; + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + + tot->rx_packets += rx_packets; + tot->tx_packets += tx_packets; + tot->rx_bytes += rx_bytes; + tot->tx_bytes += tx_bytes; + tot->tx_dropped += stats->tx_dropped; + } +} + static void netvsc_get_stats64(struct net_device *net, struct rtnl_link_stats64 *t) { struct net_device_context *ndev_ctx = netdev_priv(net); struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev); - int i; + struct netvsc_vf_pcpu_stats vf_tot; + int i; if (!nvdev) return; + netdev_stats_to_stats64(t, &net->stats); + + netvsc_get_vf_stats(net, &vf_tot); + t->rx_packets += vf_tot.rx_packets; + t->tx_packets += vf_tot.tx_packets; + t->rx_bytes += vf_tot.rx_bytes; + t->tx_bytes += vf_tot.tx_bytes; + t->tx_dropped += vf_tot.tx_dropped; + for (i = 0; i < nvdev->num_chn; i++) { const struct netvsc_channel *nvchan = &nvdev->chan_table[i]; const struct netvsc_stats *stats; @@ -949,16 +1047,12 @@ static void netvsc_get_stats64(struct net_device *net, t->rx_packets += packets; t->multicast += multicast; } - - t->tx_dropped = net->stats.tx_dropped; - t->tx_errors = net->stats.tx_errors; - - t->rx_dropped = net->stats.rx_dropped; - t->rx_errors = net->stats.rx_errors; } static int netvsc_set_mac_addr(struct net_device *ndev, void *p) { + struct net_device_context *ndc = netdev_priv(ndev); + struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); struct sockaddr *addr = p; char save_adr[ETH_ALEN]; unsigned char save_aatype; @@ -971,7 +1065,10 @@ static int netvsc_set_mac_addr(struct net_device *ndev, void *p) if (err != 0) return err; - err = rndis_filter_set_device_mac(ndev, addr->sa_data); + if (!nvdev) + return -ENODEV; 
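/* [Editor's aside, not part of the diff] The netvsc_set_mac_addr() hunk above
 * and the MTU-change hunk before it both follow the same save/attempt/roll-back
 * idiom: remember the old value, apply the new one optimistically, and restore
 * the saved value if the host-side call fails. Below is a minimal, self-contained
 * userspace C sketch of that idiom; set_hw_mac() is a hypothetical stand-in for
 * the host-side call (rndis_filter_set_device_mac()), not a real kernel API.
 */
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Pretend host-side call: reject multicast addresses, as a host might. */
static int set_hw_mac(const unsigned char *mac)
{
	return (mac[0] & 1) ? -1 : 0;
}

static int set_mac_with_rollback(unsigned char *cur, const unsigned char *mac)
{
	unsigned char save_adr[ETH_ALEN];
	int err;

	memcpy(save_adr, cur, ETH_ALEN);	/* remember the old MAC */
	memcpy(cur, mac, ETH_ALEN);		/* optimistic software update */

	err = set_hw_mac(mac);			/* may fail on the host side */
	if (err)
		memcpy(cur, save_adr, ETH_ALEN);	/* roll back on failure */

	return err;
}

int main(void)
{
	unsigned char cur[ETH_ALEN] = { 0x00, 0x15, 0x5d, 0x00, 0x00, 0x01 };
	const unsigned char bad[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	/* Fails because bad[] is a multicast address; cur[] keeps its old value. */
	printf("err=%d first_octet=%02x\n", set_mac_with_rollback(cur, bad), cur[0]);
	return 0;
}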
+ + err = rndis_filter_set_device_mac(nvdev, addr->sa_data); if (err != 0) { /* roll back to saved MAC */ memcpy(ndev->dev_addr, save_adr, ETH_ALEN); @@ -990,9 +1087,16 @@ static const struct { { "tx_no_space", offsetof(struct netvsc_ethtool_stats, tx_no_space) }, { "tx_too_big", offsetof(struct netvsc_ethtool_stats, tx_too_big) }, { "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) }, +}, vf_stats[] = { + { "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) }, + { "vf_rx_bytes", offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) }, + { "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats, tx_packets) }, + { "vf_tx_bytes", offsetof(struct netvsc_vf_pcpu_stats, tx_bytes) }, + { "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats, tx_dropped) }, }; #define NETVSC_GLOBAL_STATS_LEN ARRAY_SIZE(netvsc_stats) +#define NETVSC_VF_STATS_LEN ARRAY_SIZE(vf_stats) /* 4 statistics per queue (rx/tx packets/bytes) */ #define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4) @@ -1007,7 +1111,9 @@ static int netvsc_get_sset_count(struct net_device *dev, int string_set) switch (string_set) { case ETH_SS_STATS: - return NETVSC_GLOBAL_STATS_LEN + NETVSC_QUEUE_STATS_LEN(nvdev); + return NETVSC_GLOBAL_STATS_LEN + + NETVSC_VF_STATS_LEN + + NETVSC_QUEUE_STATS_LEN(nvdev); default: return -EINVAL; } @@ -1017,9 +1123,10 @@ static void netvsc_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct net_device_context *ndc = netdev_priv(dev); - struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev); + struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); const void *nds = &ndc->eth_stats; const struct netvsc_stats *qstats; + struct netvsc_vf_pcpu_stats sum; unsigned int start; u64 packets, bytes; int i, j; @@ -1030,6 +1137,10 @@ static void netvsc_get_ethtool_stats(struct net_device *dev, for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++) data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset); + netvsc_get_vf_stats(dev, &sum); + for (j = 0; j < NETVSC_VF_STATS_LEN; j++) + data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset); + for (j = 0; j < nvdev->num_chn; j++) { qstats = &nvdev->chan_table[j].tx_stats; @@ -1055,7 +1166,7 @@ static void netvsc_get_ethtool_stats(struct net_device *dev, static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) { struct net_device_context *ndc = netdev_priv(dev); - struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev); + struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); u8 *p = data; int i; @@ -1064,11 +1175,16 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) switch (stringset) { case ETH_SS_STATS: - for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) - memcpy(p + i * ETH_GSTRING_LEN, - netvsc_stats[i].name, ETH_GSTRING_LEN); + for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) { + memcpy(p, netvsc_stats[i].name, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + + for (i = 0; i < ARRAY_SIZE(vf_stats); i++) { + memcpy(p, vf_stats[i].name, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } - p += i * ETH_GSTRING_LEN; for (i = 0; i < nvdev->num_chn; i++) { sprintf(p, "tx_queue_%u_packets", i); p += ETH_GSTRING_LEN; @@ -1113,7 +1229,7 @@ netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rules) { struct net_device_context *ndc = netdev_priv(dev); - struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev); + struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); if (!nvdev) return -ENODEV; @@ -1163,7 +1279,7 @@ static int 
netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) { struct net_device_context *ndc = netdev_priv(dev); - struct netvsc_device *ndev = rcu_dereference(ndc->nvdev); + struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev); struct rndis_device *rndis_dev; int i; @@ -1308,8 +1424,7 @@ static void netvsc_link_change(struct work_struct *w) case RNDIS_STATUS_MEDIA_CONNECT: if (rdev->link_state) { rdev->link_state = false; - if (!ndev_ctx->datapath) - netif_carrier_on(net); + netif_carrier_on(net); netif_tx_wake_all_queues(net); } else { notify = true; @@ -1386,7 +1501,7 @@ static struct net_device *get_netvsc_byref(struct net_device *vf_netdev) continue; /* not a netvsc device */ net_device_ctx = netdev_priv(dev); - if (net_device_ctx->nvdev == NULL) + if (!rtnl_dereference(net_device_ctx->nvdev)) continue; /* device is removed */ if (rtnl_dereference(net_device_ctx->vf_netdev) == vf_netdev) @@ -1396,6 +1511,108 @@ static struct net_device *get_netvsc_byref(struct net_device *vf_netdev) return NULL; } +/* Called when VF is injecting data into network stack. + * Change the associated network device from VF to netvsc. + * note: already called with rcu_read_lock + */ +static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data); + struct net_device_context *ndev_ctx = netdev_priv(ndev); + struct netvsc_vf_pcpu_stats *pcpu_stats + = this_cpu_ptr(ndev_ctx->vf_stats); + + skb->dev = ndev; + + u64_stats_update_begin(&pcpu_stats->syncp); + pcpu_stats->rx_packets++; + pcpu_stats->rx_bytes += skb->len; + u64_stats_update_end(&pcpu_stats->syncp); + + return RX_HANDLER_ANOTHER; +} + +static int netvsc_vf_join(struct net_device *vf_netdev, + struct net_device *ndev) +{ + struct net_device_context *ndev_ctx = netdev_priv(ndev); + int ret; + + ret = netdev_rx_handler_register(vf_netdev, + netvsc_vf_handle_frame, ndev); + if (ret != 0) { + netdev_err(vf_netdev, + "can not register netvsc VF receive handler (err = %d)\n", + ret); + goto rx_handler_failed; + } + + ret = netdev_upper_dev_link(vf_netdev, ndev); + if (ret != 0) { + netdev_err(vf_netdev, + "can not set master device %s (err = %d)\n", + ndev->name, ret); + goto upper_link_failed; + } + + /* set slave flag before open to prevent IPv6 addrconf */ + vf_netdev->flags |= IFF_SLAVE; + + schedule_work(&ndev_ctx->vf_takeover); + + netdev_info(vf_netdev, "joined to %s\n", ndev->name); + return 0; + +upper_link_failed: + netdev_rx_handler_unregister(vf_netdev); +rx_handler_failed: + return ret; +} + +static void __netvsc_vf_setup(struct net_device *ndev, + struct net_device *vf_netdev) +{ + int ret; + + call_netdevice_notifiers(NETDEV_JOIN, vf_netdev); + + /* Align MTU of VF with master */ + ret = dev_set_mtu(vf_netdev, ndev->mtu); + if (ret) + netdev_warn(vf_netdev, + "unable to change mtu to %u\n", ndev->mtu); + + if (netif_running(ndev)) { + ret = dev_open(vf_netdev); + if (ret) + netdev_warn(vf_netdev, + "unable to open: %d\n", ret); + } +} + +/* Setup VF as slave of the synthetic device. + * Runs in workqueue to avoid recursion in netlink callbacks. 
+ */ +static void netvsc_vf_setup(struct work_struct *w) +{ + struct net_device_context *ndev_ctx + = container_of(w, struct net_device_context, vf_takeover); + struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx); + struct net_device *vf_netdev; + + if (!rtnl_trylock()) { + schedule_work(w); + return; + } + + vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); + if (vf_netdev) + __netvsc_vf_setup(ndev, vf_netdev); + + rtnl_unlock(); +} + static int netvsc_register_vf(struct net_device *vf_netdev) { struct net_device *ndev; @@ -1419,10 +1636,12 @@ static int netvsc_register_vf(struct net_device *vf_netdev) if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev)) return NOTIFY_DONE; + if (netvsc_vf_join(vf_netdev, ndev) != 0) + return NOTIFY_DONE; + netdev_info(ndev, "VF registering: %s\n", vf_netdev->name); - /* - * Take a reference on the module. - */ + + /* Prevent this module from being unloaded while VF is registered */ try_module_get(THIS_MODULE); dev_hold(vf_netdev); @@ -1432,9 +1651,9 @@ static int netvsc_register_vf(struct net_device *vf_netdev) static int netvsc_vf_up(struct net_device *vf_netdev) { - struct net_device *ndev; - struct netvsc_device *netvsc_dev; struct net_device_context *net_device_ctx; + struct netvsc_device *netvsc_dev; + struct net_device *ndev; ndev = get_netvsc_byref(vf_netdev); if (!ndev) @@ -1442,33 +1661,24 @@ static int netvsc_vf_up(struct net_device *vf_netdev) net_device_ctx = netdev_priv(ndev); netvsc_dev = rtnl_dereference(net_device_ctx->nvdev); + if (!netvsc_dev) + return NOTIFY_DONE; - netdev_info(ndev, "VF up: %s\n", vf_netdev->name); - - /* - * Open the device before switching data path. - */ + /* Bump refcount when datapath is active - Why? */ rndis_filter_open(netvsc_dev); - /* - * notify the host to switch the data path. - */ + /* notify the host to switch the data path. */ netvsc_switch_datapath(ndev, true); netdev_info(ndev, "Data path switched to VF: %s\n", vf_netdev->name); - netif_carrier_off(ndev); - - /* Now notify peers through VF device. */ - call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, vf_netdev); - return NOTIFY_OK; } static int netvsc_vf_down(struct net_device *vf_netdev) { - struct net_device *ndev; - struct netvsc_device *netvsc_dev; struct net_device_context *net_device_ctx; + struct netvsc_device *netvsc_dev; + struct net_device *ndev; ndev = get_netvsc_byref(vf_netdev); if (!ndev) @@ -1476,15 +1686,12 @@ static int netvsc_vf_down(struct net_device *vf_netdev) net_device_ctx = netdev_priv(ndev); netvsc_dev = rtnl_dereference(net_device_ctx->nvdev); + if (!netvsc_dev) + return NOTIFY_DONE; - netdev_info(ndev, "VF down: %s\n", vf_netdev->name); netvsc_switch_datapath(ndev, false); netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name); rndis_filter_close(netvsc_dev); - netif_carrier_on(ndev); - - /* Now notify peers through netvsc device. 
*/ - call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ndev); return NOTIFY_OK; } @@ -1499,9 +1706,11 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev) return NOTIFY_DONE; net_device_ctx = netdev_priv(ndev); + cancel_work_sync(&net_device_ctx->vf_takeover); netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name); + netdev_upper_dev_unlink(vf_netdev, ndev); RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL); dev_put(vf_netdev); module_put(THIS_MODULE); @@ -1515,12 +1724,12 @@ static int netvsc_probe(struct hv_device *dev, struct net_device_context *net_device_ctx; struct netvsc_device_info device_info; struct netvsc_device *nvdev; - int ret; + int ret = -ENOMEM; net = alloc_etherdev_mq(sizeof(struct net_device_context), VRSS_CHANNEL_MAX); if (!net) - return -ENOMEM; + goto no_net; netif_carrier_off(net); @@ -1539,6 +1748,12 @@ static int netvsc_probe(struct hv_device *dev, spin_lock_init(&net_device_ctx->lock); INIT_LIST_HEAD(&net_device_ctx->reconfig_events); + INIT_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup); + + net_device_ctx->vf_stats + = netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats); + if (!net_device_ctx->vf_stats) + goto no_stats; net->netdev_ops = &device_ops; net->ethtool_ops = ðtool_ops; @@ -1551,13 +1766,14 @@ static int netvsc_probe(struct hv_device *dev, memset(&device_info, 0, sizeof(device_info)); device_info.ring_size = ring_size; device_info.num_chn = VRSS_CHANNEL_DEFAULT; - ret = rndis_filter_device_add(dev, &device_info); - if (ret != 0) { + + nvdev = rndis_filter_device_add(dev, &device_info); + if (IS_ERR(nvdev)) { + ret = PTR_ERR(nvdev); netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); - free_netdev(net); - hv_set_drvdata(dev, NULL); - return ret; + goto rndis_failed; } + memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); /* hw_features computed in rndis_filter_device_add */ @@ -1566,11 +1782,11 @@ static int netvsc_probe(struct hv_device *dev, NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; net->vlan_features = net->features; - /* RCU not necessary here, device not registered */ - nvdev = net_device_ctx->nvdev; netif_set_real_num_tx_queues(net, nvdev->num_chn); netif_set_real_num_rx_queues(net, nvdev->num_chn); + netdev_lockdep_set_classes(net); + /* MTU range: 68 - 1500 or 65521 */ net->min_mtu = NETVSC_MTU_MIN; if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2) @@ -1581,11 +1797,20 @@ static int netvsc_probe(struct hv_device *dev, ret = register_netdev(net); if (ret != 0) { pr_err("Unable to register netdev.\n"); - rndis_filter_device_remove(dev, nvdev); - free_netdev(net); + goto register_failed; } return ret; + +register_failed: + rndis_filter_device_remove(dev, nvdev); +rndis_failed: + free_percpu(net_device_ctx->vf_stats); +no_stats: + hv_set_drvdata(dev, NULL); + free_netdev(net); +no_net: + return ret; } static int netvsc_remove(struct hv_device *dev) @@ -1611,13 +1836,15 @@ static int netvsc_remove(struct hv_device *dev) * removed. Also blocks mtu and channel changes. 
*/ rtnl_lock(); - rndis_filter_device_remove(dev, ndev_ctx->nvdev); + rndis_filter_device_remove(dev, + rtnl_dereference(ndev_ctx->nvdev)); rtnl_unlock(); unregister_netdev(net); hv_set_drvdata(dev, NULL); + free_percpu(ndev_ctx->vf_stats); free_netdev(net); return 0; } diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index d6308ffda53e..36e9ee82ec6f 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -28,6 +28,7 @@ #include <linux/if_vlan.h> #include <linux/nls.h> #include <linux/vmalloc.h> +#include <linux/rtnetlink.h> #include "hyperv_net.h" @@ -213,11 +214,11 @@ static void dump_rndis_message(struct hv_device *hv_dev, static int rndis_filter_send_request(struct rndis_device *dev, struct rndis_request *req) { - int ret; struct hv_netvsc_packet *packet; struct hv_page_buffer page_buf[2]; struct hv_page_buffer *pb = page_buf; struct net_device_context *net_device_ctx = netdev_priv(dev->ndev); + int ret; /* Setup the packet to send it */ packet = &req->pkt; @@ -243,7 +244,10 @@ static int rndis_filter_send_request(struct rndis_device *dev, pb[0].len; } - ret = netvsc_send(net_device_ctx->device_ctx, packet, NULL, &pb, NULL); + rcu_read_lock_bh(); + ret = netvsc_send(net_device_ctx, packet, NULL, pb, NULL); + rcu_read_unlock_bh(); + return ret; } @@ -443,8 +447,9 @@ int rndis_filter_receive(struct net_device *ndev, return 0; } -static int rndis_filter_query_device(struct rndis_device *dev, u32 oid, - void *result, u32 *result_size) +static int rndis_filter_query_device(struct rndis_device *dev, + struct netvsc_device *nvdev, + u32 oid, void *result, u32 *result_size) { struct rndis_request *request; u32 inresult_size = *result_size; @@ -471,8 +476,6 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid, query->dev_vc_handle = 0; if (oid == OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES) { - struct net_device_context *ndevctx = netdev_priv(dev->ndev); - struct netvsc_device *nvdev = ndevctx->nvdev; struct ndis_offload *hwcaps; u32 nvsp_version = nvdev->nvsp_version; u8 ndis_rev; @@ -541,14 +544,15 @@ cleanup: /* Get the hardware offload capabilities */ static int -rndis_query_hwcaps(struct rndis_device *dev, struct ndis_offload *caps) +rndis_query_hwcaps(struct rndis_device *dev, struct netvsc_device *net_device, + struct ndis_offload *caps) { u32 caps_len = sizeof(*caps); int ret; memset(caps, 0, sizeof(*caps)); - ret = rndis_filter_query_device(dev, + ret = rndis_filter_query_device(dev, net_device, OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES, caps, &caps_len); if (ret) @@ -577,11 +581,12 @@ rndis_query_hwcaps(struct rndis_device *dev, struct ndis_offload *caps) return 0; } -static int rndis_filter_query_device_mac(struct rndis_device *dev) +static int rndis_filter_query_device_mac(struct rndis_device *dev, + struct netvsc_device *net_device) { u32 size = ETH_ALEN; - return rndis_filter_query_device(dev, + return rndis_filter_query_device(dev, net_device, RNDIS_OID_802_3_PERMANENT_ADDRESS, dev->hw_mac_adr, &size); } @@ -589,9 +594,9 @@ static int rndis_filter_query_device_mac(struct rndis_device *dev) #define NWADR_STR "NetworkAddress" #define NWADR_STRLEN 14 -int rndis_filter_set_device_mac(struct net_device *ndev, char *mac) +int rndis_filter_set_device_mac(struct netvsc_device *nvdev, + const char *mac) { - struct netvsc_device *nvdev = net_device_to_netvsc_device(ndev); struct rndis_device *rdev = nvdev->extension; struct rndis_request *request; struct rndis_set_request *set; @@ -645,11 +650,8 @@ int 
rndis_filter_set_device_mac(struct net_device *ndev, char *mac) wait_for_completion(&request->wait_event); set_complete = &request->response_msg.msg.set_complete; - if (set_complete->status != RNDIS_STATUS_SUCCESS) { - netdev_err(ndev, "Fail to set MAC on host side:0x%x\n", - set_complete->status); - ret = -EINVAL; - } + if (set_complete->status != RNDIS_STATUS_SUCCESS) + ret = -EIO; cleanup: put_rndis_request(rdev, request); @@ -658,9 +660,9 @@ cleanup: static int rndis_filter_set_offload_params(struct net_device *ndev, + struct netvsc_device *nvdev, struct ndis_offload_params *req_offloads) { - struct netvsc_device *nvdev = net_device_to_netvsc_device(ndev); struct rndis_device *rdev = nvdev->extension; struct rndis_request *request; struct rndis_set_request *set; @@ -782,27 +784,27 @@ cleanup: return ret; } -static int rndis_filter_query_device_link_status(struct rndis_device *dev) +static int rndis_filter_query_device_link_status(struct rndis_device *dev, + struct netvsc_device *net_device) { u32 size = sizeof(u32); u32 link_status; - int ret; - - ret = rndis_filter_query_device(dev, - RNDIS_OID_GEN_MEDIA_CONNECT_STATUS, - &link_status, &size); - return ret; + return rndis_filter_query_device(dev, net_device, + RNDIS_OID_GEN_MEDIA_CONNECT_STATUS, + &link_status, &size); } -static int rndis_filter_query_link_speed(struct rndis_device *dev) +static int rndis_filter_query_link_speed(struct rndis_device *dev, + struct netvsc_device *net_device) { u32 size = sizeof(u32); u32 link_speed; struct net_device_context *ndc; int ret; - ret = rndis_filter_query_device(dev, RNDIS_OID_GEN_LINK_SPEED, + ret = rndis_filter_query_device(dev, net_device, + RNDIS_OID_GEN_LINK_SPEED, &link_speed, &size); if (!ret) { @@ -871,14 +873,14 @@ void rndis_filter_update(struct netvsc_device *nvdev) schedule_work(&rdev->mcast_work); } -static int rndis_filter_init_device(struct rndis_device *dev) +static int rndis_filter_init_device(struct rndis_device *dev, + struct netvsc_device *nvdev) { struct rndis_request *request; struct rndis_initialize_request *init; struct rndis_initialize_complete *init_complete; u32 status; int ret; - struct netvsc_device *nvdev = net_device_to_netvsc_device(dev->ndev); request = get_rndis_request(dev, RNDIS_MSG_INIT, RNDIS_MESSAGE_SIZE(struct rndis_initialize_request)); @@ -926,12 +928,12 @@ static bool netvsc_device_idle(const struct netvsc_device *nvdev) { int i; - if (atomic_read(&nvdev->num_outstanding_recvs) > 0) - return false; - for (i = 0; i < nvdev->num_chn; i++) { const struct netvsc_channel *nvchan = &nvdev->chan_table[i]; + if (nvchan->mrc.first != nvchan->mrc.next) + return false; + if (atomic_read(&nvchan->queue_sends) > 0) return false; } @@ -944,7 +946,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev) struct rndis_request *request; struct rndis_halt_request *halt; struct net_device_context *net_device_ctx = netdev_priv(dev->ndev); - struct netvsc_device *nvdev = net_device_ctx->nvdev; + struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); /* Attempt to do a rndis device halt */ request = get_rndis_request(dev, RNDIS_MSG_HALT, @@ -1015,20 +1017,20 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) { struct net_device *ndev = hv_get_drvdata(new_sc->primary_channel->device_obj); - struct netvsc_device *nvscdev = net_device_to_netvsc_device(ndev); + struct net_device_context *ndev_ctx = netdev_priv(ndev); + struct netvsc_device *nvscdev; u16 chn_index = new_sc->offermsg.offer.sub_channel_index; struct netvsc_channel *nvchan; int 
ret; - if (chn_index >= nvscdev->num_chn) + /* This is safe because this callback only happens when + * new device is being setup and waiting on the channel_init_wait. + */ + nvscdev = rcu_dereference_raw(ndev_ctx->nvdev); + if (!nvscdev || chn_index >= nvscdev->num_chn) return; nvchan = nvscdev->chan_table + chn_index; - nvchan->mrc.buf - = vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data)); - - if (!nvchan->mrc.buf) - return; /* Because the device uses NAPI, all the interrupt batching and * control is done via Net softirq, not the channel handling @@ -1052,8 +1054,8 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) wake_up(&nvscdev->subchan_open); } -int rndis_filter_device_add(struct hv_device *dev, - struct netvsc_device_info *device_info) +struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, + struct netvsc_device_info *device_info) { struct net_device *net = hv_get_drvdata(dev); struct net_device_context *net_device_ctx = netdev_priv(net); @@ -1072,21 +1074,20 @@ int rndis_filter_device_add(struct hv_device *dev, rndis_device = get_rndis_device(); if (!rndis_device) - return -ENODEV; + return ERR_PTR(-ENODEV); /* * Let the inner driver handle this first to create the netvsc channel * NOTE! Once the channel is created, we may get a receive callback * (RndisFilterOnReceive()) before this call is completed */ - ret = netvsc_device_add(dev, device_info); - if (ret != 0) { + net_device = netvsc_device_add(dev, device_info); + if (IS_ERR(net_device)) { kfree(rndis_device); - return ret; + return net_device; } /* Initialize the rndis device */ - net_device = net_device_ctx->nvdev; net_device->max_chn = 1; net_device->num_chn = 1; @@ -1094,35 +1095,29 @@ int rndis_filter_device_add(struct hv_device *dev, rndis_device->ndev = net; /* Send the rndis initialization message */ - ret = rndis_filter_init_device(rndis_device); - if (ret != 0) { - rndis_filter_device_remove(dev, net_device); - return ret; - } + ret = rndis_filter_init_device(rndis_device, net_device); + if (ret != 0) + goto err_dev_remv; /* Get the MTU from the host */ size = sizeof(u32); - ret = rndis_filter_query_device(rndis_device, + ret = rndis_filter_query_device(rndis_device, net_device, RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE, &mtu, &size); if (ret == 0 && size == sizeof(u32) && mtu < net->mtu) net->mtu = mtu; /* Get the mac address */ - ret = rndis_filter_query_device_mac(rndis_device); - if (ret != 0) { - rndis_filter_device_remove(dev, net_device); - return ret; - } + ret = rndis_filter_query_device_mac(rndis_device, net_device); + if (ret != 0) + goto err_dev_remv; memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN); /* Find HW offload capabilities */ - ret = rndis_query_hwcaps(rndis_device, &hwcaps); - if (ret != 0) { - rndis_filter_device_remove(dev, net_device); - return ret; - } + ret = rndis_query_hwcaps(rndis_device, net_device, &hwcaps); + if (ret != 0) + goto err_dev_remv; /* A value of zero means "no change"; now turn on what we want. 
*/ memset(&offloads, 0, sizeof(struct ndis_offload_params)); @@ -1177,24 +1172,24 @@ int rndis_filter_device_add(struct hv_device *dev, netif_set_gso_max_size(net, gso_max_size); - ret = rndis_filter_set_offload_params(net, &offloads); + ret = rndis_filter_set_offload_params(net, net_device, &offloads); if (ret) goto err_dev_remv; - rndis_filter_query_device_link_status(rndis_device); + rndis_filter_query_device_link_status(rndis_device, net_device); netdev_dbg(net, "Device MAC %pM link state %s\n", rndis_device->hw_mac_adr, rndis_device->link_state ? "down" : "up"); if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5) - return 0; + return net_device; - rndis_filter_query_link_speed(rndis_device); + rndis_filter_query_link_speed(rndis_device, net_device); /* vRSS setup */ memset(&rsscap, 0, rsscap_size); - ret = rndis_filter_query_device(rndis_device, + ret = rndis_filter_query_device(rndis_device, net_device, OID_GEN_RECEIVE_SCALE_CAPABILITIES, &rsscap, &rsscap_size); if (ret || rsscap.num_recv_que < 2) @@ -1222,7 +1217,16 @@ int rndis_filter_device_add(struct hv_device *dev, atomic_set(&net_device->open_chn, 1); num_rss_qs = net_device->num_chn - 1; if (num_rss_qs == 0) - return 0; + return net_device; + + for (i = 1; i < net_device->num_chn; i++) { + ret = netvsc_alloc_recv_comp_ring(net_device, i); + if (ret) { + while (--i != 0) + vfree(net_device->chan_table[i].mrc.slots); + goto out; + } + } vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open); @@ -1262,11 +1266,11 @@ out: net_device->num_chn = 1; } - return 0; /* return 0 because primary channel can be used alone */ + return net_device; err_dev_remv: rndis_filter_device_remove(dev, net_device); - return ret; + return ERR_PTR(ret); } void rndis_filter_device_remove(struct hv_device *dev, @@ -1304,3 +1308,8 @@ int rndis_filter_close(struct netvsc_device *nvdev) return rndis_filter_close_device(nvdev->extension); } + +bool rndis_filter_opened(const struct netvsc_device *nvdev) +{ + return atomic_read(&nvdev->open_cnt) > 0; +} diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index a626c539fb17..326243fae7e2 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -66,6 +66,7 @@ #include <linux/spinlock.h> #include <linux/string.h> #include <linux/workqueue.h> +#include <linux/interrupt.h> #include <net/ieee802154_netdev.h> #include <net/mac802154.h> diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c index 7d334963dc08..ee7084b2d52d 100644 --- a/drivers/net/ieee802154/mrf24j40.c +++ b/drivers/net/ieee802154/mrf24j40.c @@ -1330,7 +1330,8 @@ static int mrf24j40_probe(struct spi_device *spi) if (spi->max_speed_hz > MAX_SPI_SPEED_HZ) { dev_warn(&spi->dev, "spi clock above possible maximum: %d", MAX_SPI_SPEED_HZ); - return -EINVAL; + ret = -EINVAL; + goto err_register_device; } ret = mrf24j40_hw_init(devrec); diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 8dab74a81303..58a9f990b553 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -169,7 +169,7 @@ static void ipvlan_port_destroy(struct net_device *dev) #define IPVLAN_FEATURES \ (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ - NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \ + NETIF_F_GSO | NETIF_F_TSO | NETIF_F_GSO_ROBUST | \ NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER) diff --git 
a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c index 22f133ea8d7b..5dea2063dbc8 100644 --- a/drivers/net/ipvlan/ipvtap.c +++ b/drivers/net/ipvlan/ipvtap.c @@ -24,7 +24,7 @@ #include <linux/virtio_net.h> #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ - NETIF_F_TSO6 | NETIF_F_UFO) + NETIF_F_TSO6) static dev_t ipvtap_major; static struct cdev ipvtap_cdev; diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c index 6f3c805f7211..723e49bc4baa 100644 --- a/drivers/net/irda/irda-usb.c +++ b/drivers/net/irda/irda-usb.c @@ -72,7 +72,7 @@ static int qos_mtt_bits = 0; /* These are the currently known IrDA USB dongles. Add new dongles here */ -static struct usb_device_id dongles[] = { +static const struct usb_device_id dongles[] = { /* ACTiSYS Corp., ACT-IR2000U FIR-USB Adapter */ { USB_DEVICE(0x9c4, 0x011), .driver_info = IUC_SPEED_BUG | IUC_NO_WINDOW }, /* Look like ACTiSYS, Report : IBM Corp., IBM UltraPort IrDA */ diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/net/irda/kingsun-sir.c index 24c0f169a7b1..4fd4ac2fe09f 100644 --- a/drivers/net/irda/kingsun-sir.c +++ b/drivers/net/irda/kingsun-sir.c @@ -85,7 +85,7 @@ #define KING_PRODUCT_ID 0x4200 /* These are the currently known USB ids */ -static struct usb_device_id dongles[] = { +static const struct usb_device_id dongles[] = { /* KingSun Co,Ltd IrDA/USB Bridge */ { USB_DEVICE(KING_VENDOR_ID, KING_PRODUCT_ID) }, { } diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c index 3affded3e30d..8025741e7586 100644 --- a/drivers/net/irda/ks959-sir.c +++ b/drivers/net/irda/ks959-sir.c @@ -133,7 +133,7 @@ #define KS959_PRODUCT_ID 0x4959 /* These are the currently known USB ids */ -static struct usb_device_id dongles[] = { +static const struct usb_device_id dongles[] = { /* KingSun Co,Ltd IrDA/USB Bridge */ {USB_DEVICE(KS959_VENDOR_ID, KS959_PRODUCT_ID)}, {} diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c index 741452c7ce35..d2a0755df596 100644 --- a/drivers/net/irda/ksdazzle-sir.c +++ b/drivers/net/irda/ksdazzle-sir.c @@ -97,7 +97,7 @@ #define KSDAZZLE_PRODUCT_ID 0x4100 /* These are the currently known USB ids */ -static struct usb_device_id dongles[] = { +static const struct usb_device_id dongles[] = { /* KingSun Co,Ltd IrDA/USB Bridge */ {USB_DEVICE(KSDAZZLE_VENDOR_ID, KSDAZZLE_PRODUCT_ID)}, {} diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c index 765de3bedb88..c3f0b254b344 100644 --- a/drivers/net/irda/mcs7780.c +++ b/drivers/net/irda/mcs7780.c @@ -66,7 +66,7 @@ #define MCS_VENDOR_ID 0x9710 #define MCS_PRODUCT_ID 0x7780 -static struct usb_device_id mcs_table[] = { +static const struct usb_device_id mcs_table[] = { /* MosChip Corp., MCS7780 FIR-USB Adapter */ {USB_DEVICE(MCS_VENDOR_ID, MCS_PRODUCT_ID)}, {}, diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c index 7ee514879531..ee2cb70b688d 100644 --- a/drivers/net/irda/stir4200.c +++ b/drivers/net/irda/stir4200.c @@ -183,7 +183,7 @@ struct stir_cb { /* These are the currently known USB ids */ -static struct usb_device_id dongles[] = { +static const struct usb_device_id dongles[] = { /* SigmaTel, Inc, STIr4200 IrDA/USB Bridge */ { USB_DEVICE(0x066f, 0x4200) }, { } diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 0f581ee74fe4..ca35c6ba7947 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -841,7 +841,7 @@ static struct lock_class_key macvlan_netdev_addr_lock_key; #define MACVLAN_FEATURES \ (NETIF_F_SG | NETIF_F_HW_CSUM | 
NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ - NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_LRO | \ + NETIF_F_GSO | NETIF_F_TSO | NETIF_F_LRO | \ NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER) diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 91e7b19bbf86..c2d0ea2fb019 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -49,7 +49,7 @@ static struct class macvtap_class = { static struct cdev macvtap_cdev; #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ - NETIF_F_TSO6 | NETIF_F_UFO) + NETIF_F_TSO6) static void macvtap_count_tx_dropped(struct tap_dev *tap) { diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 928fd892f167..bf73969a9d2b 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -106,6 +106,16 @@ config MDIO_HISI_FEMAC This module provides a driver for the MDIO busses found in the Hisilicon SoC that have an Fast Ethernet MAC. +config MDIO_I2C + tristate + depends on I2C + help + Support I2C based PHYs. This provides a MDIO bus bridged + to I2C to allow PHYs connected in I2C mode to be accessed + using the existing infrastructure. + + This is library mode. + config MDIO_MOXART tristate "MOXA ART MDIO interface support" depends on ARCH_MOXART @@ -159,6 +169,16 @@ menuconfig PHYLIB devices. This option provides infrastructure for managing PHY devices. +config PHYLINK + tristate + depends on NETDEVICES + select PHYLIB + select SWPHY + help + PHYlink models the link between the PHY and MAC, allowing fixed + configuration links, PHYs, and Serdes links with MAC level + autonegotiation modes. + if PHYLIB config SWPHY @@ -180,6 +200,11 @@ config LED_TRIGGER_PHY comment "MII PHY device drivers" +config SFP + tristate "SFP cage support" + depends on I2C && PHYLINK + select MDIO_I2C + config AMD_PHY tristate "AMD PHYs" ---help--- diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 8e9b9f349384..7237255bad68 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -18,6 +18,7 @@ endif libphy-$(CONFIG_SWPHY) += swphy.o libphy-$(CONFIG_LED_TRIGGER_PHY) += phy_led_triggers.o +obj-$(CONFIG_PHYLINK) += phylink.o obj-$(CONFIG_PHYLIB) += libphy.o obj-$(CONFIG_MDIO_BCM_IPROC) += mdio-bcm-iproc.o @@ -30,12 +31,17 @@ obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o +obj-$(CONFIG_MDIO_I2C) += mdio-i2c.o obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o obj-$(CONFIG_MDIO_XGENE) += mdio-xgene.o +obj-$(CONFIG_SFP) += sfp.o +sfp-obj-$(CONFIG_SFP) += sfp-bus.o +obj-y += $(sfp-obj-y) $(sfp-obj-m) + obj-$(CONFIG_AMD_PHY) += amd.o obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o obj-$(CONFIG_AT803X_PHY) += at803x.o diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 5d314f143aea..15cbcdba618a 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -55,43 +55,35 @@ #define MII_M1011_IMASK_INIT 0x6400 #define MII_M1011_IMASK_CLEAR 0x0000 -#define MII_M1011_PHY_SCR 0x10 -#define MII_M1011_PHY_SCR_MDI 0x0000 -#define MII_M1011_PHY_SCR_MDI_X 0x0020 -#define MII_M1011_PHY_SCR_AUTO_CROSS 0x0060 - -#define MII_M1145_PHY_EXT_SR 0x1b -#define MII_M1145_PHY_EXT_CR 0x14 -#define MII_M1145_RGMII_RX_DELAY 0x0080 -#define 
MII_M1145_RGMII_TX_DELAY 0x0002 -#define MII_M1145_HWCFG_MODE_SGMII_NO_CLK 0x4 -#define MII_M1145_HWCFG_MODE_MASK 0xf -#define MII_M1145_HWCFG_FIBER_COPPER_AUTO 0x8000 - -#define MII_M1145_HWCFG_MODE_SGMII_NO_CLK 0x4 -#define MII_M1145_HWCFG_MODE_MASK 0xf -#define MII_M1145_HWCFG_FIBER_COPPER_AUTO 0x8000 +#define MII_M1011_PHY_SCR 0x10 +#define MII_M1011_PHY_SCR_DOWNSHIFT_EN BIT(11) +#define MII_M1011_PHY_SCR_DOWNSHIFT_SHIFT 12 +#define MII_M1011_PHY_SRC_DOWNSHIFT_MASK 0x7800 +#define MII_M1011_PHY_SCR_MDI (0x0 << 5) +#define MII_M1011_PHY_SCR_MDI_X (0x1 << 5) +#define MII_M1011_PHY_SCR_AUTO_CROSS (0x3 << 5) #define MII_M1111_PHY_LED_CONTROL 0x18 #define MII_M1111_PHY_LED_DIRECT 0x4100 #define MII_M1111_PHY_LED_COMBINE 0x411c #define MII_M1111_PHY_EXT_CR 0x14 -#define MII_M1111_RX_DELAY 0x80 -#define MII_M1111_TX_DELAY 0x2 +#define MII_M1111_RGMII_RX_DELAY BIT(7) +#define MII_M1111_RGMII_TX_DELAY BIT(1) #define MII_M1111_PHY_EXT_SR 0x1b #define MII_M1111_HWCFG_MODE_MASK 0xf -#define MII_M1111_HWCFG_MODE_COPPER_RGMII 0xb #define MII_M1111_HWCFG_MODE_FIBER_RGMII 0x3 #define MII_M1111_HWCFG_MODE_SGMII_NO_CLK 0x4 +#define MII_M1111_HWCFG_MODE_RTBI 0x7 #define MII_M1111_HWCFG_MODE_COPPER_RTBI 0x9 -#define MII_M1111_HWCFG_FIBER_COPPER_AUTO 0x8000 -#define MII_M1111_HWCFG_FIBER_COPPER_RES 0x2000 +#define MII_M1111_HWCFG_MODE_COPPER_RGMII 0xb +#define MII_M1111_HWCFG_FIBER_COPPER_RES BIT(13) +#define MII_M1111_HWCFG_FIBER_COPPER_AUTO BIT(15) #define MII_88E1121_PHY_MSCR_REG 21 #define MII_88E1121_PHY_MSCR_RX_DELAY BIT(5) #define MII_88E1121_PHY_MSCR_TX_DELAY BIT(4) -#define MII_88E1121_PHY_MSCR_DELAY_MASK (~(0x3 << 4)) +#define MII_88E1121_PHY_MSCR_DELAY_MASK (~(BIT(5) | BIT(4))) #define MII_88E1121_MISC_TEST 0x1a #define MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK 0x1f00 @@ -108,24 +100,24 @@ #define MII_88E1318S_PHY_MSCR1_PAD_ODD BIT(6) /* Copper Specific Interrupt Enable Register */ -#define MII_88E1318S_PHY_CSIER 0x12 +#define MII_88E1318S_PHY_CSIER 0x12 /* WOL Event Interrupt Enable */ -#define MII_88E1318S_PHY_CSIER_WOL_EIE BIT(7) +#define MII_88E1318S_PHY_CSIER_WOL_EIE BIT(7) /* LED Timer Control Register */ -#define MII_88E1318S_PHY_LED_TCR 0x12 -#define MII_88E1318S_PHY_LED_TCR_FORCE_INT BIT(15) -#define MII_88E1318S_PHY_LED_TCR_INTn_ENABLE BIT(7) -#define MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW BIT(11) +#define MII_88E1318S_PHY_LED_TCR 0x12 +#define MII_88E1318S_PHY_LED_TCR_FORCE_INT BIT(15) +#define MII_88E1318S_PHY_LED_TCR_INTn_ENABLE BIT(7) +#define MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW BIT(11) /* Magic Packet MAC address registers */ -#define MII_88E1318S_PHY_MAGIC_PACKET_WORD2 0x17 -#define MII_88E1318S_PHY_MAGIC_PACKET_WORD1 0x18 -#define MII_88E1318S_PHY_MAGIC_PACKET_WORD0 0x19 +#define MII_88E1318S_PHY_MAGIC_PACKET_WORD2 0x17 +#define MII_88E1318S_PHY_MAGIC_PACKET_WORD1 0x18 +#define MII_88E1318S_PHY_MAGIC_PACKET_WORD0 0x19 -#define MII_88E1318S_PHY_WOL_CTRL 0x10 -#define MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS BIT(12) -#define MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE BIT(14) +#define MII_88E1318S_PHY_WOL_CTRL 0x10 +#define MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS BIT(12) +#define MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE BIT(14) #define MII_88E1121_PHY_LED_CTRL 16 #define MII_88E1121_PHY_LED_DEF 0x0030 @@ -138,8 +130,6 @@ #define MII_M1011_PHY_STATUS_RESOLVED 0x0800 #define MII_M1011_PHY_STATUS_LINK 0x0400 -#define MII_M1116R_CONTROL_REG_MAC 21 - #define MII_88E3016_PHY_SPEC_CTRL 0x10 #define MII_88E3016_DISABLE_SCRAMBLER 0x0200 #define 
MII_88E3016_AUTO_MDIX_CROSSOVER 0x0030 @@ -152,7 +142,7 @@ #define LPA_FIBER_1000HALF 0x40 #define LPA_FIBER_1000FULL 0x20 -#define LPA_PAUSE_FIBER 0x180 +#define LPA_PAUSE_FIBER 0x180 #define LPA_PAUSE_ASYM_FIBER 0x100 #define ADVERTISE_FIBER_1000HALF 0x40 @@ -274,6 +264,23 @@ static int marvell_set_polarity(struct phy_device *phydev, int polarity) return 0; } +static int marvell_set_downshift(struct phy_device *phydev, bool enable, + u8 retries) +{ + int reg; + + reg = phy_read(phydev, MII_M1011_PHY_SCR); + if (reg < 0) + return reg; + + reg &= MII_M1011_PHY_SRC_DOWNSHIFT_MASK; + reg |= ((retries - 1) << MII_M1011_PHY_SCR_DOWNSHIFT_SHIFT); + if (enable) + reg |= MII_M1011_PHY_SCR_DOWNSHIFT_EN; + + return phy_write(phydev, MII_M1011_PHY_SCR, reg); +} + static int marvell_config_aneg(struct phy_device *phydev) { int err; @@ -292,17 +299,11 @@ static int marvell_config_aneg(struct phy_device *phydev) return err; if (phydev->autoneg != AUTONEG_ENABLE) { - int bmcr; - /* A write to speed/duplex bits (that is performed by * genphy_config_aneg() call above) must be followed by * a software reset. Otherwise, the write has no effect. */ - bmcr = phy_read(phydev, MII_BMCR); - if (bmcr < 0) - return bmcr; - - err = phy_write(phydev, MII_BMCR, bmcr | BMCR_RESET); + err = genphy_soft_reset(phydev); if (err < 0) return err; } @@ -318,8 +319,7 @@ static int m88e1101_config_aneg(struct phy_device *phydev) * that certain registers get written in order * to restart autonegotiation */ - err = phy_write(phydev, MII_BMCR, BMCR_RESET); - + err = genphy_soft_reset(phydev); if (err < 0) return err; @@ -354,7 +354,7 @@ static int m88e1111_config_aneg(struct phy_device *phydev) * that certain registers get written in order * to restart autonegotiation */ - err = phy_write(phydev, MII_BMCR, BMCR_RESET); + err = genphy_soft_reset(phydev); err = marvell_set_polarity(phydev, phydev->mdix_ctrl); if (err < 0) @@ -370,17 +370,11 @@ static int m88e1111_config_aneg(struct phy_device *phydev) return err; if (phydev->autoneg != AUTONEG_ENABLE) { - int bmcr; - /* A write to speed/duplex bits (that is performed by * genphy_config_aneg() call above) must be followed by * a software reset. Otherwise, the write has no effect. 
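Several hunks below replace open-coded BMCR_RESET writes (and their polling loops) with genphy_soft_reset(). A rough sketch of what that helper amounts to, assuming the phylib of this era (the real implementation lives in phy_device.c and applies its own timeout policy):

        #include <linux/delay.h>
        #include <linux/mii.h>
        #include <linux/phy.h>

        static int soft_reset_sketch(struct phy_device *phydev)
        {
                int val, timeout = 12;          /* hypothetical poll budget */

                val = phy_write(phydev, MII_BMCR, BMCR_RESET);
                if (val < 0)
                        return val;

                /* BMCR_RESET is self-clearing; wait for it to drop. */
                do {
                        msleep(50);
                        val = phy_read(phydev, MII_BMCR);
                        if (val < 0)
                                return val;
                } while ((val & BMCR_RESET) && --timeout);

                return (val & BMCR_RESET) ? -ETIMEDOUT : 0;
        }

This is also why the open-coded "do ... while (temp & BMCR_RESET)" loop in m88e1111_config_init_rtbi() can simply be dropped further down.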
*/ - bmcr = phy_read(phydev, MII_BMCR); - if (bmcr < 0) - return bmcr; - - err = phy_write(phydev, MII_BMCR, bmcr | BMCR_RESET); + err = genphy_soft_reset(phydev); if (err < 0) return err; } @@ -466,7 +460,7 @@ static int marvell_of_reg_init(struct phy_device *phydev) } #endif /* CONFIG_OF_MDIO */ -static int m88e1121_config_aneg(struct phy_device *phydev) +static int m88e1121_config_aneg_rgmii_delays(struct phy_device *phydev) { int err, oldpage, mscr; @@ -474,31 +468,45 @@ static int m88e1121_config_aneg(struct phy_device *phydev) if (oldpage < 0) return oldpage; - if (phy_interface_is_rgmii(phydev)) { - mscr = phy_read(phydev, MII_88E1121_PHY_MSCR_REG) & - MII_88E1121_PHY_MSCR_DELAY_MASK; - - if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) - mscr |= (MII_88E1121_PHY_MSCR_RX_DELAY | - MII_88E1121_PHY_MSCR_TX_DELAY); - else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) - mscr |= MII_88E1121_PHY_MSCR_RX_DELAY; - else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) - mscr |= MII_88E1121_PHY_MSCR_TX_DELAY; - - err = phy_write(phydev, MII_88E1121_PHY_MSCR_REG, mscr); - if (err < 0) - return err; + mscr = phy_read(phydev, MII_88E1121_PHY_MSCR_REG); + if (mscr < 0) { + err = mscr; + goto out; } + mscr &= MII_88E1121_PHY_MSCR_DELAY_MASK; + + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) + mscr |= (MII_88E1121_PHY_MSCR_RX_DELAY | + MII_88E1121_PHY_MSCR_TX_DELAY); + else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) + mscr |= MII_88E1121_PHY_MSCR_RX_DELAY; + else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) + mscr |= MII_88E1121_PHY_MSCR_TX_DELAY; + + err = phy_write(phydev, MII_88E1121_PHY_MSCR_REG, mscr); + +out: marvell_set_page(phydev, oldpage); - err = phy_write(phydev, MII_BMCR, BMCR_RESET); + return err; +} + +static int m88e1121_config_aneg(struct phy_device *phydev) +{ + int err = 0; + + if (phy_interface_is_rgmii(phydev)) { + err = m88e1121_config_aneg_rgmii_delays(phydev); + if (err) + return err; + } + + err = genphy_soft_reset(phydev); if (err < 0) return err; - err = phy_write(phydev, MII_M1011_PHY_SCR, - MII_M1011_PHY_SCR_AUTO_CROSS); + err = marvell_set_polarity(phydev, phydev->mdix_ctrl); if (err < 0) return err; @@ -596,7 +604,7 @@ static int marvell_config_aneg_fiber(struct phy_device *phydev) if (changed == 0) { /* Advertisement hasn't changed, but maybe aneg was never on to - * begin with? Or maybe phy was isolated? + * begin with? Or maybe phy was isolated? 
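The RGMII hunks in this file all converge on one pattern: select the receive and/or transmit delay bit from phydev->interface, leaving plain RGMII untouched so the board provides the skew. A generalised sketch of that pattern (hypothetical helper; the register and bit masks are parameters rather than the MII_M1111/MII_88E1121 constants used above):

        static int rgmii_delay_sketch(struct phy_device *phydev, int reg,
                                      u16 rx_bit, u16 tx_bit)
        {
                int val = phy_read(phydev, reg);

                if (val < 0)
                        return val;

                switch (phydev->interface) {
                case PHY_INTERFACE_MODE_RGMII_ID:       /* delay both */
                        val |= rx_bit | tx_bit;
                        break;
                case PHY_INTERFACE_MODE_RGMII_RXID:     /* RX path only */
                        val = (val & ~tx_bit) | rx_bit;
                        break;
                case PHY_INTERFACE_MODE_RGMII_TXID:     /* TX path only */
                        val = (val & ~rx_bit) | tx_bit;
                        break;
                default:                /* plain RGMII: leave bits as-is */
                        break;
                }

                return phy_write(phydev, reg, val);
        }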
*/ int ctl = phy_read(phydev, MII_BMCR); @@ -653,12 +661,9 @@ static int marvell_config_init(struct phy_device *phydev) static int m88e1116r_config_init(struct phy_device *phydev) { - int temp; int err; - temp = phy_read(phydev, MII_BMCR); - temp |= BMCR_RESET; - err = phy_write(phydev, MII_BMCR, temp); + err = genphy_soft_reset(phydev); if (err < 0) return err; @@ -668,35 +673,22 @@ static int m88e1116r_config_init(struct phy_device *phydev) if (err < 0) return err; - temp = phy_read(phydev, MII_M1011_PHY_SCR); - temp |= (7 << 12); /* max number of gigabit attempts */ - temp |= (1 << 11); /* enable downshift */ - temp |= MII_M1011_PHY_SCR_AUTO_CROSS; - err = phy_write(phydev, MII_M1011_PHY_SCR, temp); + err = marvell_set_polarity(phydev, phydev->mdix_ctrl); if (err < 0) return err; - err = marvell_set_page(phydev, MII_MARVELL_MSCR_PAGE); - if (err < 0) - return err; - temp = phy_read(phydev, MII_M1116R_CONTROL_REG_MAC); - temp |= (1 << 5); - temp |= (1 << 4); - err = phy_write(phydev, MII_M1116R_CONTROL_REG_MAC, temp); + err = marvell_set_downshift(phydev, true, 8); if (err < 0) return err; - err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE); + + err = m88e1121_config_aneg_rgmii_delays(phydev); if (err < 0) return err; - temp = phy_read(phydev, MII_BMCR); - temp |= BMCR_RESET; - err = phy_write(phydev, MII_BMCR, temp); + err = genphy_soft_reset(phydev); if (err < 0) return err; - mdelay(500); - return marvell_config_init(phydev); } @@ -719,9 +711,29 @@ static int m88e3016_config_init(struct phy_device *phydev) return marvell_config_init(phydev); } -static int m88e1111_config_init_rgmii(struct phy_device *phydev) +static int m88e1111_config_init_hwcfg_mode(struct phy_device *phydev, + u16 mode, + int fibre_copper_auto) +{ + int temp; + + temp = phy_read(phydev, MII_M1111_PHY_EXT_SR); + if (temp < 0) + return temp; + + temp &= ~(MII_M1111_HWCFG_MODE_MASK | + MII_M1111_HWCFG_FIBER_COPPER_AUTO | + MII_M1111_HWCFG_FIBER_COPPER_RES); + temp |= mode; + + if (fibre_copper_auto) + temp |= MII_M1111_HWCFG_FIBER_COPPER_AUTO; + + return phy_write(phydev, MII_M1111_PHY_EXT_SR, temp); +} + +static int m88e1111_config_init_rgmii_delays(struct phy_device *phydev) { - int err; int temp; temp = phy_read(phydev, MII_M1111_PHY_EXT_CR); @@ -729,16 +741,24 @@ static int m88e1111_config_init_rgmii(struct phy_device *phydev) return temp; if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) { - temp |= (MII_M1111_RX_DELAY | MII_M1111_TX_DELAY); + temp |= (MII_M1111_RGMII_RX_DELAY | MII_M1111_RGMII_TX_DELAY); } else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) { - temp &= ~MII_M1111_TX_DELAY; - temp |= MII_M1111_RX_DELAY; + temp &= ~MII_M1111_RGMII_TX_DELAY; + temp |= MII_M1111_RGMII_RX_DELAY; } else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) { - temp &= ~MII_M1111_RX_DELAY; - temp |= MII_M1111_TX_DELAY; + temp &= ~MII_M1111_RGMII_RX_DELAY; + temp |= MII_M1111_RGMII_TX_DELAY; } - err = phy_write(phydev, MII_M1111_PHY_EXT_CR, temp); + return phy_write(phydev, MII_M1111_PHY_EXT_CR, temp); +} + +static int m88e1111_config_init_rgmii(struct phy_device *phydev) +{ + int temp; + int err; + + err = m88e1111_config_init_rgmii_delays(phydev); if (err < 0) return err; @@ -759,17 +779,11 @@ static int m88e1111_config_init_rgmii(struct phy_device *phydev) static int m88e1111_config_init_sgmii(struct phy_device *phydev) { int err; - int temp; - - temp = phy_read(phydev, MII_M1111_PHY_EXT_SR); - if (temp < 0) - return temp; - - temp &= ~(MII_M1111_HWCFG_MODE_MASK); - temp |= 
MII_M1111_HWCFG_MODE_SGMII_NO_CLK; - temp |= MII_M1111_HWCFG_FIBER_COPPER_AUTO; - err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp); + err = m88e1111_config_init_hwcfg_mode( + phydev, + MII_M1111_HWCFG_MODE_SGMII_NO_CLK, + MII_M1111_HWCFG_FIBER_COPPER_AUTO); if (err < 0) return err; @@ -780,48 +794,27 @@ static int m88e1111_config_init_sgmii(struct phy_device *phydev) static int m88e1111_config_init_rtbi(struct phy_device *phydev) { int err; - int temp; - temp = phy_read(phydev, MII_M1111_PHY_EXT_CR); - if (temp < 0) - return temp; - - temp |= (MII_M1111_RX_DELAY | MII_M1111_TX_DELAY); - err = phy_write(phydev, MII_M1111_PHY_EXT_CR, temp); - if (err < 0) + err = m88e1111_config_init_rgmii_delays(phydev); + if (err) return err; - temp = phy_read(phydev, MII_M1111_PHY_EXT_SR); - if (temp < 0) - return temp; - - temp &= ~(MII_M1111_HWCFG_MODE_MASK | - MII_M1111_HWCFG_FIBER_COPPER_RES); - temp |= 0x7 | MII_M1111_HWCFG_FIBER_COPPER_AUTO; - - err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp); + err = m88e1111_config_init_hwcfg_mode( + phydev, + MII_M1111_HWCFG_MODE_RTBI, + MII_M1111_HWCFG_FIBER_COPPER_AUTO); if (err < 0) return err; /* soft reset */ - err = phy_write(phydev, MII_BMCR, BMCR_RESET); + err = genphy_soft_reset(phydev); if (err < 0) return err; - do - temp = phy_read(phydev, MII_BMCR); - while (temp & BMCR_RESET); - - temp = phy_read(phydev, MII_M1111_PHY_EXT_SR); - if (temp < 0) - return temp; - - temp &= ~(MII_M1111_HWCFG_MODE_MASK | - MII_M1111_HWCFG_FIBER_COPPER_RES); - temp |= MII_M1111_HWCFG_MODE_COPPER_RTBI | - MII_M1111_HWCFG_FIBER_COPPER_AUTO; - - return phy_write(phydev, MII_M1111_PHY_EXT_SR, temp); + return m88e1111_config_init_hwcfg_mode( + phydev, + MII_M1111_HWCFG_MODE_RTBI, + MII_M1111_HWCFG_FIBER_COPPER_AUTO); } static int m88e1111_config_init(struct phy_device *phydev) @@ -850,7 +843,7 @@ static int m88e1111_config_init(struct phy_device *phydev) if (err < 0) return err; - return phy_write(phydev, MII_BMCR, BMCR_RESET); + return genphy_soft_reset(phydev); } static int m88e1121_config_init(struct phy_device *phydev) @@ -912,12 +905,11 @@ static int m88e1118_config_aneg(struct phy_device *phydev) { int err; - err = phy_write(phydev, MII_BMCR, BMCR_RESET); + err = genphy_soft_reset(phydev); if (err < 0) return err; - err = phy_write(phydev, MII_M1011_PHY_SCR, - MII_M1011_PHY_SCR_AUTO_CROSS); + err = marvell_set_polarity(phydev, phydev->mdix_ctrl); if (err < 0) return err; @@ -961,7 +953,7 @@ static int m88e1118_config_init(struct phy_device *phydev) if (err < 0) return err; - return phy_write(phydev, MII_BMCR, BMCR_RESET); + return genphy_soft_reset(phydev); } static int m88e1149_config_init(struct phy_device *phydev) @@ -987,20 +979,15 @@ static int m88e1149_config_init(struct phy_device *phydev) if (err < 0) return err; - return phy_write(phydev, MII_BMCR, BMCR_RESET); + return genphy_soft_reset(phydev); } static int m88e1145_config_init_rgmii(struct phy_device *phydev) { + int temp; int err; - int temp = phy_read(phydev, MII_M1145_PHY_EXT_CR); - - if (temp < 0) - return temp; - temp |= (MII_M1145_RGMII_RX_DELAY | MII_M1145_RGMII_TX_DELAY); - - err = phy_write(phydev, MII_M1145_PHY_EXT_CR, temp); + err = m88e1111_config_init_rgmii_delays(phydev); if (err < 0) return err; @@ -1032,16 +1019,9 @@ static int m88e1145_config_init_rgmii(struct phy_device *phydev) static int m88e1145_config_init_sgmii(struct phy_device *phydev) { - int temp = phy_read(phydev, MII_M1145_PHY_EXT_SR); - - if (temp < 0) - return temp; - - temp &= ~MII_M1145_HWCFG_MODE_MASK; - temp |= 
MII_M1145_HWCFG_MODE_SGMII_NO_CLK; - temp |= MII_M1145_HWCFG_FIBER_COPPER_AUTO; - - return phy_write(phydev, MII_M1145_PHY_EXT_SR, temp); + return m88e1111_config_init_hwcfg_mode( + phydev, MII_M1111_HWCFG_MODE_SGMII_NO_CLK, + MII_M1111_HWCFG_FIBER_COPPER_AUTO); } static int m88e1145_config_init(struct phy_device *phydev) @@ -1515,7 +1495,7 @@ static void marvell_get_strings(struct phy_device *phydev, u8 *data) } #ifndef UINT64_MAX -#define UINT64_MAX (u64)(~((u64)0)) +#define UINT64_MAX (u64)(~((u64)0)) #endif static u64 marvell_get_stat(struct phy_device *phydev, int i) { diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c index 34395230ce70..73c5267a11fd 100644 --- a/drivers/net/phy/mdio-bcm-unimac.c +++ b/drivers/net/phy/mdio-bcm-unimac.c @@ -21,6 +21,8 @@ #include <linux/of_platform.h> #include <linux/of_mdio.h> +#include <linux/platform_data/mdio-bcm-unimac.h> + #define MDIO_CMD 0x00 #define MDIO_START_BUSY (1 << 29) #define MDIO_READ_FAIL (1 << 28) @@ -41,6 +43,8 @@ struct unimac_mdio_priv { struct mii_bus *mii_bus; void __iomem *base; + int (*wait_func) (void *wait_func_data); + void *wait_func_data; }; static inline void unimac_mdio_start(struct unimac_mdio_priv *priv) @@ -57,10 +61,28 @@ static inline unsigned int unimac_mdio_busy(struct unimac_mdio_priv *priv) return __raw_readl(priv->base + MDIO_CMD) & MDIO_START_BUSY; } +static int unimac_mdio_poll(void *wait_func_data) +{ + struct unimac_mdio_priv *priv = wait_func_data; + unsigned int timeout = 1000; + + do { + if (!unimac_mdio_busy(priv)) + return 0; + + usleep_range(1000, 2000); + } while (--timeout); + + if (!timeout) + return -ETIMEDOUT; + + return 0; +} + static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg) { struct unimac_mdio_priv *priv = bus->priv; - unsigned int timeout = 1000; + int ret; u32 cmd; /* Prepare the read operation */ @@ -70,15 +92,9 @@ static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg) /* Start MDIO transaction */ unimac_mdio_start(priv); - do { - if (!unimac_mdio_busy(priv)) - break; - - usleep_range(1000, 2000); - } while (timeout--); - - if (!timeout) - return -ETIMEDOUT; + ret = priv->wait_func(priv->wait_func_data); + if (ret) + return ret; cmd = __raw_readl(priv->base + MDIO_CMD); @@ -97,7 +113,6 @@ static int unimac_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val) { struct unimac_mdio_priv *priv = bus->priv; - unsigned int timeout = 1000; u32 cmd; /* Prepare the write operation */ @@ -107,17 +122,7 @@ static int unimac_mdio_write(struct mii_bus *bus, int phy_id, unimac_mdio_start(priv); - do { - if (!unimac_mdio_busy(priv)) - break; - - usleep_range(1000, 2000); - } while (timeout--); - - if (!timeout) - return -ETIMEDOUT; - - return 0; + return priv->wait_func(priv->wait_func_data); } /* Workaround for integrated BCM7xxx Gigabit PHYs which have a problem with @@ -155,8 +160,10 @@ static int unimac_mdio_reset(struct mii_bus *bus) } for (addr = 0; addr < PHY_MAX_ADDR; addr++) { - if (read_mask & 1 << addr) + if (read_mask & 1 << addr) { + dev_dbg(&bus->dev, "Workaround for PHY @ %d\n", addr); mdiobus_read(bus, addr, MII_BMSR); + } } return 0; @@ -164,6 +171,7 @@ static int unimac_mdio_reset(struct mii_bus *bus) static int unimac_mdio_probe(struct platform_device *pdev) { + struct unimac_mdio_pdata *pdata = pdev->dev.platform_data; struct unimac_mdio_priv *priv; struct device_node *np; struct mii_bus *bus; @@ -193,12 +201,21 @@ static int unimac_mdio_probe(struct platform_device *pdev) bus = priv->mii_bus; bus->priv = 
priv; - bus->name = "unimac MII bus"; + if (pdata) { + bus->name = pdata->bus_name; + priv->wait_func = pdata->wait_func; + priv->wait_func_data = pdata->wait_func_data; + bus->phy_mask = ~pdata->phy_mask; + } else { + bus->name = "unimac MII bus"; + priv->wait_func_data = priv; + priv->wait_func = unimac_mdio_poll; + } bus->parent = &pdev->dev; bus->read = unimac_mdio_read; bus->write = unimac_mdio_write; bus->reset = unimac_mdio_reset; - snprintf(bus->id, MII_BUS_ID_SIZE, "%s", pdev->name); + snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, pdev->id); ret = of_mdiobus_register(bus, np); if (ret) { @@ -240,7 +257,7 @@ MODULE_DEVICE_TABLE(of, unimac_mdio_ids); static struct platform_driver unimac_mdio_driver = { .driver = { - .name = "unimac-mdio", + .name = UNIMAC_MDIO_DRV_NAME, .of_match_table = unimac_mdio_ids, }, .probe = unimac_mdio_probe, @@ -251,4 +268,4 @@ module_platform_driver(unimac_mdio_driver); MODULE_AUTHOR("Broadcom Corporation"); MODULE_DESCRIPTION("Broadcom UniMAC MDIO bus controller"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:unimac-mdio"); +MODULE_ALIAS("platform:" UNIMAC_MDIO_DRV_NAME); diff --git a/drivers/net/phy/mdio-i2c.c b/drivers/net/phy/mdio-i2c.c new file mode 100644 index 000000000000..6d24fd13ca86 --- /dev/null +++ b/drivers/net/phy/mdio-i2c.c @@ -0,0 +1,109 @@ +/* + * MDIO I2C bridge + * + * Copyright (C) 2015-2016 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Network PHYs can appear on I2C buses when they are part of SFP module. + * This driver exposes these PHYs to the networking PHY code, allowing + * our PHY drivers access to these PHYs, and so allowing configuration + * of their settings. + */ +#include <linux/i2c.h> +#include <linux/phy.h> + +#include "mdio-i2c.h" + +/* + * I2C bus addresses 0x50 and 0x51 are normally an EEPROM, which is + * specified to be present in SFP modules. These correspond with PHY + * addresses 16 and 17. Disallow access to these "phy" addresses. + */ +static bool i2c_mii_valid_phy_id(int phy_id) +{ + return phy_id != 0x10 && phy_id != 0x11; +} + +static unsigned int i2c_mii_phy_addr(int phy_id) +{ + return phy_id + 0x40; +} + +static int i2c_mii_read(struct mii_bus *bus, int phy_id, int reg) +{ + struct i2c_adapter *i2c = bus->priv; + struct i2c_msg msgs[2]; + u8 data[2], dev_addr = reg; + int bus_addr, ret; + + if (!i2c_mii_valid_phy_id(phy_id)) + return 0xffff; + + bus_addr = i2c_mii_phy_addr(phy_id); + msgs[0].addr = bus_addr; + msgs[0].flags = 0; + msgs[0].len = 1; + msgs[0].buf = &dev_addr; + msgs[1].addr = bus_addr; + msgs[1].flags = I2C_M_RD; + msgs[1].len = sizeof(data); + msgs[1].buf = data; + + ret = i2c_transfer(i2c, msgs, ARRAY_SIZE(msgs)); + if (ret != ARRAY_SIZE(msgs)) + return 0xffff; + + return data[0] << 8 | data[1]; +} + +static int i2c_mii_write(struct mii_bus *bus, int phy_id, int reg, u16 val) +{ + struct i2c_adapter *i2c = bus->priv; + struct i2c_msg msg; + int ret; + u8 data[3]; + + if (!i2c_mii_valid_phy_id(phy_id)) + return 0; + + data[0] = reg; + data[1] = val >> 8; + data[2] = val; + + msg.addr = i2c_mii_phy_addr(phy_id); + msg.flags = 0; + msg.len = 3; + msg.buf = data; + + ret = i2c_transfer(i2c, &msg, 1); + + return ret < 0 ? 
ret : 0; +} + +struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c) +{ + struct mii_bus *mii; + + if (!i2c_check_functionality(i2c, I2C_FUNC_I2C)) + return ERR_PTR(-EINVAL); + + mii = mdiobus_alloc(); + if (!mii) + return ERR_PTR(-ENOMEM); + + snprintf(mii->id, MII_BUS_ID_SIZE, "i2c:%s", dev_name(parent)); + mii->parent = parent; + mii->read = i2c_mii_read; + mii->write = i2c_mii_write; + mii->priv = i2c; + + return mii; +} +EXPORT_SYMBOL_GPL(mdio_i2c_alloc); + +MODULE_AUTHOR("Russell King"); +MODULE_DESCRIPTION("MDIO I2C bridge library"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/phy/mdio-i2c.h b/drivers/net/phy/mdio-i2c.h new file mode 100644 index 000000000000..889ab57d7f3e --- /dev/null +++ b/drivers/net/phy/mdio-i2c.h @@ -0,0 +1,19 @@ +/* + * MDIO I2C bridge + * + * Copyright (C) 2015 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef MDIO_I2C_H +#define MDIO_I2C_H + +struct device; +struct i2c_adapter; +struct mii_bus; + +struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c); + +#endif diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c index 6a33646bdf05..c3825c7da038 100644 --- a/drivers/net/phy/mdio-mux-mmioreg.c +++ b/drivers/net/phy/mdio-mux-mmioreg.c @@ -105,7 +105,7 @@ static int mdio_mux_mmioreg_probe(struct platform_device *pdev) const __be32 *iprop; int len, ret; - dev_dbg(&pdev->dev, "probing node %s\n", np->full_name); + dev_dbg(&pdev->dev, "probing node %pOF\n", np); s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL); if (!s) @@ -113,8 +113,8 @@ static int mdio_mux_mmioreg_probe(struct platform_device *pdev) ret = of_address_to_resource(np, 0, &res); if (ret) { - dev_err(&pdev->dev, "could not obtain memory map for node %s\n", - np->full_name); + dev_err(&pdev->dev, "could not obtain memory map for node %pOF\n", + np); return ret; } s->phys = res.start; @@ -145,15 +145,15 @@ static int mdio_mux_mmioreg_probe(struct platform_device *pdev) for_each_available_child_of_node(np, np2) { iprop = of_get_property(np2, "reg", &len); if (!iprop || len != sizeof(uint32_t)) { - dev_err(&pdev->dev, "mdio-mux child node %s is " - "missing a 'reg' property\n", np2->full_name); + dev_err(&pdev->dev, "mdio-mux child node %pOF is " + "missing a 'reg' property\n", np2); of_node_put(np2); return -ENODEV; } if (be32_to_cpup(iprop) & ~s->mask) { - dev_err(&pdev->dev, "mdio-mux child node %s has " + dev_err(&pdev->dev, "mdio-mux child node %pOF has " "a 'reg' value with unmasked bits\n", - np2->full_name); + np2); of_node_put(np2); return -ENODEV; } @@ -162,8 +162,8 @@ static int mdio_mux_mmioreg_probe(struct platform_device *pdev) ret = mdio_mux_init(&pdev->dev, mdio_mux_mmioreg_switch_fn, &s->mux_handle, s, NULL); if (ret) { - dev_err(&pdev->dev, "failed to register mdio-mux bus %s\n", - np->full_name); + dev_err(&pdev->dev, "failed to register mdio-mux bus %pOF\n", + np); return ret; } diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c index c608e1dfaf09..942ceaf3fd3f 100644 --- a/drivers/net/phy/mdio-mux.c +++ b/drivers/net/phy/mdio-mux.c @@ -138,16 +138,16 @@ int mdio_mux_init(struct device *dev, r = of_property_read_u32(child_bus_node, "reg", &v); if (r) { dev_err(dev, - "Error: Failed to find reg for child %s\n", - of_node_full_name(child_bus_node)); + "Error: Failed to find reg for child %pOF\n", + 
child_bus_node); continue; } cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL); if (cb == NULL) { dev_err(dev, - "Error: Failed to allocate memory for child %s\n", - of_node_full_name(child_bus_node)); + "Error: Failed to allocate memory for child %pOF\n", + child_bus_node); ret_val = -ENOMEM; continue; } @@ -157,8 +157,8 @@ int mdio_mux_init(struct device *dev, cb->mii_bus = mdiobus_alloc(); if (!cb->mii_bus) { dev_err(dev, - "Error: Failed to allocate MDIO bus for child %s\n", - of_node_full_name(child_bus_node)); + "Error: Failed to allocate MDIO bus for child %pOF\n", + child_bus_node); ret_val = -ENOMEM; devm_kfree(dev, cb); continue; @@ -174,8 +174,8 @@ int mdio_mux_init(struct device *dev, r = of_mdiobus_register(cb->mii_bus, child_bus_node); if (r) { dev_err(dev, - "Error: Failed to register MDIO bus for child %s\n", - of_node_full_name(child_bus_node)); + "Error: Failed to register MDIO bus for child %pOF\n", + child_bus_node); mdiobus_free(cb->mii_bus); devm_kfree(dev, cb); } else { diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 2df7b62c1a36..b6f9fa670168 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -399,8 +399,7 @@ error: } /* Put PHYs in RESET to save power */ - if (bus->reset_gpiod) - gpiod_set_value_cansleep(bus->reset_gpiod, 1); + gpiod_set_value_cansleep(bus->reset_gpiod, 1); device_del(&bus->dev); return err; @@ -425,8 +424,7 @@ void mdiobus_unregister(struct mii_bus *bus) } /* Put PHYs in RESET to save power */ - if (bus->reset_gpiod) - gpiod_set_value_cansleep(bus->reset_gpiod, 1); + gpiod_set_value_cansleep(bus->reset_gpiod, 1); device_del(&bus->dev); } diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c index 6739b738bbaf..21f75ae244b3 100644 --- a/drivers/net/phy/phy-core.c +++ b/drivers/net/phy/phy-core.c @@ -9,6 +9,186 @@ #include <linux/export.h> #include <linux/phy.h> +const char *phy_speed_to_str(int speed) +{ + switch (speed) { + case SPEED_10: + return "10Mbps"; + case SPEED_100: + return "100Mbps"; + case SPEED_1000: + return "1Gbps"; + case SPEED_2500: + return "2.5Gbps"; + case SPEED_5000: + return "5Gbps"; + case SPEED_10000: + return "10Gbps"; + case SPEED_14000: + return "14Gbps"; + case SPEED_20000: + return "20Gbps"; + case SPEED_25000: + return "25Gbps"; + case SPEED_40000: + return "40Gbps"; + case SPEED_50000: + return "50Gbps"; + case SPEED_56000: + return "56Gbps"; + case SPEED_100000: + return "100Gbps"; + case SPEED_UNKNOWN: + return "Unknown"; + default: + return "Unsupported (update phy-core.c)"; + } +} +EXPORT_SYMBOL_GPL(phy_speed_to_str); + +const char *phy_duplex_to_str(unsigned int duplex) +{ + if (duplex == DUPLEX_HALF) + return "Half"; + if (duplex == DUPLEX_FULL) + return "Full"; + if (duplex == DUPLEX_UNKNOWN) + return "Unknown"; + return "Unsupported (update phy-core.c)"; +} +EXPORT_SYMBOL_GPL(phy_duplex_to_str); + +/* A mapping of all SUPPORTED settings to speed/duplex. This table + * must be grouped by speed and sorted in descending match priority + * - iow, descending speed. 
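With phy_speed_to_str() and phy_duplex_to_str() moved into phy-core.c and exported, a MAC or phylink user can reuse them instead of open-coding the strings. A one-line hypothetical example (ndev and phydev assumed to exist):

        netdev_info(ndev, "link is up: %s/%s\n",
                    phy_speed_to_str(phydev->speed),
                    phy_duplex_to_str(phydev->duplex));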
*/ +static const struct phy_setting settings[] = { + { + .speed = SPEED_10000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + }, + { + .speed = SPEED_10000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, + }, + { + .speed = SPEED_10000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + }, + { + .speed = SPEED_2500, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_2500baseX_Full_BIT, + }, + { + .speed = SPEED_1000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + }, + { + .speed = SPEED_1000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_1000baseX_Full_BIT, + }, + { + .speed = SPEED_1000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + }, + { + .speed = SPEED_1000, + .duplex = DUPLEX_HALF, + .bit = ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + }, + { + .speed = SPEED_100, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_100baseT_Full_BIT, + }, + { + .speed = SPEED_100, + .duplex = DUPLEX_HALF, + .bit = ETHTOOL_LINK_MODE_100baseT_Half_BIT, + }, + { + .speed = SPEED_10, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_10baseT_Full_BIT, + }, + { + .speed = SPEED_10, + .duplex = DUPLEX_HALF, + .bit = ETHTOOL_LINK_MODE_10baseT_Half_BIT, + }, +}; + +/** + * phy_lookup_setting - lookup a PHY setting + * @speed: speed to match + * @duplex: duplex to match + * @mask: allowed link modes + * @maxbit: bit size of link modes + * @exact: an exact match is required + * + * Search the settings array for a setting that matches the speed and + * duplex, and which is supported. + * + * If @exact is unset, either an exact match or %NULL for no match will + * be returned. + * + * If @exact is set, an exact match, the fastest supported setting at + * or below the specified speed, the slowest supported setting, or if + * they all fail, %NULL will be returned. 
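Callers now pass an ethtool link-mode bitmap rather than a SUPPORTED_* word. A minimal sketch of an exact lookup against a hand-built mask (hypothetical snippet using only names introduced by this patch):

        __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
        const struct phy_setting *s;

        __set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
        s = phy_lookup_setting(SPEED_1000, DUPLEX_FULL, mask,
                               __ETHTOOL_LINK_MODE_MASK_NBITS, true);
        if (s)
                pr_info("matched %s\n", phy_speed_to_str(s->speed));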
+ */ +const struct phy_setting * +phy_lookup_setting(int speed, int duplex, const unsigned long *mask, + size_t maxbit, bool exact) +{ + const struct phy_setting *p, *match = NULL, *last = NULL; + int i; + + for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) { + if (p->bit < maxbit && test_bit(p->bit, mask)) { + last = p; + if (p->speed == speed && p->duplex == duplex) { + /* Exact match for speed and duplex */ + match = p; + break; + } else if (!exact) { + if (!match && p->speed <= speed) + /* Candidate */ + match = p; + + if (p->speed < speed) + break; + } + } + } + + if (!match && !exact) + match = last; + + return match; +} +EXPORT_SYMBOL_GPL(phy_lookup_setting); + +size_t phy_speeds(unsigned int *speeds, size_t size, + unsigned long *mask, size_t maxbit) +{ + size_t count; + int i; + + for (i = 0, count = 0; i < ARRAY_SIZE(settings) && count < size; i++) + if (settings[i].bit < maxbit && + test_bit(settings[i].bit, mask) && + (count == 0 || speeds[count - 1] != settings[i].speed)) + speeds[count++] = settings[i].speed; + + return count; +} + static void mmd_phy_indirect(struct mii_bus *bus, int phy_addr, int devad, u16 regnum) { diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 5068c582d502..dae13f028c84 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -30,7 +30,6 @@ #include <linux/ethtool.h> #include <linux/phy.h> #include <linux/phy_led_triggers.h> -#include <linux/timer.h> #include <linux/workqueue.h> #include <linux/mdio.h> #include <linux/io.h> @@ -39,42 +38,6 @@ #include <asm/irq.h> -static const char *phy_speed_to_str(int speed) -{ - switch (speed) { - case SPEED_10: - return "10Mbps"; - case SPEED_100: - return "100Mbps"; - case SPEED_1000: - return "1Gbps"; - case SPEED_2500: - return "2.5Gbps"; - case SPEED_5000: - return "5Gbps"; - case SPEED_10000: - return "10Gbps"; - case SPEED_14000: - return "14Gbps"; - case SPEED_20000: - return "20Gbps"; - case SPEED_25000: - return "25Gbps"; - case SPEED_40000: - return "40Gbps"; - case SPEED_50000: - return "50Gbps"; - case SPEED_56000: - return "56Gbps"; - case SPEED_100000: - return "100Gbps"; - case SPEED_UNKNOWN: - return "Unknown"; - default: - return "Unsupported (update phy.c)"; - } -} - #define PHY_STATE_STR(_state) \ case PHY_##_state: \ return __stringify(_state); \ @@ -110,7 +73,7 @@ void phy_print_status(struct phy_device *phydev) netdev_info(phydev->attached_dev, "Link is Up - %s/%s - flow control %s\n", phy_speed_to_str(phydev->speed), - DUPLEX_FULL == phydev->duplex ? "Full" : "Half", + phy_duplex_to_str(phydev->duplex), phydev->pause ? "rx/tx" : "off"); } else { netdev_info(phydev->attached_dev, "Link is Down\n"); @@ -194,123 +157,6 @@ int phy_aneg_done(struct phy_device *phydev) } EXPORT_SYMBOL(phy_aneg_done); -/* A structure for mapping a particular speed and duplex - * combination to a particular SUPPORTED and ADVERTISED value - */ -struct phy_setting { - int speed; - int duplex; - u32 setting; -}; - -/* A mapping of all SUPPORTED settings to speed/duplex. This table - * must be grouped by speed and sorted in descending match priority - * - iow, descending speed. 
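phy_speeds() above relies on the descending-speed ordering of the settings table to emit each distinct speed exactly once. Bridging the legacy u32 supported word into the new mask API works the same way phy_supported_speeds() does below; a short sketch:

        unsigned int speed[5];
        unsigned long mask = phydev->supported; /* u32 widened to long */
        size_t i, n;

        n = phy_speeds(speed, ARRAY_SIZE(speed), &mask, BITS_PER_LONG);
        for (i = 0; i < n; i++)
                pr_info("supports %s\n", phy_speed_to_str(speed[i]));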
*/ -static const struct phy_setting settings[] = { - { - .speed = SPEED_10000, - .duplex = DUPLEX_FULL, - .setting = SUPPORTED_10000baseKR_Full, - }, - { - .speed = SPEED_10000, - .duplex = DUPLEX_FULL, - .setting = SUPPORTED_10000baseKX4_Full, - }, - { - .speed = SPEED_10000, - .duplex = DUPLEX_FULL, - .setting = SUPPORTED_10000baseT_Full, - }, - { - .speed = SPEED_2500, - .duplex = DUPLEX_FULL, - .setting = SUPPORTED_2500baseX_Full, - }, - { - .speed = SPEED_1000, - .duplex = DUPLEX_FULL, - .setting = SUPPORTED_1000baseKX_Full, - }, - { - .speed = SPEED_1000, - .duplex = DUPLEX_FULL, - .setting = SUPPORTED_1000baseT_Full, - }, - { - .speed = SPEED_1000, - .duplex = DUPLEX_HALF, - .setting = SUPPORTED_1000baseT_Half, - }, - { - .speed = SPEED_100, - .duplex = DUPLEX_FULL, - .setting = SUPPORTED_100baseT_Full, - }, - { - .speed = SPEED_100, - .duplex = DUPLEX_HALF, - .setting = SUPPORTED_100baseT_Half, - }, - { - .speed = SPEED_10, - .duplex = DUPLEX_FULL, - .setting = SUPPORTED_10baseT_Full, - }, - { - .speed = SPEED_10, - .duplex = DUPLEX_HALF, - .setting = SUPPORTED_10baseT_Half, - }, -}; - -/** - * phy_lookup_setting - lookup a PHY setting - * @speed: speed to match - * @duplex: duplex to match - * @features: allowed link modes - * @exact: an exact match is required - * - * Search the settings array for a setting that matches the speed and - * duplex, and which is supported. - * - * If @exact is unset, either an exact match or %NULL for no match will - * be returned. - * - * If @exact is set, an exact match, the fastest supported setting at - * or below the specified speed, the slowest supported setting, or if - * they all fail, %NULL will be returned. - */ -static const struct phy_setting * -phy_lookup_setting(int speed, int duplex, u32 features, bool exact) -{ - const struct phy_setting *p, *match = NULL, *last = NULL; - int i; - - for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) { - if (p->setting & features) { - last = p; - if (p->speed == speed && p->duplex == duplex) { - /* Exact match for speed and duplex */ - match = p; - break; - } else if (!exact) { - if (!match && p->speed <= speed) - /* Candidate */ - match = p; - - if (p->speed < speed) - break; - } - } - } - - if (!match && !exact) - match = last; - - return match; -} - /** * phy_find_valid - find a PHY setting that matches the requested parameters * @speed: desired speed @@ -327,7 +173,9 @@ phy_lookup_setting(int speed, int duplex, u32 features, bool exact) static const struct phy_setting * phy_find_valid(int speed, int duplex, u32 supported) { - return phy_lookup_setting(speed, duplex, supported, false); + unsigned long mask = supported; + + return phy_lookup_setting(speed, duplex, &mask, BITS_PER_LONG, false); } /** @@ -344,16 +192,9 @@ unsigned int phy_supported_speeds(struct phy_device *phy, unsigned int *speeds, unsigned int size) { - unsigned int count = 0; - unsigned int idx = 0; + unsigned long supported = phy->supported; - for (idx = 0; idx < ARRAY_SIZE(settings) && count < size; idx++) - /* Assumes settings are grouped by speed */ - if ((settings[idx].setting & phy->supported) && - (count == 0 || speeds[count - 1] != settings[idx].speed)) - speeds[count++] = settings[idx].speed; - - return count; + return phy_speeds(speeds, size, &supported, BITS_PER_LONG); } /** @@ -367,7 +208,9 @@ unsigned int phy_supported_speeds(struct phy_device *phy, */ static inline bool phy_check_valid(int speed, int duplex, u32 features) { - return !!phy_lookup_setting(speed, duplex, features, true); + unsigned long mask = 
features; + + return !!phy_lookup_setting(speed, duplex, &mask, BITS_PER_LONG, true); } /** @@ -705,14 +548,15 @@ EXPORT_SYMBOL(phy_start_aneg); * * Description: The PHY infrastructure can run a state machine * which tracks whether the PHY is starting up, negotiating, - * etc. This function starts the timer which tracks the state - * of the PHY. If you want to maintain your own state machine, + * etc. This function starts the delayed workqueue which tracks + * the state of the PHY. If you want to maintain your own state machine, * do not call this function. */ void phy_start_machine(struct phy_device *phydev) { queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ); } +EXPORT_SYMBOL_GPL(phy_start_machine); /** * phy_trigger_machine - trigger the state machine to run @@ -737,9 +581,9 @@ void phy_trigger_machine(struct phy_device *phydev, bool sync) * phy_stop_machine - stop the PHY state machine tracking * @phydev: target phy_device struct * - * Description: Stops the state machine timer, sets the state to UP - * (unless it wasn't up yet). This function must be called BEFORE - * phy_detach. + * Description: Stops the state machine delayed workqueue, sets the + * state to UP (unless it wasn't up yet). This function must be + * called BEFORE phy_detach. */ void phy_stop_machine(struct phy_device *phydev) { @@ -1022,9 +866,15 @@ void phy_start(struct phy_device *phydev) } EXPORT_SYMBOL(phy_start); -static void phy_adjust_link(struct phy_device *phydev) +static void phy_link_up(struct phy_device *phydev) { - phydev->adjust_link(phydev->attached_dev); + phydev->phy_link_change(phydev, true, true); + phy_led_trigger_change_speed(phydev); +} + +static void phy_link_down(struct phy_device *phydev, bool do_carrier) +{ + phydev->phy_link_change(phydev, false, do_carrier); phy_led_trigger_change_speed(phydev); } @@ -1069,8 +919,7 @@ void phy_state_machine(struct work_struct *work) /* If the link is down, give up on negotiation for now */ if (!phydev->link) { phydev->state = PHY_NOLINK; - netif_carrier_off(phydev->attached_dev); - phy_adjust_link(phydev); + phy_link_down(phydev, true); break; } @@ -1082,9 +931,7 @@ void phy_state_machine(struct work_struct *work) /* If AN is done, we're running */ if (err > 0) { phydev->state = PHY_RUNNING; - netif_carrier_on(phydev->attached_dev); - phy_adjust_link(phydev); - + phy_link_up(phydev); } else if (0 == phydev->link_timeout--) needs_aneg = true; break; @@ -1109,8 +956,7 @@ void phy_state_machine(struct work_struct *work) } } phydev->state = PHY_RUNNING; - netif_carrier_on(phydev->attached_dev); - phy_adjust_link(phydev); + phy_link_up(phydev); } break; case PHY_FORCING: @@ -1120,13 +966,12 @@ void phy_state_machine(struct work_struct *work) if (phydev->link) { phydev->state = PHY_RUNNING; - netif_carrier_on(phydev->attached_dev); + phy_link_up(phydev); } else { if (0 == phydev->link_timeout--) needs_aneg = true; + phy_link_down(phydev, false); } - - phy_adjust_link(phydev); break; case PHY_RUNNING: /* Only register a CHANGE if we are polling and link changed @@ -1158,14 +1003,12 @@ void phy_state_machine(struct work_struct *work) if (phydev->link) { phydev->state = PHY_RUNNING; - netif_carrier_on(phydev->attached_dev); + phy_link_up(phydev); } else { phydev->state = PHY_NOLINK; - netif_carrier_off(phydev->attached_dev); + phy_link_down(phydev, true); } - phy_adjust_link(phydev); - if (phy_interrupt_is_valid(phydev)) err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED); @@ -1173,8 +1016,7 @@ void phy_state_machine(struct work_struct 
*work) case PHY_HALTED: if (phydev->link) { phydev->link = 0; - netif_carrier_off(phydev->attached_dev); - phy_adjust_link(phydev); + phy_link_down(phydev, true); do_suspend = true; } break; @@ -1194,11 +1036,11 @@ void phy_state_machine(struct work_struct *work) if (phydev->link) { phydev->state = PHY_RUNNING; - netif_carrier_on(phydev->attached_dev); + phy_link_up(phydev); } else { phydev->state = PHY_NOLINK; + phy_link_down(phydev, false); } - phy_adjust_link(phydev); } else { phydev->state = PHY_AN; phydev->link_timeout = PHY_AN_TIMEOUT; @@ -1210,11 +1052,11 @@ void phy_state_machine(struct work_struct *work) if (phydev->link) { phydev->state = PHY_RUNNING; - netif_carrier_on(phydev->attached_dev); + phy_link_up(phydev); } else { phydev->state = PHY_NOLINK; + phy_link_down(phydev, false); } - phy_adjust_link(phydev); } break; } @@ -1229,9 +1071,10 @@ void phy_state_machine(struct work_struct *work) if (err < 0) phy_error(phydev); - phydev_dbg(phydev, "PHY state change %s -> %s\n", - phy_state_to_str(old_state), - phy_state_to_str(phydev->state)); + if (old_state != phydev->state) + phydev_dbg(phydev, "PHY state change %s -> %s\n", + phy_state_to_str(old_state), + phy_state_to_str(phydev->state)); /* Only re-schedule a PHY state machine change if we are polling the * PHY, if PHY_IGNORE_INTERRUPT is set, then we will be moving diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 1790f7fec125..9493fb369682 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -688,6 +688,19 @@ struct phy_device *phy_find_first(struct mii_bus *bus) } EXPORT_SYMBOL(phy_find_first); +static void phy_link_change(struct phy_device *phydev, bool up, bool do_carrier) +{ + struct net_device *netdev = phydev->attached_dev; + + if (do_carrier) { + if (up) + netif_carrier_on(netdev); + else + netif_carrier_off(netdev); + } + phydev->adjust_link(netdev); +} + /** * phy_prepare_link - prepares the PHY layer to monitor link status * @phydev: target phy_device struct @@ -951,6 +964,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, goto error; } + phydev->phy_link_change = phy_link_change; phydev->attached_dev = dev; dev->phydev = phydev; @@ -1070,6 +1084,7 @@ void phy_detach(struct phy_device *phydev) phydev->attached_dev->phydev = NULL; phydev->attached_dev = NULL; phy_suspend(phydev); + phydev->phylink = NULL; phy_led_triggers_unregister(phydev); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c new file mode 100644 index 000000000000..32917bdd1432 --- /dev/null +++ b/drivers/net/phy/phylink.c @@ -0,0 +1,1462 @@ +/* + * phylink models the MAC to optional PHY connection, supporting + * technologies such as SFP cages where the PHY is hot-pluggable. + * + * Copyright (C) 2015 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
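phylink drives the MAC through an ops table supplied by the driver. Judging only from the call sites in this file (validate, mac_config, mac_an_restart), a skeleton would look roughly like the following; the signatures are inferred from those calls, and the real phylink_mac_ops in <linux/phylink.h> also carries link-state and link up/down hooks not shown here:

        static void example_validate(struct net_device *ndev,
                                     unsigned long *supported,
                                     struct phylink_link_state *state)
        {
                /* Clear the bits in 'supported' that the MAC cannot do;
                 * phylink_validate() fails if the result is empty.
                 */
        }

        static void example_mac_config(struct net_device *ndev,
                                       unsigned int mode,
                                       const struct phylink_link_state *state)
        {
                /* Program MAC speed/duplex/pause; 'mode' is MLO_AN_xxx. */
        }

        static void example_mac_an_restart(struct net_device *ndev)
        {
                /* Restart in-band (802.3z) autonegotiation. */
        }

        static const struct phylink_mac_ops example_ops = {
                .validate       = example_validate,
                .mac_config     = example_mac_config,
                .mac_an_restart = example_mac_an_restart,
        };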
+ */ +#include <linux/ethtool.h> +#include <linux/export.h> +#include <linux/gpio/consumer.h> +#include <linux/netdevice.h> +#include <linux/of.h> +#include <linux/of_mdio.h> +#include <linux/phy.h> +#include <linux/phy_fixed.h> +#include <linux/phylink.h> +#include <linux/rtnetlink.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> + +#include "sfp.h" +#include "swphy.h" + +#define SUPPORTED_INTERFACES \ + (SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_FIBRE | \ + SUPPORTED_BNC | SUPPORTED_AUI | SUPPORTED_Backplane) +#define ADVERTISED_INTERFACES \ + (ADVERTISED_TP | ADVERTISED_MII | ADVERTISED_FIBRE | \ + ADVERTISED_BNC | ADVERTISED_AUI | ADVERTISED_Backplane) + +enum { + PHYLINK_DISABLE_STOPPED, + PHYLINK_DISABLE_LINK, +}; + +struct phylink { + struct net_device *netdev; + const struct phylink_mac_ops *ops; + + unsigned long phylink_disable_state; /* bitmask of disables */ + struct phy_device *phydev; + phy_interface_t link_interface; /* PHY_INTERFACE_xxx */ + u8 link_an_mode; /* MLO_AN_xxx */ + u8 link_port; /* The current non-phy ethtool port */ + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + + /* The link configuration settings */ + struct phylink_link_state link_config; + struct gpio_desc *link_gpio; + + struct mutex state_mutex; + struct phylink_link_state phy_state; + struct work_struct resolve; + + bool mac_link_dropped; + + struct sfp_bus *sfp_bus; +}; + +static inline void linkmode_zero(unsigned long *dst) +{ + bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static inline void linkmode_copy(unsigned long *dst, const unsigned long *src) +{ + bitmap_copy(dst, src, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static inline void linkmode_and(unsigned long *dst, const unsigned long *a, + const unsigned long *b) +{ + bitmap_and(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static inline void linkmode_or(unsigned long *dst, const unsigned long *a, + const unsigned long *b) +{ + bitmap_or(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static inline bool linkmode_empty(const unsigned long *src) +{ + return bitmap_empty(src, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +void phylink_set_port_modes(unsigned long *mask) +{ + phylink_set(mask, TP); + phylink_set(mask, AUI); + phylink_set(mask, MII); + phylink_set(mask, FIBRE); + phylink_set(mask, BNC); + phylink_set(mask, Backplane); +} +EXPORT_SYMBOL_GPL(phylink_set_port_modes); + +static int phylink_is_empty_linkmode(const unsigned long *linkmode) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp) = { 0, }; + + phylink_set_port_modes(tmp); + phylink_set(tmp, Autoneg); + phylink_set(tmp, Pause); + phylink_set(tmp, Asym_Pause); + + bitmap_andnot(tmp, linkmode, tmp, __ETHTOOL_LINK_MODE_MASK_NBITS); + + return linkmode_empty(tmp); +} + +static const char *phylink_an_mode_str(unsigned int mode) +{ + static const char *modestr[] = { + [MLO_AN_PHY] = "phy", + [MLO_AN_FIXED] = "fixed", + [MLO_AN_SGMII] = "SGMII", + [MLO_AN_8023Z] = "802.3z", + }; + + return mode < ARRAY_SIZE(modestr) ? modestr[mode] : "unknown"; +} + +static int phylink_validate(struct phylink *pl, unsigned long *supported, + struct phylink_link_state *state) +{ + pl->ops->validate(pl->netdev, supported, state); + + return phylink_is_empty_linkmode(supported) ? 
-EINVAL : 0; +} + +static int phylink_parse_fixedlink(struct phylink *pl, struct device_node *np) +{ + struct device_node *fixed_node; + const struct phy_setting *s; + struct gpio_desc *desc; + const __be32 *fixed_prop; + u32 speed; + int ret, len; + + fixed_node = of_get_child_by_name(np, "fixed-link"); + if (fixed_node) { + ret = of_property_read_u32(fixed_node, "speed", &speed); + + pl->link_config.speed = speed; + pl->link_config.duplex = DUPLEX_HALF; + + if (of_property_read_bool(fixed_node, "full-duplex")) + pl->link_config.duplex = DUPLEX_FULL; + + /* We treat the "pause" and "asym-pause" terminology as + * defining the link partner's ability. */ + if (of_property_read_bool(fixed_node, "pause")) + pl->link_config.pause |= MLO_PAUSE_SYM; + if (of_property_read_bool(fixed_node, "asym-pause")) + pl->link_config.pause |= MLO_PAUSE_ASYM; + + if (ret == 0) { + desc = fwnode_get_named_gpiod(&fixed_node->fwnode, + "link-gpios", 0, + GPIOD_IN, "?"); + + if (!IS_ERR(desc)) + pl->link_gpio = desc; + else if (desc == ERR_PTR(-EPROBE_DEFER)) + ret = -EPROBE_DEFER; + } + of_node_put(fixed_node); + + if (ret) + return ret; + } else { + fixed_prop = of_get_property(np, "fixed-link", &len); + if (!fixed_prop) { + netdev_err(pl->netdev, "broken fixed-link?\n"); + return -EINVAL; + } + if (len == 5 * sizeof(*fixed_prop)) { + pl->link_config.duplex = be32_to_cpu(fixed_prop[1]) ? + DUPLEX_FULL : DUPLEX_HALF; + pl->link_config.speed = be32_to_cpu(fixed_prop[2]); + if (be32_to_cpu(fixed_prop[3])) + pl->link_config.pause |= MLO_PAUSE_SYM; + if (be32_to_cpu(fixed_prop[4])) + pl->link_config.pause |= MLO_PAUSE_ASYM; + } + } + + if (pl->link_config.speed > SPEED_1000 && + pl->link_config.duplex != DUPLEX_FULL) + netdev_warn(pl->netdev, "fixed link specifies half duplex for %dMbps link?\n", + pl->link_config.speed); + + bitmap_fill(pl->supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_copy(pl->link_config.advertising, pl->supported); + phylink_validate(pl, pl->supported, &pl->link_config); + + s = phy_lookup_setting(pl->link_config.speed, pl->link_config.duplex, + pl->supported, + __ETHTOOL_LINK_MODE_MASK_NBITS, true); + linkmode_zero(pl->supported); + phylink_set(pl->supported, MII); + if (s) { + __set_bit(s->bit, pl->supported); + } else { + netdev_warn(pl->netdev, "fixed link %s duplex %dMbps not recognised\n", + pl->link_config.duplex == DUPLEX_FULL ? 
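For reference, the two fixed-link bindings accepted by this parser look as follows in a device tree; the fragment is illustrative, and the legacy form packs <emulated-phy-id full-duplex speed pause asym-pause> into five cells, matching the fixed_prop[] indices read above:

/* New-style child node:
 *
 *	fixed-link {
 *		speed = <1000>;
 *		full-duplex;
 *	};
 *
 * Legacy 5-cell property, here 1000Mbps full duplex, no pause:
 *
 *	fixed-link = <0 1 1000 0 0>;
 */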
"full" : "half", + pl->link_config.speed); + } + + linkmode_and(pl->link_config.advertising, pl->link_config.advertising, + pl->supported); + + pl->link_config.link = 1; + pl->link_config.an_complete = 1; + + return 0; +} + +static int phylink_parse_mode(struct phylink *pl, struct device_node *np) +{ + struct device_node *dn; + const char *managed; + + dn = of_get_child_by_name(np, "fixed-link"); + if (dn || of_find_property(np, "fixed-link", NULL)) + pl->link_an_mode = MLO_AN_FIXED; + of_node_put(dn); + + if (of_property_read_string(np, "managed", &managed) == 0 && + strcmp(managed, "in-band-status") == 0) { + if (pl->link_an_mode == MLO_AN_FIXED) { + netdev_err(pl->netdev, + "can't use both fixed-link and in-band-status\n"); + return -EINVAL; + } + + linkmode_zero(pl->supported); + phylink_set(pl->supported, MII); + phylink_set(pl->supported, Autoneg); + phylink_set(pl->supported, Asym_Pause); + phylink_set(pl->supported, Pause); + pl->link_config.an_enabled = true; + + switch (pl->link_config.interface) { + case PHY_INTERFACE_MODE_SGMII: + phylink_set(pl->supported, 10baseT_Half); + phylink_set(pl->supported, 10baseT_Full); + phylink_set(pl->supported, 100baseT_Half); + phylink_set(pl->supported, 100baseT_Full); + phylink_set(pl->supported, 1000baseT_Half); + phylink_set(pl->supported, 1000baseT_Full); + pl->link_an_mode = MLO_AN_SGMII; + break; + + case PHY_INTERFACE_MODE_1000BASEX: + phylink_set(pl->supported, 1000baseX_Full); + pl->link_an_mode = MLO_AN_8023Z; + break; + + case PHY_INTERFACE_MODE_2500BASEX: + phylink_set(pl->supported, 2500baseX_Full); + pl->link_an_mode = MLO_AN_8023Z; + break; + + case PHY_INTERFACE_MODE_10GKR: + phylink_set(pl->supported, 10baseT_Half); + phylink_set(pl->supported, 10baseT_Full); + phylink_set(pl->supported, 100baseT_Half); + phylink_set(pl->supported, 100baseT_Full); + phylink_set(pl->supported, 1000baseT_Half); + phylink_set(pl->supported, 1000baseT_Full); + phylink_set(pl->supported, 1000baseX_Full); + phylink_set(pl->supported, 10000baseKR_Full); + phylink_set(pl->supported, 10000baseCR_Full); + phylink_set(pl->supported, 10000baseSR_Full); + phylink_set(pl->supported, 10000baseLR_Full); + phylink_set(pl->supported, 10000baseLRM_Full); + phylink_set(pl->supported, 10000baseER_Full); + pl->link_an_mode = MLO_AN_SGMII; + break; + + default: + netdev_err(pl->netdev, + "incorrect link mode %s for in-band status\n", + phy_modes(pl->link_config.interface)); + return -EINVAL; + } + + linkmode_copy(pl->link_config.advertising, pl->supported); + + if (phylink_validate(pl, pl->supported, &pl->link_config)) { + netdev_err(pl->netdev, + "failed to validate link configuration for in-band status\n"); + return -EINVAL; + } + } + + return 0; +} + +static void phylink_mac_config(struct phylink *pl, + const struct phylink_link_state *state) +{ + netdev_dbg(pl->netdev, + "%s: mode=%s/%s/%s/%s adv=%*pb pause=%02x link=%u an=%u\n", + __func__, phylink_an_mode_str(pl->link_an_mode), + phy_modes(state->interface), + phy_speed_to_str(state->speed), + phy_duplex_to_str(state->duplex), + __ETHTOOL_LINK_MODE_MASK_NBITS, state->advertising, + state->pause, state->link, state->an_enabled); + + pl->ops->mac_config(pl->netdev, pl->link_an_mode, state); +} + +static void phylink_mac_an_restart(struct phylink *pl) +{ + if (pl->link_config.an_enabled && + (pl->link_config.interface == PHY_INTERFACE_MODE_1000BASEX || + pl->link_config.interface == PHY_INTERFACE_MODE_2500BASEX)) + pl->ops->mac_an_restart(pl->netdev); +} + +static int phylink_get_mac_state(struct phylink *pl, 
struct phylink_link_state *state) +{ + struct net_device *ndev = pl->netdev; + + linkmode_copy(state->advertising, pl->link_config.advertising); + linkmode_zero(state->lp_advertising); + state->interface = pl->link_config.interface; + state->an_enabled = pl->link_config.an_enabled; + state->link = 1; + + return pl->ops->mac_link_state(ndev, state); +} + +/* The fixed state is... fixed except for the link state, + * which may be determined by a GPIO. + */ +static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_state *state) +{ + *state = pl->link_config; + if (pl->link_gpio) + state->link = !!gpiod_get_value(pl->link_gpio); +} + +/* Flow control is resolved according to our and the link partner's + * advertisements using the following drawn from the 802.3 specs: + * Local device Link partner + * Pause AsymDir Pause AsymDir Result + * 1 X 1 X TX+RX + * 0 1 1 1 RX + * 1 1 0 1 TX + */ +static void phylink_resolve_flow(struct phylink *pl, + struct phylink_link_state *state) +{ + int new_pause = 0; + + if (pl->link_config.pause & MLO_PAUSE_AN) { + int pause = 0; + + if (phylink_test(pl->link_config.advertising, Pause)) + pause |= MLO_PAUSE_SYM; + if (phylink_test(pl->link_config.advertising, Asym_Pause)) + pause |= MLO_PAUSE_ASYM; + + pause &= state->pause; + + if (pause & MLO_PAUSE_SYM) + new_pause = MLO_PAUSE_TX | MLO_PAUSE_RX; + else if (pause & MLO_PAUSE_ASYM) + new_pause = state->pause & MLO_PAUSE_SYM ? + MLO_PAUSE_RX : MLO_PAUSE_TX; + } else { + new_pause = pl->link_config.pause & MLO_PAUSE_TXRX_MASK; + } + + state->pause &= ~MLO_PAUSE_TXRX_MASK; + state->pause |= new_pause; +} + +static const char *phylink_pause_to_str(int pause) +{ + switch (pause & MLO_PAUSE_TXRX_MASK) { + case MLO_PAUSE_TX | MLO_PAUSE_RX: + return "rx/tx"; + case MLO_PAUSE_TX: + return "tx"; + case MLO_PAUSE_RX: + return "rx"; + default: + return "off"; + } +} + +static void phylink_resolve(struct work_struct *w) +{ + struct phylink *pl = container_of(w, struct phylink, resolve); + struct phylink_link_state link_state; + struct net_device *ndev = pl->netdev; + + mutex_lock(&pl->state_mutex); + if (pl->phylink_disable_state) { + pl->mac_link_dropped = false; + link_state.link = false; + } else if (pl->mac_link_dropped) { + link_state.link = false; + } else { + switch (pl->link_an_mode) { + case MLO_AN_PHY: + link_state = pl->phy_state; + phylink_resolve_flow(pl, &link_state); + phylink_mac_config(pl, &link_state); + break; + + case MLO_AN_FIXED: + phylink_get_fixed_state(pl, &link_state); + phylink_mac_config(pl, &link_state); + break; + + case MLO_AN_SGMII: + phylink_get_mac_state(pl, &link_state); + if (pl->phydev) { + bool changed = false; + + link_state.link = link_state.link && + pl->phy_state.link; + + if (pl->phy_state.interface != + link_state.interface) { + link_state.interface = pl->phy_state.interface; + changed = true; + } + + /* Propagate the flow control from the PHY + * to the MAC. Also propagate the interface + * if changed.
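A worked example of the flow-control table, with hypothetical values: the local end advertises Pause and Asym_Pause, while the link partner advertises only Asym_Pause:

int local = MLO_PAUSE_SYM | MLO_PAUSE_ASYM;	/* from our advertising */
int partner = MLO_PAUSE_ASYM;			/* state->pause from the link */
int pause = local & partner;			/* == MLO_PAUSE_ASYM */

/* SYM did not survive the AND, ASYM did, and the partner's SYM bit is
 * clear, so new_pause becomes MLO_PAUSE_TX: we may transmit pause
 * frames but must not act on received ones (row three of the table). */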
+ */ + if (pl->phy_state.link || changed) { + link_state.pause |= pl->phy_state.pause; + phylink_resolve_flow(pl, &link_state); + + phylink_mac_config(pl, &link_state); + } + } + break; + + case MLO_AN_8023Z: + phylink_get_mac_state(pl, &link_state); + break; + } + } + + if (link_state.link != netif_carrier_ok(ndev)) { + if (!link_state.link) { + netif_carrier_off(ndev); + pl->ops->mac_link_down(ndev, pl->link_an_mode); + netdev_info(ndev, "Link is Down\n"); + } else { + pl->ops->mac_link_up(ndev, pl->link_an_mode, + pl->phydev); + + netif_carrier_on(ndev); + + netdev_info(ndev, + "Link is Up - %s/%s - flow control %s\n", + phy_speed_to_str(link_state.speed), + phy_duplex_to_str(link_state.duplex), + phylink_pause_to_str(link_state.pause)); + } + } + if (!link_state.link && pl->mac_link_dropped) { + pl->mac_link_dropped = false; + queue_work(system_power_efficient_wq, &pl->resolve); + } + mutex_unlock(&pl->state_mutex); +} + +static void phylink_run_resolve(struct phylink *pl) +{ + if (!pl->phylink_disable_state) + queue_work(system_power_efficient_wq, &pl->resolve); +} + +static const struct sfp_upstream_ops sfp_phylink_ops; + +static int phylink_register_sfp(struct phylink *pl, struct device_node *np) +{ + struct device_node *sfp_np; + + sfp_np = of_parse_phandle(np, "sfp", 0); + if (!sfp_np) + return 0; + + pl->sfp_bus = sfp_register_upstream(sfp_np, pl->netdev, pl, + &sfp_phylink_ops); + if (!pl->sfp_bus) + return -ENOMEM; + + return 0; +} + +struct phylink *phylink_create(struct net_device *ndev, struct device_node *np, + phy_interface_t iface, const struct phylink_mac_ops *ops) +{ + struct phylink *pl; + int ret; + + pl = kzalloc(sizeof(*pl), GFP_KERNEL); + if (!pl) + return ERR_PTR(-ENOMEM); + + mutex_init(&pl->state_mutex); + INIT_WORK(&pl->resolve, phylink_resolve); + pl->netdev = ndev; + pl->phy_state.interface = iface; + pl->link_interface = iface; + pl->link_port = PORT_MII; + pl->link_config.interface = iface; + pl->link_config.pause = MLO_PAUSE_AN; + pl->link_config.speed = SPEED_UNKNOWN; + pl->link_config.duplex = DUPLEX_UNKNOWN; + pl->ops = ops; + __set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); + + bitmap_fill(pl->supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_copy(pl->link_config.advertising, pl->supported); + phylink_validate(pl, pl->supported, &pl->link_config); + + ret = phylink_parse_mode(pl, np); + if (ret < 0) { + kfree(pl); + return ERR_PTR(ret); + } + + if (pl->link_an_mode == MLO_AN_FIXED) { + ret = phylink_parse_fixedlink(pl, np); + if (ret < 0) { + kfree(pl); + return ERR_PTR(ret); + } + } + + ret = phylink_register_sfp(pl, np); + if (ret < 0) { + kfree(pl); + return ERR_PTR(ret); + } + + return pl; +} +EXPORT_SYMBOL_GPL(phylink_create); + +void phylink_destroy(struct phylink *pl) +{ + if (pl->sfp_bus) + sfp_unregister_upstream(pl->sfp_bus); + + cancel_work_sync(&pl->resolve); + kfree(pl); +} +EXPORT_SYMBOL_GPL(phylink_destroy); + +void phylink_phy_change(struct phy_device *phydev, bool up, bool do_carrier) +{ + struct phylink *pl = phydev->phylink; + + mutex_lock(&pl->state_mutex); + pl->phy_state.speed = phydev->speed; + pl->phy_state.duplex = phydev->duplex; + pl->phy_state.pause = MLO_PAUSE_NONE; + if (phydev->pause) + pl->phy_state.pause |= MLO_PAUSE_SYM; + if (phydev->asym_pause) + pl->phy_state.pause |= MLO_PAUSE_ASYM; + pl->phy_state.interface = phydev->interface; + pl->phy_state.link = up; + mutex_unlock(&pl->state_mutex); + + phylink_run_resolve(pl); + + netdev_dbg(pl->netdev, "phy link %s %s/%s/%s\n", up ? 
"up" : "down", + phy_modes(phydev->interface), + phy_speed_to_str(phydev->speed), + phy_duplex_to_str(phydev->duplex)); +} + +static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy) +{ + struct phylink_link_state config; + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + u32 advertising; + int ret; + + memset(&config, 0, sizeof(config)); + ethtool_convert_legacy_u32_to_link_mode(supported, phy->supported); + ethtool_convert_legacy_u32_to_link_mode(config.advertising, + phy->advertising); + config.interface = pl->link_config.interface; + + /* + * This is the new way of dealing with flow control for PHYs, + * as described by Timur Tabi in commit 529ed1275263 ("net: phy: + * phy drivers should not set SUPPORTED_[Asym_]Pause") except + * using our validate call to the MAC, we rely upon the MAC + * clearing the bits from both supported and advertising fields. + */ + if (phylink_test(supported, Pause)) + phylink_set(config.advertising, Pause); + if (phylink_test(supported, Asym_Pause)) + phylink_set(config.advertising, Asym_Pause); + + ret = phylink_validate(pl, supported, &config); + if (ret) + return ret; + + phy->phylink = pl; + phy->phy_link_change = phylink_phy_change; + + netdev_info(pl->netdev, + "PHY [%s] driver [%s]\n", dev_name(&phy->mdio.dev), + phy->drv->name); + + mutex_lock(&phy->lock); + mutex_lock(&pl->state_mutex); + pl->netdev->phydev = phy; + pl->phydev = phy; + linkmode_copy(pl->supported, supported); + linkmode_copy(pl->link_config.advertising, config.advertising); + + /* Restrict the phy advertisment according to the MAC support. */ + ethtool_convert_link_mode_to_legacy_u32(&advertising, config.advertising); + phy->advertising = advertising; + mutex_unlock(&pl->state_mutex); + mutex_unlock(&phy->lock); + + netdev_dbg(pl->netdev, + "phy: setting supported %*pb advertising 0x%08x\n", + __ETHTOOL_LINK_MODE_MASK_NBITS, pl->supported, + phy->advertising); + + phy_start_machine(phy); + if (phy->irq > 0) + phy_start_interrupts(phy); + + return 0; +} + +int phylink_connect_phy(struct phylink *pl, struct phy_device *phy) +{ + int ret; + + ret = phy_attach_direct(pl->netdev, phy, 0, pl->link_interface); + if (ret) + return ret; + + ret = phylink_bringup_phy(pl, phy); + if (ret) + phy_detach(phy); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_connect_phy); + +int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn) +{ + struct device_node *phy_node; + struct phy_device *phy_dev; + int ret; + + /* Fixed links are handled without needing a PHY */ + if (pl->link_an_mode == MLO_AN_FIXED) + return 0; + + phy_node = of_parse_phandle(dn, "phy-handle", 0); + if (!phy_node) + phy_node = of_parse_phandle(dn, "phy", 0); + if (!phy_node) + phy_node = of_parse_phandle(dn, "phy-device", 0); + + if (!phy_node) { + if (pl->link_an_mode == MLO_AN_PHY) { + netdev_err(pl->netdev, "unable to find PHY node\n"); + return -ENODEV; + } + return 0; + } + + phy_dev = of_phy_attach(pl->netdev, phy_node, 0, pl->link_interface); + /* We're done with the phy_node handle */ + of_node_put(phy_node); + + if (!phy_dev) + return -ENODEV; + + ret = phylink_bringup_phy(pl, phy_dev); + if (ret) + phy_detach(phy_dev); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_of_phy_connect); + +void phylink_disconnect_phy(struct phylink *pl) +{ + struct phy_device *phy; + + WARN_ON(!lockdep_rtnl_is_held()); + + phy = pl->phydev; + if (phy) { + mutex_lock(&phy->lock); + mutex_lock(&pl->state_mutex); + pl->netdev->phydev = NULL; + pl->phydev = NULL; + mutex_unlock(&pl->state_mutex); + 
mutex_unlock(&phy->lock); + flush_work(&pl->resolve); + + phy_disconnect(phy); + } +} +EXPORT_SYMBOL_GPL(phylink_disconnect_phy); + +void phylink_mac_change(struct phylink *pl, bool up) +{ + if (!up) + pl->mac_link_dropped = true; + phylink_run_resolve(pl); + netdev_dbg(pl->netdev, "mac link %s\n", up ? "up" : "down"); +} +EXPORT_SYMBOL_GPL(phylink_mac_change); + +void phylink_start(struct phylink *pl) +{ + WARN_ON(!lockdep_rtnl_is_held()); + + netdev_info(pl->netdev, "configuring for %s/%s link mode\n", + phylink_an_mode_str(pl->link_an_mode), + phy_modes(pl->link_config.interface)); + + /* Apply the link configuration to the MAC when starting. This allows + * a fixed-link to start with the correct parameters, and also + * ensures that we set the appropriate advertisement for Serdes links. + */ + phylink_resolve_flow(pl, &pl->link_config); + phylink_mac_config(pl, &pl->link_config); + + clear_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); + phylink_run_resolve(pl); + + if (pl->sfp_bus) + sfp_upstream_start(pl->sfp_bus); + if (pl->phydev) + phy_start(pl->phydev); +} +EXPORT_SYMBOL_GPL(phylink_start); + +void phylink_stop(struct phylink *pl) +{ + WARN_ON(!lockdep_rtnl_is_held()); + + if (pl->phydev) + phy_stop(pl->phydev); + if (pl->sfp_bus) + sfp_upstream_stop(pl->sfp_bus); + + set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); + flush_work(&pl->resolve); +} +EXPORT_SYMBOL_GPL(phylink_stop); + +void phylink_ethtool_get_wol(struct phylink *pl, struct ethtool_wolinfo *wol) +{ + WARN_ON(!lockdep_rtnl_is_held()); + + wol->supported = 0; + wol->wolopts = 0; + + if (pl->phydev) + phy_ethtool_get_wol(pl->phydev, wol); +} +EXPORT_SYMBOL_GPL(phylink_ethtool_get_wol); + +int phylink_ethtool_set_wol(struct phylink *pl, struct ethtool_wolinfo *wol) +{ + int ret = -EOPNOTSUPP; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (pl->phydev) + ret = phy_ethtool_set_wol(pl->phydev, wol); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_ethtool_set_wol); + +static void phylink_merge_link_mode(unsigned long *dst, const unsigned long *b) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask); + + linkmode_zero(mask); + phylink_set_port_modes(mask); + + linkmode_and(dst, dst, mask); + linkmode_or(dst, dst, b); +} + +static void phylink_get_ksettings(const struct phylink_link_state *state, + struct ethtool_link_ksettings *kset) +{ + phylink_merge_link_mode(kset->link_modes.advertising, state->advertising); + linkmode_copy(kset->link_modes.lp_advertising, state->lp_advertising); + kset->base.speed = state->speed; + kset->base.duplex = state->duplex; + kset->base.autoneg = state->an_enabled ? AUTONEG_ENABLE : + AUTONEG_DISABLE; +} + +int phylink_ethtool_ksettings_get(struct phylink *pl, + struct ethtool_link_ksettings *kset) +{ + struct phylink_link_state link_state; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (pl->phydev) { + phy_ethtool_ksettings_get(pl->phydev, kset); + } else { + kset->base.port = pl->link_port; + } + + linkmode_copy(kset->link_modes.supported, pl->supported); + + switch (pl->link_an_mode) { + case MLO_AN_FIXED: + /* We are using fixed settings. Report these as the + * current link settings - and note that these also + * represent the supported speeds/duplex/pause modes. + */ + phylink_get_fixed_state(pl, &link_state); + phylink_get_ksettings(&link_state, kset); + break; + + case MLO_AN_SGMII: + /* If there is a phy attached, then use the reported + * settings from the phy with no modification.
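The lifecycle functions above slot into a MAC driver roughly as follows; a sketch with a hypothetical foo driver, where foo_priv, the node pointer and foo_pcs_link_up() are assumptions:

struct foo_priv {
	struct phylink *phylink;
	struct device_node *node;
};

static int foo_open(struct net_device *ndev)
{
	struct foo_priv *priv = netdev_priv(ndev);
	int err;

	/* A no-op for fixed links; attaches the DT-described PHY otherwise */
	err = phylink_of_phy_connect(priv->phylink, priv->node);
	if (err)
		return err;

	phylink_start(priv->phylink);	/* clears DISABLE_STOPPED, resolves */
	return 0;
}

static int foo_stop(struct net_device *ndev)
{
	struct foo_priv *priv = netdev_priv(ndev);

	phylink_stop(priv->phylink);
	phylink_disconnect_phy(priv->phylink);
	return 0;
}

/* For in-band (SGMII/802.3z) modes, the MAC's link interrupt feeds
 * phylink_mac_change() so the resolver picks up PCS state changes. */
static irqreturn_t foo_link_irq(int irq, void *dev_id)
{
	struct foo_priv *priv = dev_id;

	phylink_mac_change(priv->phylink, foo_pcs_link_up(priv));
	return IRQ_HANDLED;
}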
+ */ + if (pl->phydev) + break; + /* fall through */ + + case MLO_AN_8023Z: + phylink_get_mac_state(pl, &link_state); + + /* The MAC is reporting the link results from its own PCS + * layer via in-band status. Report these as the current + * link settings. + */ + phylink_get_ksettings(&link_state, kset); + break; + } + + return 0; +} +EXPORT_SYMBOL_GPL(phylink_ethtool_ksettings_get); + +int phylink_ethtool_ksettings_set(struct phylink *pl, + const struct ethtool_link_ksettings *kset) +{ + struct ethtool_link_ksettings our_kset; + struct phylink_link_state config; + int ret; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (kset->base.autoneg != AUTONEG_DISABLE && + kset->base.autoneg != AUTONEG_ENABLE) + return -EINVAL; + + config = pl->link_config; + + /* Mask out unsupported advertisements */ + linkmode_and(config.advertising, kset->link_modes.advertising, + pl->supported); + + /* FIXME: should we reject autoneg if phy/mac does not support it? */ + if (kset->base.autoneg == AUTONEG_DISABLE) { + const struct phy_setting *s; + + /* Autonegotiation disabled, select a suitable speed and + * duplex. + */ + s = phy_lookup_setting(kset->base.speed, kset->base.duplex, + pl->supported, + __ETHTOOL_LINK_MODE_MASK_NBITS, false); + if (!s) + return -EINVAL; + + /* If we have a fixed link (as specified by firmware), refuse + * to change link parameters. + */ + if (pl->link_an_mode == MLO_AN_FIXED && + (s->speed != pl->link_config.speed || + s->duplex != pl->link_config.duplex)) + return -EINVAL; + + config.speed = s->speed; + config.duplex = s->duplex; + config.an_enabled = false; + + __clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising); + } else { + /* If we have a fixed link, refuse to enable autonegotiation */ + if (pl->link_an_mode == MLO_AN_FIXED) + return -EINVAL; + + config.speed = SPEED_UNKNOWN; + config.duplex = DUPLEX_UNKNOWN; + config.an_enabled = true; + + __set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising); + } + + if (phylink_validate(pl, pl->supported, &config)) + return -EINVAL; + + /* If autonegotiation is enabled, we must have an advertisement */ + if (config.an_enabled && phylink_is_empty_linkmode(config.advertising)) + return -EINVAL; + + our_kset = *kset; + linkmode_copy(our_kset.link_modes.advertising, config.advertising); + our_kset.base.speed = config.speed; + our_kset.base.duplex = config.duplex; + + /* If we have a PHY, configure the phy */ + if (pl->phydev) { + ret = phy_ethtool_ksettings_set(pl->phydev, &our_kset); + if (ret) + return ret; + } + + mutex_lock(&pl->state_mutex); + /* Configure the MAC to match the new settings */ + linkmode_copy(pl->link_config.advertising, our_kset.link_modes.advertising); + pl->link_config.speed = our_kset.base.speed; + pl->link_config.duplex = our_kset.base.duplex; + pl->link_config.an_enabled = our_kset.base.autoneg != AUTONEG_DISABLE; + + if (!test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) { + phylink_mac_config(pl, &pl->link_config); + phylink_mac_an_restart(pl); + } + mutex_unlock(&pl->state_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(phylink_ethtool_ksettings_set); + +int phylink_ethtool_nway_reset(struct phylink *pl) +{ + int ret = 0; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (pl->phydev) + ret = phy_restart_aneg(pl->phydev); + phylink_mac_an_restart(pl); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_ethtool_nway_reset); + +void phylink_ethtool_get_pauseparam(struct phylink *pl, + struct ethtool_pauseparam *pause) +{ + WARN_ON(!lockdep_rtnl_is_held()); + + pause->autoneg = !!(pl->link_config.pause & MLO_PAUSE_AN); +
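Wiring these into a driver's ethtool_ops is then a straight delegation (hypothetical foo_* wrappers; the ethtool core holds RTNL, satisfying the lockdep assertions above):

static int foo_get_link_ksettings(struct net_device *ndev,
				  struct ethtool_link_ksettings *kset)
{
	struct foo_priv *priv = netdev_priv(ndev);

	return phylink_ethtool_ksettings_get(priv->phylink, kset);
}

static int foo_set_link_ksettings(struct net_device *ndev,
				  const struct ethtool_link_ksettings *kset)
{
	struct foo_priv *priv = netdev_priv(ndev);

	return phylink_ethtool_ksettings_set(priv->phylink, kset);
}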
pause->rx_pause = !!(pl->link_config.pause & MLO_PAUSE_RX); + pause->tx_pause = !!(pl->link_config.pause & MLO_PAUSE_TX); +} +EXPORT_SYMBOL_GPL(phylink_ethtool_get_pauseparam); + +int phylink_ethtool_set_pauseparam(struct phylink *pl, + struct ethtool_pauseparam *pause) +{ + struct phylink_link_state *config = &pl->link_config; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (!phylink_test(pl->supported, Pause) && + !phylink_test(pl->supported, Asym_Pause)) + return -EOPNOTSUPP; + + if (!phylink_test(pl->supported, Asym_Pause) && + !pause->autoneg && pause->rx_pause != pause->tx_pause) + return -EINVAL; + + config->pause &= ~(MLO_PAUSE_AN | MLO_PAUSE_TXRX_MASK); + + if (pause->autoneg) + config->pause |= MLO_PAUSE_AN; + if (pause->rx_pause) + config->pause |= MLO_PAUSE_RX; + if (pause->tx_pause) + config->pause |= MLO_PAUSE_TX; + + if (!test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) { + switch (pl->link_an_mode) { + case MLO_AN_PHY: + /* Silently mark the carrier down, and then trigger a resolve */ + netif_carrier_off(pl->netdev); + phylink_run_resolve(pl); + break; + + case MLO_AN_FIXED: + /* Should we allow fixed links to change against the config? */ + phylink_resolve_flow(pl, config); + phylink_mac_config(pl, config); + break; + + case MLO_AN_SGMII: + case MLO_AN_8023Z: + phylink_mac_config(pl, config); + phylink_mac_an_restart(pl); + break; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(phylink_ethtool_set_pauseparam); + +int phylink_ethtool_get_module_info(struct phylink *pl, + struct ethtool_modinfo *modinfo) +{ + int ret = -EOPNOTSUPP; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (pl->sfp_bus) + ret = sfp_get_module_info(pl->sfp_bus, modinfo); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_ethtool_get_module_info); + +int phylink_ethtool_get_module_eeprom(struct phylink *pl, + struct ethtool_eeprom *ee, u8 *buf) +{ + int ret = -EOPNOTSUPP; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (pl->sfp_bus) + ret = sfp_get_module_eeprom(pl->sfp_bus, ee, buf); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_ethtool_get_module_eeprom); + +int phylink_init_eee(struct phylink *pl, bool clk_stop_enable) +{ + int ret = -EPROTONOSUPPORT; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (pl->phydev) + ret = phy_init_eee(pl->phydev, clk_stop_enable); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_init_eee); + +int phylink_get_eee_err(struct phylink *pl) +{ + int ret = 0; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (pl->phydev) + ret = phy_get_eee_err(pl->phydev); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_get_eee_err); + +int phylink_ethtool_get_eee(struct phylink *pl, struct ethtool_eee *eee) +{ + int ret = -EOPNOTSUPP; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (pl->phydev) + ret = phy_ethtool_get_eee(pl->phydev, eee); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_ethtool_get_eee); + +int phylink_ethtool_set_eee(struct phylink *pl, struct ethtool_eee *eee) +{ + int ret = -EOPNOTSUPP; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (pl->phydev) + ret = phy_ethtool_set_eee(pl->phydev, eee); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_ethtool_set_eee); + +/* This emulates MII registers for a fixed-mode phy operating as per the + * passed in state. "aneg" defines if we report negotiation is possible. + * + * FIXME: should deal with negotiation state too. 
+ */ +static int phylink_mii_emul_read(struct net_device *ndev, unsigned int reg, + struct phylink_link_state *state, bool aneg) +{ + struct fixed_phy_status fs; + int val; + + fs.link = state->link; + fs.speed = state->speed; + fs.duplex = state->duplex; + fs.pause = state->pause & MLO_PAUSE_SYM; + fs.asym_pause = state->pause & MLO_PAUSE_ASYM; + + val = swphy_read_reg(reg, &fs); + if (reg == MII_BMSR) { + if (!state->an_complete) + val &= ~BMSR_ANEGCOMPLETE; + if (!aneg) + val &= ~BMSR_ANEGCAPABLE; + } + return val; +} + +static int phylink_phy_read(struct phylink *pl, unsigned int phy_id, + unsigned int reg) +{ + struct phy_device *phydev = pl->phydev; + int prtad, devad; + + if (mdio_phy_id_is_c45(phy_id)) { + prtad = mdio_phy_id_prtad(phy_id); + devad = mdio_phy_id_devad(phy_id); + devad = MII_ADDR_C45 | devad << 16 | reg; + } else if (phydev->is_c45) { + switch (reg) { + case MII_BMCR: + case MII_BMSR: + case MII_PHYSID1: + case MII_PHYSID2: + devad = __ffs(phydev->c45_ids.devices_in_package); + break; + case MII_ADVERTISE: + case MII_LPA: + if (!(phydev->c45_ids.devices_in_package & MDIO_DEVS_AN)) + return -EINVAL; + devad = MDIO_MMD_AN; + if (reg == MII_ADVERTISE) + reg = MDIO_AN_ADVERTISE; + else + reg = MDIO_AN_LPA; + break; + default: + return -EINVAL; + } + prtad = phy_id; + devad = MII_ADDR_C45 | devad << 16 | reg; + } else { + prtad = phy_id; + devad = reg; + } + return mdiobus_read(pl->phydev->mdio.bus, prtad, devad); +} + +static int phylink_phy_write(struct phylink *pl, unsigned int phy_id, + unsigned int reg, unsigned int val) +{ + struct phy_device *phydev = pl->phydev; + int prtad, devad; + + if (mdio_phy_id_is_c45(phy_id)) { + prtad = mdio_phy_id_prtad(phy_id); + devad = mdio_phy_id_devad(phy_id); + devad = MII_ADDR_C45 | devad << 16 | reg; + } else if (phydev->is_c45) { + switch (reg) { + case MII_BMCR: + case MII_BMSR: + case MII_PHYSID1: + case MII_PHYSID2: + devad = __ffs(phydev->c45_ids.devices_in_package); + break; + case MII_ADVERTISE: + case MII_LPA: + if (!(phydev->c45_ids.devices_in_package & MDIO_DEVS_AN)) + return -EINVAL; + devad = MDIO_MMD_AN; + if (reg == MII_ADVERTISE) + reg = MDIO_AN_ADVERTISE; + else + reg = MDIO_AN_LPA; + break; + default: + return -EINVAL; + } + prtad = phy_id; + devad = MII_ADDR_C45 | devad << 16 | reg; + } else { + prtad = phy_id; + devad = reg; + } + + return mdiobus_write(phydev->mdio.bus, prtad, devad, val); +} + +static int phylink_mii_read(struct phylink *pl, unsigned int phy_id, + unsigned int reg) +{ + struct phylink_link_state state; + int val = 0xffff; + + switch (pl->link_an_mode) { + case MLO_AN_FIXED: + if (phy_id == 0) { + phylink_get_fixed_state(pl, &state); + val = phylink_mii_emul_read(pl->netdev, reg, &state, + true); + } + break; + + case MLO_AN_PHY: + return -EOPNOTSUPP; + + case MLO_AN_SGMII: + /* No phy, fall through to 8023z method */ + case MLO_AN_8023Z: + if (phy_id == 0) { + val = phylink_get_mac_state(pl, &state); + if (val < 0) + return val; + + val = phylink_mii_emul_read(pl->netdev, reg, &state, + true); + } + break; + } + + return val & 0xffff; +} + +static int phylink_mii_write(struct phylink *pl, unsigned int phy_id, + unsigned int reg, unsigned int val) +{ + switch (pl->link_an_mode) { + case MLO_AN_FIXED: + break; + + case MLO_AN_PHY: + return -EOPNOTSUPP; + + case MLO_AN_SGMII: + /* No phy, fall through to 8023z method */ + case MLO_AN_8023Z: + break; + } + + return 0; +} + +int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd) +{ + struct mii_ioctl_data *mii = if_mii(ifr); + int 
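To make the clause-45 translation above concrete: a SIOCGMIIREG request for MII_ADVERTISE against a C45-only PHY is steered to the autonegotiation MMD's advertisement register. A sketch of the resulting bus access (the PHY address 5 is hypothetical):

int prtad = 5;	/* hypothetical PHY address */
u32 regad = MII_ADDR_C45 | (MDIO_MMD_AN << 16) | MDIO_AN_ADVERTISE;
int val = mdiobus_read(pl->phydev->mdio.bus, prtad, regad);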
ret; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (pl->phydev) { + /* PHYs only exist for MLO_AN_PHY and MLO_AN_SGMII */ + switch (cmd) { + case SIOCGMIIPHY: + mii->phy_id = pl->phydev->mdio.addr; + /* fall through */ + + case SIOCGMIIREG: + ret = phylink_phy_read(pl, mii->phy_id, mii->reg_num); + if (ret >= 0) { + mii->val_out = ret; + ret = 0; + } + break; + + case SIOCSMIIREG: + ret = phylink_phy_write(pl, mii->phy_id, mii->reg_num, + mii->val_in); + break; + + default: + ret = phy_mii_ioctl(pl->phydev, ifr, cmd); + break; + } + } else { + switch (cmd) { + case SIOCGMIIPHY: + mii->phy_id = 0; + /* fall through */ + + case SIOCGMIIREG: + ret = phylink_mii_read(pl, mii->phy_id, mii->reg_num); + if (ret >= 0) { + mii->val_out = ret; + ret = 0; + } + break; + + case SIOCSMIIREG: + ret = phylink_mii_write(pl, mii->phy_id, mii->reg_num, + mii->val_in); + break; + + default: + ret = -EOPNOTSUPP; + break; + } + } + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_mii_ioctl); + + + +static int phylink_sfp_module_insert(void *upstream, + const struct sfp_eeprom_id *id) +{ + struct phylink *pl = upstream; + __ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, }; + struct phylink_link_state config; + phy_interface_t iface; + int mode, ret = 0; + bool changed; + u8 port; + + sfp_parse_support(pl->sfp_bus, id, support); + port = sfp_parse_port(pl->sfp_bus, id, support); + iface = sfp_parse_interface(pl->sfp_bus, id); + + WARN_ON(!lockdep_rtnl_is_held()); + + switch (iface) { + case PHY_INTERFACE_MODE_SGMII: + mode = MLO_AN_SGMII; + break; + case PHY_INTERFACE_MODE_1000BASEX: + mode = MLO_AN_8023Z; + break; + default: + return -EINVAL; + } + + memset(&config, 0, sizeof(config)); + linkmode_copy(config.advertising, support); + config.interface = iface; + config.speed = SPEED_UNKNOWN; + config.duplex = DUPLEX_UNKNOWN; + config.pause = MLO_PAUSE_AN; + config.an_enabled = pl->link_config.an_enabled; + + /* Ignore errors if we're expecting a PHY to attach later */ + ret = phylink_validate(pl, support, &config); + if (ret) { + netdev_err(pl->netdev, "validation of %s/%s with support %*pb failed: %d\n", + phylink_an_mode_str(mode), phy_modes(config.interface), + __ETHTOOL_LINK_MODE_MASK_NBITS, support, ret); + return ret; + } + + netdev_dbg(pl->netdev, "requesting link mode %s/%s with support %*pb\n", + phylink_an_mode_str(mode), phy_modes(config.interface), + __ETHTOOL_LINK_MODE_MASK_NBITS, support); + + if (mode == MLO_AN_8023Z && pl->phydev) + return -EINVAL; + + changed = !bitmap_equal(pl->supported, support, + __ETHTOOL_LINK_MODE_MASK_NBITS); + if (changed) { + linkmode_copy(pl->supported, support); + linkmode_copy(pl->link_config.advertising, config.advertising); + } + + if (pl->link_an_mode != mode || + pl->link_config.interface != config.interface) { + pl->link_config.interface = config.interface; + pl->link_an_mode = mode; + + changed = true; + + netdev_info(pl->netdev, "switched to %s/%s link mode\n", + phylink_an_mode_str(mode), + phy_modes(config.interface)); + } + + pl->link_port = port; + + if (changed && !test_bit(PHYLINK_DISABLE_STOPPED, + &pl->phylink_disable_state)) + phylink_mac_config(pl, &pl->link_config); + + return ret; +} + +static void phylink_sfp_link_down(void *upstream) +{ + struct phylink *pl = upstream; + + WARN_ON(!lockdep_rtnl_is_held()); + + set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state); + flush_work(&pl->resolve); + + netif_carrier_off(pl->netdev); +} + +static void phylink_sfp_link_up(void *upstream) +{ + struct phylink *pl = upstream; + + WARN_ON(!lockdep_rtnl_is_held()); + + clear_bit(PHYLINK_DISABLE_LINK,
&pl->phylink_disable_state); + phylink_run_resolve(pl); +} + +static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy) +{ + return phylink_connect_phy(upstream, phy); +} + +static void phylink_sfp_disconnect_phy(void *upstream) +{ + phylink_disconnect_phy(upstream); +} + +static const struct sfp_upstream_ops sfp_phylink_ops = { + .module_insert = phylink_sfp_module_insert, + .link_up = phylink_sfp_link_up, + .link_down = phylink_sfp_link_down, + .connect_phy = phylink_sfp_connect_phy, + .disconnect_phy = phylink_sfp_disconnect_phy, +}; + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c new file mode 100644 index 000000000000..5cb5384697ea --- /dev/null +++ b/drivers/net/phy/sfp-bus.c @@ -0,0 +1,475 @@ +#include <linux/export.h> +#include <linux/kref.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/phylink.h> +#include <linux/rtnetlink.h> +#include <linux/slab.h> + +#include "sfp.h" + +struct sfp_bus { + struct kref kref; + struct list_head node; + struct device_node *device_node; + + const struct sfp_socket_ops *socket_ops; + struct device *sfp_dev; + struct sfp *sfp; + + const struct sfp_upstream_ops *upstream_ops; + void *upstream; + struct net_device *netdev; + struct phy_device *phydev; + + bool registered; + bool started; +}; + + +int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id, + unsigned long *support) +{ + int port; + + /* port is the physical connector, set this from the connector field. */ + switch (id->base.connector) { + case SFP_CONNECTOR_SC: + case SFP_CONNECTOR_FIBERJACK: + case SFP_CONNECTOR_LC: + case SFP_CONNECTOR_MT_RJ: + case SFP_CONNECTOR_MU: + case SFP_CONNECTOR_OPTICAL_PIGTAIL: + if (support) + phylink_set(support, FIBRE); + port = PORT_FIBRE; + break; + + case SFP_CONNECTOR_RJ45: + if (support) + phylink_set(support, TP); + port = PORT_TP; + break; + + case SFP_CONNECTOR_UNSPEC: + if (id->base.e1000_base_t) { + if (support) + phylink_set(support, TP); + port = PORT_TP; + break; + } + /* fallthrough */ + case SFP_CONNECTOR_SG: /* guess */ + case SFP_CONNECTOR_MPO_1X12: + case SFP_CONNECTOR_MPO_2X16: + case SFP_CONNECTOR_HSSDC_II: + case SFP_CONNECTOR_COPPER_PIGTAIL: + case SFP_CONNECTOR_NOSEPARATE: + case SFP_CONNECTOR_MXC_2X16: + port = PORT_OTHER; + break; + default: + dev_warn(bus->sfp_dev, "SFP: unknown connector id 0x%02x\n", + id->base.connector); + port = PORT_OTHER; + break; + } + + return port; +} +EXPORT_SYMBOL_GPL(sfp_parse_port); + +phy_interface_t sfp_parse_interface(struct sfp_bus *bus, + const struct sfp_eeprom_id *id) +{ + phy_interface_t iface; + + /* Setting the serdes link mode is guesswork: there's no field in + * the EEPROM which indicates what mode should be used. + * + * If the module wants 64b66b, then it must be >= 10G. + * + * If it's a gigabit-only fiber module, it probably does not have + * a PHY, so switch to 802.3z negotiation mode. Otherwise, switch + * to SGMII mode (which is required to support non-gigabit speeds). 
+ */ + switch (id->base.encoding) { + case SFP_ENCODING_8472_64B66B: + iface = PHY_INTERFACE_MODE_10GKR; + break; + + case SFP_ENCODING_8B10B: + if (!id->base.e1000_base_t && + !id->base.e100_base_lx && + !id->base.e100_base_fx) + iface = PHY_INTERFACE_MODE_1000BASEX; + else + iface = PHY_INTERFACE_MODE_SGMII; + break; + + default: + iface = PHY_INTERFACE_MODE_NA; + dev_err(bus->sfp_dev, + "SFP module encoding does not support 8b10b nor 64b66b\n"); + break; + } + + return iface; +} +EXPORT_SYMBOL_GPL(sfp_parse_interface); + +void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, + unsigned long *support) +{ + phylink_set(support, Autoneg); + phylink_set(support, Pause); + phylink_set(support, Asym_Pause); + + /* Set ethtool support from the compliance fields. */ + if (id->base.e10g_base_sr) + phylink_set(support, 10000baseSR_Full); + if (id->base.e10g_base_lr) + phylink_set(support, 10000baseLR_Full); + if (id->base.e10g_base_lrm) + phylink_set(support, 10000baseLRM_Full); + if (id->base.e10g_base_er) + phylink_set(support, 10000baseER_Full); + if (id->base.e1000_base_sx || + id->base.e1000_base_lx || + id->base.e1000_base_cx) + phylink_set(support, 1000baseX_Full); + if (id->base.e1000_base_t) { + phylink_set(support, 1000baseT_Half); + phylink_set(support, 1000baseT_Full); + } + + switch (id->base.extended_cc) { + case 0x00: /* Unspecified */ + break; + case 0x02: /* 100Gbase-SR4 or 25Gbase-SR */ + phylink_set(support, 100000baseSR4_Full); + phylink_set(support, 25000baseSR_Full); + break; + case 0x03: /* 100Gbase-LR4 or 25Gbase-LR */ + case 0x04: /* 100Gbase-ER4 or 25Gbase-ER */ + phylink_set(support, 100000baseLR4_ER4_Full); + break; + case 0x0b: /* 100Gbase-CR4 or 25Gbase-CR CA-L */ + case 0x0c: /* 25Gbase-CR CA-S */ + case 0x0d: /* 25Gbase-CR CA-N */ + phylink_set(support, 100000baseCR4_Full); + phylink_set(support, 25000baseCR_Full); + break; + default: + dev_warn(bus->sfp_dev, + "Unknown/unsupported extended compliance code: 0x%02x\n", + id->base.extended_cc); + break; + } + + /* For fibre channel SFP, derive possible BaseX modes */ + if (id->base.fc_speed_100 || + id->base.fc_speed_200 || + id->base.fc_speed_400) { + if (id->base.br_nominal >= 31) + phylink_set(support, 2500baseX_Full); + if (id->base.br_nominal >= 12) + phylink_set(support, 1000baseX_Full); + } + + switch (id->base.connector) { + case SFP_CONNECTOR_SC: + case SFP_CONNECTOR_FIBERJACK: + case SFP_CONNECTOR_LC: + case SFP_CONNECTOR_MT_RJ: + case SFP_CONNECTOR_MU: + case SFP_CONNECTOR_OPTICAL_PIGTAIL: + break; + + case SFP_CONNECTOR_UNSPEC: + if (id->base.e1000_base_t) + break; + + case SFP_CONNECTOR_SG: /* guess */ + case SFP_CONNECTOR_MPO_1X12: + case SFP_CONNECTOR_MPO_2X16: + case SFP_CONNECTOR_HSSDC_II: + case SFP_CONNECTOR_COPPER_PIGTAIL: + case SFP_CONNECTOR_NOSEPARATE: + case SFP_CONNECTOR_MXC_2X16: + default: + /* a guess at the supported link modes */ + dev_warn(bus->sfp_dev, + "Guessing link modes, please report...\n"); + phylink_set(support, 1000baseT_Half); + phylink_set(support, 1000baseT_Full); + break; + } +} +EXPORT_SYMBOL_GPL(sfp_parse_support); + + +static LIST_HEAD(sfp_buses); +static DEFINE_MUTEX(sfp_mutex); + +static const struct sfp_upstream_ops *sfp_get_upstream_ops(struct sfp_bus *bus) +{ + return bus->registered ? 
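Putting the three parsers together for a hypothetical 1000BASE-SX module (LC connector, 8B10B encoding, only the e1000_base_sx compliance bit set), assuming bus and id are in scope:

__ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
u8 port;
phy_interface_t iface;

sfp_parse_support(bus, id, support);	/* Autoneg, Pause bits, 1000baseX_Full */
port = sfp_parse_port(bus, id, support);	/* PORT_FIBRE, adds the FIBRE bit */
iface = sfp_parse_interface(bus, id);	/* PHY_INTERFACE_MODE_1000BASEX */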
bus->upstream_ops : NULL; +} + +static struct sfp_bus *sfp_bus_get(struct device_node *np) +{ + struct sfp_bus *sfp, *new, *found = NULL; + + new = kzalloc(sizeof(*new), GFP_KERNEL); + + mutex_lock(&sfp_mutex); + + list_for_each_entry(sfp, &sfp_buses, node) { + if (sfp->device_node == np) { + kref_get(&sfp->kref); + found = sfp; + break; + } + } + + if (!found && new) { + kref_init(&new->kref); + new->device_node = np; + list_add(&new->node, &sfp_buses); + found = new; + new = NULL; + } + + mutex_unlock(&sfp_mutex); + + kfree(new); + + return found; +} + +static void sfp_bus_release(struct kref *kref) __releases(sfp_mutex) +{ + struct sfp_bus *bus = container_of(kref, struct sfp_bus, kref); + + list_del(&bus->node); + mutex_unlock(&sfp_mutex); + kfree(bus); +} + +static void sfp_bus_put(struct sfp_bus *bus) +{ + kref_put_mutex(&bus->kref, sfp_bus_release, &sfp_mutex); +} + +static int sfp_register_bus(struct sfp_bus *bus) +{ + const struct sfp_upstream_ops *ops = bus->upstream_ops; + int ret; + + if (ops) { + if (ops->link_down) + ops->link_down(bus->upstream); + if (ops->connect_phy && bus->phydev) { + ret = ops->connect_phy(bus->upstream, bus->phydev); + if (ret) + return ret; + } + } + if (bus->started) + bus->socket_ops->start(bus->sfp); + bus->registered = true; + return 0; +} + +static void sfp_unregister_bus(struct sfp_bus *bus) +{ + const struct sfp_upstream_ops *ops = bus->upstream_ops; + + if (bus->registered) { + if (bus->started) + bus->socket_ops->stop(bus->sfp); + if (bus->phydev && ops && ops->disconnect_phy) + ops->disconnect_phy(bus->upstream); + } + bus->registered = false; +} + + +int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo) +{ + if (!bus->registered) + return -ENOIOCTLCMD; + return bus->socket_ops->module_info(bus->sfp, modinfo); +} +EXPORT_SYMBOL_GPL(sfp_get_module_info); + +int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee, + u8 *data) +{ + if (!bus->registered) + return -ENOIOCTLCMD; + return bus->socket_ops->module_eeprom(bus->sfp, ee, data); +} +EXPORT_SYMBOL_GPL(sfp_get_module_eeprom); + +void sfp_upstream_start(struct sfp_bus *bus) +{ + if (bus->registered) + bus->socket_ops->start(bus->sfp); + bus->started = true; +} +EXPORT_SYMBOL_GPL(sfp_upstream_start); + +void sfp_upstream_stop(struct sfp_bus *bus) +{ + if (bus->registered) + bus->socket_ops->stop(bus->sfp); + bus->started = false; +} +EXPORT_SYMBOL_GPL(sfp_upstream_stop); + +struct sfp_bus *sfp_register_upstream(struct device_node *np, + struct net_device *ndev, void *upstream, + const struct sfp_upstream_ops *ops) +{ + struct sfp_bus *bus = sfp_bus_get(np); + int ret = 0; + + if (bus) { + rtnl_lock(); + bus->upstream_ops = ops; + bus->upstream = upstream; + bus->netdev = ndev; + + if (bus->sfp) + ret = sfp_register_bus(bus); + rtnl_unlock(); + } + + if (ret) { + sfp_bus_put(bus); + bus = NULL; + } + + return bus; +} +EXPORT_SYMBOL_GPL(sfp_register_upstream); + +void sfp_unregister_upstream(struct sfp_bus *bus) +{ + rtnl_lock(); + sfp_unregister_bus(bus); + bus->upstream = NULL; + bus->netdev = NULL; + rtnl_unlock(); + + sfp_bus_put(bus); +} +EXPORT_SYMBOL_GPL(sfp_unregister_upstream); + + +/* Socket driver entry points */ +int sfp_add_phy(struct sfp_bus *bus, struct phy_device *phydev) +{ + const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus); + int ret = 0; + + if (ops && ops->connect_phy) + ret = ops->connect_phy(bus->upstream, phydev); + + if (ret == 0) + bus->phydev = phydev; + + return ret; +} +EXPORT_SYMBOL_GPL(sfp_add_phy); + +void 
sfp_remove_phy(struct sfp_bus *bus) +{ + const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus); + + if (ops && ops->disconnect_phy) + ops->disconnect_phy(bus->upstream); + bus->phydev = NULL; +} +EXPORT_SYMBOL_GPL(sfp_remove_phy); + + +void sfp_link_up(struct sfp_bus *bus) +{ + const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus); + + if (ops && ops->link_up) + ops->link_up(bus->upstream); +} +EXPORT_SYMBOL_GPL(sfp_link_up); + +void sfp_link_down(struct sfp_bus *bus) +{ + const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus); + + if (ops && ops->link_down) + ops->link_down(bus->upstream); +} +EXPORT_SYMBOL_GPL(sfp_link_down); + +int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id) +{ + const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus); + int ret = 0; + + if (ops && ops->module_insert) + ret = ops->module_insert(bus->upstream, id); + + return ret; +} +EXPORT_SYMBOL_GPL(sfp_module_insert); + +void sfp_module_remove(struct sfp_bus *bus) +{ + const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus); + + if (ops && ops->module_remove) + ops->module_remove(bus->upstream); +} +EXPORT_SYMBOL_GPL(sfp_module_remove); + +struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp, + const struct sfp_socket_ops *ops) +{ + struct sfp_bus *bus = sfp_bus_get(dev->of_node); + int ret = 0; + + if (bus) { + rtnl_lock(); + bus->sfp_dev = dev; + bus->sfp = sfp; + bus->socket_ops = ops; + + if (bus->netdev) + ret = sfp_register_bus(bus); + rtnl_unlock(); + } + + if (ret) { + sfp_bus_put(bus); + bus = NULL; + } + + return bus; +} +EXPORT_SYMBOL_GPL(sfp_register_socket); + +void sfp_unregister_socket(struct sfp_bus *bus) +{ + rtnl_lock(); + sfp_unregister_bus(bus); + bus->sfp_dev = NULL; + bus->sfp = NULL; + bus->socket_ops = NULL; + rtnl_unlock(); + + sfp_bus_put(bus); +} +EXPORT_SYMBOL_GPL(sfp_unregister_socket); diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c new file mode 100644 index 000000000000..fb2cf4342f48 --- /dev/null +++ b/drivers/net/phy/sfp.c @@ -0,0 +1,915 @@ +#include <linux/delay.h> +#include <linux/gpio.h> +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/jiffies.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/phy.h> +#include <linux/platform_device.h> +#include <linux/rtnetlink.h> +#include <linux/slab.h> +#include <linux/workqueue.h> + +#include "mdio-i2c.h" +#include "sfp.h" +#include "swphy.h" + +enum { + GPIO_MODDEF0, + GPIO_LOS, + GPIO_TX_FAULT, + GPIO_TX_DISABLE, + GPIO_RATE_SELECT, + GPIO_MAX, + + SFP_F_PRESENT = BIT(GPIO_MODDEF0), + SFP_F_LOS = BIT(GPIO_LOS), + SFP_F_TX_FAULT = BIT(GPIO_TX_FAULT), + SFP_F_TX_DISABLE = BIT(GPIO_TX_DISABLE), + SFP_F_RATE_SELECT = BIT(GPIO_RATE_SELECT), + + SFP_E_INSERT = 0, + SFP_E_REMOVE, + SFP_E_DEV_DOWN, + SFP_E_DEV_UP, + SFP_E_TX_FAULT, + SFP_E_TX_CLEAR, + SFP_E_LOS_HIGH, + SFP_E_LOS_LOW, + SFP_E_TIMEOUT, + + SFP_MOD_EMPTY = 0, + SFP_MOD_PROBE, + SFP_MOD_PRESENT, + SFP_MOD_ERROR, + + SFP_DEV_DOWN = 0, + SFP_DEV_UP, + + SFP_S_DOWN = 0, + SFP_S_INIT, + SFP_S_WAIT_LOS, + SFP_S_LINK_UP, + SFP_S_TX_FAULT, + SFP_S_REINIT, + SFP_S_TX_DISABLE, +}; + +static const char *gpio_of_names[] = { + "moddef0", + "los", + "tx-fault", + "tx-disable", + "rate-select", +}; + +static const enum gpiod_flags gpio_flags[] = { + GPIOD_IN, + GPIOD_IN, + GPIOD_IN, + GPIOD_ASIS, + GPIOD_ASIS, +}; + +#define T_INIT_JIFFIES msecs_to_jiffies(300) +#define T_RESET_US 10 +#define T_FAULT_RECOVER msecs_to_jiffies(1000) + +/* 
SFP module presence detection is poor: the three MOD DEF signals are + * the same length on the PCB, which means it's possible for MOD DEF 0 to + * connect before the I2C bus on MOD DEF 1/2. + * + * The SFP MSA specifies 300ms as t_init (the time taken for TX_FAULT to + * be deasserted) but makes no mention of the earliest time before we can + * access the I2C EEPROM. However, Avago modules require 300ms. + */ +#define T_PROBE_INIT msecs_to_jiffies(300) +#define T_PROBE_RETRY msecs_to_jiffies(100) + +/* + * SFP modules appear to always have their PHY configured for bus address + * 0x56 (which with mdio-i2c, translates to a PHY address of 22). + */ +#define SFP_PHY_ADDR 22 + +/* + * Give this long for the PHY to reset. + */ +#define T_PHY_RESET_MS 50 + +static DEFINE_MUTEX(sfp_mutex); + +struct sfp { + struct device *dev; + struct i2c_adapter *i2c; + struct mii_bus *i2c_mii; + struct sfp_bus *sfp_bus; + struct phy_device *mod_phy; + + unsigned int (*get_state)(struct sfp *); + void (*set_state)(struct sfp *, unsigned int); + int (*read)(struct sfp *, bool, u8, void *, size_t); + + struct gpio_desc *gpio[GPIO_MAX]; + + unsigned int state; + struct delayed_work poll; + struct delayed_work timeout; + struct mutex sm_mutex; + unsigned char sm_mod_state; + unsigned char sm_dev_state; + unsigned short sm_state; + unsigned int sm_retries; + + struct sfp_eeprom_id id; +}; + +static unsigned long poll_jiffies; + +static unsigned int sfp_gpio_get_state(struct sfp *sfp) +{ + unsigned int i, state, v; + + for (i = state = 0; i < GPIO_MAX; i++) { + if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i]) + continue; + + v = gpiod_get_value_cansleep(sfp->gpio[i]); + if (v) + state |= BIT(i); + } + + return state; +} + +static void sfp_gpio_set_state(struct sfp *sfp, unsigned int state) +{ + if (state & SFP_F_PRESENT) { + /* If the module is present, drive the signals */ + if (sfp->gpio[GPIO_TX_DISABLE]) + gpiod_direction_output(sfp->gpio[GPIO_TX_DISABLE], + state & SFP_F_TX_DISABLE); + if (state & SFP_F_RATE_SELECT) + gpiod_direction_output(sfp->gpio[GPIO_RATE_SELECT], + state & SFP_F_RATE_SELECT); + } else { + /* Otherwise, let them float to the pull-ups */ + if (sfp->gpio[GPIO_TX_DISABLE]) + gpiod_direction_input(sfp->gpio[GPIO_TX_DISABLE]); + if (state & SFP_F_RATE_SELECT) + gpiod_direction_input(sfp->gpio[GPIO_RATE_SELECT]); + } +} + +static int sfp__i2c_read(struct i2c_adapter *i2c, u8 bus_addr, u8 dev_addr, + void *buf, size_t len) +{ + struct i2c_msg msgs[2]; + int ret; + + msgs[0].addr = bus_addr; + msgs[0].flags = 0; + msgs[0].len = 1; + msgs[0].buf = &dev_addr; + msgs[1].addr = bus_addr; + msgs[1].flags = I2C_M_RD; + msgs[1].len = len; + msgs[1].buf = buf; + + ret = i2c_transfer(i2c, msgs, ARRAY_SIZE(msgs)); + if (ret < 0) + return ret; + + return ret == ARRAY_SIZE(msgs) ? len : 0; +} + +static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 addr, void *buf, + size_t len) +{ + return sfp__i2c_read(sfp->i2c, a2 ? 
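The a2 flag selects the second EEPROM at I2C address 0x51, which SFF-8472 defines for the diagnostic area. An illustrative read, not from this patch: a DDM-capable module reports its raw temperature at A2h bytes 96-97 as a signed value in 1/256 degree Celsius units:

u8 temp[2];
int ret = sfp_i2c_read(sfp, true, 96, temp, sizeof(temp));
/* on ret == 2, (s16)(temp[0] << 8 | temp[1]) / 256 is degrees C */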
0x51 : 0x50, addr, buf, len); +} + +static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c) +{ + struct mii_bus *i2c_mii; + int ret; + + if (!i2c_check_functionality(i2c, I2C_FUNC_I2C)) + return -EINVAL; + + sfp->i2c = i2c; + sfp->read = sfp_i2c_read; + + i2c_mii = mdio_i2c_alloc(sfp->dev, i2c); + if (IS_ERR(i2c_mii)) + return PTR_ERR(i2c_mii); + + i2c_mii->name = "SFP I2C Bus"; + i2c_mii->phy_mask = ~0; + + ret = mdiobus_register(i2c_mii); + if (ret < 0) { + mdiobus_free(i2c_mii); + return ret; + } + + sfp->i2c_mii = i2c_mii; + + return 0; +} + + +/* Interface */ +static unsigned int sfp_get_state(struct sfp *sfp) +{ + return sfp->get_state(sfp); +} + +static void sfp_set_state(struct sfp *sfp, unsigned int state) +{ + sfp->set_state(sfp, state); +} + +static int sfp_read(struct sfp *sfp, bool a2, u8 addr, void *buf, size_t len) +{ + return sfp->read(sfp, a2, addr, buf, len); +} + +static unsigned int sfp_check(void *buf, size_t len) +{ + u8 *p, check; + + for (p = buf, check = 0; len; p++, len--) + check += *p; + + return check; +} + +/* Helpers */ +static void sfp_module_tx_disable(struct sfp *sfp) +{ + dev_dbg(sfp->dev, "tx disable %u -> %u\n", + sfp->state & SFP_F_TX_DISABLE ? 1 : 0, 1); + sfp->state |= SFP_F_TX_DISABLE; + sfp_set_state(sfp, sfp->state); +} + +static void sfp_module_tx_enable(struct sfp *sfp) +{ + dev_dbg(sfp->dev, "tx disable %u -> %u\n", + sfp->state & SFP_F_TX_DISABLE ? 1 : 0, 0); + sfp->state &= ~SFP_F_TX_DISABLE; + sfp_set_state(sfp, sfp->state); +} + +static void sfp_module_tx_fault_reset(struct sfp *sfp) +{ + unsigned int state = sfp->state; + + if (state & SFP_F_TX_DISABLE) + return; + + sfp_set_state(sfp, state | SFP_F_TX_DISABLE); + + udelay(T_RESET_US); + + sfp_set_state(sfp, state); +} + +/* SFP state machine */ +static void sfp_sm_set_timer(struct sfp *sfp, unsigned int timeout) +{ + if (timeout) + mod_delayed_work(system_power_efficient_wq, &sfp->timeout, + timeout); + else + cancel_delayed_work(&sfp->timeout); +} + +static void sfp_sm_next(struct sfp *sfp, unsigned int state, + unsigned int timeout) +{ + sfp->sm_state = state; + sfp_sm_set_timer(sfp, timeout); +} + +static void sfp_sm_ins_next(struct sfp *sfp, unsigned int state, unsigned int timeout) +{ + sfp->sm_mod_state = state; + sfp_sm_set_timer(sfp, timeout); +} + +static void sfp_sm_phy_detach(struct sfp *sfp) +{ + phy_stop(sfp->mod_phy); + sfp_remove_phy(sfp->sfp_bus); + phy_device_remove(sfp->mod_phy); + phy_device_free(sfp->mod_phy); + sfp->mod_phy = NULL; +} + +static void sfp_sm_probe_phy(struct sfp *sfp) +{ + struct phy_device *phy; + int err; + + msleep(T_PHY_RESET_MS); + + phy = mdiobus_scan(sfp->i2c_mii, SFP_PHY_ADDR); + if (IS_ERR(phy)) { + dev_err(sfp->dev, "mdiobus scan returned %ld\n", PTR_ERR(phy)); + return; + } + if (!phy) { + dev_info(sfp->dev, "no PHY detected\n"); + return; + } + + err = sfp_add_phy(sfp->sfp_bus, phy); + if (err) { + phy_device_remove(phy); + phy_device_free(phy); + dev_err(sfp->dev, "sfp_add_phy failed: %d\n", err); + return; + } + + sfp->mod_phy = phy; + phy_start(phy); +} + +static void sfp_sm_link_up(struct sfp *sfp) +{ + sfp_link_up(sfp->sfp_bus); + sfp_sm_next(sfp, SFP_S_LINK_UP, 0); +} + +static void sfp_sm_link_down(struct sfp *sfp) +{ + sfp_link_down(sfp->sfp_bus); +} + +static void sfp_sm_link_check_los(struct sfp *sfp) +{ + unsigned int los = sfp->state & SFP_F_LOS; + + /* FIXME: what if neither SFP_OPTIONS_LOS_INVERTED nor + * SFP_OPTIONS_LOS_NORMAL are set? For now, we assume + * the same as SFP_OPTIONS_LOS_NORMAL set. 
+ */ + if (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED) + los ^= SFP_F_LOS; + + if (los) + sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0); + else + sfp_sm_link_up(sfp); +} + +static void sfp_sm_fault(struct sfp *sfp, bool warn) +{ + if (sfp->sm_retries && !--sfp->sm_retries) { + dev_err(sfp->dev, "module persistently indicates fault, disabling\n"); + sfp_sm_next(sfp, SFP_S_TX_DISABLE, 0); + } else { + if (warn) + dev_err(sfp->dev, "module transmit fault indicated\n"); + + sfp_sm_next(sfp, SFP_S_TX_FAULT, T_FAULT_RECOVER); + } +} + +static void sfp_sm_mod_init(struct sfp *sfp) +{ + sfp_module_tx_enable(sfp); + + /* Wait t_init before indicating that the link is up, provided the + * current state indicates no TX_FAULT. If TX_FAULT clears before + * this time, that's fine too. + */ + sfp_sm_next(sfp, SFP_S_INIT, T_INIT_JIFFIES); + sfp->sm_retries = 5; + + /* Setting the serdes link mode is guesswork: there's no + * field in the EEPROM which indicates what mode should + * be used. + * + * If it's a gigabit-only fiber module, it probably does + * not have a PHY, so switch to 802.3z negotiation mode. + * Otherwise, switch to SGMII mode (which is required to + * support non-gigabit speeds) and probe for a PHY. + */ + if (sfp->id.base.e1000_base_t || + sfp->id.base.e100_base_lx || + sfp->id.base.e100_base_fx) + sfp_sm_probe_phy(sfp); +} + +static int sfp_sm_mod_probe(struct sfp *sfp) +{ + /* SFP module inserted - read I2C data */ + struct sfp_eeprom_id id; + char vendor[17]; + char part[17]; + char sn[17]; + char date[9]; + char rev[5]; + u8 check; + int err; + + err = sfp_read(sfp, false, 0, &id, sizeof(id)); + if (err < 0) { + dev_err(sfp->dev, "failed to read EEPROM: %d\n", err); + return -EAGAIN; + } + + if (err != sizeof(id)) { + dev_err(sfp->dev, "EEPROM short read: %d\n", err); + return -EAGAIN; + } + + /* Validate the checksum over the base structure */ + check = sfp_check(&id.base, sizeof(id.base) - 1); + if (check != id.base.cc_base) { + dev_err(sfp->dev, + "EEPROM base structure checksum failure: 0x%02x\n", + check); + print_hex_dump(KERN_ERR, "sfp EE: ", DUMP_PREFIX_OFFSET, + 16, 1, &id, sizeof(id.base) - 1, true); + return -EINVAL; + } + + check = sfp_check(&id.ext, sizeof(id.ext) - 1); + if (check != id.ext.cc_ext) { + dev_err(sfp->dev, + "EEPROM extended structure checksum failure: 0x%02x\n", + check); + memset(&id.ext, 0, sizeof(id.ext)); + } + + sfp->id = id; + + memcpy(vendor, sfp->id.base.vendor_name, 16); + vendor[16] = '\0'; + memcpy(part, sfp->id.base.vendor_pn, 16); + part[16] = '\0'; + memcpy(rev, sfp->id.base.vendor_rev, 4); + rev[4] = '\0'; + memcpy(sn, sfp->id.ext.vendor_sn, 16); + sn[16] = '\0'; + memcpy(date, sfp->id.ext.datecode, 8); + date[8] = '\0'; + + dev_info(sfp->dev, "module %s %s rev %s sn %s dc %s\n", vendor, part, rev, sn, date); + + /* We only support SFP modules, not the legacy GBIC modules. 
*/ + if (sfp->id.base.phys_id != SFP_PHYS_ID_SFP || + sfp->id.base.phys_ext_id != SFP_PHYS_EXT_ID_SFP) { + dev_err(sfp->dev, "module is not SFP - phys id 0x%02x 0x%02x\n", + sfp->id.base.phys_id, sfp->id.base.phys_ext_id); + return -EINVAL; + } + + return sfp_module_insert(sfp->sfp_bus, &sfp->id); +} + +static void sfp_sm_mod_remove(struct sfp *sfp) +{ + sfp_module_remove(sfp->sfp_bus); + + if (sfp->mod_phy) + sfp_sm_phy_detach(sfp); + + sfp_module_tx_disable(sfp); + + memset(&sfp->id, 0, sizeof(sfp->id)); + + dev_info(sfp->dev, "module removed\n"); +} + +static void sfp_sm_event(struct sfp *sfp, unsigned int event) +{ + mutex_lock(&sfp->sm_mutex); + + dev_dbg(sfp->dev, "SM: enter %u:%u:%u event %u\n", + sfp->sm_mod_state, sfp->sm_dev_state, sfp->sm_state, event); + + /* This state machine tracks the insert/remove state of + * the module, and handles probing the on-board EEPROM. + */ + switch (sfp->sm_mod_state) { + default: + if (event == SFP_E_INSERT) { + sfp_module_tx_disable(sfp); + sfp_sm_ins_next(sfp, SFP_MOD_PROBE, T_PROBE_INIT); + } + break; + + case SFP_MOD_PROBE: + if (event == SFP_E_REMOVE) { + sfp_sm_ins_next(sfp, SFP_MOD_EMPTY, 0); + } else if (event == SFP_E_TIMEOUT) { + int err = sfp_sm_mod_probe(sfp); + + if (err == 0) + sfp_sm_ins_next(sfp, SFP_MOD_PRESENT, 0); + else if (err == -EAGAIN) + sfp_sm_set_timer(sfp, T_PROBE_RETRY); + else + sfp_sm_ins_next(sfp, SFP_MOD_ERROR, 0); + } + break; + + case SFP_MOD_PRESENT: + case SFP_MOD_ERROR: + if (event == SFP_E_REMOVE) { + sfp_sm_mod_remove(sfp); + sfp_sm_ins_next(sfp, SFP_MOD_EMPTY, 0); + } + break; + } + + /* This state machine tracks the netdev up/down state */ + switch (sfp->sm_dev_state) { + default: + if (event == SFP_E_DEV_UP) + sfp->sm_dev_state = SFP_DEV_UP; + break; + + case SFP_DEV_UP: + if (event == SFP_E_DEV_DOWN) { + /* If the module has a PHY, avoid raising TX disable + * as this resets the PHY. Otherwise, raise it to + * turn the laser off. + */ + if (!sfp->mod_phy) + sfp_module_tx_disable(sfp); + sfp->sm_dev_state = SFP_DEV_DOWN; + } + break; + } + + /* Some events are global */ + if (sfp->sm_state != SFP_S_DOWN && + (sfp->sm_mod_state != SFP_MOD_PRESENT || + sfp->sm_dev_state != SFP_DEV_UP)) { + if (sfp->sm_state == SFP_S_LINK_UP && + sfp->sm_dev_state == SFP_DEV_UP) + sfp_sm_link_down(sfp); + if (sfp->mod_phy) + sfp_sm_phy_detach(sfp); + sfp_sm_next(sfp, SFP_S_DOWN, 0); + mutex_unlock(&sfp->sm_mutex); + return; + } + + /* The main state machine */ + switch (sfp->sm_state) { + case SFP_S_DOWN: + if (sfp->sm_mod_state == SFP_MOD_PRESENT && + sfp->sm_dev_state == SFP_DEV_UP) + sfp_sm_mod_init(sfp); + break; + + case SFP_S_INIT: + if (event == SFP_E_TIMEOUT && sfp->state & SFP_F_TX_FAULT) + sfp_sm_fault(sfp, true); + else if (event == SFP_E_TIMEOUT || event == SFP_E_TX_CLEAR) + sfp_sm_link_check_los(sfp); + break; + + case SFP_S_WAIT_LOS: + if (event == SFP_E_TX_FAULT) + sfp_sm_fault(sfp, true); + else if (event == + (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED ? + SFP_E_LOS_HIGH : SFP_E_LOS_LOW)) + sfp_sm_link_up(sfp); + break; + + case SFP_S_LINK_UP: + if (event == SFP_E_TX_FAULT) { + sfp_sm_link_down(sfp); + sfp_sm_fault(sfp, true); + } else if (event == + (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED ? 
+ SFP_E_LOS_LOW : SFP_E_LOS_HIGH)) { + sfp_sm_link_down(sfp); + sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0); + } + break; + + case SFP_S_TX_FAULT: + if (event == SFP_E_TIMEOUT) { + sfp_module_tx_fault_reset(sfp); + sfp_sm_next(sfp, SFP_S_REINIT, T_INIT_JIFFIES); + } + break; + + case SFP_S_REINIT: + if (event == SFP_E_TIMEOUT && sfp->state & SFP_F_TX_FAULT) { + sfp_sm_fault(sfp, false); + } else if (event == SFP_E_TIMEOUT || event == SFP_E_TX_CLEAR) { + dev_info(sfp->dev, "module transmit fault recovered\n"); + sfp_sm_link_check_los(sfp); + } + break; + + case SFP_S_TX_DISABLE: + break; + } + + dev_dbg(sfp->dev, "SM: exit %u:%u:%u\n", + sfp->sm_mod_state, sfp->sm_dev_state, sfp->sm_state); + + mutex_unlock(&sfp->sm_mutex); +} + +static void sfp_start(struct sfp *sfp) +{ + sfp_sm_event(sfp, SFP_E_DEV_UP); +} + +static void sfp_stop(struct sfp *sfp) +{ + sfp_sm_event(sfp, SFP_E_DEV_DOWN); +} + +static int sfp_module_info(struct sfp *sfp, struct ethtool_modinfo *modinfo) +{ + /* locking... and check module is present */ + + if (sfp->id.ext.sff8472_compliance) { + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } + return 0; +} + +static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee, + u8 *data) +{ + unsigned int first, last, len; + int ret; + + if (ee->len == 0) + return -EINVAL; + + first = ee->offset; + last = ee->offset + ee->len; + if (first < ETH_MODULE_SFF_8079_LEN) { + len = min_t(unsigned int, last, ETH_MODULE_SFF_8079_LEN); + len -= first; + + ret = sfp->read(sfp, false, first, data, len); + if (ret < 0) + return ret; + + first += len; + data += len; + } + if (first >= ETH_MODULE_SFF_8079_LEN && + first < ETH_MODULE_SFF_8472_LEN) { + len = min_t(unsigned int, last, ETH_MODULE_SFF_8472_LEN); + len -= first; + first -= ETH_MODULE_SFF_8079_LEN; + + ret = sfp->read(sfp, true, first, data, len); + if (ret < 0) + return ret; + } + return 0; +} + +static const struct sfp_socket_ops sfp_module_ops = { + .start = sfp_start, + .stop = sfp_stop, + .module_info = sfp_module_info, + .module_eeprom = sfp_module_eeprom, +}; + +static void sfp_timeout(struct work_struct *work) +{ + struct sfp *sfp = container_of(work, struct sfp, timeout.work); + + rtnl_lock(); + sfp_sm_event(sfp, SFP_E_TIMEOUT); + rtnl_unlock(); +} + +static void sfp_check_state(struct sfp *sfp) +{ + unsigned int state, i, changed; + + state = sfp_get_state(sfp); + changed = state ^ sfp->state; + changed &= SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT; + + for (i = 0; i < GPIO_MAX; i++) + if (changed & BIT(i)) + dev_dbg(sfp->dev, "%s %u -> %u\n", gpio_of_names[i], + !!(sfp->state & BIT(i)), !!(state & BIT(i))); + + state |= sfp->state & (SFP_F_TX_DISABLE | SFP_F_RATE_SELECT); + sfp->state = state; + + rtnl_lock(); + if (changed & SFP_F_PRESENT) + sfp_sm_event(sfp, state & SFP_F_PRESENT ? + SFP_E_INSERT : SFP_E_REMOVE); + + if (changed & SFP_F_TX_FAULT) + sfp_sm_event(sfp, state & SFP_F_TX_FAULT ? + SFP_E_TX_FAULT : SFP_E_TX_CLEAR); + + if (changed & SFP_F_LOS) + sfp_sm_event(sfp, state & SFP_F_LOS ? 
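	/* A note on sfp_module_eeprom() above: ethtool offsets below
	 * ETH_MODULE_SFF_8079_LEN (256) are served from the module's
	 * A0h address space, the remainder from the A2h diagnostics
	 * space (the sfp->read() call with "true" as its second
	 * argument), re-based to zero.  Worked example for offset 192,
	 * len 128, so last = 320:
	 *
	 *	A0h leg: len = min(320, 256) - 192 = 64
	 *		 sfp->read(sfp, false, 192, data, 64)
	 *	A2h leg: len = min(320, 512) - 256 = 64
	 *		 sfp->read(sfp, true, 0, data + 64, 64)
	 */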
+ SFP_E_LOS_HIGH : SFP_E_LOS_LOW); + rtnl_unlock(); +} + +static irqreturn_t sfp_irq(int irq, void *data) +{ + struct sfp *sfp = data; + + sfp_check_state(sfp); + + return IRQ_HANDLED; +} + +static void sfp_poll(struct work_struct *work) +{ + struct sfp *sfp = container_of(work, struct sfp, poll.work); + + sfp_check_state(sfp); + mod_delayed_work(system_wq, &sfp->poll, poll_jiffies); +} + +static struct sfp *sfp_alloc(struct device *dev) +{ + struct sfp *sfp; + + sfp = kzalloc(sizeof(*sfp), GFP_KERNEL); + if (!sfp) + return ERR_PTR(-ENOMEM); + + sfp->dev = dev; + + mutex_init(&sfp->sm_mutex); + INIT_DELAYED_WORK(&sfp->poll, sfp_poll); + INIT_DELAYED_WORK(&sfp->timeout, sfp_timeout); + + return sfp; +} + +static void sfp_cleanup(void *data) +{ + struct sfp *sfp = data; + + cancel_delayed_work_sync(&sfp->poll); + cancel_delayed_work_sync(&sfp->timeout); + if (sfp->i2c_mii) { + mdiobus_unregister(sfp->i2c_mii); + mdiobus_free(sfp->i2c_mii); + } + if (sfp->i2c) + i2c_put_adapter(sfp->i2c); + kfree(sfp); +} + +static int sfp_probe(struct platform_device *pdev) +{ + struct sfp *sfp; + bool poll = false; + int irq, err, i; + + sfp = sfp_alloc(&pdev->dev); + if (IS_ERR(sfp)) + return PTR_ERR(sfp); + + platform_set_drvdata(pdev, sfp); + + err = devm_add_action(sfp->dev, sfp_cleanup, sfp); + if (err < 0) + return err; + + if (pdev->dev.of_node) { + struct device_node *node = pdev->dev.of_node; + struct device_node *np; + + np = of_parse_phandle(node, "i2c-bus", 0); + if (np) { + struct i2c_adapter *i2c; + + i2c = of_find_i2c_adapter_by_node(np); + of_node_put(np); + if (!i2c) + return -EPROBE_DEFER; + + err = sfp_i2c_configure(sfp, i2c); + if (err < 0) { + i2c_put_adapter(i2c); + return err; + } + } + + for (i = 0; i < GPIO_MAX; i++) { + sfp->gpio[i] = devm_gpiod_get_optional(sfp->dev, + gpio_of_names[i], gpio_flags[i]); + if (IS_ERR(sfp->gpio[i])) + return PTR_ERR(sfp->gpio[i]); + } + + sfp->get_state = sfp_gpio_get_state; + sfp->set_state = sfp_gpio_set_state; + } + + sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops); + if (!sfp->sfp_bus) + return -ENOMEM; + + /* Get the initial state, and always signal TX disable, + * since the network interface will not be up. 
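 *
 * sfp_get_state() below resolves to sfp_gpio_get_state() on DT
 * platforms (assigned above); that helper is defined earlier in this
 * file and amounts to a sketch like this, consistent with the
 * GPIO_* indices and SFP_F_* bits used here:
 *
 *	static unsigned int sfp_gpio_get_state(struct sfp *sfp)
 *	{
 *		unsigned int i, state;
 *
 *		for (i = state = 0; i < GPIO_MAX; i++)
 *			if (gpio_flags[i] == GPIOD_IN && sfp->gpio[i] &&
 *			    gpiod_get_value_cansleep(sfp->gpio[i]))
 *				state |= BIT(i);
 *
 *		return state;
 *	}
 *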
+ */ + sfp->state = sfp_get_state(sfp) | SFP_F_TX_DISABLE; + + if (sfp->gpio[GPIO_RATE_SELECT] && + gpiod_get_value_cansleep(sfp->gpio[GPIO_RATE_SELECT])) + sfp->state |= SFP_F_RATE_SELECT; + sfp_set_state(sfp, sfp->state); + sfp_module_tx_disable(sfp); + rtnl_lock(); + if (sfp->state & SFP_F_PRESENT) + sfp_sm_event(sfp, SFP_E_INSERT); + rtnl_unlock(); + + for (i = 0; i < GPIO_MAX; i++) { + if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i]) + continue; + + irq = gpiod_to_irq(sfp->gpio[i]); + if (!irq) { + poll = true; + continue; + } + + err = devm_request_threaded_irq(sfp->dev, irq, NULL, sfp_irq, + IRQF_ONESHOT | + IRQF_TRIGGER_RISING | + IRQF_TRIGGER_FALLING, + dev_name(sfp->dev), sfp); + if (err) + poll = true; + } + + if (poll) + mod_delayed_work(system_wq, &sfp->poll, poll_jiffies); + + return 0; +} + +static int sfp_remove(struct platform_device *pdev) +{ + struct sfp *sfp = platform_get_drvdata(pdev); + + sfp_unregister_socket(sfp->sfp_bus); + + return 0; +} + +static const struct of_device_id sfp_of_match[] = { + { .compatible = "sff,sfp", }, + { }, +}; +MODULE_DEVICE_TABLE(of, sfp_of_match); + +static struct platform_driver sfp_driver = { + .probe = sfp_probe, + .remove = sfp_remove, + .driver = { + .name = "sfp", + .of_match_table = sfp_of_match, + }, +}; + +static int sfp_init(void) +{ + poll_jiffies = msecs_to_jiffies(100); + + return platform_driver_register(&sfp_driver); +} +module_init(sfp_init); + +static void sfp_exit(void) +{ + platform_driver_unregister(&sfp_driver); +} +module_exit(sfp_exit); + +MODULE_ALIAS("platform:sfp"); +MODULE_AUTHOR("Russell King"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h new file mode 100644 index 000000000000..31b0acf337e2 --- /dev/null +++ b/drivers/net/phy/sfp.h @@ -0,0 +1,28 @@ +#ifndef SFP_H +#define SFP_H + +#include <linux/ethtool.h> +#include <linux/sfp.h> + +struct sfp; + +struct sfp_socket_ops { + void (*start)(struct sfp *sfp); + void (*stop)(struct sfp *sfp); + int (*module_info)(struct sfp *sfp, struct ethtool_modinfo *modinfo); + int (*module_eeprom)(struct sfp *sfp, struct ethtool_eeprom *ee, + u8 *data); +}; + +int sfp_add_phy(struct sfp_bus *bus, struct phy_device *phydev); +void sfp_remove_phy(struct sfp_bus *bus); +void sfp_link_up(struct sfp_bus *bus); +void sfp_link_down(struct sfp_bus *bus); +int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id); +void sfp_module_remove(struct sfp_bus *bus); +int sfp_link_configure(struct sfp_bus *bus, const struct sfp_eeprom_id *id); +struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp, + const struct sfp_socket_ops *ops); +void sfp_unregister_socket(struct sfp_bus *bus); + +#endif diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 3570c7576993..ca267fd28ab8 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -943,9 +943,6 @@ static int set_offload(struct tap_queue *q, unsigned long arg) if (arg & TUN_F_TSO6) feature_mask |= NETIF_F_TSO6; } - - if (arg & TUN_F_UFO) - feature_mask |= NETIF_F_UFO; } /* tun/tap driver inverts the usage for TSO offloads, where @@ -956,7 +953,7 @@ static int set_offload(struct tap_queue *q, unsigned long arg) * When user space turns off TSO, we turn off GSO/LRO so that * user-space will not receive TSO frames. 
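 *
 * From userspace the visible effect of dropping UFO is (illustrative;
 * tap_fd is an already-open tap descriptor):
 *
 *	ioctl(tap_fd, TUNSETOFFLOAD,		// still accepted
 *	      TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | TUN_F_TSO_ECN);
 *	ioctl(tap_fd, TUNSETOFFLOAD,		// now fails with -EINVAL
 *	      TUN_F_CSUM | TUN_F_UFO);
 *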
*/ - if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO)) + if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6)) features |= RX_OFFLOADS; else features &= ~RX_OFFLOADS; @@ -1078,7 +1075,7 @@ static long tap_ioctl(struct file *file, unsigned int cmd, case TUNSETOFFLOAD: /* let the user check for future flags */ if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | - TUN_F_TSO_ECN | TUN_F_UFO)) + TUN_F_TSO_ECN)) return -EINVAL; rtnl_lock(); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 32ad87345f57..d21510d47aa2 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -199,7 +199,7 @@ struct tun_struct { struct net_device *dev; netdev_features_t set_features; #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \ - NETIF_F_TSO6|NETIF_F_UFO) + NETIF_F_TSO6) int align; int vnet_hdr_sz; @@ -892,7 +892,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) sk_filter(tfile->socket.sk, skb)) goto drop; - if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) + if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) goto drop; skb_tx_timestamp(skb); @@ -1921,11 +1921,6 @@ static int set_offload(struct tun_struct *tun, unsigned long arg) features |= NETIF_F_TSO6; arg &= ~(TUN_F_TSO4|TUN_F_TSO6); } - - if (arg & TUN_F_UFO) { - features |= NETIF_F_UFO; - arg &= ~TUN_F_UFO; - } } /* This gives the user a way to test for new features in future by diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c index fce92f0e5abd..dbc90313f472 100644 --- a/drivers/net/usb/catc.c +++ b/drivers/net/usb/catc.c @@ -961,7 +961,7 @@ static void catc_disconnect(struct usb_interface *intf) * Module functions and tables. */ -static struct usb_device_id catc_id_table [] = { +static const struct usb_device_id catc_id_table[] = { { USB_DEVICE(0x0423, 0xa) }, /* CATC Netmate, Belkin F5U011 */ { USB_DEVICE(0x0423, 0xc) }, /* CATC Netmate II, Belkin F5U111 */ { USB_DEVICE(0x08d1, 0x1) }, /* smartBridges smartNIC */ diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index 2952cb570996..288ecd999171 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -304,7 +304,7 @@ static void usbpn_setup(struct net_device *dev) /* * USB driver callbacks */ -static struct usb_device_id usbpn_ids[] = { +static const struct usb_device_id usbpn_ids[] = { { .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_CLASS diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 8f572b9f3625..811b18215cae 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -367,7 +367,7 @@ static struct attribute *cdc_ncm_sysfs_attrs[] = { NULL, }; -static struct attribute_group cdc_ncm_sysfs_attr_group = { +static const struct attribute_group cdc_ncm_sysfs_attr_group = { .name = "cdc_ncm", .attrs = cdc_ncm_sysfs_attrs, }; diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 0f213ea22c75..d49c7103085e 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@ -87,7 +87,7 @@ #define IPHETH_CARRIER_CHECK_TIMEOUT round_jiffies_relative(1 * HZ) #define IPHETH_CARRIER_ON 0x04 -static struct usb_device_id ipheth_table[] = { +static const struct usb_device_id ipheth_table[] = { { USB_DEVICE_AND_INTERFACE_INFO( USB_VENDOR_APPLE, USB_PRODUCT_IPHONE, IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c index 92e4fd29ae44..f1605833c5cf 100644 --- a/drivers/net/usb/kaweth.c +++ b/drivers/net/usb/kaweth.c @@ -125,7 +125,7 @@ static int 
kaweth_resume(struct usb_interface *intf); /**************************************************************** * usb_device_id ****************************************************************/ -static struct usb_device_id usb_klsi_table[] = { +static const struct usb_device_id usb_klsi_table[] = { { USB_DEVICE(0x03e8, 0x0008) }, /* AOX Endpoints USB Ethernet */ { USB_DEVICE(0x04bb, 0x0901) }, /* I-O DATA USB-ET/T */ { USB_DEVICE(0x0506, 0x03e8) }, /* 3Com 3C19250 */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 6cfffeff6108..ceb78e2ea4f0 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -5303,7 +5303,7 @@ static void rtl8152_disconnect(struct usb_interface *intf) .bInterfaceProtocol = USB_CDC_PROTO_NONE /* table of devices that work with this driver */ -static struct usb_device_id rtl8152_table[] = { +static const struct usb_device_id rtl8152_table[] = { {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8050)}, {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)}, {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)}, diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index daaa88a66f40..5f565bd574da 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -112,7 +112,7 @@ #undef EEPROM_WRITE /* table of devices that work with this driver */ -static struct usb_device_id rtl8150_table[] = { +static const struct usb_device_id rtl8150_table[] = { {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8150)}, {USB_DEVICE(VENDOR_ID_MELCO, PRODUCT_ID_LUAKTX)}, {USB_DEVICE(VENDOR_ID_MICRONET, PRODUCT_ID_SP128AR)}, diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 98f17b05c68b..e90de2186ffc 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -57,6 +57,11 @@ DECLARE_EWMA(pkt_len, 0, 64) #define VIRTNET_DRIVER_VERSION "1.0.0" +const unsigned long guest_offloads[] = { VIRTIO_NET_F_GUEST_TSO4, + VIRTIO_NET_F_GUEST_TSO6, + VIRTIO_NET_F_GUEST_ECN, + VIRTIO_NET_F_GUEST_UFO }; + struct virtnet_stats { struct u64_stats_sync tx_syncp; struct u64_stats_sync rx_syncp; @@ -164,10 +169,13 @@ struct virtnet_info { u8 ctrl_promisc; u8 ctrl_allmulti; u16 ctrl_vid; + u64 ctrl_offloads; /* Ethtool settings */ u8 duplex; u32 speed; + + unsigned long guest_offloads; }; struct padded_vnet_hdr { @@ -270,6 +278,23 @@ static void skb_xmit_done(struct virtqueue *vq) netif_wake_subqueue(vi->dev, vq2txq(vq)); } +#define MRG_CTX_HEADER_SHIFT 22 +static void *mergeable_len_to_ctx(unsigned int truesize, + unsigned int headroom) +{ + return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize); +} + +static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx) +{ + return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT; +} + +static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx) +{ + return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1); +} + /* Called from bottom half context */ static struct sk_buff *page_to_skb(struct virtnet_info *vi, struct receive_queue *rq, @@ -390,19 +415,85 @@ static unsigned int virtnet_get_headroom(struct virtnet_info *vi) return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0; } +/* We copy the packet for XDP in the following cases: + * + * 1) Packet is scattered across multiple rx buffers. + * 2) Headroom space is insufficient. + * + * This is inefficient but it's a temporary condition that + * we hit right after XDP is enabled and until queue is refilled + * with large buffers with sufficient headroom - so it should affect + * at most queue size packets. 
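 *
 * On the context encoding added earlier in this patch:
 * mergeable_len_to_ctx() packs the headroom above bit
 * MRG_CTX_HEADER_SHIFT (22) and the truesize in the low 22 bits of
 * the opaque pointer.  Round trip, with arbitrary values:
 *
 *	void *ctx = mergeable_len_to_ctx(1536, 256);
 *	mergeable_ctx_to_truesize(ctx)	-> 1536		// low 22 bits
 *	mergeable_ctx_to_headroom(ctx)	-> 256		// bits 22 and up
 *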
+ * Afterwards, the conditions to enable + * XDP should preclude the underlying device from sending packets + * across multiple buffers (num_buf > 1), and we make sure buffers + * have enough headroom. + */ +static struct page *xdp_linearize_page(struct receive_queue *rq, + u16 *num_buf, + struct page *p, + int offset, + int page_off, + unsigned int *len) +{ + struct page *page = alloc_page(GFP_ATOMIC); + + if (!page) + return NULL; + + memcpy(page_address(page) + page_off, page_address(p) + offset, *len); + page_off += *len; + + while (--*num_buf) { + unsigned int buflen; + void *buf; + int off; + + buf = virtqueue_get_buf(rq->vq, &buflen); + if (unlikely(!buf)) + goto err_buf; + + p = virt_to_head_page(buf); + off = buf - page_address(p); + + /* guard against a misconfigured or uncooperative backend that + * is sending packet larger than the MTU. + */ + if ((page_off + buflen) > PAGE_SIZE) { + put_page(p); + goto err_buf; + } + + memcpy(page_address(page) + page_off, + page_address(p) + off, buflen); + page_off += buflen; + put_page(p); + } + + /* Headroom does not contribute to packet length */ + *len = page_off - VIRTIO_XDP_HEADROOM; + return page; +err_buf: + __free_pages(page, 0); + return NULL; +} + static struct sk_buff *receive_small(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, - void *buf, unsigned int len) + void *buf, void *ctx, + unsigned int len) { struct sk_buff *skb; struct bpf_prog *xdp_prog; - unsigned int xdp_headroom = virtnet_get_headroom(vi); + unsigned int xdp_headroom = (unsigned long)ctx; unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom; unsigned int headroom = vi->hdr_len + header_offset; unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + struct page *page = virt_to_head_page(buf); unsigned int delta = 0; + struct page *xdp_page; len -= vi->hdr_len; rcu_read_lock(); @@ -416,6 +507,27 @@ static struct sk_buff *receive_small(struct net_device *dev, if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags)) goto err_xdp; + if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) { + int offset = buf - page_address(page) + header_offset; + unsigned int tlen = len + vi->hdr_len; + u16 num_buf = 1; + + xdp_headroom = virtnet_get_headroom(vi); + header_offset = VIRTNET_RX_PAD + xdp_headroom; + headroom = vi->hdr_len + header_offset; + buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + xdp_page = xdp_linearize_page(rq, &num_buf, page, + offset, header_offset, + &tlen); + if (!xdp_page) + goto err_xdp; + + buf = page_address(xdp_page); + put_page(page); + page = xdp_page; + } + xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len; xdp.data = xdp.data_hard_start + xdp_headroom; xdp.data_end = xdp.data + len; @@ -444,7 +556,7 @@ static struct sk_buff *receive_small(struct net_device *dev, skb = build_skb(buf, buflen); if (!skb) { - put_page(virt_to_head_page(buf)); + put_page(page); goto err; } skb_reserve(skb, headroom - delta); @@ -460,7 +572,7 @@ err: err_xdp: rcu_read_unlock(); dev->stats.rx_dropped++; - put_page(virt_to_head_page(buf)); + put_page(page); xdp_xmit: return NULL; } @@ -485,66 +597,6 @@ err: return NULL; } -/* The conditions to enable XDP should preclude the underlying device from - * sending packets across multiple buffers (num_buf > 1). However per spec - * it does not appear to be illegal to do so but rather just against convention. 
- * So in order to avoid making a system unresponsive the packets are pushed - * into a page and the XDP program is run. This will be extremely slow and we - * push a warning to the user to fix this as soon as possible. Fixing this may - * require resolving the underlying hardware to determine why multiple buffers - * are being received or simply loading the XDP program in the ingress stack - * after the skb is built because there is no advantage to running it here - * anymore. - */ -static struct page *xdp_linearize_page(struct receive_queue *rq, - u16 *num_buf, - struct page *p, - int offset, - unsigned int *len) -{ - struct page *page = alloc_page(GFP_ATOMIC); - unsigned int page_off = VIRTIO_XDP_HEADROOM; - - if (!page) - return NULL; - - memcpy(page_address(page) + page_off, page_address(p) + offset, *len); - page_off += *len; - - while (--*num_buf) { - unsigned int buflen; - void *buf; - int off; - - buf = virtqueue_get_buf(rq->vq, &buflen); - if (unlikely(!buf)) - goto err_buf; - - p = virt_to_head_page(buf); - off = buf - page_address(p); - - /* guard against a misconfigured or uncooperative backend that - * is sending packet larger than the MTU. - */ - if ((page_off + buflen) > PAGE_SIZE) { - put_page(p); - goto err_buf; - } - - memcpy(page_address(page) + page_off, - page_address(p) + off, buflen); - page_off += buflen; - put_page(p); - } - - /* Headroom does not contribute to packet length */ - *len = page_off - VIRTIO_XDP_HEADROOM; - return page; -err_buf: - __free_pages(page, 0); - return NULL; -} - static struct sk_buff *receive_mergeable(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, @@ -559,6 +611,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, struct sk_buff *head_skb, *curr_skb; struct bpf_prog *xdp_prog; unsigned int truesize; + unsigned int headroom = mergeable_ctx_to_headroom(ctx); head_skb = NULL; @@ -571,10 +624,13 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, u32 act; /* This happens when rx buffer size is underestimated */ - if (unlikely(num_buf > 1)) { + if (unlikely(num_buf > 1 || + headroom < virtnet_get_headroom(vi))) { /* linearize data for XDP */ xdp_page = xdp_linearize_page(rq, &num_buf, - page, offset, &len); + page, offset, + VIRTIO_XDP_HEADROOM, + &len); if (!xdp_page) goto err_xdp; offset = VIRTIO_XDP_HEADROOM; @@ -639,13 +695,14 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, } rcu_read_unlock(); - if (unlikely(len > (unsigned long)ctx)) { + truesize = mergeable_ctx_to_truesize(ctx); + if (unlikely(len > truesize)) { pr_debug("%s: rx error: len %u exceeds truesize %lu\n", dev->name, len, (unsigned long)ctx); dev->stats.rx_length_errors++; goto err_skb; } - truesize = (unsigned long)ctx; + head_skb = page_to_skb(vi, rq, page, offset, len, truesize); curr_skb = head_skb; @@ -665,13 +722,14 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, } page = virt_to_head_page(buf); - if (unlikely(len > (unsigned long)ctx)) { + + truesize = mergeable_ctx_to_truesize(ctx); + if (unlikely(len > truesize)) { pr_debug("%s: rx error: len %u exceeds truesize %lu\n", dev->name, len, (unsigned long)ctx); dev->stats.rx_length_errors++; goto err_skb; } - truesize = (unsigned long)ctx; num_skb_frags = skb_shinfo(curr_skb)->nr_frags; if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { @@ -754,7 +812,7 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, else if (vi->big_packets) skb = receive_big(dev, vi, rq, buf, len); else - skb = 
receive_small(dev, vi, rq, buf, len); + skb = receive_small(dev, vi, rq, buf, ctx, len); if (unlikely(!skb)) return 0; @@ -787,12 +845,18 @@ frame_err: return 0; } +/* Unlike mergeable buffers, all buffers are allocated to the + * same size, except for the headroom. For this reason we do + * not need to use mergeable_len_to_ctx here - it is enough + * to store the headroom as the context ignoring the truesize. + */ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, gfp_t gfp) { struct page_frag *alloc_frag = &rq->alloc_frag; char *buf; unsigned int xdp_headroom = virtnet_get_headroom(vi); + void *ctx = (void *)(unsigned long)xdp_headroom; int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom; int err; @@ -806,10 +870,9 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, alloc_frag->offset += len; sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom, vi->hdr_len + GOOD_PACKET_LEN); - err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp); + err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); if (err < 0) put_page(virt_to_head_page(buf)); - return err; } @@ -902,7 +965,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, } sg_init_one(rq->sg, buf, len); - ctx = (void *)(unsigned long)len; + ctx = mergeable_len_to_ctx(len, headroom); err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); if (err < 0) put_page(virt_to_head_page(buf)); @@ -1014,7 +1077,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget) void *buf; struct virtnet_stats *stats = this_cpu_ptr(vi->stats); - if (vi->mergeable_rx_bufs) { + if (!vi->big_packets || vi->mergeable_rx_bufs) { void *ctx; while (received < budget && @@ -1813,7 +1876,6 @@ static void virtnet_freeze_down(struct virtio_device *vdev) } static int init_vqs(struct virtnet_info *vi); -static void _remove_vq_common(struct virtnet_info *vi); static int virtnet_restore_up(struct virtio_device *vdev) { @@ -1842,37 +1904,45 @@ static int virtnet_restore_up(struct virtio_device *vdev) return err; } -static int virtnet_reset(struct virtnet_info *vi, int curr_qp, int xdp_qp) +static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads) { - struct virtio_device *dev = vi->vdev; - int ret; + struct scatterlist sg; + vi->ctrl_offloads = cpu_to_virtio64(vi->vdev, offloads); - virtio_config_disable(dev); - dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED; - virtnet_freeze_down(dev); - _remove_vq_common(vi); + sg_init_one(&sg, &vi->ctrl_offloads, sizeof(vi->ctrl_offloads)); - virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); - virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER); + if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS, + VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) { + dev_warn(&vi->dev->dev, "Fail to set guest offload. 
\n"); + return -EINVAL; + } - ret = virtio_finalize_features(dev); - if (ret) - goto err; + return 0; +} - vi->xdp_queue_pairs = xdp_qp; - ret = virtnet_restore_up(dev); - if (ret) - goto err; - ret = _virtnet_set_queues(vi, curr_qp); - if (ret) - goto err; +static int virtnet_clear_guest_offloads(struct virtnet_info *vi) +{ + u64 offloads = 0; - virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); - virtio_config_enable(dev); - return 0; -err: - virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED); - return ret; + if (!vi->guest_offloads) + return 0; + + if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM)) + offloads = 1ULL << VIRTIO_NET_F_GUEST_CSUM; + + return virtnet_set_guest_offloads(vi, offloads); +} + +static int virtnet_restore_guest_offloads(struct virtnet_info *vi) +{ + u64 offloads = vi->guest_offloads; + + if (!vi->guest_offloads) + return 0; + if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM)) + offloads |= 1ULL << VIRTIO_NET_F_GUEST_CSUM; + + return virtnet_set_guest_offloads(vi, offloads); } static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, @@ -1884,10 +1954,11 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, u16 xdp_qp = 0, curr_qp; int i, err; - if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || - virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || - virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || - virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO)) { + if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) + && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || + virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || + virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || + virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO))) { NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO, disable LRO first"); return -EOPNOTSUPP; } @@ -1921,35 +1992,35 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, return PTR_ERR(prog); } - /* Changing the headroom in buffers is a disruptive operation because - * existing buffers must be flushed and reallocated. This will happen - * when a xdp program is initially added or xdp is disabled by removing - * the xdp program resulting in number of XDP queues changing. - */ - if (vi->xdp_queue_pairs != xdp_qp) { - err = virtnet_reset(vi, curr_qp + xdp_qp, xdp_qp); - if (err) { - dev_warn(&dev->dev, "XDP reset failure.\n"); - goto virtio_reset_err; - } - } + /* Make sure NAPI is not using any XDP TX queues for RX. */ + for (i = 0; i < vi->max_queue_pairs; i++) + napi_disable(&vi->rq[i].napi); netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); + err = _virtnet_set_queues(vi, curr_qp + xdp_qp); + if (err) + goto err; + vi->xdp_queue_pairs = xdp_qp; for (i = 0; i < vi->max_queue_pairs; i++) { old_prog = rtnl_dereference(vi->rq[i].xdp_prog); rcu_assign_pointer(vi->rq[i].xdp_prog, prog); + if (i == 0) { + if (!old_prog) + virtnet_clear_guest_offloads(vi); + if (!prog) + virtnet_restore_guest_offloads(vi); + } if (old_prog) bpf_prog_put(old_prog); + virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); } return 0; -virtio_reset_err: - /* On reset error do our best to unwind XDP changes inflight and return - * error up to user space for resolution. The underlying reset hung on - * us so not much we can do here. 
- */ +err: + for (i = 0; i < vi->max_queue_pairs; i++) + virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); if (prog) bpf_prog_sub(prog, vi->max_queue_pairs - 1); return err; @@ -2182,7 +2253,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi) names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL); if (!names) goto err_names; - if (vi->mergeable_rx_bufs) { + if (!vi->big_packets || vi->mergeable_rx_bufs) { ctx = kzalloc(total_vqs * sizeof(*ctx), GFP_KERNEL); if (!ctx) goto err_ctx; @@ -2428,7 +2499,7 @@ static int virtnet_probe(struct virtio_device *vdev) dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { - dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO + dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6; } /* Individual feature bits: what can host handle? */ @@ -2438,13 +2509,11 @@ static int virtnet_probe(struct virtio_device *vdev) dev->hw_features |= NETIF_F_TSO6; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) dev->hw_features |= NETIF_F_TSO_ECN; - if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO)) - dev->hw_features |= NETIF_F_UFO; dev->features |= NETIF_F_GSO_ROBUST; if (gso) - dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO); + dev->features |= dev->hw_features & NETIF_F_ALL_TSO; /* (!csum && gso) case will be fixed by register_netdev() */ } if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) @@ -2577,6 +2646,10 @@ static int virtnet_probe(struct virtio_device *vdev) netif_carrier_on(dev); } + for (i = 0; i < ARRAY_SIZE(guest_offloads); i++) + if (virtio_has_feature(vi->vdev, guest_offloads[i])) + set_bit(guest_offloads[i], &vi->guest_offloads); + pr_debug("virtnet: registered device %s with %d RX and TX vq's\n", dev->name, max_queue_pairs); @@ -2597,15 +2670,6 @@ free: return err; } -static void _remove_vq_common(struct virtnet_info *vi) -{ - vi->vdev->config->reset(vi->vdev); - free_unused_bufs(vi); - _free_receive_bufs(vi); - free_receive_page_frags(vi); - virtnet_del_vqs(vi); -} - static void remove_vq_common(struct virtnet_info *vi) { vi->vdev->config->reset(vi->vdev); @@ -2637,8 +2701,7 @@ static void virtnet_remove(struct virtio_device *vdev) free_netdev(vi->dev); } -#ifdef CONFIG_PM_SLEEP -static int virtnet_freeze(struct virtio_device *vdev) +static __maybe_unused int virtnet_freeze(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; @@ -2649,7 +2712,7 @@ static int virtnet_freeze(struct virtio_device *vdev) return 0; } -static int virtnet_restore(struct virtio_device *vdev) +static __maybe_unused int virtnet_restore(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; int err; @@ -2665,7 +2728,6 @@ static int virtnet_restore(struct virtio_device *vdev) return 0; } -#endif static struct virtio_device_id id_table[] = { { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID }, @@ -2682,7 +2744,7 @@ static struct virtio_device_id id_table[] = { VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \ VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \ VIRTIO_NET_F_CTRL_MAC_ADDR, \ - VIRTIO_NET_F_MTU + VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS static unsigned int features[] = { VIRTNET_FEATURES, diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 8a1eaf3c302a..abd2010c48ae 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -1371,10 +1371,14 @@ static int vrf_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { if (tb[IFLA_ADDRESS]) { - if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) + if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) { + 
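	/* On the virtnet_freeze()/virtnet_restore() change in the
	 * virtio_net hunk above: replacing the CONFIG_PM_SLEEP #ifdef
	 * with __maybe_unused keeps both callbacks compiled in every
	 * configuration (so they cannot bit-rot) while only their
	 * registration stays conditional.  Sketch of the assumed
	 * registration site:
	 *
	 *	static struct virtio_driver virtio_net_driver = {
	 *		...
	 *	#ifdef CONFIG_PM_SLEEP
	 *		.freeze  = virtnet_freeze,
	 *		.restore = virtnet_restore,
	 *	#endif
	 *	};
	 */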
NL_SET_ERR_MSG(extack, "Invalid hardware address"); return -EINVAL; - if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) + } + if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) { + NL_SET_ERR_MSG(extack, "Invalid hardware address"); return -EADDRNOTAVAIL; + } } return 0; } @@ -1399,12 +1403,17 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev, struct net *net; int err; - if (!data || !data[IFLA_VRF_TABLE]) + if (!data || !data[IFLA_VRF_TABLE]) { + NL_SET_ERR_MSG(extack, "VRF table id is missing"); return -EINVAL; + } vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]); - if (vrf->tb_id == RT_TABLE_UNSPEC) + if (vrf->tb_id == RT_TABLE_UNSPEC) { + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VRF_TABLE], + "Invalid VRF table id"); return -EINVAL; + } dev->priv_flags |= IFF_L3MDEV_MASTER; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index e17baac70f43..35e84a9e1cfb 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2609,7 +2609,7 @@ static struct device_type vxlan_type = { * supply the listening VXLAN udp ports. Callers are expected * to implement the ndo_udp_tunnel_add. */ -static void vxlan_push_rx_ports(struct net_device *dev) +static void vxlan_offload_rx_ports(struct net_device *dev, bool push) { struct vxlan_sock *vs; struct net *net = dev_net(dev); @@ -2618,11 +2618,19 @@ static void vxlan_push_rx_ports(struct net_device *dev) spin_lock(&vn->sock_lock); for (i = 0; i < PORT_HASH_SIZE; ++i) { - hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) - udp_tunnel_push_rx_port(dev, vs->sock, - (vs->flags & VXLAN_F_GPE) ? - UDP_TUNNEL_TYPE_VXLAN_GPE : - UDP_TUNNEL_TYPE_VXLAN); + hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) { + unsigned short type; + + if (vs->flags & VXLAN_F_GPE) + type = UDP_TUNNEL_TYPE_VXLAN_GPE; + else + type = UDP_TUNNEL_TYPE_VXLAN; + + if (push) + udp_tunnel_push_rx_port(dev, vs->sock, type); + else + udp_tunnel_drop_rx_port(dev, vs->sock, type); + } } spin_unlock(&vn->sock_lock); } @@ -3631,10 +3639,15 @@ static int vxlan_netdevice_event(struct notifier_block *unused, struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); - if (event == NETDEV_UNREGISTER) + if (event == NETDEV_UNREGISTER) { + vxlan_offload_rx_ports(dev, false); vxlan_handle_lowerdev_unregister(vn, dev); - else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO) - vxlan_push_rx_ports(dev); + } else if (event == NETDEV_REGISTER) { + vxlan_offload_rx_ports(dev, true); + } else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO || + event == NETDEV_UDP_TUNNEL_DROP_INFO) { + vxlan_offload_rx_ports(dev, event == NETDEV_UDP_TUNNEL_PUSH_INFO); + } return NOTIFY_DONE; } diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c index da770af83036..b36dd792fbb2 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.c +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -787,8 +787,9 @@ static int ath10k_ahb_probe(struct platform_device *pdev) ar_pci->mem = ar_ahb->mem; ar_pci->mem_len = ar_ahb->mem_len; ar_pci->ar = ar; - ar_pci->bus_ops = &ath10k_ahb_bus_ops; + ar_pci->ce.bus_ops = &ath10k_ahb_bus_ops; ar_pci->targ_cpu_to_ce_addr = ath10k_ahb_qca4019_targ_cpu_to_ce_addr; + ar->ce_priv = &ar_pci->ce; ret = ath10k_pci_setup_resource(ar); if (ret) { diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index 08b84c8c3614..a8afd690290f 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -16,7 +16,6 @@ */ #include "hif.h" 
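/* The ce.c changes below route every CE register access through
 * per-bus ops, mirroring the ahb.c hunk above that assigns
 * ar_pci->ce.bus_ops.  On the PCI path the equivalent wiring is
 * roughly as follows (the PCI-side helper names are assumptions,
 * sketched from the ahb change):
 *
 *	static const struct ath10k_bus_ops ath10k_pci_bus_ops = {
 *		.read32		= ath10k_bus_pci_read32,
 *		.write32	= ath10k_bus_pci_write32,
 *		.get_num_banks	= ath10k_pci_get_num_banks,
 *	};
 *
 *	ar_pci->ce.bus_ops = &ath10k_pci_bus_ops;
 *	ar->ce_priv = &ar_pci->ce;
 */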
-#include "pci.h" #include "ce.h" #include "debug.h" @@ -33,7 +32,7 @@ * Each ring consists of a number of descriptors which specify * an address, length, and meta-data. * - * Typically, one side of the PCIe interconnect (Host or Target) + * Typically, one side of the PCIe/AHB/SNOC interconnect (Host or Target) * controls one ring and the other side controls the other ring. * The source side chooses when to initiate a transfer and it * chooses what to send (buffer address, length). The destination @@ -73,57 +72,71 @@ ath10k_get_ring_byte(unsigned int offset, return ((offset & addr_map->mask) >> (addr_map->lsb)); } +static inline u32 ath10k_ce_read32(struct ath10k *ar, u32 offset) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + + return ce->bus_ops->read32(ar, offset); +} + +static inline void ath10k_ce_write32(struct ath10k *ar, u32 offset, u32 value) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + + ce->bus_ops->write32(ar, offset, value); +} + static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - ath10k_pci_write32(ar, ce_ctrl_addr + - ar->hw_ce_regs->dst_wr_index_addr, n); + ath10k_ce_write32(ar, ce_ctrl_addr + + ar->hw_ce_regs->dst_wr_index_addr, n); } static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar, u32 ce_ctrl_addr) { - return ath10k_pci_read32(ar, ce_ctrl_addr + - ar->hw_ce_regs->dst_wr_index_addr); + return ath10k_ce_read32(ar, ce_ctrl_addr + + ar->hw_ce_regs->dst_wr_index_addr); } static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - ath10k_pci_write32(ar, ce_ctrl_addr + - ar->hw_ce_regs->sr_wr_index_addr, n); + ath10k_ce_write32(ar, ce_ctrl_addr + + ar->hw_ce_regs->sr_wr_index_addr, n); } static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar, u32 ce_ctrl_addr) { - return ath10k_pci_read32(ar, ce_ctrl_addr + - ar->hw_ce_regs->sr_wr_index_addr); + return ath10k_ce_read32(ar, ce_ctrl_addr + + ar->hw_ce_regs->sr_wr_index_addr); } static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar, u32 ce_ctrl_addr) { - return ath10k_pci_read32(ar, ce_ctrl_addr + - ar->hw_ce_regs->current_srri_addr); + return ath10k_ce_read32(ar, ce_ctrl_addr + + ar->hw_ce_regs->current_srri_addr); } static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int addr) { - ath10k_pci_write32(ar, ce_ctrl_addr + - ar->hw_ce_regs->sr_base_addr, addr); + ath10k_ce_write32(ar, ce_ctrl_addr + + ar->hw_ce_regs->sr_base_addr, addr); } static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - ath10k_pci_write32(ar, ce_ctrl_addr + - ar->hw_ce_regs->sr_size_addr, n); + ath10k_ce_write32(ar, ce_ctrl_addr + + ar->hw_ce_regs->sr_size_addr, n); } static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar, @@ -131,12 +144,13 @@ static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar, unsigned int n) { struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs; - u32 ctrl1_addr = ath10k_pci_read32(ar, - ce_ctrl_addr + ctrl_regs->addr); - ath10k_pci_write32(ar, ce_ctrl_addr + ctrl_regs->addr, - (ctrl1_addr & ~(ctrl_regs->dmax->mask)) | - ath10k_set_ring_byte(n, ctrl_regs->dmax)); + u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr + + ctrl_regs->addr); + + ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr, + (ctrl1_addr & ~(ctrl_regs->dmax->mask)) | + ath10k_set_ring_byte(n, ctrl_regs->dmax)); } static inline void 
ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar, @@ -144,11 +158,13 @@ static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar, unsigned int n) { struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs; - u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + ctrl_regs->addr); - ath10k_pci_write32(ar, ce_ctrl_addr + ctrl_regs->addr, - (ctrl1_addr & ~(ctrl_regs->src_ring->mask)) | - ath10k_set_ring_byte(n, ctrl_regs->src_ring)); + u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr + + ctrl_regs->addr); + + ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr, + (ctrl1_addr & ~(ctrl_regs->src_ring->mask)) | + ath10k_set_ring_byte(n, ctrl_regs->src_ring)); } static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar, @@ -156,34 +172,36 @@ static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar, unsigned int n) { struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs; - u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + ctrl_regs->addr); - ath10k_pci_write32(ar, ce_ctrl_addr + ctrl_regs->addr, - (ctrl1_addr & ~(ctrl_regs->dst_ring->mask)) | - ath10k_set_ring_byte(n, ctrl_regs->dst_ring)); + u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr + + ctrl_regs->addr); + + ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr, + (ctrl1_addr & ~(ctrl_regs->dst_ring->mask)) | + ath10k_set_ring_byte(n, ctrl_regs->dst_ring)); } static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar, u32 ce_ctrl_addr) { - return ath10k_pci_read32(ar, ce_ctrl_addr + - ar->hw_ce_regs->current_drri_addr); + return ath10k_ce_read32(ar, ce_ctrl_addr + + ar->hw_ce_regs->current_drri_addr); } static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar, u32 ce_ctrl_addr, u32 addr) { - ath10k_pci_write32(ar, ce_ctrl_addr + - ar->hw_ce_regs->dr_base_addr, addr); + ath10k_ce_write32(ar, ce_ctrl_addr + + ar->hw_ce_regs->dr_base_addr, addr); } static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - ath10k_pci_write32(ar, ce_ctrl_addr + - ar->hw_ce_regs->dr_size_addr, n); + ath10k_ce_write32(ar, ce_ctrl_addr + + ar->hw_ce_regs->dr_size_addr, n); } static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar, @@ -191,11 +209,11 @@ static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar, unsigned int n) { struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr; - u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + srcr_wm->addr); + u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr); - ath10k_pci_write32(ar, ce_ctrl_addr + srcr_wm->addr, - (addr & ~(srcr_wm->wm_high->mask)) | - (ath10k_set_ring_byte(n, srcr_wm->wm_high))); + ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr, + (addr & ~(srcr_wm->wm_high->mask)) | + (ath10k_set_ring_byte(n, srcr_wm->wm_high))); } static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar, @@ -203,11 +221,11 @@ static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar, unsigned int n) { struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr; - u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + srcr_wm->addr); + u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr); - ath10k_pci_write32(ar, ce_ctrl_addr + srcr_wm->addr, - (addr & ~(srcr_wm->wm_low->mask)) | - (ath10k_set_ring_byte(n, srcr_wm->wm_low))); + ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr, + (addr & ~(srcr_wm->wm_low->mask)) | + (ath10k_set_ring_byte(n, srcr_wm->wm_low))); } static inline void 
ath10k_ce_dest_ring_highmark_set(struct ath10k *ar, @@ -215,11 +233,11 @@ static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar, unsigned int n) { struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr; - u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + dstr_wm->addr); + u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr); - ath10k_pci_write32(ar, ce_ctrl_addr + dstr_wm->addr, - (addr & ~(dstr_wm->wm_high->mask)) | - (ath10k_set_ring_byte(n, dstr_wm->wm_high))); + ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr, + (addr & ~(dstr_wm->wm_high->mask)) | + (ath10k_set_ring_byte(n, dstr_wm->wm_high))); } static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar, @@ -227,66 +245,73 @@ static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar, unsigned int n) { struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr; - u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + dstr_wm->addr); + u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr); - ath10k_pci_write32(ar, ce_ctrl_addr + dstr_wm->addr, - (addr & ~(dstr_wm->wm_low->mask)) | - (ath10k_set_ring_byte(n, dstr_wm->wm_low))); + ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr, + (addr & ~(dstr_wm->wm_low->mask)) | + (ath10k_set_ring_byte(n, dstr_wm->wm_low))); } static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar, u32 ce_ctrl_addr) { struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie; - u32 host_ie_addr = ath10k_pci_read32(ar, ce_ctrl_addr + - ar->hw_ce_regs->host_ie_addr); - ath10k_pci_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr, - host_ie_addr | host_ie->copy_complete->mask); + u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr + + ar->hw_ce_regs->host_ie_addr); + + ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr, + host_ie_addr | host_ie->copy_complete->mask); } static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar, u32 ce_ctrl_addr) { struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie; - u32 host_ie_addr = ath10k_pci_read32(ar, ce_ctrl_addr + - ar->hw_ce_regs->host_ie_addr); - ath10k_pci_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr, - host_ie_addr & ~(host_ie->copy_complete->mask)); + u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr + + ar->hw_ce_regs->host_ie_addr); + + ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr, + host_ie_addr & ~(host_ie->copy_complete->mask)); } static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar, u32 ce_ctrl_addr) { struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs; - u32 host_ie_addr = ath10k_pci_read32(ar, ce_ctrl_addr + - ar->hw_ce_regs->host_ie_addr); - ath10k_pci_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr, - host_ie_addr & ~(wm_regs->wm_mask)); + u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr + + ar->hw_ce_regs->host_ie_addr); + + ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr, + host_ie_addr & ~(wm_regs->wm_mask)); } static inline void ath10k_ce_error_intr_enable(struct ath10k *ar, u32 ce_ctrl_addr) { struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs; - u32 misc_ie_addr = ath10k_pci_read32(ar, ce_ctrl_addr + - ar->hw_ce_regs->misc_ie_addr); - ath10k_pci_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr, - misc_ie_addr | misc_regs->err_mask); + u32 misc_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr + + ar->hw_ce_regs->misc_ie_addr); + + ath10k_ce_write32(ar, + ce_ctrl_addr + 
ar->hw_ce_regs->misc_ie_addr, + misc_ie_addr | misc_regs->err_mask); } static inline void ath10k_ce_error_intr_disable(struct ath10k *ar, u32 ce_ctrl_addr) { struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs; - u32 misc_ie_addr = ath10k_pci_read32(ar, ce_ctrl_addr + - ar->hw_ce_regs->misc_ie_addr); - ath10k_pci_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr, - misc_ie_addr & ~(misc_regs->err_mask)); + u32 misc_ie_addr = ath10k_ce_read32(ar, + ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr); + + ath10k_ce_write32(ar, + ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr, + misc_ie_addr & ~(misc_regs->err_mask)); } static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar, @@ -295,7 +320,7 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar, { struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs; - ath10k_pci_write32(ar, ce_ctrl_addr + wm_regs->addr, mask); + ath10k_ce_write32(ar, ce_ctrl_addr + wm_regs->addr, mask); } /* @@ -362,11 +387,11 @@ exit: void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe) { struct ath10k *ar = pipe->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); struct ath10k_ce_ring *src_ring = pipe->src_ring; u32 ctrl_addr = pipe->ctrl_addr; - lockdep_assert_held(&ar_pci->ce_lock); + lockdep_assert_held(&ce->ce_lock); /* * This function must be called only if there is an incomplete @@ -394,13 +419,13 @@ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state, unsigned int flags) { struct ath10k *ar = ce_state->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); int ret; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); ret = ath10k_ce_send_nolock(ce_state, per_transfer_context, buffer, nbytes, transfer_id, flags); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); return ret; } @@ -408,14 +433,14 @@ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state, int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe) { struct ath10k *ar = pipe->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); int delta; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); delta = CE_RING_DELTA(pipe->src_ring->nentries_mask, pipe->src_ring->write_index, pipe->src_ring->sw_index - 1); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); return delta; } @@ -423,13 +448,13 @@ int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe) int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe) { struct ath10k *ar = pipe->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); struct ath10k_ce_ring *dest_ring = pipe->dest_ring; unsigned int nentries_mask = dest_ring->nentries_mask; unsigned int write_index = dest_ring->write_index; unsigned int sw_index = dest_ring->sw_index; - lockdep_assert_held(&ar_pci->ce_lock); + lockdep_assert_held(&ce->ce_lock); return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1); } @@ -437,7 +462,7 @@ int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe) int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr) { struct ath10k *ar = pipe->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); struct ath10k_ce_ring *dest_ring = pipe->dest_ring; unsigned int nentries_mask = dest_ring->nentries_mask; unsigned int write_index = dest_ring->write_index; @@ -446,7 +471,7 @@ int 
__ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr) struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index); u32 ctrl_addr = pipe->ctrl_addr; - lockdep_assert_held(&ar_pci->ce_lock); + lockdep_assert_held(&ce->ce_lock); if ((pipe->id != 5) && CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0) @@ -486,12 +511,12 @@ void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries) int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr) { struct ath10k *ar = pipe->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); int ret; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); ret = __ath10k_ce_rx_post_buf(pipe, ctx, paddr); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); return ret; } @@ -554,14 +579,14 @@ int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state, unsigned int *nbytesp) { struct ath10k *ar = ce_state->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); int ret; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); ret = ath10k_ce_completed_recv_next_nolock(ce_state, per_transfer_contextp, nbytesp); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); return ret; } @@ -576,7 +601,7 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state, unsigned int write_index; int ret; struct ath10k *ar; - struct ath10k_pci *ar_pci; + struct ath10k_ce *ce; dest_ring = ce_state->dest_ring; @@ -584,9 +609,9 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state, return -EIO; ar = ce_state->ar; - ar_pci = ath10k_pci_priv(ar); + ce = ath10k_ce_priv(ar); - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); nentries_mask = dest_ring->nentries_mask; sw_index = dest_ring->sw_index; @@ -614,7 +639,7 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state, ret = -EIO; } - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); return ret; } @@ -686,7 +711,7 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state, unsigned int write_index; int ret; struct ath10k *ar; - struct ath10k_pci *ar_pci; + struct ath10k_ce *ce; src_ring = ce_state->src_ring; @@ -694,9 +719,9 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state, return -EIO; ar = ce_state->ar; - ar_pci = ath10k_pci_priv(ar); + ce = ath10k_ce_priv(ar); - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); nentries_mask = src_ring->nentries_mask; sw_index = src_ring->sw_index; @@ -727,7 +752,7 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state, ret = -EIO; } - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); return ret; } @@ -736,13 +761,13 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state, void **per_transfer_contextp) { struct ath10k *ar = ce_state->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); int ret; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); ret = ath10k_ce_completed_send_next_nolock(ce_state, per_transfer_contextp); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); return ret; } @@ -755,17 +780,18 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state, */ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; + struct ath10k_ce *ce = 
ath10k_ce_priv(ar); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs; u32 ctrl_addr = ce_state->ctrl_addr; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); /* Clear the copy-complete interrupts that will be handled here. */ - ath10k_ce_engine_int_status_clear(ar, ctrl_addr, wm_regs->cc_mask); + ath10k_ce_engine_int_status_clear(ar, ctrl_addr, + wm_regs->cc_mask); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); if (ce_state->recv_cb) ce_state->recv_cb(ce_state); @@ -773,7 +799,7 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id) if (ce_state->send_cb) ce_state->send_cb(ce_state); - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); /* * Misc CE interrupts are not being handled, but still need @@ -781,7 +807,7 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id) */ ath10k_ce_engine_int_status_clear(ar, ctrl_addr, wm_regs->wm_mask); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); } /* @@ -795,7 +821,7 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar) int ce_id; u32 intr_summary; - intr_summary = CE_INTERRUPT_SUMMARY(ar); + intr_summary = ath10k_ce_interrupt_summary(ar); for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) { if (intr_summary & (1 << ce_id)) @@ -847,22 +873,25 @@ int ath10k_ce_disable_interrupts(struct ath10k *ar) void ath10k_ce_enable_interrupts(struct ath10k *ar) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); int ce_id; + struct ath10k_ce_pipe *ce_state; /* Skip the last copy engine, CE7 the diagnostic window, as that * uses polling and isn't initialized for interrupts. */ - for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++) - ath10k_ce_per_engine_handler_adjust(&ar_pci->ce_states[ce_id]); + for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++) { + ce_state = &ce->ce_states[ce_id]; + ath10k_ce_per_engine_handler_adjust(ce_state); + } } static int ath10k_ce_init_src_ring(struct ath10k *ar, unsigned int ce_id, const struct ce_attr *attr) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; struct ath10k_ce_ring *src_ring = ce_state->src_ring; u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id); @@ -898,8 +927,8 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar, unsigned int ce_id, const struct ce_attr *attr) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; struct ath10k_ce_ring *dest_ring = ce_state->dest_ring; u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id); @@ -1081,8 +1110,8 @@ void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id) int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, const struct ce_attr *attr) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; int ret; /* @@ -1138,8 +1167,8 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ath10k_ce_pipe *ce_state = 
&ar_pci->ce_states[ce_id]; + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; if (ce_state->src_ring) { dma_free_coherent(ar->dev, @@ -1168,38 +1197,38 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id) void ath10k_ce_dump_registers(struct ath10k *ar, struct ath10k_fw_crash_data *crash_data) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ath10k_ce_crash_data ce; + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_crash_data ce_data; u32 addr, id; lockdep_assert_held(&ar->data_lock); ath10k_err(ar, "Copy Engine register dump:\n"); - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); for (id = 0; id < CE_COUNT; id++) { addr = ath10k_ce_base_address(ar, id); - ce.base_addr = cpu_to_le32(addr); + ce_data.base_addr = cpu_to_le32(addr); - ce.src_wr_idx = + ce_data.src_wr_idx = cpu_to_le32(ath10k_ce_src_ring_write_index_get(ar, addr)); - ce.src_r_idx = + ce_data.src_r_idx = cpu_to_le32(ath10k_ce_src_ring_read_index_get(ar, addr)); - ce.dst_wr_idx = + ce_data.dst_wr_idx = cpu_to_le32(ath10k_ce_dest_ring_write_index_get(ar, addr)); - ce.dst_r_idx = + ce_data.dst_r_idx = cpu_to_le32(ath10k_ce_dest_ring_read_index_get(ar, addr)); if (crash_data) - crash_data->ce_crash_data[id] = ce; + crash_data->ce_crash_data[id] = ce_data; ath10k_err(ar, "[%02d]: 0x%08x %3u %3u %3u %3u", id, - le32_to_cpu(ce.base_addr), - le32_to_cpu(ce.src_wr_idx), - le32_to_cpu(ce.src_r_idx), - le32_to_cpu(ce.dst_wr_idx), - le32_to_cpu(ce.dst_r_idx)); + le32_to_cpu(ce_data.base_addr), + le32_to_cpu(ce_data.src_wr_idx), + le32_to_cpu(ce_data.src_r_idx), + le32_to_cpu(ce_data.dst_wr_idx), + le32_to_cpu(ce_data.dst_r_idx)); } - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); } diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index 95743a57525d..bdec794704d9 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -122,6 +122,24 @@ struct ath10k_ce_pipe { /* Copy Engine settable attributes */ struct ce_attr; +struct ath10k_bus_ops { + u32 (*read32)(struct ath10k *ar, u32 offset); + void (*write32)(struct ath10k *ar, u32 offset, u32 value); + int (*get_num_banks)(struct ath10k *ar); +}; + +static inline struct ath10k_ce *ath10k_ce_priv(struct ath10k *ar) +{ + return (struct ath10k_ce *)ar->ce_priv; +} + +struct ath10k_ce { + /* protects CE info */ + spinlock_t ce_lock; + const struct ath10k_bus_ops *bus_ops; + struct ath10k_ce_pipe ce_states[CE_COUNT_MAX]; +}; + /*==================Send====================*/ /* ath10k_ce_send flags */ @@ -291,9 +309,13 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id) CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB) #define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS 0x0000 -#define CE_INTERRUPT_SUMMARY(ar) \ - CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( \ - ath10k_pci_read32((ar), CE_WRAPPER_BASE_ADDRESS + \ - CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS)) +static inline u32 ath10k_ce_interrupt_summary(struct ath10k *ar) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + + return CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( + ce->bus_ops->read32((ar), CE_WRAPPER_BASE_ADDRESS + + CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS)); +} #endif /* _CE_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 75c5c903c8a6..8ff47458207c 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -2516,6 +2516,11 @@ struct ath10k 
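/*
 * The ce.h hunk above is the heart of this refactor: CE state is reached
 * through the opaque ar->ce_priv pointer and all register access funnels
 * through ath10k_bus_ops, so ce.c no longer needs pci.h. A sketch of how a
 * bus backend is expected to wire this up; struct ath10k_ce, ath10k_bus_ops
 * and ce_priv come from this patch, while the example_* names are
 * illustrative stand-ins for a real bus driver (pci.c does the equivalent
 * further down):
 */
struct example_bus {
	struct ath10k_ce ce;	/* must outlive all CE users */
	void __iomem *mem;
};

static u32 example_read32(struct ath10k *ar, u32 offset)
{
	struct example_bus *bus = (void *)ar->drv_priv;

	return ioread32(bus->mem + offset);
}

static void example_write32(struct ath10k *ar, u32 offset, u32 value)
{
	struct example_bus *bus = (void *)ar->drv_priv;

	iowrite32(value, bus->mem + offset);
}

static int example_get_num_banks(struct ath10k *ar)
{
	return 1;	/* bus specific; PCI derives this from the chip id */
}

static const struct ath10k_bus_ops example_bus_ops = {
	.read32		= example_read32,
	.write32	= example_write32,
	.get_num_banks	= example_get_num_banks,
};

static void example_probe_wiring(struct ath10k *ar)
{
	struct example_bus *bus = (void *)ar->drv_priv;

	spin_lock_init(&bus->ce.ce_lock);
	bus->ce.bus_ops = &example_bus_ops;
	ar->ce_priv = &bus->ce;	/* makes ath10k_ce_priv(ar) work */
}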
*ath10k_core_create(size_t priv_size, struct device *dev, ar->hw_ce_regs = &qcax_ce_regs; ar->hw_values = &qca4019_values; break; + case ATH10K_HW_WCN3990: + ar->regs = &wcn3990_regs; + ar->hw_ce_regs = &wcn3990_ce_regs; + ar->hw_values = &wcn3990_values; + break; default: ath10k_err(ar, "unsupported core hardware revision %d\n", hw_rev); diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 1aa5cf12fce0..2b499af722ad 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -993,6 +993,8 @@ struct ath10k { u32 reg_ack_cts_timeout_orig; } fw_coverage; + void *ce_priv; + /* must be last */ u8 drv_priv[0] __aligned(sizeof(void *)); }; diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 389fcb7a9fd0..56404fe4e8f5 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -237,7 +237,7 @@ static ssize_t ath10k_read_wmi_services(struct file *file, { struct ath10k *ar = file->private_data; char *buf; - size_t len = 0, buf_len = 4096; + size_t len = 0, buf_len = 8192; const char *name; ssize_t ret_cnt; bool enabled; diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c index afb0c01cbb55..a860691d635d 100644 --- a/drivers/net/wireless/ath/ath10k/hw.c +++ b/drivers/net/wireless/ath/ath10k/hw.c @@ -192,6 +192,156 @@ const struct ath10k_hw_values qca4019_values = { .ce_desc_meta_data_lsb = 4, }; +const struct ath10k_hw_regs wcn3990_regs = { + .rtc_soc_base_address = 0x00000000, + .rtc_wmac_base_address = 0x00000000, + .soc_core_base_address = 0x00000000, + .ce_wrapper_base_address = 0x0024C000, + .ce0_base_address = 0x00240000, + .ce1_base_address = 0x00241000, + .ce2_base_address = 0x00242000, + .ce3_base_address = 0x00243000, + .ce4_base_address = 0x00244000, + .ce5_base_address = 0x00245000, + .ce6_base_address = 0x00246000, + .ce7_base_address = 0x00247000, + .ce8_base_address = 0x00248000, + .ce9_base_address = 0x00249000, + .ce10_base_address = 0x0024A000, + .ce11_base_address = 0x0024B000, + .soc_chip_id_address = 0x000000f0, + .soc_reset_control_si0_rst_mask = 0x00000001, + .soc_reset_control_ce_rst_mask = 0x00000100, + .ce_wrap_intr_sum_host_msi_lsb = 0x0000000c, + .ce_wrap_intr_sum_host_msi_mask = 0x00fff000, + .pcie_intr_fw_mask = 0x00100000, +}; + +static struct ath10k_hw_ce_regs_addr_map wcn3990_src_ring = { + .msb = 0x00000010, + .lsb = 0x00000010, + .mask = GENMASK(17, 17), +}; + +static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_ring = { + .msb = 0x00000012, + .lsb = 0x00000012, + .mask = GENMASK(18, 18), +}; + +static struct ath10k_hw_ce_regs_addr_map wcn3990_dmax = { + .msb = 0x00000000, + .lsb = 0x00000000, + .mask = GENMASK(15, 0), +}; + +static struct ath10k_hw_ce_ctrl1 wcn3990_ctrl1 = { + .addr = 0x00000018, + .src_ring = &wcn3990_src_ring, + .dst_ring = &wcn3990_dst_ring, + .dmax = &wcn3990_dmax, +}; + +static struct ath10k_hw_ce_regs_addr_map wcn3990_host_ie_cc = { + .mask = GENMASK(0, 0), +}; + +static struct ath10k_hw_ce_host_ie wcn3990_host_ie = { + .copy_complete = &wcn3990_host_ie_cc, +}; + +static struct ath10k_hw_ce_host_wm_regs wcn3990_wm_reg = { + .dstr_lmask = 0x00000010, + .dstr_hmask = 0x00000008, + .srcr_lmask = 0x00000004, + .srcr_hmask = 0x00000002, + .cc_mask = 0x00000001, + .wm_mask = 0x0000001E, + .addr = 0x00000030, +}; + +static struct ath10k_hw_ce_misc_regs wcn3990_misc_reg = { + .axi_err = 0x00000100, + .dstr_add_err = 0x00000200, + 
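	/*
	 * Note: err_mask (0x000003E0) below appears to be the OR of
	 * dstr_add_err | srcr_len_err | dstr_mlen_vio | dstr_overflow |
	 * srcr_overflow (0x200 | 0x100 | 0x80 | 0x40 | 0x20); axi_err
	 * shares bit 8 with srcr_len_err in this WCN3990 layout.
	 */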
.srcr_len_err = 0x00000100, + .dstr_mlen_vio = 0x00000080, + .dstr_overflow = 0x00000040, + .srcr_overflow = 0x00000020, + .err_mask = 0x000003E0, + .addr = 0x00000038, +}; + +static struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_low = { + .msb = 0x00000000, + .lsb = 0x00000010, + .mask = GENMASK(31, 16), +}; + +static struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_high = { + .msb = 0x0000000f, + .lsb = 0x00000000, + .mask = GENMASK(15, 0), +}; + +static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_src_ring = { + .addr = 0x0000004c, + .low_rst = 0x00000000, + .high_rst = 0x00000000, + .wm_low = &wcn3990_src_wm_low, + .wm_high = &wcn3990_src_wm_high, +}; + +static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_low = { + .lsb = 0x00000010, + .mask = GENMASK(31, 16), +}; + +static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_high = { + .msb = 0x0000000f, + .lsb = 0x00000000, + .mask = GENMASK(15, 0), +}; + +static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_dst_ring = { + .addr = 0x00000050, + .low_rst = 0x00000000, + .high_rst = 0x00000000, + .wm_low = &wcn3990_dst_wm_low, + .wm_high = &wcn3990_dst_wm_high, +}; + +struct ath10k_hw_ce_regs wcn3990_ce_regs = { + .sr_base_addr = 0x00000000, + .sr_size_addr = 0x00000008, + .dr_base_addr = 0x0000000c, + .dr_size_addr = 0x00000014, + .misc_ie_addr = 0x00000034, + .sr_wr_index_addr = 0x0000003c, + .dst_wr_index_addr = 0x00000040, + .current_srri_addr = 0x00000044, + .current_drri_addr = 0x00000048, + .ddr_addr_for_rri_low = 0x00000004, + .ddr_addr_for_rri_high = 0x00000008, + .ce_rri_low = 0x0024C004, + .ce_rri_high = 0x0024C008, + .host_ie_addr = 0x0000002c, + .ctrl1_regs = &wcn3990_ctrl1, + .host_ie = &wcn3990_host_ie, + .wm_regs = &wcn3990_wm_reg, + .misc_regs = &wcn3990_misc_reg, + .wm_srcr = &wcn3990_wm_src_ring, + .wm_dstr = &wcn3990_wm_dst_ring, +}; + +const struct ath10k_hw_values wcn3990_values = { + .rtc_state_val_on = 5, + .ce_count = 12, + .msi_assign_ce_max = 12, + .num_target_ce_config_wlan = 12, + .ce_desc_meta_data_mask = 0xFFF0, + .ce_desc_meta_data_lsb = 4, +}; + static struct ath10k_hw_ce_regs_addr_map qcax_src_ring = { .msb = 0x00000010, .lsb = 0x00000010, diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 97dc1479f44e..19e43512af50 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -231,6 +231,7 @@ enum ath10k_hw_rev { ATH10K_HW_QCA9377, ATH10K_HW_QCA4019, ATH10K_HW_QCA9887, + ATH10K_HW_WCN3990, }; struct ath10k_hw_regs { @@ -247,6 +248,10 @@ struct ath10k_hw_regs { u32 ce5_base_address; u32 ce6_base_address; u32 ce7_base_address; + u32 ce8_base_address; + u32 ce9_base_address; + u32 ce10_base_address; + u32 ce11_base_address; u32 soc_reset_control_si0_rst_mask; u32 soc_reset_control_ce_rst_mask; u32 soc_chip_id_address; @@ -267,6 +272,7 @@ extern const struct ath10k_hw_regs qca988x_regs; extern const struct ath10k_hw_regs qca6174_regs; extern const struct ath10k_hw_regs qca99x0_regs; extern const struct ath10k_hw_regs qca4019_regs; +extern const struct ath10k_hw_regs wcn3990_regs; struct ath10k_hw_ce_regs_addr_map { u32 msb; @@ -362,6 +368,8 @@ extern const struct ath10k_hw_values qca6174_values; extern const struct ath10k_hw_values qca99x0_values; extern const struct ath10k_hw_values qca9888_values; extern const struct ath10k_hw_values qca4019_values; +extern const struct ath10k_hw_values wcn3990_values; +extern struct ath10k_hw_ce_regs wcn3990_ce_regs; extern struct ath10k_hw_ce_regs qcax_ce_regs; void 
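/*
 * The msb/lsb/mask triplets above describe where each CE control field sits
 * for this chip. A sketch of how such a map is meant to be consumed
 * (illustrative helpers, not part of the patch):
 */
static inline u32 example_map_get(const struct ath10k_hw_ce_regs_addr_map *map,
				  u32 reg_val)
{
	/* isolate the field with the mask, shift it down to bit 0 */
	return (reg_val & map->mask) >> map->lsb;
}

static inline u32 example_map_set(const struct ath10k_hw_ce_regs_addr_map *map,
				  u32 reg_val, u32 field)
{
	/* clear the field, then insert the new value at its lsb position */
	return (reg_val & ~map->mask) | ((field << map->lsb) & map->mask);
}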
ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey, @@ -375,6 +383,7 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey, #define QCA_REV_9984(ar) ((ar)->hw_rev == ATH10K_HW_QCA9984) #define QCA_REV_9377(ar) ((ar)->hw_rev == ATH10K_HW_QCA9377) #define QCA_REV_40XX(ar) ((ar)->hw_rev == ATH10K_HW_QCA4019) +#define QCA_REV_WCN3990(ar) ((ar)->hw_rev == ATH10K_HW_WCN3990) /* Known peculiarities: * - raw appears in nwifi decap, raw and nwifi appear in ethernet decap diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 7ebfc409018d..a697caec6579 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -672,16 +672,16 @@ static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset) inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); - ar_pci->bus_ops->write32(ar, offset, value); + ce->bus_ops->write32(ar, offset, value); } inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); - return ar_pci->bus_ops->read32(ar, offset); + return ce->bus_ops->read32(ar, offset); } u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr) @@ -761,7 +761,7 @@ static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar) static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe) { struct ath10k *ar = pipe->hif_ce_state; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; struct sk_buff *skb; dma_addr_t paddr; @@ -784,9 +784,9 @@ static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe) ATH10K_SKB_RXCB(skb)->paddr = paddr; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); if (ret) { dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); @@ -801,6 +801,7 @@ static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe) { struct ath10k *ar = pipe->hif_ce_state; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; int ret, num; @@ -810,9 +811,9 @@ static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe) if (!ce_pipe->dest_ring) return; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); num = __ath10k_ce_rx_num_free_bufs(ce_pipe); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); while (num >= 0) { ret = __ath10k_pci_rx_post_buf(pipe); @@ -882,6 +883,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, int nbytes) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); int ret = 0; u32 *buf; unsigned int completed_nbytes, alloc_nbytes, remaining_bytes; @@ -892,7 +894,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, void *data_buf = NULL; int i; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); ce_diag = ar_pci->ce_diag; @@ -986,7 +988,7 @@ done: dma_free_coherent(ar->dev, alloc_nbytes, data_buf, ce_data_base); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); return ret; } @@ -1034,6 +1036,7 @@ int ath10k_pci_diag_write_mem(struct 
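/*
 * With ath10k_pci_read32/write32 reduced to thin wrappers over ce->bus_ops
 * in the pci.c hunk above, CE register access can be expressed without any
 * PCI knowledge. Illustrative accessor under that assumption (example_* is
 * not in the patch):
 */
static u32 example_ce_reg_read(struct ath10k *ar, unsigned int ce_id,
			       u32 offset)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	/* per-engine register offsets are relative to that CE's base address */
	return ce->bus_ops->read32(ar,
				   ath10k_ce_base_address(ar, ce_id) + offset);
}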
ath10k *ar, u32 address, const void *data, int nbytes) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); int ret = 0; u32 *buf; unsigned int completed_nbytes, orig_nbytes, remaining_bytes; @@ -1043,7 +1046,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, dma_addr_t ce_data_base = 0; int i; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); ce_diag = ar_pci->ce_diag; @@ -1147,7 +1150,7 @@ done: ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n", address, ret); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); return ret; } @@ -1342,6 +1345,7 @@ int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, struct ath10k_hif_sg_item *items, int n_items) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id]; struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl; struct ath10k_ce_ring *src_ring = ce_pipe->src_ring; @@ -1350,7 +1354,7 @@ int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, unsigned int write_index; int err, i = 0; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); nentries_mask = src_ring->nentries_mask; sw_index = src_ring->sw_index; @@ -1396,14 +1400,14 @@ int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, if (err) goto err; - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); return 0; err: for (; i > 0; i--) __ath10k_ce_send_revert(ce_pipe); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); return err; } @@ -1593,6 +1597,8 @@ void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar) * to mask irq/MSI. */ break; + case ATH10K_HW_WCN3990: + break; } } @@ -1619,6 +1625,8 @@ static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar) * to unmask irq/MSI. */ break; + case ATH10K_HW_WCN3990: + break; } } @@ -2000,9 +2008,9 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar) static int ath10k_bus_get_num_banks(struct ath10k *ar) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); - return ar_pci->bus_ops->get_num_banks(ar); + return ce->bus_ops->get_num_banks(ar); } int ath10k_pci_init_config(struct ath10k *ar) @@ -2173,11 +2181,12 @@ int ath10k_pci_alloc_pipes(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_pci_pipe *pipe; + struct ath10k_ce *ce = ath10k_ce_priv(ar); int i, ret; for (i = 0; i < CE_COUNT; i++) { pipe = &ar_pci->pipe_info[i]; - pipe->ce_hdl = &ar_pci->ce_states[i]; + pipe->ce_hdl = &ce->ce_states[i]; pipe->pipe_num = i; pipe->hif_ce_state = ar; @@ -2825,7 +2834,7 @@ static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget) * interrupts safer to check for pending interrupts for * immediate servicing. 
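 *
 * A note on the empty ATH10K_HW_WCN3990 cases added to the
 * irq_msi_fw_mask/unmask switches above: they keep the hw_rev switch
 * statements exhaustive; presumably there is nothing for WCN3990 to mask
 * in these PCI-era registers, so "do nothing" is spelled out rather than
 * left to a default label.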
*/ - if (CE_INTERRUPT_SUMMARY(ar)) { + if (ath10k_ce_interrupt_summary(ar)) { napi_reschedule(ctx); goto out; } @@ -3142,9 +3151,10 @@ static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id) int ath10k_pci_setup_resource(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); int ret; - spin_lock_init(&ar_pci->ce_lock); + spin_lock_init(&ce->ce_lock); spin_lock_init(&ar_pci->ps_lock); setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, @@ -3263,10 +3273,11 @@ static int ath10k_pci_probe(struct pci_dev *pdev, ar_pci->ar = ar; ar->dev_id = pci_dev->device; ar_pci->pci_ps = pci_ps; - ar_pci->bus_ops = &ath10k_pci_bus_ops; + ar_pci->ce.bus_ops = &ath10k_pci_bus_ops; ar_pci->pci_soft_reset = pci_soft_reset; ar_pci->pci_hard_reset = pci_hard_reset; ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr; + ar->ce_priv = &ar_pci->ce; ar->id.vendor = pdev->vendor; ar->id.device = pdev->device; diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index c1e08ad63940..424ff323b2dc 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -150,12 +150,6 @@ struct ath10k_pci_supp_chip { u32 rev_id; }; -struct ath10k_bus_ops { - u32 (*read32)(struct ath10k *ar, u32 offset); - void (*write32)(struct ath10k *ar, u32 offset, u32 value); - int (*get_num_banks)(struct ath10k *ar); -}; - enum ath10k_pci_irq_mode { ATH10K_PCI_IRQ_AUTO = 0, ATH10K_PCI_IRQ_LEGACY = 1, @@ -177,11 +171,7 @@ struct ath10k_pci { /* Copy Engine used for Diagnostic Accesses */ struct ath10k_ce_pipe *ce_diag; - /* FIXME: document what this really protects */ - spinlock_t ce_lock; - - /* Map CE id to ce_state */ - struct ath10k_ce_pipe ce_states[CE_COUNT_MAX]; + struct ath10k_ce ce; struct timer_list rx_post_retry; /* Due to HW quirks it is recommended to disable ASPM during device @@ -225,8 +215,6 @@ struct ath10k_pci { */ bool pci_ps; - const struct ath10k_bus_ops *bus_ops; - /* Chip specific pci reset routine used to do a safe reset */ int (*pci_soft_reset)(struct ath10k *ar); diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 3efb404b83c0..96cd1ebd6a7e 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -3305,7 +3305,7 @@ static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif, if (arvif->u.ap.noa_data) if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC)) skb_put_data(bcn, arvif->u.ap.noa_data, - arvif->u.ap.noa_len); + arvif->u.ap.noa_len); } static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c index 1447a8352383..2d3e5e263a32 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c @@ -78,10 +78,7 @@ int brcmf_debug_attach(struct brcmf_pub *drvr) return -ENODEV; drvr->dbgfs_dir = debugfs_create_dir(dev_name(dev), root_folder); - if (IS_ERR(drvr->dbgfs_dir)) - return PTR_ERR(drvr->dbgfs_dir); - - return 0; + return PTR_ERR_OR_ZERO(drvr->dbgfs_dir); } void brcmf_debug_detach(struct brcmf_pub *drvr) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index f878706613e6..e6e9b00b79d7 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ 
b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -1951,7 +1951,7 @@ static const struct dev_pm_ops brcmf_pciedrvr_pm = { BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\ subvend, subdev, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 } -static struct pci_device_id brcmf_pcie_devid_table[] = { +static const struct pci_device_id brcmf_pcie_devid_table[] = { BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID), diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index 84143a02adce..54201c02fdb8 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -7837,7 +7837,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) { struct airo_info *ai = dev->ml_priv; int ridcode; int enabled; - static int (* writer)(struct airo_info *, u16 rid, const void *, int, int); + int (*writer)(struct airo_info *, u16 rid, const void *, int, int); unsigned char *iobuf; /* Only super-user can write RIDs */ diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c index aaaca4d08e2b..77f3f92b3c85 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c @@ -1724,7 +1724,7 @@ static const struct libipw_geo ipw_geos[] = { static int ipw2100_up(struct ipw2100_priv *priv, int deferred) { unsigned long flags; - int rc = 0; + int err = 0; u32 lock; u32 ord_len = sizeof(lock); @@ -1757,33 +1757,33 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred) if (priv->status & STATUS_POWERED || (priv->status & STATUS_RESET_PENDING)) { /* Power cycle the card ... */ - if (ipw2100_power_cycle_adapter(priv)) { + err = ipw2100_power_cycle_adapter(priv); + if (err) { printk(KERN_WARNING DRV_NAME ": %s: Could not cycle adapter.\n", priv->net_dev->name); - rc = 1; goto exit; } } else priv->status |= STATUS_POWERED; /* Load the firmware, start the clocks, etc. 
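 *
 * A note on the airo.c hunk above: "writer" was a function-local declared
 * static, i.e. a single slot shared by every concurrent ioctl caller, so
 * two tasks writing different RIDs could race and call through each
 * other's pointer. Dropping "static" gives each invocation its own stack
 * copy, which is all this dispatch-by-ridcode idiom needs.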
*/ - if (ipw2100_start_adapter(priv)) { + err = ipw2100_start_adapter(priv); + if (err) { printk(KERN_ERR DRV_NAME ": %s: Failed to start the firmware.\n", priv->net_dev->name); - rc = 1; goto exit; } ipw2100_initialize_ordinals(priv); /* Determine capabilities of this particular HW configuration */ - if (ipw2100_get_hw_features(priv)) { + err = ipw2100_get_hw_features(priv); + if (err) { printk(KERN_ERR DRV_NAME ": %s: Failed to determine HW features.\n", priv->net_dev->name); - rc = 1; goto exit; } @@ -1792,11 +1792,11 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred) priv->ieee->freq_band = LIBIPW_24GHZ_BAND; lock = LOCK_NONE; - if (ipw2100_set_ordinal(priv, IPW_ORD_PERS_DB_LOCK, &lock, &ord_len)) { + err = ipw2100_set_ordinal(priv, IPW_ORD_PERS_DB_LOCK, &lock, &ord_len); + if (err) { printk(KERN_ERR DRV_NAME ": %s: Failed to clear ordinal lock.\n", priv->net_dev->name); - rc = 1; goto exit; } @@ -1820,21 +1820,21 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred) /* Send all of the commands that must be sent prior to * HOST_COMPLETE */ - if (ipw2100_adapter_setup(priv)) { + err = ipw2100_adapter_setup(priv); + if (err) { printk(KERN_ERR DRV_NAME ": %s: Failed to start the card.\n", priv->net_dev->name); - rc = 1; goto exit; } if (!deferred) { /* Enable the adapter - sends HOST_COMPLETE */ - if (ipw2100_enable_adapter(priv)) { + err = ipw2100_enable_adapter(priv); + if (err) { printk(KERN_ERR DRV_NAME ": " "%s: failed in call to enable adapter.\n", priv->net_dev->name); ipw2100_hw_stop_adapter(priv); - rc = 1; goto exit; } @@ -1844,7 +1844,7 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred) } exit: - return rc; + return err; } static void ipw2100_down(struct ipw2100_priv *priv) @@ -4324,7 +4324,7 @@ static struct attribute *ipw2100_sysfs_entries[] = { NULL, }; -static struct attribute_group ipw2100_attribute_group = { +static const struct attribute_group ipw2100_attribute_group = { .attrs = ipw2100_sysfs_entries, }; diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index 9368abdf18e2..c311b1a994c1 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -11500,7 +11500,7 @@ static struct attribute *ipw_sysfs_entries[] = { NULL }; -static struct attribute_group ipw_attribute_group = { +static const struct attribute_group ipw_attribute_group = { .name = NULL, /* put in device directory */ .attrs = ipw_sysfs_entries, }; diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c index 38bf403bb1e1..329f3a63dadd 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c @@ -3464,7 +3464,7 @@ static struct attribute *il3945_sysfs_entries[] = { NULL }; -static struct attribute_group il3945_attribute_group = { +static const struct attribute_group il3945_attribute_group = { .name = NULL, /* put in device directory */ .attrs = il3945_sysfs_entries, }; diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index 5b51fba75595..de9b6522c43f 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -4654,7 +4654,7 @@ static struct attribute *il_sysfs_entries[] = { NULL }; -static struct attribute_group il_attribute_group = { +static const struct attribute_group il_attribute_group = { .name = NULL, /* put in device directory */ .attrs 
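/*
 * The ipw2100_up() conversion around here stops flattening every failure
 * into "rc = 1" and propagates each callee's error code. The shape of the
 * change, with a hypothetical step() helper:
 *
 *	err = step(priv);	// was: if (step(priv)) { rc = 1; goto exit; }
 *	if (err)
 *		goto exit;	// ipw2100_up() now returns step()'s errno
 *
 * Callers that only test for nonzero keep working; callers that care can
 * now see the underlying error code.
 */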
= il_sysfs_entries, }; diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile index 20bd261223af..5dcb4a848dba 100644 --- a/drivers/net/wireless/intel/iwlwifi/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/Makefile @@ -11,6 +11,8 @@ iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/a000.o iwlwifi-objs += iwl-trans.o iwlwifi-objs += fw/notif-wait.o +iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o +iwlwifi-$(CONFIG_IWLMVM) += fw/common_rx.o iwlwifi-objs += $(iwlwifi-m) diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h index 2ab2773655a8..ede47e3c5971 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h @@ -1437,22 +1437,6 @@ struct agg_tx_status { __le16 sequence; } __packed; -/* - * definitions for initial rate index field - * bits [3:0] initial rate index - * bits [6:4] rate table color, used for the initial rate - * bit-7 invalid rate indication - * i.e. rate was not chosen from rate table - * or rate table color was changed during frame retries - * refer tlc rate info - */ - -#define IWL50_TX_RES_INIT_RATE_INDEX_POS 0 -#define IWL50_TX_RES_INIT_RATE_INDEX_MSK 0x0f -#define IWL50_TX_RES_RATE_TABLE_COLOR_POS 4 -#define IWL50_TX_RES_RATE_TABLE_COLOR_MSK 0x70 -#define IWL50_TX_RES_INV_RATE_INDEX_MSK 0x80 - /* refer to ra_tid */ #define IWLAGN_TX_RES_TID_POS 0 #define IWLAGN_TX_RES_TID_MSK 0x0f diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h new file mode 100644 index 000000000000..3684a3e180e5 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h @@ -0,0 +1,206 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless <linuxwifi@intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_fw_api_alive_h__ +#define __iwl_fw_api_alive_h__ + +/* alive response is_valid values */ +#define ALIVE_RESP_UCODE_OK BIT(0) +#define ALIVE_RESP_RFKILL BIT(1) + +/* alive response ver_type values */ +enum { + FW_TYPE_HW = 0, + FW_TYPE_PROT = 1, + FW_TYPE_AP = 2, + FW_TYPE_WOWLAN = 3, + FW_TYPE_TIMING = 4, + FW_TYPE_WIPAN = 5 +}; + +/* alive response ver_subtype values */ +enum { + FW_SUBTYPE_FULL_FEATURE = 0, + FW_SUBTYPE_BOOTSRAP = 1, /* Not valid */ + FW_SUBTYPE_REDUCED = 2, + FW_SUBTYPE_ALIVE_ONLY = 3, + FW_SUBTYPE_WOWLAN = 4, + FW_SUBTYPE_AP_SUBTYPE = 5, + FW_SUBTYPE_WIPAN = 6, + FW_SUBTYPE_INITIALIZE = 9 +}; + +#define IWL_ALIVE_STATUS_ERR 0xDEAD +#define IWL_ALIVE_STATUS_OK 0xCAFE + +#define IWL_ALIVE_FLG_RFKILL BIT(0) + +struct iwl_lmac_alive { + __le32 ucode_minor; + __le32 ucode_major; + u8 ver_subtype; + u8 ver_type; + u8 mac; + u8 opt; + __le32 timestamp; + __le32 error_event_table_ptr; /* SRAM address for error log */ + __le32 log_event_table_ptr; /* SRAM address for LMAC event log */ + __le32 cpu_register_ptr; + __le32 dbgm_config_ptr; + __le32 alive_counter_ptr; + __le32 scd_base_ptr; /* SRAM address for SCD */ + __le32 st_fwrd_addr; /* pointer to Store and forward */ + __le32 st_fwrd_size; +} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_3 */ + +struct iwl_umac_alive { + __le32 umac_minor; /* UMAC version: minor */ + __le32 umac_major; /* UMAC version: major */ + __le32 error_info_addr; /* SRAM address for UMAC error log */ + __le32 dbg_print_buff_addr; +} __packed; /* UMAC_ALIVE_DATA_API_S_VER_2 */ + +struct mvm_alive_resp_v3 { + __le16 status; + __le16 flags; + struct iwl_lmac_alive lmac_data; + struct iwl_umac_alive umac_data; +} __packed; /* ALIVE_RES_API_S_VER_3 */ + +struct mvm_alive_resp { + __le16 status; + __le16 flags; + struct iwl_lmac_alive lmac_data[2]; + struct iwl_umac_alive umac_data; +} __packed; /* ALIVE_RES_API_S_VER_4 */ + +/** + * enum iwl_extended_cfg_flag - commands driver may send before + * finishing init flow + * @IWL_INIT_DEBUG_CFG: driver is going to send debug config command + * @IWL_INIT_NVM: driver is going to send NVM_ACCESS commands + * @IWL_INIT_PHY: driver is going to send PHY_DB commands + */ +enum iwl_extended_cfg_flags { + IWL_INIT_DEBUG_CFG, + 
IWL_INIT_NVM, + IWL_INIT_PHY, +}; + +/** + * struct iwl_extended_cfg_cmd - mark what commands ucode should wait for + * before finishing init flows + * @init_flags: values from iwl_extended_cfg_flags + */ +struct iwl_init_extended_cfg_cmd { + __le32 init_flags; +} __packed; /* INIT_EXTENDED_CFG_CMD_API_S_VER_1 */ + +/** + * struct iwl_radio_version_notif - information on the radio version + * ( RADIO_VERSION_NOTIFICATION = 0x68 ) + * @radio_flavor: radio flavor + * @radio_step: radio version step + * @radio_dash: radio version dash + */ +struct iwl_radio_version_notif { + __le32 radio_flavor; + __le32 radio_step; + __le32 radio_dash; +} __packed; /* RADIO_VERSION_NOTOFICATION_S_VER_1 */ + +enum iwl_card_state_flags { + CARD_ENABLED = 0x00, + HW_CARD_DISABLED = 0x01, + SW_CARD_DISABLED = 0x02, + CT_KILL_CARD_DISABLED = 0x04, + HALT_CARD_DISABLED = 0x08, + CARD_DISABLED_MSK = 0x0f, + CARD_IS_RX_ON = 0x10, +}; + +/** + * struct iwl_card_state_notif - information on the card state + * ( CARD_STATE_NOTIFICATION = 0xa1 ) + * @flags: &enum iwl_card_state_flags + */ +struct iwl_card_state_notif { + __le32 flags; +} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */ + +/** + * struct iwl_fseq_ver_mismatch_ntf - Notification about FSEQ version mismatch + * + * This notification does not have a direct impact on the init flow. + * It means that another core (not WiFi) has initiated the FSEQ flow + * and updated the FSEQ version. The driver only prints an error when + * this occurs. + * + * @aux_read_fseq_ver: auxiliary read FSEQ version + * @wifi_fseq_ver: FSEQ version (embedded in WiFi) + */ +struct iwl_fseq_ver_mismatch_ntf { + __le32 aux_read_fseq_ver; + __le32 wifi_fseq_ver; +} __packed; /* FSEQ_VER_MISMATCH_NTFY_API_S_VER_1 */ + +#endif /* __iwl_fw_api_alive_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h b/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h new file mode 100644 index 000000000000..d2717fafdf5b --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h @@ -0,0 +1,144 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless <linuxwifi@intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved.
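 *
 * A note on fw/api/alive.h above: &struct mvm_alive_resp_v3 carries one
 * &struct iwl_lmac_alive while &struct mvm_alive_resp (v4) carries two
 * (for CDB), so a receiver is expected to pick the layout at runtime,
 * roughly
 *
 *	if (iwl_rx_packet_payload_len(pkt) == sizeof(struct mvm_alive_resp))
 *		parse_v4(pkt->data);	(two LMACs)
 *	else
 *		parse_v3(pkt->data);
 *
 * (Sketch only: parse_v3/parse_v4 are illustrative names, and the
 * payload-length helper is assumed to be the driver's existing
 * iwl_rx_packet_payload_len().)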
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_fw_api_binding_h__ +#define __iwl_fw_api_binding_h__ + +#define MAX_MACS_IN_BINDING (3) +#define MAX_BINDINGS (4) + +/** + * struct iwl_binding_cmd_v1 - configuring bindings + * ( BINDING_CONTEXT_CMD = 0x2b ) + * @id_and_color: ID and color of the relevant Binding, + * &enum iwl_ctxt_id_and_color + * @action: action to perform, one of FW_CTXT_ACTION_* + * @macs: array of MAC id and colors which belong to the binding, + * &enum iwl_ctxt_id_and_color + * @phy: PHY id and color which belongs to the binding, + * &enum iwl_ctxt_id_and_color + */ +struct iwl_binding_cmd_v1 { + /* COMMON_INDEX_HDR_API_S_VER_1 */ + __le32 id_and_color; + __le32 action; + /* BINDING_DATA_API_S_VER_1 */ + __le32 macs[MAX_MACS_IN_BINDING]; + __le32 phy; +} __packed; /* BINDING_CMD_API_S_VER_1 */ + +/** + * struct iwl_binding_cmd - configuring bindings + * ( BINDING_CONTEXT_CMD = 0x2b ) + * @id_and_color: ID and color of the relevant Binding, + * &enum iwl_ctxt_id_and_color + * @action: action to perform, one of FW_CTXT_ACTION_* + * @macs: array of MAC id and colors which belong to the binding + * &enum iwl_ctxt_id_and_color + * @phy: PHY id and color which belongs to the binding + * &enum iwl_ctxt_id_and_color + * @lmac_id: the lmac id the binding belongs to + */ +struct iwl_binding_cmd { + /* COMMON_INDEX_HDR_API_S_VER_1 */ + __le32 id_and_color; + __le32 action; + /* BINDING_DATA_API_S_VER_1 */ + __le32 macs[MAX_MACS_IN_BINDING]; + __le32 phy; + __le32 lmac_id; +} __packed; /* BINDING_CMD_API_S_VER_2 */ + +#define IWL_BINDING_CMD_SIZE_V1 sizeof(struct iwl_binding_cmd_v1) +#define IWL_LMAC_24G_INDEX 0 +#define IWL_LMAC_5G_INDEX 1 + +/* The maximal number of fragments in the FW's schedule session */ +#define IWL_MVM_MAX_QUOTA 128 + +/** + * struct iwl_time_quota_data - configuration of time quota per binding + * @id_and_color: ID and color of the relevant Binding, + * &enum iwl_ctxt_id_and_color + * @quota: absolute time quota 
in TU. The scheduler will try to divide the + * remaining quota (after Time Events) according to this quota. + * @max_duration: max uninterrupted context duration in TU + */ +struct iwl_time_quota_data { + __le32 id_and_color; + __le32 quota; + __le32 max_duration; +} __packed; /* TIME_QUOTA_DATA_API_S_VER_1 */ + +/** + * struct iwl_time_quota_cmd - configuration of time quota between bindings + * ( TIME_QUOTA_CMD = 0x2c ) + * @quotas: allocations per binding + * Note: on non-CDB the fourth one is the auxiliary mac and is + * essentially zero. + * On CDB the fourth one is a regular binding. + */ +struct iwl_time_quota_cmd { + struct iwl_time_quota_data quotas[MAX_BINDINGS]; +} __packed; /* TIME_QUOTA_ALLOCATION_CMD_API_S_VER_1 */ + +#endif /* __iwl_fw_api_binding_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api.h b/drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h index 0e107f916ce3..ea4a3f04a83a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h @@ -59,8 +59,8 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ -#ifndef __iwl_fw_api_h__ -#define __iwl_fw_api_h__ +#ifndef __iwl_fw_api_cmdhdr_h__ +#define __iwl_fw_api_cmdhdr_h__ /** * DOC: Host command section @@ -112,15 +112,24 @@ static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version) #define IWL_ALWAYS_LONG_GROUP 1 /** - * struct iwl_cmd_header + * struct iwl_cmd_header - (short) command header format * * This header format appears in the beginning of each command sent from the * driver, and each response/notification received from uCode. */ struct iwl_cmd_header { - u8 cmd; /* Command ID: REPLY_RXON, etc. */ + /** + * @cmd: Command ID: REPLY_RXON, etc. + */ + u8 cmd; + /** + * @group_id: group ID, for commands with groups + */ u8 group_id; - /* + /** + * @sequence: + * Sequence number for the command. + * * The driver sets up the sequence number to values of its choosing. * uCode does not use this value, but passes it back to the driver * when sending the response to each driver-originated command, so @@ -150,6 +159,13 @@ struct iwl_cmd_header { * driver, and each response/notification received from uCode. * this is the wide version that contains more information about the command * like length, version and command type + * + * @cmd: command ID, like in &struct iwl_cmd_header + * @group_id: group ID, like in &struct iwl_cmd_header + * @sequence: sequence, like in &struct iwl_cmd_header + * @length: length of the command + * @reserved: reserved + * @version: command version */ struct iwl_cmd_header_wide { u8 cmd; @@ -161,48 +177,6 @@ struct iwl_cmd_header_wide { } __packed; /** - * iwl_tx_queue_cfg_actions - TXQ config options - * @TX_QUEUE_CFG_ENABLE_QUEUE: enable a queue - * @TX_QUEUE_CFG_TFD_SHORT_FORMAT: use short TFD format - */ -enum iwl_tx_queue_cfg_actions { - TX_QUEUE_CFG_ENABLE_QUEUE = BIT(0), - TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1), -}; - -/** - * struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command - * @sta_id: station id - * @tid: tid of the queue - * @flags: see &enum iwl_tx_queue_cfg_actions - * @cb_size: size of TFD cyclic buffer. Value is exponent - 3.
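 *
 * A note on fw/api/binding.h above: a quota command carries one
 * &struct iwl_time_quota_data per binding (MAX_BINDINGS = 4), with quota
 * sized relative to IWL_MVM_MAX_QUOTA (128), the maximal number of
 * fragments in the firmware's scheduling session. A hedged sketch of
 * filling one slot, values illustrative:
 *
 *	struct iwl_time_quota_cmd cmd = {};
 *
 *	cmd.quotas[0].id_and_color = cpu_to_le32(binding_id_and_color);
 *	cmd.quotas[0].quota = cpu_to_le32(IWL_MVM_MAX_QUOTA / 2);
 *	cmd.quotas[0].max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA);
 *
 * (binding_id_and_color is assumed to come from the binding context
 * command; unused slots are left zeroed.)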
- * Minimum value 0 (8 TFDs), maximum value 5 (256 TFDs) - * @byte_cnt_addr: address of byte count table - * @tfdq_addr: address of TFD circular buffer - */ -struct iwl_tx_queue_cfg_cmd { - u8 sta_id; - u8 tid; - __le16 flags; - __le32 cb_size; - __le64 byte_cnt_addr; - __le64 tfdq_addr; -} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_2 */ - -/** - * struct iwl_tx_queue_cfg_rsp - response to txq hw scheduler config - * @queue_number: queue number assigned to this RA -TID - * @flags: set on failure - * @write_pointer: initial value for write pointer - */ -struct iwl_tx_queue_cfg_rsp { - __le16 queue_number; - __le16 flags; - __le16 write_pointer; - __le16 reserved; -} __packed; /* TX_QUEUE_CFG_RSP_API_S_VER_2 */ - -/** * struct iwl_calib_res_notif_phy_db - Receive phy db chunk after calibrations * @type: type of the result - mostly ignored * @length: length of the data @@ -226,4 +200,12 @@ struct iwl_phy_db_cmd { u8 data[]; } __packed; -#endif /* __iwl_fw_api_h__*/ +/** + * struct iwl_cmd_response - generic response struct for most commands + * @status: status of the command asked, changes for each one + */ +struct iwl_cmd_response { + __le32 status; +}; + +#endif /* __iwl_fw_api_cmdhdr_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h index 8cd06aaa1f54..583f4189f55e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h @@ -18,11 +18,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -64,8 +59,8 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ -#ifndef __fw_api_bt_coex_h__ -#define __fw_api_bt_coex_h__ +#ifndef __iwl_fw_api_coex_h__ +#define __iwl_fw_api_coex_h__ #include <linux/types.h> #include <linux/bitops.h> @@ -254,4 +249,4 @@ struct iwl_bt_coex_profile_notif { u8 reserved[3]; } __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_4 */ -#endif /* __fw_api_bt_coex_h__ */ +#endif /* __iwl_fw_api_coex_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h new file mode 100644 index 000000000000..c7b8cffdf281 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h @@ -0,0 +1,664 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless <linuxwifi@intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ + +#ifndef __iwl_fw_api_commands_h__ +#define __iwl_fw_api_commands_h__ + +/** + * enum iwl_mvm_command_groups - command groups for the firmware + * @LEGACY_GROUP: legacy group, uses command IDs from &enum iwl_legacy_cmds + * @LONG_GROUP: legacy group with long header, also uses command IDs + * from &enum iwl_legacy_cmds + * @SYSTEM_GROUP: system group, uses command IDs from + * &enum iwl_system_subcmd_ids + * @MAC_CONF_GROUP: MAC configuration group, uses command IDs from + * &enum iwl_mac_conf_subcmd_ids + * @PHY_OPS_GROUP: PHY operations group, uses command IDs from + * &enum iwl_phy_ops_subcmd_ids + * @DATA_PATH_GROUP: data path group, uses command IDs from + * &enum iwl_data_path_subcmd_ids + * @NAN_GROUP: NAN group, uses command IDs from &enum iwl_nan_subcmd_ids + * @TOF_GROUP: TOF group, uses command IDs from &enum iwl_tof_subcmd_ids + * @PROT_OFFLOAD_GROUP: protocol offload group, uses command IDs from + * &enum iwl_prot_offload_subcmd_ids + * @REGULATORY_AND_NVM_GROUP: regulatory/NVM group, uses command IDs from + * &enum iwl_regulatory_and_nvm_subcmd_ids + * @DEBUG_GROUP: Debug group, uses command IDs from &enum iwl_debug_cmds + */ +enum iwl_mvm_command_groups { + LEGACY_GROUP = 0x0, + LONG_GROUP = 0x1, + SYSTEM_GROUP = 0x2, + MAC_CONF_GROUP = 0x3, + PHY_OPS_GROUP = 0x4, + DATA_PATH_GROUP = 0x5, + NAN_GROUP = 0x7, + TOF_GROUP = 0x8, + PROT_OFFLOAD_GROUP = 0xb, + REGULATORY_AND_NVM_GROUP = 0xc, + DEBUG_GROUP = 0xf, +}; + +/** + * enum iwl_legacy_cmds - legacy group command IDs + */ +enum iwl_legacy_cmds { + /** + * @MVM_ALIVE: + * Alive data from the firmware, as described in + * &struct mvm_alive_resp_v3 or &struct mvm_alive_resp. + */ + MVM_ALIVE = 0x1, + + /** + * @REPLY_ERROR: Cause an error in the firmware, for testing purposes. + */ + REPLY_ERROR = 0x2, + + /** + * @ECHO_CMD: Send data to the device to have it returned immediately. + */ + ECHO_CMD = 0x3, + + /** + * @INIT_COMPLETE_NOTIF: Notification that initialization is complete. + */ + INIT_COMPLETE_NOTIF = 0x4, + + /** + * @PHY_CONTEXT_CMD: + * Add/modify/remove a PHY context, using &struct iwl_phy_context_cmd. + */ + PHY_CONTEXT_CMD = 0x8, + + /** + * @DBG_CFG: Debug configuration command. + */ + DBG_CFG = 0x9, + + /** + * @ANTENNA_COUPLING_NOTIFICATION: + * Antenna coupling data, &struct iwl_mvm_antenna_coupling_notif + */ + ANTENNA_COUPLING_NOTIFICATION = 0xa, + + /** + * @SCAN_ITERATION_COMPLETE_UMAC: + * Firmware indicates a scan iteration completed, using + * &struct iwl_umac_scan_iter_complete_notif. + */ + SCAN_ITERATION_COMPLETE_UMAC = 0xb5, + + /** + * @SCAN_CFG_CMD: + * uses &struct iwl_scan_config_v1 or &struct iwl_scan_config + */ + SCAN_CFG_CMD = 0xc, + + /** + * @SCAN_REQ_UMAC: uses &struct iwl_scan_req_umac + */ + SCAN_REQ_UMAC = 0xd, + + /** + * @SCAN_ABORT_UMAC: uses &struct iwl_umac_scan_abort + */ + SCAN_ABORT_UMAC = 0xe, + + /** + * @SCAN_COMPLETE_UMAC: uses &struct iwl_umac_scan_complete + */ + SCAN_COMPLETE_UMAC = 0xf, + + /** + * @BA_WINDOW_STATUS_NOTIFICATION_ID: + * uses &struct iwl_ba_window_status_notif + */ + BA_WINDOW_STATUS_NOTIFICATION_ID = 0x13, + + /** + * @ADD_STA_KEY: + * &struct iwl_mvm_add_sta_key_cmd_v1 or + * &struct iwl_mvm_add_sta_key_cmd. + */ + ADD_STA_KEY = 0x17, + + /** + * @ADD_STA: + * &struct iwl_mvm_add_sta_cmd or &struct iwl_mvm_add_sta_cmd_v7. 
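 *
 * (A note on addressing: entries in the non-legacy groups are addressed by
 * a (group, opcode) pair; the cmdhdr.h hunk earlier keeps the helper
 * iwl_cmd_id(u8 opcode, u8 groupid, u8 version) for composing the wide ID,
 * e.g. iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0) for the system
 * group's shared-memory query further down.)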
+ */ + ADD_STA = 0x18, + + /** + * @REMOVE_STA: &struct iwl_mvm_rm_sta_cmd + */ + REMOVE_STA = 0x19, + + /** + * @FW_GET_ITEM_CMD: uses &struct iwl_fw_get_item_cmd + */ + FW_GET_ITEM_CMD = 0x1a, + + /** + * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2, + * response in &struct iwl_mvm_tx_resp or + * &struct iwl_mvm_tx_resp_v3 + */ + TX_CMD = 0x1c, + + /** + * @TXPATH_FLUSH: &struct iwl_tx_path_flush_cmd + */ + TXPATH_FLUSH = 0x1e, + + /** + * @MGMT_MCAST_KEY: + * &struct iwl_mvm_mgmt_mcast_key_cmd or + * &struct iwl_mvm_mgmt_mcast_key_cmd_v1 + */ + MGMT_MCAST_KEY = 0x1f, + + /* scheduler config */ + /** + * @SCD_QUEUE_CFG: &struct iwl_scd_txq_cfg_cmd for older hardware, + * &struct iwl_tx_queue_cfg_cmd with &struct iwl_tx_queue_cfg_rsp + * for newer (A000) hardware. + */ + SCD_QUEUE_CFG = 0x1d, + + /** + * @WEP_KEY: uses &struct iwl_mvm_wep_key_cmd + */ + WEP_KEY = 0x20, + + /** + * @SHARED_MEM_CFG: + * retrieve shared memory configuration - response in + * &struct iwl_shared_mem_cfg + */ + SHARED_MEM_CFG = 0x25, + + /** + * @TDLS_CHANNEL_SWITCH_CMD: uses &struct iwl_tdls_channel_switch_cmd + */ + TDLS_CHANNEL_SWITCH_CMD = 0x27, + + /** + * @TDLS_CHANNEL_SWITCH_NOTIFICATION: + * uses &struct iwl_tdls_channel_switch_notif + */ + TDLS_CHANNEL_SWITCH_NOTIFICATION = 0xaa, + + /** + * @TDLS_CONFIG_CMD: + * &struct iwl_tdls_config_cmd, response in &struct iwl_tdls_config_res + */ + TDLS_CONFIG_CMD = 0xa7, + + /** + * @MAC_CONTEXT_CMD: &struct iwl_mac_ctx_cmd + */ + MAC_CONTEXT_CMD = 0x28, + + /** + * @TIME_EVENT_CMD: + * &struct iwl_time_event_cmd, response in &struct iwl_time_event_resp + */ + TIME_EVENT_CMD = 0x29, /* both CMD and response */ + + /** + * @TIME_EVENT_NOTIFICATION: &struct iwl_time_event_notif + */ + TIME_EVENT_NOTIFICATION = 0x2a, + + /** + * @BINDING_CONTEXT_CMD: + * &struct iwl_binding_cmd or &struct iwl_binding_cmd_v1 + */ + BINDING_CONTEXT_CMD = 0x2b, + + /** + * @TIME_QUOTA_CMD: &struct iwl_time_quota_cmd + */ + TIME_QUOTA_CMD = 0x2c, + + /** + * @NON_QOS_TX_COUNTER_CMD: + * command is &struct iwl_nonqos_seq_query_cmd + */ + NON_QOS_TX_COUNTER_CMD = 0x2d, + + /** + * @LQ_CMD: using &struct iwl_lq_cmd + */ + LQ_CMD = 0x4e, + + /** + * @FW_PAGING_BLOCK_CMD: + * &struct iwl_fw_paging_cmd + */ + FW_PAGING_BLOCK_CMD = 0x4f, + + /** + * @SCAN_OFFLOAD_REQUEST_CMD: uses &struct iwl_scan_req_lmac + */ + SCAN_OFFLOAD_REQUEST_CMD = 0x51, + + /** + * @SCAN_OFFLOAD_ABORT_CMD: abort the scan - no further contents + */ + SCAN_OFFLOAD_ABORT_CMD = 0x52, + + /** + * @HOT_SPOT_CMD: uses &struct iwl_hs20_roc_req + */ + HOT_SPOT_CMD = 0x53, + + /** + * @SCAN_OFFLOAD_COMPLETE: + * notification, &struct iwl_periodic_scan_complete + */ + SCAN_OFFLOAD_COMPLETE = 0x6D, + + /** + * @SCAN_OFFLOAD_UPDATE_PROFILES_CMD: + * update scan offload (scheduled scan) profiles/blacklist/etc. 
+ */ + SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E, + + /** + * @MATCH_FOUND_NOTIFICATION: scan match found + */ + MATCH_FOUND_NOTIFICATION = 0xd9, + + /** + * @SCAN_ITERATION_COMPLETE: + * uses &struct iwl_lmac_scan_complete_notif + */ + SCAN_ITERATION_COMPLETE = 0xe7, + + /* Phy */ + /** + * @PHY_CONFIGURATION_CMD: &struct iwl_phy_cfg_cmd + */ + PHY_CONFIGURATION_CMD = 0x6a, + + /** + * @CALIB_RES_NOTIF_PHY_DB: &struct iwl_calib_res_notif_phy_db + */ + CALIB_RES_NOTIF_PHY_DB = 0x6b, + + /** + * @PHY_DB_CMD: &struct iwl_phy_db_cmd + */ + PHY_DB_CMD = 0x6c, + + /** + * @TOF_CMD: &struct iwl_tof_config_cmd + */ + TOF_CMD = 0x10, + + /** + * @TOF_NOTIFICATION: &struct iwl_tof_gen_resp_cmd + */ + TOF_NOTIFICATION = 0x11, + + /** + * @POWER_TABLE_CMD: &struct iwl_device_power_cmd + */ + POWER_TABLE_CMD = 0x77, + + /** + * @PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION: + * &struct iwl_uapsd_misbehaving_ap_notif + */ + PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78, + + /** + * @LTR_CONFIG: &struct iwl_ltr_config_cmd + */ + LTR_CONFIG = 0xee, + + /** + * @REPLY_THERMAL_MNG_BACKOFF: + * Thermal throttling command + */ + REPLY_THERMAL_MNG_BACKOFF = 0x7e, + + /** + * @DC2DC_CONFIG_CMD: + * Set/Get DC2DC frequency tune + * Command is &struct iwl_dc2dc_config_cmd, + * response is &struct iwl_dc2dc_config_resp + */ + DC2DC_CONFIG_CMD = 0x83, + + /** + * @NVM_ACCESS_CMD: using &struct iwl_nvm_access_cmd + */ + NVM_ACCESS_CMD = 0x88, + + /** + * @BEACON_NOTIFICATION: &struct iwl_extended_beacon_notif + */ + BEACON_NOTIFICATION = 0x90, + + /** + * @BEACON_TEMPLATE_CMD: + * Uses one of &struct iwl_mac_beacon_cmd_v6, + * &struct iwl_mac_beacon_cmd_v7 or &struct iwl_mac_beacon_cmd + * depending on the device version. + */ + BEACON_TEMPLATE_CMD = 0x91, + /** + * @TX_ANT_CONFIGURATION_CMD: &struct iwl_tx_ant_cfg_cmd + */ + TX_ANT_CONFIGURATION_CMD = 0x98, + + /** + * @STATISTICS_CMD: &struct iwl_statistics_cmd + */ + STATISTICS_CMD = 0x9c, + + /** + * @STATISTICS_NOTIFICATION: + * one of &struct iwl_notif_statistics_v10, + * &struct iwl_notif_statistics_v11, + * &struct iwl_notif_statistics_cdb + */ + STATISTICS_NOTIFICATION = 0x9d, + + /** + * @EOSP_NOTIFICATION: + * Notify that a service period ended, + * &struct iwl_mvm_eosp_notification + */ + EOSP_NOTIFICATION = 0x9e, + + /** + * @REDUCE_TX_POWER_CMD: + * &struct iwl_dev_tx_power_cmd_v3 or &struct iwl_dev_tx_power_cmd + */ + REDUCE_TX_POWER_CMD = 0x9f, + + /** + * @CARD_STATE_NOTIFICATION: + * Card state (RF/CT kill) notification, + * uses &struct iwl_card_state_notif + */ + CARD_STATE_NOTIFICATION = 0xa1, + + /** + * @MISSED_BEACONS_NOTIFICATION: &struct iwl_missed_beacons_notif + */ + MISSED_BEACONS_NOTIFICATION = 0xa2, + + /** + * @MAC_PM_POWER_TABLE: using &struct iwl_mac_power_cmd + */ + MAC_PM_POWER_TABLE = 0xa9, + + /** + * @MFUART_LOAD_NOTIFICATION: &struct iwl_mfuart_load_notif + */ + MFUART_LOAD_NOTIFICATION = 0xb1, + + /** + * @RSS_CONFIG_CMD: &struct iwl_rss_config_cmd + */ + RSS_CONFIG_CMD = 0xb3, + + /** + * @REPLY_RX_PHY_CMD: &struct iwl_rx_phy_info + */ + REPLY_RX_PHY_CMD = 0xc0, + + /** + * @REPLY_RX_MPDU_CMD: + * &struct iwl_rx_mpdu_res_start or &struct iwl_rx_mpdu_desc + */ + REPLY_RX_MPDU_CMD = 0xc1, + + /** + * @FRAME_RELEASE: + * Frame release (reorder helper) notification, uses + * &struct iwl_frame_release + */ + FRAME_RELEASE = 0xc3, + + /** + * @BA_NOTIF: + * BlockAck notification, uses &struct iwl_mvm_compressed_ba_notif + * or &struct iwl_mvm_ba_notif depending on the HW + */ + BA_NOTIF = 0xc5, + + /* Location Aware Regulatory */ + /** + * 
@MCC_UPDATE_CMD: using &struct iwl_mcc_update_cmd + */ + MCC_UPDATE_CMD = 0xc8, + + /** + * @MCC_CHUB_UPDATE_CMD: using &struct iwl_mcc_chub_notif + */ + MCC_CHUB_UPDATE_CMD = 0xc9, + + /** + * @MARKER_CMD: trace marker command, uses &struct iwl_mvm_marker + */ + MARKER_CMD = 0xcb, + + /** + * @BT_PROFILE_NOTIFICATION: &struct iwl_bt_coex_profile_notif + */ + BT_PROFILE_NOTIFICATION = 0xce, + + /** + * @BT_CONFIG: &struct iwl_bt_coex_cmd + */ + BT_CONFIG = 0x9b, + + /** + * @BT_COEX_UPDATE_CORUN_LUT: + * &struct iwl_bt_coex_corun_lut_update_cmd + */ + BT_COEX_UPDATE_CORUN_LUT = 0x5b, + + /** + * @BT_COEX_UPDATE_REDUCED_TXP: + * &struct iwl_bt_coex_reduced_txp_update_cmd + */ + BT_COEX_UPDATE_REDUCED_TXP = 0x5c, + + /** + * @BT_COEX_CI: &struct iwl_bt_coex_ci_cmd + */ + BT_COEX_CI = 0x5d, + + /** + * @REPLY_SF_CFG_CMD: &struct iwl_sf_cfg_cmd + */ + REPLY_SF_CFG_CMD = 0xd1, + /** + * @REPLY_BEACON_FILTERING_CMD: &struct iwl_beacon_filter_cmd + */ + REPLY_BEACON_FILTERING_CMD = 0xd2, + + /** + * @DTS_MEASUREMENT_NOTIFICATION: + * &struct iwl_dts_measurement_notif_v1 or + * &struct iwl_dts_measurement_notif_v2 + */ + DTS_MEASUREMENT_NOTIFICATION = 0xdd, + + /** + * @LDBG_CONFIG_CMD: configure continuous trace recording + */ + LDBG_CONFIG_CMD = 0xf6, + + /** + * @DEBUG_LOG_MSG: Debugging log data from firmware + */ + DEBUG_LOG_MSG = 0xf7, + + /** + * @BCAST_FILTER_CMD: &struct iwl_bcast_filter_cmd + */ + BCAST_FILTER_CMD = 0xcf, + + /** + * @MCAST_FILTER_CMD: &struct iwl_mcast_filter_cmd + */ + MCAST_FILTER_CMD = 0xd0, + + /** + * @D3_CONFIG_CMD: &struct iwl_d3_manager_config + */ + D3_CONFIG_CMD = 0xd3, + + /** + * @PROT_OFFLOAD_CONFIG_CMD: Depending on firmware, uses one of + * &struct iwl_proto_offload_cmd_v1, &struct iwl_proto_offload_cmd_v2, + * &struct iwl_proto_offload_cmd_v3_small, + * &struct iwl_proto_offload_cmd_v3_large + */ + PROT_OFFLOAD_CONFIG_CMD = 0xd4, + + /** + * @OFFLOADS_QUERY_CMD: + * No data in command, response in &struct iwl_wowlan_status + */ + OFFLOADS_QUERY_CMD = 0xd5, + + /** + * @REMOTE_WAKE_CONFIG_CMD: &struct iwl_wowlan_remote_wake_config + */ + REMOTE_WAKE_CONFIG_CMD = 0xd6, + + /** + * @D0I3_END_CMD: End D0i3/D3 state, no command data + */ + D0I3_END_CMD = 0xed, + + /** + * @WOWLAN_PATTERNS: &struct iwl_wowlan_patterns_cmd + */ + WOWLAN_PATTERNS = 0xe0, + + /** + * @WOWLAN_CONFIGURATION: &struct iwl_wowlan_config_cmd + */ + WOWLAN_CONFIGURATION = 0xe1, + + /** + * @WOWLAN_TSC_RSC_PARAM: &struct iwl_wowlan_rsc_tsc_params_cmd + */ + WOWLAN_TSC_RSC_PARAM = 0xe2, + + /** + * @WOWLAN_TKIP_PARAM: &struct iwl_wowlan_tkip_params_cmd + */ + WOWLAN_TKIP_PARAM = 0xe3, + + /** + * @WOWLAN_KEK_KCK_MATERIAL: &struct iwl_wowlan_kek_kck_material_cmd + */ + WOWLAN_KEK_KCK_MATERIAL = 0xe4, + + /** + * @WOWLAN_GET_STATUSES: response in &struct iwl_wowlan_status + */ + WOWLAN_GET_STATUSES = 0xe5, + + /** + * @SCAN_OFFLOAD_PROFILES_QUERY_CMD: + * No command data, response is &struct iwl_scan_offload_profiles_query + */ + SCAN_OFFLOAD_PROFILES_QUERY_CMD = 0x56, +}; + +/** + * enum iwl_system_subcmd_ids - system group command IDs + */ +enum iwl_system_subcmd_ids { + /** + * @SHARED_MEM_CFG_CMD: + * response in &struct iwl_shared_mem_cfg or + * &struct iwl_shared_mem_cfg_v2 + */ + SHARED_MEM_CFG_CMD = 0x0, + + /** + * @INIT_EXTENDED_CFG_CMD: &struct iwl_init_extended_cfg_cmd + */ + INIT_EXTENDED_CFG_CMD = 0x03, + + /** + * @FSEQ_VER_MISMATCH_NTF: Notification about fseq version + * mismatch during init. The format is specified in + * &struct iwl_fseq_ver_mismatch_ntf. 
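+	 *
+	 * As with every entry in &enum iwl_system_subcmd_ids, the full
+	 * command ID combines this subcommand with its group. A sketch
+	 * (WIDE_ID() and SYSTEM_GROUP are assumed here from the driver's
+	 * command-ID helpers; they are not defined in this header):
+	 *
+	 *	u32 cmd_id = WIDE_ID(SYSTEM_GROUP, SHARED_MEM_CFG_CMD);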
+ */ + FSEQ_VER_MISMATCH_NTF = 0xFF, +}; + +#endif /* __iwl_fw_api_commands_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/config.h b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h new file mode 100644 index 000000000000..ee1bd45b7021 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h @@ -0,0 +1,192 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless <linuxwifi@intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ + +#ifndef __iwl_fw_api_config_h__ +#define __iwl_fw_api_config_h__ + +/* + * struct iwl_dqa_enable_cmd + * @cmd_queue: the TXQ number of the command queue + */ +struct iwl_dqa_enable_cmd { + __le32 cmd_queue; +} __packed; /* DQA_CONTROL_CMD_API_S_VER_1 */ + +/* + * struct iwl_tx_ant_cfg_cmd + * @valid: valid antenna configuration + */ +struct iwl_tx_ant_cfg_cmd { + __le32 valid; +} __packed; + +/** + * struct iwl_calib_ctrl - Calibration control struct. + * Sent as part of the phy configuration command. + * @flow_trigger: bitmap for which calibrations to perform according to + * flow triggers, using &enum iwl_calib_cfg + * @event_trigger: bitmap for which calibrations to perform according to + * event triggers, using &enum iwl_calib_cfg + */ +struct iwl_calib_ctrl { + __le32 flow_trigger; + __le32 event_trigger; +} __packed; + +/* This enum defines the bitmap of various calibrations to enable in both + * init ucode and runtime ucode through CALIBRATION_CFG_CMD. + */ +enum iwl_calib_cfg { + IWL_CALIB_CFG_XTAL_IDX = BIT(0), + IWL_CALIB_CFG_TEMPERATURE_IDX = BIT(1), + IWL_CALIB_CFG_VOLTAGE_READ_IDX = BIT(2), + IWL_CALIB_CFG_PAPD_IDX = BIT(3), + IWL_CALIB_CFG_TX_PWR_IDX = BIT(4), + IWL_CALIB_CFG_DC_IDX = BIT(5), + IWL_CALIB_CFG_BB_FILTER_IDX = BIT(6), + IWL_CALIB_CFG_LO_LEAKAGE_IDX = BIT(7), + IWL_CALIB_CFG_TX_IQ_IDX = BIT(8), + IWL_CALIB_CFG_TX_IQ_SKEW_IDX = BIT(9), + IWL_CALIB_CFG_RX_IQ_IDX = BIT(10), + IWL_CALIB_CFG_RX_IQ_SKEW_IDX = BIT(11), + IWL_CALIB_CFG_SENSITIVITY_IDX = BIT(12), + IWL_CALIB_CFG_CHAIN_NOISE_IDX = BIT(13), + IWL_CALIB_CFG_DISCONNECTED_ANT_IDX = BIT(14), + IWL_CALIB_CFG_ANT_COUPLING_IDX = BIT(15), + IWL_CALIB_CFG_DAC_IDX = BIT(16), + IWL_CALIB_CFG_ABS_IDX = BIT(17), + IWL_CALIB_CFG_AGC_IDX = BIT(18), +}; + +/** + * struct iwl_phy_cfg_cmd - Phy configuration command + * @phy_cfg: PHY configuration value, uses &enum iwl_fw_phy_cfg + * @calib_control: calibration control data + */ +struct iwl_phy_cfg_cmd { + __le32 phy_cfg; + struct iwl_calib_ctrl calib_control; +} __packed; + +#define PHY_CFG_RADIO_TYPE (BIT(0) | BIT(1)) +#define PHY_CFG_RADIO_STEP (BIT(2) | BIT(3)) +#define PHY_CFG_RADIO_DASH (BIT(4) | BIT(5)) +#define PHY_CFG_PRODUCT_NUMBER (BIT(6) | BIT(7)) +#define PHY_CFG_TX_CHAIN_A BIT(8) +#define PHY_CFG_TX_CHAIN_B BIT(9) +#define PHY_CFG_TX_CHAIN_C BIT(10) +#define PHY_CFG_RX_CHAIN_A BIT(12) +#define PHY_CFG_RX_CHAIN_B BIT(13) +#define PHY_CFG_RX_CHAIN_C BIT(14) + +/* + * enum iwl_dc2dc_config_id - flag ids + * + * Ids of dc2dc configuration flags + */ +enum iwl_dc2dc_config_id { + DCDC_LOW_POWER_MODE_MSK_SET = 0x1, /* not used */ + DCDC_FREQ_TUNE_SET = 0x2, +}; /* MARKER_ID_API_E_VER_1 */ + +/** + * struct iwl_dc2dc_config_cmd - configure dc2dc values + * + * (DC2DC_CONFIG_CMD = 0x83) + * + * Set/Get & configure dc2dc values. + * The command always returns the current dc2dc values. + * + * @flags: set/get dc2dc + * @enable_low_power_mode: not used. + * @dc2dc_freq_tune0: frequency divider - digital domain + * @dc2dc_freq_tune1: frequency divider - analog domain + */ +struct iwl_dc2dc_config_cmd { + __le32 flags; + __le32 enable_low_power_mode; /* not used */ + __le32 dc2dc_freq_tune0; + __le32 dc2dc_freq_tune1; +} __packed; /* DC2DC_CONFIG_CMD_API_S_VER_1 */ + +/** + * struct iwl_dc2dc_config_resp - response for iwl_dc2dc_config_cmd + * + * Current dc2dc values returned by the FW. 
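+ *
+ * For illustration only (a sketch, not part of this API definition):
+ * since the command always returns the current values, a pure "get" can
+ * be issued with no set-flags:
+ *
+ *	struct iwl_dc2dc_config_cmd cmd = {
+ *		.flags = cpu_to_le32(0),
+ *	};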
+ * + * @dc2dc_freq_tune0: frequency divider - digital domain + * @dc2dc_freq_tune1: frequency divider - analog domain + */ +struct iwl_dc2dc_config_resp { + __le32 dc2dc_freq_tune0; + __le32 dc2dc_freq_tune1; +} __packed; /* DC2DC_CONFIG_RESP_API_S_VER_1 */ + +/** + * struct iwl_mvm_antenna_coupling_notif - antenna coupling notification + * @isolation: antenna isolation value + */ +struct iwl_mvm_antenna_coupling_notif { + __le32 isolation; +} __packed; + +#endif /* __iwl_fw_api_config_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/context.h b/drivers/net/wireless/intel/iwlwifi/fw/api/context.h new file mode 100644 index 000000000000..2f0d7c498b3e --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/context.h @@ -0,0 +1,94 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless <linuxwifi@intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_fw_api_context_h__ +#define __iwl_fw_api_context_h__ + +/** + * enum iwl_ctxt_id_and_color - ID and color fields in context dword + * @FW_CTXT_ID_POS: position of the ID + * @FW_CTXT_ID_MSK: mask of the ID + * @FW_CTXT_COLOR_POS: position of the color + * @FW_CTXT_COLOR_MSK: mask of the color + * @FW_CTXT_INVALID: value used to indicate unused/invalid + */ +enum iwl_ctxt_id_and_color { + FW_CTXT_ID_POS = 0, + FW_CTXT_ID_MSK = 0xff << FW_CTXT_ID_POS, + FW_CTXT_COLOR_POS = 8, + FW_CTXT_COLOR_MSK = 0xff << FW_CTXT_COLOR_POS, + FW_CTXT_INVALID = 0xffffffff, +}; + +#define FW_CMD_ID_AND_COLOR(_id, _color) (((_id) << FW_CTXT_ID_POS) |\ + ((_color) << FW_CTXT_COLOR_POS)) + +/* Possible actions on PHYs, MACs and Bindings */ +enum iwl_ctxt_action { + FW_CTXT_ACTION_STUB = 0, + FW_CTXT_ACTION_ADD, + FW_CTXT_ACTION_MODIFY, + FW_CTXT_ACTION_REMOVE, + FW_CTXT_ACTION_NUM +}; /* COMMON_CONTEXT_ACTION_API_E_VER_1 */ + +#endif /* __iwl_fw_api_context_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h index d4a4c28b7192..57f4bc242023 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h @@ -18,11 +18,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -64,8 +59,8 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ -#ifndef __fw_api_d3_h__ -#define __fw_api_d3_h__ +#ifndef __iwl_fw_api_d3_h__ +#define __iwl_fw_api_d3_h__ /** * enum iwl_d3_wakeup_flags - D3 manager wakeup flags @@ -468,4 +463,4 @@ struct iwl_wowlan_remote_wake_config { /* TODO: NetDetect API */ -#endif /* __fw_api_d3_h__ */ +#endif /* __iwl_fw_api_d3_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h new file mode 100644 index 000000000000..aa76dcc148bd --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h @@ -0,0 +1,127 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless <linuxwifi@intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_fw_api_datapath_h__
+#define __iwl_fw_api_datapath_h__
+
+/**
+ * enum iwl_data_path_subcmd_ids - data path group commands
+ */
+enum iwl_data_path_subcmd_ids {
+	/**
+	 * @DQA_ENABLE_CMD: &struct iwl_dqa_enable_cmd
+	 */
+	DQA_ENABLE_CMD = 0x0,
+
+	/**
+	 * @UPDATE_MU_GROUPS_CMD: &struct iwl_mu_group_mgmt_cmd
+	 */
+	UPDATE_MU_GROUPS_CMD = 0x1,
+
+	/**
+	 * @TRIGGER_RX_QUEUES_NOTIF_CMD: &struct iwl_rxq_sync_cmd
+	 */
+	TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
+
+	/**
+	 * @STA_PM_NOTIF: &struct iwl_mvm_pm_state_notification
+	 */
+	STA_PM_NOTIF = 0xFD,
+
+	/**
+	 * @MU_GROUP_MGMT_NOTIF: &struct iwl_mu_group_mgmt_notif
+	 */
+	MU_GROUP_MGMT_NOTIF = 0xFE,
+
+	/**
+	 * @RX_QUEUES_NOTIFICATION: &struct iwl_rxq_sync_notification
+	 */
+	RX_QUEUES_NOTIFICATION = 0xFF,
+};
+
+/**
+ * struct iwl_mu_group_mgmt_cmd - VHT MU-MIMO group configuration
+ *
+ * @reserved: reserved
+ * @membership_status: a bitmap of MU groups
+ * @user_position: the position of the station in a group. If the station
+ *	is in the group, then bits (group * 2) hold the position - 1
+ */
+struct iwl_mu_group_mgmt_cmd {
+	__le32 reserved;
+	__le32 membership_status[2];
+	__le32 user_position[4];
+} __packed; /* MU_GROUP_ID_MNG_TABLE_API_S_VER_1 */
+
+/**
+ * struct iwl_mu_group_mgmt_notif - VHT MU-MIMO group id notification
+ *
+ * @membership_status: a bitmap of MU groups
+ * @user_position: the position of the station in a group. If the station
+ *	is in the group, then bits (group * 2) hold the position - 1
+ */
+struct iwl_mu_group_mgmt_notif {
+	__le32 membership_status[2];
+	__le32 user_position[4];
+} __packed; /* MU_GROUP_MNG_NTFY_API_S_VER_1 */
+
+#endif /* __iwl_fw_api_datapath_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
new file mode 100644
index 000000000000..9f88b61536bc
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
@@ -0,0 +1,345 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __iwl_fw_api_debug_h__ +#define __iwl_fw_api_debug_h__ + +/** + * enum iwl_debug_cmds - debug commands + */ +enum iwl_debug_cmds { + /** + * @LMAC_RD_WR: + * LMAC memory read/write, using &struct iwl_dbg_mem_access_cmd and + * &struct iwl_dbg_mem_access_rsp + */ + LMAC_RD_WR = 0x0, + /** + * @UMAC_RD_WR: + * UMAC memory read/write, using &struct iwl_dbg_mem_access_cmd and + * &struct iwl_dbg_mem_access_rsp + */ + UMAC_RD_WR = 0x1, + /** + * @MFU_ASSERT_DUMP_NTF: + * &struct iwl_mfu_assert_dump_notif + */ + MFU_ASSERT_DUMP_NTF = 0xFE, +}; + +/* Error response/notification */ +enum { + FW_ERR_UNKNOWN_CMD = 0x0, + FW_ERR_INVALID_CMD_PARAM = 0x1, + FW_ERR_SERVICE = 0x2, + FW_ERR_ARC_MEMORY = 0x3, + FW_ERR_ARC_CODE = 0x4, + FW_ERR_WATCH_DOG = 0x5, + FW_ERR_WEP_GRP_KEY_INDX = 0x10, + FW_ERR_WEP_KEY_SIZE = 0x11, + FW_ERR_OBSOLETE_FUNC = 0x12, + FW_ERR_UNEXPECTED = 0xFE, + FW_ERR_FATAL = 0xFF +}; + +/** + * struct iwl_error_resp - FW error indication + * ( REPLY_ERROR = 0x2 ) + * @error_type: one of FW_ERR_* + * @cmd_id: the command ID for which the error occurred + * @reserved1: reserved + * @bad_cmd_seq_num: sequence number of the erroneous command + * @error_service: which service created the error, applicable only if + * error_type = 2, otherwise 0 + * @timestamp: TSF in usecs. 
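+ *
+ * A hedged sketch of decoding this notification (it assumes the driver's
+ * generic &struct iwl_rx_packet RX wrapper, which is not defined here):
+ *
+ *	struct iwl_error_resp *err = (void *)pkt->data;
+ *
+ *	if (le32_to_cpu(err->error_type) == FW_ERR_SERVICE)
+ *		pr_err("fw service error: service %u, cmd 0x%x\n",
+ *		       le32_to_cpu(err->error_service), err->cmd_id);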
+ */
+struct iwl_error_resp {
+	__le32 error_type;
+	u8 cmd_id;
+	u8 reserved1;
+	__le16 bad_cmd_seq_num;
+	__le32 error_service;
+	__le64 timestamp;
+} __packed;
+
+#define TX_FIFO_MAX_NUM_9000		8
+#define TX_FIFO_MAX_NUM			15
+#define RX_FIFO_MAX_NUM			2
+#define TX_FIFO_INTERNAL_MAX_NUM	6
+
+/**
+ * struct iwl_shared_mem_cfg_v2 - Shared memory configuration information
+ *
+ * @shared_mem_addr: shared memory addr (pre 8000 HW set to 0x0 as MARBH is not
+ *	accessible)
+ * @shared_mem_size: shared memory size
+ * @sample_buff_addr: internal sample (mon/adc) buff addr (pre 8000 HW set to
+ *	0x0 as accessible only via DBGM RDAT)
+ * @sample_buff_size: internal sample buff size
+ * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB), (pre
+ *	8000 HW set to 0x0 as not accessible)
+ * @txfifo_size: size of TXF0 ... TXF7
+ * @rxfifo_size: RXF1, RXF2 sizes. If there is no RXF2, it'll have a value of 0
+ * @page_buff_addr: used by UMAC and performance debug (page miss analysis),
+ *	when paging is not supported this should be 0
+ * @page_buff_size: size of %page_buff_addr
+ * @rxfifo_addr: Start address of rxFifo
+ * @internal_txfifo_addr: start address of internalFifo
+ * @internal_txfifo_size: internal fifos' size
+ *
+ * NOTE: on firmware that doesn't have IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG
+ *	set, the last 3 members don't exist.
+ */
+struct iwl_shared_mem_cfg_v2 {
+	__le32 shared_mem_addr;
+	__le32 shared_mem_size;
+	__le32 sample_buff_addr;
+	__le32 sample_buff_size;
+	__le32 txfifo_addr;
+	__le32 txfifo_size[TX_FIFO_MAX_NUM_9000];
+	__le32 rxfifo_size[RX_FIFO_MAX_NUM];
+	__le32 page_buff_addr;
+	__le32 page_buff_size;
+	__le32 rxfifo_addr;
+	__le32 internal_txfifo_addr;
+	__le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
+} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */
+
+/**
+ * struct iwl_shared_mem_lmac_cfg - LMAC shared memory configuration
+ *
+ * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB)
+ * @txfifo_size: size of TX FIFOs
+ * @rxfifo1_addr: RXF1 addr
+ * @rxfifo1_size: RXF1 size
+ */
+struct iwl_shared_mem_lmac_cfg {
+	__le32 txfifo_addr;
+	__le32 txfifo_size[TX_FIFO_MAX_NUM];
+	__le32 rxfifo1_addr;
+	__le32 rxfifo1_size;
+} __packed; /* SHARED_MEM_ALLOC_LMAC_API_S_VER_1 */
+
+/**
+ * struct iwl_shared_mem_cfg - Shared memory configuration information
+ *
+ * @shared_mem_addr: shared memory address
+ * @shared_mem_size: shared memory size
+ * @sample_buff_addr: internal sample (mon/adc) buff addr
+ * @sample_buff_size: internal sample buff size
+ * @rxfifo2_addr: start addr of RXF2
+ * @rxfifo2_size: size of RXF2
+ * @page_buff_addr: used by UMAC and performance debug (page miss analysis),
+ *	when paging is not supported this should be 0
+ * @page_buff_size: size of %page_buff_addr
+ * @lmac_num: number of LMACs (1 or 2)
+ * @lmac_smem: per-LMAC smem data
+ */
+struct iwl_shared_mem_cfg {
+	__le32 shared_mem_addr;
+	__le32 shared_mem_size;
+	__le32 sample_buff_addr;
+	__le32 sample_buff_size;
+	__le32 rxfifo2_addr;
+	__le32 rxfifo2_size;
+	__le32 page_buff_addr;
+	__le32 page_buff_size;
+	__le32 lmac_num;
+	struct iwl_shared_mem_lmac_cfg lmac_smem[2];
+} __packed; /* SHARED_MEM_ALLOC_API_S_VER_3 */
+
+/**
+ * struct iwl_mfuart_load_notif - mfuart image version & status
+ * ( MFUART_LOAD_NOTIFICATION = 0xb1 )
+ * @installed_ver: installed image version
+ * @external_ver: external image version
+ * @status: MFUART loading status
+ * @duration: MFUART loading time
+ * @image_size: MFUART image size in bytes
+ */
+struct iwl_mfuart_load_notif {
+	__le32 installed_ver;
+	__le32 external_ver;
+	__le32 status;
+	__le32 duration;
+	/* image size valid only in v2 of the command */
+	__le32 image_size;
+} __packed; /* MFU_LOADER_NTFY_API_S_VER_2 */
+
+/**
+ * struct iwl_mfu_assert_dump_notif - mfuart dump logs
+ * ( MFU_ASSERT_DUMP_NTF = 0xfe )
+ * @assert_id: mfuart assert id that caused the notif
+ * @curr_reset_num: number of asserts since uptime
+ * @index_num: current chunk id
+ * @parts_num: total number of chunks
+ * @data_size: number of data bytes sent
+ * @data: data buffer
+ */
+struct iwl_mfu_assert_dump_notif {
+	__le32 assert_id;
+	__le32 curr_reset_num;
+	__le16 index_num;
+	__le16 parts_num;
+	__le32 data_size;
+	__le32 data[0];
+} __packed; /* MFU_DUMP_ASSERT_API_S_VER_1 */
+
+/**
+ * enum iwl_mvm_marker_id - marker ids
+ *
+ * The ids for the different types of markers to insert into the usniffer logs
+ *
+ * @MARKER_ID_TX_FRAME_LATENCY: TX latency marker
+ */
+enum iwl_mvm_marker_id {
+	MARKER_ID_TX_FRAME_LATENCY = 1,
+}; /* MARKER_ID_API_E_VER_1 */
+
+/**
+ * struct iwl_mvm_marker - mark info into the usniffer logs
+ *
+ * (MARKER_CMD = 0xcb)
+ *
+ * Mark the UTC time stamp into the usniffer logs together with additional
+ * metadata, so the usniffer output can be parsed.
+ * In the command response the ucode will return the GP2 time.
+ *
+ * @dw_len: The number of dwords following this byte, including this byte.
+ * @marker_id: A unique marker id (iwl_mvm_marker_id).
+ * @reserved: reserved.
+ * @timestamp: in milliseconds since 1970-01-01 00:00:00 UTC
+ * @metadata: additional metadata that will be written to the usniffer log
+ */
+struct iwl_mvm_marker {
+	u8 dw_len;
+	u8 marker_id;
+	__le16 reserved;
+	__le64 timestamp;
+	__le32 metadata[0];
+} __packed; /* MARKER_API_S_VER_1 */
+
+/* Operation types for the debug mem access */
+enum {
+	DEBUG_MEM_OP_READ = 0,
+	DEBUG_MEM_OP_WRITE = 1,
+	DEBUG_MEM_OP_WRITE_BYTES = 2,
+};
+
+#define DEBUG_MEM_MAX_SIZE_DWORDS 32
+
+/**
+ * struct iwl_dbg_mem_access_cmd - Request the device to read/write memory
+ * @op: DEBUG_MEM_OP_*
+ * @addr: address to read/write from/to
+ * @len: in dwords, to read/write
+ * @data: for write operations, contains the source buffer
+ */
+struct iwl_dbg_mem_access_cmd {
+	__le32 op;
+	__le32 addr;
+	__le32 len;
+	__le32 data[];
+} __packed; /* DEBUG_(U|L)MAC_RD_WR_CMD_API_S_VER_1 */
+
+/* Status responses for the debug mem access */
+enum {
+	DEBUG_MEM_STATUS_SUCCESS = 0x0,
+	DEBUG_MEM_STATUS_FAILED = 0x1,
+	DEBUG_MEM_STATUS_LOCKED = 0x2,
+	DEBUG_MEM_STATUS_HIDDEN = 0x3,
+	DEBUG_MEM_STATUS_LENGTH = 0x4,
+};
+
+/**
+ * struct iwl_dbg_mem_access_rsp - Response to debug mem commands
+ * @status: DEBUG_MEM_STATUS_*
+ * @len: read dwords (0 for write operations)
+ * @data: contains the read DWs
+ */
+struct iwl_dbg_mem_access_rsp {
+	__le32 status;
+	__le32 len;
+	__le32 data[];
+} __packed; /* DEBUG_(U|L)MAC_RD_WR_RSP_API_S_VER_1 */
+
+#define CONT_REC_COMMAND_SIZE	80
+#define ENABLE_CONT_RECORDING	0x15
+#define DISABLE_CONT_RECORDING	0x16
+
+/*
+ * struct iwl_continuous_record_mode - recording mode
+ */
+struct iwl_continuous_record_mode {
+	__le16 enable_recording;
+} __packed;
+
+/*
+ * struct iwl_continuous_record_cmd - enable/disable continuous recording
+ */
+struct iwl_continuous_record_cmd {
+	struct iwl_continuous_record_mode record_mode;
+	u8 pad[CONT_REC_COMMAND_SIZE -
+		sizeof(struct iwl_continuous_record_mode)];
+} __packed;
+
+#endif /* __iwl_fw_api_debug_h__ */
diff --git
a/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h b/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h new file mode 100644 index 000000000000..befc3b126041 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h @@ -0,0 +1,183 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless <linuxwifi@intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_fw_api_filter_h__ +#define __iwl_fw_api_filter_h__ + +#include "fw/api/mac.h" + +#define MAX_PORT_ID_NUM 2 +#define MAX_MCAST_FILTERING_ADDRESSES 256 + +/** + * struct iwl_mcast_filter_cmd - configure multicast filter. 
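+ *
+ * (A sketch, assuming the standard kernel helpers kzalloc(), roundup()
+ * and ETH_ALEN; n_addrs is a hypothetical count.) The command is
+ * variable-length, so it is typically allocated with room for the
+ * address array and padded to a DWORD boundary:
+ *
+ *	size_t len = roundup(sizeof(struct iwl_mcast_filter_cmd) +
+ *			     n_addrs * ETH_ALEN, 4);
+ *	struct iwl_mcast_filter_cmd *cmd = kzalloc(len, GFP_KERNEL);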
+ * @filter_own: Set 1 to filter out multicast packets sent by the station
+ *	itself
+ * @port_id: Multicast MAC addresses array specifier. This is an indirect
+ *	way to identify the network interface, adopted in the host-device
+ *	interface. It is used by the FW as an index into the array of
+ *	addresses. This array has MAX_PORT_ID_NUM members.
+ * @count: Number of MAC addresses in the array
+ * @pass_all: Set 1 to pass all multicast packets.
+ * @bssid: current association BSSID.
+ * @reserved: reserved
+ * @addr_list: Placeholder for the array of MAC addresses.
+ *	IMPORTANT: add padding if necessary to ensure DWORD alignment.
+ */
+struct iwl_mcast_filter_cmd {
+	u8 filter_own;
+	u8 port_id;
+	u8 count;
+	u8 pass_all;
+	u8 bssid[6];
+	u8 reserved[2];
+	u8 addr_list[0];
+} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */
+
+#define MAX_BCAST_FILTERS 8
+#define MAX_BCAST_FILTER_ATTRS 2
+
+/**
+ * enum iwl_mvm_bcast_filter_attr_offset - written by fw for each Rx packet
+ * @BCAST_FILTER_OFFSET_PAYLOAD_START: offset is from payload start.
+ * @BCAST_FILTER_OFFSET_IP_END: offset is from ip header end (i.e.
+ *	start of ip payload).
+ */
+enum iwl_mvm_bcast_filter_attr_offset {
+	BCAST_FILTER_OFFSET_PAYLOAD_START = 0,
+	BCAST_FILTER_OFFSET_IP_END = 1,
+};
+
+/**
+ * struct iwl_fw_bcast_filter_attr - broadcast filter attribute
+ * @offset_type: &enum iwl_mvm_bcast_filter_attr_offset.
+ * @offset: starting offset of this pattern.
+ * @reserved1: reserved
+ * @val: value to match - big endian (MSB is the first
+ *	byte to match from offset pos).
+ * @mask: mask to match (big endian).
+ */
+struct iwl_fw_bcast_filter_attr {
+	u8 offset_type;
+	u8 offset;
+	__le16 reserved1;
+	__be32 val;
+	__be32 mask;
+} __packed; /* BCAST_FILTER_ATT_S_VER_1 */
+
+/**
+ * enum iwl_mvm_bcast_filter_frame_type - filter frame type
+ * @BCAST_FILTER_FRAME_TYPE_ALL: consider all frames.
+ * @BCAST_FILTER_FRAME_TYPE_IPV4: consider only ipv4 frames
+ */
+enum iwl_mvm_bcast_filter_frame_type {
+	BCAST_FILTER_FRAME_TYPE_ALL = 0,
+	BCAST_FILTER_FRAME_TYPE_IPV4 = 1,
+};
+
+/**
+ * struct iwl_fw_bcast_filter - broadcast filter
+ * @discard: discard frame (1) or let it pass (0).
+ * @frame_type: &enum iwl_mvm_bcast_filter_frame_type.
+ * @reserved1: reserved
+ * @num_attrs: number of valid attributes in this filter.
+ * @attrs: attributes of this filter. A filter is considered matched
+ *	only when all its attributes are matched (i.e. AND relationship)
+ */
+struct iwl_fw_bcast_filter {
+	u8 discard;
+	u8 frame_type;
+	u8 num_attrs;
+	u8 reserved1;
+	struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS];
+} __packed; /* BCAST_FILTER_S_VER_1 */
+
+/**
+ * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration.
+ * @default_discard: default action for this mac (discard (1) / pass (0)).
+ * @reserved1: reserved
+ * @attached_filters: bitmap of relevant filters for this mac.
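+ *
+ * The filters referenced by @attached_filters live in the @filters array
+ * of &struct iwl_bcast_filter_cmd. As an illustrative sketch (the port
+ * number here is hypothetical; 68 = 0x0044 sits in bytes 2-3 of a UDP
+ * header, which is where the destination port lives), a filter passing
+ * IPv4 broadcasts whose IP payload is a UDP header to port 68 could be:
+ *
+ *	struct iwl_fw_bcast_filter flt = {
+ *		.discard = 0,
+ *		.frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
+ *		.num_attrs = 1,
+ *		.attrs[0] = {
+ *			.offset_type = BCAST_FILTER_OFFSET_IP_END,
+ *			.offset = 0,
+ *			.val = cpu_to_be32(0x00000044),
+ *			.mask = cpu_to_be32(0x0000ffff),
+ *		},
+ *	};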
+ */ +struct iwl_fw_bcast_mac { + u8 default_discard; + u8 reserved1; + __le16 attached_filters; +} __packed; /* BCAST_MAC_CONTEXT_S_VER_1 */ + +/** + * struct iwl_bcast_filter_cmd - broadcast filtering configuration + * @disable: enable (0) / disable (1) + * @max_bcast_filters: max number of filters (MAX_BCAST_FILTERS) + * @max_macs: max number of macs (NUM_MAC_INDEX_DRIVER) + * @reserved1: reserved + * @filters: broadcast filters + * @macs: broadcast filtering configuration per-mac + */ +struct iwl_bcast_filter_cmd { + u8 disable; + u8 max_bcast_filters; + u8 max_macs; + u8 reserved1; + struct iwl_fw_bcast_filter filters[MAX_BCAST_FILTERS]; + struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER]; +} __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */ + +#endif /* __iwl_fw_api_filter_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h new file mode 100644 index 000000000000..39c89e85fd2f --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h @@ -0,0 +1,152 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless <linuxwifi@intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_fw_api_mac_cfg_h__
+#define __iwl_fw_api_mac_cfg_h__
+
+/**
+ * enum iwl_mac_conf_subcmd_ids - mac configuration command IDs
+ */
+enum iwl_mac_conf_subcmd_ids {
+	/**
+	 * @LINK_QUALITY_MEASUREMENT_CMD: &struct iwl_link_qual_msrmnt_cmd
+	 */
+	LINK_QUALITY_MEASUREMENT_CMD = 0x1,
+
+	/**
+	 * @LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF:
+	 * &struct iwl_link_qual_msrmnt_notif
+	 */
+	LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF = 0xFE,
+
+	/**
+	 * @CHANNEL_SWITCH_NOA_NOTIF: &struct iwl_channel_switch_noa_notif
+	 */
+	CHANNEL_SWITCH_NOA_NOTIF = 0xFF,
+};
+
+#define LQM_NUMBER_OF_STATIONS_IN_REPORT 16
+
+enum iwl_lqm_cmd_operatrions {
+	LQM_CMD_OPERATION_START_MEASUREMENT = 0x01,
+	LQM_CMD_OPERATION_STOP_MEASUREMENT = 0x02,
+};
+
+enum iwl_lqm_status {
+	LQM_STATUS_SUCCESS = 0,
+	LQM_STATUS_TIMEOUT = 1,
+	LQM_STATUS_ABORT = 2,
+};
+
+/**
+ * struct iwl_link_qual_msrmnt_cmd - Link Quality Measurement command
+ * @cmd_operation: command operation to be performed (start or stop)
+ *	as defined above.
+ * @mac_id: MAC ID the measurement applies to.
+ * @measurement_time: time of the total measurement to be performed, in uSec.
+ * @timeout: maximum time allowed until a response is sent, in uSec.
+ */
+struct iwl_link_qual_msrmnt_cmd {
+	__le32 cmd_operation;
+	__le32 mac_id;
+	__le32 measurement_time;
+	__le32 timeout;
+} __packed /* LQM_CMD_API_S_VER_1 */;
+
+/**
+ * struct iwl_link_qual_msrmnt_notif - Link Quality Measurement notification
+ *
+ * @frequent_stations_air_time: an array containing the total air time
+ *	(in uSec) used by the most frequently transmitting stations.
+ * @number_of_stations: the number of unique stations included in the array
+ *	(a number between 0 and 16)
+ * @total_air_time_other_stations: the total air time (uSec) used by all the
+ *	stations which are not included in the above report.
+ * @time_in_measurement_window: the total time in uSec in which a measurement
+ *	took place.
+ * @tx_frame_dropped: the number of TX frames dropped due to retry limit during
+ *	measurement
+ * @mac_id: MAC ID the measurement applies to.
+ * @status: return status; may be one of the LQM_STATUS_* defined above.
+ * @reserved: reserved.
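+ *
+ * This notification is the result of a measurement started, as a sketch
+ * (mac_id, duration_usec and timeout_usec are hypothetical values), by:
+ *
+ *	struct iwl_link_qual_msrmnt_cmd cmd = {
+ *		.cmd_operation =
+ *			cpu_to_le32(LQM_CMD_OPERATION_START_MEASUREMENT),
+ *		.mac_id = cpu_to_le32(mac_id),
+ *		.measurement_time = cpu_to_le32(duration_usec),
+ *		.timeout = cpu_to_le32(timeout_usec),
+ *	};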
+ */ +struct iwl_link_qual_msrmnt_notif { + __le32 frequent_stations_air_time[LQM_NUMBER_OF_STATIONS_IN_REPORT]; + __le32 number_of_stations; + __le32 total_air_time_other_stations; + __le32 time_in_measurement_window; + __le32 tx_frame_dropped; + __le32 mac_id; + __le32 status; + u8 reserved[12]; +} __packed; /* LQM_MEASUREMENT_COMPLETE_NTF_API_S_VER1 */ + +/** + * struct iwl_channel_switch_noa_notif - Channel switch NOA notification + * + * @id_and_color: ID and color of the MAC + */ +struct iwl_channel_switch_noa_notif { + __le32 id_and_color; +} __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */ + +#endif /* __iwl_fw_api_mac_cfg_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h index 0c3350ad2f2f..f2e31e040a7b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -16,11 +17,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -31,6 +27,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -60,8 +57,8 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ -#ifndef __fw_api_mac_h__ -#define __fw_api_mac_h__ +#ifndef __iwl_fw_api_mac_h__ +#define __iwl_fw_api_mac_h__ /* * The first MAC indices (starting from 0) are available to the driver, @@ -76,8 +73,6 @@ #define IWL_MVM_STATION_COUNT 16 #define IWL_MVM_INVALID_STA 0xFF -#define IWL_MVM_TDLS_STA_COUNT 4 - enum iwl_ac { AC_BK, AC_BE, @@ -393,4 +388,22 @@ struct iwl_nonqos_seq_query_cmd { __le16 reserved; } __packed; /* NON_QOS_TX_COUNTER_GET_SET_API_S_VER_1 */ -#endif /* __fw_api_mac_h__ */ +/** + * struct iwl_missed_beacons_notif - information on missed beacons + * ( MISSED_BEACONS_NOTIFICATION = 0xa2 ) + * @mac_id: interface ID + * @consec_missed_beacons_since_last_rx: number of consecutive missed + * beacons since last RX. 
+ * @consec_missed_beacons: number of consecutive missed beacons + * @num_expected_beacons: number of expected beacons + * @num_recvd_beacons: number of received beacons + */ +struct iwl_missed_beacons_notif { + __le32 mac_id; + __le32 consec_missed_beacons_since_last_rx; + __le32 consec_missed_beacons; + __le32 num_expected_beacons; + __le32 num_recvd_beacons; +} __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */ + +#endif /* __iwl_fw_api_mac_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h new file mode 100644 index 000000000000..00bc7a25dece --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h @@ -0,0 +1,386 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless <linuxwifi@intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_fw_api_nvm_reg_h__
+#define __iwl_fw_api_nvm_reg_h__
+
+/**
+ * enum iwl_regulatory_and_nvm_subcmd_ids - regulatory/NVM commands
+ */
+enum iwl_regulatory_and_nvm_subcmd_ids {
+	/**
+	 * @NVM_ACCESS_COMPLETE: &struct iwl_nvm_access_complete_cmd
+	 */
+	NVM_ACCESS_COMPLETE = 0x0,
+
+	/**
+	 * @NVM_GET_INFO:
+	 * Command is &struct iwl_nvm_get_info,
+	 * response is &struct iwl_nvm_get_info_rsp
+	 */
+	NVM_GET_INFO = 0x2,
+};
+
+/**
+ * enum iwl_nvm_access_op - NVM access opcode
+ * @IWL_NVM_READ: read NVM
+ * @IWL_NVM_WRITE: write NVM
+ */
+enum iwl_nvm_access_op {
+	IWL_NVM_READ = 0,
+	IWL_NVM_WRITE = 1,
+};
+
+/**
+ * enum iwl_nvm_access_target - target of the NVM_ACCESS_CMD
+ * @NVM_ACCESS_TARGET_CACHE: access the cache
+ * @NVM_ACCESS_TARGET_OTP: access the OTP
+ * @NVM_ACCESS_TARGET_EEPROM: access the EEPROM
+ */
+enum iwl_nvm_access_target {
+	NVM_ACCESS_TARGET_CACHE = 0,
+	NVM_ACCESS_TARGET_OTP = 1,
+	NVM_ACCESS_TARGET_EEPROM = 2,
+};
+
+/**
+ * enum iwl_nvm_section_type - section types for NVM_ACCESS_CMD
+ * @NVM_SECTION_TYPE_SW: software section
+ * @NVM_SECTION_TYPE_REGULATORY: regulatory section
+ * @NVM_SECTION_TYPE_CALIBRATION: calibration section
+ * @NVM_SECTION_TYPE_PRODUCTION: production section
+ * @NVM_SECTION_TYPE_MAC_OVERRIDE: MAC override section
+ * @NVM_SECTION_TYPE_PHY_SKU: PHY SKU section
+ * @NVM_MAX_NUM_SECTIONS: number of sections
+ */
+enum iwl_nvm_section_type {
+	NVM_SECTION_TYPE_SW = 1,
+	NVM_SECTION_TYPE_REGULATORY = 3,
+	NVM_SECTION_TYPE_CALIBRATION = 4,
+	NVM_SECTION_TYPE_PRODUCTION = 5,
+	NVM_SECTION_TYPE_MAC_OVERRIDE = 11,
+	NVM_SECTION_TYPE_PHY_SKU = 12,
+	NVM_MAX_NUM_SECTIONS = 13,
+};
+
+/**
+ * struct iwl_nvm_access_cmd - Request the device to send an NVM section
+ * @op_code: &enum iwl_nvm_access_op
+ * @target: &enum iwl_nvm_access_target
+ * @type: &enum iwl_nvm_section_type
+ * @offset: offset in bytes into the section
+ * @length: in bytes, to read/write
+ * @data: for a write operation, the data to write; empty on read
+ */
+struct iwl_nvm_access_cmd {
+	u8 op_code;
+	u8 target;
+	__le16 type;
+	__le16 offset;
+	__le16 length;
+	u8 data[];
+} __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_nvm_access_resp - response to NVM_ACCESS_CMD
+ * @offset: offset in bytes into the section
+ * @length: in bytes, either how much was written or read
+ * @type: NVM_SECTION_TYPE_*
+ * @status: 0 for success, non-zero otherwise
+ * @data: for a read operation, the data returned; empty on write.
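+ *
+ * Such a response follows a read request built, as a sketch (the offset
+ * and length values here are placeholders), like:
+ *
+ *	struct iwl_nvm_access_cmd cmd = {
+ *		.op_code = IWL_NVM_READ,
+ *		.target = NVM_ACCESS_TARGET_CACHE,
+ *		.type = cpu_to_le16(NVM_SECTION_TYPE_SW),
+ *		.offset = cpu_to_le16(offset),
+ *		.length = cpu_to_le16(length),
+ *	};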
+ */
+struct iwl_nvm_access_resp {
+	__le16 offset;
+	__le16 length;
+	__le16 type;
+	__le16 status;
+	u8 data[];
+} __packed; /* NVM_ACCESS_CMD_RESP_API_S_VER_2 */
+
+/*
+ * struct iwl_nvm_get_info - request to get NVM data
+ */
+struct iwl_nvm_get_info {
+	__le32 reserved;
+} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_S_VER_1 */
+
+/**
+ * enum iwl_nvm_info_general_flags - flags in NVM_GET_INFO resp
+ * @NVM_GENERAL_FLAGS_EMPTY_OTP: 1 if OTP is empty
+ */
+enum iwl_nvm_info_general_flags {
+	NVM_GENERAL_FLAGS_EMPTY_OTP = BIT(0),
+};
+
+/**
+ * struct iwl_nvm_get_info_general - general NVM data
+ * @flags: bit 0: 1 - empty, 0 - non-empty
+ * @nvm_version: nvm version
+ * @board_type: board type
+ * @reserved: reserved
+ */
+struct iwl_nvm_get_info_general {
+	__le32 flags;
+	__le16 nvm_version;
+	u8 board_type;
+	u8 reserved;
+} __packed; /* GRP_REGULATORY_NVM_GET_INFO_GENERAL_S_VER_1 */
+
+/**
+ * struct iwl_nvm_get_info_sku - MAC information
+ * @enable_24g: band 2.4G enabled
+ * @enable_5g: band 5G enabled
+ * @enable_11n: 11n enabled
+ * @enable_11ac: 11ac enabled
+ * @mimo_disable: MIMO disabled
+ * @ext_crypto: Extended crypto enabled
+ */
+struct iwl_nvm_get_info_sku {
+	__le32 enable_24g;
+	__le32 enable_5g;
+	__le32 enable_11n;
+	__le32 enable_11ac;
+	__le32 mimo_disable;
+	__le32 ext_crypto;
+} __packed; /* GRP_REGULATORY_NVM_GET_INFO_MAC_SKU_SECTION_S_VER_1 */
+
+/**
+ * struct iwl_nvm_get_info_phy - phy information
+ * @tx_chains: BIT 0 chain A, BIT 1 chain B
+ * @rx_chains: BIT 0 chain A, BIT 1 chain B
+ */
+struct iwl_nvm_get_info_phy {
+	__le32 tx_chains;
+	__le32 rx_chains;
+} __packed; /* GRP_REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */
+
+#define IWL_NUM_CHANNELS (51)
+
+/**
+ * struct iwl_nvm_get_info_regulatory - regulatory information
+ * @lar_enabled: is LAR enabled
+ * @channel_profile: regulatory data of this channel
+ * @reserved: reserved
+ */
+struct iwl_nvm_get_info_regulatory {
+	__le32 lar_enabled;
+	__le16 channel_profile[IWL_NUM_CHANNELS];
+	__le16 reserved;
+} __packed; /* GRP_REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */
+
+/**
+ * struct iwl_nvm_get_info_rsp - response to get NVM data
+ * @general: general NVM data
+ * @mac_sku: data relating to MAC sku
+ * @phy_sku: data relating to PHY sku
+ * @regulatory: regulatory data
+ */
+struct iwl_nvm_get_info_rsp {
+	struct iwl_nvm_get_info_general general;
+	struct iwl_nvm_get_info_sku mac_sku;
+	struct iwl_nvm_get_info_phy phy_sku;
+	struct iwl_nvm_get_info_regulatory regulatory;
+} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_RSP_S_VER_1 */
+
+/**
+ * struct iwl_nvm_access_complete_cmd - NVM_ACCESS commands are completed
+ * @reserved: reserved
+ */
+struct iwl_nvm_access_complete_cmd {
+	__le32 reserved;
+} __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_mcc_update_cmd_v1 - Request the device to update geographic
+ * regulatory profile according to the given MCC (Mobile Country Code).
+ * The MCC is a two-letter code, ASCII uppercase [A-Z], or '00' for the world
+ * domain. A 'ZZ' MCC will be used to switch to the NVM default profile; in
+ * this case, the MCC in the cmd response will be the relevant MCC in the NVM.
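+ *
+ * As a sketch (alpha2[] is a hypothetical two-character country buffer),
+ * the two ASCII letters pack into @mcc as:
+ *
+ *	cmd.mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]);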
+ * @mcc: given mobile country code
+ * @source_id: the source from where we got the MCC, see iwl_mcc_source
+ * @reserved: reserved for alignment
+ */
+struct iwl_mcc_update_cmd_v1 {
+	__le16 mcc;
+	u8 source_id;
+	u8 reserved;
+} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_mcc_update_cmd - Request the device to update geographic
+ * regulatory profile according to the given MCC (Mobile Country Code).
+ * The MCC is a two-letter code, ASCII upper case [A-Z], or '00' for the
+ * world domain.
+ * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the
+ * MCC in the cmd response will be the relevant MCC in the NVM.
+ * @mcc: given mobile country code
+ * @source_id: the source from where we got the MCC, see iwl_mcc_source
+ * @reserved: reserved for alignment
+ * @key: integrity key for MCC API OEM testing
+ * @reserved2: reserved
+ */
+struct iwl_mcc_update_cmd {
+	__le16 mcc;
+	u8 source_id;
+	u8 reserved;
+	__le32 key;
+	u8 reserved2[20];
+} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_mcc_update_resp_v1 - response to MCC_UPDATE_CMD.
+ * Contains the new channel control profile map, if changed, and the new MCC
+ * (mobile country code).
+ * The new MCC may be different from what was requested in MCC_UPDATE_CMD.
+ * @status: see &enum iwl_mcc_update_status
+ * @mcc: the new applied MCC
+ * @cap: capabilities for all channels which match the MCC
+ * @source_id: the MCC source, see iwl_mcc_source
+ * @n_channels: number of channels in @channels (may be 14, 39, 50 or 51
+ *	channels, depending on platform)
+ * @channels: channel control data map, DWORD for each channel. Only the first
+ *	16 bits are used.
+ */
+struct iwl_mcc_update_resp_v1 {
+	__le32 status;
+	__le16 mcc;
+	u8 cap;
+	u8 source_id;
+	__le32 n_channels;
+	__le32 channels[0];
+} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_1 */
+
+/**
+ * struct iwl_mcc_update_resp - response to MCC_UPDATE_CMD.
+ * Contains the new channel control profile map, if changed, and the new MCC
+ * (mobile country code).
+ * The new MCC may be different from what was requested in MCC_UPDATE_CMD.
+ * @status: see &enum iwl_mcc_update_status
+ * @mcc: the new applied MCC
+ * @cap: capabilities for all channels which match the MCC
+ * @source_id: the MCC source, see iwl_mcc_source
+ * @time: time elapsed from the MCC test start (in 30 seconds TU)
+ * @reserved: reserved.
+ * @n_channels: number of channels in @channels (may be 14, 39, 50 or 51
+ *	channels, depending on platform)
+ * @channels: channel control data map, DWORD for each channel. Only the first
+ *	16 bits are used.
+ */
+struct iwl_mcc_update_resp {
+	__le32 status;
+	__le16 mcc;
+	u8 cap;
+	u8 source_id;
+	__le16 time;
+	__le16 reserved;
+	__le32 n_channels;
+	__le32 channels[0];
+} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_2 */
+
+/**
+ * struct iwl_mcc_chub_notif - chub notifies of mcc change
+ * (MCC_CHUB_UPDATE_CMD = 0xc9)
+ * The Chub (Communication Hub, CommsHUB) is a HW component that connects to
+ * the cellular and connectivity cores that gets updates of the mcc, and
+ * notifies the ucode directly of any mcc change.
+ * The ucode requests the driver to request the device to update geographic
+ * regulatory profile according to the given MCC (Mobile Country Code).
+ * The MCC is a two-letter code, ASCII upper case [A-Z], or '00' for the
+ * world domain.
+ * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the
+ * MCC in the cmd response will be the relevant MCC in the NVM.
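+ *
+ * Going back to &struct iwl_mcc_update_resp above, an illustrative sketch
+ * only of consuming the returned channel map, where apply_channel_flags()
+ * stands in for a hypothetical driver-side helper:
+ *
+ *	int i;
+ *
+ *	for (i = 0; i < le32_to_cpu(resp->n_channels); i++) {
+ *		u16 flags = le32_to_cpu(resp->channels[i]) & 0xffff;
+ *
+ *		apply_channel_flags(i, flags);
+ *	}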
+ * @mcc: given mobile country code + * @source_id: identity of the change originator, see iwl_mcc_source + * @reserved1: reserved for alignment + */ +struct iwl_mcc_chub_notif { + __le16 mcc; + u8 source_id; + u8 reserved1; +} __packed; /* LAR_MCC_NOTIFY_S */ + +enum iwl_mcc_update_status { + MCC_RESP_NEW_CHAN_PROFILE, + MCC_RESP_SAME_CHAN_PROFILE, + MCC_RESP_INVALID, + MCC_RESP_NVM_DISABLED, + MCC_RESP_ILLEGAL, + MCC_RESP_LOW_PRIORITY, + MCC_RESP_TEST_MODE_ACTIVE, + MCC_RESP_TEST_MODE_NOT_ACTIVE, + MCC_RESP_TEST_MODE_DENIAL_OF_SERVICE, +}; + +enum iwl_mcc_source { + MCC_SOURCE_OLD_FW = 0, + MCC_SOURCE_ME = 1, + MCC_SOURCE_BIOS = 2, + MCC_SOURCE_3G_LTE_HOST = 3, + MCC_SOURCE_3G_LTE_DEVICE = 4, + MCC_SOURCE_WIFI = 5, + MCC_SOURCE_RESERVED = 6, + MCC_SOURCE_DEFAULT = 7, + MCC_SOURCE_UNINITIALIZED = 8, + MCC_SOURCE_MCC_API = 9, + MCC_SOURCE_GET_CURRENT = 0x10, + MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11, +}; + +#endif /* __iwl_fw_api_nvm_reg_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h new file mode 100644 index 000000000000..53cab993068f --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h @@ -0,0 +1,101 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless <linuxwifi@intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_fw_api_offload_h__ +#define __iwl_fw_api_offload_h__ + +/** + * enum iwl_prot_offload_subcmd_ids - protocol offload commands + */ +enum iwl_prot_offload_subcmd_ids { + /** + * @STORED_BEACON_NTF: &struct iwl_stored_beacon_notif + */ + STORED_BEACON_NTF = 0xFF, +}; + +#define MAX_STORED_BEACON_SIZE 600 + +/** + * struct iwl_stored_beacon_notif - Stored beacon notification + * + * @system_time: system time on air rise + * @tsf: TSF on air rise + * @beacon_timestamp: beacon on air rise + * @band: band, matches &RX_RES_PHY_FLAGS_BAND_24 definition + * @channel: channel this beacon was received on + * @rates: rate in ucode internal format + * @byte_count: frame's byte count + * @data: beacon data, length in @byte_count + */ +struct iwl_stored_beacon_notif { + __le32 system_time; + __le64 tsf; + __le32 beacon_timestamp; + __le16 band; + __le16 channel; + __le32 rates; + __le32 byte_count; + u8 data[MAX_STORED_BEACON_SIZE]; +} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_2 */ + +#endif /* __iwl_fw_api_offload_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/paging.h b/drivers/net/wireless/intel/iwlwifi/fw/api/paging.h new file mode 100644 index 000000000000..e76f9cd4473d --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/paging.h @@ -0,0 +1,108 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless <linuxwifi@intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 
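+ *
+ * (Referring back to &struct iwl_stored_beacon_notif in offload.h above —
+ * an illustrative sketch only. Since @byte_count comes from the firmware,
+ * a consumer would bound it before copying the frame out:
+ *
+ *	u32 len = le32_to_cpu(notif->byte_count);
+ *
+ *	if (len > MAX_STORED_BEACON_SIZE)
+ *		len = MAX_STORED_BEACON_SIZE;
+ *	memcpy(frame, notif->data, len);
+ *
+ * where frame is a caller-provided buffer of at least that size.)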
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __iwl_fw_api_paging_h__ +#define __iwl_fw_api_paging_h__ + +#define NUM_OF_FW_PAGING_BLOCKS 33 /* 32 for data and 1 block for CSS */ + +/** + * struct iwl_fw_paging_cmd - paging layout + * + * Send to FW the paging layout in the driver. + * + * @flags: various flags for the command + * @block_size: the block size in powers of 2 + * @block_num: number of blocks specified in the command. + * @device_phy_addr: virtual addresses from device side + */ +struct iwl_fw_paging_cmd { + __le32 flags; + __le32 block_size; + __le32 block_num; + __le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS]; +} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */ + +/** + * enum iwl_fw_item_id - FW item IDs + * + * @IWL_FW_ITEM_ID_PAGING: Address of the pages that the FW will upload + * download + */ +enum iwl_fw_item_id { + IWL_FW_ITEM_ID_PAGING = 3, +}; + +/** + * struct iwl_fw_get_item_cmd - get an item from the fw + * @item_id: ID of item to obtain, see &enum iwl_fw_item_id + */ +struct iwl_fw_get_item_cmd { + __le32 item_id; +} __packed; /* FW_GET_ITEM_CMD_API_S_VER_1 */ + +struct iwl_fw_get_item_resp { + __le32 item_id; + __le32 item_byte_cnt; + __le32 item_val; +} __packed; /* FW_GET_ITEM_RSP_S_VER_1 */ + +#endif /* __iwl_fw_api_paging_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h new file mode 100644 index 000000000000..45f61c6af14e --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h @@ -0,0 +1,164 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. 
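+ *
+ * (Referring back to &struct iwl_fw_paging_cmd in paging.h above — an
+ * illustrative sketch only, with assumed values; the numbers below are
+ * not taken from this patch. For 32KB blocks, @block_size carries the
+ * power of two, and one device address is sent per block, CSS included:
+ *
+ *	cmd.block_size = cpu_to_le32(15);
+ *	cmd.block_num = cpu_to_le32(num_data_blocks);
+ *	for (i = 0; i < num_data_blocks + 1; i++)
+ *		cmd.device_phy_addr[i] = cpu_to_le32(addr_of_block(i));
+ *
+ * where num_data_blocks and addr_of_block() stand in for driver state.)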
+ * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless <linuxwifi@intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_fw_api_phy_ctxt_h__ +#define __iwl_fw_api_phy_ctxt_h__ + +/* Supported bands */ +#define PHY_BAND_5 (0) +#define PHY_BAND_24 (1) + +/* Supported channel width, vary if there is VHT support */ +#define PHY_VHT_CHANNEL_MODE20 (0x0) +#define PHY_VHT_CHANNEL_MODE40 (0x1) +#define PHY_VHT_CHANNEL_MODE80 (0x2) +#define PHY_VHT_CHANNEL_MODE160 (0x3) + +/* + * Control channel position: + * For legacy set bit means upper channel, otherwise lower. + * For VHT - bit-2 marks if the control is lower/upper relative to center-freq + * bits-1:0 mark the distance from the center freq. for 20Mhz, offset is 0. 
+ * center_freq + * | + * 40Mhz |_______|_______| + * 80Mhz |_______|_______|_______|_______| + * 160Mhz |_______|_______|_______|_______|_______|_______|_______|_______| + * code 011 010 001 000 | 100 101 110 111 + */ +#define PHY_VHT_CTRL_POS_1_BELOW (0x0) +#define PHY_VHT_CTRL_POS_2_BELOW (0x1) +#define PHY_VHT_CTRL_POS_3_BELOW (0x2) +#define PHY_VHT_CTRL_POS_4_BELOW (0x3) +#define PHY_VHT_CTRL_POS_1_ABOVE (0x4) +#define PHY_VHT_CTRL_POS_2_ABOVE (0x5) +#define PHY_VHT_CTRL_POS_3_ABOVE (0x6) +#define PHY_VHT_CTRL_POS_4_ABOVE (0x7) + +/* + * @band: PHY_BAND_* + * @channel: channel number + * @width: PHY_[VHT|LEGACY]_CHANNEL_* + * @ctrl channel: PHY_[VHT|LEGACY]_CTRL_* + */ +struct iwl_fw_channel_info { + u8 band; + u8 channel; + u8 width; + u8 ctrl_pos; +} __packed; + +#define PHY_RX_CHAIN_DRIVER_FORCE_POS (0) +#define PHY_RX_CHAIN_DRIVER_FORCE_MSK \ + (0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS) +#define PHY_RX_CHAIN_VALID_POS (1) +#define PHY_RX_CHAIN_VALID_MSK \ + (0x7 << PHY_RX_CHAIN_VALID_POS) +#define PHY_RX_CHAIN_FORCE_SEL_POS (4) +#define PHY_RX_CHAIN_FORCE_SEL_MSK \ + (0x7 << PHY_RX_CHAIN_FORCE_SEL_POS) +#define PHY_RX_CHAIN_FORCE_MIMO_SEL_POS (7) +#define PHY_RX_CHAIN_FORCE_MIMO_SEL_MSK \ + (0x7 << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS) +#define PHY_RX_CHAIN_CNT_POS (10) +#define PHY_RX_CHAIN_CNT_MSK \ + (0x3 << PHY_RX_CHAIN_CNT_POS) +#define PHY_RX_CHAIN_MIMO_CNT_POS (12) +#define PHY_RX_CHAIN_MIMO_CNT_MSK \ + (0x3 << PHY_RX_CHAIN_MIMO_CNT_POS) +#define PHY_RX_CHAIN_MIMO_FORCE_POS (14) +#define PHY_RX_CHAIN_MIMO_FORCE_MSK \ + (0x1 << PHY_RX_CHAIN_MIMO_FORCE_POS) + +/* TODO: fix the value, make it depend on firmware at runtime? */ +#define NUM_PHY_CTX 3 + +/* TODO: complete missing documentation */ +/** + * struct iwl_phy_context_cmd - config of the PHY context + * ( PHY_CONTEXT_CMD = 0x8 ) + * @id_and_color: ID and color of the relevant Binding + * @action: action to perform, one of FW_CTXT_ACTION_* + * @apply_time: 0 means immediate apply and context switch. + * other value means apply new params after X usecs + * @tx_param_color: ??? + * @ci: channel info + * @txchain_info: ??? + * @rxchain_info: ??? + * @acquisition_data: ??? + * @dsp_cfg_flags: set to 0 + */ +struct iwl_phy_context_cmd { + /* COMMON_INDEX_HDR_API_S_VER_1 */ + __le32 id_and_color; + __le32 action; + /* PHY_CONTEXT_DATA_API_S_VER_1 */ + __le32 apply_time; + __le32 tx_param_color; + struct iwl_fw_channel_info ci; + __le32 txchain_info; + __le32 rxchain_info; + __le32 acquisition_data; + __le32 dsp_cfg_flags; +} __packed; /* PHY_CONTEXT_CMD_API_VER_1 */ + +#endif /* __iwl_fw_api_phy_ctxt_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h new file mode 100644 index 000000000000..9cc59e00bd95 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h @@ -0,0 +1,258 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
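+ *
+ * (Referring back to the PHY_RX_CHAIN_* fields in phy-ctxt.h above, an
+ * illustrative sketch only: a PHY context valid on chains A and B, with
+ * one chain active and two used for MIMO, could set @rxchain_info as
+ *
+ *	cmd.rxchain_info = cpu_to_le32(0x3 << PHY_RX_CHAIN_VALID_POS |
+ *				       1 << PHY_RX_CHAIN_CNT_POS |
+ *				       2 << PHY_RX_CHAIN_MIMO_CNT_POS);
+ *
+ * packing each subfield at the _POS offsets defined there.)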
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless <linuxwifi@intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ + +#ifndef __iwl_fw_api_phy_h__ +#define __iwl_fw_api_phy_h__ + +/** + * enum iwl_phy_ops_subcmd_ids - PHY group commands + */ +enum iwl_phy_ops_subcmd_ids { + /** + * @CMD_DTS_MEASUREMENT_TRIGGER_WIDE: + * Uses either &struct iwl_dts_measurement_cmd or + * &struct iwl_ext_dts_measurement_cmd + */ + CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0, + + /** + * @CTDP_CONFIG_CMD: &struct iwl_mvm_ctdp_cmd + */ + CTDP_CONFIG_CMD = 0x03, + + /** + * @TEMP_REPORTING_THRESHOLDS_CMD: &struct temp_report_ths_cmd + */ + TEMP_REPORTING_THRESHOLDS_CMD = 0x04, + + /** + * @GEO_TX_POWER_LIMIT: &struct iwl_geo_tx_power_profiles_cmd + */ + GEO_TX_POWER_LIMIT = 0x05, + + /** + * @CT_KILL_NOTIFICATION: &struct ct_kill_notif + */ + CT_KILL_NOTIFICATION = 0xFE, + + /** + * @DTS_MEASUREMENT_NOTIF_WIDE: + * &struct iwl_dts_measurement_notif_v1 or + * &struct iwl_dts_measurement_notif_v2 + */ + DTS_MEASUREMENT_NOTIF_WIDE = 0xFF, +}; + +/* DTS measurements */ + +enum iwl_dts_measurement_flags { + DTS_TRIGGER_CMD_FLAGS_TEMP = BIT(0), + DTS_TRIGGER_CMD_FLAGS_VOLT = BIT(1), +}; + +/** + * struct iwl_dts_measurement_cmd - request DTS temp and/or voltage measurements + * + * @flags: indicates which measurements we want as specified in + * &enum iwl_dts_measurement_flags + */ +struct iwl_dts_measurement_cmd { + __le32 flags; +} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_CMD_S */ + +/** +* enum iwl_dts_control_measurement_mode - DTS measurement type +* @DTS_AUTOMATIC: Automatic mode (full SW control). Provide temperature read +* back (latest value. Not waiting for new value). Use automatic +* SW DTS configuration. +* @DTS_REQUEST_READ: Request DTS read. Configure DTS with manual settings, +* trigger DTS reading and provide read back temperature read +* when available. +* @DTS_OVER_WRITE: over-write the DTS temperatures in the SW until next read +* @DTS_DIRECT_WITHOUT_MEASURE: DTS returns its latest temperature result, +* without measurement trigger. +*/ +enum iwl_dts_control_measurement_mode { + DTS_AUTOMATIC = 0, + DTS_REQUEST_READ = 1, + DTS_OVER_WRITE = 2, + DTS_DIRECT_WITHOUT_MEASURE = 3, +}; + +/** +* enum iwl_dts_used - DTS to use or used for measurement in the DTS request +* @DTS_USE_TOP: Top +* @DTS_USE_CHAIN_A: chain A +* @DTS_USE_CHAIN_B: chain B +* @DTS_USE_CHAIN_C: chain C +* @XTAL_TEMPERATURE: read temperature from xtal +*/ +enum iwl_dts_used { + DTS_USE_TOP = 0, + DTS_USE_CHAIN_A = 1, + DTS_USE_CHAIN_B = 2, + DTS_USE_CHAIN_C = 3, + XTAL_TEMPERATURE = 4, +}; + +/** +* enum iwl_dts_bit_mode - bit-mode to use in DTS request read mode +* @DTS_BIT6_MODE: bit 6 mode +* @DTS_BIT8_MODE: bit 8 mode +*/ +enum iwl_dts_bit_mode { + DTS_BIT6_MODE = 0, + DTS_BIT8_MODE = 1, +}; + +/** + * struct iwl_ext_dts_measurement_cmd - request extended DTS temp measurements + * @control_mode: see &enum iwl_dts_control_measurement_mode + * @temperature: used when over write DTS mode is selected + * @sensor: set temperature sensor to use. See &enum iwl_dts_used + * @avg_factor: average factor to DTS in request DTS read mode + * @bit_mode: value defines the DTS bit mode to use. 
See &enum iwl_dts_bit_mode
+ * @step_duration: step duration for the DTS
+ */
+struct iwl_ext_dts_measurement_cmd {
+	__le32 control_mode;
+	__le32 temperature;
+	__le32 sensor;
+	__le32 avg_factor;
+	__le32 bit_mode;
+	__le32 step_duration;
+} __packed; /* XVT_FW_DTS_CONTROL_MEASUREMENT_REQUEST_API_S */
+
+/**
+ * struct iwl_dts_measurement_notif_v1 - measurements notification
+ *
+ * @temp: the measured temperature
+ * @voltage: the measured voltage
+ */
+struct iwl_dts_measurement_notif_v1 {
+	__le32 temp;
+	__le32 voltage;
+} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_1 */
+
+/**
+ * struct iwl_dts_measurement_notif_v2 - measurements notification
+ *
+ * @temp: the measured temperature
+ * @voltage: the measured voltage
+ * @threshold_idx: the trip index that was crossed
+ */
+struct iwl_dts_measurement_notif_v2 {
+	__le32 temp;
+	__le32 voltage;
+	__le32 threshold_idx;
+} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_2 */
+
+/**
+ * struct ct_kill_notif - CT-kill entry notification
+ *
+ * @temperature: the current temperature in Celsius
+ * @reserved: reserved
+ */
+struct ct_kill_notif {
+	__le16 temperature;
+	__le16 reserved;
+} __packed; /* GRP_PHY_CT_KILL_NTF */
+
+/**
+* enum iwl_mvm_ctdp_cmd_operation - CTDP command operations
+* @CTDP_CMD_OPERATION_START: update the current budget
+* @CTDP_CMD_OPERATION_STOP: stop ctdp
+* @CTDP_CMD_OPERATION_REPORT: get the average budget
+*/
+enum iwl_mvm_ctdp_cmd_operation {
+	CTDP_CMD_OPERATION_START = 0x1,
+	CTDP_CMD_OPERATION_STOP = 0x2,
+	CTDP_CMD_OPERATION_REPORT = 0x4,
+}; /* CTDP_CMD_OPERATION_TYPE_E */
+
+/**
+ * struct iwl_mvm_ctdp_cmd - track and manage the FW power consumption budget
+ *
+ * @operation: see &enum iwl_mvm_ctdp_cmd_operation
+ * @budget: the budget in milliwatts
+ * @window_size: defined in API but not used
+ */
+struct iwl_mvm_ctdp_cmd {
+	__le32 operation;
+	__le32 budget;
+	__le32 window_size;
+} __packed;
+
+#define IWL_MAX_DTS_TRIPS 8
+
+/**
+ * struct temp_report_ths_cmd - set temperature thresholds
+ *
+ * @num_temps: number of temperature thresholds passed
+ * @thresholds: array with the thresholds to be configured
+ */
+struct temp_report_ths_cmd {
+	__le32 num_temps;
+	__le16 thresholds[IWL_MAX_DTS_TRIPS];
+} __packed; /* GRP_PHY_TEMP_REPORTING_THRESHOLDS_CMD */
+
+#endif /* __iwl_fw_api_phy_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
index 7da57ef2454e..a06afb5605d2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -18,11 +18,6 @@
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
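+ *
+ * (Referring back to &struct temp_report_ths_cmd in phy.h above, an
+ * illustrative sketch only: asking to be notified when crossing 75 and
+ * 110 degrees would look like
+ *
+ *	struct temp_report_ths_cmd cmd = {
+ *		.num_temps = cpu_to_le32(2),
+ *		.thresholds[0] = cpu_to_le16(75),
+ *		.thresholds[1] = cpu_to_le16(110),
+ *	};
+ *
+ * with the crossed trip index later reported back in
+ * &struct iwl_dts_measurement_notif_v2.)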
*
@@ -65,8 +60,8 @@
 *
 *****************************************************************************/
-#ifndef __fw_api_power_h__
-#define __fw_api_power_h__
+#ifndef __iwl_fw_api_power_h__
+#define __iwl_fw_api_power_h__
 /* Power Management Commands, Responses, Notifications */
@@ -224,7 +219,7 @@ struct iwl_device_power_cmd {
 /**
 * struct iwl_mac_power_cmd - New power command containing uAPSD support
 *	MAC_PM_POWER_TABLE = 0xA9 (command, has simple generic response)
- * @id_and_color:	MAC contex identifier, &enum iwl_mvm_id_and_color
+ * @id_and_color:	MAC context identifier, &enum iwl_ctxt_id_and_color
 * @flags:		Power table command flags from POWER_FLAGS_*
 * @keep_alive_seconds:	Keep alive period in seconds. Default - 25 sec.
 *			Minimum allowed:- 3 * DTIM. Keep alive period must be
@@ -528,4 +523,4 @@ struct iwl_beacon_filter_cmd {
 #define IWL_BF_CMD_CONFIG_DEFAULTS IWL_BF_CMD_CONFIG(_DEFAULT)
 #define IWL_BF_CMD_CONFIG_D0I3 IWL_BF_CMD_CONFIG(_D0I3)
-#endif
+#endif /* __iwl_fw_api_power_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index bdf1228d050b..a13fd8a1be62 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -17,11 +17,6 @@
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
@@ -62,10 +57,10 @@
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *****************************************************************************/
-#ifndef __fw_api_rs_h__
-#define __fw_api_rs_h__
+#ifndef __iwl_fw_api_rs_h__
+#define __iwl_fw_api_rs_h__
-#include "fw-api-mac.h"
+#include "mac.h"
 /*
 * These serve as indexes into
@@ -410,4 +405,4 @@ struct iwl_lq_cmd {
 	__le32 ss_params;
 }; /* LINK_QUALITY_CMD_API_S_VER_1 */
-#endif /* __fw_api_rs_h__ */
+#endif /* __iwl_fw_api_rs_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index 59038ade08d8..e7565f37ece9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -18,11 +18,6 @@
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
* @@ -65,8 +60,8 @@ * *****************************************************************************/ -#ifndef __fw_api_rx_h__ -#define __fw_api_rx_h__ +#ifndef __iwl_fw_api_rx_h__ +#define __iwl_fw_api_rx_h__ /* API for pre-9000 hardware */ @@ -571,4 +566,24 @@ struct iwl_mvm_pm_state_notification { __le16 reserved; } __packed; /* PEER_PM_NTFY_API_S_VER_1 */ -#endif /* __fw_api_rx_h__ */ +#define BA_WINDOW_STREAMS_MAX 16 +#define BA_WINDOW_STATUS_TID_MSK 0x000F +#define BA_WINDOW_STATUS_STA_ID_POS 4 +#define BA_WINDOW_STATUS_STA_ID_MSK 0x01F0 +#define BA_WINDOW_STATUS_VALID_MSK BIT(9) + +/** + * struct iwl_ba_window_status_notif - reordering window's status notification + * @bitmap: bitmap of received frames [start_seq_num + 0]..[start_seq_num + 63] + * @ra_tid: bit 3:0 - TID, bit 8:4 - STA_ID, bit 9 - valid + * @start_seq_num: the start sequence number of the bitmap + * @mpdu_rx_count: the number of received MPDUs since entering D0i3 + */ +struct iwl_ba_window_status_notif { + __le64 bitmap[BA_WINDOW_STREAMS_MAX]; + __le16 ra_tid[BA_WINDOW_STREAMS_MAX]; + __le32 start_seq_num[BA_WINDOW_STREAMS_MAX]; + __le16 mpdu_rx_count[BA_WINDOW_STREAMS_MAX]; +} __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */ + +#endif /* __iwl_fw_api_rx_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h index 1cd7cc087936..5a40092febfb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h @@ -18,11 +18,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -65,8 +60,8 @@ * *****************************************************************************/ -#ifndef __fw_api_scan_h__ -#define __fw_api_scan_h__ +#ifndef __iwl_fw_api_scan_h__ +#define __iwl_fw_api_scan_h__ /* Scan Commands, Responses, Notifications */ @@ -789,4 +784,4 @@ struct iwl_umac_scan_iter_complete_notif { struct iwl_scan_results_notif results[]; } __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_2 */ -#endif +#endif /* __iwl_fw_api_scan_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sf.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sf.h new file mode 100644 index 000000000000..e517b55f1bc6 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sf.h @@ -0,0 +1,138 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless <linuxwifi@intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
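+ *
+ * (Referring back to &struct iwl_ba_window_status_notif in rx.h above, an
+ * illustrative sketch only of unpacking one @ra_tid entry with the
+ * BA_WINDOW_STATUS_* masks defined there:
+ *
+ *	u16 ra_tid = le16_to_cpu(notif->ra_tid[i]);
+ *	u8 tid = ra_tid & BA_WINDOW_STATUS_TID_MSK;
+ *	u8 sta_id = (ra_tid & BA_WINDOW_STATUS_STA_ID_MSK) >>
+ *		    BA_WINDOW_STATUS_STA_ID_POS;
+ *	bool valid = ra_tid & BA_WINDOW_STATUS_VALID_MSK;
+ *
+ * only entries with valid set carry meaningful data.)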
+ * + *****************************************************************************/ + +#ifndef __iwl_fw_api_sf_h__ +#define __iwl_fw_api_sf_h__ + +/* Smart Fifo state */ +enum iwl_sf_state { + SF_LONG_DELAY_ON = 0, /* should never be called by driver */ + SF_FULL_ON, + SF_UNINIT, + SF_INIT_OFF, + SF_HW_NUM_STATES +}; + +/* Smart Fifo possible scenario */ +enum iwl_sf_scenario { + SF_SCENARIO_SINGLE_UNICAST, + SF_SCENARIO_AGG_UNICAST, + SF_SCENARIO_MULTICAST, + SF_SCENARIO_BA_RESP, + SF_SCENARIO_TX_RESP, + SF_NUM_SCENARIO +}; + +#define SF_TRANSIENT_STATES_NUMBER 2 /* SF_LONG_DELAY_ON and SF_FULL_ON */ +#define SF_NUM_TIMEOUT_TYPES 2 /* Aging timer and Idle timer */ + +/* smart FIFO default values */ +#define SF_W_MARK_SISO 6144 +#define SF_W_MARK_MIMO2 8192 +#define SF_W_MARK_MIMO3 6144 +#define SF_W_MARK_LEGACY 4096 +#define SF_W_MARK_SCAN 4096 + +/* SF Scenarios timers for default configuration (aligned to 32 uSec) */ +#define SF_SINGLE_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */ +#define SF_SINGLE_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */ +#define SF_AGG_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */ +#define SF_AGG_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */ +#define SF_MCAST_IDLE_TIMER_DEF 160 /* 150 mSec */ +#define SF_MCAST_AGING_TIMER_DEF 400 /* 0.4 mSec */ +#define SF_BA_IDLE_TIMER_DEF 160 /* 150 uSec */ +#define SF_BA_AGING_TIMER_DEF 400 /* 0.4 mSec */ +#define SF_TX_RE_IDLE_TIMER_DEF 160 /* 150 uSec */ +#define SF_TX_RE_AGING_TIMER_DEF 400 /* 0.4 mSec */ + +/* SF Scenarios timers for BSS MAC configuration (aligned to 32 uSec) */ +#define SF_SINGLE_UNICAST_IDLE_TIMER 320 /* 300 uSec */ +#define SF_SINGLE_UNICAST_AGING_TIMER 2016 /* 2 mSec */ +#define SF_AGG_UNICAST_IDLE_TIMER 320 /* 300 uSec */ +#define SF_AGG_UNICAST_AGING_TIMER 2016 /* 2 mSec */ +#define SF_MCAST_IDLE_TIMER 2016 /* 2 mSec */ +#define SF_MCAST_AGING_TIMER 10016 /* 10 mSec */ +#define SF_BA_IDLE_TIMER 320 /* 300 uSec */ +#define SF_BA_AGING_TIMER 2016 /* 2 mSec */ +#define SF_TX_RE_IDLE_TIMER 320 /* 300 uSec */ +#define SF_TX_RE_AGING_TIMER 2016 /* 2 mSec */ + +#define SF_LONG_DELAY_AGING_TIMER 1000000 /* 1 Sec */ + +#define SF_CFG_DUMMY_NOTIF_OFF BIT(16) + +/** + * struct iwl_sf_cfg_cmd - Smart Fifo configuration command. + * @state: smart fifo state, types listed in &enum iwl_sf_state. + * @watermark: Minimum allowed available free space in RXF for transient state. + * @long_delay_timeouts: aging and idle timer values for each scenario + * in long delay state. + * @full_on_timeouts: timer values for each scenario in full on state. + */ +struct iwl_sf_cfg_cmd { + __le32 state; + __le32 watermark[SF_TRANSIENT_STATES_NUMBER]; + __le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; + __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; +} __packed; /* SF_CFG_API_S_VER_2 */ + +#endif /* __iwl_fw_api_sf_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h index 81f0a3463bac..af369eba3795 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h @@ -18,11 +18,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
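+ *
+ * (Referring back to &struct iwl_sf_cfg_cmd in sf.h above, an illustrative
+ * sketch only: switching the FIFO to full-on with the default watermark
+ * for a SISO connection would start from
+ *
+ *	struct iwl_sf_cfg_cmd cmd = {
+ *		.state = cpu_to_le32(SF_FULL_ON),
+ *		.watermark[SF_FULL_ON] = cpu_to_le32(SF_W_MARK_SISO),
+ *	};
+ *
+ * with the per-scenario timeout tables filled from the _DEF or BSS values
+ * listed there; note the timer defines are aligned to 32 usec units.)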
* - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -64,8 +59,8 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ -#ifndef __fw_api_sta_h__ -#define __fw_api_sta_h__ +#ifndef __iwl_fw_api_sta_h__ +#define __iwl_fw_api_sta_h__ /** * enum iwl_sta_flags - flags for the ADD_STA host command @@ -291,7 +286,7 @@ struct iwl_mvm_keyinfo { * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field. * @mac_id_n_color: the Mac context this station belongs to, - * see &enum iwl_mvm_id_and_color + * see &enum iwl_ctxt_id_and_color * @addr: station's MAC address * @reserved2: reserved * @sta_id: index of station in uCode's station table @@ -372,7 +367,7 @@ enum iwl_sta_type { * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field. * @mac_id_n_color: the Mac context this station belongs to, - * see &enum iwl_mvm_id_and_color + * see &enum iwl_ctxt_id_and_color * @addr: station's MAC address * @reserved2: reserved * @sta_id: index of station in uCode's station table @@ -575,4 +570,4 @@ struct iwl_mvm_eosp_notification { __le32 sta_id; } __packed; /* UAPSD_EOSP_NTFY_API_S_VER_1 */ -#endif /* __fw_api_sta_h__ */ +#endif /* __iwl_fw_api_sta_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h index c7531da508fd..53cb622aa9ab 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h @@ -18,11 +18,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -64,9 +59,9 @@ * *****************************************************************************/ -#ifndef __fw_api_stats_h__ -#define __fw_api_stats_h__ -#include "fw-api-mac.h" +#ifndef __iwl_fw_api_stats_h__ +#define __iwl_fw_api_stats_h__ +#include "mac.h" struct mvm_statistics_dbg { __le32 burst_check; @@ -476,4 +471,4 @@ struct iwl_statistics_cmd { __le32 flags; } __packed; /* STATISTICS_CMD_API_S_VER_1 */ -#endif /* __fw_api_stats_h__ */ +#endif /* __iwl_fw_api_stats_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h new file mode 100644 index 000000000000..7c6c2462d0e8 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h @@ -0,0 +1,208 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless <linuxwifi@intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_fw_api_tdls_h__ +#define __iwl_fw_api_tdls_h__ + +#include "fw/api/tx.h" +#include "fw/api/phy-ctxt.h" + +#define IWL_MVM_TDLS_STA_COUNT 4 + +/* Type of TDLS request */ +enum iwl_tdls_channel_switch_type { + TDLS_SEND_CHAN_SW_REQ = 0, + TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH, + TDLS_MOVE_CH, +}; /* TDLS_STA_CHANNEL_SWITCH_CMD_TYPE_API_E_VER_1 */ + +/** + * struct iwl_tdls_channel_switch_timing - Switch timing in TDLS channel-switch + * @frame_timestamp: GP2 timestamp of channel-switch request/response packet + * received from peer + * @max_offchan_duration: What amount of microseconds out of a DTIM is given + * to the TDLS off-channel communication. For instance if the DTIM is + * 200TU and the TDLS peer is to be given 25% of the time, the value + * given will be 50TU, or 50 * 1024 if translated into microseconds. 
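+ *
+ * Worked example from the above (illustrative only): a 200TU DTIM with a
+ * 25% share for the peer gives
+ *
+ *	timing.max_offchan_duration = cpu_to_le32(50 * 1024);
+ *
+ * i.e. 50TU expressed as 51200 microseconds.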
+ * @switch_time: switch time the peer sent in its channel switch timing IE
+ * @switch_timeout: switch timeout the peer sent in its channel switch timing IE
+ */
+struct iwl_tdls_channel_switch_timing {
+	__le32 frame_timestamp; /* GP2 time of peer packet Rx */
+	__le32 max_offchan_duration; /* given in micro-seconds */
+	__le32 switch_time; /* given in micro-seconds */
+	__le32 switch_timeout; /* given in micro-seconds */
+} __packed; /* TDLS_STA_CHANNEL_SWITCH_TIMING_DATA_API_S_VER_1 */
+
+#define IWL_TDLS_CH_SW_FRAME_MAX_SIZE 200
+
+/**
+ * struct iwl_tdls_channel_switch_frame - TDLS channel switch frame template
+ *
+ * A template representing a TDLS channel-switch request or response frame
+ *
+ * @switch_time_offset: offset to the channel switch timing IE in the template
+ * @tx_cmd: Tx parameters for the frame
+ * @data: frame data
+ */
+struct iwl_tdls_channel_switch_frame {
+	__le32 switch_time_offset;
+	struct iwl_tx_cmd tx_cmd;
+	u8 data[IWL_TDLS_CH_SW_FRAME_MAX_SIZE];
+} __packed; /* TDLS_STA_CHANNEL_SWITCH_FRAME_API_S_VER_1 */
+
+/**
+ * struct iwl_tdls_channel_switch_cmd - TDLS channel switch command
+ *
+ * The command is sent to initiate a channel switch and also in response to
+ * incoming TDLS channel-switch request/response packets from remote peers.
+ *
+ * @switch_type: see &enum iwl_tdls_channel_switch_type
+ * @peer_sta_id: station id of TDLS peer
+ * @ci: channel we switch to
+ * @timing: timing related data for command
+ * @frame: channel-switch request/response template, depending on @switch_type
+ */
+struct iwl_tdls_channel_switch_cmd {
+	u8 switch_type;
+	__le32 peer_sta_id;
+	struct iwl_fw_channel_info ci;
+	struct iwl_tdls_channel_switch_timing timing;
+	struct iwl_tdls_channel_switch_frame frame;
+} __packed; /* TDLS_STA_CHANNEL_SWITCH_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_tdls_channel_switch_notif - TDLS channel switch start notification
+ *
+ * @status: non-zero on success
+ * @offchannel_duration: duration given in microseconds
+ * @sta_id: station id of the peer the channel switch is performed with
+ */
+struct iwl_tdls_channel_switch_notif {
+	__le32 status;
+	__le32 offchannel_duration;
+	__le32 sta_id;
+} __packed; /* TDLS_STA_CHANNEL_SWITCH_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_tdls_sta_info - TDLS station info
+ *
+ * @sta_id: station id of the TDLS peer
+ * @tx_to_peer_tid: TID reserved vs. the peer for FW based Tx
+ * @tx_to_peer_ssn: initial SSN the FW should use for Tx on its TID vs the peer
+ * @is_initiator: 1 if the peer is the TDLS link initiator, 0 otherwise
+ */
+struct iwl_tdls_sta_info {
+	u8 sta_id;
+	u8 tx_to_peer_tid;
+	__le16 tx_to_peer_ssn;
+	__le32 is_initiator;
+} __packed; /* TDLS_STA_INFO_VER_1 */
+
+/**
+ * struct iwl_tdls_config_cmd - TDLS basic config command
+ *
+ * @id_and_color: MAC id and color being configured
+ * @tdls_peer_count: number of currently connected TDLS peers
+ * @tx_to_ap_tid: TID reserved vs. the AP for FW based Tx
+ * @tx_to_ap_ssn: initial SSN the FW should use for Tx on its TID vs. the AP
+ * @sta_info: per-station info.
Only the first tdls_peer_count entries are set + * @pti_req_data_offset: offset of network-level data for the PTI template + * @pti_req_tx_cmd: Tx parameters for PTI request template + * @pti_req_template: PTI request template data + */ +struct iwl_tdls_config_cmd { + __le32 id_and_color; /* mac id and color */ + u8 tdls_peer_count; + u8 tx_to_ap_tid; + __le16 tx_to_ap_ssn; + struct iwl_tdls_sta_info sta_info[IWL_MVM_TDLS_STA_COUNT]; + + __le32 pti_req_data_offset; + struct iwl_tx_cmd pti_req_tx_cmd; + u8 pti_req_template[0]; +} __packed; /* TDLS_CONFIG_CMD_API_S_VER_1 */ + +/** + * struct iwl_tdls_config_sta_info_res - TDLS per-station config information + * + * @sta_id: station id of the TDLS peer + * @tx_to_peer_last_seq: last sequence number used by FW during FW-based Tx to + * the peer + */ +struct iwl_tdls_config_sta_info_res { + __le16 sta_id; + __le16 tx_to_peer_last_seq; +} __packed; /* TDLS_STA_INFO_RSP_VER_1 */ + +/** + * struct iwl_tdls_config_res - TDLS config information from FW + * + * @tx_to_ap_last_seq: last sequence number used by FW during FW-based Tx to AP + * @sta_info: per-station TDLS config information + */ +struct iwl_tdls_config_res { + __le32 tx_to_ap_last_seq; + struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT]; +} __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */ + +#endif /* __iwl_fw_api_tdls_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h new file mode 100644 index 000000000000..3721a3ed358b --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h @@ -0,0 +1,386 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless <linuxwifi@intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_fw_api_time_event_h__ +#define __iwl_fw_api_time_event_h__ + +#include "fw/api/phy-ctxt.h" + +/* Time Event types, according to MAC type */ +enum iwl_time_event_type { + /* BSS Station Events */ + TE_BSS_STA_AGGRESSIVE_ASSOC, + TE_BSS_STA_ASSOC, + TE_BSS_EAP_DHCP_PROT, + TE_BSS_QUIET_PERIOD, + + /* P2P Device Events */ + TE_P2P_DEVICE_DISCOVERABLE, + TE_P2P_DEVICE_LISTEN, + TE_P2P_DEVICE_ACTION_SCAN, + TE_P2P_DEVICE_FULL_SCAN, + + /* P2P Client Events */ + TE_P2P_CLIENT_AGGRESSIVE_ASSOC, + TE_P2P_CLIENT_ASSOC, + TE_P2P_CLIENT_QUIET_PERIOD, + + /* P2P GO Events */ + TE_P2P_GO_ASSOC_PROT, + TE_P2P_GO_REPETITIVET_NOA, + TE_P2P_GO_CT_WINDOW, + + /* WiDi Sync Events */ + TE_WIDI_TX_SYNC, + + /* Channel Switch NoA */ + TE_CHANNEL_SWITCH_PERIOD, + + TE_MAX +}; /* MAC_EVENT_TYPE_API_E_VER_1 */ + +/* Time event - defines for command API v1 */ + +/* + * @TE_V1_FRAG_NONE: fragmentation of the time event is NOT allowed. + * @TE_V1_FRAG_SINGLE: fragmentation of the time event is allowed, but only + * the first fragment is scheduled. + * @TE_V1_FRAG_DUAL: fragmentation of the time event is allowed, but only + * the first 2 fragments are scheduled. + * @TE_V1_FRAG_ENDLESS: fragmentation of the time event is allowed, and any + * number of fragments are valid. + * + * Other than the constant defined above, specifying a fragmentation value 'x' + * means that the event can be fragmented but only the first 'x' will be + * scheduled. + */ +enum { + TE_V1_FRAG_NONE = 0, + TE_V1_FRAG_SINGLE = 1, + TE_V1_FRAG_DUAL = 2, + TE_V1_FRAG_ENDLESS = 0xffffffff +}; + +/* If a Time Event can be fragmented, this is the max number of fragments */ +#define TE_V1_FRAG_MAX_MSK 0x0fffffff +/* Repeat the time event endlessly (until removed) */ +#define TE_V1_REPEAT_ENDLESS 0xffffffff +/* If a Time Event has bounded repetitions, this is the maximal value */ +#define TE_V1_REPEAT_MAX_MSK_V1 0x0fffffff + +/* Time Event dependencies: none, on another TE, or in a specific time */ +enum { + TE_V1_INDEPENDENT = 0, + TE_V1_DEP_OTHER = BIT(0), + TE_V1_DEP_TSF = BIT(1), + TE_V1_EVENT_SOCIOPATHIC = BIT(2), +}; /* MAC_EVENT_DEPENDENCY_POLICY_API_E_VER_2 */ + +/* + * @TE_V1_NOTIF_NONE: no notifications + * @TE_V1_NOTIF_HOST_EVENT_START: request/receive notification on event start + * @TE_V1_NOTIF_HOST_EVENT_END:request/receive notification on event end + * @TE_V1_NOTIF_INTERNAL_EVENT_START: internal FW use + * @TE_V1_NOTIF_INTERNAL_EVENT_END: internal FW use. 
+ * @TE_V1_NOTIF_HOST_FRAG_START: request/receive notification on frag start
+ * @TE_V1_NOTIF_HOST_FRAG_END: request/receive notification on frag end
+ * @TE_V1_NOTIF_INTERNAL_FRAG_START: internal FW use.
+ * @TE_V1_NOTIF_INTERNAL_FRAG_END: internal FW use.
+ *
+ * Supported Time event notifications configuration.
+ * A notification (both event and fragment) includes a status indicating
+ * whether the FW was able to schedule the event or not. For fragment
+ * start/end notification the status is always success. There is no start/end
+ * fragment notification for monolithic events.
+ */
+enum {
+	TE_V1_NOTIF_NONE = 0,
+	TE_V1_NOTIF_HOST_EVENT_START = BIT(0),
+	TE_V1_NOTIF_HOST_EVENT_END = BIT(1),
+	TE_V1_NOTIF_INTERNAL_EVENT_START = BIT(2),
+	TE_V1_NOTIF_INTERNAL_EVENT_END = BIT(3),
+	TE_V1_NOTIF_HOST_FRAG_START = BIT(4),
+	TE_V1_NOTIF_HOST_FRAG_END = BIT(5),
+	TE_V1_NOTIF_INTERNAL_FRAG_START = BIT(6),
+	TE_V1_NOTIF_INTERNAL_FRAG_END = BIT(7),
+}; /* MAC_EVENT_ACTION_API_E_VER_2 */
+
+/* Time event - defines for command API */
+
+/*
+ * @TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed.
+ * @TE_V2_FRAG_SINGLE: fragmentation of the time event is allowed, but only
+ *	the first fragment is scheduled.
+ * @TE_V2_FRAG_DUAL: fragmentation of the time event is allowed, but only
+ *	the first 2 fragments are scheduled.
+ * @TE_V2_FRAG_ENDLESS: fragmentation of the time event is allowed, and any
+ *	number of fragments are valid.
+ *
+ * Other than the constants defined above, specifying a fragmentation value 'x'
+ * means that the event can be fragmented but only the first 'x' will be
+ * scheduled.
+ */
+enum {
+	TE_V2_FRAG_NONE = 0,
+	TE_V2_FRAG_SINGLE = 1,
+	TE_V2_FRAG_DUAL = 2,
+	TE_V2_FRAG_MAX = 0xfe,
+	TE_V2_FRAG_ENDLESS = 0xff
+};
+
+/* Repeat the time event endlessly (until removed) */
+#define TE_V2_REPEAT_ENDLESS	0xff
+/* If a Time Event has bounded repetitions, this is the maximal value */
+#define TE_V2_REPEAT_MAX	0xfe
+
+#define TE_V2_PLACEMENT_POS	12
+#define TE_V2_ABSENCE_POS	15
+
+/**
+ * enum iwl_time_event_policy - Time event policy values
+ * A notification (both event and fragment) includes a status indicating
+ * whether the FW was able to schedule the event or not. For fragment
+ * start/end notification the status is always success. There is no start/end
+ * fragment notification for monolithic events.
+ *
+ * @TE_V2_DEFAULT_POLICY: independent, social, present, unnoticeable
+ * @TE_V2_NOTIF_HOST_EVENT_START: request/receive notification on event start
+ * @TE_V2_NOTIF_HOST_EVENT_END: request/receive notification on event end
+ * @TE_V2_NOTIF_INTERNAL_EVENT_START: internal FW use
+ * @TE_V2_NOTIF_INTERNAL_EVENT_END: internal FW use.
+ * @TE_V2_NOTIF_HOST_FRAG_START: request/receive notification on frag start
+ * @TE_V2_NOTIF_HOST_FRAG_END: request/receive notification on frag end
+ * @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use.
+ * @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use.
+ * @T2_V2_START_IMMEDIATELY: start time event immediately
+ * @TE_V2_DEP_OTHER: depends on another time event
+ * @TE_V2_DEP_TSF: depends on a specific time
+ * @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of the same MAC
+ * @TE_V2_ABSENCE: whether we are present or absent during the Time Event.
+ */
+enum iwl_time_event_policy {
+	TE_V2_DEFAULT_POLICY = 0x0,
+
+	/* notifications (event start/stop, fragment start/stop) */
+	TE_V2_NOTIF_HOST_EVENT_START = BIT(0),
+	TE_V2_NOTIF_HOST_EVENT_END = BIT(1),
+	TE_V2_NOTIF_INTERNAL_EVENT_START = BIT(2),
+	TE_V2_NOTIF_INTERNAL_EVENT_END = BIT(3),
+
+	TE_V2_NOTIF_HOST_FRAG_START = BIT(4),
+	TE_V2_NOTIF_HOST_FRAG_END = BIT(5),
+	TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6),
+	TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7),
+	T2_V2_START_IMMEDIATELY = BIT(11),
+
+	/* placement characteristics */
+	TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS),
+	TE_V2_DEP_TSF = BIT(TE_V2_PLACEMENT_POS + 1),
+	TE_V2_EVENT_SOCIOPATHIC = BIT(TE_V2_PLACEMENT_POS + 2),
+
+	/* are we present or absent during the Time Event. */
+	TE_V2_ABSENCE = BIT(TE_V2_ABSENCE_POS),
+};
+
+/**
+ * struct iwl_time_event_cmd - configuring Time Events
+ * with struct MAC_TIME_EVENT_DATA_API_S_VER_2 (see also version 1,
+ * determined by IWL_UCODE_TLV_FLAGS)
+ * ( TIME_EVENT_CMD = 0x29 )
+ * @id_and_color: ID and color of the relevant MAC,
+ *	&enum iwl_ctxt_id_and_color
+ * @action: action to perform, one of &enum iwl_ctxt_action
+ * @id: this field has two meanings, depending on the action:
+ *	If the action is ADD, then it means the type of event to add.
+ *	For all other actions it is the unique event ID assigned when the
+ *	event was added by the FW.
+ * @apply_time: When to start the Time Event (in GP2)
+ * @max_delay: maximum delay to event's start (apply time), in TU
+ * @depends_on: the unique ID of the event we depend on (if any)
+ * @interval: interval between repetitions, in TU
+ * @duration: duration of event in TU
+ * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS
+ * @max_frags: maximal number of fragments the Time Event can be divided into
+ * @policy: defines whether uCode shall notify the host or other uCode modules
+ *	on event and/or fragment start and/or end; placement is selected
+ *	using one of TE_INDEPENDENT, TE_DEP_OTHER, TE_DEP_TSF or
+ *	TE_EVENT_SOCIOPATHIC, presence using TE_ABSENCE, and notifications
+ *	using TE_NOTIF_*; see &enum iwl_time_event_policy
+ */
+struct iwl_time_event_cmd {
+	/* COMMON_INDEX_HDR_API_S_VER_1 */
+	__le32 id_and_color;
+	__le32 action;
+	__le32 id;
+	/* MAC_TIME_EVENT_DATA_API_S_VER_2 */
+	__le32 apply_time;
+	__le32 max_delay;
+	__le32 depends_on;
+	__le32 interval;
+	__le32 duration;
+	u8 repeat;
+	u8 max_frags;
+	__le16 policy;
+} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_time_event_resp - response structure to iwl_time_event_cmd
+ * @status: bit 0 indicates success, all others specify errors
+ * @id: the Time Event type
+ * @unique_id: the unique ID assigned (in ADD) or given (others) to the TE
+ * @id_and_color: ID and color of the relevant MAC,
+ *	&enum iwl_ctxt_id_and_color
+ */
+struct iwl_time_event_resp {
+	__le32 status;
+	__le32 id;
+	__le32 unique_id;
+	__le32 id_and_color;
+} __packed; /* MAC_TIME_EVENT_RSP_API_S_VER_1 */
+
+/**
+ * struct iwl_time_event_notif - notifications of time event start/stop
+ * ( TIME_EVENT_NOTIFICATION = 0x2a )
+ * @timestamp: action timestamp in GP2
+ * @session_id: session's unique id
+ * @unique_id: unique id of the Time Event itself
+ * @id_and_color: ID and color of the relevant MAC
+ * @action: &enum iwl_time_event_policy
+ * @status: true if scheduled, false otherwise (not executed)
+ */
+struct iwl_time_event_notif {
+	__le32 timestamp;
+	__le32 session_id;
+	__le32 unique_id;
+	__le32 id_and_color;
+	__le32 action;
+	__le32 status;
+} __packed; /* MAC_TIME_EVENT_NTFY_API_S_VER_1 */
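As a rough illustration of how an op-mode could use this command, a minimal sketch follows; it is not part of this patch. The helper name and the TU values are made up, while iwl_mvm_send_cmd_pdu(), TIME_EVENT_CMD, FW_CTXT_ACTION_ADD and the TE_* constants are existing driver symbols:

static int iwl_mvm_protect_assoc_sketch(struct iwl_mvm *mvm, u32 mac_id_color)
{
	struct iwl_time_event_cmd cmd = {};

	cmd.id_and_color = cpu_to_le32(mac_id_color);
	cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	/* since the action is ADD, @id carries the event type */
	cmd.id = cpu_to_le32(TE_BSS_STA_ASSOC);
	cmd.apply_time = cpu_to_le32(0);
	cmd.max_delay = cpu_to_le32(500);	/* TU, placeholder */
	cmd.duration = cpu_to_le32(600);	/* TU, placeholder */
	cmd.repeat = 1;
	cmd.max_frags = TE_V2_FRAG_NONE;
	cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				 TE_V2_NOTIF_HOST_EVENT_END |
				 T2_V2_START_IMMEDIATELY);

	return iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
				    sizeof(cmd), &cmd);
}

The firmware answers with struct iwl_time_event_resp carrying the unique ID, which the driver must keep to REMOVE the event later.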
ROC command + * + * Command requests the firmware to create a time event for a certain duration + * and remain on the given channel. This is done by using the Aux framework in + * the FW. + * The command was first used for Hot Spot issues - but can be used regardless + * to Hot Spot. + * + * ( HOT_SPOT_CMD 0x53 ) + * + * @id_and_color: ID and color of the MAC + * @action: action to perform, one of FW_CTXT_ACTION_* + * @event_unique_id: If the action FW_CTXT_ACTION_REMOVE then the + * event_unique_id should be the id of the time event assigned by ucode. + * Otherwise ignore the event_unique_id. + * @sta_id_and_color: station id and color, resumed during "Remain On Channel" + * activity. + * @channel_info: channel info + * @node_addr: Our MAC Address + * @reserved: reserved for alignment + * @apply_time: GP2 value to start (should always be the current GP2 value) + * @apply_time_max_delay: Maximum apply time delay value in TU. Defines max + * time by which start of the event is allowed to be postponed. + * @duration: event duration in TU To calculate event duration: + * timeEventDuration = min(duration, remainingQuota) + */ +struct iwl_hs20_roc_req { + /* COMMON_INDEX_HDR_API_S_VER_1 hdr */ + __le32 id_and_color; + __le32 action; + __le32 event_unique_id; + __le32 sta_id_and_color; + struct iwl_fw_channel_info channel_info; + u8 node_addr[ETH_ALEN]; + __le16 reserved; + __le32 apply_time; + __le32 apply_time_max_delay; + __le32 duration; +} __packed; /* HOT_SPOT_CMD_API_S_VER_1 */ + +/* + * values for AUX ROC result values + */ +enum iwl_mvm_hot_spot { + HOT_SPOT_RSP_STATUS_OK, + HOT_SPOT_RSP_STATUS_TOO_MANY_EVENTS, + HOT_SPOT_MAX_NUM_OF_SESSIONS, +}; + +/* + * Aux ROC command response + * + * In response to iwl_hs20_roc_req the FW sends this command to notify the + * driver the uid of the timevent. + * + * ( HOT_SPOT_CMD 0x53 ) + * + * @event_unique_id: Unique ID of time event assigned by ucode + * @status: Return status 0 is success, all the rest used for specific errors + */ +struct iwl_hs20_roc_res { + __le32 event_unique_id; + __le32 status; +} __packed; /* HOT_SPOT_RSP_API_S_VER_1 */ + +#endif /* __iwl_fw_api_time_event_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tof.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tof.h index 8658a983c463..7328a1606146 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tof.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tof.h @@ -16,11 +16,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -60,8 +55,8 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
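A hedged sketch of an Aux ROC request, again not from the patch itself: the wrapper below is hypothetical, while PHY_BAND_24, PHY_VHT_CHANNEL_MODE20 (from phy-ctxt.h), HOT_SPOT_CMD, the mvm aux station and iwl_mvm_send_cmd_pdu() are existing symbols; the delay value is a placeholder:

static int iwl_mvm_aux_roc_sketch(struct iwl_mvm *mvm, u32 mac_id_color,
				  const u8 *node_addr, u8 channel,
				  u32 duration_tu)
{
	struct iwl_hs20_roc_req cmd = {};

	cmd.id_and_color = cpu_to_le32(mac_id_color);
	cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	cmd.sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id);
	cmd.channel_info.band = PHY_BAND_24;		/* 2.4 GHz */
	cmd.channel_info.channel = channel;
	cmd.channel_info.width = PHY_VHT_CHANNEL_MODE20;
	memcpy(cmd.node_addr, node_addr, ETH_ALEN);
	cmd.apply_time = cpu_to_le32(0);		/* now */
	cmd.apply_time_max_delay = cpu_to_le32(200);	/* TU, placeholder */
	cmd.duration = cpu_to_le32(duration_tu);

	return iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, sizeof(cmd), &cmd);
}

The uid returned in struct iwl_hs20_roc_res is what a later FW_CTXT_ACTION_REMOVE must pass back in event_unique_id.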
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tof.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tof.h
index 8658a983c463..7328a1606146 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tof.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tof.h
@@ -16,11 +16,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
@@ -60,8 +55,8 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  *****************************************************************************/
-#ifndef __fw_api_tof_h__
-#define __fw_api_tof_h__
+#ifndef __iwl_fw_api_tof_h__
+#define __iwl_fw_api_tof_h__
 
 /* ToF sub-group command IDs */
 enum iwl_mvm_tof_sub_grp_ids {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index 97d7eed32622..95dbed609f3e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -17,11 +17,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
@@ -62,8 +57,8 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *****************************************************************************/
 
-#ifndef __fw_api_tx_h__
-#define __fw_api_tx_h__
+#ifndef __iwl_fw_api_tx_h__
+#define __iwl_fw_api_tx_h__
 
 /**
  * enum iwl_tx_flags - bitmasks for tx_flags in TX command
@@ -771,7 +766,8 @@ struct iwl_mac_beacon_cmd_v6 {
 } __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_6 */
 
 /**
- * struct iwl_mac_beacon_cmd_data - data of beacon template with offloaded CSA
+ * struct iwl_mac_beacon_cmd_v7 - beacon template command with offloaded CSA
+ * @tx: the tx commands associated with the beacon frame
  * @template_id: currently equal to the mac context id of the coresponding
  *	mac.
  * @tim_idx: the offset of the tim IE in the beacon
@@ -780,23 +776,14 @@ struct iwl_mac_beacon_cmd_v6 {
  * @csa_offset: offset to the CSA IE if present
  * @frame: the template of the beacon frame
  */
-struct iwl_mac_beacon_cmd_data {
+struct iwl_mac_beacon_cmd_v7 {
+	struct iwl_tx_cmd tx;
 	__le32 template_id;
 	__le32 tim_idx;
 	__le32 tim_size;
 	__le32 ecsa_offset;
 	__le32 csa_offset;
 	struct ieee80211_hdr frame[0];
-};
-
-/**
- * struct iwl_mac_beacon_cmd_v7 - beacon template command with offloaded CSA
- * @tx: the tx commands associated with the beacon frame
- * @data: see &iwl_mac_beacon_cmd_data
- */
-struct iwl_mac_beacon_cmd_v7 {
-	struct iwl_tx_cmd tx;
-	struct iwl_mac_beacon_cmd_data data;
 } __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_7 */
 
 /**
@@ -804,13 +791,24 @@ struct iwl_mac_beacon_cmd_v7 {
 * @byte_cnt: byte count of the beacon frame
 * @flags: for future use
 * @reserved: reserved
- * @data: see &iwl_mac_beacon_cmd_data
+ * @template_id: currently equal to the mac context id of the corresponding
+ *	mac.
+ * @tim_idx: the offset of the tim IE in the beacon
+ * @tim_size: the length of the tim IE
+ * @ecsa_offset: offset to the ECSA IE if present
+ * @csa_offset: offset to the CSA IE if present
+ * @frame: the template of the beacon frame
  */
 struct iwl_mac_beacon_cmd {
 	__le16 byte_cnt;
 	__le16 flags;
 	__le64 reserved;
-	struct iwl_mac_beacon_cmd_data data;
+	__le32 template_id;
+	__le32 tim_idx;
+	__le32 tim_size;
+	__le32 ecsa_offset;
+	__le32 csa_offset;
+	struct ieee80211_hdr frame[0];
 } __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_8 */
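Because frame[] is a variable-length trailer, the beacon template is sent as a second command fragment after the fixed part. A minimal sketch of that pattern, assuming the mvm context (the helper name and the zeroed TIM offsets are illustrative; BEACON_TEMPLATE_CMD, struct iwl_host_cmd, IWL_HCMD_DFL_DUP and iwl_mvm_send_cmd() are existing symbols):

static int iwl_mvm_beacon_tmpl_sketch(struct iwl_mvm *mvm, u32 mac_id,
				      struct sk_buff *beacon)
{
	struct iwl_mac_beacon_cmd cmd = {};
	struct iwl_host_cmd hcmd = {
		.id = BEACON_TEMPLATE_CMD,
		.flags = CMD_ASYNC,
	};

	cmd.byte_cnt = cpu_to_le16((u16)beacon->len);
	cmd.template_id = cpu_to_le32(mac_id);
	cmd.tim_idx = cpu_to_le32(0);	/* TIM offset, placeholder */
	cmd.tim_size = cpu_to_le32(0);

	/* fixed part first, then the frame as a duplicated fragment */
	hcmd.len[0] = sizeof(cmd);
	hcmd.data[0] = &cmd;
	hcmd.len[1] = beacon->len;
	hcmd.data[1] = beacon->data;
	hcmd.dataflags[1] = IWL_HCMD_DFL_DUP;

	return iwl_mvm_send_cmd(mvm, &hcmd);
}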
 
 struct iwl_beacon_notif {
@@ -914,4 +912,4 @@ struct iwl_scd_txq_cfg_rsp {
 	u8 scd_queue;
 } __packed; /* SCD_QUEUE_CFG_RSP_API_S_VER_1 */
 
-#endif /* __fw_api_tx_h__ */
+#endif /* __iwl_fw_api_tx_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
new file mode 100644
index 000000000000..87b4434224a1
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
@@ -0,0 +1,163 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the
+ *   distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ *   contributors may be used to endorse or promote products derived
+ *   from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_fw_api_txq_h__
+#define __iwl_fw_api_txq_h__
+
+/*
+ * DQA queue numbers
+ *
+ * @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW
+ * @IWL_MVM_DQA_AUX_QUEUE: a queue reserved for aux frames
+ * @IWL_MVM_DQA_P2P_DEVICE_QUEUE: a queue reserved for P2P device frames
+ * @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames
+ * @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure
+ *	that we are never left without the possibility to connect to an AP.
+ * @IWL_MVM_DQA_MIN_MGMT_QUEUE: first TXQ in pool for MGMT and non-QOS frames.
+ *	Each MGMT queue is mapped to a single STA.
+ *	MGMT frames are frames that return true on ieee80211_is_mgmt().
+ * @IWL_MVM_DQA_MAX_MGMT_QUEUE: last TXQ in pool for MGMT frames
+ * @IWL_MVM_DQA_AP_PROBE_RESP_QUEUE: a queue reserved for P2P GO/SoftAP probe
+ *	responses
+ * @IWL_MVM_DQA_MIN_DATA_QUEUE: first TXQ in pool for DATA frames.
+ *	DATA frames are intended for !ieee80211_is_mgmt() frames, but if
+ *	the MGMT TXQ pool is exhausted, mgmt frames can be sent on DATA queues
+ *	as well
+ * @IWL_MVM_DQA_MAX_DATA_QUEUE: last TXQ in pool for DATA frames
+ */
+enum iwl_mvm_dqa_txq {
+	IWL_MVM_DQA_CMD_QUEUE = 0,
+	IWL_MVM_DQA_AUX_QUEUE = 1,
+	IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2,
+	IWL_MVM_DQA_GCAST_QUEUE = 3,
+	IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4,
+	IWL_MVM_DQA_MIN_MGMT_QUEUE = 5,
+	IWL_MVM_DQA_MAX_MGMT_QUEUE = 8,
+	IWL_MVM_DQA_AP_PROBE_RESP_QUEUE = 9,
+	IWL_MVM_DQA_MIN_DATA_QUEUE = 10,
+	IWL_MVM_DQA_MAX_DATA_QUEUE = 31,
+};
+
+enum iwl_mvm_tx_fifo {
+	IWL_MVM_TX_FIFO_BK = 0,
+	IWL_MVM_TX_FIFO_BE,
+	IWL_MVM_TX_FIFO_VI,
+	IWL_MVM_TX_FIFO_VO,
+	IWL_MVM_TX_FIFO_MCAST = 5,
+	IWL_MVM_TX_FIFO_CMD = 7,
+};
+
+enum iwl_gen2_tx_fifo {
+	IWL_GEN2_TX_FIFO_CMD = 0,
+	IWL_GEN2_EDCA_TX_FIFO_BK,
+	IWL_GEN2_EDCA_TX_FIFO_BE,
+	IWL_GEN2_EDCA_TX_FIFO_VI,
+	IWL_GEN2_EDCA_TX_FIFO_VO,
+	IWL_GEN2_TRIG_TX_FIFO_BK,
+	IWL_GEN2_TRIG_TX_FIFO_BE,
+	IWL_GEN2_TRIG_TX_FIFO_VI,
+	IWL_GEN2_TRIG_TX_FIFO_VO,
+};
+
+/**
+ * enum iwl_tx_queue_cfg_actions - TXQ config options
+ * @TX_QUEUE_CFG_ENABLE_QUEUE: enable a queue
+ * @TX_QUEUE_CFG_TFD_SHORT_FORMAT: use short TFD format
+ */
+enum iwl_tx_queue_cfg_actions {
+	TX_QUEUE_CFG_ENABLE_QUEUE = BIT(0),
+	TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1),
+};
+
+/**
+ * struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command
+ * @sta_id: station id
+ * @tid: tid of the queue
+ * @flags: see &enum iwl_tx_queue_cfg_actions
+ * @cb_size: size of TFD cyclic buffer. Value is exponent - 3.
+ *	Minimum value 0 (8 TFDs), maximum value 5 (256 TFDs)
+ * @byte_cnt_addr: address of byte count table
+ * @tfdq_addr: address of TFD circular buffer
+ */
+struct iwl_tx_queue_cfg_cmd {
+	u8 sta_id;
+	u8 tid;
+	__le16 flags;
+	__le32 cb_size;
+	__le64 byte_cnt_addr;
+	__le64 tfdq_addr;
+} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_tx_queue_cfg_rsp - response to txq hw scheduler config
+ * @queue_number: queue number assigned to this RA/TID
+ * @flags: set on failure
+ * @write_pointer: initial value for write pointer
+ * @reserved: reserved
+ */
+struct iwl_tx_queue_cfg_rsp {
+	__le16 queue_number;
+	__le16 flags;
+	__le16 write_pointer;
+	__le16 reserved;
+} __packed; /* TX_QUEUE_CFG_RSP_API_S_VER_2 */
+
+#endif /* __iwl_fw_api_txq_h__ */
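The cb_size encoding is easy to get wrong, so a small sketch of filling this command follows (editor's illustration; the helper name and DMA addresses are placeholders, the semantics come straight from the kernel-doc above):

static void iwl_txq_cfg_encode_sketch(struct iwl_tx_queue_cfg_cmd *cmd,
				      u8 sta_id, u8 tid,
				      dma_addr_t bc_tbl_dma,
				      dma_addr_t tfdq_dma)
{
	cmd->sta_id = sta_id;
	cmd->tid = tid;
	cmd->flags = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE);
	/* cb_size holds the exponent minus 3: 5 -> 2^(5 + 3) = 256 TFDs */
	cmd->cb_size = cpu_to_le32(5);
	cmd->byte_cnt_addr = cpu_to_le64(bc_tbl_dma);
	cmd->tfdq_addr = cpu_to_le64(tfdq_dma);
}

The response's queue_number is the queue the scheduler actually assigned for the RA/TID pair; a non-zero flags field signals failure.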
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/common_rx.c b/drivers/net/wireless/intel/iwlwifi/fw/common_rx.c
new file mode 100644
index 000000000000..6f75985eea66
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/common_rx.c
@@ -0,0 +1,88 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the
+ *   distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ *   contributors may be used to endorse or promote products derived
+ *   from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "iwl-drv.h"
+#include "runtime.h"
+#include "fw/api/commands.h"
+#include "fw/api/alive.h"
+
+static void iwl_fwrt_fseq_ver_mismatch(struct iwl_fw_runtime *fwrt,
+				       struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_fseq_ver_mismatch_ntf *fseq = (void *)pkt->data;
+
+	IWL_ERR(fwrt, "FSEQ version mismatch (aux: %d, wifi: %d)\n",
+		__le32_to_cpu(fseq->aux_read_fseq_ver),
+		__le32_to_cpu(fseq->wifi_fseq_ver));
+}
+
+void iwl_fwrt_handle_notification(struct iwl_fw_runtime *fwrt,
+				  struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	u32 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
+
+	switch (cmd) {
+	case WIDE_ID(SYSTEM_GROUP, FSEQ_VER_MISMATCH_NTF):
+		iwl_fwrt_fseq_ver_mismatch(fwrt, rxb);
+		break;
+	default:
+		break;
+	}
+}
+IWL_EXPORT_SYMBOL(iwl_fwrt_handle_notification);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 1602b360353c..77245fcba996 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -63,22 +63,37 @@
  *
  *****************************************************************************/
 #include <linux/devcoredump.h>
-
-#include "fw-dbg.h"
+#include "iwl-drv.h"
+#include "runtime.h"
+#include "dbg.h"
 #include "iwl-io.h"
-#include "mvm.h"
 #include "iwl-prph.h"
 #include "iwl-csr.h"
 
+/**
+ * struct iwl_fw_dump_ptrs - set of pointers needed for the fw-error-dump
+ *
+ * @fwrt_ptr: pointer to the buffer coming from fwrt
+ * @trans_ptr: pointer to struct %iwl_trans_dump_data which contains the
+ *	transport's data.
+ * @trans_len: length of the valid data in trans_ptr + * @fwrt_len: length of the valid data in fwrt_ptr + */ +struct iwl_fw_dump_ptrs { + struct iwl_trans_dump_data *trans_ptr; + void *fwrt_ptr; + u32 fwrt_len; +}; + #define RADIO_REG_MAX_READ 0x2ad -static void iwl_mvm_read_radio_reg(struct iwl_mvm *mvm, - struct iwl_fw_error_dump_data **dump_data) +static void iwl_read_radio_regs(struct iwl_fw_runtime *fwrt, + struct iwl_fw_error_dump_data **dump_data) { u8 *pos = (void *)(*dump_data)->data; unsigned long flags; int i; - if (!iwl_trans_grab_nic_access(mvm->trans, &flags)) + if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) return; (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RADIO_REG); @@ -88,20 +103,20 @@ static void iwl_mvm_read_radio_reg(struct iwl_mvm *mvm, u32 rd_cmd = RADIO_RSP_RD_CMD; rd_cmd |= i << RADIO_RSP_ADDR_POS; - iwl_write_prph_no_grab(mvm->trans, RSP_RADIO_CMD, rd_cmd); - *pos = (u8)iwl_read_prph_no_grab(mvm->trans, RSP_RADIO_RDDAT); + iwl_write_prph_no_grab(fwrt->trans, RSP_RADIO_CMD, rd_cmd); + *pos = (u8)iwl_read_prph_no_grab(fwrt->trans, RSP_RADIO_RDDAT); pos++; } *dump_data = iwl_fw_error_next_data(*dump_data); - iwl_trans_release_nic_access(mvm->trans, &flags); + iwl_trans_release_nic_access(fwrt->trans, &flags); } -static void iwl_mvm_dump_rxf(struct iwl_mvm *mvm, - struct iwl_fw_error_dump_data **dump_data, - int size, u32 offset, int fifo_num) +static void iwl_fwrt_dump_rxf(struct iwl_fw_runtime *fwrt, + struct iwl_fw_error_dump_data **dump_data, + int size, u32 offset, int fifo_num) { struct iwl_fw_error_dump_fifo *fifo_hdr; u32 *fifo_data; @@ -122,41 +137,41 @@ static void iwl_mvm_dump_rxf(struct iwl_mvm *mvm, fifo_hdr->fifo_num = cpu_to_le32(fifo_num); fifo_hdr->available_bytes = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_RD_D_SPACE + offset)); fifo_hdr->wr_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_RD_WR_PTR + offset)); fifo_hdr->rd_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_RD_RD_PTR + offset)); fifo_hdr->fence_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_RD_FENCE_PTR + offset)); fifo_hdr->fence_mode = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_SET_FENCE_MODE + offset)); /* Lock fence */ - iwl_trans_write_prph(mvm->trans, RXF_SET_FENCE_MODE + offset, 0x1); + iwl_trans_write_prph(fwrt->trans, RXF_SET_FENCE_MODE + offset, 0x1); /* Set fence pointer to the same place like WR pointer */ - iwl_trans_write_prph(mvm->trans, RXF_LD_WR2FENCE + offset, 0x1); + iwl_trans_write_prph(fwrt->trans, RXF_LD_WR2FENCE + offset, 0x1); /* Set fence offset */ - iwl_trans_write_prph(mvm->trans, + iwl_trans_write_prph(fwrt->trans, RXF_LD_FENCE_OFFSET_ADDR + offset, 0x0); /* Read FIFO */ fifo_len /= sizeof(u32); /* Size in DWORDS */ for (i = 0; i < fifo_len; i++) - fifo_data[i] = iwl_trans_read_prph(mvm->trans, + fifo_data[i] = iwl_trans_read_prph(fwrt->trans, RXF_FIFO_RD_FENCE_INC + offset); *dump_data = iwl_fw_error_next_data(*dump_data); } -static void iwl_mvm_dump_txf(struct iwl_mvm *mvm, - struct iwl_fw_error_dump_data **dump_data, - int size, u32 offset, int fifo_num) +static void iwl_fwrt_dump_txf(struct iwl_fw_runtime *fwrt, + struct iwl_fw_error_dump_data **dump_data, + int size, u32 offset, int fifo_num) { struct iwl_fw_error_dump_fifo *fifo_hdr; u32 *fifo_data; @@ -177,91 +192,91 
@@ static void iwl_mvm_dump_txf(struct iwl_mvm *mvm, fifo_hdr->fifo_num = cpu_to_le32(fifo_num); fifo_hdr->available_bytes = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_FIFO_ITEM_CNT + offset)); fifo_hdr->wr_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_WR_PTR + offset)); fifo_hdr->rd_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_RD_PTR + offset)); fifo_hdr->fence_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_FENCE_PTR + offset)); fifo_hdr->fence_mode = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_LOCK_FENCE + offset)); /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */ - iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR + offset, + iwl_trans_write_prph(fwrt->trans, TXF_READ_MODIFY_ADDR + offset, TXF_WR_PTR + offset); /* Dummy-read to advance the read pointer to the head */ - iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA + offset); + iwl_trans_read_prph(fwrt->trans, TXF_READ_MODIFY_DATA + offset); /* Read FIFO */ fifo_len /= sizeof(u32); /* Size in DWORDS */ for (i = 0; i < fifo_len; i++) - fifo_data[i] = iwl_trans_read_prph(mvm->trans, + fifo_data[i] = iwl_trans_read_prph(fwrt->trans, TXF_READ_MODIFY_DATA + offset); *dump_data = iwl_fw_error_next_data(*dump_data); } -static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm, - struct iwl_fw_error_dump_data **dump_data) +static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt, + struct iwl_fw_error_dump_data **dump_data) { struct iwl_fw_error_dump_fifo *fifo_hdr; - struct iwl_mvm_shared_mem_cfg *cfg = &mvm->smem_cfg; + struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg; u32 *fifo_data; u32 fifo_len; unsigned long flags; int i, j; - if (!iwl_trans_grab_nic_access(mvm->trans, &flags)) + if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) return; /* Pull RXF1 */ - iwl_mvm_dump_rxf(mvm, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0); + iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0); /* Pull RXF2 */ - iwl_mvm_dump_rxf(mvm, dump_data, cfg->rxfifo2_size, - RXF_DIFF_FROM_PREV, 1); + iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size, + RXF_DIFF_FROM_PREV, 1); /* Pull LMAC2 RXF1 */ - if (mvm->smem_cfg.num_lmacs > 1) - iwl_mvm_dump_rxf(mvm, dump_data, cfg->lmac[1].rxfifo1_size, - LMAC2_PRPH_OFFSET, 2); + if (fwrt->smem_cfg.num_lmacs > 1) + iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[1].rxfifo1_size, + LMAC2_PRPH_OFFSET, 2); /* Pull TXF data from LMAC1 */ - for (i = 0; i < mvm->smem_cfg.num_txfifo_entries; i++) { + for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) { /* Mark the number of TXF we're pulling now */ - iwl_trans_write_prph(mvm->trans, TXF_LARC_NUM, i); - iwl_mvm_dump_txf(mvm, dump_data, cfg->lmac[0].txfifo_size[i], - 0, i); + iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i); + iwl_fwrt_dump_txf(fwrt, dump_data, cfg->lmac[0].txfifo_size[i], + 0, i); } /* Pull TXF data from LMAC2 */ - if (mvm->smem_cfg.num_lmacs > 1) { - for (i = 0; i < mvm->smem_cfg.num_txfifo_entries; i++) { + if (fwrt->smem_cfg.num_lmacs > 1) { + for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) { /* Mark the number of TXF we're pulling now */ - iwl_trans_write_prph(mvm->trans, + iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM + LMAC2_PRPH_OFFSET, i); - iwl_mvm_dump_txf(mvm, dump_data, - cfg->lmac[1].txfifo_size[i], - LMAC2_PRPH_OFFSET, - i + 
cfg->num_txfifo_entries); + iwl_fwrt_dump_txf(fwrt, dump_data, + cfg->lmac[1].txfifo_size[i], + LMAC2_PRPH_OFFSET, + i + cfg->num_txfifo_entries); } } - if (fw_has_capa(&mvm->fw->ucode_capa, + if (fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { /* Pull UMAC internal TXF data from all TXFs */ for (i = 0; - i < ARRAY_SIZE(mvm->smem_cfg.internal_txfifo_size); + i < ARRAY_SIZE(fwrt->smem_cfg.internal_txfifo_size); i++) { fifo_hdr = (void *)(*dump_data)->data; fifo_data = (void *)fifo_hdr->data; - fifo_len = mvm->smem_cfg.internal_txfifo_size[i]; + fifo_len = fwrt->smem_cfg.internal_txfifo_size[i]; /* No need to try to read the data if the length is 0 */ if (fifo_len == 0) @@ -276,52 +291,45 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm, fifo_hdr->fifo_num = cpu_to_le32(i); /* Mark the number of TXF we're pulling now */ - iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i + - mvm->smem_cfg.num_txfifo_entries); + iwl_trans_write_prph(fwrt->trans, TXF_CPU2_NUM, i + + fwrt->smem_cfg.num_txfifo_entries); fifo_hdr->available_bytes = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_FIFO_ITEM_CNT)); fifo_hdr->wr_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_WR_PTR)); fifo_hdr->rd_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_RD_PTR)); fifo_hdr->fence_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_FENCE_PTR)); fifo_hdr->fence_mode = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_LOCK_FENCE)); /* Set TXF_CPU2_READ_MODIFY_ADDR to TXF_CPU2_WR_PTR */ - iwl_trans_write_prph(mvm->trans, + iwl_trans_write_prph(fwrt->trans, TXF_CPU2_READ_MODIFY_ADDR, TXF_CPU2_WR_PTR); /* Dummy-read to advance the read pointer to head */ - iwl_trans_read_prph(mvm->trans, + iwl_trans_read_prph(fwrt->trans, TXF_CPU2_READ_MODIFY_DATA); /* Read FIFO */ fifo_len /= sizeof(u32); /* Size in DWORDS */ for (j = 0; j < fifo_len; j++) fifo_data[j] = - iwl_trans_read_prph(mvm->trans, + iwl_trans_read_prph(fwrt->trans, TXF_CPU2_READ_MODIFY_DATA); *dump_data = iwl_fw_error_next_data(*dump_data); } } - iwl_trans_release_nic_access(mvm->trans, &flags); -} - -void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm) -{ - if (mvm->fw_dump_desc != &iwl_mvm_dump_desc_assert) - kfree(mvm->fw_dump_desc); - mvm->fw_dump_desc = NULL; + iwl_trans_release_nic_access(fwrt->trans, &flags); } #define IWL8260_ICCM_OFFSET 0x44000 /* Only for B-step */ @@ -531,37 +539,32 @@ static struct scatterlist *alloc_sgtable(int size) return table; } -void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) +void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) { struct iwl_fw_error_dump_file *dump_file; struct iwl_fw_error_dump_data *dump_data; struct iwl_fw_error_dump_info *dump_info; struct iwl_fw_error_dump_mem *dump_mem; struct iwl_fw_error_dump_trigger_desc *dump_trig; - struct iwl_mvm_dump_ptrs *fw_error_dump; + struct iwl_fw_dump_ptrs *fw_error_dump; struct scatterlist *sg_dump_data; u32 sram_len, sram_ofs; - const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = mvm->fw->dbg_mem_tlv; + const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = fwrt->fw->dbg_mem_tlv; u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0; - u32 smem_len = mvm->fw->n_dbg_mem_tlv ? 0 : mvm->cfg->smem_len; - u32 sram2_len = mvm->fw->n_dbg_mem_tlv ? 
0 : mvm->cfg->dccm2_len; + u32 smem_len = fwrt->fw->n_dbg_mem_tlv ? 0 : fwrt->trans->cfg->smem_len; + u32 sram2_len = fwrt->fw->n_dbg_mem_tlv ? + 0 : fwrt->trans->cfg->dccm2_len; bool monitor_dump_only = false; int i; - if (!IWL_MVM_COLLECT_FW_ERR_DUMP && - !mvm->trans->dbg_dest_tlv) - return; - - lockdep_assert_held(&mvm->mutex); - /* there's no point in fw dump if the bus is dead */ - if (test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) { - IWL_ERR(mvm, "Skip fw error dump since bus is dead\n"); + if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) { + IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n"); goto out; } - if (mvm->fw_dump_trig && - mvm->fw_dump_trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY) + if (fwrt->dump.trig && + fwrt->dump.trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY) monitor_dump_only = true; fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL); @@ -569,20 +572,20 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) goto out; /* SRAM - include stack CCM if driver knows the values for it */ - if (!mvm->cfg->dccm_offset || !mvm->cfg->dccm_len) { + if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) { const struct fw_img *img; - img = &mvm->fw->img[mvm->cur_ucode]; + img = &fwrt->fw->img[fwrt->cur_fw_img]; sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset; sram_len = img->sec[IWL_UCODE_SECTION_DATA].len; } else { - sram_ofs = mvm->cfg->dccm_offset; - sram_len = mvm->cfg->dccm_len; + sram_ofs = fwrt->trans->cfg->dccm_offset; + sram_len = fwrt->trans->cfg->dccm_len; } /* reading RXF/TXF sizes */ - if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) { - struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->smem_cfg; + if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) { + struct iwl_fwrt_shared_mem_cfg *mem_cfg = &fwrt->smem_cfg; fifo_data_len = 0; @@ -621,7 +624,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) } } - if (fw_has_capa(&mvm->fw->ucode_capa, + if (fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { for (i = 0; i < ARRAY_SIZE(mem_cfg->internal_txfifo_size); @@ -638,7 +641,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) } /* Make room for PRPH registers */ - if (!mvm->trans->cfg->gen2) { + if (!fwrt->trans->cfg->gen2) { for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm); i++) { /* The range includes both boundaries */ @@ -652,7 +655,8 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) } } - if (!mvm->trans->cfg->gen2 && mvm->cfg->mq_rx_supported) { + if (!fwrt->trans->cfg->gen2 && + fwrt->trans->cfg->mq_rx_supported) { for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) { /* The range includes both boundaries */ @@ -666,7 +670,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) } } - if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) + if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ; } @@ -686,16 +690,16 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len; /* Make room for MEM segments */ - for (i = 0; i < mvm->fw->n_dbg_mem_tlv; i++) { + for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) { file_len += sizeof(*dump_data) + sizeof(*dump_mem) + le32_to_cpu(fw_dbg_mem[i].len); } /* Make room for fw's virtual image pages, if it exists */ - if (!mvm->trans->cfg->gen2 && - mvm->fw->img[mvm->cur_ucode].paging_mem_size && - mvm->fw_paging_db[0].fw_paging_block) - file_len += mvm->num_of_paging_blk * + if (!fwrt->trans->cfg->gen2 && + 
fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size && + fwrt->fw_paging_db[0].fw_paging_block) + file_len += fwrt->num_of_paging_blk * (sizeof(*dump_data) + sizeof(struct iwl_fw_error_dump_paging) + PAGING_BLOCK_SIZE); @@ -706,11 +710,11 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) sizeof(*dump_info); } - if (mvm->fw_dump_desc) + if (fwrt->dump.desc) file_len += sizeof(*dump_data) + sizeof(*dump_trig) + - mvm->fw_dump_desc->len; + fwrt->dump.desc->len; - if (!mvm->fw->n_dbg_mem_tlv) + if (!fwrt->fw->n_dbg_mem_tlv) file_len += sram_len + sizeof(*dump_mem); dump_file = vzalloc(file_len); @@ -719,7 +723,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) goto out; } - fw_error_dump->op_mode_ptr = dump_file; + fw_error_dump->fwrt_ptr = dump_file; dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER); dump_data = (void *)dump_file->data; @@ -728,32 +732,32 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) dump_data->len = cpu_to_le32(sizeof(*dump_info)); dump_info = (void *)dump_data->data; dump_info->device_family = - mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000 ? + fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 ? cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) : cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8); - dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(mvm->trans->hw_rev)); - memcpy(dump_info->fw_human_readable, mvm->fw->human_readable, + dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev)); + memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable, sizeof(dump_info->fw_human_readable)); - strncpy(dump_info->dev_human_readable, mvm->cfg->name, + strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name, sizeof(dump_info->dev_human_readable)); - strncpy(dump_info->bus_human_readable, mvm->dev->bus->name, + strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name, sizeof(dump_info->bus_human_readable)); dump_data = iwl_fw_error_next_data(dump_data); /* We only dump the FIFOs if the FW is in error state */ - if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) { - iwl_mvm_dump_fifos(mvm, &dump_data); + if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) { + iwl_fw_dump_fifos(fwrt, &dump_data); if (radio_len) - iwl_mvm_read_radio_reg(mvm, &dump_data); + iwl_read_radio_regs(fwrt, &dump_data); } - if (mvm->fw_dump_desc) { + if (fwrt->dump.desc) { dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO); dump_data->len = cpu_to_le32(sizeof(*dump_trig) + - mvm->fw_dump_desc->len); + fwrt->dump.desc->len); dump_trig = (void *)dump_data->data; - memcpy(dump_trig, &mvm->fw_dump_desc->trig_desc, - sizeof(*dump_trig) + mvm->fw_dump_desc->len); + memcpy(dump_trig, &fwrt->dump.desc->trig_desc, + sizeof(*dump_trig) + fwrt->dump.desc->len); dump_data = iwl_fw_error_next_data(dump_data); } @@ -762,18 +766,18 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) if (monitor_dump_only) goto dump_trans_data; - if (!mvm->fw->n_dbg_mem_tlv) { + if (!fwrt->fw->n_dbg_mem_tlv) { dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem)); dump_mem = (void *)dump_data->data; dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM); dump_mem->offset = cpu_to_le32(sram_ofs); - iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data, + iwl_trans_read_mem_bytes(fwrt->trans, sram_ofs, dump_mem->data, sram_len); dump_data = iwl_fw_error_next_data(dump_data); } - for (i = 0; i < mvm->fw->n_dbg_mem_tlv; i++) { + for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) { u32 len = le32_to_cpu(fw_dbg_mem[i].len); u32 ofs = 
le32_to_cpu(fw_dbg_mem[i].ofs); bool success; @@ -786,13 +790,13 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) switch (dump_mem->type & cpu_to_le32(FW_DBG_MEM_TYPE_MASK)) { case cpu_to_le32(FW_DBG_MEM_TYPE_REGULAR): - iwl_trans_read_mem_bytes(mvm->trans, ofs, + iwl_trans_read_mem_bytes(fwrt->trans, ofs, dump_mem->data, len); success = true; break; case cpu_to_le32(FW_DBG_MEM_TYPE_PRPH): - success = iwl_read_prph_block(mvm->trans, ofs, len, + success = iwl_read_prph_block(fwrt->trans, ofs, len, (void *)dump_mem->data); break; default: @@ -813,8 +817,9 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem)); dump_mem = (void *)dump_data->data; dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SMEM); - dump_mem->offset = cpu_to_le32(mvm->cfg->smem_offset); - iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->smem_offset, + dump_mem->offset = cpu_to_le32(fwrt->trans->cfg->smem_offset); + iwl_trans_read_mem_bytes(fwrt->trans, + fwrt->trans->cfg->smem_offset, dump_mem->data, smem_len); dump_data = iwl_fw_error_next_data(dump_data); } @@ -824,28 +829,29 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem)); dump_mem = (void *)dump_data->data; dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM); - dump_mem->offset = cpu_to_le32(mvm->cfg->dccm2_offset); - iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->dccm2_offset, + dump_mem->offset = cpu_to_le32(fwrt->trans->cfg->dccm2_offset); + iwl_trans_read_mem_bytes(fwrt->trans, + fwrt->trans->cfg->dccm2_offset, dump_mem->data, sram2_len); dump_data = iwl_fw_error_next_data(dump_data); } /* Dump fw's virtual image */ - if (!mvm->trans->cfg->gen2 && - mvm->fw->img[mvm->cur_ucode].paging_mem_size && - mvm->fw_paging_db[0].fw_paging_block) { - for (i = 1; i < mvm->num_of_paging_blk + 1; i++) { + if (!fwrt->trans->cfg->gen2 && + fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size && + fwrt->fw_paging_db[0].fw_paging_block) { + for (i = 1; i < fwrt->num_of_paging_blk + 1; i++) { struct iwl_fw_error_dump_paging *paging; struct page *pages = - mvm->fw_paging_db[i].fw_paging_block; - dma_addr_t addr = mvm->fw_paging_db[i].fw_paging_phys; + fwrt->fw_paging_db[i].fw_paging_block; + dma_addr_t addr = fwrt->fw_paging_db[i].fw_paging_phys; dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING); dump_data->len = cpu_to_le32(sizeof(*paging) + PAGING_BLOCK_SIZE); paging = (void *)dump_data->data; paging->index = cpu_to_le32(i); - dma_sync_single_for_cpu(mvm->trans->dev, addr, + dma_sync_single_for_cpu(fwrt->trans->dev, addr, PAGING_BLOCK_SIZE, DMA_BIDIRECTIONAL); memcpy(paging->data, page_address(pages), @@ -855,20 +861,20 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) } if (prph_len) { - iwl_dump_prph(mvm->trans, &dump_data, + iwl_dump_prph(fwrt->trans, &dump_data, iwl_prph_dump_addr_comm, ARRAY_SIZE(iwl_prph_dump_addr_comm)); - if (mvm->cfg->mq_rx_supported) - iwl_dump_prph(mvm->trans, &dump_data, + if (fwrt->trans->cfg->mq_rx_supported) + iwl_dump_prph(fwrt->trans, &dump_data, iwl_prph_dump_addr_9000, ARRAY_SIZE(iwl_prph_dump_addr_9000)); } dump_trans_data: - fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans, - mvm->fw_dump_trig); - fw_error_dump->op_mode_len = file_len; + fw_error_dump->trans_ptr = iwl_trans_dump_data(fwrt->trans, + fwrt->dump.trig); + fw_error_dump->fwrt_len = file_len; if (fw_error_dump->trans_ptr) file_len += fw_error_dump->trans_ptr->len; dump_file->file_len = cpu_to_le32(file_len); @@ -877,68 +883,72 @@ dump_trans_data: if 
(sg_dump_data) { sg_pcopy_from_buffer(sg_dump_data, sg_nents(sg_dump_data), - fw_error_dump->op_mode_ptr, - fw_error_dump->op_mode_len, 0); + fw_error_dump->fwrt_ptr, + fw_error_dump->fwrt_len, 0); if (fw_error_dump->trans_ptr) sg_pcopy_from_buffer(sg_dump_data, sg_nents(sg_dump_data), fw_error_dump->trans_ptr->data, fw_error_dump->trans_ptr->len, - fw_error_dump->op_mode_len); - dev_coredumpsg(mvm->trans->dev, sg_dump_data, file_len, + fw_error_dump->fwrt_len); + dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len, GFP_KERNEL); } - vfree(fw_error_dump->op_mode_ptr); + vfree(fw_error_dump->fwrt_ptr); vfree(fw_error_dump->trans_ptr); kfree(fw_error_dump); out: - iwl_mvm_free_fw_dump_desc(mvm); - mvm->fw_dump_trig = NULL; - clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status); + iwl_fw_free_dump_desc(fwrt); + fwrt->dump.trig = NULL; + clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status); } +IWL_EXPORT_SYMBOL(iwl_fw_error_dump); -const struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert = { +const struct iwl_fw_dump_desc iwl_dump_desc_assert = { .trig_desc = { .type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT), }, }; +IWL_EXPORT_SYMBOL(iwl_dump_desc_assert); -int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm, - const struct iwl_mvm_dump_desc *desc, - const struct iwl_fw_dbg_trigger_tlv *trigger) +int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, + const struct iwl_fw_dump_desc *desc, + const struct iwl_fw_dbg_trigger_tlv *trigger) { unsigned int delay = 0; if (trigger) delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay)); - if (WARN(mvm->trans->state == IWL_TRANS_NO_FW, + if (WARN(fwrt->trans->state == IWL_TRANS_NO_FW, "Can't collect dbg data when FW isn't alive\n")) return -EIO; - if (test_and_set_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status)) + if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status)) return -EBUSY; - if (WARN_ON(mvm->fw_dump_desc)) - iwl_mvm_free_fw_dump_desc(mvm); + if (WARN_ON(fwrt->dump.desc)) + iwl_fw_free_dump_desc(fwrt); - IWL_WARN(mvm, "Collecting data: trigger %d fired.\n", + IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n", le32_to_cpu(desc->trig_desc.type)); - mvm->fw_dump_desc = desc; - mvm->fw_dump_trig = trigger; + fwrt->dump.desc = desc; + fwrt->dump.trig = trigger; - schedule_delayed_work(&mvm->fw_dump_wk, delay); + schedule_delayed_work(&fwrt->dump.wk, delay); return 0; } +IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_desc); -int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig, - const char *str, size_t len, - const struct iwl_fw_dbg_trigger_tlv *trigger) +int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, + enum iwl_fw_dbg_trigger trig, + const char *str, size_t len, + const struct iwl_fw_dbg_trigger_tlv *trigger) { - struct iwl_mvm_dump_desc *desc; + struct iwl_fw_dump_desc *desc; desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC); if (!desc) @@ -948,12 +958,13 @@ int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig, desc->trig_desc.type = cpu_to_le32(trig); memcpy(desc->trig_desc.data, str, len); - return iwl_mvm_fw_dbg_collect_desc(mvm, desc, trigger); + return iwl_fw_dbg_collect_desc(fwrt, desc, trigger); } +IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect); -int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm, - struct iwl_fw_dbg_trigger_tlv *trigger, - const char *fmt, ...) +int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt, + struct iwl_fw_dbg_trigger_tlv *trigger, + const char *fmt, ...) 
{
	u16 occurrences = le16_to_cpu(trigger->occurrences);
	int ret, len = 0;
@@ -978,8 +989,8 @@ int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
 		len = strlen(buf) + 1;
 	}
 
-	ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf, len,
-				     trigger);
+	ret = iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len,
+				 trigger);
 	if (ret)
 		return ret;
 
@@ -987,37 +998,42 @@ int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
 	trigger->occurrences = cpu_to_le16(occurrences - 1);
 	return 0;
 }
+IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_trig);
 
-int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
+int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
 {
 	u8 *ptr;
 	int ret;
 	int i;
 
-	if (WARN_ONCE(conf_id >= ARRAY_SIZE(mvm->fw->dbg_conf_tlv),
+	if (WARN_ONCE(conf_id >= ARRAY_SIZE(fwrt->fw->dbg_conf_tlv),
 		      "Invalid configuration %d\n", conf_id))
 		return -EINVAL;
 
 	/* EARLY START - firmware's configuration is hard coded */
-	if ((!mvm->fw->dbg_conf_tlv[conf_id] ||
-	     !mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) &&
+	if ((!fwrt->fw->dbg_conf_tlv[conf_id] ||
+	     !fwrt->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) &&
 	    conf_id == FW_DBG_START_FROM_ALIVE)
 		return 0;
 
-	if (!mvm->fw->dbg_conf_tlv[conf_id])
+	if (!fwrt->fw->dbg_conf_tlv[conf_id])
 		return -EINVAL;
 
-	if (mvm->fw_dbg_conf != FW_DBG_INVALID)
-		IWL_WARN(mvm, "FW already configured (%d) - re-configuring\n",
-			 mvm->fw_dbg_conf);
+	if (fwrt->dump.conf != FW_DBG_INVALID)
+		IWL_WARN(fwrt, "FW already configured (%d) - re-configuring\n",
+			 fwrt->dump.conf);
 
 	/* Send all HCMDs for configuring the FW debug */
-	ptr = (void *)&mvm->fw->dbg_conf_tlv[conf_id]->hcmd;
-	for (i = 0; i < mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds; i++) {
+	ptr = (void *)&fwrt->fw->dbg_conf_tlv[conf_id]->hcmd;
+	for (i = 0; i < fwrt->fw->dbg_conf_tlv[conf_id]->num_of_hcmds; i++) {
 		struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr;
+		struct iwl_host_cmd hcmd = {
+			.id = cmd->id,
+			.len = { le16_to_cpu(cmd->len), },
+			.data = { cmd->data, },
+		};
 
-		ret = iwl_mvm_send_cmd_pdu(mvm, cmd->id, 0,
-					   le16_to_cpu(cmd->len), cmd->data);
+		ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
 		if (ret)
 			return ret;
 
@@ -1025,7 +1041,59 @@ int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
 		ptr += le16_to_cpu(cmd->len);
 	}
 
-	mvm->fw_dbg_conf = conf_id;
+	fwrt->dump.conf = conf_id;
 
 	return 0;
 }
+IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);
+
+void iwl_fw_error_dump_wk(struct work_struct *work)
+{
+	struct iwl_fw_runtime *fwrt =
+		container_of(work, struct iwl_fw_runtime, dump.wk.work);
+
+	if (fwrt->ops && fwrt->ops->dump_start &&
+	    fwrt->ops->dump_start(fwrt->ops_ctx))
+		return;
+
+	if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+		/* stop recording */
+		iwl_set_bits_prph(fwrt->trans, MON_BUFF_SAMPLE_CTL, 0x100);
+
+		iwl_fw_error_dump(fwrt);
+
+		/* start recording again if the firmware is not crashed */
+		if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&
+		    fwrt->fw->dbg_dest_tlv) {
+			iwl_clear_bits_prph(fwrt->trans,
+					    MON_BUFF_SAMPLE_CTL, 0x100);
+			iwl_clear_bits_prph(fwrt->trans,
+					    MON_BUFF_SAMPLE_CTL, 0x1);
+			iwl_set_bits_prph(fwrt->trans,
+					  MON_BUFF_SAMPLE_CTL, 0x1);
+		}
+	} else {
+		u32 in_sample = iwl_read_prph(fwrt->trans, DBGC_IN_SAMPLE);
+		u32 out_ctrl = iwl_read_prph(fwrt->trans, DBGC_OUT_CTRL);
+
+		/* stop recording */
+		iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, 0);
+		udelay(100);
+		iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, 0);
+		/* wait before we collect the data until the DBGC stops */
+		udelay(500);
+
+		iwl_fw_error_dump(fwrt);
+
+		/* start recording
again if the firmware is not crashed */ + if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) && + fwrt->fw->dbg_dest_tlv) { + iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, in_sample); + iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, out_ctrl); + } + } + + if (fwrt->ops && fwrt->ops->dump_end) + fwrt->ops->dump_end(fwrt->ops_ctx); +} + diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h index 4a5287a0c617..0f810ea89d31 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h @@ -7,7 +7,7 @@ * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -63,24 +63,46 @@ * *****************************************************************************/ -#ifndef __mvm_fw_dbg_h__ -#define __mvm_fw_dbg_h__ -#include "fw/file.h" -#include "fw/error-dump.h" -#include "mvm.h" - -void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm); -void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm); -int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm, - const struct iwl_mvm_dump_desc *desc, - const struct iwl_fw_dbg_trigger_tlv *trigger); -int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig, - const char *str, size_t len, - const struct iwl_fw_dbg_trigger_tlv *trigger); -int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm, - struct iwl_fw_dbg_trigger_tlv *trigger, - const char *fmt, ...) __printf(3, 4); -int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 id); +#ifndef __iwl_fw_dbg_h__ +#define __iwl_fw_dbg_h__ +#include <linux/workqueue.h> +#include <net/cfg80211.h> +#include "runtime.h" +#include "file.h" +#include "error-dump.h" + +/** + * struct iwl_fw_dump_desc - describes the dump + * @len: length of trig_desc->data + * @trig_desc: the description of the dump + */ +struct iwl_fw_dump_desc { + size_t len; + /* must be last */ + struct iwl_fw_error_dump_trigger_desc trig_desc; +}; + +extern const struct iwl_fw_dump_desc iwl_dump_desc_assert; + +static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt) +{ + if (fwrt->dump.desc != &iwl_dump_desc_assert) + kfree(fwrt->dump.desc); + fwrt->dump.desc = NULL; +} + +void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt); +int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, + const struct iwl_fw_dump_desc *desc, + const struct iwl_fw_dbg_trigger_tlv *trigger); +int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, + enum iwl_fw_dbg_trigger trig, + const char *str, size_t len, + const struct iwl_fw_dbg_trigger_tlv *trigger); +int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt, + struct iwl_fw_dbg_trigger_tlv *trigger, + const char *fmt, ...) 
__printf(3, 4); +int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 id); #define iwl_fw_dbg_trigger_enabled(fw, id) ({ \ void *__dbg_trigger = (fw)->dbg_trigger_tlv[(id)]; \ @@ -101,25 +123,25 @@ _iwl_fw_dbg_get_trigger(const struct iwl_fw *fw, enum iwl_fw_dbg_trigger id) static inline bool iwl_fw_dbg_trigger_vif_match(struct iwl_fw_dbg_trigger_tlv *trig, - struct ieee80211_vif *vif) + struct wireless_dev *wdev) { u32 trig_vif = le32_to_cpu(trig->vif_type); return trig_vif == IWL_FW_DBG_CONF_VIF_ANY || - ieee80211_vif_type_p2p(vif) == trig_vif; + wdev->iftype == trig_vif; } static inline bool -iwl_fw_dbg_trigger_stop_conf_match(struct iwl_mvm *mvm, +iwl_fw_dbg_trigger_stop_conf_match(struct iwl_fw_runtime *fwrt, struct iwl_fw_dbg_trigger_tlv *trig) { return ((trig->mode & IWL_FW_DBG_TRIGGER_STOP) && - (mvm->fw_dbg_conf == FW_DBG_INVALID || - (BIT(mvm->fw_dbg_conf) & le32_to_cpu(trig->stop_conf_ids)))); + (fwrt->dump.conf == FW_DBG_INVALID || + (BIT(fwrt->dump.conf) & le32_to_cpu(trig->stop_conf_ids)))); } static inline bool -iwl_fw_dbg_no_trig_window(struct iwl_mvm *mvm, +iwl_fw_dbg_no_trig_window(struct iwl_fw_runtime *fwrt, struct iwl_fw_dbg_trigger_tlv *trig) { unsigned long wind_jiff = @@ -127,49 +149,66 @@ iwl_fw_dbg_no_trig_window(struct iwl_mvm *mvm, u32 id = le32_to_cpu(trig->id); /* If this is the first event checked, jump to update start ts */ - if (mvm->fw_dbg_non_collect_ts_start[id] && - (time_after(mvm->fw_dbg_non_collect_ts_start[id] + wind_jiff, + if (fwrt->dump.non_collect_ts_start[id] && + (time_after(fwrt->dump.non_collect_ts_start[id] + wind_jiff, jiffies))) return true; - mvm->fw_dbg_non_collect_ts_start[id] = jiffies; + fwrt->dump.non_collect_ts_start[id] = jiffies; return false; } static inline bool -iwl_fw_dbg_trigger_check_stop(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, +iwl_fw_dbg_trigger_check_stop(struct iwl_fw_runtime *fwrt, + struct wireless_dev *wdev, struct iwl_fw_dbg_trigger_tlv *trig) { - if (vif && !iwl_fw_dbg_trigger_vif_match(trig, vif)) + if (wdev && !iwl_fw_dbg_trigger_vif_match(trig, wdev)) return false; - if (iwl_fw_dbg_no_trig_window(mvm, trig)) { - IWL_WARN(mvm, "Trigger %d occurred while no-collect window.\n", + if (iwl_fw_dbg_no_trig_window(fwrt, trig)) { + IWL_WARN(fwrt, "Trigger %d occurred while no-collect window.\n", trig->id); return false; } - return iwl_fw_dbg_trigger_stop_conf_match(mvm, trig); + return iwl_fw_dbg_trigger_stop_conf_match(fwrt, trig); } static inline void -_iwl_fw_dbg_trigger_simple_stop(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, +_iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt, + struct wireless_dev *wdev, struct iwl_fw_dbg_trigger_tlv *trigger) { if (!trigger) return; - if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trigger)) + if (!iwl_fw_dbg_trigger_check_stop(fwrt, wdev, trigger)) return; - iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL); + iwl_fw_dbg_collect_trig(fwrt, trigger, NULL); } -#define iwl_fw_dbg_trigger_simple_stop(mvm, vif, trig) \ - _iwl_fw_dbg_trigger_simple_stop((mvm), (vif), \ - iwl_fw_dbg_get_trigger((mvm)->fw,\ +#define iwl_fw_dbg_trigger_simple_stop(fwrt, wdev, trig) \ + _iwl_fw_dbg_trigger_simple_stop((fwrt), (wdev), \ + iwl_fw_dbg_get_trigger((fwrt)->fw,\ (trig))) -#endif /* __mvm_fw_dbg_h__ */ +static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt) +{ + fwrt->dump.conf = FW_DBG_INVALID; +} + +void iwl_fw_error_dump_wk(struct work_struct *work); + +static inline void iwl_fw_flush_dump(struct iwl_fw_runtime *fwrt) +{ + 
flush_delayed_work(&fwrt->dump.wk); +} + +static inline void iwl_fw_cancel_dump(struct iwl_fw_runtime *fwrt) +{ + cancel_delayed_work_sync(&fwrt->dump.wk); +} + +#endif /* __iwl_fw_dbg_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c new file mode 100644 index 000000000000..bfe5316bbb6a --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c @@ -0,0 +1,75 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless <linuxwifi@intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
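The dbg.h helpers above compose into a one-liner for op-modes: check that the trigger is enabled, match it against a wireless_dev, rate-limit it by its no-collect window, and only then collect. A hedged sketch of a call site follows; the function name and the MLME trigger choice are illustrative, not part of this patch:

/*
 * Illustrative only: fire a simple-stop trigger from op-mode code.
 * Pass NULL instead of the wdev when no interface is involved;
 * iwl_fw_dbg_trigger_simple_stop() silently does nothing unless the
 * trigger is enabled, outside its no-collect window and allowed to
 * stop the currently running debug configuration.
 */
static void example_mlme_event(struct iwl_fw_runtime *fwrt,
			       struct ieee80211_vif *vif)
{
	iwl_fw_dbg_trigger_simple_stop(fwrt,
				       vif ? ieee80211_vif_to_wdev(vif) : NULL,
				       FW_DBG_TRIGGER_MLME);
}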
+ * + *****************************************************************************/ +#include "iwl-drv.h" +#include "runtime.h" +#include "dbg.h" + +void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, + const struct iwl_fw *fw, + const struct iwl_fw_runtime_ops *ops, void *ops_ctx) +{ + memset(fwrt, 0, sizeof(*fwrt)); + fwrt->trans = trans; + fwrt->fw = fw; + fwrt->dev = trans->dev; + fwrt->dump.conf = FW_DBG_INVALID; + fwrt->ops = ops; + fwrt->ops_ctx = ops_ctx; + INIT_DELAYED_WORK(&fwrt->dump.wk, iwl_fw_error_dump_wk); +} +IWL_EXPORT_SYMBOL(iwl_fw_runtime_init); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/paging.c b/drivers/net/wireless/intel/iwlwifi/fw/paging.c new file mode 100644 index 000000000000..1610722b8099 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/paging.c @@ -0,0 +1,414 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless <linuxwifi@intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
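iwl_fw_runtime_init() is the single entry point that ties an op-mode to the shared runtime; the ops give the caller hooks that bracket error-dump collection (dump_start may refuse the dump by returning an error). A minimal sketch, assuming a hypothetical op-mode structure:

/*
 * Hedged sketch; struct my_op_mode and the my_* names are
 * placeholders, only the iwl_fw_* calls come from this patch.
 */
struct my_op_mode {
	struct iwl_fw_runtime fwrt;
	/* ... op-mode private state ... */
};

static int my_dump_start(void *ctx)
{
	/* take references/locks needed while the dump runs */
	return 0;
}

static void my_dump_end(void *ctx)
{
	/* undo whatever my_dump_start() did */
}

static const struct iwl_fw_runtime_ops my_fwrt_ops = {
	.dump_start = my_dump_start,
	.dump_end = my_dump_end,
};

static void my_op_mode_start(struct my_op_mode *op, struct iwl_trans *trans,
			     const struct iwl_fw *fw)
{
	iwl_fw_runtime_init(&op->fwrt, trans, fw, &my_fwrt_ops, op);
	iwl_fw_set_current_image(&op->fwrt, IWL_UCODE_INIT);
}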
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include "iwl-drv.h" +#include "runtime.h" +#include "fw/api/commands.h" + +void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt) +{ + int i; + + if (!fwrt->fw_paging_db[0].fw_paging_block) + return; + + for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) { + struct iwl_fw_paging *paging = &fwrt->fw_paging_db[i]; + + if (!paging->fw_paging_block) { + IWL_DEBUG_FW(fwrt, + "Paging: block %d already freed, continue to next page\n", + i); + + continue; + } + dma_unmap_page(fwrt->trans->dev, paging->fw_paging_phys, + paging->fw_paging_size, DMA_BIDIRECTIONAL); + + __free_pages(paging->fw_paging_block, + get_order(paging->fw_paging_size)); + paging->fw_paging_block = NULL; + } + kfree(fwrt->trans->paging_download_buf); + fwrt->trans->paging_download_buf = NULL; + fwrt->trans->paging_db = NULL; + + memset(fwrt->fw_paging_db, 0, sizeof(fwrt->fw_paging_db)); +} +IWL_EXPORT_SYMBOL(iwl_free_fw_paging); + +static int iwl_alloc_fw_paging_mem(struct iwl_fw_runtime *fwrt, + const struct fw_img *image) +{ + struct page *block; + dma_addr_t phys = 0; + int blk_idx, order, num_of_pages, size, dma_enabled; + + if (fwrt->fw_paging_db[0].fw_paging_block) + return 0; + + dma_enabled = is_device_dma_capable(fwrt->trans->dev); + + /* ensure BLOCK_2_EXP_SIZE is power of 2 of PAGING_BLOCK_SIZE */ + BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE); + + num_of_pages = image->paging_mem_size / FW_PAGING_SIZE; + fwrt->num_of_paging_blk = + DIV_ROUND_UP(num_of_pages, NUM_OF_PAGE_PER_GROUP); + fwrt->num_of_pages_in_last_blk = + num_of_pages - + NUM_OF_PAGE_PER_GROUP * (fwrt->num_of_paging_blk - 1); + + IWL_DEBUG_FW(fwrt, + "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n", + fwrt->num_of_paging_blk, + fwrt->num_of_pages_in_last_blk); + + /* + * Allocate CSS and paging blocks in dram. + */ + for (blk_idx = 0; blk_idx < fwrt->num_of_paging_blk + 1; blk_idx++) { + /* For CSS allocate 4KB, for others PAGING_BLOCK_SIZE (32K) */ + size = blk_idx ? PAGING_BLOCK_SIZE : FW_PAGING_SIZE; + order = get_order(size); + block = alloc_pages(GFP_KERNEL, order); + if (!block) { + /* free all the previous pages since we failed */ + iwl_free_fw_paging(fwrt); + return -ENOMEM; + } + + fwrt->fw_paging_db[blk_idx].fw_paging_block = block; + fwrt->fw_paging_db[blk_idx].fw_paging_size = size; + + if (dma_enabled) { + phys = dma_map_page(fwrt->trans->dev, block, 0, + PAGE_SIZE << order, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(fwrt->trans->dev, phys)) { + /* + * free the previous pages and the current one + * since we failed to map_page. 
+ */ + iwl_free_fw_paging(fwrt); + return -ENOMEM; + } + fwrt->fw_paging_db[blk_idx].fw_paging_phys = phys; + } else { + fwrt->fw_paging_db[blk_idx].fw_paging_phys = + PAGING_ADDR_SIG | + blk_idx << BLOCK_2_EXP_SIZE; + } + + if (!blk_idx) + IWL_DEBUG_FW(fwrt, + "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n", + order); + else + IWL_DEBUG_FW(fwrt, + "Paging: allocated 32K bytes (order %d) for firmware paging.\n", + order); + } + + return 0; +} + +static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt, + const struct fw_img *image) +{ + int sec_idx, idx; + u32 offset = 0; + + /* + * find where the paging image starts: + * if CPU2 exists and is in paging format, the image looks like: + * CPU1 sections (2 or more) + * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2 + * CPU2 sections (not paged) + * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2 + * non-paged sections from the CPU2 paging section + * CPU2 paging CSS + * CPU2 paging image (including instruction and data) + */ + for (sec_idx = 0; sec_idx < image->num_sec; sec_idx++) { + if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) { + sec_idx++; + break; + } + } + + /* + * If paging is enabled there should be at least 2 more sections left + * (one for CSS and one for Paging data) + */ + if (sec_idx >= image->num_sec - 1) { + IWL_ERR(fwrt, "Paging: Missing CSS and/or paging sections\n"); + iwl_free_fw_paging(fwrt); + return -EINVAL; + } + + /* copy the CSS block to the dram */ + IWL_DEBUG_FW(fwrt, "Paging: load paging CSS to FW, sec = %d\n", + sec_idx); + + memcpy(page_address(fwrt->fw_paging_db[0].fw_paging_block), + image->sec[sec_idx].data, + fwrt->fw_paging_db[0].fw_paging_size); + dma_sync_single_for_device(fwrt->trans->dev, + fwrt->fw_paging_db[0].fw_paging_phys, + fwrt->fw_paging_db[0].fw_paging_size, + DMA_BIDIRECTIONAL); + + IWL_DEBUG_FW(fwrt, + "Paging: copied %d CSS bytes to first block\n", + fwrt->fw_paging_db[0].fw_paging_size); + + sec_idx++; + + /* + * copy the paging blocks to the dram + * the loop index starts from 1 since the CSS block was already copied + * to dram and its index is 0. + * the loop stops at num_of_paging_blk since the last block is not full.
+ */ + for (idx = 1; idx < fwrt->num_of_paging_blk; idx++) { + struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx]; + + memcpy(page_address(block->fw_paging_block), + image->sec[sec_idx].data + offset, + block->fw_paging_size); + dma_sync_single_for_device(fwrt->trans->dev, + block->fw_paging_phys, + block->fw_paging_size, + DMA_BIDIRECTIONAL); + + IWL_DEBUG_FW(fwrt, + "Paging: copied %d paging bytes to block %d\n", + fwrt->fw_paging_db[idx].fw_paging_size, + idx); + + offset += fwrt->fw_paging_db[idx].fw_paging_size; + } + + /* copy the last paging block */ + if (fwrt->num_of_pages_in_last_blk > 0) { + struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx]; + + memcpy(page_address(block->fw_paging_block), + image->sec[sec_idx].data + offset, + FW_PAGING_SIZE * fwrt->num_of_pages_in_last_blk); + dma_sync_single_for_device(fwrt->trans->dev, + block->fw_paging_phys, + block->fw_paging_size, + DMA_BIDIRECTIONAL); + + IWL_DEBUG_FW(fwrt, + "Paging: copied %d pages in the last block %d\n", + fwrt->num_of_pages_in_last_blk, idx); + } + + return 0; +} + +static int iwl_save_fw_paging(struct iwl_fw_runtime *fwrt, + const struct fw_img *fw) +{ + int ret; + + ret = iwl_alloc_fw_paging_mem(fwrt, fw); + if (ret) + return ret; + + return iwl_fill_paging_mem(fwrt, fw); +} + +/* send paging cmd to FW in case CPU2 has paging image */ +static int iwl_send_paging_cmd(struct iwl_fw_runtime *fwrt, + const struct fw_img *fw) +{ + struct iwl_fw_paging_cmd paging_cmd = { + .flags = cpu_to_le32(PAGING_CMD_IS_SECURED | + PAGING_CMD_IS_ENABLED | + (fwrt->num_of_pages_in_last_blk << + PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)), + .block_size = cpu_to_le32(BLOCK_2_EXP_SIZE), + .block_num = cpu_to_le32(fwrt->num_of_paging_blk), + }; + struct iwl_host_cmd hcmd = { + .id = iwl_cmd_id(FW_PAGING_BLOCK_CMD, IWL_ALWAYS_LONG_GROUP, 0), + .len = { sizeof(paging_cmd), }, + .data = { &paging_cmd, }, + }; + int blk_idx; + + /* loop over all paging blocks + the CSS block */ + for (blk_idx = 0; blk_idx < fwrt->num_of_paging_blk + 1; blk_idx++) { + dma_addr_t addr = fwrt->fw_paging_db[blk_idx].fw_paging_phys; + __le32 phy_addr; + + addr = addr >> PAGE_2_EXP_SIZE; + phy_addr = cpu_to_le32(addr); + paging_cmd.device_phy_addr[blk_idx] = phy_addr; + } + + return iwl_trans_send_cmd(fwrt->trans, &hcmd); +} + +/* + * Get the paging item from the FW; used when the device is not DMA + * capable and the driver must copy the pages to / from SMEM + */ +static int iwl_trans_get_paging_item(struct iwl_fw_runtime *fwrt) +{ + int ret; + struct iwl_fw_get_item_cmd fw_get_item_cmd = { + .item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING), + }; + struct iwl_fw_get_item_resp *item_resp; + struct iwl_host_cmd cmd = { + .id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0), + .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, + .data = { &fw_get_item_cmd, }, + .len = { sizeof(fw_get_item_cmd), }, + }; + + ret = iwl_trans_send_cmd(fwrt->trans, &cmd); + if (ret) { + IWL_ERR(fwrt, + "Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n", + ret); + return ret; + } + + item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data; + if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) { + IWL_ERR(fwrt, + "Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n", + le32_to_cpu(item_resp->item_id)); + ret = -EIO; + goto exit; + } + + /* Add an extra page for headers */ + fwrt->trans->paging_download_buf = kzalloc(PAGING_BLOCK_SIZE + + FW_PAGING_SIZE, + GFP_KERNEL); + if (!fwrt->trans->paging_download_buf) { + ret = -ENOMEM; + goto exit; + } + fwrt->trans->paging_req_addr = le32_to_cpu(item_resp->item_val); +
fwrt->trans->paging_db = fwrt->fw_paging_db; + IWL_DEBUG_FW(fwrt, + "Paging: got paging request address (paging_req_addr 0x%08x)\n", + fwrt->trans->paging_req_addr); + +exit: + iwl_free_resp(&cmd); + + return ret; +} + +int iwl_init_paging(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type type) +{ + const struct fw_img *fw = &fwrt->fw->img[type]; + int ret; + + if (fwrt->trans->cfg->gen2) + return 0; + + /* + * Configure and operate fw paging mechanism. + * The driver configures the paging flow only once. + * The CPU2 paging image is included in the IWL_UCODE_INIT image. + */ + if (!fw->paging_mem_size) + return 0; + + /* + * When dma is not enabled, the driver needs to copy / write + * the downloaded / uploaded page to / from the smem. + * This gets the location of the place where the pages are + * stored. + */ + if (!is_device_dma_capable(fwrt->trans->dev)) { + ret = iwl_trans_get_paging_item(fwrt); + if (ret) { + IWL_ERR(fwrt, "failed to get FW paging item\n"); + return ret; + } + } + + ret = iwl_save_fw_paging(fwrt, fw); + if (ret) { + IWL_ERR(fwrt, "failed to save the FW paging image\n"); + return ret; + } + + ret = iwl_send_paging_cmd(fwrt, fw); + if (ret) { + IWL_ERR(fwrt, "failed to send the paging cmd\n"); + iwl_free_fw_paging(fwrt); + return ret; + } + + return 0; +} +IWL_EXPORT_SYMBOL(iwl_init_paging); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h new file mode 100644 index 000000000000..66bea6545690 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -0,0 +1,156 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless <linuxwifi@intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission.
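Before moving on to the runtime definitions, a worked example of the paging arithmetic implemented above, assuming the constants implied by the debug strings (4KB firmware pages, 8 pages per 32KB block):

/*
 * Hedged sketch, not part of the patch: reproduces
 * iwl_alloc_fw_paging_mem()'s geometry for a given image size.
 */
static u32 example_pages_in_last_block(u32 paging_mem_size)
{
	u32 num_of_pages = paging_mem_size / (4 * 1024); /* FW_PAGING_SIZE */
	u32 blocks = DIV_ROUND_UP(num_of_pages, 8);	 /* pages per group */

	/* for a 340KB image: 85 pages -> 11 data blocks, 5 pages in the
	 * last one; with the extra 4KB CSS block, FW_PAGING_BLOCK_CMD
	 * then carries 12 physical addresses, each shifted right by
	 * PAGE_2_EXP_SIZE, i.e. expressed in 4KB units. */
	return num_of_pages - 8 * (blocks - 1);
}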
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __iwl_fw_runtime_h__ +#define __iwl_fw_runtime_h__ + +#include "iwl-config.h" +#include "iwl-trans.h" +#include "img.h" +#include "fw/api/debug.h" +#include "fw/api/paging.h" + +struct iwl_fw_runtime_ops { + int (*dump_start)(void *ctx); + void (*dump_end)(void *ctx); +}; + +#define MAX_NUM_LMAC 2 +struct iwl_fwrt_shared_mem_cfg { + int num_lmacs; + int num_txfifo_entries; + struct { + u32 txfifo_size[TX_FIFO_MAX_NUM]; + u32 rxfifo1_size; + } lmac[MAX_NUM_LMAC]; + u32 rxfifo2_size; + u32 internal_txfifo_addr; + u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; +}; + +enum iwl_fw_runtime_status { + IWL_FWRT_STATUS_DUMPING = 0, +}; + +/** + * struct iwl_fw_runtime - runtime data for firmware + * @trans: transport layer pointer + * @fw: firmware image + * @dev: device pointer + * @ops: user ops + * @ops_ctx: user ops context + * @status: status flags + * @fw_paging_db: paging database + * @num_of_paging_blk: number of paging blocks + * @num_of_pages_in_last_blk: number of pages in the last block + * @smem_cfg: saved firmware SMEM configuration + * @cur_fw_img: current firmware image, must be maintained by + * the driver by calling &iwl_fw_set_current_image() + * @dump: debug dump data + */ +struct iwl_fw_runtime { + struct iwl_trans *trans; + const struct iwl_fw *fw; + struct device *dev; + + const struct iwl_fw_runtime_ops *ops; + void *ops_ctx; + + unsigned long status; + + /* Paging */ + struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS]; + u16 num_of_paging_blk; + u16 num_of_pages_in_last_blk; + + enum iwl_ucode_type cur_fw_img; + + /* memory configuration */ + struct iwl_fwrt_shared_mem_cfg smem_cfg; + + /* debug */ + struct { + const struct iwl_fw_dump_desc *desc; + const struct iwl_fw_dbg_trigger_tlv *trig; + struct delayed_work wk; + + u8 conf; + + /* ts of the beginning of a non-collect fw dbg data period */ + unsigned long non_collect_ts_start[FW_DBG_TRIGGER_MAX - 1]; + } dump; +}; + +void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, + const struct iwl_fw *fw, + const struct iwl_fw_runtime_ops *ops, void *ops_ctx); + +static inline void iwl_fw_set_current_image(struct iwl_fw_runtime *fwrt, + enum iwl_ucode_type cur_fw_img) +{ + fwrt->cur_fw_img = cur_fw_img; +} + +int iwl_init_paging(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type type); +void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt); + +void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt); + +void iwl_fwrt_handle_notification(struct iwl_fw_runtime *fwrt, + struct iwl_rx_cmd_buffer *rxb); + +#endif /* __iwl_fw_runtime_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c
b/drivers/net/wireless/intel/iwlwifi/fw/smem.c new file mode 100644 index 000000000000..065a951cefba --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c @@ -0,0 +1,152 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless <linuxwifi@intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ +#include "iwl-drv.h" +#include "runtime.h" +#include "fw/api/commands.h" + +static void iwl_parse_shared_mem_a000(struct iwl_fw_runtime *fwrt, + struct iwl_rx_packet *pkt) +{ + struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data; + int i, lmac; + int lmac_num = le32_to_cpu(mem_cfg->lmac_num); + + if (WARN_ON(lmac_num > ARRAY_SIZE(mem_cfg->lmac_smem))) + return; + + fwrt->smem_cfg.num_lmacs = lmac_num; + fwrt->smem_cfg.num_txfifo_entries = + ARRAY_SIZE(mem_cfg->lmac_smem[0].txfifo_size); + fwrt->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo2_size); + + for (lmac = 0; lmac < lmac_num; lmac++) { + struct iwl_shared_mem_lmac_cfg *lmac_cfg = + &mem_cfg->lmac_smem[lmac]; + + for (i = 0; i < ARRAY_SIZE(lmac_cfg->txfifo_size); i++) + fwrt->smem_cfg.lmac[lmac].txfifo_size[i] = + le32_to_cpu(lmac_cfg->txfifo_size[i]); + fwrt->smem_cfg.lmac[lmac].rxfifo1_size = + le32_to_cpu(lmac_cfg->rxfifo1_size); + } +} + +static void iwl_parse_shared_mem(struct iwl_fw_runtime *fwrt, + struct iwl_rx_packet *pkt) +{ + struct iwl_shared_mem_cfg_v2 *mem_cfg = (void *)pkt->data; + int i; + + fwrt->smem_cfg.num_lmacs = 1; + + fwrt->smem_cfg.num_txfifo_entries = ARRAY_SIZE(mem_cfg->txfifo_size); + for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) + fwrt->smem_cfg.lmac[0].txfifo_size[i] = + le32_to_cpu(mem_cfg->txfifo_size[i]); + + fwrt->smem_cfg.lmac[0].rxfifo1_size = + le32_to_cpu(mem_cfg->rxfifo_size[0]); + fwrt->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo_size[1]); + + /* new API has more data, from rxfifo_addr field and on */ + if (fw_has_capa(&fwrt->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { + BUILD_BUG_ON(sizeof(fwrt->smem_cfg.internal_txfifo_size) != + sizeof(mem_cfg->internal_txfifo_size)); + + for (i = 0; + i < ARRAY_SIZE(fwrt->smem_cfg.internal_txfifo_size); + i++) + fwrt->smem_cfg.internal_txfifo_size[i] = + le32_to_cpu(mem_cfg->internal_txfifo_size[i]); + } +} + +void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt) +{ + struct iwl_host_cmd cmd = { + .flags = CMD_WANT_SKB, + .data = { NULL, }, + .len = { 0, }, + }; + struct iwl_rx_packet *pkt; + + if (fw_has_capa(&fwrt->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) + cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0); + else + cmd.id = SHARED_MEM_CFG; + + if (WARN_ON(iwl_trans_send_cmd(fwrt->trans, &cmd))) + return; + + pkt = cmd.resp_pkt; + if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_A000) + iwl_parse_shared_mem_a000(fwrt, pkt); + else + iwl_parse_shared_mem(fwrt, pkt); + + IWL_DEBUG_INFO(fwrt, "SHARED MEM CFG: got memory offsets/sizes\n"); + + iwl_free_resp(&cmd); +} +IWL_EXPORT_SYMBOL(iwl_get_shared_mem_conf); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index eb6842abb4c7..e90abbfba718 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -76,7 +76,8 @@ #include "iwl-config.h" #include "fw/img.h" #include "iwl-op-mode.h" -#include "fw/api.h" +#include "fw/api/cmdhdr.h" +#include "fw/api/txq.h" /** * DOC: Transport layer - what is it ? 
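Both parsers normalize the firmware's reply into the same struct iwl_fwrt_shared_mem_cfg, so consumers do not care which API version the device speaks. A hedged sketch of a consumer, assuming iwl_get_shared_mem_conf() has already run; the function name is illustrative:

/* Illustrative only: sum all TXFIFO sizes across the parsed lmacs. */
static u32 example_total_txfifo_bytes(struct iwl_fw_runtime *fwrt)
{
	struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg;
	u32 total = 0;
	int lmac, i;

	for (lmac = 0; lmac < cfg->num_lmacs; lmac++)
		for (i = 0; i < cfg->num_txfifo_entries; i++)
			total += cfg->lmac[lmac].txfifo_size[i];

	return total;
}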
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile index 83ac807e547d..00e6737dda72 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile @@ -6,7 +6,7 @@ iwlmvm-y += power.o coex.o iwlmvm-y += tt.o offloading.o tdls.o iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o -iwlmvm-y += tof.o fw-dbg.o +iwlmvm-y += tof.o iwlmvm-$(CONFIG_PM) += d3.o ccflags-y += -I$(src)/../ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c index 34dd5c40ce77..6c5c6510428a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c @@ -67,7 +67,7 @@ #include <linux/etherdevice.h> #include <net/mac80211.h> -#include "fw-api-coex.h" +#include "fw/api/coex.h" #include "iwl-modparams.h" #include "mvm.h" #include "iwl-debug.h" diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index 6fda8627b726..a922a351c916 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -111,7 +111,6 @@ #define IWL_MVM_SW_TX_CSUM_OFFLOAD 0 #define IWL_MVM_HW_CSUM_DISABLE 0 #define IWL_MVM_PARSE_NVM 0 -#define IWL_MVM_COLLECT_FW_ERR_DUMP 1 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1 @@ -141,5 +140,6 @@ #define IWL_MVM_RS_TPC_SR_FORCE_INCREASE 75 /* percent */ #define IWL_MVM_RS_TPC_SR_NO_INCREASE 85 /* percent */ #define IWL_MVM_RS_TPC_TX_POWER_STEP 3 +#define IWL_MVM_ENABLE_EBS 1 #endif /* __MVM_CONSTANTS_H */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c index a7ac281e5cde..71a01df96f8b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c @@ -65,7 +65,7 @@ * *****************************************************************************/ #include "mvm.h" -#include "fw-api-tof.h" +#include "fw/api/tof.h" #include "debugfs.h" static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index c1c9c489edc9..29f1d1807415 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -69,7 +69,6 @@ #include <linux/netdevice.h> #include "mvm.h" -#include "fw-dbg.h" #include "sta.h" #include "iwl-io.h" #include "debugfs.h" @@ -84,7 +83,7 @@ static ssize_t iwl_dbgfs_ctdp_budget_read(struct file *file, int pos, budget; if (!iwl_mvm_firmware_running(mvm) || - mvm->cur_ucode != IWL_UCODE_REGULAR) + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; mutex_lock(&mvm->mutex); @@ -105,7 +104,7 @@ static ssize_t iwl_dbgfs_stop_ctdp_write(struct iwl_mvm *mvm, char *buf, int ret; if (!iwl_mvm_firmware_running(mvm) || - mvm->cur_ucode != IWL_UCODE_REGULAR) + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; mutex_lock(&mvm->mutex); @@ -122,7 +121,7 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf, u32 flush_arg; if (!iwl_mvm_firmware_running(mvm) || - mvm->cur_ucode != IWL_UCODE_REGULAR) + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; if (kstrtou32(buf, 0, &flush_arg)) @@ -155,7 +154,7 @@ static ssize_t 
iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf, int sta_id, drain, ret; if (!iwl_mvm_firmware_running(mvm) || - mvm->cur_ucode != IWL_UCODE_REGULAR) + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; if (sscanf(buf, "%d %d", &sta_id, &drain) != 2) @@ -192,7 +191,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf, return -EINVAL; /* default is to dump the entire data segment */ - img = &mvm->fw->img[mvm->cur_ucode]; + img = &mvm->fw->img[mvm->fwrt.cur_fw_img]; ofs = img->sec[IWL_UCODE_SECTION_DATA].offset; len = img->sec[IWL_UCODE_SECTION_DATA].len; @@ -224,7 +223,7 @@ static ssize_t iwl_dbgfs_sram_write(struct iwl_mvm *mvm, char *buf, if (!iwl_mvm_firmware_running(mvm)) return -EINVAL; - img = &mvm->fw->img[mvm->cur_ucode]; + img = &mvm->fw->img[mvm->fwrt.cur_fw_img]; img_offset = img->sec[IWL_UCODE_SECTION_DATA].offset; img_len = img->sec[IWL_UCODE_SECTION_DATA].len; @@ -1123,7 +1122,7 @@ static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file, int pos = 0; mutex_lock(&mvm->mutex); - conf = mvm->fw_dbg_conf; + conf = mvm->fwrt.dump.conf; mutex_unlock(&mvm->mutex); pos += scnprintf(buf + pos, bufsz - pos, "%d\n", conf); @@ -1190,7 +1189,7 @@ static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm, return -EINVAL; mutex_lock(&mvm->mutex); - ret = iwl_mvm_start_fw_dbg_conf(mvm, conf_id); + ret = iwl_fw_start_dbg_conf(&mvm->fwrt, conf_id); mutex_unlock(&mvm->mutex); return ret ?: count; @@ -1211,8 +1210,8 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm, if (count == 0) return 0; - iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, buf, - (count - 1), NULL); + iwl_fw_dbg_collect(&mvm->fwrt, FW_DBG_TRIGGER_USER, buf, + (count - 1), NULL); iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index aad265dcfaf5..69336f38ac58 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -68,2823 +68,32 @@ #ifndef __fw_api_h__ #define __fw_api_h__ -#include "fw-api-rs.h" -#include "fw-api-rx.h" -#include "fw-api-tx.h" -#include "fw-api-sta.h" -#include "fw-api-mac.h" -#include "fw-api-power.h" -#include "fw-api-d3.h" -#include "fw-api-coex.h" -#include "fw-api-scan.h" -#include "fw-api-stats.h" -#include "fw-api-tof.h" - -/* Tx queue numbers for non-DQA mode */ -enum { - IWL_MVM_OFFCHANNEL_QUEUE = 8, - IWL_MVM_CMD_QUEUE = 9, -}; - -/* - * DQA queue numbers - * - * @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW - * @IWL_MVM_DQA_AUX_QUEUE: a queue reserved for aux frames - * @IWL_MVM_DQA_P2P_DEVICE_QUEUE: a queue reserved for P2P device frames - * @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames - * @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure - * that we are never left without the possibility to connect to an AP. - * @IWL_MVM_DQA_MIN_MGMT_QUEUE: first TXQ in pool for MGMT and non-QOS frames. - * Each MGMT queue is mapped to a single STA - * MGMT frames are frames that return true on ieee80211_is_mgmt() - * @IWL_MVM_DQA_MAX_MGMT_QUEUE: last TXQ in pool for MGMT frames - * @IWL_MVM_DQA_AP_PROBE_RESP_QUEUE: a queue reserved for P2P GO/SoftAP probe - * responses - * @IWL_MVM_DQA_MIN_DATA_QUEUE: first TXQ in pool for DATA frames. 
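The debugfs churn above is all one pattern: the op-mode's private cur_ucode field moves into the shared runtime as fwrt.cur_fw_img. A hedged sketch of the resulting guard, with the function name illustrative:

/* Illustrative only: the guard the debugfs hooks above now share. */
static bool example_can_access_fw(struct iwl_mvm *mvm)
{
	return iwl_mvm_firmware_running(mvm) &&
	       mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR;
}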
- * DATA frames are intended for !ieee80211_is_mgmt() frames, but if - * the MGMT TXQ pool is exhausted, mgmt frames can be sent on DATA queues - * as well - * @IWL_MVM_DQA_MAX_DATA_QUEUE: last TXQ in pool for DATA frames - */ -enum iwl_mvm_dqa_txq { - IWL_MVM_DQA_CMD_QUEUE = 0, - IWL_MVM_DQA_AUX_QUEUE = 1, - IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2, - IWL_MVM_DQA_GCAST_QUEUE = 3, - IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4, - IWL_MVM_DQA_MIN_MGMT_QUEUE = 5, - IWL_MVM_DQA_MAX_MGMT_QUEUE = 8, - IWL_MVM_DQA_AP_PROBE_RESP_QUEUE = 9, - IWL_MVM_DQA_MIN_DATA_QUEUE = 10, - IWL_MVM_DQA_MAX_DATA_QUEUE = 31, -}; - -enum iwl_mvm_tx_fifo { - IWL_MVM_TX_FIFO_BK = 0, - IWL_MVM_TX_FIFO_BE, - IWL_MVM_TX_FIFO_VI, - IWL_MVM_TX_FIFO_VO, - IWL_MVM_TX_FIFO_MCAST = 5, - IWL_MVM_TX_FIFO_CMD = 7, -}; - - -/** - * enum iwl_legacy_cmds - legacy group command IDs - */ -enum iwl_legacy_cmds { - /** - * @MVM_ALIVE: - * Alive data from the firmware, as described in - * &struct mvm_alive_resp_v3 or &struct mvm_alive_resp. - */ - MVM_ALIVE = 0x1, - - /** - * @REPLY_ERROR: Cause an error in the firmware, for testing purposes. - */ - REPLY_ERROR = 0x2, - - /** - * @ECHO_CMD: Send data to the device to have it returned immediately. - */ - ECHO_CMD = 0x3, - - /** - * @INIT_COMPLETE_NOTIF: Notification that initialization is complete. - */ - INIT_COMPLETE_NOTIF = 0x4, - - /** - * @PHY_CONTEXT_CMD: - * Add/modify/remove a PHY context, using &struct iwl_phy_context_cmd. - */ - PHY_CONTEXT_CMD = 0x8, - - /** - * @DBG_CFG: Debug configuration command. - */ - DBG_CFG = 0x9, - - /** - * @ANTENNA_COUPLING_NOTIFICATION: - * Antenna coupling data, &struct iwl_mvm_antenna_coupling_notif - */ - ANTENNA_COUPLING_NOTIFICATION = 0xa, - - /** - * @SCAN_ITERATION_COMPLETE_UMAC: - * Firmware indicates a scan iteration completed, using - * &struct iwl_umac_scan_iter_complete_notif. - */ - SCAN_ITERATION_COMPLETE_UMAC = 0xb5, - - /** - * @SCAN_CFG_CMD: - * uses &struct iwl_scan_config_v1 or &struct iwl_scan_config - */ - SCAN_CFG_CMD = 0xc, - - /** - * @SCAN_REQ_UMAC: uses &struct iwl_scan_req_umac - */ - SCAN_REQ_UMAC = 0xd, - - /** - * @SCAN_ABORT_UMAC: uses &struct iwl_umac_scan_abort - */ - SCAN_ABORT_UMAC = 0xe, - - /** - * @SCAN_COMPLETE_UMAC: uses &struct iwl_umac_scan_complete - */ - SCAN_COMPLETE_UMAC = 0xf, - - /** - * @BA_WINDOW_STATUS_NOTIFICATION_ID: - * uses &struct iwl_ba_window_status_notif - */ - BA_WINDOW_STATUS_NOTIFICATION_ID = 0x13, - - /** - * @ADD_STA_KEY: - * &struct iwl_mvm_add_sta_key_cmd_v1 or - * &struct iwl_mvm_add_sta_key_cmd. - */ - ADD_STA_KEY = 0x17, - - /** - * @ADD_STA: - * &struct iwl_mvm_add_sta_cmd or &struct iwl_mvm_add_sta_cmd_v7. - */ - ADD_STA = 0x18, - - /** - * @REMOVE_STA: &struct iwl_mvm_rm_sta_cmd - */ - REMOVE_STA = 0x19, - - /** - * @FW_GET_ITEM_CMD: uses &struct iwl_fw_get_item_cmd - */ - FW_GET_ITEM_CMD = 0x1a, - - /** - * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2, - * response in &struct iwl_mvm_tx_resp or - * &struct iwl_mvm_tx_resp_v3 - */ - TX_CMD = 0x1c, - - /** - * @TXPATH_FLUSH: &struct iwl_tx_path_flush_cmd - */ - TXPATH_FLUSH = 0x1e, - - /** - * @MGMT_MCAST_KEY: - * &struct iwl_mvm_mgmt_mcast_key_cmd or - * &struct iwl_mvm_mgmt_mcast_key_cmd_v1 - */ - MGMT_MCAST_KEY = 0x1f, - - /* scheduler config */ - /** - * @SCD_QUEUE_CFG: &struct iwl_scd_txq_cfg_cmd for older hardware, - * &struct iwl_tx_queue_cfg_cmd with &struct iwl_tx_queue_cfg_rsp - * for newer (A000) hardware. 
- */ - SCD_QUEUE_CFG = 0x1d, - - /** - * @WEP_KEY: uses &struct iwl_mvm_wep_key_cmd - */ - WEP_KEY = 0x20, - - /** - * @SHARED_MEM_CFG: - * retrieve shared memory configuration - response in - * &struct iwl_shared_mem_cfg - */ - SHARED_MEM_CFG = 0x25, - - /** - * @TDLS_CHANNEL_SWITCH_CMD: uses &struct iwl_tdls_channel_switch_cmd - */ - TDLS_CHANNEL_SWITCH_CMD = 0x27, - - /** - * @TDLS_CHANNEL_SWITCH_NOTIFICATION: - * uses &struct iwl_tdls_channel_switch_notif - */ - TDLS_CHANNEL_SWITCH_NOTIFICATION = 0xaa, - - /** - * @TDLS_CONFIG_CMD: - * &struct iwl_tdls_config_cmd, response in &struct iwl_tdls_config_res - */ - TDLS_CONFIG_CMD = 0xa7, - - /** - * @MAC_CONTEXT_CMD: &struct iwl_mac_ctx_cmd - */ - MAC_CONTEXT_CMD = 0x28, - - /** - * @TIME_EVENT_CMD: - * &struct iwl_time_event_cmd, response in &struct iwl_time_event_resp - */ - TIME_EVENT_CMD = 0x29, /* both CMD and response */ - - /** - * @TIME_EVENT_NOTIFICATION: &struct iwl_time_event_notif - */ - TIME_EVENT_NOTIFICATION = 0x2a, - - /** - * @BINDING_CONTEXT_CMD: - * &struct iwl_binding_cmd or &struct iwl_binding_cmd_v1 - */ - BINDING_CONTEXT_CMD = 0x2b, - - /** - * @TIME_QUOTA_CMD: &struct iwl_time_quota_cmd - */ - TIME_QUOTA_CMD = 0x2c, - - /** - * @NON_QOS_TX_COUNTER_CMD: - * command is &struct iwl_nonqos_seq_query_cmd - */ - NON_QOS_TX_COUNTER_CMD = 0x2d, - - /** - * @LQ_CMD: using &struct iwl_lq_cmd - */ - LQ_CMD = 0x4e, - - /** - * @FW_PAGING_BLOCK_CMD: - * &struct iwl_fw_paging_cmd - */ - FW_PAGING_BLOCK_CMD = 0x4f, - - /** - * @SCAN_OFFLOAD_REQUEST_CMD: uses &struct iwl_scan_req_lmac - */ - SCAN_OFFLOAD_REQUEST_CMD = 0x51, - - /** - * @SCAN_OFFLOAD_ABORT_CMD: abort the scan - no further contents - */ - SCAN_OFFLOAD_ABORT_CMD = 0x52, - - /** - * @HOT_SPOT_CMD: uses &struct iwl_hs20_roc_req - */ - HOT_SPOT_CMD = 0x53, - - /** - * @SCAN_OFFLOAD_COMPLETE: - * notification, &struct iwl_periodic_scan_complete - */ - SCAN_OFFLOAD_COMPLETE = 0x6D, - - /** - * @SCAN_OFFLOAD_UPDATE_PROFILES_CMD: - * update scan offload (scheduled scan) profiles/blacklist/etc. 
- */ - SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E, - - /** - * @MATCH_FOUND_NOTIFICATION: scan match found - */ - MATCH_FOUND_NOTIFICATION = 0xd9, - - /** - * @SCAN_ITERATION_COMPLETE: - * uses &struct iwl_lmac_scan_complete_notif - */ - SCAN_ITERATION_COMPLETE = 0xe7, - - /* Phy */ - /** - * @PHY_CONFIGURATION_CMD: &struct iwl_phy_cfg_cmd - */ - PHY_CONFIGURATION_CMD = 0x6a, - - /** - * @CALIB_RES_NOTIF_PHY_DB: &struct iwl_calib_res_notif_phy_db - */ - CALIB_RES_NOTIF_PHY_DB = 0x6b, - - /** - * @PHY_DB_CMD: &struct iwl_phy_db_cmd - */ - PHY_DB_CMD = 0x6c, - - /** - * @TOF_CMD: &struct iwl_tof_config_cmd - */ - TOF_CMD = 0x10, - - /** - * @TOF_NOTIFICATION: &struct iwl_tof_gen_resp_cmd - */ - TOF_NOTIFICATION = 0x11, - - /** - * @POWER_TABLE_CMD: &struct iwl_device_power_cmd - */ - POWER_TABLE_CMD = 0x77, - - /** - * @PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION: - * &struct iwl_uapsd_misbehaving_ap_notif - */ - PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78, - - /** - * @LTR_CONFIG: &struct iwl_ltr_config_cmd - */ - LTR_CONFIG = 0xee, - - /** - * @REPLY_THERMAL_MNG_BACKOFF: - * Thermal throttling command - */ - REPLY_THERMAL_MNG_BACKOFF = 0x7e, - - /** - * @DC2DC_CONFIG_CMD: - * Set/Get DC2DC frequency tune - * Command is &struct iwl_dc2dc_config_cmd, - * response is &struct iwl_dc2dc_config_resp - */ - DC2DC_CONFIG_CMD = 0x83, - - /** - * @NVM_ACCESS_CMD: using &struct iwl_nvm_access_cmd - */ - NVM_ACCESS_CMD = 0x88, - - /** - * @BEACON_NOTIFICATION: &struct iwl_extended_beacon_notif - */ - BEACON_NOTIFICATION = 0x90, - - /** - * @BEACON_TEMPLATE_CMD: - * Uses one of &struct iwl_mac_beacon_cmd_v6, - * &struct iwl_mac_beacon_cmd_v7 or &struct iwl_mac_beacon_cmd - * depending on the device version. - */ - BEACON_TEMPLATE_CMD = 0x91, - /** - * @TX_ANT_CONFIGURATION_CMD: &struct iwl_tx_ant_cfg_cmd - */ - TX_ANT_CONFIGURATION_CMD = 0x98, - - /** - * @STATISTICS_CMD: - * one of &struct iwl_statistics_cmd, - * &struct iwl_notif_statistics_v11, - * &struct iwl_notif_statistics_v10, - * &struct iwl_notif_statistics_cdb - */ - STATISTICS_CMD = 0x9c, - - /** - * @STATISTICS_NOTIFICATION: - * one of &struct iwl_notif_statistics_v10, - * &struct iwl_notif_statistics_v11, - * &struct iwl_notif_statistics_cdb - */ - STATISTICS_NOTIFICATION = 0x9d, - - /** - * @EOSP_NOTIFICATION: - * Notify that a service period ended, - * &struct iwl_mvm_eosp_notification - */ - EOSP_NOTIFICATION = 0x9e, - - /** - * @REDUCE_TX_POWER_CMD: - * &struct iwl_dev_tx_power_cmd_v3 or &struct iwl_dev_tx_power_cmd - */ - REDUCE_TX_POWER_CMD = 0x9f, - - /** - * @CARD_STATE_NOTIFICATION: - * Card state (RF/CT kill) notification, - * uses &struct iwl_card_state_notif - */ - CARD_STATE_NOTIFICATION = 0xa1, - - /** - * @MISSED_BEACONS_NOTIFICATION: &struct iwl_missed_beacons_notif - */ - MISSED_BEACONS_NOTIFICATION = 0xa2, - - /** - * @MAC_PM_POWER_TABLE: using &struct iwl_mac_power_cmd - */ - MAC_PM_POWER_TABLE = 0xa9, - - /** - * @MFUART_LOAD_NOTIFICATION: &struct iwl_mfuart_load_notif - */ - MFUART_LOAD_NOTIFICATION = 0xb1, - - /** - * @RSS_CONFIG_CMD: &struct iwl_rss_config_cmd - */ - RSS_CONFIG_CMD = 0xb3, - - /** - * @REPLY_RX_PHY_CMD: &struct iwl_rx_phy_info - */ - REPLY_RX_PHY_CMD = 0xc0, - - /** - * @REPLY_RX_MPDU_CMD: - * &struct iwl_rx_mpdu_res_start or &struct iwl_rx_mpdu_desc - */ - REPLY_RX_MPDU_CMD = 0xc1, - - /** - * @FRAME_RELEASE: - * Frame release (reorder helper) notification, uses - * &struct iwl_frame_release - */ - FRAME_RELEASE = 0xc3, - - /** - * @BA_NOTIF: - * BlockAck notification, uses &struct 
iwl_mvm_compressed_ba_notif - * or &struct iwl_mvm_ba_notif depending on the HW - */ - BA_NOTIF = 0xc5, - - /* Location Aware Regulatory */ - /** - * @MCC_UPDATE_CMD: using &struct iwl_mcc_update_cmd - */ - MCC_UPDATE_CMD = 0xc8, - - /** - * @MCC_CHUB_UPDATE_CMD: using &struct iwl_mcc_chub_notif - */ - MCC_CHUB_UPDATE_CMD = 0xc9, - - /** - * @MARKER_CMD: trace marker command, uses &struct iwl_mvm_marker - */ - MARKER_CMD = 0xcb, - - /** - * @BT_PROFILE_NOTIFICATION: &struct iwl_bt_coex_profile_notif - */ - BT_PROFILE_NOTIFICATION = 0xce, - - /** - * @BT_CONFIG: &struct iwl_bt_coex_cmd - */ - BT_CONFIG = 0x9b, - - /** - * @BT_COEX_UPDATE_CORUN_LUT: - * &struct iwl_bt_coex_corun_lut_update_cmd - */ - BT_COEX_UPDATE_CORUN_LUT = 0x5b, - - /** - * @BT_COEX_UPDATE_REDUCED_TXP: - * &struct iwl_bt_coex_reduced_txp_update_cmd - */ - BT_COEX_UPDATE_REDUCED_TXP = 0x5c, - - /** - * @BT_COEX_CI: &struct iwl_bt_coex_ci_cmd - */ - BT_COEX_CI = 0x5d, - - /** - * @REPLY_SF_CFG_CMD: &struct iwl_sf_cfg_cmd - */ - REPLY_SF_CFG_CMD = 0xd1, - /** - * @REPLY_BEACON_FILTERING_CMD: &struct iwl_beacon_filter_cmd - */ - REPLY_BEACON_FILTERING_CMD = 0xd2, - - /** - * @DTS_MEASUREMENT_NOTIFICATION: - * &struct iwl_dts_measurement_notif_v1 or - * &struct iwl_dts_measurement_notif_v2 - */ - DTS_MEASUREMENT_NOTIFICATION = 0xdd, - - /** - * @LDBG_CONFIG_CMD: configure continuous trace recording - */ - LDBG_CONFIG_CMD = 0xf6, - - /** - * @DEBUG_LOG_MSG: Debugging log data from firmware - */ - DEBUG_LOG_MSG = 0xf7, - - /** - * @BCAST_FILTER_CMD: &struct iwl_bcast_filter_cmd - */ - BCAST_FILTER_CMD = 0xcf, - - /** - * @MCAST_FILTER_CMD: &struct iwl_mcast_filter_cmd - */ - MCAST_FILTER_CMD = 0xd0, - - /** - * @D3_CONFIG_CMD: &struct iwl_d3_manager_config - */ - D3_CONFIG_CMD = 0xd3, - - /** - * @PROT_OFFLOAD_CONFIG_CMD: Depending on firmware, uses one of - * &struct iwl_proto_offload_cmd_v1, &struct iwl_proto_offload_cmd_v2, - * &struct iwl_proto_offload_cmd_v3_small, - * &struct iwl_proto_offload_cmd_v3_large - */ - PROT_OFFLOAD_CONFIG_CMD = 0xd4, - - /** - * @OFFLOADS_QUERY_CMD: - * No data in command, response in &struct iwl_wowlan_status - */ - OFFLOADS_QUERY_CMD = 0xd5, - - /** - * @REMOTE_WAKE_CONFIG_CMD: &struct iwl_wowlan_remote_wake_config - */ - REMOTE_WAKE_CONFIG_CMD = 0xd6, - - /** - * @D0I3_END_CMD: End D0i3/D3 state, no command data - */ - D0I3_END_CMD = 0xed, - - /** - * @WOWLAN_PATTERNS: &struct iwl_wowlan_patterns_cmd - */ - WOWLAN_PATTERNS = 0xe0, - - /** - * @WOWLAN_CONFIGURATION: &struct iwl_wowlan_config_cmd - */ - WOWLAN_CONFIGURATION = 0xe1, - - /** - * @WOWLAN_TSC_RSC_PARAM: &struct iwl_wowlan_rsc_tsc_params_cmd - */ - WOWLAN_TSC_RSC_PARAM = 0xe2, - - /** - * @WOWLAN_TKIP_PARAM: &struct iwl_wowlan_tkip_params_cmd - */ - WOWLAN_TKIP_PARAM = 0xe3, - - /** - * @WOWLAN_KEK_KCK_MATERIAL: &struct iwl_wowlan_kek_kck_material_cmd - */ - WOWLAN_KEK_KCK_MATERIAL = 0xe4, - - /** - * @WOWLAN_GET_STATUSES: response in &struct iwl_wowlan_status - */ - WOWLAN_GET_STATUSES = 0xe5, - - /** - * @SCAN_OFFLOAD_PROFILES_QUERY_CMD: - * No command data, response is &struct iwl_scan_offload_profiles_query - */ - SCAN_OFFLOAD_PROFILES_QUERY_CMD = 0x56, -}; - -/* Please keep this enum *SORTED* by hex value. - * Needed for binary search, otherwise a warning will be triggered. 
- */ -enum iwl_mac_conf_subcmd_ids { - LINK_QUALITY_MEASUREMENT_CMD = 0x1, - LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF = 0xFE, - CHANNEL_SWITCH_NOA_NOTIF = 0xFF, -}; - -/** - * enum iwl_phy_ops_subcmd_ids - PHY group commands - */ -enum iwl_phy_ops_subcmd_ids { - /** - * @CMD_DTS_MEASUREMENT_TRIGGER_WIDE: - * Uses either &struct iwl_dts_measurement_cmd or - * &struct iwl_ext_dts_measurement_cmd - */ - CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0, - - /** - * @CTDP_CONFIG_CMD: &struct iwl_mvm_ctdp_cmd - */ - CTDP_CONFIG_CMD = 0x03, - - /** - * @TEMP_REPORTING_THRESHOLDS_CMD: &struct temp_report_ths_cmd - */ - TEMP_REPORTING_THRESHOLDS_CMD = 0x04, - - /** - * @GEO_TX_POWER_LIMIT: &struct iwl_geo_tx_power_profiles_cmd - */ - GEO_TX_POWER_LIMIT = 0x05, - - /** - * @CT_KILL_NOTIFICATION: &struct ct_kill_notif - */ - CT_KILL_NOTIFICATION = 0xFE, - - /** - * @DTS_MEASUREMENT_NOTIF_WIDE: - * &struct iwl_dts_measurement_notif_v1 or - * &struct iwl_dts_measurement_notif_v2 - */ - DTS_MEASUREMENT_NOTIF_WIDE = 0xFF, -}; - -/** - * enum iwl_system_subcmd_ids - system group command IDs - */ -enum iwl_system_subcmd_ids { - /** - * @SHARED_MEM_CFG_CMD: - * response in &struct iwl_shared_mem_cfg or - * &struct iwl_shared_mem_cfg_v2 - */ - SHARED_MEM_CFG_CMD = 0x0, - - /** - * @INIT_EXTENDED_CFG_CMD: &struct iwl_init_extended_cfg_cmd - */ - INIT_EXTENDED_CFG_CMD = 0x03, -}; - -/** - * enum iwl_data_path_subcmd_ids - data path group commands - */ -enum iwl_data_path_subcmd_ids { - /** - * @DQA_ENABLE_CMD: &struct iwl_dqa_enable_cmd - */ - DQA_ENABLE_CMD = 0x0, - - /** - * @UPDATE_MU_GROUPS_CMD: &struct iwl_mu_group_mgmt_cmd - */ - UPDATE_MU_GROUPS_CMD = 0x1, - - /** - * @TRIGGER_RX_QUEUES_NOTIF_CMD: &struct iwl_rxq_sync_cmd - */ - TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2, - - /** - * @STA_PM_NOTIF: &struct iwl_mvm_pm_state_notification - */ - STA_PM_NOTIF = 0xFD, - - /** - * @MU_GROUP_MGMT_NOTIF: &struct iwl_mu_group_mgmt_notif - */ - MU_GROUP_MGMT_NOTIF = 0xFE, - - /** - * @RX_QUEUES_NOTIFICATION: &struct iwl_rxq_sync_notification - */ - RX_QUEUES_NOTIFICATION = 0xFF, -}; - -/** - * enum iwl_prot_offload_subcmd_ids - protocol offload commands - */ -enum iwl_prot_offload_subcmd_ids { - /** - * @STORED_BEACON_NTF: &struct iwl_stored_beacon_notif - */ - STORED_BEACON_NTF = 0xFF, -}; - -/** - * enum iwl_regulatory_and_nvm_subcmd_ids - regulatory/NVM commands - */ -enum iwl_regulatory_and_nvm_subcmd_ids { - /** - * @NVM_ACCESS_COMPLETE: &struct iwl_nvm_access_complete_cmd - */ - NVM_ACCESS_COMPLETE = 0x0, - - /** - * @NVM_GET_INFO: - * Command is &struct iwl_nvm_get_info, - * response is &struct iwl_nvm_get_info_rsp - */ - NVM_GET_INFO = 0x2, -}; - -/** - * enum iwl_debug_cmds - debug commands - */ -enum iwl_debug_cmds { - /** - * @LMAC_RD_WR: - * LMAC memory read/write, using &struct iwl_dbg_mem_access_cmd and - * &struct iwl_dbg_mem_access_rsp - */ - LMAC_RD_WR = 0x0, - /** - * @UMAC_RD_WR: - * UMAC memory read/write, using &struct iwl_dbg_mem_access_cmd and - * &struct iwl_dbg_mem_access_rsp - */ - UMAC_RD_WR = 0x1, - /** - * @MFU_ASSERT_DUMP_NTF: - * &struct iwl_mfu_assert_dump_notif - */ - MFU_ASSERT_DUMP_NTF = 0xFE, -}; - -/** - * enum iwl_mvm_command_groups - command groups for the firmware - * @LEGACY_GROUP: legacy group, uses command IDs from &enum iwl_legacy_cmds - * @LONG_GROUP: legacy group with long header, also uses command IDs - * from &enum iwl_legacy_cmds - * @SYSTEM_GROUP: system group, uses command IDs from - * &enum iwl_system_subcmd_ids - * @MAC_CONF_GROUP: MAC configuration group, uses command IDs from - 
* &enum iwl_mac_conf_subcmd_ids - * @PHY_OPS_GROUP: PHY operations group, uses command IDs from - * &enum iwl_phy_ops_subcmd_ids - * @DATA_PATH_GROUP: data path group, uses command IDs from - * &enum iwl_data_path_subcmd_ids - * @NAN_GROUP: NAN group, uses command IDs from &enum iwl_nan_subcmd_ids - * @TOF_GROUP: TOF group, uses command IDs from &enum iwl_tof_subcmd_ids - * @PROT_OFFLOAD_GROUP: protocol offload group, uses command IDs from - * &enum iwl_prot_offload_subcmd_ids - * @REGULATORY_AND_NVM_GROUP: regulatory/NVM group, uses command IDs from - * &enum iwl_regulatory_and_nvm_subcmd_ids - * @DEBUG_GROUP: Debug group, uses command IDs from &enum iwl_debug_cmds - */ -enum iwl_mvm_command_groups { - LEGACY_GROUP = 0x0, - LONG_GROUP = 0x1, - SYSTEM_GROUP = 0x2, - MAC_CONF_GROUP = 0x3, - PHY_OPS_GROUP = 0x4, - DATA_PATH_GROUP = 0x5, - PROT_OFFLOAD_GROUP = 0xb, - REGULATORY_AND_NVM_GROUP = 0xc, - DEBUG_GROUP = 0xf, -}; - -/** - * struct iwl_cmd_response - generic response struct for most commands - * @status: status of the command asked, changes for each one - */ -struct iwl_cmd_response { - __le32 status; -}; - -/* - * struct iwl_dqa_enable_cmd - * @cmd_queue: the TXQ number of the command queue - */ -struct iwl_dqa_enable_cmd { - __le32 cmd_queue; -} __packed; /* DQA_CONTROL_CMD_API_S_VER_1 */ - -/* - * struct iwl_tx_ant_cfg_cmd - * @valid: valid antenna configuration - */ -struct iwl_tx_ant_cfg_cmd { - __le32 valid; -} __packed; - -/** - * struct iwl_calib_ctrl - Calibration control struct. - * Sent as part of the phy configuration command. - * @flow_trigger: bitmap for which calibrations to perform according to - * flow triggers, using &enum iwl_calib_cfg - * @event_trigger: bitmap for which calibrations to perform according to - * event triggers, using &enum iwl_calib_cfg - */ -struct iwl_calib_ctrl { - __le32 flow_trigger; - __le32 event_trigger; -} __packed; - -/* This enum defines the bitmap of various calibrations to enable in both - * init ucode and runtime ucode through CALIBRATION_CFG_CMD. 
- */ -enum iwl_calib_cfg { - IWL_CALIB_CFG_XTAL_IDX = BIT(0), - IWL_CALIB_CFG_TEMPERATURE_IDX = BIT(1), - IWL_CALIB_CFG_VOLTAGE_READ_IDX = BIT(2), - IWL_CALIB_CFG_PAPD_IDX = BIT(3), - IWL_CALIB_CFG_TX_PWR_IDX = BIT(4), - IWL_CALIB_CFG_DC_IDX = BIT(5), - IWL_CALIB_CFG_BB_FILTER_IDX = BIT(6), - IWL_CALIB_CFG_LO_LEAKAGE_IDX = BIT(7), - IWL_CALIB_CFG_TX_IQ_IDX = BIT(8), - IWL_CALIB_CFG_TX_IQ_SKEW_IDX = BIT(9), - IWL_CALIB_CFG_RX_IQ_IDX = BIT(10), - IWL_CALIB_CFG_RX_IQ_SKEW_IDX = BIT(11), - IWL_CALIB_CFG_SENSITIVITY_IDX = BIT(12), - IWL_CALIB_CFG_CHAIN_NOISE_IDX = BIT(13), - IWL_CALIB_CFG_DISCONNECTED_ANT_IDX = BIT(14), - IWL_CALIB_CFG_ANT_COUPLING_IDX = BIT(15), - IWL_CALIB_CFG_DAC_IDX = BIT(16), - IWL_CALIB_CFG_ABS_IDX = BIT(17), - IWL_CALIB_CFG_AGC_IDX = BIT(18), -}; - -/** - * struct iwl_phy_cfg_cmd - Phy configuration command - * @phy_cfg: PHY configuration value, uses &enum iwl_fw_phy_cfg - * @calib_control: calibration control data - */ -struct iwl_phy_cfg_cmd { - __le32 phy_cfg; - struct iwl_calib_ctrl calib_control; -} __packed; - -#define PHY_CFG_RADIO_TYPE (BIT(0) | BIT(1)) -#define PHY_CFG_RADIO_STEP (BIT(2) | BIT(3)) -#define PHY_CFG_RADIO_DASH (BIT(4) | BIT(5)) -#define PHY_CFG_PRODUCT_NUMBER (BIT(6) | BIT(7)) -#define PHY_CFG_TX_CHAIN_A BIT(8) -#define PHY_CFG_TX_CHAIN_B BIT(9) -#define PHY_CFG_TX_CHAIN_C BIT(10) -#define PHY_CFG_RX_CHAIN_A BIT(12) -#define PHY_CFG_RX_CHAIN_B BIT(13) -#define PHY_CFG_RX_CHAIN_C BIT(14) - - -/** - * enum iwl_nvm_access_op - NVM access opcode - * @IWL_NVM_READ: read NVM - * @IWL_NVM_WRITE: write NVM - */ -enum iwl_nvm_access_op { - IWL_NVM_READ = 0, - IWL_NVM_WRITE = 1, -}; - -/** - * enum iwl_nvm_access_target - target of the NVM_ACCESS_CMD - * @NVM_ACCESS_TARGET_CACHE: access the cache - * @NVM_ACCESS_TARGET_OTP: access the OTP - * @NVM_ACCESS_TARGET_EEPROM: access the EEPROM - */ -enum iwl_nvm_access_target { - NVM_ACCESS_TARGET_CACHE = 0, - NVM_ACCESS_TARGET_OTP = 1, - NVM_ACCESS_TARGET_EEPROM = 2, -}; - -/** - * enum iwl_nvm_section_type - section types for NVM_ACCESS_CMD - * @NVM_SECTION_TYPE_SW: software section - * @NVM_SECTION_TYPE_REGULATORY: regulatory section - * @NVM_SECTION_TYPE_CALIBRATION: calibration section - * @NVM_SECTION_TYPE_PRODUCTION: production section - * @NVM_SECTION_TYPE_MAC_OVERRIDE: MAC override section - * @NVM_SECTION_TYPE_PHY_SKU: PHY SKU section - * @NVM_MAX_NUM_SECTIONS: number of sections - */ -enum iwl_nvm_section_type { - NVM_SECTION_TYPE_SW = 1, - NVM_SECTION_TYPE_REGULATORY = 3, - NVM_SECTION_TYPE_CALIBRATION = 4, - NVM_SECTION_TYPE_PRODUCTION = 5, - NVM_SECTION_TYPE_MAC_OVERRIDE = 11, - NVM_SECTION_TYPE_PHY_SKU = 12, - NVM_MAX_NUM_SECTIONS = 13, -}; - -/** - * struct iwl_nvm_access_cmd - Request the device to send an NVM section - * @op_code: &enum iwl_nvm_access_op - * @target: &enum iwl_nvm_access_target - * @type: &enum iwl_nvm_section_type - * @offset: offset in bytes into the section - * @length: in bytes, to read/write - * @data: if write operation, the data to write. On read its empty - */ -struct iwl_nvm_access_cmd { - u8 op_code; - u8 target; - __le16 type; - __le16 offset; - __le16 length; - u8 data[]; -} __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */ - -#define NUM_OF_FW_PAGING_BLOCKS 33 /* 32 for data and 1 block for CSS */ - -/** - * struct iwl_fw_paging_cmd - paging layout - * - * (FW_PAGING_BLOCK_CMD = 0x4f) - * - * Send to FW the paging layout in the driver. 
- * - * @flags: various flags for the command - * @block_size: the block size in powers of 2 - * @block_num: number of blocks specified in the command. - * @device_phy_addr: virtual addresses from device side - */ -struct iwl_fw_paging_cmd { - __le32 flags; - __le32 block_size; - __le32 block_num; - __le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS]; -} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */ - -/* - * Fw items ID's - * - * @IWL_FW_ITEM_ID_PAGING: Address of the pages that the FW will upload - * download - */ -enum iwl_fw_item_id { - IWL_FW_ITEM_ID_PAGING = 3, -}; - -/* - * struct iwl_fw_get_item_cmd - get an item from the fw - */ -struct iwl_fw_get_item_cmd { - __le32 item_id; -} __packed; /* FW_GET_ITEM_CMD_API_S_VER_1 */ - -#define CONT_REC_COMMAND_SIZE 80 -#define ENABLE_CONT_RECORDING 0x15 -#define DISABLE_CONT_RECORDING 0x16 - -/* - * struct iwl_continuous_record_mode - recording mode - */ -struct iwl_continuous_record_mode { - __le16 enable_recording; -} __packed; - -/* - * struct iwl_continuous_record_cmd - enable/disable continuous recording - */ -struct iwl_continuous_record_cmd { - struct iwl_continuous_record_mode record_mode; - u8 pad[CONT_REC_COMMAND_SIZE - - sizeof(struct iwl_continuous_record_mode)]; -} __packed; - -struct iwl_fw_get_item_resp { - __le32 item_id; - __le32 item_byte_cnt; - __le32 item_val; -} __packed; /* FW_GET_ITEM_RSP_S_VER_1 */ - -/** - * struct iwl_nvm_access_resp_ver2 - response to NVM_ACCESS_CMD - * @offset: offset in bytes into the section - * @length: in bytes, either how much was written or read - * @type: NVM_SECTION_TYPE_* - * @status: 0 for success, fail otherwise - * @data: if read operation, the data returned. Empty on write. - */ -struct iwl_nvm_access_resp { - __le16 offset; - __le16 length; - __le16 type; - __le16 status; - u8 data[]; -} __packed; /* NVM_ACCESS_CMD_RESP_API_S_VER_2 */ - -/* MVM_ALIVE 0x1 */ - -/* alive response is_valid values */ -#define ALIVE_RESP_UCODE_OK BIT(0) -#define ALIVE_RESP_RFKILL BIT(1) - -/* alive response ver_type values */ -enum { - FW_TYPE_HW = 0, - FW_TYPE_PROT = 1, - FW_TYPE_AP = 2, - FW_TYPE_WOWLAN = 3, - FW_TYPE_TIMING = 4, - FW_TYPE_WIPAN = 5 -}; - -/* alive response ver_subtype values */ -enum { - FW_SUBTYPE_FULL_FEATURE = 0, - FW_SUBTYPE_BOOTSRAP = 1, /* Not valid */ - FW_SUBTYPE_REDUCED = 2, - FW_SUBTYPE_ALIVE_ONLY = 3, - FW_SUBTYPE_WOWLAN = 4, - FW_SUBTYPE_AP_SUBTYPE = 5, - FW_SUBTYPE_WIPAN = 6, - FW_SUBTYPE_INITIALIZE = 9 -}; - -#define IWL_ALIVE_STATUS_ERR 0xDEAD -#define IWL_ALIVE_STATUS_OK 0xCAFE - -#define IWL_ALIVE_FLG_RFKILL BIT(0) - -struct iwl_lmac_alive { - __le32 ucode_minor; - __le32 ucode_major; - u8 ver_subtype; - u8 ver_type; - u8 mac; - u8 opt; - __le32 timestamp; - __le32 error_event_table_ptr; /* SRAM address for error log */ - __le32 log_event_table_ptr; /* SRAM address for LMAC event log */ - __le32 cpu_register_ptr; - __le32 dbgm_config_ptr; - __le32 alive_counter_ptr; - __le32 scd_base_ptr; /* SRAM address for SCD */ - __le32 st_fwrd_addr; /* pointer to Store and forward */ - __le32 st_fwrd_size; -} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_3 */ - -struct iwl_umac_alive { - __le32 umac_minor; /* UMAC version: minor */ - __le32 umac_major; /* UMAC version: major */ - __le32 error_info_addr; /* SRAM address for UMAC error log */ - __le32 dbg_print_buff_addr; -} __packed; /* UMAC_ALIVE_DATA_API_S_VER_2 */ - -struct mvm_alive_resp_v3 { - __le16 status; - __le16 flags; - struct iwl_lmac_alive lmac_data; - struct iwl_umac_alive umac_data; -} __packed; /* ALIVE_RES_API_S_VER_3 */ - 
-struct mvm_alive_resp { - __le16 status; - __le16 flags; - struct iwl_lmac_alive lmac_data[2]; - struct iwl_umac_alive umac_data; -} __packed; /* ALIVE_RES_API_S_VER_4 */ - -/* Error response/notification */ -enum { - FW_ERR_UNKNOWN_CMD = 0x0, - FW_ERR_INVALID_CMD_PARAM = 0x1, - FW_ERR_SERVICE = 0x2, - FW_ERR_ARC_MEMORY = 0x3, - FW_ERR_ARC_CODE = 0x4, - FW_ERR_WATCH_DOG = 0x5, - FW_ERR_WEP_GRP_KEY_INDX = 0x10, - FW_ERR_WEP_KEY_SIZE = 0x11, - FW_ERR_OBSOLETE_FUNC = 0x12, - FW_ERR_UNEXPECTED = 0xFE, - FW_ERR_FATAL = 0xFF -}; - -/** - * struct iwl_error_resp - FW error indication - * ( REPLY_ERROR = 0x2 ) - * @error_type: one of FW_ERR_* - * @cmd_id: the command ID for which the error occured - * @reserved1: reserved - * @bad_cmd_seq_num: sequence number of the erroneous command - * @error_service: which service created the error, applicable only if - * error_type = 2, otherwise 0 - * @timestamp: TSF in usecs. - */ -struct iwl_error_resp { - __le32 error_type; - u8 cmd_id; - u8 reserved1; - __le16 bad_cmd_seq_num; - __le32 error_service; - __le64 timestamp; -} __packed; - - -/* Common PHY, MAC and Bindings definitions */ -#define MAX_MACS_IN_BINDING (3) -#define MAX_BINDINGS (4) - -/** - * enum iwl_mvm_id_and_color - ID and color fields in context dword - * @FW_CTXT_ID_POS: position of the ID - * @FW_CTXT_ID_MSK: mask of the ID - * @FW_CTXT_COLOR_POS: position of the color - * @FW_CTXT_COLOR_MSK: mask of the color - * @FW_CTXT_INVALID: value used to indicate unused/invalid - */ -enum iwl_mvm_id_and_color { - FW_CTXT_ID_POS = 0, - FW_CTXT_ID_MSK = 0xff << FW_CTXT_ID_POS, - FW_CTXT_COLOR_POS = 8, - FW_CTXT_COLOR_MSK = 0xff << FW_CTXT_COLOR_POS, - FW_CTXT_INVALID = 0xffffffff, -}; - -#define FW_CMD_ID_AND_COLOR(_id, _color) ((_id << FW_CTXT_ID_POS) |\ - (_color << FW_CTXT_COLOR_POS)) - -/* Possible actions on PHYs, MACs and Bindings */ -enum iwl_phy_ctxt_action { - FW_CTXT_ACTION_STUB = 0, - FW_CTXT_ACTION_ADD, - FW_CTXT_ACTION_MODIFY, - FW_CTXT_ACTION_REMOVE, - FW_CTXT_ACTION_NUM -}; /* COMMON_CONTEXT_ACTION_API_E_VER_1 */ - -/* Time Events */ - -/* Time Event types, according to MAC type */ -enum iwl_time_event_type { - /* BSS Station Events */ - TE_BSS_STA_AGGRESSIVE_ASSOC, - TE_BSS_STA_ASSOC, - TE_BSS_EAP_DHCP_PROT, - TE_BSS_QUIET_PERIOD, - - /* P2P Device Events */ - TE_P2P_DEVICE_DISCOVERABLE, - TE_P2P_DEVICE_LISTEN, - TE_P2P_DEVICE_ACTION_SCAN, - TE_P2P_DEVICE_FULL_SCAN, - - /* P2P Client Events */ - TE_P2P_CLIENT_AGGRESSIVE_ASSOC, - TE_P2P_CLIENT_ASSOC, - TE_P2P_CLIENT_QUIET_PERIOD, - - /* P2P GO Events */ - TE_P2P_GO_ASSOC_PROT, - TE_P2P_GO_REPETITIVET_NOA, - TE_P2P_GO_CT_WINDOW, - - /* WiDi Sync Events */ - TE_WIDI_TX_SYNC, - - /* Channel Switch NoA */ - TE_CHANNEL_SWITCH_PERIOD, - - TE_MAX -}; /* MAC_EVENT_TYPE_API_E_VER_1 */ - - - -/* Time event - defines for command API v1 */ - -/* - * @TE_V1_FRAG_NONE: fragmentation of the time event is NOT allowed. - * @TE_V1_FRAG_SINGLE: fragmentation of the time event is allowed, but only - * the first fragment is scheduled. - * @TE_V1_FRAG_DUAL: fragmentation of the time event is allowed, but only - * the first 2 fragments are scheduled. - * @TE_V1_FRAG_ENDLESS: fragmentation of the time event is allowed, and any - * number of fragments are valid. - * - * Other than the constant defined above, specifying a fragmentation value 'x' - * means that the event can be fragmented but only the first 'x' will be - * scheduled. 
- */ -enum { - TE_V1_FRAG_NONE = 0, - TE_V1_FRAG_SINGLE = 1, - TE_V1_FRAG_DUAL = 2, - TE_V1_FRAG_ENDLESS = 0xffffffff -}; - -/* If a Time Event can be fragmented, this is the max number of fragments */ -#define TE_V1_FRAG_MAX_MSK 0x0fffffff -/* Repeat the time event endlessly (until removed) */ -#define TE_V1_REPEAT_ENDLESS 0xffffffff -/* If a Time Event has bounded repetitions, this is the maximal value */ -#define TE_V1_REPEAT_MAX_MSK_V1 0x0fffffff - -/* Time Event dependencies: none, on another TE, or in a specific time */ -enum { - TE_V1_INDEPENDENT = 0, - TE_V1_DEP_OTHER = BIT(0), - TE_V1_DEP_TSF = BIT(1), - TE_V1_EVENT_SOCIOPATHIC = BIT(2), -}; /* MAC_EVENT_DEPENDENCY_POLICY_API_E_VER_2 */ - -/* - * @TE_V1_NOTIF_NONE: no notifications - * @TE_V1_NOTIF_HOST_EVENT_START: request/receive notification on event start - * @TE_V1_NOTIF_HOST_EVENT_END:request/receive notification on event end - * @TE_V1_NOTIF_INTERNAL_EVENT_START: internal FW use - * @TE_V1_NOTIF_INTERNAL_EVENT_END: internal FW use. - * @TE_V1_NOTIF_HOST_FRAG_START: request/receive notification on frag start - * @TE_V1_NOTIF_HOST_FRAG_END:request/receive notification on frag end - * @TE_V1_NOTIF_INTERNAL_FRAG_START: internal FW use. - * @TE_V1_NOTIF_INTERNAL_FRAG_END: internal FW use. - * - * Supported Time event notifications configuration. - * A notification (both event and fragment) includes a status indicating weather - * the FW was able to schedule the event or not. For fragment start/end - * notification the status is always success. There is no start/end fragment - * notification for monolithic events. - */ -enum { - TE_V1_NOTIF_NONE = 0, - TE_V1_NOTIF_HOST_EVENT_START = BIT(0), - TE_V1_NOTIF_HOST_EVENT_END = BIT(1), - TE_V1_NOTIF_INTERNAL_EVENT_START = BIT(2), - TE_V1_NOTIF_INTERNAL_EVENT_END = BIT(3), - TE_V1_NOTIF_HOST_FRAG_START = BIT(4), - TE_V1_NOTIF_HOST_FRAG_END = BIT(5), - TE_V1_NOTIF_INTERNAL_FRAG_START = BIT(6), - TE_V1_NOTIF_INTERNAL_FRAG_END = BIT(7), -}; /* MAC_EVENT_ACTION_API_E_VER_2 */ - -/* Time event - defines for command API */ - -/* - * @TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed. - * @TE_V2_FRAG_SINGLE: fragmentation of the time event is allowed, but only - * the first fragment is scheduled. - * @TE_V2_FRAG_DUAL: fragmentation of the time event is allowed, but only - * the first 2 fragments are scheduled. - * @TE_V2_FRAG_ENDLESS: fragmentation of the time event is allowed, and any - * number of fragments are valid. - * - * Other than the constant defined above, specifying a fragmentation value 'x' - * means that the event can be fragmented but only the first 'x' will be - * scheduled. - */ -enum { - TE_V2_FRAG_NONE = 0, - TE_V2_FRAG_SINGLE = 1, - TE_V2_FRAG_DUAL = 2, - TE_V2_FRAG_MAX = 0xfe, - TE_V2_FRAG_ENDLESS = 0xff -}; - -/* Repeat the time event endlessly (until removed) */ -#define TE_V2_REPEAT_ENDLESS 0xff -/* If a Time Event has bounded repetitions, this is the maximal value */ -#define TE_V2_REPEAT_MAX 0xfe - -#define TE_V2_PLACEMENT_POS 12 -#define TE_V2_ABSENCE_POS 15 - -/** - * enum iwl_time_event_policy - Time event policy values - * A notification (both event and fragment) includes a status indicating weather - * the FW was able to schedule the event or not. For fragment start/end - * notification the status is always success. There is no start/end fragment - * notification for monolithic events. 
- *
- * @TE_V2_DEFAULT_POLICY: independent, social, present, unnoticeable
- * @TE_V2_NOTIF_HOST_EVENT_START: request/receive notification on event start
- * @TE_V2_NOTIF_HOST_EVENT_END: request/receive notification on event end
- * @TE_V2_NOTIF_INTERNAL_EVENT_START: internal FW use
- * @TE_V2_NOTIF_INTERNAL_EVENT_END: internal FW use.
- * @TE_V2_NOTIF_HOST_FRAG_START: request/receive notification on frag start
- * @TE_V2_NOTIF_HOST_FRAG_END: request/receive notification on frag end
- * @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use.
- * @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use.
- * @T2_V2_START_IMMEDIATELY: start time event immediately
- * @TE_V2_DEP_OTHER: depends on another time event
- * @TE_V2_DEP_TSF: depends on a specific time
- * @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of the same MAC
- * @TE_V2_ABSENCE: are we present or absent during the Time Event.
- */
-enum iwl_time_event_policy {
- TE_V2_DEFAULT_POLICY = 0x0,
-
- /* notifications (event start/stop, fragment start/stop) */
- TE_V2_NOTIF_HOST_EVENT_START = BIT(0),
- TE_V2_NOTIF_HOST_EVENT_END = BIT(1),
- TE_V2_NOTIF_INTERNAL_EVENT_START = BIT(2),
- TE_V2_NOTIF_INTERNAL_EVENT_END = BIT(3),
-
- TE_V2_NOTIF_HOST_FRAG_START = BIT(4),
- TE_V2_NOTIF_HOST_FRAG_END = BIT(5),
- TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6),
- TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7),
- T2_V2_START_IMMEDIATELY = BIT(11),
-
- /* placement characteristics */
- TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS),
- TE_V2_DEP_TSF = BIT(TE_V2_PLACEMENT_POS + 1),
- TE_V2_EVENT_SOCIOPATHIC = BIT(TE_V2_PLACEMENT_POS + 2),
-
- /* are we present or absent during the Time Event. */
- TE_V2_ABSENCE = BIT(TE_V2_ABSENCE_POS),
-};
-
-/**
- * struct iwl_time_event_cmd - configuring Time Events
- * with struct MAC_TIME_EVENT_DATA_API_S_VER_2 (a version 1 layout also
- * exists; the version in use is determined by IWL_UCODE_TLV_FLAGS)
- * ( TIME_EVENT_CMD = 0x29 )
- * @id_and_color: ID and color of the relevant MAC,
- * &enum iwl_mvm_id_and_color
- * @action: action to perform, one of &enum iwl_phy_ctxt_action
- * @id: this field has two meanings, depending on the action:
- * If the action is ADD, then it means the type of event to add.
- * For all other actions it is the unique event ID assigned when the
- * event was added by the FW.
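For illustration, a caller builds the command's 16-bit policy field by ORing the desired bits from the enum above. The following standalone sketch (hypothetical values, not taken from the driver) composes a policy requesting host start/end notifications and an immediate start:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

#define TE_V2_NOTIF_HOST_EVENT_START BIT(0)
#define TE_V2_NOTIF_HOST_EVENT_END   BIT(1)
#define T2_V2_START_IMMEDIATELY      BIT(11)

int main(void)
{
	/* ask for start/end notifications and immediate start; the driver
	 * would store this little-endian in the command's policy field */
	uint16_t policy = TE_V2_NOTIF_HOST_EVENT_START |
			  TE_V2_NOTIF_HOST_EVENT_END |
			  T2_V2_START_IMMEDIATELY;

	printf("policy = 0x%04x\n", policy); /* 0x0803 */
	return 0;
}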
- * @apply_time: When to start the Time Event (in GP2) - * @max_delay: maximum delay to event's start (apply time), in TU - * @depends_on: the unique ID of the event we depend on (if any) - * @interval: interval between repetitions, in TU - * @duration: duration of event in TU - * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS - * @max_frags: maximal number of fragments the Time Event can be divided to - * @policy: defines whether uCode shall notify the host or other uCode modules - * on event and/or fragment start and/or end - * using one of TE_INDEPENDENT, TE_DEP_OTHER, TE_DEP_TSF - * TE_EVENT_SOCIOPATHIC - * using TE_ABSENCE and using TE_NOTIF_*, - * &enum iwl_time_event_policy - */ -struct iwl_time_event_cmd { - /* COMMON_INDEX_HDR_API_S_VER_1 */ - __le32 id_and_color; - __le32 action; - __le32 id; - /* MAC_TIME_EVENT_DATA_API_S_VER_2 */ - __le32 apply_time; - __le32 max_delay; - __le32 depends_on; - __le32 interval; - __le32 duration; - u8 repeat; - u8 max_frags; - __le16 policy; -} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_2 */ - -/** - * struct iwl_time_event_resp - response structure to iwl_time_event_cmd - * @status: bit 0 indicates success, all others specify errors - * @id: the Time Event type - * @unique_id: the unique ID assigned (in ADD) or given (others) to the TE - * @id_and_color: ID and color of the relevant MAC, - * &enum iwl_mvm_id_and_color - */ -struct iwl_time_event_resp { - __le32 status; - __le32 id; - __le32 unique_id; - __le32 id_and_color; -} __packed; /* MAC_TIME_EVENT_RSP_API_S_VER_1 */ - -/** - * struct iwl_time_event_notif - notifications of time event start/stop - * ( TIME_EVENT_NOTIFICATION = 0x2a ) - * @timestamp: action timestamp in GP2 - * @session_id: session's unique id - * @unique_id: unique id of the Time Event itself - * @id_and_color: ID and color of the relevant MAC - * @action: &enum iwl_time_event_policy - * @status: true if scheduled, false otherwise (not executed) - */ -struct iwl_time_event_notif { - __le32 timestamp; - __le32 session_id; - __le32 unique_id; - __le32 id_and_color; - __le32 action; - __le32 status; -} __packed; /* MAC_TIME_EVENT_NTFY_API_S_VER_1 */ - - -/* Bindings and Time Quota */ - -/** - * struct iwl_binding_cmd_v1 - configuring bindings - * ( BINDING_CONTEXT_CMD = 0x2b ) - * @id_and_color: ID and color of the relevant Binding, - * &enum iwl_mvm_id_and_color - * @action: action to perform, one of FW_CTXT_ACTION_* - * @macs: array of MAC id and colors which belong to the binding, - * &enum iwl_mvm_id_and_color - * @phy: PHY id and color which belongs to the binding, - * &enum iwl_mvm_id_and_color - */ -struct iwl_binding_cmd_v1 { - /* COMMON_INDEX_HDR_API_S_VER_1 */ - __le32 id_and_color; - __le32 action; - /* BINDING_DATA_API_S_VER_1 */ - __le32 macs[MAX_MACS_IN_BINDING]; - __le32 phy; -} __packed; /* BINDING_CMD_API_S_VER_1 */ - -/** - * struct iwl_binding_cmd - configuring bindings - * ( BINDING_CONTEXT_CMD = 0x2b ) - * @id_and_color: ID and color of the relevant Binding, - * &enum iwl_mvm_id_and_color - * @action: action to perform, one of FW_CTXT_ACTION_* - * @macs: array of MAC id and colors which belong to the binding - * &enum iwl_mvm_id_and_color - * @phy: PHY id and color which belongs to the binding - * &enum iwl_mvm_id_and_color - * @lmac_id: the lmac id the binding belongs to - */ -struct iwl_binding_cmd { - /* COMMON_INDEX_HDR_API_S_VER_1 */ - __le32 id_and_color; - __le32 action; - /* BINDING_DATA_API_S_VER_1 */ - __le32 macs[MAX_MACS_IN_BINDING]; - __le32 phy; - __le32 lmac_id; -} __packed; 
/* BINDING_CMD_API_S_VER_2 */
-
-#define IWL_BINDING_CMD_SIZE_V1 sizeof(struct iwl_binding_cmd_v1)
-#define IWL_LMAC_24G_INDEX 0
-#define IWL_LMAC_5G_INDEX 1
-
-/* The maximal number of fragments in the FW's schedule session */
-#define IWL_MVM_MAX_QUOTA 128
-
-/**
- * struct iwl_time_quota_data - configuration of time quota per binding
- * @id_and_color: ID and color of the relevant Binding,
- * &enum iwl_mvm_id_and_color
- * @quota: absolute time quota in TU. The scheduler will try to divide the
- * remaining quota (after Time Events) according to this quota.
- * @max_duration: max uninterrupted context duration in TU
- */
-struct iwl_time_quota_data {
- __le32 id_and_color;
- __le32 quota;
- __le32 max_duration;
-} __packed; /* TIME_QUOTA_DATA_API_S_VER_1 */
-
-/**
- * struct iwl_time_quota_cmd - configuration of time quota between bindings
- * ( TIME_QUOTA_CMD = 0x2c )
- * @quotas: allocations per binding
- * Note: on non-CDB the fourth one is the auxiliary mac and is
- * essentially zero.
- * On CDB the fourth one is a regular binding.
- */
-struct iwl_time_quota_cmd {
- struct iwl_time_quota_data quotas[MAX_BINDINGS];
-} __packed; /* TIME_QUOTA_ALLOCATION_CMD_API_S_VER_1 */
-
-
-/* PHY context */
-
-/* Supported bands */
-#define PHY_BAND_5 (0)
-#define PHY_BAND_24 (1)
-
-/* Supported channel width, varies if there is VHT support */
-#define PHY_VHT_CHANNEL_MODE20 (0x0)
-#define PHY_VHT_CHANNEL_MODE40 (0x1)
-#define PHY_VHT_CHANNEL_MODE80 (0x2)
-#define PHY_VHT_CHANNEL_MODE160 (0x3)
-
-/*
- * Control channel position:
- * For legacy set bit means upper channel, otherwise lower.
- * For VHT - bit-2 marks if the control is lower/upper relative to center-freq
- * bits-1:0 mark the distance from the center freq. for 20Mhz, offset is 0.
- * center_freq
- * |
- * 40Mhz |_______|_______|
- * 80Mhz |_______|_______|_______|_______|
- * 160Mhz |_______|_______|_______|_______|_______|_______|_______|_______|
- * code 011 010 001 000 | 100 101 110 111
- */
-#define PHY_VHT_CTRL_POS_1_BELOW (0x0)
-#define PHY_VHT_CTRL_POS_2_BELOW (0x1)
-#define PHY_VHT_CTRL_POS_3_BELOW (0x2)
-#define PHY_VHT_CTRL_POS_4_BELOW (0x3)
-#define PHY_VHT_CTRL_POS_1_ABOVE (0x4)
-#define PHY_VHT_CTRL_POS_2_ABOVE (0x5)
-#define PHY_VHT_CTRL_POS_3_ABOVE (0x6)
-#define PHY_VHT_CTRL_POS_4_ABOVE (0x7)
-
-/*
- * @band: PHY_BAND_*
- * @channel: channel number
- * @width: PHY_[VHT|LEGACY]_CHANNEL_*
- * @ctrl_pos: PHY_[VHT|LEGACY]_CTRL_*
- */
-struct iwl_fw_channel_info {
- u8 band;
- u8 channel;
- u8 width;
- u8 ctrl_pos;
-} __packed;
-
-#define PHY_RX_CHAIN_DRIVER_FORCE_POS (0)
-#define PHY_RX_CHAIN_DRIVER_FORCE_MSK \
- (0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS)
-#define PHY_RX_CHAIN_VALID_POS (1)
-#define PHY_RX_CHAIN_VALID_MSK \
- (0x7 << PHY_RX_CHAIN_VALID_POS)
-#define PHY_RX_CHAIN_FORCE_SEL_POS (4)
-#define PHY_RX_CHAIN_FORCE_SEL_MSK \
- (0x7 << PHY_RX_CHAIN_FORCE_SEL_POS)
-#define PHY_RX_CHAIN_FORCE_MIMO_SEL_POS (7)
-#define PHY_RX_CHAIN_FORCE_MIMO_SEL_MSK \
- (0x7 << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS)
-#define PHY_RX_CHAIN_CNT_POS (10)
-#define PHY_RX_CHAIN_CNT_MSK \
- (0x3 << PHY_RX_CHAIN_CNT_POS)
-#define PHY_RX_CHAIN_MIMO_CNT_POS (12)
-#define PHY_RX_CHAIN_MIMO_CNT_MSK \
- (0x3 << PHY_RX_CHAIN_MIMO_CNT_POS)
-#define PHY_RX_CHAIN_MIMO_FORCE_POS (14)
-#define PHY_RX_CHAIN_MIMO_FORCE_MSK \
- (0x1 << PHY_RX_CHAIN_MIMO_FORCE_POS)
-
-/* TODO: fix the value, make it depend on firmware at runtime?
*/ -#define NUM_PHY_CTX 3 - -/* TODO: complete missing documentation */ -/** - * struct iwl_phy_context_cmd - config of the PHY context - * ( PHY_CONTEXT_CMD = 0x8 ) - * @id_and_color: ID and color of the relevant Binding - * @action: action to perform, one of FW_CTXT_ACTION_* - * @apply_time: 0 means immediate apply and context switch. - * other value means apply new params after X usecs - * @tx_param_color: ??? - * @ci: channel info - * @txchain_info: ??? - * @rxchain_info: ??? - * @acquisition_data: ??? - * @dsp_cfg_flags: set to 0 - */ -struct iwl_phy_context_cmd { - /* COMMON_INDEX_HDR_API_S_VER_1 */ - __le32 id_and_color; - __le32 action; - /* PHY_CONTEXT_DATA_API_S_VER_1 */ - __le32 apply_time; - __le32 tx_param_color; - struct iwl_fw_channel_info ci; - __le32 txchain_info; - __le32 rxchain_info; - __le32 acquisition_data; - __le32 dsp_cfg_flags; -} __packed; /* PHY_CONTEXT_CMD_API_VER_1 */ - -/* - * Aux ROC command - * - * Command requests the firmware to create a time event for a certain duration - * and remain on the given channel. This is done by using the Aux framework in - * the FW. - * The command was first used for Hot Spot issues - but can be used regardless - * to Hot Spot. - * - * ( HOT_SPOT_CMD 0x53 ) - * - * @id_and_color: ID and color of the MAC - * @action: action to perform, one of FW_CTXT_ACTION_* - * @event_unique_id: If the action FW_CTXT_ACTION_REMOVE then the - * event_unique_id should be the id of the time event assigned by ucode. - * Otherwise ignore the event_unique_id. - * @sta_id_and_color: station id and color, resumed during "Remain On Channel" - * activity. - * @channel_info: channel info - * @node_addr: Our MAC Address - * @reserved: reserved for alignment - * @apply_time: GP2 value to start (should always be the current GP2 value) - * @apply_time_max_delay: Maximum apply time delay value in TU. Defines max - * time by which start of the event is allowed to be postponed. - * @duration: event duration in TU To calculate event duration: - * timeEventDuration = min(duration, remainingQuota) - */ -struct iwl_hs20_roc_req { - /* COMMON_INDEX_HDR_API_S_VER_1 hdr */ - __le32 id_and_color; - __le32 action; - __le32 event_unique_id; - __le32 sta_id_and_color; - struct iwl_fw_channel_info channel_info; - u8 node_addr[ETH_ALEN]; - __le16 reserved; - __le32 apply_time; - __le32 apply_time_max_delay; - __le32 duration; -} __packed; /* HOT_SPOT_CMD_API_S_VER_1 */ - -/* - * values for AUX ROC result values - */ -enum iwl_mvm_hot_spot { - HOT_SPOT_RSP_STATUS_OK, - HOT_SPOT_RSP_STATUS_TOO_MANY_EVENTS, - HOT_SPOT_MAX_NUM_OF_SESSIONS, -}; - -/* - * Aux ROC command response - * - * In response to iwl_hs20_roc_req the FW sends this command to notify the - * driver the uid of the timevent. 
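To show how the id_and_color dword used throughout these commands is built, here is a standalone sketch of the FW_CMD_ID_AND_COLOR() packing defined earlier in this header (the macro is reproduced with extra parenthesization for safety; the sample values are invented):

#include <stdint.h>
#include <stdio.h>

#define FW_CTXT_ID_POS    0
#define FW_CTXT_COLOR_POS 8

#define FW_CMD_ID_AND_COLOR(_id, _color) \
	(((_id) << FW_CTXT_ID_POS) | ((_color) << FW_CTXT_COLOR_POS))

int main(void)
{
	uint32_t mac_id = 2, color = 1;

	/* the dword placed in id_and_color fields such as the one in
	 * iwl_hs20_roc_req above */
	printf("id_and_color = 0x%08x\n", FW_CMD_ID_AND_COLOR(mac_id, color));
	return 0;
}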
- *
- * ( HOT_SPOT_CMD 0x53 )
- *
- * @event_unique_id: Unique ID of time event assigned by ucode
- * @status: Return status; 0 is success, all the rest are used for specific
- * errors
- */
-struct iwl_hs20_roc_res {
- __le32 event_unique_id;
- __le32 status;
-} __packed; /* HOT_SPOT_RSP_API_S_VER_1 */
-
-/**
- * struct iwl_radio_version_notif - information on the radio version
- * ( RADIO_VERSION_NOTIFICATION = 0x68 )
- * @radio_flavor: radio flavor
- * @radio_step: radio version step
- * @radio_dash: radio version dash
- */
-struct iwl_radio_version_notif {
- __le32 radio_flavor;
- __le32 radio_step;
- __le32 radio_dash;
-} __packed; /* RADIO_VERSION_NOTOFICATION_S_VER_1 */
-
-enum iwl_card_state_flags {
- CARD_ENABLED = 0x00,
- HW_CARD_DISABLED = 0x01,
- SW_CARD_DISABLED = 0x02,
- CT_KILL_CARD_DISABLED = 0x04,
- HALT_CARD_DISABLED = 0x08,
- CARD_DISABLED_MSK = 0x0f,
- CARD_IS_RX_ON = 0x10,
-};
-
-/**
- * struct iwl_card_state_notif - card state notification
- * ( CARD_STATE_NOTIFICATION = 0xa1 )
- * @flags: %iwl_card_state_flags
- */
-struct iwl_card_state_notif {
- __le32 flags;
-} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */
-
-/**
- * struct iwl_missed_beacons_notif - information on missed beacons
- * ( MISSED_BEACONS_NOTIFICATION = 0xa2 )
- * @mac_id: interface ID
- * @consec_missed_beacons_since_last_rx: number of consecutive missed
- * beacons since last RX.
- * @consec_missed_beacons: number of consecutive missed beacons
- * @num_expected_beacons: number of expected beacons
- * @num_recvd_beacons: number of received beacons
- */
-struct iwl_missed_beacons_notif {
- __le32 mac_id;
- __le32 consec_missed_beacons_since_last_rx;
- __le32 consec_missed_beacons;
- __le32 num_expected_beacons;
- __le32 num_recvd_beacons;
-} __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */
-
-/**
- * struct iwl_mfuart_load_notif - mfuart image version & status
- * ( MFUART_LOAD_NOTIFICATION = 0xb1 )
- * @installed_ver: installed image version
- * @external_ver: external image version
- * @status: MFUART loading status
- * @duration: MFUART loading time
- * @image_size: MFUART image size in bytes
-*/
-struct iwl_mfuart_load_notif {
- __le32 installed_ver;
- __le32 external_ver;
- __le32 status;
- __le32 duration;
- /* image size valid only in v2 of the command */
- __le32 image_size;
-} __packed; /*MFU_LOADER_NTFY_API_S_VER_2*/
-
-/**
- * struct iwl_mfu_assert_dump_notif - mfuart dump logs
- * ( MFU_ASSERT_DUMP_NTF = 0xfe )
- * @assert_id: mfuart assert id that caused the notif
- * @curr_reset_num: number of asserts since uptime
- * @index_num: current chunk id
- * @parts_num: total number of chunks
- * @data_size: number of data bytes sent
- * @data: data buffer
- */
-struct iwl_mfu_assert_dump_notif {
- __le32 assert_id;
- __le32 curr_reset_num;
- __le16 index_num;
- __le16 parts_num;
- __le32 data_size;
- __le32 data[0];
-} __packed; /*MFU_DUMP_ASSERT_API_S_VER_1*/
-
-#define MAX_PORT_ID_NUM 2
-#define MAX_MCAST_FILTERING_ADDRESSES 256
-
-/**
- * struct iwl_mcast_filter_cmd - configure multicast filter.
- * @filter_own: Set 1 to filter out multicast packets sent by station itself
- * @port_id: Multicast MAC addresses array specifier. This is an unusual
- * way of identifying the network interface, adopted in the host-device
- * interface. It is used by the FW as an index into the array of
- * addresses. This array has MAX_PORT_ID_NUM members.
- * @count: Number of MAC addresses in the array
- * @pass_all: Set 1 to pass all multicast packets.
- * @bssid: current association BSSID.
- * @reserved: reserved - * @addr_list: Place holder for array of MAC addresses. - * IMPORTANT: add padding if necessary to ensure DWORD alignment. - */ -struct iwl_mcast_filter_cmd { - u8 filter_own; - u8 port_id; - u8 count; - u8 pass_all; - u8 bssid[6]; - u8 reserved[2]; - u8 addr_list[0]; -} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */ - -#define MAX_BCAST_FILTERS 8 -#define MAX_BCAST_FILTER_ATTRS 2 - -/** - * enum iwl_mvm_bcast_filter_attr_offset - written by fw for each Rx packet - * @BCAST_FILTER_OFFSET_PAYLOAD_START: offset is from payload start. - * @BCAST_FILTER_OFFSET_IP_END: offset is from ip header end (i.e. - * start of ip payload). - */ -enum iwl_mvm_bcast_filter_attr_offset { - BCAST_FILTER_OFFSET_PAYLOAD_START = 0, - BCAST_FILTER_OFFSET_IP_END = 1, -}; - -/** - * struct iwl_fw_bcast_filter_attr - broadcast filter attribute - * @offset_type: &enum iwl_mvm_bcast_filter_attr_offset. - * @offset: starting offset of this pattern. - * @reserved1: reserved - * @val: value to match - big endian (MSB is the first - * byte to match from offset pos). - * @mask: mask to match (big endian). - */ -struct iwl_fw_bcast_filter_attr { - u8 offset_type; - u8 offset; - __le16 reserved1; - __be32 val; - __be32 mask; -} __packed; /* BCAST_FILTER_ATT_S_VER_1 */ - -/** - * enum iwl_mvm_bcast_filter_frame_type - filter frame type - * @BCAST_FILTER_FRAME_TYPE_ALL: consider all frames. - * @BCAST_FILTER_FRAME_TYPE_IPV4: consider only ipv4 frames - */ -enum iwl_mvm_bcast_filter_frame_type { - BCAST_FILTER_FRAME_TYPE_ALL = 0, - BCAST_FILTER_FRAME_TYPE_IPV4 = 1, -}; - -/** - * struct iwl_fw_bcast_filter - broadcast filter - * @discard: discard frame (1) or let it pass (0). - * @frame_type: &enum iwl_mvm_bcast_filter_frame_type. - * @reserved1: reserved - * @num_attrs: number of valid attributes in this filter. - * @attrs: attributes of this filter. a filter is considered matched - * only when all its attributes are matched (i.e. AND relationship) - */ -struct iwl_fw_bcast_filter { - u8 discard; - u8 frame_type; - u8 num_attrs; - u8 reserved1; - struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS]; -} __packed; /* BCAST_FILTER_S_VER_1 */ - -#define BA_WINDOW_STREAMS_MAX 16 -#define BA_WINDOW_STATUS_TID_MSK 0x000F -#define BA_WINDOW_STATUS_STA_ID_POS 4 -#define BA_WINDOW_STATUS_STA_ID_MSK 0x01F0 -#define BA_WINDOW_STATUS_VALID_MSK BIT(9) - -/** - * struct iwl_ba_window_status_notif - reordering window's status notification - * @bitmap: bitmap of received frames [start_seq_num + 0]..[start_seq_num + 63] - * @ra_tid: bit 3:0 - TID, bit 8:4 - STA_ID, bit 9 - valid - * @start_seq_num: the start sequence number of the bitmap - * @mpdu_rx_count: the number of received MPDUs since entering D0i3 - */ -struct iwl_ba_window_status_notif { - __le64 bitmap[BA_WINDOW_STREAMS_MAX]; - __le16 ra_tid[BA_WINDOW_STREAMS_MAX]; - __le32 start_seq_num[BA_WINDOW_STREAMS_MAX]; - __le16 mpdu_rx_count[BA_WINDOW_STREAMS_MAX]; -} __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */ - -/** - * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration. - * @default_discard: default action for this mac (discard (1) / pass (0)). - * @reserved1: reserved - * @attached_filters: bitmap of relevant filters for this mac. 
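The ra_tid packing above is easy to get wrong, so here is a standalone decoding sketch using the BA_WINDOW_STATUS_* masks just defined (the sample value is invented):

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

#define BA_WINDOW_STATUS_TID_MSK    0x000F
#define BA_WINDOW_STATUS_STA_ID_POS 4
#define BA_WINDOW_STATUS_STA_ID_MSK 0x01F0
#define BA_WINDOW_STATUS_VALID_MSK  BIT(9)

int main(void)
{
	uint16_t ra_tid = 0x0253; /* hypothetical: valid, sta_id 5, tid 3 */

	if (ra_tid & BA_WINDOW_STATUS_VALID_MSK) {
		unsigned int tid = ra_tid & BA_WINDOW_STATUS_TID_MSK;
		unsigned int sta_id = (ra_tid & BA_WINDOW_STATUS_STA_ID_MSK) >>
				      BA_WINDOW_STATUS_STA_ID_POS;

		printf("stream valid: sta_id %u, tid %u\n", sta_id, tid);
	}
	return 0;
}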
- */ -struct iwl_fw_bcast_mac { - u8 default_discard; - u8 reserved1; - __le16 attached_filters; -} __packed; /* BCAST_MAC_CONTEXT_S_VER_1 */ - -/** - * struct iwl_bcast_filter_cmd - broadcast filtering configuration - * @disable: enable (0) / disable (1) - * @max_bcast_filters: max number of filters (MAX_BCAST_FILTERS) - * @max_macs: max number of macs (NUM_MAC_INDEX_DRIVER) - * @reserved1: reserved - * @filters: broadcast filters - * @macs: broadcast filtering configuration per-mac - */ -struct iwl_bcast_filter_cmd { - u8 disable; - u8 max_bcast_filters; - u8 max_macs; - u8 reserved1; - struct iwl_fw_bcast_filter filters[MAX_BCAST_FILTERS]; - struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER]; -} __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */ - -/* - * enum iwl_mvm_marker_id - maker ids - * - * The ids for different type of markers to insert into the usniffer logs - */ -enum iwl_mvm_marker_id { - MARKER_ID_TX_FRAME_LATENCY = 1, -}; /* MARKER_ID_API_E_VER_1 */ - -/** - * struct iwl_mvm_marker - mark info into the usniffer logs - * - * (MARKER_CMD = 0xcb) - * - * Mark the UTC time stamp into the usniffer logs together with additional - * metadata, so the usniffer output can be parsed. - * In the command response the ucode will return the GP2 time. - * - * @dw_len: The amount of dwords following this byte including this byte. - * @marker_id: A unique marker id (iwl_mvm_marker_id). - * @reserved: reserved. - * @timestamp: in milliseconds since 1970-01-01 00:00:00 UTC - * @metadata: additional meta data that will be written to the unsiffer log - */ -struct iwl_mvm_marker { - u8 dw_len; - u8 marker_id; - __le16 reserved; - __le64 timestamp; - __le32 metadata[0]; -} __packed; /* MARKER_API_S_VER_1 */ - -/* - * enum iwl_dc2dc_config_id - flag ids - * - * Ids of dc2dc configuration flags - */ -enum iwl_dc2dc_config_id { - DCDC_LOW_POWER_MODE_MSK_SET = 0x1, /* not used */ - DCDC_FREQ_TUNE_SET = 0x2, -}; /* MARKER_ID_API_E_VER_1 */ - -/** - * struct iwl_dc2dc_config_cmd - configure dc2dc values - * - * (DC2DC_CONFIG_CMD = 0x83) - * - * Set/Get & configure dc2dc values. - * The command always returns the current dc2dc values. - * - * @flags: set/get dc2dc - * @enable_low_power_mode: not used. - * @dc2dc_freq_tune0: frequency divider - digital domain - * @dc2dc_freq_tune1: frequency divider - analog domain - */ -struct iwl_dc2dc_config_cmd { - __le32 flags; - __le32 enable_low_power_mode; /* not used */ - __le32 dc2dc_freq_tune0; - __le32 dc2dc_freq_tune1; -} __packed; /* DC2DC_CONFIG_CMD_API_S_VER_1 */ - -/** - * struct iwl_dc2dc_config_resp - response for iwl_dc2dc_config_cmd - * - * Current dc2dc values returned by the FW. 
- * - * @dc2dc_freq_tune0: frequency divider - digital domain - * @dc2dc_freq_tune1: frequency divider - analog domain - */ -struct iwl_dc2dc_config_resp { - __le32 dc2dc_freq_tune0; - __le32 dc2dc_freq_tune1; -} __packed; /* DC2DC_CONFIG_RESP_API_S_VER_1 */ - -/*********************************** - * Smart Fifo API - ***********************************/ -/* Smart Fifo state */ -enum iwl_sf_state { - SF_LONG_DELAY_ON = 0, /* should never be called by driver */ - SF_FULL_ON, - SF_UNINIT, - SF_INIT_OFF, - SF_HW_NUM_STATES -}; - -/* Smart Fifo possible scenario */ -enum iwl_sf_scenario { - SF_SCENARIO_SINGLE_UNICAST, - SF_SCENARIO_AGG_UNICAST, - SF_SCENARIO_MULTICAST, - SF_SCENARIO_BA_RESP, - SF_SCENARIO_TX_RESP, - SF_NUM_SCENARIO -}; - -#define SF_TRANSIENT_STATES_NUMBER 2 /* SF_LONG_DELAY_ON and SF_FULL_ON */ -#define SF_NUM_TIMEOUT_TYPES 2 /* Aging timer and Idle timer */ - -/* smart FIFO default values */ -#define SF_W_MARK_SISO 6144 -#define SF_W_MARK_MIMO2 8192 -#define SF_W_MARK_MIMO3 6144 -#define SF_W_MARK_LEGACY 4096 -#define SF_W_MARK_SCAN 4096 - -/* SF Scenarios timers for default configuration (aligned to 32 uSec) */ -#define SF_SINGLE_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */ -#define SF_SINGLE_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */ -#define SF_AGG_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */ -#define SF_AGG_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */ -#define SF_MCAST_IDLE_TIMER_DEF 160 /* 150 mSec */ -#define SF_MCAST_AGING_TIMER_DEF 400 /* 0.4 mSec */ -#define SF_BA_IDLE_TIMER_DEF 160 /* 150 uSec */ -#define SF_BA_AGING_TIMER_DEF 400 /* 0.4 mSec */ -#define SF_TX_RE_IDLE_TIMER_DEF 160 /* 150 uSec */ -#define SF_TX_RE_AGING_TIMER_DEF 400 /* 0.4 mSec */ - -/* SF Scenarios timers for BSS MAC configuration (aligned to 32 uSec) */ -#define SF_SINGLE_UNICAST_IDLE_TIMER 320 /* 300 uSec */ -#define SF_SINGLE_UNICAST_AGING_TIMER 2016 /* 2 mSec */ -#define SF_AGG_UNICAST_IDLE_TIMER 320 /* 300 uSec */ -#define SF_AGG_UNICAST_AGING_TIMER 2016 /* 2 mSec */ -#define SF_MCAST_IDLE_TIMER 2016 /* 2 mSec */ -#define SF_MCAST_AGING_TIMER 10016 /* 10 mSec */ -#define SF_BA_IDLE_TIMER 320 /* 300 uSec */ -#define SF_BA_AGING_TIMER 2016 /* 2 mSec */ -#define SF_TX_RE_IDLE_TIMER 320 /* 300 uSec */ -#define SF_TX_RE_AGING_TIMER 2016 /* 2 mSec */ - -#define SF_LONG_DELAY_AGING_TIMER 1000000 /* 1 Sec */ - -#define SF_CFG_DUMMY_NOTIF_OFF BIT(16) - -/** - * struct iwl_sf_cfg_cmd - Smart Fifo configuration command. - * @state: smart fifo state, types listed in &enum iwl_sf_state. - * @watermark: Minimum allowed availabe free space in RXF for transient state. - * @long_delay_timeouts: aging and idle timer values for each scenario - * in long delay state. - * @full_on_timeouts: timer values for each scenario in full on state. - */ -struct iwl_sf_cfg_cmd { - __le32 state; - __le32 watermark[SF_TRANSIENT_STATES_NUMBER]; - __le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; - __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; -} __packed; /* SF_CFG_API_S_VER_2 */ - -/*********************************** - * Location Aware Regulatory (LAR) API - MCC updates - ***********************************/ - -/** - * struct iwl_mcc_update_cmd_v1 - Request the device to update geographic - * regulatory profile according to the given MCC (Mobile Country Code). - * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain. - * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the - * MCC in the cmd response will be the relevant MCC in the NVM. 
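For reference, the 16-bit mcc field in the commands below carries the two-letter code with the first letter in the high byte. A standalone sketch of that packing follows (the helper name is invented):

#include <stdint.h>
#include <stdio.h>

/* pack a two-letter code such as "US" or "ZZ" into the __le16 mcc field */
static uint16_t mcc_pack(const char *alpha2)
{
	return (uint16_t)(((uint16_t)alpha2[0] << 8) | (uint16_t)alpha2[1]);
}

int main(void)
{
	printf("\"ZZ\" -> 0x%04x\n", mcc_pack("ZZ")); /* 0x5a5a */
	return 0;
}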
- * @mcc: given mobile country code - * @source_id: the source from where we got the MCC, see iwl_mcc_source - * @reserved: reserved for alignment - */ -struct iwl_mcc_update_cmd_v1 { - __le16 mcc; - u8 source_id; - u8 reserved; -} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_1 */ - -/** - * struct iwl_mcc_update_cmd - Request the device to update geographic - * regulatory profile according to the given MCC (Mobile Country Code). - * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain. - * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the - * MCC in the cmd response will be the relevant MCC in the NVM. - * @mcc: given mobile country code - * @source_id: the source from where we got the MCC, see iwl_mcc_source - * @reserved: reserved for alignment - * @key: integrity key for MCC API OEM testing - * @reserved2: reserved - */ -struct iwl_mcc_update_cmd { - __le16 mcc; - u8 source_id; - u8 reserved; - __le32 key; - u8 reserved2[20]; -} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_2 */ - -/** - * struct iwl_mcc_update_resp_v1 - response to MCC_UPDATE_CMD. - * Contains the new channel control profile map, if changed, and the new MCC - * (mobile country code). - * The new MCC may be different than what was requested in MCC_UPDATE_CMD. - * @status: see &enum iwl_mcc_update_status - * @mcc: the new applied MCC - * @cap: capabilities for all channels which matches the MCC - * @source_id: the MCC source, see iwl_mcc_source - * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51 - * channels, depending on platform) - * @channels: channel control data map, DWORD for each channel. Only the first - * 16bits are used. - */ -struct iwl_mcc_update_resp_v1 { - __le32 status; - __le16 mcc; - u8 cap; - u8 source_id; - __le32 n_channels; - __le32 channels[0]; -} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_1 */ - -/** - * struct iwl_mcc_update_resp - response to MCC_UPDATE_CMD. - * Contains the new channel control profile map, if changed, and the new MCC - * (mobile country code). - * The new MCC may be different than what was requested in MCC_UPDATE_CMD. - * @status: see &enum iwl_mcc_update_status - * @mcc: the new applied MCC - * @cap: capabilities for all channels which matches the MCC - * @source_id: the MCC source, see iwl_mcc_source - * @time: time elapsed from the MCC test start (in 30 seconds TU) - * @reserved: reserved. - * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51 - * channels, depending on platform) - * @channels: channel control data map, DWORD for each channel. Only the first - * 16bits are used. - */ -struct iwl_mcc_update_resp { - __le32 status; - __le16 mcc; - u8 cap; - u8 source_id; - __le16 time; - __le16 reserved; - __le32 n_channels; - __le32 channels[0]; -} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_2 */ - -/** - * struct iwl_mcc_chub_notif - chub notifies of mcc change - * (MCC_CHUB_UPDATE_CMD = 0xc9) - * The Chub (Communication Hub, CommsHUB) is a HW component that connects to - * the cellular and connectivity cores that gets updates of the mcc, and - * notifies the ucode directly of any mcc change. - * The ucode requests the driver to request the device to update geographic - * regulatory profile according to the given MCC (Mobile Country Code). - * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain. - * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the - * MCC in the cmd response will be the relevant MCC in the NVM. 
- * @mcc: given mobile country code - * @source_id: identity of the change originator, see iwl_mcc_source - * @reserved1: reserved for alignment - */ -struct iwl_mcc_chub_notif { - __le16 mcc; - u8 source_id; - u8 reserved1; -} __packed; /* LAR_MCC_NOTIFY_S */ - -enum iwl_mcc_update_status { - MCC_RESP_NEW_CHAN_PROFILE, - MCC_RESP_SAME_CHAN_PROFILE, - MCC_RESP_INVALID, - MCC_RESP_NVM_DISABLED, - MCC_RESP_ILLEGAL, - MCC_RESP_LOW_PRIORITY, - MCC_RESP_TEST_MODE_ACTIVE, - MCC_RESP_TEST_MODE_NOT_ACTIVE, - MCC_RESP_TEST_MODE_DENIAL_OF_SERVICE, -}; - -enum iwl_mcc_source { - MCC_SOURCE_OLD_FW = 0, - MCC_SOURCE_ME = 1, - MCC_SOURCE_BIOS = 2, - MCC_SOURCE_3G_LTE_HOST = 3, - MCC_SOURCE_3G_LTE_DEVICE = 4, - MCC_SOURCE_WIFI = 5, - MCC_SOURCE_RESERVED = 6, - MCC_SOURCE_DEFAULT = 7, - MCC_SOURCE_UNINITIALIZED = 8, - MCC_SOURCE_MCC_API = 9, - MCC_SOURCE_GET_CURRENT = 0x10, - MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11, -}; - -/* DTS measurements */ - -enum iwl_dts_measurement_flags { - DTS_TRIGGER_CMD_FLAGS_TEMP = BIT(0), - DTS_TRIGGER_CMD_FLAGS_VOLT = BIT(1), -}; - -/** - * struct iwl_dts_measurement_cmd - request DTS temp and/or voltage measurements - * - * @flags: indicates which measurements we want as specified in - * &enum iwl_dts_measurement_flags - */ -struct iwl_dts_measurement_cmd { - __le32 flags; -} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_CMD_S */ - -/** -* enum iwl_dts_control_measurement_mode - DTS measurement type -* @DTS_AUTOMATIC: Automatic mode (full SW control). Provide temperature read -* back (latest value. Not waiting for new value). Use automatic -* SW DTS configuration. -* @DTS_REQUEST_READ: Request DTS read. Configure DTS with manual settings, -* trigger DTS reading and provide read back temperature read -* when available. -* @DTS_OVER_WRITE: over-write the DTS temperatures in the SW until next read -* @DTS_DIRECT_WITHOUT_MEASURE: DTS returns its latest temperature result, -* without measurement trigger. -*/ -enum iwl_dts_control_measurement_mode { - DTS_AUTOMATIC = 0, - DTS_REQUEST_READ = 1, - DTS_OVER_WRITE = 2, - DTS_DIRECT_WITHOUT_MEASURE = 3, -}; - -/** -* enum iwl_dts_used - DTS to use or used for measurement in the DTS request -* @DTS_USE_TOP: Top -* @DTS_USE_CHAIN_A: chain A -* @DTS_USE_CHAIN_B: chain B -* @DTS_USE_CHAIN_C: chain C -* @XTAL_TEMPERATURE: read temperature from xtal -*/ -enum iwl_dts_used { - DTS_USE_TOP = 0, - DTS_USE_CHAIN_A = 1, - DTS_USE_CHAIN_B = 2, - DTS_USE_CHAIN_C = 3, - XTAL_TEMPERATURE = 4, -}; - -/** -* enum iwl_dts_bit_mode - bit-mode to use in DTS request read mode -* @DTS_BIT6_MODE: bit 6 mode -* @DTS_BIT8_MODE: bit 8 mode -*/ -enum iwl_dts_bit_mode { - DTS_BIT6_MODE = 0, - DTS_BIT8_MODE = 1, -}; - -/** - * struct iwl_ext_dts_measurement_cmd - request extended DTS temp measurements - * @control_mode: see &enum iwl_dts_control_measurement_mode - * @temperature: used when over write DTS mode is selected - * @sensor: set temperature sensor to use. See &enum iwl_dts_used - * @avg_factor: average factor to DTS in request DTS read mode - * @bit_mode: value defines the DTS bit mode to use. 
See &enum iwl_dts_bit_mode - * @step_duration: step duration for the DTS - */ -struct iwl_ext_dts_measurement_cmd { - __le32 control_mode; - __le32 temperature; - __le32 sensor; - __le32 avg_factor; - __le32 bit_mode; - __le32 step_duration; -} __packed; /* XVT_FW_DTS_CONTROL_MEASUREMENT_REQUEST_API_S */ - -/** - * struct iwl_dts_measurement_notif_v1 - measurements notification - * - * @temp: the measured temperature - * @voltage: the measured voltage - */ -struct iwl_dts_measurement_notif_v1 { - __le32 temp; - __le32 voltage; -} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_1*/ - -/** - * struct iwl_dts_measurement_notif_v2 - measurements notification - * - * @temp: the measured temperature - * @voltage: the measured voltage - * @threshold_idx: the trip index that was crossed - */ -struct iwl_dts_measurement_notif_v2 { - __le32 temp; - __le32 voltage; - __le32 threshold_idx; -} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_2 */ - -/** - * struct ct_kill_notif - CT-kill entry notification - * - * @temperature: the current temperature in celsius - * @reserved: reserved - */ -struct ct_kill_notif { - __le16 temperature; - __le16 reserved; -} __packed; /* GRP_PHY_CT_KILL_NTF */ - -/** -* enum ctdp_cmd_operation - CTDP command operations -* @CTDP_CMD_OPERATION_START: update the current budget -* @CTDP_CMD_OPERATION_STOP: stop ctdp -* @CTDP_CMD_OPERATION_REPORT: get the average budget -*/ -enum iwl_mvm_ctdp_cmd_operation { - CTDP_CMD_OPERATION_START = 0x1, - CTDP_CMD_OPERATION_STOP = 0x2, - CTDP_CMD_OPERATION_REPORT = 0x4, -};/* CTDP_CMD_OPERATION_TYPE_E */ - -/** - * struct iwl_mvm_ctdp_cmd - track and manage the FW power consumption budget - * - * @operation: see &enum iwl_mvm_ctdp_cmd_operation - * @budget: the budget in milliwatt - * @window_size: defined in API but not used - */ -struct iwl_mvm_ctdp_cmd { - __le32 operation; - __le32 budget; - __le32 window_size; -} __packed; - -#define IWL_MAX_DTS_TRIPS 8 - -/** - * struct temp_report_ths_cmd - set temperature thresholds - * - * @num_temps: number of temperature thresholds passed - * @thresholds: array with the thresholds to be configured - */ -struct temp_report_ths_cmd { - __le32 num_temps; - __le16 thresholds[IWL_MAX_DTS_TRIPS]; -} __packed; /* GRP_PHY_TEMP_REPORTING_THRESHOLDS_CMD */ - -/*********************************** - * TDLS API - ***********************************/ - -/* Type of TDLS request */ -enum iwl_tdls_channel_switch_type { - TDLS_SEND_CHAN_SW_REQ = 0, - TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH, - TDLS_MOVE_CH, -}; /* TDLS_STA_CHANNEL_SWITCH_CMD_TYPE_API_E_VER_1 */ - -/** - * struct iwl_tdls_channel_switch_timing - Switch timing in TDLS channel-switch - * @frame_timestamp: GP2 timestamp of channel-switch request/response packet - * received from peer - * @max_offchan_duration: What amount of microseconds out of a DTIM is given - * to the TDLS off-channel communication. For instance if the DTIM is - * 200TU and the TDLS peer is to be given 25% of the time, the value - * given will be 50TU, or 50 * 1024 if translated into microseconds. 
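The worked example in the comment above (25% of a 200TU DTIM giving 50TU, i.e. 50 * 1024 microseconds) reduces to a one-line computation; a standalone sketch, assuming the usual 1024-microsecond TU:

#include <stdint.h>
#include <stdio.h>

#define TU_TO_USEC 1024

/* fraction_percent of one DTIM interval, expressed in microseconds */
static uint32_t offchan_duration_usec(uint32_t dtim_tu,
				      uint32_t fraction_percent)
{
	return dtim_tu * fraction_percent / 100 * TU_TO_USEC;
}

int main(void)
{
	/* the worked example from the comment above: 25% of a 200TU DTIM */
	printf("%u usec\n", offchan_duration_usec(200, 25)); /* 51200 */
	return 0;
}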
- * @switch_time: switch time the peer sent in its channel switch timing IE
- * @switch_timeout: switch timeout the peer sent in its channel switch timing IE
- */
-struct iwl_tdls_channel_switch_timing {
- __le32 frame_timestamp; /* GP2 time of peer packet Rx */
- __le32 max_offchan_duration; /* given in micro-seconds */
- __le32 switch_time; /* given in micro-seconds */
- __le32 switch_timeout; /* given in micro-seconds */
-} __packed; /* TDLS_STA_CHANNEL_SWITCH_TIMING_DATA_API_S_VER_1 */
-
-#define IWL_TDLS_CH_SW_FRAME_MAX_SIZE 200
-
-/**
- * struct iwl_tdls_channel_switch_frame - TDLS channel switch frame template
- *
- * A template representing a TDLS channel-switch request or response frame
- *
- * @switch_time_offset: offset to the channel switch timing IE in the template
- * @tx_cmd: Tx parameters for the frame
- * @data: frame data
- */
-struct iwl_tdls_channel_switch_frame {
- __le32 switch_time_offset;
- struct iwl_tx_cmd tx_cmd;
- u8 data[IWL_TDLS_CH_SW_FRAME_MAX_SIZE];
-} __packed; /* TDLS_STA_CHANNEL_SWITCH_FRAME_API_S_VER_1 */
-
-/**
- * struct iwl_tdls_channel_switch_cmd - TDLS channel switch command
- *
- * The command is sent to initiate a channel switch and also in response to
- * incoming TDLS channel-switch request/response packets from remote peers.
- *
- * @switch_type: see &enum iwl_tdls_channel_switch_type
- * @peer_sta_id: station id of TDLS peer
- * @ci: channel we switch to
- * @timing: timing related data for command
- * @frame: channel-switch request/response template, depending on switch_type
- */
-struct iwl_tdls_channel_switch_cmd {
- u8 switch_type;
- __le32 peer_sta_id;
- struct iwl_fw_channel_info ci;
- struct iwl_tdls_channel_switch_timing timing;
- struct iwl_tdls_channel_switch_frame frame;
-} __packed; /* TDLS_STA_CHANNEL_SWITCH_CMD_API_S_VER_1 */
-
-/**
- * struct iwl_tdls_channel_switch_notif - TDLS channel switch start notification
- *
- * @status: non-zero on success
- * @offchannel_duration: duration given in microseconds
- * @sta_id: the peer the channel switch is currently performed with
- */
-struct iwl_tdls_channel_switch_notif {
- __le32 status;
- __le32 offchannel_duration;
- __le32 sta_id;
-} __packed; /* TDLS_STA_CHANNEL_SWITCH_NTFY_API_S_VER_1 */
-
-/**
- * struct iwl_tdls_sta_info - TDLS station info
- *
- * @sta_id: station id of the TDLS peer
- * @tx_to_peer_tid: TID reserved vs. the peer for FW based Tx
- * @tx_to_peer_ssn: initial SSN the FW should use for Tx on its TID vs the peer
- * @is_initiator: 1 if the peer is the TDLS link initiator, 0 otherwise
- */
-struct iwl_tdls_sta_info {
- u8 sta_id;
- u8 tx_to_peer_tid;
- __le16 tx_to_peer_ssn;
- __le32 is_initiator;
-} __packed; /* TDLS_STA_INFO_VER_1 */
-
-/**
- * struct iwl_tdls_config_cmd - TDLS basic config command
- *
- * @id_and_color: MAC id and color being configured
- * @tdls_peer_count: number of currently connected TDLS peers
- * @tx_to_ap_tid: TID reserved vs. the AP for FW based Tx
- * @tx_to_ap_ssn: initial SSN the FW should use for Tx on its TID vs. the AP
- * @sta_info: per-station info.
Only the first tdls_peer_count entries are set - * @pti_req_data_offset: offset of network-level data for the PTI template - * @pti_req_tx_cmd: Tx parameters for PTI request template - * @pti_req_template: PTI request template data - */ -struct iwl_tdls_config_cmd { - __le32 id_and_color; /* mac id and color */ - u8 tdls_peer_count; - u8 tx_to_ap_tid; - __le16 tx_to_ap_ssn; - struct iwl_tdls_sta_info sta_info[IWL_MVM_TDLS_STA_COUNT]; - - __le32 pti_req_data_offset; - struct iwl_tx_cmd pti_req_tx_cmd; - u8 pti_req_template[0]; -} __packed; /* TDLS_CONFIG_CMD_API_S_VER_1 */ - -/** - * struct iwl_tdls_config_sta_info_res - TDLS per-station config information - * - * @sta_id: station id of the TDLS peer - * @tx_to_peer_last_seq: last sequence number used by FW during FW-based Tx to - * the peer - */ -struct iwl_tdls_config_sta_info_res { - __le16 sta_id; - __le16 tx_to_peer_last_seq; -} __packed; /* TDLS_STA_INFO_RSP_VER_1 */ - -/** - * struct iwl_tdls_config_res - TDLS config information from FW - * - * @tx_to_ap_last_seq: last sequence number used by FW during FW-based Tx to AP - * @sta_info: per-station TDLS config information - */ -struct iwl_tdls_config_res { - __le32 tx_to_ap_last_seq; - struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT]; -} __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */ - -#define TX_FIFO_MAX_NUM_9000 8 -#define TX_FIFO_MAX_NUM 15 -#define RX_FIFO_MAX_NUM 2 -#define TX_FIFO_INTERNAL_MAX_NUM 6 - -/** - * struct iwl_shared_mem_cfg_v2 - Shared memory configuration information - * - * @shared_mem_addr: shared memory addr (pre 8000 HW set to 0x0 as MARBH is not - * accessible) - * @shared_mem_size: shared memory size - * @sample_buff_addr: internal sample (mon/adc) buff addr (pre 8000 HW set to - * 0x0 as accessible only via DBGM RDAT) - * @sample_buff_size: internal sample buff size - * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB), (pre - * 8000 HW set to 0x0 as not accessible) - * @txfifo_size: size of TXF0 ... TXF7 - * @rxfifo_size: RXF1, RXF2 sizes. If there is no RXF2, it'll have a value of 0 - * @page_buff_addr: used by UMAC and performance debug (page miss analysis), - * when paging is not supported this should be 0 - * @page_buff_size: size of %page_buff_addr - * @rxfifo_addr: Start address of rxFifo - * @internal_txfifo_addr: start address of internalFifo - * @internal_txfifo_size: internal fifos' size - * - * NOTE: on firmware that don't have IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG - * set, the last 3 members don't exist. 
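The NOTE above matters when parsing the response: without the extended-shared-mem capability the structure is effectively truncated before rxfifo_addr. A standalone sketch of that length check (a host-endian mirror of the struct; the helper names are invented):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define TX_FIFO_MAX_NUM_9000     8
#define RX_FIFO_MAX_NUM          2
#define TX_FIFO_INTERNAL_MAX_NUM 6

/* mirror of iwl_shared_mem_cfg_v2, host-endian for illustration */
struct shared_mem_cfg_v2 {
	uint32_t shared_mem_addr;
	uint32_t shared_mem_size;
	uint32_t sample_buff_addr;
	uint32_t sample_buff_size;
	uint32_t txfifo_addr;
	uint32_t txfifo_size[TX_FIFO_MAX_NUM_9000];
	uint32_t rxfifo_size[RX_FIFO_MAX_NUM];
	uint32_t page_buff_addr;
	uint32_t page_buff_size;
	uint32_t rxfifo_addr;
	uint32_t internal_txfifo_addr;
	uint32_t internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
};

/* without the extended-shared-mem capability only the leading part
 * of the response is valid */
static size_t shared_mem_valid_len(int has_extend_capa)
{
	return has_extend_capa ?
		sizeof(struct shared_mem_cfg_v2) :
		offsetof(struct shared_mem_cfg_v2, rxfifo_addr);
}

int main(void)
{
	printf("valid bytes: %zu vs %zu\n",
	       shared_mem_valid_len(0), shared_mem_valid_len(1));
	return 0;
}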
- */ -struct iwl_shared_mem_cfg_v2 { - __le32 shared_mem_addr; - __le32 shared_mem_size; - __le32 sample_buff_addr; - __le32 sample_buff_size; - __le32 txfifo_addr; - __le32 txfifo_size[TX_FIFO_MAX_NUM_9000]; - __le32 rxfifo_size[RX_FIFO_MAX_NUM]; - __le32 page_buff_addr; - __le32 page_buff_size; - __le32 rxfifo_addr; - __le32 internal_txfifo_addr; - __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; -} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */ - -/** - * struct iwl_shared_mem_lmac_cfg - LMAC shared memory configuration - * - * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB) - * @txfifo_size: size of TX FIFOs - * @rxfifo1_addr: RXF1 addr - * @rxfifo1_size: RXF1 size - */ -struct iwl_shared_mem_lmac_cfg { - __le32 txfifo_addr; - __le32 txfifo_size[TX_FIFO_MAX_NUM]; - __le32 rxfifo1_addr; - __le32 rxfifo1_size; - -} __packed; /* SHARED_MEM_ALLOC_LMAC_API_S_VER_1 */ - -/** - * struct iwl_shared_mem_cfg - Shared memory configuration information - * - * @shared_mem_addr: shared memory address - * @shared_mem_size: shared memory size - * @sample_buff_addr: internal sample (mon/adc) buff addr - * @sample_buff_size: internal sample buff size - * @rxfifo2_addr: start addr of RXF2 - * @rxfifo2_size: size of RXF2 - * @page_buff_addr: used by UMAC and performance debug (page miss analysis), - * when paging is not supported this should be 0 - * @page_buff_size: size of %page_buff_addr - * @lmac_num: number of LMACs (1 or 2) - * @lmac_smem: per - LMAC smem data - */ -struct iwl_shared_mem_cfg { - __le32 shared_mem_addr; - __le32 shared_mem_size; - __le32 sample_buff_addr; - __le32 sample_buff_size; - __le32 rxfifo2_addr; - __le32 rxfifo2_size; - __le32 page_buff_addr; - __le32 page_buff_size; - __le32 lmac_num; - struct iwl_shared_mem_lmac_cfg lmac_smem[2]; -} __packed; /* SHARED_MEM_ALLOC_API_S_VER_3 */ - -/** - * struct iwl_mu_group_mgmt_cmd - VHT MU-MIMO group configuration - * - * @reserved: reserved - * @membership_status: a bitmap of MU groups - * @user_position:the position of station in a group. If the station is in the - * group then bits (group * 2) is the position -1 - */ -struct iwl_mu_group_mgmt_cmd { - __le32 reserved; - __le32 membership_status[2]; - __le32 user_position[4]; -} __packed; /* MU_GROUP_ID_MNG_TABLE_API_S_VER_1 */ - -/** - * struct iwl_mu_group_mgmt_notif - VHT MU-MIMO group id notification - * - * @membership_status: a bitmap of MU groups - * @user_position: the position of station in a group. 
If the station is in the - * group then bits (group * 2) is the position -1 - */ -struct iwl_mu_group_mgmt_notif { - __le32 membership_status[2]; - __le32 user_position[4]; -} __packed; /* MU_GROUP_MNG_NTFY_API_S_VER_1 */ - -#define MAX_STORED_BEACON_SIZE 600 - -/** - * struct iwl_stored_beacon_notif - Stored beacon notification - * - * @system_time: system time on air rise - * @tsf: TSF on air rise - * @beacon_timestamp: beacon on air rise - * @band: band, matches &RX_RES_PHY_FLAGS_BAND_24 definition - * @channel: channel this beacon was received on - * @rates: rate in ucode internal format - * @byte_count: frame's byte count - * @data: beacon data, length in @byte_count - */ -struct iwl_stored_beacon_notif { - __le32 system_time; - __le64 tsf; - __le32 beacon_timestamp; - __le16 band; - __le16 channel; - __le32 rates; - __le32 byte_count; - u8 data[MAX_STORED_BEACON_SIZE]; -} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_2 */ - -#define LQM_NUMBER_OF_STATIONS_IN_REPORT 16 - -enum iwl_lqm_cmd_operatrions { - LQM_CMD_OPERATION_START_MEASUREMENT = 0x01, - LQM_CMD_OPERATION_STOP_MEASUREMENT = 0x02, -}; - -enum iwl_lqm_status { - LQM_STATUS_SUCCESS = 0, - LQM_STATUS_TIMEOUT = 1, - LQM_STATUS_ABORT = 2, -}; - -/** - * struct iwl_link_qual_msrmnt_cmd - Link Quality Measurement command - * @cmd_operation: command operation to be performed (start or stop) - * as defined above. - * @mac_id: MAC ID the measurement applies to. - * @measurement_time: time of the total measurement to be performed, in uSec. - * @timeout: maximum time allowed until a response is sent, in uSec. - */ -struct iwl_link_qual_msrmnt_cmd { - __le32 cmd_operation; - __le32 mac_id; - __le32 measurement_time; - __le32 timeout; -} __packed /* LQM_CMD_API_S_VER_1 */; - -/** - * struct iwl_link_qual_msrmnt_notif - Link Quality Measurement notification - * - * @frequent_stations_air_time: an array containing the total air time - * (in uSec) used by the most frequently transmitting stations. - * @number_of_stations: the number of uniqe stations included in the array - * (a number between 0 to 16) - * @total_air_time_other_stations: the total air time (uSec) used by all the - * stations which are not included in the above report. - * @time_in_measurement_window: the total time in uSec in which a measurement - * took place. - * @tx_frame_dropped: the number of TX frames dropped due to retry limit during - * measurement - * @mac_id: MAC ID the measurement applies to. - * @status: return status. may be one of the LQM_STATUS_* defined above. - * @reserved: reserved. 
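One plausible decoding of the membership_status/user_position layout described above (64 membership bits, with 2 position bits per group); this is an illustrative standalone sketch, not driver code, and the exact semantics follow the comment above:

#include <stdint.h>
#include <stdio.h>

/* VHT MU-MIMO: 64 group-membership bits and a 2-bit user position per
 * group, laid out as in the notification above */
static int mu_group_member(const uint32_t membership_status[2],
			   unsigned int group)
{
	return (membership_status[group / 32] >> (group % 32)) & 1;
}

static unsigned int mu_user_position(const uint32_t user_position[4],
				     unsigned int group)
{
	return (user_position[group / 16] >> ((group % 16) * 2)) & 0x3;
}

int main(void)
{
	uint32_t membership[2] = { 0x00000008, 0 };	/* member of group 3 */
	uint32_t position[4] = { 0x00000080, 0, 0, 0 }; /* position 2 there */

	if (mu_group_member(membership, 3))
		printf("group 3, position %u\n", mu_user_position(position, 3));
	return 0;
}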
- */ -struct iwl_link_qual_msrmnt_notif { - __le32 frequent_stations_air_time[LQM_NUMBER_OF_STATIONS_IN_REPORT]; - __le32 number_of_stations; - __le32 total_air_time_other_stations; - __le32 time_in_measurement_window; - __le32 tx_frame_dropped; - __le32 mac_id; - __le32 status; - u8 reserved[12]; -} __packed; /* LQM_MEASUREMENT_COMPLETE_NTF_API_S_VER1 */ - -/** - * struct iwl_channel_switch_noa_notif - Channel switch NOA notification - * - * @id_and_color: ID and color of the MAC - */ -struct iwl_channel_switch_noa_notif { - __le32 id_and_color; -} __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */ - -/* Operation types for the debug mem access */ -enum { - DEBUG_MEM_OP_READ = 0, - DEBUG_MEM_OP_WRITE = 1, - DEBUG_MEM_OP_WRITE_BYTES = 2, -}; - -#define DEBUG_MEM_MAX_SIZE_DWORDS 32 - -/** - * struct iwl_dbg_mem_access_cmd - Request the device to read/write memory - * @op: DEBUG_MEM_OP_* - * @addr: address to read/write from/to - * @len: in dwords, to read/write - * @data: for write opeations, contains the source buffer - */ -struct iwl_dbg_mem_access_cmd { - __le32 op; - __le32 addr; - __le32 len; - __le32 data[]; -} __packed; /* DEBUG_(U|L)MAC_RD_WR_CMD_API_S_VER_1 */ - -/* Status responses for the debug mem access */ -enum { - DEBUG_MEM_STATUS_SUCCESS = 0x0, - DEBUG_MEM_STATUS_FAILED = 0x1, - DEBUG_MEM_STATUS_LOCKED = 0x2, - DEBUG_MEM_STATUS_HIDDEN = 0x3, - DEBUG_MEM_STATUS_LENGTH = 0x4, -}; - -/** - * struct iwl_dbg_mem_access_rsp - Response to debug mem commands - * @status: DEBUG_MEM_STATUS_* - * @len: read dwords (0 for write operations) - * @data: contains the read DWs - */ -struct iwl_dbg_mem_access_rsp { - __le32 status; - __le32 len; - __le32 data[]; -} __packed; /* DEBUG_(U|L)MAC_RD_WR_RSP_API_S_VER_1 */ - -/** - * struct iwl_nvm_access_complete_cmd - NVM_ACCESS commands are completed - * @reserved: reserved - */ -struct iwl_nvm_access_complete_cmd { - __le32 reserved; -} __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */ - -/** - * enum iwl_extended_cfg_flag - commands driver may send before - * finishing init flow - * @IWL_INIT_DEBUG_CFG: driver is going to send debug config command - * @IWL_INIT_NVM: driver is going to send NVM_ACCESS commands - * @IWL_INIT_PHY: driver is going to send PHY_DB commands - */ -enum iwl_extended_cfg_flags { - IWL_INIT_DEBUG_CFG, - IWL_INIT_NVM, - IWL_INIT_PHY, -}; - -/** - * struct iwl_extended_cfg_cmd - mark what commands ucode should wait for - * before finishing init flows - * @init_flags: values from iwl_extended_cfg_flags - */ -struct iwl_init_extended_cfg_cmd { - __le32 init_flags; -} __packed; /* INIT_EXTENDED_CFG_CMD_API_S_VER_1 */ - -/* - * struct iwl_nvm_get_info - request to get NVM data - */ -struct iwl_nvm_get_info { - __le32 reserved; -} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_S_VER_1 */ - -/** - * struct iwl_nvm_get_info_general - general NVM data - * @flags: 1 - empty, 0 - valid - * @nvm_version: nvm version - * @board_type: board type - * @reserved: reserved - */ -struct iwl_nvm_get_info_general { - __le32 flags; - __le16 nvm_version; - u8 board_type; - u8 reserved; -} __packed; /* GRP_REGULATORY_NVM_GET_INFO_GENERAL_S_VER_1 */ - -/** - * struct iwl_nvm_get_info_sku - mac information - * @enable_24g: band 2.4G enabled - * @enable_5g: band 5G enabled - * @enable_11n: 11n enabled - * @enable_11ac: 11ac enabled - * @mimo_disable: MIMO enabled - * @ext_crypto: Extended crypto enabled - */ -struct iwl_nvm_get_info_sku { - __le32 enable_24g; - __le32 enable_5g; - __le32 enable_11n; - __le32 enable_11ac; - __le32 mimo_disable; - 
__le32 ext_crypto; -} __packed; /* GRP_REGULATORY_NVM_GET_INFO_MAC_SKU_SECTION_S_VER_1 */ - -/** - * struct iwl_nvm_get_info_phy - phy information - * @tx_chains: BIT 0 chain A, BIT 1 chain B - * @rx_chains: BIT 0 chain A, BIT 1 chain B - */ -struct iwl_nvm_get_info_phy { - __le32 tx_chains; - __le32 rx_chains; -} __packed; /* GRP_REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */ - -#define IWL_NUM_CHANNELS (51) - -/** - * struct iwl_nvm_get_info_regulatory - regulatory information - * @lar_enabled: is LAR enabled - * @channel_profile: regulatory data of this channel - * @reserved: reserved - */ -struct iwl_nvm_get_info_regulatory { - __le32 lar_enabled; - __le16 channel_profile[IWL_NUM_CHANNELS]; - __le16 reserved; -} __packed; /* GRP_REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */ - -/** - * struct iwl_nvm_get_info_rsp - response to get NVM data - * @general: general NVM data - * @mac_sku: data relating to MAC sku - * @phy_sku: data relating to PHY sku - * @regulatory: regulatory data - */ -struct iwl_nvm_get_info_rsp { - struct iwl_nvm_get_info_general general; - struct iwl_nvm_get_info_sku mac_sku; - struct iwl_nvm_get_info_phy phy_sku; - struct iwl_nvm_get_info_regulatory regulatory; -} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_RSP_S_VER_1 */ - -/** - * struct iwl_mvm_antenna_coupling_notif - antenna coupling notification - * @isolation: antenna isolation value - */ -struct iwl_mvm_antenna_coupling_notif { - __le32 isolation; -} __packed; +#include "fw/api/tdls.h" +#include "fw/api/mac-cfg.h" +#include "fw/api/offload.h" +#include "fw/api/context.h" +#include "fw/api/time-event.h" +#include "fw/api/datapath.h" +#include "fw/api/phy.h" +#include "fw/api/config.h" +#include "fw/api/alive.h" +#include "fw/api/binding.h" +#include "fw/api/cmdhdr.h" +#include "fw/api/coex.h" +#include "fw/api/commands.h" +#include "fw/api/d3.h" +#include "fw/api/filter.h" +#include "fw/api/mac.h" +#include "fw/api/nvm-reg.h" +#include "fw/api/phy-ctxt.h" +#include "fw/api/power.h" +#include "fw/api/rs.h" +#include "fw/api/rx.h" +#include "fw/api/scan.h" +#include "fw/api/sf.h" +#include "fw/api/sta.h" +#include "fw/api/stats.h" +#include "fw/api/tof.h" +#include "fw/api/tx.h" #endif /* __fw_api_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 79e7a7a285dc..875cf3a60adb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -78,7 +78,7 @@ #include "iwl-eeprom-parse.h" #include "mvm.h" -#include "fw-dbg.h" +#include "fw/dbg.h" #include "iwl-phy-db.h" #define MVM_UCODE_ALIVE_TIMEOUT HZ @@ -144,134 +144,6 @@ static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm) return ret; } -void iwl_free_fw_paging(struct iwl_mvm *mvm) -{ - int i; - - if (!mvm->fw_paging_db[0].fw_paging_block) - return; - - for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) { - struct iwl_fw_paging *paging = &mvm->fw_paging_db[i]; - - if (!paging->fw_paging_block) { - IWL_DEBUG_FW(mvm, - "Paging: block %d already freed, continue to next page\n", - i); - - continue; - } - dma_unmap_page(mvm->trans->dev, paging->fw_paging_phys, - paging->fw_paging_size, DMA_BIDIRECTIONAL); - - __free_pages(paging->fw_paging_block, - get_order(paging->fw_paging_size)); - paging->fw_paging_block = NULL; - } - kfree(mvm->trans->paging_download_buf); - mvm->trans->paging_download_buf = NULL; - mvm->trans->paging_db = NULL; - - memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db)); -} - -static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const 
struct fw_img *image) -{ - int sec_idx, idx; - u32 offset = 0; - - /* - * find where is the paging image start point: - * if CPU2 exist and it's in paging format, then the image looks like: - * CPU1 sections (2 or more) - * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between CPU1 to CPU2 - * CPU2 sections (not paged) - * PAGING_SEPARATOR_SECTION delimiter - separate between CPU2 - * non paged to CPU2 paging sec - * CPU2 paging CSS - * CPU2 paging image (including instruction and data) - */ - for (sec_idx = 0; sec_idx < image->num_sec; sec_idx++) { - if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) { - sec_idx++; - break; - } - } - - /* - * If paging is enabled there should be at least 2 more sections left - * (one for CSS and one for Paging data) - */ - if (sec_idx >= image->num_sec - 1) { - IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n"); - iwl_free_fw_paging(mvm); - return -EINVAL; - } - - /* copy the CSS block to the dram */ - IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n", - sec_idx); - - memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block), - image->sec[sec_idx].data, - mvm->fw_paging_db[0].fw_paging_size); - dma_sync_single_for_device(mvm->trans->dev, - mvm->fw_paging_db[0].fw_paging_phys, - mvm->fw_paging_db[0].fw_paging_size, - DMA_BIDIRECTIONAL); - - IWL_DEBUG_FW(mvm, - "Paging: copied %d CSS bytes to first block\n", - mvm->fw_paging_db[0].fw_paging_size); - - sec_idx++; - - /* - * copy the paging blocks to the dram - * loop index start from 1 since that CSS block already copied to dram - * and CSS index is 0. - * loop stop at num_of_paging_blk since that last block is not full. - */ - for (idx = 1; idx < mvm->num_of_paging_blk; idx++) { - struct iwl_fw_paging *block = &mvm->fw_paging_db[idx]; - - memcpy(page_address(block->fw_paging_block), - image->sec[sec_idx].data + offset, - block->fw_paging_size); - dma_sync_single_for_device(mvm->trans->dev, - block->fw_paging_phys, - block->fw_paging_size, - DMA_BIDIRECTIONAL); - - - IWL_DEBUG_FW(mvm, - "Paging: copied %d paging bytes to block %d\n", - mvm->fw_paging_db[idx].fw_paging_size, - idx); - - offset += mvm->fw_paging_db[idx].fw_paging_size; - } - - /* copy the last paging block */ - if (mvm->num_of_pages_in_last_blk > 0) { - struct iwl_fw_paging *block = &mvm->fw_paging_db[idx]; - - memcpy(page_address(block->fw_paging_block), - image->sec[sec_idx].data + offset, - FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk); - dma_sync_single_for_device(mvm->trans->dev, - block->fw_paging_phys, - block->fw_paging_size, - DMA_BIDIRECTIONAL); - - IWL_DEBUG_FW(mvm, - "Paging: copied %d pages in the last block %d\n", - mvm->num_of_pages_in_last_blk, idx); - } - - return 0; -} - void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { @@ -293,178 +165,6 @@ void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm, le32_to_cpu(dump_data[i])); } -static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm, - const struct fw_img *image) -{ - struct page *block; - dma_addr_t phys = 0; - int blk_idx, order, num_of_pages, size, dma_enabled; - - if (mvm->fw_paging_db[0].fw_paging_block) - return 0; - - dma_enabled = is_device_dma_capable(mvm->trans->dev); - - /* ensure BLOCK_2_EXP_SIZE is power of 2 of PAGING_BLOCK_SIZE */ - BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE); - - num_of_pages = image->paging_mem_size / FW_PAGING_SIZE; - mvm->num_of_paging_blk = - DIV_ROUND_UP(num_of_pages, NUM_OF_PAGE_PER_GROUP); - mvm->num_of_pages_in_last_blk = - num_of_pages - - 
NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1); - - IWL_DEBUG_FW(mvm, - "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n", - mvm->num_of_paging_blk, - mvm->num_of_pages_in_last_blk); - - /* - * Allocate CSS and paging blocks in dram. - */ - for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) { - /* For CSS allocate 4KB, for others PAGING_BLOCK_SIZE (32K) */ - size = blk_idx ? PAGING_BLOCK_SIZE : FW_PAGING_SIZE; - order = get_order(size); - block = alloc_pages(GFP_KERNEL, order); - if (!block) { - /* free all the previous pages since we failed */ - iwl_free_fw_paging(mvm); - return -ENOMEM; - } - - mvm->fw_paging_db[blk_idx].fw_paging_block = block; - mvm->fw_paging_db[blk_idx].fw_paging_size = size; - - if (dma_enabled) { - phys = dma_map_page(mvm->trans->dev, block, 0, - PAGE_SIZE << order, - DMA_BIDIRECTIONAL); - if (dma_mapping_error(mvm->trans->dev, phys)) { - /* - * free the previous pages and the current one - * since we failed to map_page. - */ - iwl_free_fw_paging(mvm); - return -ENOMEM; - } - mvm->fw_paging_db[blk_idx].fw_paging_phys = phys; - } else { - mvm->fw_paging_db[blk_idx].fw_paging_phys = - PAGING_ADDR_SIG | - blk_idx << BLOCK_2_EXP_SIZE; - } - - if (!blk_idx) - IWL_DEBUG_FW(mvm, - "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n", - order); - else - IWL_DEBUG_FW(mvm, - "Paging: allocated 32K bytes (order %d) for firmware paging.\n", - order); - } - - return 0; -} - -static int iwl_save_fw_paging(struct iwl_mvm *mvm, - const struct fw_img *fw) -{ - int ret; - - ret = iwl_alloc_fw_paging_mem(mvm, fw); - if (ret) - return ret; - - return iwl_fill_paging_mem(mvm, fw); -} - -/* send paging cmd to FW in case CPU2 has paging image */ -static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw) -{ - struct iwl_fw_paging_cmd paging_cmd = { - .flags = cpu_to_le32(PAGING_CMD_IS_SECURED | - PAGING_CMD_IS_ENABLED | - (mvm->num_of_pages_in_last_blk << - PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)), - .block_size = cpu_to_le32(BLOCK_2_EXP_SIZE), - .block_num = cpu_to_le32(mvm->num_of_paging_blk), - }; - int blk_idx; - - /* loop over all paging blocks + CSS block */ - for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) { - dma_addr_t addr = mvm->fw_paging_db[blk_idx].fw_paging_phys; - __le32 phy_addr; - - addr = addr >> PAGE_2_EXP_SIZE; - phy_addr = cpu_to_le32(addr); - paging_cmd.device_phy_addr[blk_idx] = phy_addr; - } - - return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD, - IWL_ALWAYS_LONG_GROUP, 0), - 0, sizeof(paging_cmd), &paging_cmd); -} - -/* - * Send paging item cmd to FW in case CPU2 has paging image - */ -static int iwl_trans_get_paging_item(struct iwl_mvm *mvm) -{ - int ret; - struct iwl_fw_get_item_cmd fw_get_item_cmd = { - .item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING), - }; - - struct iwl_fw_get_item_resp *item_resp; - struct iwl_host_cmd cmd = { - .id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0), - .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, - .data = { &fw_get_item_cmd, }, - }; - - cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd); - - ret = iwl_mvm_send_cmd(mvm, &cmd); - if (ret) { - IWL_ERR(mvm, - "Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n", - ret); - return ret; - } - - item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data; - if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) { - IWL_ERR(mvm, - "Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n", -
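The geometry computed above is plain integer math: the paging image is split into 4 KiB firmware pages, grouped eight to a 32 KiB block, with a possibly short last block. A standalone check of that arithmetic, assuming the driver's usual constants; the image size here is illustrative, not read from a device:

#include <stdio.h>

#define FW_PAGING_SIZE          4096u   /* one firmware page */
#define NUM_OF_PAGE_PER_GROUP   8u      /* pages per 32 KiB block */
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int paging_mem_size = 340 * 1024;      /* example image */
        unsigned int pages = paging_mem_size / FW_PAGING_SIZE;
        unsigned int blocks = DIV_ROUND_UP(pages, NUM_OF_PAGE_PER_GROUP);
        unsigned int last = pages - NUM_OF_PAGE_PER_GROUP * (blocks - 1);

        /* 85 pages -> 11 blocks, 5 pages in the last one */
        printf("%u pages, %u blocks, %u in last block\n",
               pages, blocks, last);
        return 0;
}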
le32_to_cpu(item_resp->item_id)); - ret = -EIO; - goto exit; - } - - /* Add an extra page for headers */ - mvm->trans->paging_download_buf = kzalloc(PAGING_BLOCK_SIZE + - FW_PAGING_SIZE, - GFP_KERNEL); - if (!mvm->trans->paging_download_buf) { - ret = -ENOMEM; - goto exit; - } - mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val); - mvm->trans->paging_db = mvm->fw_paging_db; - IWL_DEBUG_FW(mvm, - "Paging: got paging request address (paging_req_addr 0x%08x)\n", - mvm->trans->paging_req_addr); - -exit: - iwl_free_resp(&cmd); - - return ret; -} - static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { @@ -544,48 +244,6 @@ static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait, return false; } -static int iwl_mvm_init_paging(struct iwl_mvm *mvm) -{ - const struct fw_img *fw = &mvm->fw->img[mvm->cur_ucode]; - int ret; - - /* - * Configure and operate fw paging mechanism. - * The driver configures the paging flow only once. - * The CPU2 paging image is included in the IWL_UCODE_INIT image. - */ - if (!fw->paging_mem_size) - return 0; - - /* - * When dma is not enabled, the driver needs to copy / write - * the downloaded / uploaded page to / from the smem. - * This gets the location of the place were the pages are - * stored. - */ - if (!is_device_dma_capable(mvm->trans->dev)) { - ret = iwl_trans_get_paging_item(mvm); - if (ret) { - IWL_ERR(mvm, "failed to get FW paging item\n"); - return ret; - } - } - - ret = iwl_save_fw_paging(mvm, fw); - if (ret) { - IWL_ERR(mvm, "failed to save the FW paging image\n"); - return ret; - } - - ret = iwl_send_paging_cmd(mvm, fw); - if (ret) { - IWL_ERR(mvm, "failed to send the paging cmd\n"); - iwl_free_fw_paging(mvm); - return ret; - } - - return 0; -} static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, enum iwl_ucode_type ucode_type) { @@ -593,7 +251,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, struct iwl_mvm_alive_data alive_data; const struct fw_img *fw; int ret, i; - enum iwl_ucode_type old_type = mvm->cur_ucode; + enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img; static const u16 alive_cmd[] = { MVM_ALIVE }; struct iwl_sf_region st_fwrd_space; @@ -606,7 +264,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, fw = iwl_get_ucode_image(mvm->fw, ucode_type); if (WARN_ON(!fw)) return -EINVAL; - mvm->cur_ucode = ucode_type; + iwl_fw_set_current_image(&mvm->fwrt, ucode_type); clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); iwl_init_notification_wait(&mvm->notif_wait, &alive_wait, @@ -615,7 +273,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT); if (ret) { - mvm->cur_ucode = old_type; + iwl_fw_set_current_image(&mvm->fwrt, old_type); iwl_remove_notification(&mvm->notif_wait, &alive_wait); return ret; } @@ -639,13 +297,13 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", iwl_read_prph(trans, SB_CPU_1_STATUS), iwl_read_prph(trans, SB_CPU_2_STATUS)); - mvm->cur_ucode = old_type; + iwl_fw_set_current_image(&mvm->fwrt, old_type); return ret; } if (!alive_data.valid) { IWL_ERR(mvm, "Loaded ucode is not valid!\n"); - mvm->cur_ucode = old_type; + iwl_fw_set_current_image(&mvm->fwrt, old_type); return -EIO; } @@ -673,10 +331,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, */ memset(&mvm->queue_info, 0, sizeof(mvm->queue_info)); - if (iwl_mvm_is_dqa_supported(mvm)) - 
mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1; - else - mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1; + mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1; for (i = 0; i < IEEE80211_MAX_QUEUES; i++) atomic_set(&mvm->mac80211_queue_stop_count[i], 0); @@ -774,7 +429,7 @@ error: static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) { struct iwl_phy_cfg_cmd phy_cfg_cmd; - enum iwl_ucode_type ucode_type = mvm->cur_ucode; + enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img; /* Set parameters */ phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm)); @@ -799,7 +454,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) }; int ret; - if (iwl_mvm_has_new_tx_api(mvm)) + if (iwl_mvm_has_unified_ucode(mvm)) return iwl_run_unified_mvm_ucode(mvm, true); lockdep_assert_held(&mvm->mutex); @@ -910,95 +565,6 @@ out: return ret; } -static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm, - struct iwl_rx_packet *pkt) -{ - struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data; - int i, lmac; - int lmac_num = le32_to_cpu(mem_cfg->lmac_num); - - if (WARN_ON(lmac_num > ARRAY_SIZE(mem_cfg->lmac_smem))) - return; - - mvm->smem_cfg.num_lmacs = lmac_num; - mvm->smem_cfg.num_txfifo_entries = - ARRAY_SIZE(mem_cfg->lmac_smem[0].txfifo_size); - mvm->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo2_size); - - for (lmac = 0; lmac < lmac_num; lmac++) { - struct iwl_shared_mem_lmac_cfg *lmac_cfg = - &mem_cfg->lmac_smem[lmac]; - - for (i = 0; i < ARRAY_SIZE(lmac_cfg->txfifo_size); i++) - mvm->smem_cfg.lmac[lmac].txfifo_size[i] = - le32_to_cpu(lmac_cfg->txfifo_size[i]); - mvm->smem_cfg.lmac[lmac].rxfifo1_size = - le32_to_cpu(lmac_cfg->rxfifo1_size); - } -} - -static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm, - struct iwl_rx_packet *pkt) -{ - struct iwl_shared_mem_cfg_v2 *mem_cfg = (void *)pkt->data; - int i; - - mvm->smem_cfg.num_lmacs = 1; - - mvm->smem_cfg.num_txfifo_entries = ARRAY_SIZE(mem_cfg->txfifo_size); - for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) - mvm->smem_cfg.lmac[0].txfifo_size[i] = - le32_to_cpu(mem_cfg->txfifo_size[i]); - - mvm->smem_cfg.lmac[0].rxfifo1_size = - le32_to_cpu(mem_cfg->rxfifo_size[0]); - mvm->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo_size[1]); - - /* new API has more data, from rxfifo_addr field and on */ - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { - BUILD_BUG_ON(sizeof(mvm->smem_cfg.internal_txfifo_size) != - sizeof(mem_cfg->internal_txfifo_size)); - - for (i = 0; - i < ARRAY_SIZE(mvm->smem_cfg.internal_txfifo_size); - i++) - mvm->smem_cfg.internal_txfifo_size[i] = - le32_to_cpu(mem_cfg->internal_txfifo_size[i]); - } -} - -static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm) -{ - struct iwl_host_cmd cmd = { - .flags = CMD_WANT_SKB, - .data = { NULL, }, - .len = { 0, }, - }; - struct iwl_rx_packet *pkt; - - lockdep_assert_held(&mvm->mutex); - - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) - cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0); - else - cmd.id = SHARED_MEM_CFG; - - if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd))) - return; - - pkt = cmd.resp_pkt; - if (iwl_mvm_has_new_tx_api(mvm)) - iwl_mvm_parse_shared_mem_a000(mvm, pkt); - else - iwl_mvm_parse_shared_mem(mvm, pkt); - - IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n"); - - iwl_free_resp(&cmd); -} - static int iwl_mvm_config_ltr(struct iwl_mvm *mvm) { struct iwl_ltr_config_cmd cmd = { @@ -1048,8 +614,8 @@ static union acpi_object 
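Both shared-memory parsers removed above do the same mechanical job: copy each little-endian dword of the firmware's SHARED_MEM_CFG response into a host-order config struct. The same wire-to-host step in self-contained form, with made-up field names and userspace le32toh() standing in for le32_to_cpu():

#include <endian.h>
#include <stdint.h>
#include <string.h>

struct wire_cfg {                       /* as the device lays it out */
        uint32_t txfifo_size[4];        /* little-endian on the wire */
        uint32_t rxfifo_size;
};

struct host_cfg {                       /* as the driver wants it */
        uint32_t txfifo_size[4];
        uint32_t rxfifo_size;
};

static void parse_cfg(const void *pkt, struct host_cfg *out)
{
        struct wire_cfg w;
        size_t i;

        memcpy(&w, pkt, sizeof(w));     /* avoid unaligned loads */
        for (i = 0; i < 4; i++)
                out->txfifo_size[i] = le32toh(w.txfifo_size[i]);
        out->rxfifo_size = le32toh(w.rxfifo_size);
}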
*iwl_mvm_sar_find_wifi_pkg(struct iwl_mvm *mvm, union acpi_object *data, int data_size) { + union acpi_object *wifi_pkg = NULL; int i; - union acpi_object *wifi_pkg; /* * We need at least two packages, one for the revision and one @@ -1465,7 +1031,7 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm) { int ret; - if (iwl_mvm_has_new_tx_api(mvm)) + if (iwl_mvm_has_unified_ucode(mvm)) return iwl_run_unified_mvm_ucode(mvm, false); ret = iwl_run_init_mvm_ucode(mvm, false); @@ -1495,7 +1061,7 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm) if (ret) return ret; - return iwl_mvm_init_paging(mvm); + return iwl_init_paging(&mvm->fwrt, mvm->fwrt.cur_fw_img); } int iwl_mvm_up(struct iwl_mvm *mvm) @@ -1516,24 +1082,24 @@ int iwl_mvm_up(struct iwl_mvm *mvm) goto error; } - iwl_mvm_get_shared_mem_conf(mvm); + iwl_get_shared_mem_conf(&mvm->fwrt); ret = iwl_mvm_sf_update(mvm, NULL, false); if (ret) IWL_ERR(mvm, "Failed to initialize Smart Fifo\n"); - mvm->fw_dbg_conf = FW_DBG_INVALID; + mvm->fwrt.dump.conf = FW_DBG_INVALID; /* if we have a destination, assume EARLY START */ if (mvm->fw->dbg_dest_tlv) - mvm->fw_dbg_conf = FW_DBG_START_FROM_ALIVE; - iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_START_FROM_ALIVE); + mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE; + iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE); ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm)); if (ret) goto error; - /* Send phy db control command and then phy db calibration*/ - if (!iwl_mvm_has_new_tx_api(mvm)) { + if (!iwl_mvm_has_unified_ucode(mvm)) { + /* Send phy db control command and then phy db calibration */ ret = iwl_send_phy_db_data(mvm->phy_db); if (ret) goto error; @@ -1549,7 +1115,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm) /* Init RSS configuration */ /* TODO - remove a000 disablement when we have RXQ config API */ - if (iwl_mvm_has_new_rx_api(mvm) && !iwl_mvm_has_new_tx_api(mvm)) { + if (iwl_mvm_has_new_rx_api(mvm) && + mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_A000) { ret = iwl_send_rss_cfg_cmd(mvm); if (ret) { IWL_ERR(mvm, "Failed to configure RSS queues: %d\n", @@ -1567,14 +1134,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm) /* reset quota debouncing buffer - 0xff will yield invalid data */ memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd)); - /* Enable DQA-mode if required */ - if (iwl_mvm_is_dqa_supported(mvm)) { - ret = iwl_mvm_send_dqa_cmd(mvm); - if (ret) - goto error; - } else { - IWL_DEBUG_FW(mvm, "Working in non-DQA mode\n"); - } + ret = iwl_mvm_send_dqa_cmd(mvm); + if (ret) + goto error; /* Add auxiliary station for scanning */ ret = iwl_mvm_add_aux_sta(mvm); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index dc631b23e189..8fe955d58c6e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2016 Intel Deutschland GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2016 Intel Deutschland GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -72,7 +72,6 @@ #include "fw-api.h" #include "mvm.h" #include "time-event.h" -#include "fw-dbg.h" const u8 iwl_mvm_ac_to_tx_fifo[] = { IWL_MVM_TX_FIFO_VO, @@ -81,6 +80,13 @@ const u8 iwl_mvm_ac_to_tx_fifo[] = { IWL_MVM_TX_FIFO_BK, }; +const u8 iwl_mvm_ac_to_gen2_tx_fifo[] = { + IWL_GEN2_EDCA_TX_FIFO_VO, + IWL_GEN2_EDCA_TX_FIFO_VI, + IWL_GEN2_EDCA_TX_FIFO_BE, + IWL_GEN2_EDCA_TX_FIFO_BK, +}; + struct iwl_mvm_mac_iface_iterator_data { struct iwl_mvm *mvm; struct ieee80211_vif *vif; @@ -235,32 +241,17 @@ static void iwl_mvm_iface_hw_queues_iter(void *_data, u8 *mac, data->used_hw_queues |= iwl_mvm_mac_get_queues_mask(vif); } -static void iwl_mvm_mac_sta_hw_queues_iter(void *_data, - struct ieee80211_sta *sta) -{ - struct iwl_mvm_hw_queues_iface_iterator_data *data = _data; - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - - /* Mark the queues used by the sta */ - data->used_hw_queues |= mvmsta->tfd_queue_msk; -} - unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm, struct ieee80211_vif *exclude_vif) { - u8 sta_id; struct iwl_mvm_hw_queues_iface_iterator_data data = { .exclude_vif = exclude_vif, .used_hw_queues = BIT(IWL_MVM_OFFCHANNEL_QUEUE) | - BIT(mvm->aux_queue), + BIT(mvm->aux_queue) | + BIT(IWL_MVM_DQA_GCAST_QUEUE), }; - if (iwl_mvm_is_dqa_supported(mvm)) - data.used_hw_queues |= BIT(IWL_MVM_DQA_GCAST_QUEUE); - else - data.used_hw_queues |= BIT(IWL_MVM_CMD_QUEUE); - lockdep_assert_held(&mvm->mutex); /* mark all VIF used hw queues */ @@ -268,26 +259,6 @@ unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm, mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, iwl_mvm_iface_hw_queues_iter, &data); - /* - * for DQA, the hw_queue in mac80211 is never really used for - * real traffic (only the few queue IDs covered above), so - * we can reuse the real HW queue IDs the stations use - */ - if (iwl_mvm_is_dqa_supported(mvm)) - return data.used_hw_queues; - - /* don't assign the same hw queues as TDLS stations */ - ieee80211_iterate_stations_atomic(mvm->hw, - iwl_mvm_mac_sta_hw_queues_iter, - &data); - - /* - * Some TDLS stations may be removed but are in the process of being - * drained. Don't touch their queues. - */ - for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) - data.used_hw_queues |= mvm->tfd_drained[sta_id]; - return data.used_hw_queues; } @@ -338,8 +309,7 @@ void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm, NUM_TSF_IDS); } -static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) +int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_mac_iface_iterator_data data = { @@ -355,6 +325,8 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, int ret, i, queue_limit; unsigned long used_hw_queues; + lockdep_assert_held(&mvm->mutex); + /* * Allocate a MAC ID and a TSF for this MAC, along with the queues * and other resources. 
@@ -438,19 +410,14 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, return 0; } - if (iwl_mvm_is_dqa_supported(mvm)) { - /* - * queues in mac80211 almost entirely independent of - * the ones here - no real limit - */ - queue_limit = IEEE80211_MAX_QUEUES; - BUILD_BUG_ON(IEEE80211_MAX_QUEUES > - BITS_PER_BYTE * - sizeof(mvm->hw_queue_to_mac80211[0])); - } else { - /* need to not use too many in this case */ - queue_limit = mvm->first_agg_queue; - } + /* + * queues in mac80211 almost entirely independent of + * the ones here - no real limit + */ + queue_limit = IEEE80211_MAX_QUEUES; + BUILD_BUG_ON(IEEE80211_MAX_QUEUES > + BITS_PER_BYTE * + sizeof(mvm->hw_queue_to_mac80211[0])); /* * Find available queues, and allocate them to the ACs. When in @@ -472,27 +439,12 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, /* Allocate the CAB queue for softAP and GO interfaces */ if (vif->type == NL80211_IFTYPE_AP) { - u8 queue; - - if (!iwl_mvm_is_dqa_supported(mvm)) { - queue = find_first_zero_bit(&used_hw_queues, - mvm->first_agg_queue); - - if (queue >= mvm->first_agg_queue) { - IWL_ERR(mvm, "Failed to allocate cab queue\n"); - ret = -EIO; - goto exit_fail; - } - } else { - queue = IWL_MVM_DQA_GCAST_QUEUE; - } - /* * For TVQM this will be overwritten later with the FW assigned * queue value (when queue is enabled). */ - mvmvif->cab_queue = queue; - vif->cab_queue = queue; + mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; + vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; } else { vif->cab_queue = IEEE80211_INVAL_HW_QUEUE; } @@ -513,78 +465,6 @@ exit_fail: return ret; } -int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) -{ - unsigned int wdg_timeout = - iwl_mvm_get_wd_timeout(mvm, vif, false, false); - u32 ac; - int ret; - - lockdep_assert_held(&mvm->mutex); - - ret = iwl_mvm_mac_ctxt_allocate_resources(mvm, vif); - if (ret) - return ret; - - /* If DQA is supported - queues will be enabled when needed */ - if (iwl_mvm_is_dqa_supported(mvm)) - return 0; - - switch (vif->type) { - case NL80211_IFTYPE_P2P_DEVICE: - iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, - IWL_MVM_OFFCHANNEL_QUEUE, - IWL_MVM_TX_FIFO_VO, 0, wdg_timeout); - break; - case NL80211_IFTYPE_AP: - iwl_mvm_enable_ac_txq(mvm, vif->cab_queue, vif->cab_queue, - IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout); - /* fall through */ - default: - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) - iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac], - vif->hw_queue[ac], - iwl_mvm_ac_to_tx_fifo[ac], 0, - wdg_timeout); - break; - } - - return 0; -} - -void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif) -{ - int ac; - - lockdep_assert_held(&mvm->mutex); - - /* - * If DQA is supported - queues were already disabled, since in - * DQA-mode the queues are a property of the STA and not of the - * vif, and at this point the STA was already deleted - */ - if (iwl_mvm_is_dqa_supported(mvm)) - return; - - switch (vif->type) { - case NL80211_IFTYPE_P2P_DEVICE: - iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, - IWL_MVM_OFFCHANNEL_QUEUE, - IWL_MAX_TID_COUNT, 0); - - break; - case NL80211_IFTYPE_AP: - iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue, - IWL_MAX_TID_COUNT, 0); - /* fall through */ - default: - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) - iwl_mvm_disable_txq(mvm, vif->hw_queue[ac], - vif->hw_queue[ac], - IWL_MAX_TID_COUNT, 0); - } -} - static void iwl_mvm_ack_rates(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum nl80211_band band, @@ -775,7 +655,7 @@ static void 
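The dropped non-DQA branch handed out the AP's CAB queue with find_first_zero_bit() over the used_hw_queues bitmap; in DQA mode the queue is simply the fixed IWL_MVM_DQA_GCAST_QUEUE. For reference, the bitmap allocation step in isolation, with generic names rather than the driver's API:

#include <linux/bitops.h>
#include <linux/errno.h>

/* find and reserve the lowest unused queue below the limit */
static int claim_free_queue(unsigned long *used, unsigned int limit)
{
        unsigned int q = find_first_zero_bit(used, limit);

        if (q >= limit)
                return -EIO;    /* every queue below the limit is taken */
        __set_bit(q, used);     /* reserve it for this vif */
        return q;
}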
iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP); for (i = 0; i < IEEE80211_NUM_ACS; i++) { - u8 txf = iwl_mvm_ac_to_tx_fifo[i]; + u8 txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, i); cmd->ac[txf].cw_min = cpu_to_le16(mvmvif->queue_params[i].cw_min); @@ -908,18 +788,12 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm, { struct iwl_mac_ctx_cmd cmd = {}; u32 tfd_queue_msk = 0; - int ret, i; + int ret; WARN_ON(vif->type != NL80211_IFTYPE_MONITOR); iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); - if (!iwl_mvm_is_dqa_supported(mvm)) { - for (i = 0; i < IEEE80211_NUM_ACS; i++) - if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE) - tfd_queue_msk |= BIT(vif->hw_queue[i]); - } - cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROMISC | MAC_FILTER_IN_CONTROL_AND_MGMT | MAC_FILTER_IN_BEACON | @@ -1049,83 +923,26 @@ static u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size) return ie - beacon; } -static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct sk_buff *beacon) +static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct sk_buff *beacon, + struct iwl_tx_cmd *tx) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_host_cmd cmd = { - .id = BEACON_TEMPLATE_CMD, - .flags = CMD_ASYNC, - }; - union { - struct iwl_mac_beacon_cmd_v6 beacon_cmd_v6; - struct iwl_mac_beacon_cmd_v7 beacon_cmd; - } u = {}; - struct iwl_mac_beacon_cmd beacon_cmd = {}; struct ieee80211_tx_info *info; - u32 beacon_skb_len; u32 rate, tx_flags; - if (WARN_ON(!beacon)) - return -EINVAL; - - beacon_skb_len = beacon->len; - - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD)) { - u32 csa_offset, ecsa_offset; - - csa_offset = iwl_mvm_find_ie_offset(beacon->data, - WLAN_EID_CHANNEL_SWITCH, - beacon_skb_len); - ecsa_offset = - iwl_mvm_find_ie_offset(beacon->data, - WLAN_EID_EXT_CHANSWITCH_ANN, - beacon_skb_len); - - if (iwl_mvm_has_new_tx_api(mvm)) { - beacon_cmd.data.template_id = - cpu_to_le32((u32)mvmvif->id); - beacon_cmd.data.ecsa_offset = cpu_to_le32(ecsa_offset); - beacon_cmd.data.csa_offset = cpu_to_le32(csa_offset); - beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon_skb_len); - if (vif->type == NL80211_IFTYPE_AP) - iwl_mvm_mac_ctxt_set_tim(mvm, - &beacon_cmd.data.tim_idx, - &beacon_cmd.data.tim_size, - beacon->data, - beacon_skb_len); - cmd.len[0] = sizeof(beacon_cmd); - cmd.data[0] = &beacon_cmd; - goto send; - - } else { - u.beacon_cmd.data.ecsa_offset = - cpu_to_le32(ecsa_offset); - u.beacon_cmd.data.csa_offset = cpu_to_le32(csa_offset); - cmd.len[0] = sizeof(u.beacon_cmd); - cmd.data[0] = &u; - } - } else { - cmd.len[0] = sizeof(u.beacon_cmd_v6); - cmd.data[0] = &u; - } - - /* TODO: for now the beacon template id is set to be the mac context id. - * Might be better to handle it as another resource ... 
*/ - u.beacon_cmd_v6.template_id = cpu_to_le32((u32)mvmvif->id); info = IEEE80211_SKB_CB(beacon); /* Set up TX command fields */ - u.beacon_cmd_v6.tx.len = cpu_to_le16((u16)beacon_skb_len); - u.beacon_cmd_v6.tx.sta_id = mvmvif->bcast_sta.sta_id; - u.beacon_cmd_v6.tx.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); + tx->len = cpu_to_le16((u16)beacon->len); + tx->sta_id = mvmvif->bcast_sta.sta_id; + tx->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); tx_flags = TX_CMD_FLG_SEQ_CTL | TX_CMD_FLG_TSF; tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, (void *)beacon->data, info, 0) << TX_CMD_FLG_BT_PRIO_POS; - u.beacon_cmd_v6.tx.tx_flags = cpu_to_le32(tx_flags); + tx->tx_flags = cpu_to_le32(tx_flags); if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION)) { @@ -1134,7 +951,7 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, mvm->mgmt_last_antenna_idx); } - u.beacon_cmd_v6.tx.rate_n_flags = + tx->rate_n_flags = cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS); @@ -1142,29 +959,126 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, rate = IWL_FIRST_OFDM_RATE; } else { rate = IWL_FIRST_CCK_RATE; - u.beacon_cmd_v6.tx.rate_n_flags |= - cpu_to_le32(RATE_MCS_CCK_MSK); + tx->rate_n_flags |= cpu_to_le32(RATE_MCS_CCK_MSK); } - u.beacon_cmd_v6.tx.rate_n_flags |= - cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(rate)); - /* Set up TX beacon command fields */ - if (vif->type == NL80211_IFTYPE_AP) - iwl_mvm_mac_ctxt_set_tim(mvm, &u.beacon_cmd_v6.tim_idx, - &u.beacon_cmd_v6.tim_size, - beacon->data, - beacon_skb_len); + tx->rate_n_flags |= cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(rate)); +} + +static int iwl_mvm_mac_ctxt_send_beacon_cmd(struct iwl_mvm *mvm, + struct sk_buff *beacon, + void *data, int len) +{ + struct iwl_host_cmd cmd = { + .id = BEACON_TEMPLATE_CMD, + .flags = CMD_ASYNC, + }; -send: - /* Submit command */ + cmd.len[0] = len; + cmd.data[0] = data; cmd.dataflags[0] = 0; - cmd.len[1] = beacon_skb_len; + cmd.len[1] = beacon->len; cmd.data[1] = beacon->data; cmd.dataflags[1] = IWL_HCMD_DFL_DUP; return iwl_mvm_send_cmd(mvm, &cmd); } +static int iwl_mvm_mac_ctxt_send_beacon_v6(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct sk_buff *beacon) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mac_beacon_cmd_v6 beacon_cmd = {}; + + iwl_mvm_mac_ctxt_set_tx(mvm, vif, beacon, &beacon_cmd.tx); + + beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); + + if (vif->type == NL80211_IFTYPE_AP) + iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, + &beacon_cmd.tim_size, + beacon->data, beacon->len); + + return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, + sizeof(beacon_cmd)); +} + +static int iwl_mvm_mac_ctxt_send_beacon_v7(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct sk_buff *beacon) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mac_beacon_cmd_v7 beacon_cmd = {}; + + iwl_mvm_mac_ctxt_set_tx(mvm, vif, beacon, &beacon_cmd.tx); + + beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); + + if (vif->type == NL80211_IFTYPE_AP) + iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, + &beacon_cmd.tim_size, + beacon->data, beacon->len); + + beacon_cmd.csa_offset = + cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, + WLAN_EID_CHANNEL_SWITCH, + beacon->len)); + beacon_cmd.ecsa_offset = + cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, + WLAN_EID_EXT_CHANSWITCH_ANN, + beacon->len)); + + return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, + 
sizeof(beacon_cmd)); +} + +static int iwl_mvm_mac_ctxt_send_beacon_v8(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct sk_buff *beacon) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mac_beacon_cmd beacon_cmd = {}; + + beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon->len); + beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); + + if (vif->type == NL80211_IFTYPE_AP) + iwl_mvm_mac_ctxt_set_tim(mvm, + &beacon_cmd.tim_idx, + &beacon_cmd.tim_size, + beacon->data, beacon->len); + + beacon_cmd.csa_offset = + cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, + WLAN_EID_CHANNEL_SWITCH, + beacon->len)); + beacon_cmd.ecsa_offset = + cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, + WLAN_EID_EXT_CHANSWITCH_ANN, + beacon->len)); + + return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, + sizeof(beacon_cmd)); +} + +static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct sk_buff *beacon) +{ + if (WARN_ON(!beacon)) + return -EINVAL; + + if (!fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD)) + return iwl_mvm_mac_ctxt_send_beacon_v6(mvm, vif, beacon); + + if (!iwl_mvm_has_new_tx_api(mvm)) + return iwl_mvm_mac_ctxt_send_beacon_v7(mvm, vif, beacon); + + return iwl_mvm_mac_ctxt_send_beacon_v8(mvm, vif, beacon); +} + /* The beacon template for the AP/GO/IBSS has changed and needs update */ int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif) @@ -1559,12 +1473,14 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac, /* TODO: implement start trigger */ - if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trigger)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, + ieee80211_vif_to_wdev(vif), + trigger)) return; if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx || rx_missed_bcon >= stop_trig_missed_bcon) - iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL); } void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index c7b1e58e3384..2d1404c9fbf4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -87,7 +87,6 @@ #include "fw/error-dump.h" #include "iwl-prph.h" #include "iwl-nvm-parse.h" -#include "fw-dbg.h" static const struct ieee80211_iface_limit iwl_mvm_limits[] = { { @@ -446,8 +445,18 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR); if (iwl_mvm_has_new_rx_api(mvm)) ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); - if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_STA_PM_NOTIF)) + + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_STA_PM_NOTIF)) { ieee80211_hw_set(hw, AP_LINK_PS); + } else if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) { + /* + * we absolutely need this for the new TX API since that comes + * with many more queues than the current code can deal with + * for station powersave + */ + return -EINVAL; + } if (mvm->trans->num_rx_queues > 1) ieee80211_hw_set(hw, USES_RSS); @@ -455,10 +464,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) if (mvm->trans->max_skb_frags) hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG; - if (!iwl_mvm_is_dqa_supported(mvm)) - hw->queues = mvm->first_agg_queue; - else - hw->queues = IEEE80211_MAX_QUEUES; + hw->queues = IEEE80211_MAX_QUEUES; hw->offchannel_tx_hw_queue = 
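The rewritten beacon path above ends in a three-way capability ladder: firmware without CSA/TBTT offload gets the v6 command, offload-capable firmware without the new TX API gets v7, and everything newer gets v8. The dispatch reduced to a sketch, with stand-in predicates for the fw_has_capa()/iwl_mvm_has_new_tx_api() checks:

#include <stdbool.h>

enum beacon_api { BEACON_V6, BEACON_V7, BEACON_V8 };

static enum beacon_api pick_beacon_api(bool csa_tbtt_offload,
                                       bool new_tx_api)
{
        if (!csa_tbtt_offload)
                return BEACON_V6;       /* no CSA/ECSA offsets in the cmd */
        if (!new_tx_api)
                return BEACON_V7;       /* offsets, classic TX command */
        return BEACON_V8;               /* byte_cnt-based layout */
}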
IWL_MVM_OFFCHANNEL_QUEUE; hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC | IEEE80211_RADIOTAP_MCS_HAVE_STBC; @@ -799,7 +805,7 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, goto drop; } - if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE && + if (info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE && !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) && !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) goto drop; @@ -807,9 +813,7 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, /* treat non-bufferable MMPDUs as broadcast if sta is sleeping */ if (unlikely(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER && ieee80211_is_mgmt(hdr->frame_control) && - !ieee80211_is_deauth(hdr->frame_control) && - !ieee80211_is_disassoc(hdr->frame_control) && - !ieee80211_is_action(hdr->frame_control))) + !ieee80211_is_bufferable_mmpdu(hdr->frame_control))) sta = NULL; if (sta) { @@ -845,11 +849,11 @@ static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg) return true; } -#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \ - do { \ - if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \ - break; \ - iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt); \ +#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \ + do { \ + if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \ + break; \ + iwl_fw_dbg_collect_trig(&(_mvm)->fwrt, _trig, _fmt); \ } while (0) static void @@ -866,7 +870,8 @@ iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif, trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); ba_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, + ieee80211_vif_to_wdev(vif), trig)) return; switch (action) { @@ -1029,8 +1034,8 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) * on D3->D0 transition */ if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) { - mvm->fw_dump_desc = &iwl_mvm_dump_desc_assert; - iwl_mvm_fw_error_dump(mvm); + mvm->fwrt.dump.desc = &iwl_dump_desc_assert; + iwl_fw_error_dump(&mvm->fwrt); } /* cleanup all stale references (scan, roc), but keep the @@ -1059,9 +1064,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) iwl_mvm_reset_phy_ctxts(mvm); memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); - memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained)); memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames)); - memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained)); memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd)); @@ -1072,7 +1075,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) mvm->vif_count = 0; mvm->rx_ba_sessions = 0; - mvm->fw_dbg_conf = FW_DBG_INVALID; + mvm->fwrt.dump.conf = FW_DBG_INVALID; /* keep statistics ticking */ iwl_mvm_accu_radio_stats(mvm); @@ -1255,16 +1258,16 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw) * Lock and clear the firmware running bit here already, so that * new commands coming in elsewhere, e.g. from debugfs, will not * be able to proceed. This is important here because one of those - * debugfs files causes the fw_dump_wk to be triggered, and if we + * debugfs files causes the firmware dump to be triggered, and if we * don't stop debugfs accesses before canceling that it could be * retriggered after we flush it but before we've cleared the bit. 
*/ clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); - cancel_delayed_work_sync(&mvm->fw_dump_wk); + iwl_fw_cancel_dump(&mvm->fwrt); cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork); cancel_delayed_work_sync(&mvm->scan_timeout_dwork); - iwl_mvm_free_fw_dump_desc(mvm); + iwl_fw_free_dump_desc(&mvm->fwrt); mutex_lock(&mvm->mutex); __iwl_mvm_mac_stop(mvm); @@ -1370,17 +1373,15 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, goto out_release; } - if (iwl_mvm_is_dqa_supported(mvm)) { - /* - * Only queue for this station is the mcast queue, - * which shouldn't be in TFD mask anyway - */ - ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta, - 0, vif->type, - IWL_STA_MULTICAST); - if (ret) - goto out_release; - } + /* + * Only queue for this station is the mcast queue, + * which shouldn't be in TFD mask anyway + */ + ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta, + 0, vif->type, + IWL_STA_MULTICAST); + if (ret) + goto out_release; iwl_mvm_vif_dbgfs_register(mvm, vif); goto out_unlock; @@ -1426,7 +1427,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, if (ret) goto out_unref_phy; - ret = iwl_mvm_add_bcast_sta(mvm, vif); + ret = iwl_mvm_add_p2p_bcast_sta(mvm, vif); if (ret) goto out_unbind; @@ -1454,8 +1455,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, out_release: if (vif->type != NL80211_IFTYPE_P2P_DEVICE) mvm->vif_count--; - - iwl_mvm_mac_ctxt_release(mvm, vif); out_unlock: mutex_unlock(&mvm->mutex); @@ -1467,40 +1466,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { - u32 tfd_msk = iwl_mvm_mac_get_queues_mask(vif); - - if (tfd_msk && !iwl_mvm_is_dqa_supported(mvm)) { - /* - * mac80211 first removes all the stations of the vif and - * then removes the vif. When it removes a station it also - * flushes the AMPDU session. So by now, all the AMPDU sessions - * of all the stations of this vif are closed, and the queues - * of these AMPDU sessions are properly closed. - * We still need to take care of the shared queues of the vif. - * Flush them here. - * For DQA mode there is no need - broadcast and multicast queue - * are flushed separately. - */ - mutex_lock(&mvm->mutex); - iwl_mvm_flush_tx_path(mvm, tfd_msk, 0); - mutex_unlock(&mvm->mutex); - - /* - * There are transports that buffer a few frames in the host. - * For these, the flush above isn't enough since while we were - * flushing, the transport might have sent more frames to the - * device. To solve this, wait here until the transport is - * empty. Technically, this could have replaced the flush - * above, but flush is much faster than draining. So flush - * first, and drain to make sure we have no frames in the - * transport anymore. - * If a station still had frames on the shared queues, it is - * already marked as draining, so to complete the draining, we - * just need to wait until the transport is empty. - */ - iwl_trans_wait_tx_queues_empty(mvm->trans, tfd_msk); - } - if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { /* * Flush the ROC worker which will flush the OFFCHANNEL queue. @@ -1508,14 +1473,6 @@ static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, * queue are sent in ROC session. */ flush_work(&mvm->roc_done_wk); - } else { - /* - * By now, all the AC queues are empty. The AGG queues are - * empty too. We already got all the Tx responses for all the - * packets in the queues. The drain work can have been - * triggered. Flush it.
- */ - flush_work(&mvm->sta_drained_wk); } } @@ -1556,7 +1513,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { mvm->p2p_device_vif = NULL; - iwl_mvm_rm_bcast_sta(mvm, vif); + iwl_mvm_rm_p2p_bcast_sta(mvm, vif); iwl_mvm_binding_remove_vif(mvm, vif); iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); mvmvif->phy_ctxt = NULL; @@ -1569,7 +1526,6 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, iwl_mvm_mac_ctxt_remove(mvm, vif); out_release: - iwl_mvm_mac_ctxt_release(mvm, vif); mutex_unlock(&mvm->mutex); } @@ -2405,15 +2361,18 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, unsigned long txqs = 0, tids = 0; int tid; + /* + * If we have TVQM then we get too high queue numbers - luckily + * we really shouldn't get here with that because such hardware + * should have firmware supporting buffer station offload. + */ + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return; + spin_lock_bh(&mvmsta->lock); for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; - if (!iwl_mvm_is_dqa_supported(mvm) && - tid_data->state != IWL_AGG_ON && - tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA) - continue; - if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE) continue; @@ -2427,9 +2386,6 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, switch (cmd) { case STA_NOTIFY_SLEEP: - if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0) - ieee80211_sta_block_awake(hw, sta, true); - for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT) ieee80211_sta_set_buffered(sta, tid, true); @@ -2572,7 +2528,8 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm, trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TDLS); tdls_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, + ieee80211_vif_to_wdev(vif), trig)) return; if (!(tdls_trig->action_bitmap & BIT(action))) @@ -2582,9 +2539,9 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm, memcmp(tdls_trig->peer, peer_addr, ETH_ALEN) != 0) return; - iwl_mvm_fw_dbg_collect_trig(mvm, trig, - "TDLS event occurred, peer %pM, action %d", - peer_addr, action); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, + "TDLS event occurred, peer %pM, action %d", + peer_addr, action); } static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm, @@ -2621,9 +2578,6 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) return -EINVAL; - /* if a STA is being removed, reuse its ID */ - flush_work(&mvm->sta_drained_wk); - /* * If we are in a STA removal flow and in DQA mode: * @@ -2638,8 +2592,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, * make sure the worker is no longer handling frames for this STA. 
*/ if (old_state == IEEE80211_STA_NONE && - new_state == IEEE80211_STA_NOTEXIST && - iwl_mvm_is_dqa_supported(mvm)) { + new_state == IEEE80211_STA_NOTEXIST) { iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta); flush_work(&mvm->add_stream_wk); @@ -3882,7 +3835,9 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n", chsw->chandef.center_freq1); - iwl_fw_dbg_trigger_simple_stop(mvm, vif, FW_DBG_TRIGGER_CHANNEL_SWITCH); + iwl_fw_dbg_trigger_simple_stop(&mvm->fwrt, + ieee80211_vif_to_wdev(vif), + FW_DBG_TRIGGER_CHANNEL_SWITCH); switch (vif->type) { case NL80211_IFTYPE_AP: @@ -4019,8 +3974,7 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, return; /* Make sure we're done with the deferred traffic before flushing */ - if (iwl_mvm_is_dqa_supported(mvm)) - flush_work(&mvm->add_stream_wk); + flush_work(&mvm->add_stream_wk); mutex_lock(&mvm->mutex); mvmvif = iwl_mvm_vif_from_mac80211(vif); @@ -4157,11 +4111,11 @@ static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const struct ieee80211_event *event) { -#define CHECK_MLME_TRIGGER(_cnt, _fmt...) \ - do { \ - if ((trig_mlme->_cnt) && --(trig_mlme->_cnt)) \ - break; \ - iwl_mvm_fw_dbg_collect_trig(mvm, trig, _fmt); \ +#define CHECK_MLME_TRIGGER(_cnt, _fmt...) \ + do { \ + if ((trig_mlme->_cnt) && --(trig_mlme->_cnt)) \ + break; \ + iwl_fw_dbg_collect_trig(&(mvm)->fwrt, trig, _fmt); \ } while (0) struct iwl_fw_dbg_trigger_tlv *trig; @@ -4172,7 +4126,8 @@ static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm, trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME); trig_mlme = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, + ieee80211_vif_to_wdev(vif), trig)) return; if (event->u.mlme.data == ASSOC_EVENT) { @@ -4213,16 +4168,17 @@ static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm, trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); ba_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, + ieee80211_vif_to_wdev(vif), trig)) return; if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid))) return; - iwl_mvm_fw_dbg_collect_trig(mvm, trig, - "BAR received from %pM, tid %d, ssn %d", - event->u.ba.sta->addr, event->u.ba.tid, - event->u.ba.ssn); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, + "BAR received from %pM, tid %d, ssn %d", + event->u.ba.sta->addr, event->u.ba.tid, + event->u.ba.ssn); } static void @@ -4238,15 +4194,16 @@ iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm, trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); ba_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, + ieee80211_vif_to_wdev(vif), trig)) return; if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(event->u.ba.tid))) return; - iwl_mvm_fw_dbg_collect_trig(mvm, trig, - "Frame from %pM timed out, tid %d", - event->u.ba.sta->addr, event->u.ba.tid); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, + "Frame from %pM timed out, tid %d", + event->u.ba.sta->addr, event->u.ba.tid); } static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, @@ -4280,7 +4237,8 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); /* TODO - remove a000 disablement when we have RXQ config API */ - if (!iwl_mvm_has_new_rx_api(mvm) || iwl_mvm_has_new_tx_api(mvm)) + if (!iwl_mvm_has_new_rx_api(mvm) || + 
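The reindented CHECK_MLME_TRIGGER macro above is a countdown: a zero counter fires on every event, while a counter of N fires on the Nth event and then stays at zero, firing from then on. The same logic as a plain function, with an illustrative name for the occurrence counter:

#include <stdbool.h>
#include <stdint.h>

static bool trigger_fires(uint8_t *cnt)
{
        /* still counting down: consume one occurrence, don't fire */
        if (*cnt && --(*cnt))
                return false;
        /* *cnt was already 0, or this event just brought it to 0 */
        return true;
}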
mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_A000) return; notif->cookie = mvm->queue_sync_cookie; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index ddd8719f27b8..c274fe177dfa 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -87,6 +87,8 @@ #include "fw-api.h" #include "constants.h" #include "tof.h" +#include "fw/runtime.h" +#include "fw/dbg.h" #define IWL_MVM_MAX_ADDRESSES 5 /* RSSI offset for WkP */ @@ -119,6 +121,9 @@ */ #define IWL_MVM_CS_UNBLOCK_TX_TIMEOUT 3 +/* offchannel queue towards mac80211 */ +#define IWL_MVM_OFFCHANNEL_QUEUE 0 + extern const struct ieee80211_ops iwl_mvm_hw_ops; /** @@ -137,34 +142,6 @@ struct iwl_mvm_mod_params { }; extern struct iwl_mvm_mod_params iwlmvm_mod_params; -/** - * struct iwl_mvm_dump_ptrs - set of pointers needed for the fw-error-dump - * - * @op_mode_ptr: pointer to the buffer coming from the mvm op_mode - * @trans_ptr: pointer to struct %iwl_trans_dump_data which contains the - * transport's data. - * @trans_len: length of the valid data in trans_ptr - * @op_mode_len: length of the valid data in op_mode_ptr - */ -struct iwl_mvm_dump_ptrs { - struct iwl_trans_dump_data *trans_ptr; - void *op_mode_ptr; - u32 op_mode_len; -}; - -/** - * struct iwl_mvm_dump_desc - describes the dump - * @len: length of trig_desc->data - * @trig_desc: the description of the dump - */ -struct iwl_mvm_dump_desc { - size_t len; - /* must be last */ - struct iwl_fw_error_dump_trigger_desc trig_desc; -}; - -extern const struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert; - struct iwl_mvm_phy_ctxt { u16 id; u16 color; @@ -606,19 +583,6 @@ enum iwl_mvm_tdls_cs_state { IWL_MVM_TDLS_SW_ACTIVE, }; -#define MAX_NUM_LMAC 2 -struct iwl_mvm_shared_mem_cfg { - int num_lmacs; - int num_txfifo_entries; - struct { - u32 txfifo_size[TX_FIFO_MAX_NUM]; - u32 rxfifo1_size; - } lmac[MAX_NUM_LMAC]; - u32 rxfifo2_size; - u32 internal_txfifo_addr; - u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; -}; - /** * struct iwl_mvm_reorder_buffer - per ra/tid/queue reorder buffer * @head_sn: reorder window head sn @@ -766,7 +730,6 @@ struct iwl_mvm { */ struct iwl_mvm_vif *bf_allowed_vif; - enum iwl_ucode_type cur_ucode; bool hw_registered; bool calibrating; u32 error_event_table[2]; @@ -815,10 +778,7 @@ struct iwl_mvm { /* NVM sections */ struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS]; - /* Paging section */ - struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS]; - u16 num_of_paging_blk; - u16 num_of_pages_in_last_blk; + struct iwl_fw_runtime fwrt; /* EEPROM MAC addresses */ struct mac_address addresses[IWL_MVM_MAX_ADDRESSES]; @@ -826,11 +786,7 @@ struct iwl_mvm { /* data related to data path */ struct iwl_rx_phy_info last_phy_info; struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT]; - struct work_struct sta_drained_wk; unsigned long sta_deferred_frames[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)]; - unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)]; - atomic_t pending_frames[IWL_MVM_STATION_COUNT]; - u32 tfd_drained[IWL_MVM_STATION_COUNT]; u8 rx_ba_sessions; /* configured by mac80211 */ @@ -847,9 +803,6 @@ struct iwl_mvm { /* max number of simultaneous scans the FW supports */ unsigned int max_scans; - /* ts of the beginning of a non-collect fw dbg data period */ - unsigned long fw_dbg_non_collect_ts_start[FW_DBG_TRIGGER_MAX - 1]; - /* UMAC scan tracking */ u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS]; @@ -925,10 +878,6 @@ struct 
iwl_mvm { /* -1 for always, 0 for never, >0 for that many times */ s8 fw_restart; - u8 fw_dbg_conf; - struct delayed_work fw_dump_wk; - const struct iwl_mvm_dump_desc *fw_dump_desc; - const struct iwl_fw_dbg_trigger_tlv *fw_dump_trig; #ifdef CONFIG_IWLWIFI_LEDS struct led_classdev led; @@ -1010,9 +959,6 @@ struct iwl_mvm { u16 probe_queue; u16 p2p_dev_queue; - u8 first_agg_queue; - u8 last_agg_queue; - /* Indicate if device power save is allowed */ u8 ps_disabled; /* u8 instead of bool to ease debugfs_create_* usage */ unsigned int max_amsdu_len; /* used for debugfs only */ @@ -1055,7 +1001,6 @@ struct iwl_mvm { } peer; } tdls_cs; - struct iwl_mvm_shared_mem_cfg smem_cfg; u32 ciphers[IWL_MVM_NUM_CIPHERS]; struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS]; @@ -1095,7 +1040,6 @@ struct iwl_mvm { * @IWL_MVM_STATUS_IN_D0I3: NIC is in D0i3 * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running * @IWL_MVM_STATUS_D3_RECONFIG: D3 reconfiguration is being done - * @IWL_MVM_STATUS_DUMPING_FW_LOG: FW log is being dumped * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running */ enum iwl_mvm_status { @@ -1107,7 +1051,6 @@ enum iwl_mvm_status { IWL_MVM_STATUS_IN_D0I3, IWL_MVM_STATUS_ROC_AUX_RUNNING, IWL_MVM_STATUS_D3_RECONFIG, - IWL_MVM_STATUS_DUMPING_FW_LOG, IWL_MVM_STATUS_FIRMWARE_RUNNING, }; @@ -1180,12 +1123,6 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm) IWL_UCODE_TLV_CAPA_D0I3_SUPPORT); } -static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm) -{ - return fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_DQA_SUPPORT); -} - static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm) { /* For now we only use this mode to differentiate between @@ -1287,6 +1224,12 @@ static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm) return mvm->trans->cfg->use_tfh; } +static inline bool iwl_mvm_has_unified_ucode(struct iwl_mvm *mvm) +{ + /* TODO - better define this */ + return mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_A000; +} + static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm) { /* @@ -1340,6 +1283,14 @@ static inline bool iwl_mvm_is_ctdp_supported(struct iwl_mvm *mvm) } extern const u8 iwl_mvm_ac_to_tx_fifo[]; +extern const u8 iwl_mvm_ac_to_gen2_tx_fifo[]; + +static inline u8 iwl_mvm_mac_ac_to_tx_fifo(struct iwl_mvm *mvm, + enum ieee80211_ac_numbers ac) +{ + return iwl_mvm_has_new_tx_api(mvm) ? + iwl_mvm_ac_to_gen2_tx_fifo[ac] : iwl_mvm_ac_to_tx_fifo[ac]; +} struct iwl_rate_info { u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. 
*/ @@ -1510,7 +1461,6 @@ u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef); /* MAC (virtual interface) programming */ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool force_assoc_off, const u8 *bssid_override); @@ -1573,9 +1523,6 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); -/* Paging */ -void iwl_free_fw_paging(struct iwl_mvm *mvm); - /* MVM debugfs */ #ifdef CONFIG_IWLWIFI_DEBUGFS int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir); @@ -1764,10 +1711,6 @@ bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue, u8 sta_id, u8 tid, unsigned int timeout); -/* - * Disable a TXQ. - * Note that in non-DQA mode the %mac80211_queue and %tid params are ignored. - */ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, u8 tid, u8 flags); int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq); @@ -1777,33 +1720,15 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq); */ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm) { - u32 cmd_queue = iwl_mvm_is_dqa_supported(mvm) ? IWL_MVM_DQA_CMD_QUEUE : - IWL_MVM_CMD_QUEUE; - return ((BIT(mvm->cfg->base_params->num_of_queues) - 1) & - ~BIT(cmd_queue)); -} - -static inline -void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, - u8 fifo, u16 ssn, unsigned int wdg_timeout) -{ - struct iwl_trans_txq_scd_cfg cfg = { - .fifo = fifo, - .tid = IWL_MAX_TID_COUNT, - .aggregate = false, - .frame_limit = IWL_FRAME_LIMIT, - }; - - iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout); + ~BIT(IWL_MVM_DQA_CMD_QUEUE)); } static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm) { - if (!iwl_mvm_has_new_tx_api(mvm)) - iwl_free_fw_paging(mvm); + iwl_free_fw_paging(&mvm->fwrt); clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); - mvm->fw_dbg_conf = FW_DBG_INVALID; + iwl_fw_dump_conf_clear(&mvm->fwrt); iwl_trans_stop_device(mvm->trans); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index dac7e542a190..5cc749261ce3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -576,11 +576,8 @@ int iwl_mvm_nvm_get_from_fw(struct iwl_mvm *mvm) } rsp = (void *)hcmd.resp_pkt->data; - if (le32_to_cpu(rsp->general.flags)) { - IWL_ERR(mvm, "Invalid NVM data from FW\n"); - ret = -EINVAL; - goto out; - } + if (le32_to_cpu(rsp->general.flags) & NVM_GENERAL_FLAGS_EMPTY_OTP) + IWL_INFO(mvm, "OTP is empty\n"); mvm->nvm_data = kzalloc(sizeof(*mvm->nvm_data) + sizeof(struct ieee80211_channel) * diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 9c175d5e9d67..9c9c1b4b6d48 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -82,11 +82,10 @@ #include "iwl-io.h" #include "iwl-prph.h" #include "rs.h" -#include "fw-api-scan.h" +#include "fw/api/scan.h" #include "time-event.h" -#include "fw-dbg.h" #include "fw-api.h" -#include "fw-api-scan.h" 
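With DQA now unconditional, iwl_mvm_flushable_queues() above collapses to "every queue except the command queue" as a bitmask. A standalone version of that mask computation; the 31-queue, command-queue-0 values are examples of the DQA layout, and the shift assumes fewer than 32 queues:

#include <stdint.h>
#include <stdio.h>

static uint32_t flushable_queues(unsigned int num_queues,
                                 unsigned int cmd_queue)
{
        uint32_t all = (UINT32_C(1) << num_queues) - 1; /* queues 0..n-1 */

        return all & ~(UINT32_C(1) << cmd_queue);       /* drop cmd queue */
}

int main(void)
{
        /* e.g. 31 queues with the command queue at 0 -> 0x7ffffffe */
        printf("0x%08x\n", flushable_queues(31, 0));
        return 0;
}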
+#include "fw/api/scan.h" #define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux" MODULE_DESCRIPTION(DRV_DESCRIPTION); @@ -510,8 +509,6 @@ static u32 calc_min_backoff(struct iwl_trans *trans, const struct iwl_cfg *cfg) return 0; } -static void iwl_mvm_fw_error_dump_wk(struct work_struct *work); - static void iwl_mvm_tx_unblock_dwork(struct work_struct *work) { struct iwl_mvm *mvm = @@ -535,6 +532,34 @@ unlock: mutex_unlock(&mvm->mutex); } +static int iwl_mvm_fwrt_dump_start(void *ctx) +{ + struct iwl_mvm *mvm = ctx; + int ret; + + ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT); + if (ret) + return ret; + + mutex_lock(&mvm->mutex); + + return 0; +} + +static void iwl_mvm_fwrt_dump_end(void *ctx) +{ + struct iwl_mvm *mvm = ctx; + + mutex_unlock(&mvm->mutex); + + iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT); +} + +static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = { + .dump_start = iwl_mvm_fwrt_dump_start, + .dump_end = iwl_mvm_fwrt_dump_end, +}; + static struct iwl_op_mode * iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, const struct iwl_fw *fw, struct dentry *dbgfs_dir) @@ -580,6 +605,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->fw = fw; mvm->hw = hw; + iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm); + mvm->init_status = 0; if (iwl_mvm_has_new_rx_api(mvm)) { @@ -596,32 +623,15 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0; - if (!iwl_mvm_is_dqa_supported(mvm)) { - mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1; - - if (mvm->cfg->base_params->num_of_queues == 16) { - mvm->aux_queue = 11; - mvm->first_agg_queue = 12; - BUILD_BUG_ON(BITS_PER_BYTE * - sizeof(mvm->hw_queue_to_mac80211[0]) < 12); - } else { - mvm->aux_queue = 15; - mvm->first_agg_queue = 16; - BUILD_BUG_ON(BITS_PER_BYTE * - sizeof(mvm->hw_queue_to_mac80211[0]) < 16); - } - } else { - mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE; - mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; - mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; - mvm->first_agg_queue = IWL_MVM_DQA_MIN_DATA_QUEUE; - mvm->last_agg_queue = IWL_MVM_DQA_MAX_DATA_QUEUE; - } + mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE; + mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; + mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; + mvm->sf_state = SF_UNINIT; - if (iwl_mvm_has_new_tx_api(mvm)) - mvm->cur_ucode = IWL_UCODE_REGULAR; + if (iwl_mvm_has_unified_ucode(mvm)) + iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_REGULAR); else - mvm->cur_ucode = IWL_UCODE_INIT; + iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_INIT); mvm->drop_bcn_ap_mode = true; mutex_init(&mvm->mutex); @@ -635,9 +645,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk); INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk); - INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk); INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work); - INIT_DELAYED_WORK(&mvm->fw_dump_wk, iwl_mvm_fw_error_dump_wk); INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work); INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk); INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk); @@ -688,10 +696,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, trans_cfg.command_groups = iwl_mvm_groups; trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups); - if 
(iwl_mvm_is_dqa_supported(mvm)) - trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE; - else - trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE; + trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE; trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD; trans_cfg.scd_set_active = true; @@ -800,7 +805,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, iwl_mvm_leds_exit(mvm); iwl_mvm_thermal_exit(mvm); out_free: - flush_delayed_work(&mvm->fw_dump_wk); + iwl_fw_flush_dump(&mvm->fwrt); if (iwlmvm_mod_params.init_dbg) return op_mode; @@ -920,7 +925,7 @@ static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm, trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF); cmds_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig)) return; for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) { @@ -932,9 +937,9 @@ static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm, cmds_trig->cmds[i].group_id != pkt->hdr.group_id) continue; - iwl_mvm_fw_dbg_collect_trig(mvm, trig, - "CMD 0x%02x.%02x received", - pkt->hdr.group_id, pkt->hdr.cmd); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, + "CMD 0x%02x.%02x received", + pkt->hdr.group_id, pkt->hdr.cmd); break; } } @@ -980,8 +985,10 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm, list_add_tail(&entry->list, &mvm->async_handlers_list); spin_unlock(&mvm->async_handlers_lock); schedule_work(&mvm->async_handlers_wk); - break; + return; } + + iwl_fwrt_handle_notification(&mvm->fwrt, rxb); } static void iwl_mvm_rx(struct iwl_op_mode *op_mode, @@ -1131,7 +1138,7 @@ static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) * Stop the device if we run OPERATIONAL firmware or if we are in the * middle of the calibrations. 
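The iwl_mvm_fwrt_ops block added in the ops.c hunk above is the classic ops-vtable-plus-opaque-context pattern: the shared fw runtime calls back into the op mode through function pointers without knowing its concrete type. A compressed userspace model of the same idiom, with invented names standing in for the driver's structures:

    #include <stdio.h>

    struct runtime_ops {
        int (*dump_start)(void *ctx);
        void (*dump_end)(void *ctx);
    };

    struct runtime {
        const struct runtime_ops *ops;
        void *ops_ctx;
    };

    /* hypothetical op-mode object, standing in for struct iwl_mvm */
    struct opmode {
        int lock_depth;
    };

    static int opmode_dump_start(void *ctx)
    {
        struct opmode *om = ctx;

        om->lock_depth++;       /* models mutex_lock(&mvm->mutex) */
        return 0;
    }

    static void opmode_dump_end(void *ctx)
    {
        struct opmode *om = ctx;

        om->lock_depth--;       /* models mutex_unlock(&mvm->mutex) */
    }

    static const struct runtime_ops opmode_ops = {
        .dump_start = opmode_dump_start,
        .dump_end = opmode_dump_end,
    };

    int main(void)
    {
        struct opmode om = { 0 };
        struct runtime rt = { .ops = &opmode_ops, .ops_ctx = &om };

        if (!rt.ops->dump_start(rt.ops_ctx)) {
            /* ... the dump runs with the op mode locked ... */
            rt.ops->dump_end(rt.ops_ctx);
        }
        printf("lock depth after dump: %d\n", om.lock_depth);
        return 0;
    }

The dump_start/dump_end pairing mirrors how the real callbacks bracket the dump with a runtime reference and the mvm mutex.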
*/ - return state && (mvm->cur_ucode != IWL_UCODE_INIT || calibrating); + return state && (mvm->fwrt.cur_fw_img != IWL_UCODE_INIT || calibrating); } static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) @@ -1160,57 +1167,6 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk) module_put(THIS_MODULE); } -static void iwl_mvm_fw_error_dump_wk(struct work_struct *work) -{ - struct iwl_mvm *mvm = - container_of(work, struct iwl_mvm, fw_dump_wk.work); - - if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT)) - return; - - mutex_lock(&mvm->mutex); - - if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) { - /* stop recording */ - iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100); - - iwl_mvm_fw_error_dump(mvm); - - /* start recording again if the firmware is not crashed */ - if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) && - mvm->fw->dbg_dest_tlv) { - iwl_clear_bits_prph(mvm->trans, - MON_BUFF_SAMPLE_CTL, 0x100); - iwl_clear_bits_prph(mvm->trans, - MON_BUFF_SAMPLE_CTL, 0x1); - iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x1); - } - } else { - u32 in_sample = iwl_read_prph(mvm->trans, DBGC_IN_SAMPLE); - u32 out_ctrl = iwl_read_prph(mvm->trans, DBGC_OUT_CTRL); - - /* stop recording */ - iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0); - udelay(100); - iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, 0); - /* wait before we collect the data till the DBGC stop */ - udelay(500); - - iwl_mvm_fw_error_dump(mvm); - - /* start recording again if the firmware is not crashed */ - if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) && - mvm->fw->dbg_dest_tlv) { - iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, in_sample); - iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, out_ctrl); - } - } - - mutex_unlock(&mvm->mutex); - - iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT); -} - void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) { iwl_abort_notification_waits(&mvm->notif_wait); @@ -1234,7 +1190,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) * can't recover this since we're already half suspended. 
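The deleted iwl_mvm_fw_error_dump_wk() above (its job moves into the shared fw/ runtime code) followed a stop/snapshot/conditionally-resume shape: halt the on-chip recorder, collect, and restart it only if the firmware is still alive. A schematic model of that flow — the register names and the settle step are invented stand-ins, not the real DBGC programming:

    #include <stdbool.h>
    #include <stdio.h>

    struct recorder {
        unsigned int in_sample, out_ctrl;   /* stand-ins for DBGC registers */
        bool fw_crashed;
    };

    static void error_dump(struct recorder *r)
    {
        unsigned int in = r->in_sample, out = r->out_ctrl;

        r->in_sample = 0;           /* stop recording */
        r->out_ctrl = 0;
        /* ... wait for the recorder to drain, then snapshot the buffer ... */
        printf("dump collected\n");

        if (!r->fw_crashed) {       /* resume only on a live firmware */
            r->in_sample = in;
            r->out_ctrl = out;
        }
    }

    int main(void)
    {
        struct recorder r = { .in_sample = 1, .out_ctrl = 1 };

        error_dump(&r);
        printf("resumed: %u\n", r.in_sample);
        return 0;
    }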
*/ if (!mvm->fw_restart && fw_error) { - iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert, + iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert, NULL); } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { struct iwl_mvm_reprobe *reprobe; @@ -1260,7 +1216,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) reprobe->dev = mvm->trans->dev; INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk); schedule_work(&reprobe->work); - } else if (mvm->cur_ucode == IWL_UCODE_REGULAR && + } else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR && mvm->hw_registered) { /* don't let the transport/FW power down */ iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); @@ -1439,7 +1395,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode) IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n"); - if (WARN_ON_ONCE(mvm->cur_ucode != IWL_UCODE_REGULAR)) + if (WARN_ON_ONCE(mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)) return -EINVAL; set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status); @@ -1665,7 +1621,7 @@ int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm) IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n"); - if (WARN_ON_ONCE(mvm->cur_ucode != IWL_UCODE_REGULAR)) + if (WARN_ON_ONCE(mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)) return -EINVAL; mutex_lock(&mvm->d0i3_suspend_mutex); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c index fb9eaf003ea5..7ee8e9077baf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c @@ -251,7 +251,7 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic) { - enum iwl_phy_ctxt_action action = FW_CTXT_ACTION_MODIFY; + enum iwl_ctxt_action action = FW_CTXT_ACTION_MODIFY; lockdep_assert_held(&mvm->mutex); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c index e684811f8e8b..c11fe2621d51 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2016 Intel Deutschland GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2016 Intel Deutschland GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -75,7 +75,7 @@ #include "iwl-debug.h" #include "mvm.h" #include "iwl-modparams.h" -#include "fw-api-power.h" +#include "fw/api/power.h" #define POWER_KEEP_ALIVE_PERIOD_SEC 25 @@ -186,7 +186,7 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm, if (!mvmvif->queue_params[ac].uapsd) continue; - if (mvm->cur_ucode != IWL_UCODE_WOWLAN) + if (mvm->fwrt.cur_fw_img != IWL_UCODE_WOWLAN) cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK); @@ -220,14 +220,15 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm, BIT(IEEE80211_AC_BK))) { cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK); cmd->snooze_interval = cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL); - cmd->snooze_window = (mvm->cur_ucode == IWL_UCODE_WOWLAN) ? - cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) : - cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW); + cmd->snooze_window = + (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN) ? + cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) : + cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW); } cmd->uapsd_max_sp = mvm->hw->uapsd_max_sp_len; - if (mvm->cur_ucode == IWL_UCODE_WOWLAN || cmd->flags & + if (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN || cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) { cmd->rx_data_timeout_uapsd = cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT); @@ -502,7 +503,7 @@ static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm, struct iwl_mac_power_cmd cmd = {}; iwl_mvm_power_build_cmd(mvm, vif, &cmd, - mvm->cur_ucode != IWL_UCODE_WOWLAN); + mvm->fwrt.cur_fw_img != IWL_UCODE_WOWLAN); iwl_mvm_power_log(mvm, &cmd); #ifdef CONFIG_IWLWIFI_DEBUGFS memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd)); @@ -525,8 +526,8 @@ int iwl_mvm_power_update_device(struct iwl_mvm *mvm) cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK); #ifdef CONFIG_IWLWIFI_DEBUGFS - if ((mvm->cur_ucode == IWL_UCODE_WOWLAN) ? mvm->disable_power_off_d3 : - mvm->disable_power_off) + if ((mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN) ? 
+ mvm->disable_power_off_d3 : mvm->disable_power_off) cmd.flags &= cpu_to_le16(~DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK); #endif @@ -933,7 +934,7 @@ static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm, if (!mvmvif->bf_data.bf_enabled) return 0; - if (mvm->cur_ucode == IWL_UCODE_WOWLAN) + if (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN) cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3); mvmvif->bf_data.ba_enabled = !(!mvmvif->pm_enabled || diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 622d543abb70..184c749766f2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -67,7 +67,6 @@ #include "iwl-trans.h" #include "mvm.h" #include "fw-api.h" -#include "fw-dbg.h" /* * iwl_mvm_rx_rx_phy_cmd - REPLY_RX_PHY_CMD handler @@ -397,10 +396,12 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, rssi = le32_to_cpu(rssi_trig->rssi); trig_check = - iwl_fw_dbg_trigger_check_stop(mvm, mvmsta->vif, + iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, + ieee80211_vif_to_wdev(mvmsta->vif), trig); if (trig_check && rx_status->signal < rssi) - iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, + NULL); } if (ieee80211_is_data(hdr->frame_control)) @@ -624,7 +625,7 @@ iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_STATS); trig_stats = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig)) return; trig_offset = le32_to_cpu(trig_stats->stop_offset); @@ -636,7 +637,7 @@ iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) if (le32_to_cpup((__le32 *) (pkt->data + trig_offset)) < trig_thold) return; - iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, NULL); } void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index f3e608196369..6b8e57b7234a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -63,7 +63,6 @@ #include "iwl-trans.h" #include "mvm.h" #include "fw-api.h" -#include "fw-dbg.h" static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb, int queue, struct ieee80211_sta *sta) @@ -852,7 +851,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rcu_read_lock(); - if (le16_to_cpu(desc->status) & IWL_RX_MPDU_STATUS_SRC_STA_FOUND) { + if (desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) { u8 id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK; if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) { @@ -906,10 +905,12 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rssi = le32_to_cpu(rssi_trig->rssi); trig_check = - iwl_fw_dbg_trigger_check_stop(mvm, mvmsta->vif, + iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, + ieee80211_vif_to_wdev(mvmsta->vif), trig); if (trig_check && rx_status->signal < rssi) - iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, + NULL); } if (ieee80211_is_data(hdr->frame_control)) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 35e813bdfbe5..50983615dce6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ 
b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -69,7 +69,7 @@ #include <net/mac80211.h> #include "mvm.h" -#include "fw-api-scan.h" +#include "fw/api/scan.h" #include "iwl-io.h" #define IWL_DENSE_EBS_SCAN_RATIO 5 @@ -743,7 +743,7 @@ static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm, * 4. it's not a p2p find operation. */ return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) && - mvm->last_ebs_successful && + mvm->last_ebs_successful && IWL_MVM_ENABLE_EBS && vif->type != NL80211_IFTYPE_P2P_DEVICE); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index ab66b4394dfc..922cd5379841 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -296,60 +296,6 @@ unlock: rcu_read_unlock(); } -static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm, - struct ieee80211_sta *sta) -{ - unsigned long used_hw_queues; - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - unsigned int wdg_timeout = - iwl_mvm_get_wd_timeout(mvm, NULL, true, false); - u32 ac; - - lockdep_assert_held(&mvm->mutex); - - used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL); - - /* Find available queues, and allocate them to the ACs */ - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { - u8 queue = find_first_zero_bit(&used_hw_queues, - mvm->first_agg_queue); - - if (queue >= mvm->first_agg_queue) { - IWL_ERR(mvm, "Failed to allocate STA queue\n"); - return -EBUSY; - } - - __set_bit(queue, &used_hw_queues); - mvmsta->hw_queue[ac] = queue; - } - - /* Found a place for all queues - enable them */ - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { - iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac], - mvmsta->hw_queue[ac], - iwl_mvm_ac_to_tx_fifo[ac], 0, - wdg_timeout); - mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]); - } - - return 0; -} - -static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm, - struct ieee80211_sta *sta) -{ - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - unsigned long sta_msk; - int i; - - lockdep_assert_held(&mvm->mutex); - - /* disable the TDLS STA-specific queues */ - sta_msk = mvmsta->tfd_queue_msk; - for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE) - iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0); -} - /* Disable aggregations for a bitmap of TIDs for a given station */ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue, unsigned long disable_agg_tids, @@ -757,7 +703,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_trans_txq_scd_cfg cfg = { - .fifo = iwl_mvm_ac_to_tx_fifo[ac], + .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac), .sta_id = mvmsta->sta_id, .tid = tid, .frame_limit = IWL_FRAME_LIMIT, @@ -1315,7 +1261,7 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number); cfg.tid = i; - cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac]; + cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac); cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE || txq_id == IWL_MVM_DQA_BSS_CLIENT_QUEUE); @@ -1329,8 +1275,6 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY; } } - - atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0); } int iwl_mvm_add_sta(struct iwl_mvm *mvm, @@ -1355,9 +1299,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, spin_lock_init(&mvm_sta->lock); - /* In DQA mode, if this is a HW restart, re-alloc existing queues */ - 
if (iwl_mvm_is_dqa_supported(mvm) && - test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + /* if this is a HW restart re-alloc existing queues */ + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta); goto update_fw; } @@ -1375,33 +1318,15 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK; /* HW restart, don't assume the memory has been zeroed */ - atomic_set(&mvm->pending_frames[sta_id], 0); mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */ mvm_sta->tfd_queue_msk = 0; - /* - * Allocate new queues for a TDLS station, unless we're in DQA mode, - * and then they'll be allocated dynamically - */ - if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) { - ret = iwl_mvm_tdls_sta_init(mvm, sta); - if (ret) - return ret; - } else if (!iwl_mvm_is_dqa_supported(mvm)) { - for (i = 0; i < IEEE80211_NUM_ACS; i++) - if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE) - mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]); - } - /* for HW restart - reset everything but the sequence number */ for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { u16 seq = mvm_sta->tid_data[i].seq_number; memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i])); mvm_sta->tid_data[i].seq_number = seq; - if (!iwl_mvm_is_dqa_supported(mvm)) - continue; - /* * Mark all queues for this STA as unallocated and defer TX * frames until the queue is allocated @@ -1435,7 +1360,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, mvm_sta->dup_data = dup_data; } - if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) { + if (!iwl_mvm_has_new_tx_api(mvm)) { ret = iwl_mvm_reserve_sta_stream(mvm, sta, ieee80211_vif_type_p2p(vif)); if (ret) @@ -1461,8 +1386,6 @@ update_fw: return 0; err: - if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) - iwl_mvm_tdls_sta_deinit(mvm, sta); return ret; } @@ -1535,79 +1458,6 @@ static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id) return 0; } -void iwl_mvm_sta_drained_wk(struct work_struct *wk) -{ - struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk); - u8 sta_id; - - /* - * The mutex is needed because of the SYNC cmd, but not only: if the - * work would run concurrently with iwl_mvm_rm_sta, it would run before - * iwl_mvm_rm_sta sets the station as busy, and exit. Then - * iwl_mvm_rm_sta would set the station as busy, and nobody will clean - * that later. - */ - mutex_lock(&mvm->mutex); - - for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) { - int ret; - struct ieee80211_sta *sta = - rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], - lockdep_is_held(&mvm->mutex)); - - /* - * This station is in use or RCU-removed; the latter happens in - * managed mode, where mac80211 removes the station before we - * can remove it from firmware (we can only do that after the - * MAC is marked unassociated), and possibly while the deauth - * frame to disconnect from the AP is still queued. Then, the - * station pointer is -ENOENT when the last skb is reclaimed. - */ - if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT) - continue; - - if (PTR_ERR(sta) == -EINVAL) { - IWL_ERR(mvm, "Drained sta %d, but it is internal?\n", - sta_id); - continue; - } - - if (!sta) { - IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n", - sta_id); - continue; - } - - WARN_ON(PTR_ERR(sta) != -EBUSY); - /* This station was removed and we waited until it got drained, - * we can now proceed and remove it. 
- */ - ret = iwl_mvm_rm_sta_common(mvm, sta_id); - if (ret) { - IWL_ERR(mvm, - "Couldn't remove sta %d after it was drained\n", - sta_id); - continue; - } - RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL); - clear_bit(sta_id, mvm->sta_drained); - - if (mvm->tfd_drained[sta_id]) { - unsigned long i, msk = mvm->tfd_drained[sta_id]; - - for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE) - iwl_mvm_disable_txq(mvm, i, i, - IWL_MAX_TID_COUNT, 0); - - mvm->tfd_drained[sta_id] = 0; - IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n", - sta_id, msk); - } - } - - mutex_unlock(&mvm->mutex); -} - static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_sta *mvm_sta) @@ -1631,10 +1481,11 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm, int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta) { - int i, ret; + int i; for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) { u16 txq_id; + int ret; spin_lock_bh(&mvm_sta->lock); txq_id = mvm_sta->tid_data[i].txq_id; @@ -1645,10 +1496,10 @@ int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm, ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id); if (ret) - break; + return ret; } - return ret; + return 0; } int iwl_mvm_rm_sta(struct iwl_mvm *mvm, @@ -1665,79 +1516,65 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, if (iwl_mvm_has_new_rx_api(mvm)) kfree(mvm_sta->dup_data); - if ((vif->type == NL80211_IFTYPE_STATION && - mvmvif->ap_sta_id == sta_id) || - iwl_mvm_is_dqa_supported(mvm)){ - ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); - if (ret) - return ret; - /* flush its queues here since we are freeing mvm_sta */ - ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0); - if (ret) - return ret; - if (iwl_mvm_has_new_tx_api(mvm)) { - ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta); - } else { - u32 q_mask = mvm_sta->tfd_queue_msk; + ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); + if (ret) + return ret; - ret = iwl_trans_wait_tx_queues_empty(mvm->trans, - q_mask); - } - if (ret) - return ret; - ret = iwl_mvm_drain_sta(mvm, mvm_sta, false); - - /* If DQA is supported - the queues can be disabled now */ - if (iwl_mvm_is_dqa_supported(mvm)) { - iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta); - /* - * If pending_frames is set at this point - it must be - * driver internal logic error, since queues are empty - * and removed successuly. 
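Buried in the iwl_mvm_wait_sta_queues_empty() hunk above is a small but real fix: ret used to be declared once outside the loop and returned after a break, so a station with no live queues would return an uninitialized value. Scoping ret per iteration and failing fast removes that hole. The refactor in miniature (helper invented):

    #include <stdio.h>

    static int wait_queue_empty(int q)
    {
        (void)q;
        return 0;               /* pretend the hw reports the queue drained */
    }

    static int wait_all_queues_empty(const int *queues, int n)
    {
        for (int i = 0; i < n; i++) {
            int ret = wait_queue_empty(queues[i]);  /* per-iteration scope */

            if (ret)
                return ret;     /* fail fast; no shared, maybe-stale ret */
        }
        return 0;               /* explicit success even when n == 0 */
    }

    int main(void)
    {
        int queues[] = { 1, 2, 3 };

        printf("%d\n", wait_all_queues_empty(queues, 3));
        return 0;
    }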
- * warn on it but set it to 0 anyway to avoid station - * not being removed later in the function - */ - WARN_ON(atomic_xchg(&mvm->pending_frames[sta_id], 0)); - } + /* flush its queues here since we are freeing mvm_sta */ + ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0); + if (ret) + return ret; + if (iwl_mvm_has_new_tx_api(mvm)) { + ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta); + } else { + u32 q_mask = mvm_sta->tfd_queue_msk; - /* If there is a TXQ still marked as reserved - free it */ - if (iwl_mvm_is_dqa_supported(mvm) && - mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) { - u8 reserved_txq = mvm_sta->reserved_queue; - enum iwl_mvm_queue_status *status; - - /* - * If no traffic has gone through the reserved TXQ - it - * is still marked as IWL_MVM_QUEUE_RESERVED, and - * should be manually marked as free again - */ - spin_lock_bh(&mvm->queue_info_lock); - status = &mvm->queue_info[reserved_txq].status; - if (WARN((*status != IWL_MVM_QUEUE_RESERVED) && - (*status != IWL_MVM_QUEUE_FREE), - "sta_id %d reserved txq %d status %d", - sta_id, reserved_txq, *status)) { - spin_unlock_bh(&mvm->queue_info_lock); - return -EINVAL; - } + ret = iwl_trans_wait_tx_queues_empty(mvm->trans, + q_mask); + } + if (ret) + return ret; + + ret = iwl_mvm_drain_sta(mvm, mvm_sta, false); + + iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta); + + /* If there is a TXQ still marked as reserved - free it */ + if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) { + u8 reserved_txq = mvm_sta->reserved_queue; + enum iwl_mvm_queue_status *status; - *status = IWL_MVM_QUEUE_FREE; + /* + * If no traffic has gone through the reserved TXQ - it + * is still marked as IWL_MVM_QUEUE_RESERVED, and + * should be manually marked as free again + */ + spin_lock_bh(&mvm->queue_info_lock); + status = &mvm->queue_info[reserved_txq].status; + if (WARN((*status != IWL_MVM_QUEUE_RESERVED) && + (*status != IWL_MVM_QUEUE_FREE), + "sta_id %d reserved txq %d status %d", + sta_id, reserved_txq, *status)) { spin_unlock_bh(&mvm->queue_info_lock); + return -EINVAL; } - if (vif->type == NL80211_IFTYPE_STATION && - mvmvif->ap_sta_id == sta_id) { - /* if associated - we can't remove the AP STA now */ - if (vif->bss_conf.assoc) - return ret; + *status = IWL_MVM_QUEUE_FREE; + spin_unlock_bh(&mvm->queue_info_lock); + } + + if (vif->type == NL80211_IFTYPE_STATION && + mvmvif->ap_sta_id == sta_id) { + /* if associated - we can't remove the AP STA now */ + if (vif->bss_conf.assoc) + return ret; - /* unassoc - go ahead - remove the AP STA now */ - mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; + /* unassoc - go ahead - remove the AP STA now */ + mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; - /* clear d0i3_ap_sta_id if no longer relevant */ - if (mvm->d0i3_ap_sta_id == sta_id) - mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA; - } + /* clear d0i3_ap_sta_id if no longer relevant */ + if (mvm->d0i3_ap_sta_id == sta_id) + mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA; } /* @@ -1754,32 +1591,10 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, * calls the drain worker. */ spin_lock_bh(&mvm_sta->lock); + spin_unlock_bh(&mvm_sta->lock); - /* - * There are frames pending on the AC queues for this station. - * We need to wait until all the frames are drained... 
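With the pending-frames accounting gone, the rewritten iwl_mvm_rm_sta() settles on one linear teardown order: drain in the firmware, flush, wait for the hardware queues to empty, stop draining, disable the queues, then remove the station. A compressed sketch of that sequencing — every helper below is invented and stands in for one firmware call:

    #include <stdio.h>

    struct sta { int id; };

    static int drain(struct sta *s, int on)   { (void)s; (void)on; return 0; }
    static int flush(struct sta *s)           { (void)s; return 0; }
    static int wait_empty(struct sta *s)      { (void)s; return 0; }
    static void disable_queues(struct sta *s) { (void)s; }
    static int delete_in_fw(struct sta *s)    { (void)s; return 0; }

    static int remove_sta(struct sta *s)
    {
        int ret;

        if ((ret = drain(s, 1)))    /* fw stops accepting new frames */
            return ret;
        if ((ret = flush(s)))       /* drop whatever is still queued */
            return ret;
        if ((ret = wait_empty(s)))  /* wait for the hw queues to settle */
            return ret;
        if ((ret = drain(s, 0)))
            return ret;
        disable_queues(s);          /* only now is it safe to free them */
        return delete_in_fw(s);
    }

    int main(void)
    {
        struct sta s = { .id = 7 };

        printf("remove_sta: %d\n", remove_sta(&s));
        return 0;
    }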
- */ - if (atomic_read(&mvm->pending_frames[sta_id])) { - rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], - ERR_PTR(-EBUSY)); - spin_unlock_bh(&mvm_sta->lock); - - /* disable TDLS sta queues on drain complete */ - if (sta->tdls) { - mvm->tfd_drained[sta_id] = mvm_sta->tfd_queue_msk; - IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n", sta_id); - } - - ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); - } else { - spin_unlock_bh(&mvm_sta->lock); - - if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) - iwl_mvm_tdls_sta_deinit(mvm, sta); - - ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id); - RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL); - } + ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id); + RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL); return ret; } @@ -1878,7 +1693,7 @@ static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm) IWL_MAX_TID_COUNT, wdg_timeout); mvm->aux_queue = queue; - } else if (iwl_mvm_is_dqa_supported(mvm)) { + } else { struct iwl_trans_txq_scd_cfg cfg = { .fifo = IWL_MVM_TX_FIFO_MCAST, .sta_id = mvm->aux_sta.sta_id, @@ -1889,9 +1704,6 @@ static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm) iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg, wdg_timeout); - } else { - iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue, - IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout); } } @@ -1991,7 +1803,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); - if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) { + if (!iwl_mvm_has_new_tx_api(mvm)) { if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) queue = mvm->probe_queue; @@ -2078,8 +1890,7 @@ int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); - if (iwl_mvm_is_dqa_supported(mvm)) - iwl_mvm_free_bcast_sta_queues(mvm, vif); + iwl_mvm_free_bcast_sta_queues(mvm, vif); ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id); if (ret) @@ -2090,23 +1901,10 @@ int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - u32 qmask = 0; lockdep_assert_held(&mvm->mutex); - if (!iwl_mvm_is_dqa_supported(mvm)) { - qmask = iwl_mvm_mac_get_queues_mask(vif); - - /* - * The firmware defines the TFD queue mask to only be relevant - * for *unicast* queues, so the multicast (CAB) queue shouldn't - * be included. This only happens in NL80211_IFTYPE_AP vif type, - * so the next line will only have an effect there. - */ - qmask &= ~BIT(vif->cab_queue); - } - - return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask, + return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0, ieee80211_vif_type_p2p(vif), IWL_STA_GENERAL_PURPOSE); } @@ -2118,7 +1916,7 @@ int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) * @mvm: the mvm component * @vif: the interface to which the broadcast station is added * @bsta: the broadcast station to add. 
*/ -int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta; @@ -2149,7 +1947,7 @@ void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) * Send the FW a request to remove the station from it's internal data * structures, and in addition remove it from the local data structure. */ -int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { int ret; @@ -2188,9 +1986,6 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); - if (!iwl_mvm_is_dqa_supported(mvm)) - return 0; - if (WARN_ON(vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_ADHOC)) return -ENOTSUPP; @@ -2255,9 +2050,6 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); - if (!iwl_mvm_is_dqa_supported(mvm)) - return 0; - iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0); iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue, @@ -2507,8 +2299,6 @@ int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, mvm_sta->tid_disable_agg &= ~BIT(tid); } else { /* In DQA-mode the queue isn't removed on agg termination */ - if (!iwl_mvm_is_dqa_supported(mvm)) - mvm_sta->tfd_queue_msk &= ~BIT(queue); mvm_sta->tid_disable_agg |= BIT(tid); } @@ -2611,19 +2401,17 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ret = -ENXIO; goto release_locks; } - } else if (iwl_mvm_is_dqa_supported(mvm) && - unlikely(mvm->queue_info[txq_id].status == + } else if (unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) { ret = -ENXIO; IWL_DEBUG_TX_QUEUES(mvm, "Can't start tid %d agg on shared queue!\n", tid); goto release_locks; - } else if (!iwl_mvm_is_dqa_supported(mvm) || - mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) { + } else if (mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) { txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, - mvm->first_agg_queue, - mvm->last_agg_queue); + IWL_MVM_DQA_MIN_DATA_QUEUE, + IWL_MVM_DQA_MAX_DATA_QUEUE); if (txq_id < 0) { ret = txq_id; IWL_ERR(mvm, "Failed to allocate agg queue\n"); @@ -2741,37 +2529,34 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, queue_status = mvm->queue_info[queue].status; spin_unlock_bh(&mvm->queue_info_lock); - /* In DQA mode, the existing queue might need to be reconfigured */ - if (iwl_mvm_is_dqa_supported(mvm)) { - /* Maybe there is no need to even alloc a queue... */ - if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY) - alloc_queue = false; + /* Maybe there is no need to even alloc a queue... 
*/ + if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY) + alloc_queue = false; + /* + * Only reconfig the SCD for the queue if the window size has + * changed from current (become smaller) + */ + if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) { /* - * Only reconfig the SCD for the queue if the window size has - * changed from current (become smaller) + * If reconfiguring an existing queue, it first must be + * drained */ - if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) { - /* - * If reconfiguring an existing queue, it first must be - * drained - */ - ret = iwl_trans_wait_tx_queues_empty(mvm->trans, - BIT(queue)); - if (ret) { - IWL_ERR(mvm, - "Error draining queue before reconfig\n"); - return ret; - } + ret = iwl_trans_wait_tx_queues_empty(mvm->trans, + BIT(queue)); + if (ret) { + IWL_ERR(mvm, + "Error draining queue before reconfig\n"); + return ret; + } - ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo, - mvmsta->sta_id, tid, - buf_size, ssn); - if (ret) { - IWL_ERR(mvm, - "Error reconfiguring TXQ #%d\n", queue); - return ret; - } + ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo, + mvmsta->sta_id, tid, + buf_size, ssn); + if (ret) { + IWL_ERR(mvm, + "Error reconfiguring TXQ #%d\n", queue); + return ret; } } @@ -2867,18 +2652,6 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, "ssn = %d, next_recl = %d\n", tid_data->ssn, tid_data->next_reclaimed); - /* - * There are still packets for this RA / TID in the HW. - * Not relevant for DQA mode, since there is no need to disable - * the queue. - */ - if (!iwl_mvm_is_dqa_supported(mvm) && - tid_data->ssn != tid_data->next_reclaimed) { - tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA; - err = 0; - break; - } - tid_data->ssn = 0xffff; tid_data->state = IWL_AGG_OFF; spin_unlock_bh(&mvmsta->lock); @@ -2886,12 +2659,6 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); - - if (!iwl_mvm_is_dqa_supported(mvm)) { - int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]]; - - iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0); - } return 0; case IWL_AGG_STARTING: case IWL_EMPTYING_HW_QUEUE_ADDBA: @@ -2961,13 +2728,6 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_drain_sta(mvm, mvmsta, false); iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); - - if (!iwl_mvm_is_dqa_supported(mvm)) { - int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]]; - - iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue, - tid, 0); - } } return 0; @@ -3586,15 +3346,6 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, u16 n_queued; tid_data = &mvmsta->tid_data[tid]; - if (WARN(!iwl_mvm_is_dqa_supported(mvm) && - tid_data->state != IWL_AGG_ON && - tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA, - "TID %d state is %d\n", - tid, tid_data->state)) { - spin_unlock_bh(&mvmsta->lock); - ieee80211_sta_eosp(sta); - return; - } n_queued = iwl_mvm_tid_queued(mvm, tid_data); if (n_queued > remaining) { @@ -3688,13 +3439,8 @@ void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm, mvm_sta->disable_tx = disable; - /* - * Tell mac80211 to start/stop queuing tx for this station, - * but don't stop queuing if there are still pending frames - * for this station. 
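The de-indented agg-oper code above keeps the old rule even after losing its DQA guard: an existing queue is reconfigured only when the BA window shrinks, and the queue must be drained before the SCD is touched. Roughly, with invented helpers:

    #include <stdio.h>

    struct txq { int win; int pending; };

    static int wait_drained(struct txq *q)
    {
        q->pending = 0;     /* pretend the hw finished everything */
        return 0;
    }

    /* shrink-only reconfigure, mirroring the buf_size < max_agg_bufsize test */
    static int reconfig_window(struct txq *q, int new_win)
    {
        int ret;

        if (new_win >= q->win)
            return 0;       /* growing or equal: nothing to reprogram */

        ret = wait_drained(q);  /* a live queue must be empty first */
        if (ret)
            return ret;

        q->win = new_win;
        return 0;
    }

    int main(void)
    {
        struct txq q = { .win = 64, .pending = 3 };

        printf("reconfig: %d, win now %d\n", reconfig_window(&q, 16), q.win);
        return 0;
    }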
- */ - if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) - ieee80211_sta_block_awake(mvm->hw, sta, disable); + /* Tell mac80211 to start/stop queuing tx for this station */ + ieee80211_sta_block_awake(mvm->hw, sta, disable); iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 05fecbe87da4..005037aa3122 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -222,16 +222,7 @@ struct iwl_mvm_vif; * we remove the STA of the AP. The flush can be done synchronously against the * fw. * Drain means that the fw will drop all the frames sent to a specific station. - * This is useful when a client (if we are IBSS / GO or AP) disassociates. In - * that case, we need to drain all the frames for that client from the AC queues - * that are shared with the other clients. Only then, we can remove the STA in - * the fw. In order to do so, we track the non-AMPDU packets for each station. - * If mac80211 removes a STA and if it still has non-AMPDU packets pending in - * the queues, we mark this station as %EBUSY in %fw_id_to_mac_id, and drop all - * the frames for this STA (%iwl_mvm_rm_sta). When the last frame is dropped - * (we know about it with its Tx response), we remove the station in fw and set - * it as %NULL in %fw_id_to_mac_id: this is the purpose of - * %iwl_mvm_sta_drained_wk. + * This is useful when a client (if we are IBSS / GO or AP) disassociates. */ /** @@ -371,7 +362,6 @@ struct iwl_mvm_rxq_dup_data { * struct iwl_mvm_sta - representation of a station in the driver * @sta_id: the index of the station in the fw (will be replaced by id_n_color) * @tfd_queue_msk: the tfd queues used by the station - * @hw_queue: per-AC mapping of the TFD queues used by station * @mac_id_n_color: the MAC context this station is linked to * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for * tid. 
@@ -409,7 +399,6 @@ struct iwl_mvm_rxq_dup_data { struct iwl_mvm_sta { u32 sta_id; u32 tfd_queue_msk; - u8 hw_queue[IEEE80211_NUM_ACS]; u32 mac_id_n_color; u16 tid_disable_agg; u8 max_agg_bufsize; @@ -533,9 +522,9 @@ void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm); int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, @@ -548,7 +537,6 @@ int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm); -void iwl_mvm_sta_drained_wk(struct work_struct *wk); void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, struct ieee80211_sta *sta); void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 5a682722adce..65d8299108d5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -73,7 +73,6 @@ #include "mvm.h" #include "iwl-io.h" #include "iwl-prph.h" -#include "fw-dbg.h" /* * For the high priority TE use a time event type that has similar priority to @@ -130,10 +129,7 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk) * issue as it will have to complete before the next command is * executed, and a new time event means a new command. 
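The time-event conversion just below (and the rx/tx/utils hunks earlier) all land on the same fw-dbg call shape: look up the trigger, ask whether collection should fire, then collect with a printf-style reason string. A compressed userspace model, including the self-disarming countdown the connection-loss trigger uses — all names invented:

    #include <stdarg.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct dbg_trigger {
        bool enabled;
        unsigned int countdown; /* nonzero: skip until it reaches zero */
    };

    static bool trigger_check_stop(struct dbg_trigger *t)
    {
        if (!t->enabled)
            return false;
        if (t->countdown && --t->countdown)
            return false;       /* still counting down, don't collect yet */
        return true;
    }

    static void collect_trig(struct dbg_trigger *t, const char *fmt, ...)
    {
        va_list ap;

        if (!trigger_check_stop(t))
            return;

        va_start(ap, fmt);      /* the reason string ends up in the dump */
        vprintf(fmt, ap);
        va_end(ap);
        printf("\n");
    }

    int main(void)
    {
        struct dbg_trigger t = { .enabled = true, .countdown = 0 };

        collect_trig(&t, "Time event %d Action 0x%x received status: %d",
                     1, 0x2, 0);
        return 0;
    }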
*/ - if (iwl_mvm_is_dqa_supported(mvm)) - iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC); - else - iwl_mvm_flush_tx_path(mvm, queues, CMD_ASYNC); + iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC); } static void iwl_mvm_roc_finished(struct iwl_mvm *mvm) @@ -248,7 +244,9 @@ static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm, trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT); te_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, te_data->vif, trig)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, + ieee80211_vif_to_wdev(te_data->vif), + trig)) return; for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) { @@ -263,11 +261,11 @@ static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm, !(trig_status_bitmap & BIT(le32_to_cpu(notif->status)))) continue; - iwl_mvm_fw_dbg_collect_trig(mvm, trig, - "Time event %d Action 0x%x received status: %d", - te_data->id, - le32_to_cpu(notif->action), - le32_to_cpu(notif->status)); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, + "Time event %d Action 0x%x received status: %d", + te_data->id, + le32_to_cpu(notif->action), + le32_to_cpu(notif->status)); break; } } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tof.c b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c index 634175b2480c..2d0b8a391308 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tof.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c @@ -61,7 +61,7 @@ * *****************************************************************************/ #include "mvm.h" -#include "fw-api-tof.h" +#include "fw/api/tof.h" #define IWL_MVM_TOF_RANGE_REQ_MAX_ID 256 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tof.h b/drivers/net/wireless/intel/iwlwifi/mvm/tof.h index 8c3421c9991d..2ff560aa1a82 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tof.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tof.h @@ -63,7 +63,7 @@ #ifndef __tof_h__ #define __tof_h__ -#include "fw-api-tof.h" +#include "fw/api/tof.h" struct iwl_mvm_tof_data { struct iwl_tof_config_cmd tof_cfg; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index 453a785a3ea5..a638bd69a1f9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -629,7 +629,7 @@ static int iwl_mvm_tzone_get_temp(struct thermal_zone_device *device, mutex_lock(&mvm->mutex); if (!iwl_mvm_firmware_running(mvm) || - mvm->cur_ucode != IWL_UCODE_REGULAR) { + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) { ret = -EIO; goto out; } @@ -680,7 +680,7 @@ static int iwl_mvm_tzone_set_trip_temp(struct thermal_zone_device *device, mutex_lock(&mvm->mutex); if (!iwl_mvm_firmware_running(mvm) || - mvm->cur_ucode != IWL_UCODE_REGULAR) { + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) { ret = -EIO; goto out; } @@ -795,7 +795,7 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev, mutex_lock(&mvm->mutex); if (!iwl_mvm_firmware_running(mvm) || - mvm->cur_ucode != IWL_UCODE_REGULAR) { + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) { ret = -EIO; goto unlock; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 60360ed73f26..6d7d1a66af81 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -74,7 +74,6 @@ #include "iwl-eeprom-parse.h" #include "mvm.h" #include "sta.h" -#include "fw-dbg.h" static void iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr, @@ -89,15 +88,15 @@ 
iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr, trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); ba_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig)) return; if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid))) return; - iwl_mvm_fw_dbg_collect_trig(mvm, trig, - "BAR sent to %pM, tid %d, ssn %d", - addr, tid, ssn); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, + "BAR sent to %pM, tid %d, ssn %d", + addr, tid, ssn); } #define OPT_HDR(type, skb, off) \ @@ -553,9 +552,6 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, { struct iwl_mvm_vif *mvmvif; - if (!iwl_mvm_is_dqa_supported(mvm)) - return info->hw_queue; - mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif); switch (info->control.vif->type) { @@ -654,8 +650,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) if (ap_sta_id != IWL_MVM_INVALID_STA) sta_id = ap_sta_id; - } else if (iwl_mvm_is_dqa_supported(mvm) && - info.control.vif->type == NL80211_IFTYPE_MONITOR) { + } else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) { queue = mvm->aux_queue; } } @@ -674,17 +669,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) return -1; } - /* - * Increase the pending frames counter, so that later when a reply comes - * in and the counter is decreased - we don't start getting negative - * values. - * Note that we don't need to make sure it isn't agg'd, since we're - * TXing non-sta - * For DQA mode - we shouldn't increase it though - */ - if (!iwl_mvm_is_dqa_supported(mvm)) - atomic_inc(&mvm->pending_frames[sta_id]); - return 0; } @@ -752,7 +736,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, max_amsdu_len = sta->max_amsdu_len; /* the Tx FIFO to which this A-MSDU will be routed */ - txf = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; + txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, tid_to_mac80211_ac[tid]); /* * Don't send an AMSDU that will be longer than the TXF. @@ -761,7 +745,8 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, * fifo to be able to send bursts. */ max_amsdu_len = min_t(unsigned int, max_amsdu_len, - mvm->smem_cfg.lmac[0].txfifo_size[txf] - 256); + mvm->fwrt.smem_cfg.lmac[0].txfifo_size[txf] - + 256); if (unlikely(dbg_max_amsdu_len)) max_amsdu_len = min_t(unsigned int, max_amsdu_len, @@ -994,22 +979,13 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, } } - if (iwl_mvm_is_dqa_supported(mvm) || is_ampdu) - txq_id = mvmsta->tid_data[tid].txq_id; - - if (sta->tdls && !iwl_mvm_is_dqa_supported(mvm)) { - /* default to TID 0 for non-QoS packets */ - u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid; - - txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]]; - } + txq_id = mvmsta->tid_data[tid].txq_id; WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); /* Check if TXQ needs to be allocated or re-activated */ if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE || - !mvmsta->tid_data[tid].is_tid_active) && - iwl_mvm_is_dqa_supported(mvm)) { + !mvmsta->tid_data[tid].is_tid_active)) { /* If TXQ needs to be allocated... 
*/ if (txq_id == IWL_MVM_INVALID_QUEUE) { iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb); @@ -1036,7 +1012,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, txq_id); } - if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) { + if (!iwl_mvm_has_new_tx_api(mvm)) { /* Keep track of the time of the last frame for this RA/TID */ mvm->queue_info[txq_id].last_frame_time[tid] = jiffies; @@ -1070,10 +1046,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, spin_unlock(&mvmsta->lock); - /* Increase pending frames count if this isn't AMPDU or DQA queue */ - if (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu) - atomic_inc(&mvm->pending_frames[mvmsta->sta_id]); - return 0; drop_unlock_sta: @@ -1142,8 +1114,7 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm, lockdep_assert_held(&mvmsta->lock); if ((tid_data->state == IWL_AGG_ON || - tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA || - iwl_mvm_is_dqa_supported(mvm)) && + tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) && iwl_mvm_tid_queued(mvm, tid_data) == 0) { /* * Now that this aggregation or DQA queue is empty tell @@ -1177,13 +1148,6 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm, IWL_DEBUG_TX_QUEUES(mvm, "Can continue DELBA flow ssn = next_recl = %d\n", tid_data->next_reclaimed); - if (!iwl_mvm_is_dqa_supported(mvm)) { - u8 mac80211_ac = tid_to_mac80211_ac[tid]; - - iwl_mvm_disable_txq(mvm, tid_data->txq_id, - vif->hw_queue[mac80211_ac], tid, - CMD_ASYNC); - } tid_data->state = IWL_AGG_OFF; ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; @@ -1295,7 +1259,7 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm, trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_STATUS); status_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig)) return; for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) { @@ -1306,9 +1270,9 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm, if (status_trig->statuses[i].status != (status & TX_STATUS_MSK)) continue; - iwl_mvm_fw_dbg_collect_trig(mvm, trig, - "Tx status %d was received", - status & TX_STATUS_MSK); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, + "Tx status %d was received", + status & TX_STATUS_MSK); break; } } @@ -1381,10 +1345,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, info->flags |= IEEE80211_TX_STAT_ACK; break; case TX_STATUS_FAIL_DEST_PS: - /* In DQA, the FW should have stopped the queue and not + /* the FW should have stopped the queue and not * return this status */ - WARN_ON(iwl_mvm_is_dqa_supported(mvm)); + WARN_ON(1); info->flags |= IEEE80211_TX_STAT_TX_FILTERED; break; default: @@ -1440,26 +1404,21 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, ieee80211_tx_status(mvm->hw, skb); } - if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue) { - /* If this is an aggregation queue, we use the ssn since: - * ssn = wifi seq_num % 256. - * The seq_ctl is the sequence control of the packet to which - * this Tx response relates. But if there is a hole in the - * bitmap of the BA we received, this Tx response may allow to - * reclaim the hole and all the subsequent packets that were - * already acked. In that case, seq_ctl != ssn, and the next - * packet to be reclaimed will be ssn and not seq_ctl. In that - * case, several packets will be reclaimed even if - * frame_count = 1. 
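The reclaim comment being rewritten here is worth a worked number: with aggregation, reclaim is driven by ssn (wifi sequence number % 256), not by the seq_ctl of the acked frame, so filling a hole in the BA bitmap can release many frames from one Tx response. A toy model of the modular arithmetic:

    #include <stdio.h>

    int main(void)
    {
        unsigned int next_reclaimed = 250;  /* oldest un-acked index (% 256) */
        unsigned int ssn = 3;               /* fw says all below 3 are done */

        /* frames freed by this single Tx response: 250..255 and 0..2 */
        unsigned int freed = (ssn - next_reclaimed) & 0xff;

        printf("reclaimed %u frames\n", freed);  /* prints 9 */
        return 0;
    }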
- * - * The ssn is the index (% 256) of the latest packet that has - * treated (acked / dropped) + 1. - */ - next_reclaimed = ssn; - } else { - /* The next packet to be reclaimed is the one after this one */ - next_reclaimed = IEEE80211_SEQ_TO_SN(seq_ctl + 0x10); - } + /* This is an aggregation queue or might become one, so we use + * the ssn since: ssn = wifi seq_num % 256. + * The seq_ctl is the sequence control of the packet to which + * this Tx response relates. But if there is a hole in the + * bitmap of the BA we received, this Tx response may allow to + * reclaim the hole and all the subsequent packets that were + * already acked. In that case, seq_ctl != ssn, and the next + * packet to be reclaimed will be ssn and not seq_ctl. In that + * case, several packets will be reclaimed even if + * frame_count = 1. + * + * The ssn is the index (% 256) of the latest packet that has + * treated (acked / dropped) + 1. + */ + next_reclaimed = ssn; IWL_DEBUG_TX_REPLY(mvm, "TXQ %d status %s (0x%08x)\n", @@ -1542,49 +1501,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, mvmsta = NULL; } - /* - * If the txq is not an AMPDU queue, there is no chance we freed - * several skbs. Check that out... - */ - if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue) - goto out; - - /* We can't free more than one frame at once on a shared queue */ - WARN_ON(skb_freed > 1); - - /* If we have still frames for this STA nothing to do here */ - if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) - goto out; - - if (mvmsta && mvmsta->vif->type == NL80211_IFTYPE_AP) { - - /* - * If there are no pending frames for this STA and - * the tx to this station is not disabled, notify - * mac80211 that this station can now wake up in its - * STA table. - * If mvmsta is not NULL, sta is valid. - */ - - spin_lock_bh(&mvmsta->lock); - - if (!mvmsta->disable_tx) - ieee80211_sta_block_awake(mvm->hw, sta, false); - - spin_unlock_bh(&mvmsta->lock); - } - - if (PTR_ERR(sta) == -EBUSY || PTR_ERR(sta) == -ENOENT) { - /* - * We are draining and this was the last packet - pre_rcu_remove - * has been called already. We might be after the - * synchronize_net already. - * Don't rely on iwl_mvm_rm_sta to see the empty Tx queues. - */ - set_bit(sta_id, mvm->sta_drained); - schedule_work(&mvm->sta_drained_wk); - } - out: rcu_read_unlock(); } @@ -1648,9 +1564,8 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta; int queue = SEQ_TO_QUEUE(sequence); - if (WARN_ON_ONCE(queue < mvm->first_agg_queue && - (!iwl_mvm_is_dqa_supported(mvm) || - (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)))) + if (WARN_ON_ONCE(queue < IWL_MVM_DQA_MIN_DATA_QUEUE && + (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE))) return; if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS)) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index fc5a490880d0..2ea74abad73d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -70,9 +70,8 @@ #include "iwl-io.h" #include "iwl-prph.h" #include "iwl-csr.h" -#include "fw-dbg.h" #include "mvm.h" -#include "fw-api-rs.h" +#include "fw/api/rs.h" /* * Will return 0 even if the cmd failed when RFKILL is asserted unless @@ -464,8 +463,8 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm) IWL_ERR(mvm, "Not valid error log pointer 0x%08X for %s uCode\n", base, - (mvm->cur_ucode == IWL_UCODE_INIT) - ? "Init" : "RT"); + (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) + ? 
"Init" : "RT"); return; } @@ -500,7 +499,7 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base) struct iwl_error_event_table table; u32 val; - if (mvm->cur_ucode == IWL_UCODE_INIT) { + if (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) { if (!base) base = mvm->fw->init_errlog_ptr; } else { @@ -512,8 +511,8 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base) IWL_ERR(mvm, "Not valid error log pointer 0x%08X for %s uCode\n", base, - (mvm->cur_ucode == IWL_UCODE_INIT) - ? "Init" : "RT"); + (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) + ? "Init" : "RT"); return; } @@ -1190,14 +1189,15 @@ void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME); trig_mlme = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, + ieee80211_vif_to_wdev(vif), trig)) goto out; if (trig_mlme->stop_connection_loss && --trig_mlme->stop_connection_loss) goto out; - iwl_mvm_fw_dbg_collect_trig(mvm, trig, "%s", errmsg); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "%s", errmsg); out: ieee80211_connection_loss(vif); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index f16c1bb9bf94..87712aeac31f 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -805,11 +805,11 @@ static int iwl_pci_resume(struct device *device) /* * Enable rfkill interrupt (in order to keep track of the rfkill * status). Must be locked to avoid processing a possible rfkill - * interrupt while in iwl_trans_check_hw_rf_kill(). + * interrupt while in iwl_pcie_check_hw_rf_kill(). */ mutex_lock(&trans_pcie->mutex); iwl_enable_rfkill_int(trans); - iwl_trans_check_hw_rf_kill(trans); + iwl_pcie_check_hw_rf_kill(trans); mutex_unlock(&trans_pcie->mutex); return 0; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index fa315d84e98e..a8ffd4ca8cd8 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -791,7 +791,7 @@ void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable); void iwl_pcie_apm_config(struct iwl_trans *trans); int iwl_pcie_prepare_card_hw(struct iwl_trans *trans); void iwl_pcie_synchronize_irqs(struct iwl_trans *trans); -bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans); +bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans); void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans, bool was_in_rfkill); void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index b84b78293e7b..c59f4581e972 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -307,7 +307,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, mutex_lock(&trans_pcie->mutex); /* If platform's RF_KILL switch is NOT set to KILL */ - hw_rfkill = iwl_trans_check_hw_rf_kill(trans); + hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); if (hw_rfkill && !run_in_rfkill) { ret = -ERFKILL; goto out; @@ -340,7 +340,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, goto out; /* re-check RF-Kill state since we may have missed the interrupt */ - hw_rfkill = iwl_trans_check_hw_rf_kill(trans); + hw_rfkill = 
iwl_pcie_check_hw_rf_kill(trans); if (hw_rfkill && !run_in_rfkill) ret = -ERFKILL; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index f95eec52508e..32f06f14328c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -986,7 +986,7 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans, &first_ucode_section); } -bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans) +bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool hw_rfkill = iwl_is_rfkill_set(trans); @@ -1252,7 +1252,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, mutex_lock(&trans_pcie->mutex); /* If platform's RF_KILL switch is NOT set to KILL */ - hw_rfkill = iwl_trans_check_hw_rf_kill(trans); + hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); if (hw_rfkill && !run_in_rfkill) { ret = -ERFKILL; goto out; @@ -1300,7 +1300,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, ret = iwl_pcie_load_given_ucode(trans, fw); /* re-check RF-Kill state since we may have missed the interrupt */ - hw_rfkill = iwl_trans_check_hw_rf_kill(trans); + hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); if (hw_rfkill && !run_in_rfkill) ret = -ERFKILL; @@ -1663,7 +1663,7 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) trans_pcie->is_down = false; /* ...rfkill can call stop_device and set it false if needed */ - iwl_trans_check_hw_rf_kill(trans); + iwl_pcie_check_hw_rf_kill(trans); /* Make sure we sync here, because we'll need full access later */ if (low_power) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index a3795ba0d7b9..5dc785d4c167 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -55,7 +55,7 @@ #include "iwl-csr.h" #include "iwl-io.h" #include "internal.h" -#include "mvm/fw-api.h" +#include "fw/api/tx.h" /* * iwl_pcie_gen2_tx_stop - Stop all Tx DMA channels @@ -422,9 +422,9 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, hdr_len = ieee80211_hdrlen(hdr->frame_control); if (amsdu) { - if (!iwl_pcie_gen2_build_amsdu(trans, skb, tfd, - tb1_len + IWL_FIRST_TB_SIZE, - hdr_len, dev_cmd)) + if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd, + tb1_len + IWL_FIRST_TB_SIZE, + hdr_len, dev_cmd)) goto out_err; /* diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 034bdb4a0b06..6f5894545f4f 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -43,8 +43,7 @@ #include "iwl-scd.h" #include "iwl-op-mode.h" #include "internal.h" -/* FIXME: need to abstract out TX command (once we know what it looks like) */ -#include "dvm/commands.h" +#include "fw/api/tx.h" #define IWL_TX_CRC_SIZE 4 #define IWL_TX_DELIMITER_SIZE 4 @@ -2370,7 +2369,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, tb1_len = ALIGN(len, 4); /* Tell NIC about any 2-byte padding after MAC header */ if (tb1_len != len) - tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; + tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD); } else { tb1_len = len; } diff --git a/drivers/net/wireless/intersil/hostap/hostap_main.c b/drivers/net/wireless/intersil/hostap/hostap_main.c index a3c066f90afc..012930d35434 100644 --- 
a/drivers/net/wireless/intersil/hostap/hostap_main.c +++ b/drivers/net/wireless/intersil/hostap/hostap_main.c @@ -125,8 +125,8 @@ void hostap_remove_interface(struct net_device *dev, int rtnl_locked, else unregister_netdev(dev); - /* dev->destructor = free_netdev() will free the device data, including - * private data, when removing the device */ + /* 'dev->needs_free_netdev = true' implies device data, including + * private data, will be freed when the device is removed */ } diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c index 16c77c27f1b6..725206914911 100644 --- a/drivers/net/wireless/marvell/mwifiex/11n.c +++ b/drivers/net/wireless/marvell/mwifiex/11n.c @@ -572,6 +572,8 @@ int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac) mwifiex_dbg(priv->adapter, CMD, "cmd: %s: tid %d\n", __func__, tid); + memset(&add_ba_req, 0, sizeof(add_ba_req)); + if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) && ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) && priv->adapter->is_hw_11ac_capable && diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 06ad2d50f9b0..2be78170ec67 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -3123,11 +3123,7 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) priv->dfs_chan_sw_workqueue = NULL; } /* Clear the priv in adapter */ - priv->netdev->ieee80211_ptr = NULL; priv->netdev = NULL; - priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED; - - priv->media_connected = false; switch (priv->bss_mode) { case NL80211_IFTYPE_UNSPECIFIED: @@ -4215,7 +4211,7 @@ int mwifiex_init_channel_scan_gap(struct mwifiex_adapter *adapter) if (adapter->config_bands & BAND_A) n_channels_a = mwifiex_band_5ghz.n_channels; - adapter->num_in_chan_stats = max_t(u32, n_channels_bg, n_channels_a); + adapter->num_in_chan_stats = n_channels_bg + n_channels_a; adapter->chan_stats = vmalloc(sizeof(*adapter->chan_stats) * adapter->num_in_chan_stats); diff --git a/drivers/net/wireless/marvell/mwifiex/cfp.c b/drivers/net/wireless/marvell/mwifiex/cfp.c index 6e2994308526..bfe84e55df77 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfp.c +++ b/drivers/net/wireless/marvell/mwifiex/cfp.c @@ -180,11 +180,9 @@ static struct region_code_mapping region_code_mapping_t[] = { u8 *mwifiex_11d_code_2_region(u8 code) { u8 i; - u8 size = sizeof(region_code_mapping_t)/ - sizeof(struct region_code_mapping); /* Look for code in mapping table */ - for (i = 0; i < size; i++) + for (i = 0; i < ARRAY_SIZE(region_code_mapping_t); i++) if (region_code_mapping_t[i].code == code) return region_code_mapping_t[i].region; diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c index 8dad52886034..0edc5d621304 100644 --- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c +++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c @@ -26,6 +26,8 @@ #include "11n.h" #include "11ac.h" +static void mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter); + /* * This function initializes a command node. * @@ -427,7 +429,7 @@ int mwifiex_alloc_cmd_buffer(struct mwifiex_adapter *adapter) * The function calls the completion callback for all the command * buffers that still have response buffers associated with them. 
*/ -int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter) +void mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter) { struct cmd_ctrl_node *cmd_array; u32 i; @@ -436,7 +438,7 @@ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter) if (!adapter->cmd_pool) { mwifiex_dbg(adapter, FATAL, "info: FREE_CMD_BUF: cmd_pool is null\n"); - return 0; + return; } cmd_array = adapter->cmd_pool; @@ -464,8 +466,6 @@ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter) kfree(adapter->cmd_pool); adapter->cmd_pool = NULL; } - - return 0; } /* @@ -666,7 +666,7 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no, cmd_no == HostCmd_CMD_802_11_SCAN_EXT) { mwifiex_queue_scan_cmd(priv, cmd_node); } else { - mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true); + mwifiex_insert_cmd_to_pending_q(adapter, cmd_node); queue_work(adapter->workqueue, &adapter->main_work); if (cmd_node->wait_q_enabled) ret = mwifiex_wait_queue_complete(adapter, cmd_node); @@ -684,11 +684,12 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no, */ void mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter, - struct cmd_ctrl_node *cmd_node, u32 add_tail) + struct cmd_ctrl_node *cmd_node) { struct host_cmd_ds_command *host_cmd = NULL; u16 command; unsigned long flags; + bool add_tail = true; host_cmd = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data); if (!host_cmd) { @@ -1075,7 +1076,7 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter) * In case of scan commands, all pending commands in scan pending queue * are cancelled. */ -void +static void mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter) { struct cmd_ctrl_node *cmd_node = NULL; diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c index f6f105a7d3ff..6f4239be609d 100644 --- a/drivers/net/wireless/marvell/mwifiex/debugfs.c +++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c @@ -940,8 +940,6 @@ mwifiex_reset_write(struct file *file, if (adapter->if_ops.card_reset) { dev_info(adapter->dev, "Resetting per request\n"); - adapter->hw_status = MWIFIEX_HW_STATUS_RESET; - mwifiex_cancel_all_pending_cmd(adapter); adapter->if_ops.card_reset(adapter); } diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c index 3ecb59f7405b..e11919db7818 100644 --- a/drivers/net/wireless/marvell/mwifiex/init.c +++ b/drivers/net/wireless/marvell/mwifiex/init.c @@ -337,17 +337,9 @@ void mwifiex_wake_up_net_dev_queue(struct net_device *netdev, struct mwifiex_adapter *adapter) { unsigned long dev_queue_flags; - unsigned int i; spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags); - - for (i = 0; i < netdev->num_tx_queues; i++) { - struct netdev_queue *txq = netdev_get_tx_queue(netdev, i); - - if (netif_tx_queue_stopped(txq)) - netif_tx_wake_queue(txq); - } - + netif_tx_wake_all_queues(netdev); spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags); } @@ -358,30 +350,20 @@ void mwifiex_stop_net_dev_queue(struct net_device *netdev, struct mwifiex_adapter *adapter) { unsigned long dev_queue_flags; - unsigned int i; spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags); - - for (i = 0; i < netdev->num_tx_queues; i++) { - struct netdev_queue *txq = netdev_get_tx_queue(netdev, i); - - if (!netif_tx_queue_stopped(txq)) - netif_tx_stop_queue(txq); - } - + netif_tx_stop_all_queues(netdev); spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags); } /* - * This function releases the lock variables and 
frees the locks and - * associated locks. + * This function invalidates the list heads. */ -static void mwifiex_free_lock_list(struct mwifiex_adapter *adapter) +static void mwifiex_invalidate_lists(struct mwifiex_adapter *adapter) { struct mwifiex_private *priv; s32 i, j; - /* Free lists */ list_del(&adapter->cmd_free_q); list_del(&adapter->cmd_pending_q); list_del(&adapter->scan_pending_q); @@ -418,9 +400,11 @@ mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter) mwifiex_cancel_all_pending_cmd(adapter); wake_up_interruptible(&adapter->cmd_wait_q.wait); wake_up_interruptible(&adapter->hs_activate_wait_q); +} - /* Free lock variables */ - mwifiex_free_lock_list(adapter); +void mwifiex_free_cmd_buffers(struct mwifiex_adapter *adapter) +{ + mwifiex_invalidate_lists(adapter); /* Free command buffer */ mwifiex_dbg(adapter, INFO, "info: free cmd buffer\n"); diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index f2600b827e81..d67d70002ea9 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -46,7 +46,7 @@ MODULE_PARM_DESC(mfg_mode, "manufacturing mode enable:1, disable:0"); bool aggr_ctrl; module_param(aggr_ctrl, bool, 0000); -MODULE_PARM_DESC(aggr_ctrl, "usb tx aggreataon enable:1, disable:0"); +MODULE_PARM_DESC(aggr_ctrl, "usb tx aggregation enable:1, disable:0"); /* * This function registers the device and performs all the necessary @@ -588,7 +588,7 @@ static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context) if (mwifiex_init_channel_scan_gap(adapter)) { mwifiex_dbg(adapter, ERROR, "could not init channel stats table\n"); - goto err_init_fw; + goto err_init_chan_scan; } if (driver_mode) { @@ -636,6 +636,7 @@ static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context) err_add_intf: vfree(adapter->chan_stats); +err_init_chan_scan: wiphy_unregister(adapter->wiphy); wiphy_free(adapter->wiphy); err_init_fw: @@ -653,6 +654,7 @@ err_dnld_fw: if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) { pr_debug("info: %s: shutdown mwifiex\n", __func__); mwifiex_shutdown_drv(adapter); + mwifiex_free_cmd_buffers(adapter); } init_failed = true; @@ -665,8 +667,11 @@ done: release_firmware(adapter->firmware); adapter->firmware = NULL; } - if (init_failed) + if (init_failed) { + if (adapter->irq_wakeup >= 0) + device_init_wakeup(adapter->dev, false); mwifiex_free_adapter(adapter); + } /* Tell all current and future waiters we're finished */ complete_all(fw_done); @@ -1352,26 +1357,12 @@ static void mwifiex_main_work_queue(struct work_struct *work) mwifiex_main_process(adapter); } -/* - * This function gets called during PCIe function level reset. Required - * code is extracted from mwifiex_remove_card() - */ -int -mwifiex_shutdown_sw(struct mwifiex_adapter *adapter) +/* Common teardown code used for both device removal and reset */ +static void mwifiex_uninit_sw(struct mwifiex_adapter *adapter) { struct mwifiex_private *priv; int i; - if (!adapter) - goto exit_return; - - wait_for_completion(adapter->fw_done); - /* Caller should ensure we aren't suspending while this happens */ - reinit_completion(adapter->fw_done); - - priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); - mwifiex_deauthenticate(priv, NULL); - /* We can no longer handle interrupts once we start doing the teardown * below. 
*/ @@ -1380,6 +1371,7 @@ mwifiex_shutdown_sw(struct mwifiex_adapter *adapter) adapter->surprise_removed = true; mwifiex_terminate_workqueue(adapter); + adapter->int_status = 0; /* Stop data */ for (i = 0; i < adapter->priv_num; i++) { @@ -1393,12 +1385,9 @@ mwifiex_shutdown_sw(struct mwifiex_adapter *adapter) } mwifiex_dbg(adapter, CMD, "cmd: calling mwifiex_shutdown_drv...\n"); - mwifiex_shutdown_drv(adapter); - if (adapter->if_ops.down_dev) - adapter->if_ops.down_dev(adapter); - mwifiex_dbg(adapter, CMD, "cmd: mwifiex_shutdown_drv done\n"); + if (atomic_read(&adapter->rx_pending) || atomic_read(&adapter->tx_pending) || atomic_read(&adapter->cmd_pending)) { @@ -1420,10 +1409,37 @@ mwifiex_shutdown_sw(struct mwifiex_adapter *adapter) mwifiex_del_virtual_intf(adapter->wiphy, &priv->wdev); rtnl_unlock(); } + + wiphy_unregister(adapter->wiphy); + wiphy_free(adapter->wiphy); + adapter->wiphy = NULL; + vfree(adapter->chan_stats); + mwifiex_free_cmd_buffers(adapter); +} + +/* + * This function gets called during PCIe function level reset. + */ +int mwifiex_shutdown_sw(struct mwifiex_adapter *adapter) +{ + struct mwifiex_private *priv; + + if (!adapter) + return 0; + + wait_for_completion(adapter->fw_done); + /* Caller should ensure we aren't suspending while this happens */ + reinit_completion(adapter->fw_done); + + priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); + mwifiex_deauthenticate(priv, NULL); + + mwifiex_uninit_sw(adapter); + + if (adapter->if_ops.down_dev) + adapter->if_ops.down_dev(adapter); - mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__); -exit_return: return 0; } EXPORT_SYMBOL_GPL(mwifiex_shutdown_sw); @@ -1506,6 +1522,7 @@ err_kmalloc: mwifiex_dbg(adapter, ERROR, "info: %s: shutdown mwifiex\n", __func__); mwifiex_shutdown_drv(adapter); + mwifiex_free_cmd_buffers(adapter); } complete_all(adapter->fw_done); @@ -1605,10 +1622,8 @@ mwifiex_add_card(void *card, struct completion *fw_done, adapter->cmd_wait_q.status = 0; adapter->scan_wait_q_woken = false; - if ((num_possible_cpus() > 1) || adapter->iface_type == MWIFIEX_USB) { + if ((num_possible_cpus() > 1) || adapter->iface_type == MWIFIEX_USB) adapter->rx_work_enabled = true; - pr_notice("rx work enabled, cpus %d\n", num_possible_cpus()); - } adapter->workqueue = alloc_workqueue("MWIFIEX_WORK_QUEUE", @@ -1653,8 +1668,11 @@ err_registerdev: if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) { pr_debug("info: %s: shutdown mwifiex\n", __func__); mwifiex_shutdown_drv(adapter); + mwifiex_free_cmd_buffers(adapter); } err_kmalloc: + if (adapter->irq_wakeup >= 0) + device_init_wakeup(adapter->dev, false); mwifiex_free_adapter(adapter); err_init_sw: @@ -1676,64 +1694,10 @@ EXPORT_SYMBOL_GPL(mwifiex_add_card); */ int mwifiex_remove_card(struct mwifiex_adapter *adapter) { - struct mwifiex_private *priv = NULL; - int i; - if (!adapter) - goto exit_remove; - - /* We can no longer handle interrupts once we start doing the teardown - * below. 
*/ - if (adapter->if_ops.disable_int) - adapter->if_ops.disable_int(adapter); - - adapter->surprise_removed = true; - - mwifiex_terminate_workqueue(adapter); - - /* Stop data */ - for (i = 0; i < adapter->priv_num; i++) { - priv = adapter->priv[i]; - if (priv && priv->netdev) { - mwifiex_stop_net_dev_queue(priv->netdev, adapter); - if (netif_carrier_ok(priv->netdev)) - netif_carrier_off(priv->netdev); - } - } - - mwifiex_dbg(adapter, CMD, - "cmd: calling mwifiex_shutdown_drv...\n"); - - mwifiex_shutdown_drv(adapter); - mwifiex_dbg(adapter, CMD, - "cmd: mwifiex_shutdown_drv done\n"); - if (atomic_read(&adapter->rx_pending) || - atomic_read(&adapter->tx_pending) || - atomic_read(&adapter->cmd_pending)) { - mwifiex_dbg(adapter, ERROR, - "rx_pending=%d, tx_pending=%d,\t" - "cmd_pending=%d\n", - atomic_read(&adapter->rx_pending), - atomic_read(&adapter->tx_pending), - atomic_read(&adapter->cmd_pending)); - } - - for (i = 0; i < adapter->priv_num; i++) { - priv = adapter->priv[i]; - - if (!priv) - continue; - - rtnl_lock(); - if (priv->netdev && - priv->wdev.iftype != NL80211_IFTYPE_UNSPECIFIED) - mwifiex_del_virtual_intf(adapter->wiphy, &priv->wdev); - rtnl_unlock(); - } - vfree(adapter->chan_stats); + return 0; - wiphy_unregister(adapter->wiphy); - wiphy_free(adapter->wiphy); + mwifiex_uninit_sw(adapter); if (adapter->irq_wakeup >= 0) device_init_wakeup(adapter->dev, false); @@ -1748,7 +1712,6 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter) "info: free adapter\n"); mwifiex_free_adapter(adapter); -exit_remove: return 0; } EXPORT_SYMBOL_GPL(mwifiex_remove_card); diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index f8cf3079ac7d..537a0ad795ff 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -1077,9 +1077,9 @@ int mwifiex_get_debug_info(struct mwifiex_private *, struct mwifiex_debug_info *); int mwifiex_alloc_cmd_buffer(struct mwifiex_adapter *adapter); -int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter); +void mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter); +void mwifiex_free_cmd_buffers(struct mwifiex_adapter *adapter); void mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter); -void mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter); void mwifiex_cancel_pending_scan_cmd(struct mwifiex_adapter *adapter); void mwifiex_cancel_scan(struct mwifiex_adapter *adapter); @@ -1087,8 +1087,7 @@ void mwifiex_recycle_cmd_node(struct mwifiex_adapter *adapter, struct cmd_ctrl_node *cmd_node); void mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter, - struct cmd_ctrl_node *cmd_node, - u32 addtail); + struct cmd_ctrl_node *cmd_node); int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter); int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter); diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 21f2201405d1..cd314946452c 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -1043,12 +1043,14 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter) mwifiex_unmap_pci_memory(adapter, card->cmdrsp_buf, PCI_DMA_FROMDEVICE); dev_kfree_skb_any(card->cmdrsp_buf); + card->cmdrsp_buf = NULL; } if (card && card->cmd_buf) { mwifiex_unmap_pci_memory(adapter, card->cmd_buf, PCI_DMA_TODEVICE); dev_kfree_skb_any(card->cmd_buf); + card->cmd_buf = NULL; } return 0; } @@ -1983,7 +1985,8 @@ static int 
mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter, * (3) wifi image. * * This function bypasses the header and bluetooth part, and returns - the offset of the tail wifi-only part. + the offset of the tail wifi-only part. If the image is already wifi-only, + that is, starts with CMD1, return 0. */ static int mwifiex_extract_wifi_fw(struct mwifiex_adapter *adapter, @@ -1991,7 +1994,7 @@ static int mwifiex_extract_wifi_fw(struct mwifiex_adapter *adapter, const struct mwifiex_fw_data *fwdata; u32 offset = 0, data_len, dnld_cmd; int ret = 0; - bool cmd7_before = false; + bool cmd7_before = false, first_cmd = false; while (1) { /* Check for integer and buffer overflow */ @@ -2012,20 +2015,29 @@ static int mwifiex_extract_wifi_fw(struct mwifiex_adapter *adapter, switch (dnld_cmd) { case MWIFIEX_FW_DNLD_CMD_1: - if (!cmd7_before) { - mwifiex_dbg(adapter, ERROR, - "no cmd7 before cmd1!\n"); + if (offset + data_len < data_len) { + mwifiex_dbg(adapter, ERROR, "bad FW parse\n"); ret = -1; goto done; } - if (offset + data_len < data_len) { - mwifiex_dbg(adapter, ERROR, "bad FW parse\n"); + + /* Image starts with cmd1, already wifi-only firmware */ + if (!first_cmd) { + mwifiex_dbg(adapter, MSG, + "input wifi-only firmware\n"); + return 0; + } + + if (!cmd7_before) { + mwifiex_dbg(adapter, ERROR, + "no cmd7 before cmd1!\n"); ret = -1; goto done; } offset += data_len; break; case MWIFIEX_FW_DNLD_CMD_5: + first_cmd = true; /* Check for integer overflow */ if (offset + data_len < data_len) { mwifiex_dbg(adapter, ERROR, "bad FW parse\n"); @@ -2035,6 +2047,7 @@ static int mwifiex_extract_wifi_fw(struct mwifiex_adapter *adapter, offset += data_len; break; case MWIFIEX_FW_DNLD_CMD_6: + first_cmd = true; /* Check for integer overflow */ if (offset + data_len < data_len) { mwifiex_dbg(adapter, ERROR, "bad FW parse\n"); @@ -2051,6 +2064,7 @@ } goto done; case MWIFIEX_FW_DNLD_CMD_7: + first_cmd = true; cmd7_before = true; break; default: @@ -2428,7 +2442,7 @@ exit: * In case of Rx packets received, the packets are uploaded from card to * host and processed accordingly.
*/ -static int mwifiex_process_pcie_int(struct mwifiex_adapter *adapter) +static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) { int ret; u32 pcie_ireg = 0; @@ -2471,28 +2485,24 @@ static int mwifiex_process_pcie_int(struct mwifiex_adapter *adapter) } if (pcie_ireg & HOST_INTR_DNLD_DONE) { - pcie_ireg &= ~HOST_INTR_DNLD_DONE; mwifiex_dbg(adapter, INTR, "info: TX DNLD Done\n"); ret = mwifiex_pcie_send_data_complete(adapter); if (ret) return ret; } if (pcie_ireg & HOST_INTR_UPLD_RDY) { - pcie_ireg &= ~HOST_INTR_UPLD_RDY; mwifiex_dbg(adapter, INTR, "info: Rx DATA\n"); ret = mwifiex_pcie_process_recv_data(adapter); if (ret) return ret; } if (pcie_ireg & HOST_INTR_EVENT_RDY) { - pcie_ireg &= ~HOST_INTR_EVENT_RDY; mwifiex_dbg(adapter, INTR, "info: Rx EVENT\n"); ret = mwifiex_pcie_process_event_ready(adapter); if (ret) return ret; } if (pcie_ireg & HOST_INTR_CMD_DONE) { - pcie_ireg &= ~HOST_INTR_CMD_DONE; if (adapter->cmd_sent) { mwifiex_dbg(adapter, INTR, "info: CMD sent Interrupt\n"); @@ -2507,75 +2517,13 @@ static int mwifiex_process_pcie_int(struct mwifiex_adapter *adapter) mwifiex_dbg(adapter, INTR, "info: cmd_sent=%d data_sent=%d\n", adapter->cmd_sent, adapter->data_sent); - if (!card->msi_enable && adapter->ps_state != PS_STATE_SLEEP) + if (!card->msi_enable && !card->msix_enable && + adapter->ps_state != PS_STATE_SLEEP) mwifiex_pcie_enable_host_int(adapter); return 0; } -static int mwifiex_process_msix_int(struct mwifiex_adapter *adapter) -{ - int ret; - u32 pcie_ireg; - unsigned long flags; - - spin_lock_irqsave(&adapter->int_lock, flags); - /* Clear out unused interrupts */ - pcie_ireg = adapter->int_status; - adapter->int_status = 0; - spin_unlock_irqrestore(&adapter->int_lock, flags); - - if (pcie_ireg & HOST_INTR_DNLD_DONE) { - mwifiex_dbg(adapter, INTR, - "info: TX DNLD Done\n"); - ret = mwifiex_pcie_send_data_complete(adapter); - if (ret) - return ret; - } - if (pcie_ireg & HOST_INTR_UPLD_RDY) { - mwifiex_dbg(adapter, INTR, - "info: Rx DATA\n"); - ret = mwifiex_pcie_process_recv_data(adapter); - if (ret) - return ret; - } - if (pcie_ireg & HOST_INTR_EVENT_RDY) { - mwifiex_dbg(adapter, INTR, - "info: Rx EVENT\n"); - ret = mwifiex_pcie_process_event_ready(adapter); - if (ret) - return ret; - } - - if (pcie_ireg & HOST_INTR_CMD_DONE) { - if (adapter->cmd_sent) { - mwifiex_dbg(adapter, INTR, - "info: CMD sent Interrupt\n"); - adapter->cmd_sent = false; - } - /* Handle command response */ - ret = mwifiex_pcie_process_cmd_complete(adapter); - if (ret) - return ret; - } - - mwifiex_dbg(adapter, INTR, - "info: cmd_sent=%d data_sent=%d\n", - adapter->cmd_sent, adapter->data_sent); - - return 0; -} - -static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) -{ - struct pcie_service_card *card = adapter->card; - - if (card->msix_enable) - return mwifiex_process_msix_int(adapter); - else - return mwifiex_process_pcie_int(adapter); -} - /* * This function downloads data from driver to card. 
* @@ -2934,7 +2882,6 @@ static void mwifiex_pcie_free_buffers(struct mwifiex_adapter *adapter) mwifiex_pcie_delete_evtbd_ring(adapter); mwifiex_pcie_delete_rxbd_ring(adapter); mwifiex_pcie_delete_txbd_ring(adapter); - card->cmdrsp_buf = NULL; } /* @@ -3036,15 +2983,14 @@ static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter) "Failed to write driver not-ready signature\n"); } - mwifiex_pcie_free_buffers(adapter); + pci_disable_device(pdev); - if (pdev) { - pci_iounmap(pdev, card->pci_mmap); - pci_iounmap(pdev, card->pci_mmap1); - pci_disable_device(pdev); - pci_release_region(pdev, 2); - pci_release_region(pdev, 0); - } + pci_iounmap(pdev, card->pci_mmap); + pci_iounmap(pdev, card->pci_mmap1); + pci_release_region(pdev, 2); + pci_release_region(pdev, 0); + + mwifiex_pcie_free_buffers(adapter); } static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter) @@ -3220,7 +3166,6 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter) static void mwifiex_pcie_up_dev(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; - int ret; struct pci_dev *pdev = card->dev; /* tx_buf_size might be changed to 3584 by firmware during @@ -3228,11 +3173,9 @@ static void mwifiex_pcie_up_dev(struct mwifiex_adapter *adapter) */ adapter->tx_buf_size = card->pcie.tx_buf_size; - ret = mwifiex_pcie_alloc_buffers(adapter); - if (!ret) - return; + mwifiex_pcie_alloc_buffers(adapter); - pci_iounmap(pdev, card->pci_mmap1); + pci_set_master(pdev); } /* This function cleans up the PCI-E host memory space. */ @@ -3240,10 +3183,13 @@ static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; + struct pci_dev *pdev = card->dev; if (mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000)) mwifiex_dbg(adapter, ERROR, "Failed to write driver not-ready signature\n"); + pci_clear_master(pdev); + adapter->seq_num = 0; mwifiex_pcie_free_buffers(adapter); diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index ae9630b49342..d8e8b857ddfb 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -1534,8 +1534,7 @@ int mwifiex_scan_networks(struct mwifiex_private *priv, list_del(&cmd_node->list); spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); - mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, - true); + mwifiex_insert_cmd_to_pending_q(adapter, cmd_node); queue_work(adapter->workqueue, &adapter->main_work); /* Perform internal scan synchronously */ @@ -2033,7 +2032,7 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv) struct cmd_ctrl_node, list); list_del(&cmd_node->list); spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); - mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true); + mwifiex_insert_cmd_to_pending_q(adapter, cmd_node); } return; @@ -2492,6 +2491,12 @@ mwifiex_update_chan_statistics(struct mwifiex_private *priv, sizeof(struct mwifiex_chan_stats); for (i = 0 ; i < num_chan; i++) { + if (adapter->survey_idx >= adapter->num_in_chan_stats) { + mwifiex_dbg(adapter, WARN, + "FW reported too many channel results (max %d)\n", + adapter->num_in_chan_stats); + return; + } chan_stats.chan_num = fw_chan_stats->chan_num; chan_stats.bandcfg = fw_chan_stats->bandcfg; chan_stats.flags = fw_chan_stats->flags; diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c 
index f81a006668f3..fd5183c10c4e 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -390,7 +390,8 @@ mwifiex_sdio_remove(struct sdio_func *func) mwifiex_dbg(adapter, INFO, "info: SDIO func num=%d\n", func->num); ret = mwifiex_sdio_read_fw_status(adapter, &firmware_stat); - if (firmware_stat == FIRMWARE_READY_SDIO && !adapter->mfg_mode) { + if (!ret && firmware_stat == FIRMWARE_READY_SDIO && + !adapter->mfg_mode) { mwifiex_deauthenticate_all(adapter); priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c index 534d94a206a5..fb090144a6d8 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c @@ -189,9 +189,7 @@ static int mwifiex_cmd_tx_rate_cfg(struct mwifiex_private *priv, if (pbitmap_rates != NULL) { rate_scope->hr_dsss_rate_bitmap = cpu_to_le16(pbitmap_rates[0]); rate_scope->ofdm_rate_bitmap = cpu_to_le16(pbitmap_rates[1]); - for (i = 0; - i < sizeof(rate_scope->ht_mcs_rate_bitmap) / sizeof(u16); - i++) + for (i = 0; i < ARRAY_SIZE(rate_scope->ht_mcs_rate_bitmap); i++) rate_scope->ht_mcs_rate_bitmap[i] = cpu_to_le16(pbitmap_rates[2 + i]); if (priv->adapter->fw_api_ver == MWIFIEX_FW_V15) { @@ -206,9 +204,7 @@ static int mwifiex_cmd_tx_rate_cfg(struct mwifiex_private *priv, cpu_to_le16(priv->bitmap_rates[0]); rate_scope->ofdm_rate_bitmap = cpu_to_le16(priv->bitmap_rates[1]); - for (i = 0; - i < sizeof(rate_scope->ht_mcs_rate_bitmap) / sizeof(u16); - i++) + for (i = 0; i < ARRAY_SIZE(rate_scope->ht_mcs_rate_bitmap); i++) rate_scope->ht_mcs_rate_bitmap[i] = cpu_to_le16(priv->bitmap_rates[2 + i]); if (priv->adapter->fw_api_ver == MWIFIEX_FW_V15) { @@ -1755,7 +1751,7 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv, struct mwifiex_ie_types_vhtcap *vht_capab; struct mwifiex_ie_types_aid *aid; struct mwifiex_ie_types_tdls_idle_timeout *timeout; - u8 *pos, qos_info; + u8 *pos; u16 config_len = 0; struct station_parameters *params = priv->sta_params; @@ -1789,12 +1785,11 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv, put_unaligned_le16(params->capability, pos); config_len += sizeof(params->capability); - qos_info = params->uapsd_queues | (params->max_sp << 5); - wmm_qos_info = (struct mwifiex_ie_types_qos_info *)(pos + - config_len); + wmm_qos_info = (void *)(pos + config_len); wmm_qos_info->header.type = cpu_to_le16(WLAN_EID_QOS_CAPA); - wmm_qos_info->header.len = cpu_to_le16(sizeof(qos_info)); - wmm_qos_info->qos_info = qos_info; + wmm_qos_info->header.len = + cpu_to_le16(sizeof(wmm_qos_info->qos_info)); + wmm_qos_info->qos_info = 0; config_len += sizeof(struct mwifiex_ie_types_qos_info); if (params->ht_capa) { diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c index 2945775e83c5..0fba5b10ef2d 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c @@ -298,9 +298,8 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv, priv->bitmap_rates[1] = le16_to_cpu(rate_scope->ofdm_rate_bitmap); for (i = 0; - i < - sizeof(rate_scope->ht_mcs_rate_bitmap) / - sizeof(u16); i++) + i < ARRAY_SIZE(rate_scope->ht_mcs_rate_bitmap); + i++) priv->bitmap_rates[2 + i] = le16_to_cpu(rate_scope-> ht_mcs_rate_bitmap[i]); diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c index 
42997e05d90f..a6077ab3efc3 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c @@ -654,9 +654,9 @@ int mwifiex_get_bss_info(struct mwifiex_private *priv, */ int mwifiex_disable_auto_ds(struct mwifiex_private *priv) { - struct mwifiex_ds_auto_ds auto_ds; - - auto_ds.auto_ds = DEEP_SLEEP_OFF; + struct mwifiex_ds_auto_ds auto_ds = { + .auto_ds = DEEP_SLEEP_OFF, + }; return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_PS_MODE_ENH, DIS_AUTO_PS, BITMAP_AUTO_DS, &auto_ds, true); @@ -811,8 +811,8 @@ int mwifiex_drv_set_power(struct mwifiex_private *priv, u32 *ps_mode) * is checked to determine WPA version. If buffer length is zero, the existing * WPA IE is reset. */ -static int mwifiex_set_wpa_ie_helper(struct mwifiex_private *priv, - u8 *ie_data_ptr, u16 ie_len) +static int mwifiex_set_wpa_ie(struct mwifiex_private *priv, + u8 *ie_data_ptr, u16 ie_len) { if (ie_len) { if (ie_len > sizeof(priv->wpa_ie)) { @@ -1351,101 +1351,96 @@ static int mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr, u16 ie_len) { - int ret = 0; struct ieee_types_vendor_header *pvendor_ie; const u8 wpa_oui[] = { 0x00, 0x50, 0xf2, 0x01 }; const u8 wps_oui[] = { 0x00, 0x50, 0xf2, 0x04 }; - u16 unparsed_len = ie_len; - int find_wpa_ie = 0; + u16 unparsed_len = ie_len, cur_ie_len; /* If the passed length is zero, reset the buffer */ if (!ie_len) { priv->gen_ie_buf_len = 0; priv->wps.session_enable = false; - return 0; - } else if (!ie_data_ptr) { + } else if (!ie_data_ptr || + ie_len <= sizeof(struct ieee_types_header)) { return -1; } pvendor_ie = (struct ieee_types_vendor_header *) ie_data_ptr; while (pvendor_ie) { + cur_ie_len = pvendor_ie->len + sizeof(struct ieee_types_header); + + if (pvendor_ie->element_id == WLAN_EID_RSN) { + /* IE is a WPA/WPA2 IE so call set_wpa function */ + mwifiex_set_wpa_ie(priv, (u8 *)pvendor_ie, cur_ie_len); + priv->wps.session_enable = false; + goto next_ie; + } + + if (pvendor_ie->element_id == WLAN_EID_BSS_AC_ACCESS_DELAY) { + /* IE is a WAPI IE so call set_wapi function */ + mwifiex_set_wapi_ie(priv, (u8 *)pvendor_ie, + cur_ie_len); + goto next_ie; + } + if (pvendor_ie->element_id == WLAN_EID_VENDOR_SPECIFIC) { - /* Test to see if it is a WPA IE, if not, then it is a - * gen IE + /* Test to see if it is a WPA IE, if not, then + * it is a gen IE */ if (!memcmp(pvendor_ie->oui, wpa_oui, sizeof(wpa_oui))) { - find_wpa_ie = 1; - break; + /* IE is a WPA/WPA2 IE so call set_wpa function + */ + mwifiex_set_wpa_ie(priv, (u8 *)pvendor_ie, + cur_ie_len); + priv->wps.session_enable = false; + goto next_ie; } - /* Test to see if it is a WPS IE, if so, enable - * wps session flag - */ if (!memcmp(pvendor_ie->oui, wps_oui, sizeof(wps_oui))) { + /* Test to see if it is a WPS IE, + * if so, enable wps session flag + */ priv->wps.session_enable = true; mwifiex_dbg(priv->adapter, MSG, - "info: WPS Session Enabled.\n"); - ret = mwifiex_set_wps_ie(priv, - (u8 *)pvendor_ie, - unparsed_len); + "WPS Session Enabled.\n"); + mwifiex_set_wps_ie(priv, (u8 *)pvendor_ie, + cur_ie_len); + goto next_ie; } } - if (pvendor_ie->element_id == WLAN_EID_RSN) { - find_wpa_ie = 1; - break; - } + /* Saved in gen_ie, such as P2P IE, etc. */ - if (pvendor_ie->element_id == WLAN_EID_BSS_AC_ACCESS_DELAY) { - /* IE is a WAPI IE so call set_wapi function */ - ret = mwifiex_set_wapi_ie(priv, (u8 *)pvendor_ie, - unparsed_len); - return ret; + /* Verify that the passed length is not larger than the + * available space remaining in the buffer + */ + if
(cur_ie_len < + (sizeof(priv->gen_ie_buf) - priv->gen_ie_buf_len)) { + /* Append the passed data to the end + * of the genIeBuffer + */ + memcpy(priv->gen_ie_buf + priv->gen_ie_buf_len, + (u8 *)pvendor_ie, cur_ie_len); + /* Increment the stored buffer length by the + * size passed + */ + priv->gen_ie_buf_len += cur_ie_len; } - unparsed_len -= (pvendor_ie->len + - sizeof(struct ieee_types_header)); +next_ie: + unparsed_len -= cur_ie_len; if (unparsed_len <= sizeof(struct ieee_types_header)) pvendor_ie = NULL; else pvendor_ie = (struct ieee_types_vendor_header *) - (((u8 *)pvendor_ie) + pvendor_ie->len + - sizeof(struct ieee_types_header)); + (((u8 *)pvendor_ie) + cur_ie_len); } - if (find_wpa_ie) { - /* IE is a WPA/WPA2 IE so call set_wpa function */ - ret = mwifiex_set_wpa_ie_helper(priv, (u8 *)pvendor_ie, - unparsed_len); - priv->wps.session_enable = false; - return ret; - } - - /* - * Verify that the passed length is not larger than the - * available space remaining in the buffer - */ - if (ie_len < (sizeof(priv->gen_ie_buf) - priv->gen_ie_buf_len)) { - - /* Append the passed data to the end of the - genIeBuffer */ - memcpy(priv->gen_ie_buf + priv->gen_ie_buf_len, ie_data_ptr, - ie_len); - /* Increment the stored buffer length by the - size passed */ - priv->gen_ie_buf_len += ie_len; - } else { - /* Passed data does not fit in the remaining - buffer space */ - ret = -1; - } - - /* Return 0, or -1 for error case */ - return ret; + return 0; } /* diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c index 39cd677d4159..e76af2866a19 100644 --- a/drivers/net/wireless/marvell/mwifiex/tdls.c +++ b/drivers/net/wireless/marvell/mwifiex/tdls.c @@ -130,7 +130,7 @@ mwifiex_tdls_append_rates_ie(struct mwifiex_private *priv, if (skb_tailroom(skb) < rates_size + 4) { mwifiex_dbg(priv->adapter, ERROR, - "Insuffient space while adding rates\n"); + "Insufficient space while adding rates\n"); return -ENOMEM; } diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c index cb1753e43ef4..880ef1cb4088 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.c +++ b/drivers/net/wireless/marvell/mwifiex/usb.c @@ -1112,7 +1112,7 @@ static void mwifiex_usb_tx_aggr_tmo(unsigned long context) if (err) { mwifiex_dbg(adapter, ERROR, "prepare tx aggr skb failed, err=%d\n", err); - return; + goto unlock; } if (atomic_read(&port->tx_data_urb_pending) >= @@ -1133,6 +1133,7 @@ static void mwifiex_usb_tx_aggr_tmo(unsigned long context) done: if (err == -1) mwifiex_write_data_complete(adapter, skb_send, 0, -1); +unlock: spin_unlock_irqrestore(&port->tx_aggr_lock, flags); } diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index e3c090008125..856fa6e8327e 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -266,11 +266,19 @@ static int qtnf_start_ap(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ap_settings *settings) { struct qtnf_vif *vif = qtnf_netdev_get_priv(dev); + struct qtnf_wmac *mac = wiphy_priv(wiphy); struct qtnf_bss_config *bss_cfg; int ret; - bss_cfg = &vif->bss_cfg; + if (!cfg80211_chandef_identical(&mac->chandef, &settings->chandef)) { + memcpy(&mac->chandef, &settings->chandef, sizeof(mac->chandef)); + if (vif->vifid != 0) + pr_warn("%s: unexpected chan %u (%u MHz)\n", dev->name, + settings->chandef.chan->hw_value, + settings->chandef.chan->center_freq); + } + bss_cfg = 
&vif->bss_cfg; memset(bss_cfg, 0, sizeof(*bss_cfg)); bss_cfg->bcn_period = settings->beacon_interval; @@ -281,8 +289,6 @@ static int qtnf_start_ap(struct wiphy *wiphy, struct net_device *dev, bss_cfg->ssid_len = settings->ssid_len; memcpy(&bss_cfg->ssid, settings->ssid, bss_cfg->ssid_len); - memcpy(&bss_cfg->chandef, &settings->chandef, - sizeof(struct cfg80211_chan_def)); memcpy(&bss_cfg->crypto, &settings->crypto, sizeof(struct cfg80211_crypto_settings)); @@ -573,19 +579,33 @@ qtnf_del_station(struct wiphy *wiphy, struct net_device *dev, return ret; } +static void qtnf_scan_timeout(unsigned long data) +{ + struct qtnf_wmac *mac = (struct qtnf_wmac *)data; + + pr_warn("mac%d scan timed out\n", mac->macid); + qtnf_scan_done(mac, true); +} + static int qtnf_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request) { struct qtnf_wmac *mac = wiphy_priv(wiphy); - int ret; mac->scan_req = request; - ret = qtnf_cmd_send_scan(mac); - if (ret) + if (qtnf_cmd_send_scan(mac)) { pr_err("MAC%u: failed to start scan\n", mac->macid); + mac->scan_req = NULL; + return -EFAULT; + } - return ret; + mac->scan_timeout.data = (unsigned long)mac; + mac->scan_timeout.function = qtnf_scan_timeout; + mod_timer(&mac->scan_timeout, + jiffies + QTNF_SCAN_TIMEOUT_SEC * HZ); + + return 0; } static int @@ -593,6 +613,8 @@ qtnf_connect(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_connect_params *sme) { struct qtnf_vif *vif = qtnf_netdev_get_priv(dev); + struct qtnf_wmac *mac = wiphy_priv(wiphy); + struct cfg80211_chan_def chandef; struct qtnf_bss_config *bss_cfg; int ret; @@ -605,9 +627,20 @@ qtnf_connect(struct wiphy *wiphy, struct net_device *dev, bss_cfg = &vif->bss_cfg; memset(bss_cfg, 0, sizeof(*bss_cfg)); + if (sme->channel) { + /* FIXME: need to set proper nl80211_channel_type value */ + cfg80211_chandef_create(&chandef, sme->channel, + NL80211_CHAN_HT20); + /* fall-back to minimal safe chandef description */ + if (!cfg80211_chandef_valid(&chandef)) + cfg80211_chandef_create(&chandef, sme->channel, + NL80211_CHAN_HT20); + + memcpy(&mac->chandef, &chandef, sizeof(mac->chandef)); + } + bss_cfg->ssid_len = sme->ssid_len; memcpy(&bss_cfg->ssid, sme->ssid, bss_cfg->ssid_len); - bss_cfg->chandef.chan = sme->channel; bss_cfg->auth_type = sme->auth_type; bss_cfg->privacy = sme->privacy; bss_cfg->mfp = sme->mfp; @@ -677,6 +710,175 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev, return 0; } +static int +qtnf_dump_survey(struct wiphy *wiphy, struct net_device *dev, + int idx, struct survey_info *survey) +{ + struct qtnf_wmac *mac = wiphy_priv(wiphy); + struct ieee80211_supported_band *sband; + struct cfg80211_chan_def *chandef; + struct ieee80211_channel *chan; + struct qtnf_chan_stats stats; + struct qtnf_vif *vif; + int ret; + + vif = qtnf_netdev_get_priv(dev); + chandef = &mac->chandef; + + sband = wiphy->bands[NL80211_BAND_2GHZ]; + if (sband && idx >= sband->n_channels) { + idx -= sband->n_channels; + sband = NULL; + } + + if (!sband) + sband = wiphy->bands[NL80211_BAND_5GHZ]; + + if (!sband || idx >= sband->n_channels) + return -ENOENT; + + chan = &sband->channels[idx]; + memset(&stats, 0, sizeof(stats)); + + survey->channel = chan; + survey->filled = 0x0; + + if (chandef->chan) { + if (chan->hw_value == chandef->chan->hw_value) + survey->filled = SURVEY_INFO_IN_USE; + } + + ret = qtnf_cmd_get_chan_stats(mac, chan->hw_value, &stats); + switch (ret) { + case 0: + if (unlikely(stats.chan_num != chan->hw_value)) { + pr_err("received stats for channel %d instead of %d\n", + 
stats.chan_num, chan->hw_value); + ret = -EINVAL; + break; + } + + survey->filled |= SURVEY_INFO_TIME | + SURVEY_INFO_TIME_SCAN | + SURVEY_INFO_TIME_BUSY | + SURVEY_INFO_TIME_RX | + SURVEY_INFO_TIME_TX | + SURVEY_INFO_NOISE_DBM; + + survey->time_scan = stats.cca_try; + survey->time = stats.cca_try; + survey->time_tx = stats.cca_tx; + survey->time_rx = stats.cca_rx; + survey->time_busy = stats.cca_busy; + survey->noise = stats.chan_noise; + break; + case -ENOENT: + pr_debug("no stats for channel %u\n", chan->hw_value); + ret = 0; + break; + default: + pr_debug("failed to get chan(%d) stats from card\n", + chan->hw_value); + ret = -EINVAL; + break; + } + + return ret; +} + +static int +qtnf_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev, + struct cfg80211_chan_def *chandef) +{ + struct qtnf_wmac *mac = wiphy_priv(wiphy); + struct net_device *ndev = wdev->netdev; + struct qtnf_vif *vif; + + if (!ndev) + return -ENODEV; + + vif = qtnf_netdev_get_priv(wdev->netdev); + + switch (vif->wdev.iftype) { + case NL80211_IFTYPE_STATION: + if (vif->sta_state == QTNF_STA_DISCONNECTED) { + pr_warn("%s: STA disconnected\n", ndev->name); + return -ENODATA; + } + break; + case NL80211_IFTYPE_AP: + if (!(vif->bss_status & QTNF_STATE_AP_START)) { + pr_warn("%s: AP not started\n", ndev->name); + return -ENODATA; + } + break; + default: + pr_err("unsupported vif type (%d)\n", vif->wdev.iftype); + return -ENODATA; + } + + if (!cfg80211_chandef_valid(&mac->chandef)) { + pr_err("invalid channel settings on %s\n", ndev->name); + return -ENODATA; + } + + memcpy(chandef, &mac->chandef, sizeof(*chandef)); + return 0; +} + +static int qtnf_channel_switch(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_csa_settings *params) +{ + struct qtnf_wmac *mac = wiphy_priv(wiphy); + struct qtnf_vif *vif = qtnf_netdev_get_priv(dev); + int ret; + + pr_debug("%s: chan(%u) count(%u) radar(%u) block_tx(%u)\n", dev->name, + params->chandef.chan->hw_value, params->count, + params->radar_required, params->block_tx); + + switch (vif->wdev.iftype) { + case NL80211_IFTYPE_AP: + if (!(vif->bss_status & QTNF_STATE_AP_START)) { + pr_warn("AP not started on %s\n", dev->name); + return -ENOTCONN; + } + break; + default: + pr_err("unsupported vif type (%d) on %s\n", + vif->wdev.iftype, dev->name); + return -EOPNOTSUPP; + } + + if (vif->vifid != 0) { + if (!(mac->status & QTNF_MAC_CSA_ACTIVE)) + return -EOPNOTSUPP; + + if (!cfg80211_chandef_identical(¶ms->chandef, + &mac->csa_chandef)) + return -EINVAL; + + return 0; + } + + if (!cfg80211_chandef_valid(¶ms->chandef)) { + pr_err("%s: invalid channel\n", dev->name); + return -EINVAL; + } + + if (cfg80211_chandef_identical(¶ms->chandef, &mac->chandef)) { + pr_err("%s: switch request to the same channel\n", dev->name); + return -EALREADY; + } + + ret = qtnf_cmd_send_chan_switch(mac, params); + if (ret) + pr_warn("%s: failed to switch to channel (%u)\n", + dev->name, params->chandef.chan->hw_value); + + return ret; +} + static struct cfg80211_ops qtn_cfg80211_ops = { .add_virtual_intf = qtnf_add_virtual_intf, .change_virtual_intf = qtnf_change_virtual_intf, @@ -697,69 +899,49 @@ static struct cfg80211_ops qtn_cfg80211_ops = { .set_default_mgmt_key = qtnf_set_default_mgmt_key, .scan = qtnf_scan, .connect = qtnf_connect, - .disconnect = qtnf_disconnect + .disconnect = qtnf_disconnect, + .dump_survey = qtnf_dump_survey, + .get_channel = qtnf_get_channel, + .channel_switch = qtnf_channel_switch }; -static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy, +static void 
qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in, struct regulatory_request *req) { - struct qtnf_wmac *mac = wiphy_priv(wiphy); - struct qtnf_bus *bus; - struct qtnf_vif *vif; - struct qtnf_wmac *chan_mac; - int i; + struct qtnf_wmac *mac = wiphy_priv(wiphy_in); + struct qtnf_bus *bus = mac->bus; + struct wiphy *wiphy; + unsigned int mac_idx; enum nl80211_band band; - - bus = mac->bus; + int ret; pr_debug("MAC%u: initiator=%d alpha=%c%c\n", mac->macid, req->initiator, req->alpha2[0], req->alpha2[1]); - vif = qtnf_mac_get_base_vif(mac); - if (!vif) { - pr_err("MAC%u: primary VIF is not configured\n", mac->macid); - return; - } - - /* ignore non-ISO3166 country codes */ - for (i = 0; i < sizeof(req->alpha2); i++) { - if (req->alpha2[i] < 'A' || req->alpha2[i] > 'Z') { - pr_err("MAC%u: not an ISO3166 code\n", mac->macid); - return; - } - } - if (!strncasecmp(req->alpha2, bus->hw_info.alpha2_code, - sizeof(req->alpha2))) { - pr_warn("MAC%u: unchanged country code\n", mac->macid); - return; - } - - if (qtnf_cmd_send_regulatory_config(mac, req->alpha2)) { - pr_err("MAC%u: failed to configure regulatory\n", mac->macid); + ret = qtnf_cmd_reg_notify(bus, req); + if (ret) { + if (ret != -EOPNOTSUPP && ret != -EALREADY) + pr_err("failed to update reg domain to %c%c\n", + req->alpha2[0], req->alpha2[1]); return; } - for (i = 0; i < bus->hw_info.num_mac; i++) { - chan_mac = bus->mac[i]; - - if (!chan_mac) + for (mac_idx = 0; mac_idx < QTNF_MAX_MAC; ++mac_idx) { + if (!(bus->hw_info.mac_bitmap & (1 << mac_idx))) continue; - if (!(bus->hw_info.mac_bitmap & BIT(i))) - continue; + mac = bus->mac[mac_idx]; + wiphy = priv_to_wiphy(mac); for (band = 0; band < NUM_NL80211_BANDS; ++band) { if (!wiphy->bands[band]) continue; - if (qtnf_cmd_get_mac_chan_info(chan_mac, - wiphy->bands[band])) { - pr_err("MAC%u: can't get channel info\n", - chan_mac->macid); - qtnf_core_detach(bus); - - return; - } + ret = qtnf_cmd_get_mac_chan_info(mac, + wiphy->bands[band]); + if (ret) + pr_err("failed to get chan info for mac %u band %u\n", + mac_idx, band); } } } @@ -844,10 +1026,8 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) } iface_comb = kzalloc(sizeof(*iface_comb), GFP_KERNEL); - if (!iface_comb) { - ret = -ENOMEM; - goto out; - } + if (!iface_comb) + return -ENOMEM; ret = qtnf_wiphy_setup_if_comb(wiphy, iface_comb, &mac->macinfo); if (ret) @@ -869,6 +1049,7 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) wiphy->iface_combinations = iface_comb; wiphy->n_iface_combinations = 1; + wiphy->max_num_csa_counters = 2; /* Initialize cipher suites */ wiphy->cipher_suites = qtnf_cipher_suites; @@ -876,7 +1057,8 @@ wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME | WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD | - WIPHY_FLAG_AP_UAPSD; + WIPHY_FLAG_AP_UAPSD | + WIPHY_FLAG_HAS_CHANNEL_SWITCH; wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2; @@ -889,21 +1071,26 @@ ether_addr_copy(wiphy->perm_addr, mac->macaddr); if (hw_info->hw_capab & QLINK_HW_SUPPORTS_REG_UPDATE) { - pr_debug("device supports REG_UPDATE\n"); + wiphy->regulatory_flags |= REGULATORY_STRICT_REG | + REGULATORY_CUSTOM_REG; wiphy->reg_notifier = qtnf_cfg80211_reg_notifier; - pr_debug("hint regulatory about EP region: %c%c\n", - hw_info->alpha2_code[0], -
hw_info->alpha2_code[1]); - regulatory_hint(wiphy, hw_info->alpha2_code); + wiphy_apply_custom_regulatory(wiphy, hw_info->rd); } else { - pr_debug("device doesn't support REG_UPDATE\n"); wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED; } ret = wiphy_register(wiphy); + if (ret < 0) + goto out; + + if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) + ret = regulatory_set_wiphy_regd(wiphy, hw_info->rd); + else if (isalpha(hw_info->rd->alpha2[0]) && + isalpha(hw_info->rd->alpha2[1])) + ret = regulatory_hint(wiphy, hw_info->rd->alpha2); out: - if (ret < 0) { + if (ret) { kfree(iface_comb); return ret; } diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h index 5bd33124a7c8..6a4af52522b8 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h @@ -34,10 +34,14 @@ static inline void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted) .aborted = aborted, }; + mutex_lock(&mac->mac_lock); + if (mac->scan_req) { cfg80211_scan_done(mac->scan_req, &info); mac->scan_req = NULL; } + + mutex_unlock(&mac->mac_lock); } #endif /* _QTN_FMAC_CFG80211_H_ */ diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index b39dbc3d3c1f..4206886b110c 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -181,43 +181,11 @@ out: return ret; } -int qtnf_cmd_send_regulatory_config(struct qtnf_wmac *mac, const char *alpha2) -{ - struct sk_buff *cmd_skb; - u16 res_code = QLINK_CMD_RESULT_OK; - int ret; - - cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD, - QLINK_CMD_REG_REGION, - sizeof(struct qlink_cmd)); - if (unlikely(!cmd_skb)) - return -ENOMEM; - - qtnf_cmd_skb_put_tlv_arr(cmd_skb, WLAN_EID_COUNTRY, alpha2, - QTNF_MAX_ALPHA_LEN); - - ret = qtnf_cmd_send(mac->bus, cmd_skb, &res_code); - - if (unlikely(ret)) - goto out; - - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code); - ret = -EFAULT; - goto out; - } - - memcpy(mac->bus->hw_info.alpha2_code, alpha2, - sizeof(mac->bus->hw_info.alpha2_code)); -out: - return ret; -} - int qtnf_cmd_send_config_ap(struct qtnf_vif *vif) { struct sk_buff *cmd_skb; struct qtnf_bss_config *bss_cfg = &vif->bss_cfg; - struct cfg80211_chan_def *chandef = &bss_cfg->chandef; + struct cfg80211_chan_def *chandef = &vif->mac->chandef; struct qlink_tlv_channel *qchan; struct qlink_auth_encr aen; u16 res_code = QLINK_CMD_RESULT_OK; @@ -848,25 +816,168 @@ out: return ret; } +static u32 qtnf_cmd_resp_reg_rule_flags_parse(u32 qflags) +{ + u32 flags = 0; + + if (qflags & QLINK_RRF_NO_OFDM) + flags |= NL80211_RRF_NO_OFDM; + + if (qflags & QLINK_RRF_NO_CCK) + flags |= NL80211_RRF_NO_CCK; + + if (qflags & QLINK_RRF_NO_INDOOR) + flags |= NL80211_RRF_NO_INDOOR; + + if (qflags & QLINK_RRF_NO_OUTDOOR) + flags |= NL80211_RRF_NO_OUTDOOR; + + if (qflags & QLINK_RRF_DFS) + flags |= NL80211_RRF_DFS; + + if (qflags & QLINK_RRF_PTP_ONLY) + flags |= NL80211_RRF_PTP_ONLY; + + if (qflags & QLINK_RRF_PTMP_ONLY) + flags |= NL80211_RRF_PTMP_ONLY; + + if (qflags & QLINK_RRF_NO_IR) + flags |= NL80211_RRF_NO_IR; + + if (qflags & QLINK_RRF_AUTO_BW) + flags |= NL80211_RRF_AUTO_BW; + + if (qflags & QLINK_RRF_IR_CONCURRENT) + flags |= NL80211_RRF_IR_CONCURRENT; + + if (qflags & QLINK_RRF_NO_HT40MINUS) + flags |= NL80211_RRF_NO_HT40MINUS; + + if (qflags & QLINK_RRF_NO_HT40PLUS) + flags |= 
NL80211_RRF_NO_HT40PLUS; + + if (qflags & QLINK_RRF_NO_80MHZ) + flags |= NL80211_RRF_NO_80MHZ; + + if (qflags & QLINK_RRF_NO_160MHZ) + flags |= NL80211_RRF_NO_160MHZ; + + return flags; +} + static int qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus, - const struct qlink_resp_get_hw_info *resp) + const struct qlink_resp_get_hw_info *resp, + size_t info_len) { struct qtnf_hw_info *hwinfo = &bus->hw_info; + const struct qlink_tlv_hdr *tlv; + const struct qlink_tlv_reg_rule *tlv_rule; + struct ieee80211_reg_rule *rule; + u16 tlv_type; + u16 tlv_value_len; + unsigned int rule_idx = 0; + + if (WARN_ON(resp->n_reg_rules > NL80211_MAX_SUPP_REG_RULES)) + return -E2BIG; + + hwinfo->rd = kzalloc(sizeof(*hwinfo->rd) + + sizeof(struct ieee80211_reg_rule) + * resp->n_reg_rules, GFP_KERNEL); + + if (!hwinfo->rd) + return -ENOMEM; hwinfo->num_mac = resp->num_mac; hwinfo->mac_bitmap = resp->mac_bitmap; hwinfo->fw_ver = le32_to_cpu(resp->fw_ver); hwinfo->ql_proto_ver = le16_to_cpu(resp->ql_proto_ver); - memcpy(hwinfo->alpha2_code, resp->alpha2_code, - sizeof(hwinfo->alpha2_code)); hwinfo->total_tx_chain = resp->total_tx_chain; hwinfo->total_rx_chain = resp->total_rx_chain; hwinfo->hw_capab = le32_to_cpu(resp->hw_capab); + hwinfo->rd->n_reg_rules = resp->n_reg_rules; + hwinfo->rd->alpha2[0] = resp->alpha2[0]; + hwinfo->rd->alpha2[1] = resp->alpha2[1]; + + switch (resp->dfs_region) { + case QLINK_DFS_FCC: + hwinfo->rd->dfs_region = NL80211_DFS_FCC; + break; + case QLINK_DFS_ETSI: + hwinfo->rd->dfs_region = NL80211_DFS_ETSI; + break; + case QLINK_DFS_JP: + hwinfo->rd->dfs_region = NL80211_DFS_JP; + break; + case QLINK_DFS_UNSET: + default: + hwinfo->rd->dfs_region = NL80211_DFS_UNSET; + break; + } + + tlv = (const struct qlink_tlv_hdr *)resp->info; + + while (info_len >= sizeof(*tlv)) { + tlv_type = le16_to_cpu(tlv->type); + tlv_value_len = le16_to_cpu(tlv->len); + + if (tlv_value_len + sizeof(*tlv) > info_len) { + pr_warn("malformed TLV 0x%.2X; LEN: %u\n", + tlv_type, tlv_value_len); + return -EINVAL; + } + + switch (tlv_type) { + case QTN_TLV_ID_REG_RULE: + if (rule_idx >= resp->n_reg_rules) { + pr_warn("unexpected number of rules: %u\n", + resp->n_reg_rules); + return -EINVAL; + } + + if (tlv_value_len != sizeof(*tlv_rule) - sizeof(*tlv)) { + pr_warn("malformed TLV 0x%.2X; LEN: %u\n", + tlv_type, tlv_value_len); + return -EINVAL; + } + + tlv_rule = (const struct qlink_tlv_reg_rule *)tlv; + rule = &hwinfo->rd->reg_rules[rule_idx++]; + + rule->freq_range.start_freq_khz = + le32_to_cpu(tlv_rule->start_freq_khz); + rule->freq_range.end_freq_khz = + le32_to_cpu(tlv_rule->end_freq_khz); + rule->freq_range.max_bandwidth_khz = + le32_to_cpu(tlv_rule->max_bandwidth_khz); + rule->power_rule.max_antenna_gain = + le32_to_cpu(tlv_rule->max_antenna_gain); + rule->power_rule.max_eirp = + le32_to_cpu(tlv_rule->max_eirp); + rule->dfs_cac_ms = + le32_to_cpu(tlv_rule->dfs_cac_ms); + rule->flags = qtnf_cmd_resp_reg_rule_flags_parse( + le32_to_cpu(tlv_rule->flags)); + break; + default: + break; + } + + info_len -= tlv_value_len + sizeof(*tlv); + tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len); + } + + if (rule_idx != resp->n_reg_rules) { + pr_warn("unexpected number of rules: expected %u got %u\n", + resp->n_reg_rules, rule_idx); + kfree(hwinfo->rd); + hwinfo->rd = NULL; + return -EINVAL; + } pr_info("fw_version=%d, MACs map %#x, alpha2=\"%c%c\", chains Tx=%u Rx=%u\n", hwinfo->fw_ver, hwinfo->mac_bitmap, - hwinfo->alpha2_code[0], hwinfo->alpha2_code[1], + hwinfo->rd->alpha2[0], hwinfo->rd->alpha2[1], 
hwinfo->total_tx_chain, hwinfo->total_rx_chain); return 0; @@ -878,7 +989,7 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac, struct ieee80211_iface_limit *limits = NULL; const struct qlink_iface_limit *limit_record; size_t record_count = 0, rec = 0; - u16 tlv_type, tlv_value_len, mask; + u16 tlv_type, tlv_value_len; struct qlink_iface_comb_num *comb; size_t tlv_full_len; const struct qlink_tlv_hdr *tlv; @@ -931,10 +1042,12 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac, limit_record = (void *)tlv->val; limits[rec].max = le16_to_cpu(limit_record->max_num); - mask = le16_to_cpu(limit_record->type_mask); - limits[rec].types = qlink_iface_type_mask_to_nl(mask); - /* only AP and STA modes are supported */ + limits[rec].types = qlink_iface_type_to_nl_mask( + le16_to_cpu(limit_record->type)); + + /* supported modes: STA, AP */ limits[rec].types &= BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_AP_VLAN) | BIT(NL80211_IFTYPE_STATION); pr_debug("MAC%u: MAX: %u; TYPES: %.4X\n", mac->macid, @@ -946,6 +1059,7 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac, default: break; } + tlv_buf_size -= tlv_full_len; tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len); } @@ -1013,14 +1127,24 @@ qtnf_cmd_resp_fill_channels_info(struct ieee80211_supported_band *band, unsigned int chidx = 0; u32 qflags; - kfree(band->channels); - band->channels = NULL; + if (band->channels) { + if (band->n_channels == resp->num_chans) { + memset(band->channels, 0, + sizeof(*band->channels) * band->n_channels); + } else { + kfree(band->channels); + band->n_channels = 0; + band->channels = NULL; + } + } band->n_channels = resp->num_chans; if (band->n_channels == 0) return 0; - band->channels = kcalloc(band->n_channels, sizeof(*chan), GFP_KERNEL); + if (!band->channels) + band->channels = kcalloc(band->n_channels, sizeof(*chan), + GFP_KERNEL); if (!band->channels) { band->n_channels = 0; return -ENOMEM; @@ -1212,6 +1336,62 @@ static int qtnf_cmd_resp_proc_phy_params(struct qtnf_wmac *mac, return 0; } +static int +qtnf_cmd_resp_proc_chan_stat_info(struct qtnf_chan_stats *stats, + const u8 *payload, size_t payload_len) +{ + struct qlink_chan_stats *qlink_stats; + const struct qlink_tlv_hdr *tlv; + size_t tlv_full_len; + u16 tlv_value_len; + u16 tlv_type; + + tlv = (struct qlink_tlv_hdr *)payload; + while (payload_len >= sizeof(struct qlink_tlv_hdr)) { + tlv_type = le16_to_cpu(tlv->type); + tlv_value_len = le16_to_cpu(tlv->len); + tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr); + if (tlv_full_len > payload_len) { + pr_warn("malformed TLV 0x%.2X; LEN: %u\n", + tlv_type, tlv_value_len); + return -EINVAL; + } + switch (tlv_type) { + case QTN_TLV_ID_CHANNEL_STATS: + if (unlikely(tlv_value_len != sizeof(*qlink_stats))) { + pr_err("invalid CHANNEL_STATS entry size\n"); + return -EINVAL; + } + + qlink_stats = (void *)tlv->val; + + stats->chan_num = le32_to_cpu(qlink_stats->chan_num); + stats->cca_tx = le32_to_cpu(qlink_stats->cca_tx); + stats->cca_rx = le32_to_cpu(qlink_stats->cca_rx); + stats->cca_busy = le32_to_cpu(qlink_stats->cca_busy); + stats->cca_try = le32_to_cpu(qlink_stats->cca_try); + stats->chan_noise = qlink_stats->chan_noise; + + pr_debug("chan(%u) try(%u) busy(%u) noise(%d)\n", + stats->chan_num, stats->cca_try, + stats->cca_busy, stats->chan_noise); + break; + default: + pr_warn("Unknown TLV type: %#x\n", + le16_to_cpu(tlv->type)); + } + payload_len -= tlv_full_len; + tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len); + } + + if (payload_len) { + 
pr_warn("malformed TLV buf; bytes left: %zu\n", payload_len); + return -EINVAL; + } + + return 0; +} + int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac) { struct sk_buff *cmd_skb, *resp_skb = NULL; @@ -1256,6 +1436,7 @@ int qtnf_cmd_get_hw_info(struct qtnf_bus *bus) const struct qlink_resp_get_hw_info *resp; u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; + size_t info_len; cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD, QLINK_CMD_GET_HW_INFO, @@ -1266,7 +1447,7 @@ int qtnf_cmd_get_hw_info(struct qtnf_bus *bus) qtnf_bus_lock(bus); ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb, &res_code, - sizeof(*resp), NULL); + sizeof(*resp), &info_len); if (unlikely(ret)) goto out; @@ -1278,7 +1459,7 @@ int qtnf_cmd_get_hw_info(struct qtnf_bus *bus) } resp = (const struct qlink_resp_get_hw_info *)resp_skb->data; - ret = qtnf_cmd_resp_proc_hw_info(bus, resp); + ret = qtnf_cmd_resp_proc_hw_info(bus, resp, info_len); out: qtnf_bus_unlock(bus); @@ -1320,6 +1501,9 @@ int qtnf_cmd_get_mac_chan_info(struct qtnf_wmac *mac, cmd = (struct qlink_cmd_chans_info_get *)cmd_skb->data; cmd->band = qband; + + qtnf_bus_lock(mac->bus); + ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, &res_code, sizeof(*resp), &info_len); @@ -1343,6 +1527,7 @@ int qtnf_cmd_get_mac_chan_info(struct qtnf_wmac *mac, ret = qtnf_cmd_resp_fill_channels_info(band, resp, info_len); out: + qtnf_bus_unlock(mac->bus); consume_skb(resp_skb); return ret; @@ -1676,10 +1861,27 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac, cmd = (struct qlink_cmd_change_sta *)cmd_skb->data; ether_addr_copy(cmd->sta_addr, mac); - cmd->sta_flags_mask = cpu_to_le32(qtnf_encode_sta_flags( - params->sta_flags_mask)); - cmd->sta_flags_set = cpu_to_le32(qtnf_encode_sta_flags( - params->sta_flags_set)); + + switch (vif->wdev.iftype) { + case NL80211_IFTYPE_AP: + cmd->if_type = cpu_to_le16(QLINK_IFTYPE_AP); + cmd->sta_flags_mask = cpu_to_le32(qtnf_encode_sta_flags( + params->sta_flags_mask)); + cmd->sta_flags_set = cpu_to_le32(qtnf_encode_sta_flags( + params->sta_flags_set)); + break; + case NL80211_IFTYPE_STATION: + cmd->if_type = cpu_to_le16(QLINK_IFTYPE_STATION); + cmd->sta_flags_mask = cpu_to_le32(qtnf_encode_sta_flags( + params->sta_flags_mask)); + cmd->sta_flags_set = cpu_to_le32(qtnf_encode_sta_flags( + params->sta_flags_set)); + break; + default: + pr_err("unsupported iftype %d\n", vif->wdev.iftype); + ret = -EINVAL; + goto out; + } ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); if (unlikely(ret)) @@ -1853,8 +2055,8 @@ int qtnf_cmd_send_connect(struct qtnf_vif *vif, ether_addr_copy(cmd->bssid, bss_cfg->bssid); - if (bss_cfg->chandef.chan) - cmd->freq = cpu_to_le16(bss_cfg->chandef.chan->center_freq); + if (vif->mac->chandef.chan) + cmd->channel = cpu_to_le16(vif->mac->chandef.chan->hw_value); cmd->bg_scan_period = cpu_to_le16(bss_cfg->bg_scan_period); @@ -1976,3 +2178,183 @@ out: qtnf_bus_unlock(vif->mac->bus); return ret; } + +int qtnf_cmd_reg_notify(struct qtnf_bus *bus, struct regulatory_request *req) +{ + struct sk_buff *cmd_skb; + int ret; + u16 res_code; + struct qlink_cmd_reg_notify *cmd; + + cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD, + QLINK_CMD_REG_NOTIFY, + sizeof(*cmd)); + if (!cmd_skb) + return -ENOMEM; + + cmd = (struct qlink_cmd_reg_notify *)cmd_skb->data; + cmd->alpha2[0] = req->alpha2[0]; + cmd->alpha2[1] = req->alpha2[1]; + + switch (req->initiator) { + case NL80211_REGDOM_SET_BY_CORE: + cmd->initiator = QLINK_REGDOM_SET_BY_CORE; + break; + case 
NL80211_REGDOM_SET_BY_USER: + cmd->initiator = QLINK_REGDOM_SET_BY_USER; + break; + case NL80211_REGDOM_SET_BY_DRIVER: + cmd->initiator = QLINK_REGDOM_SET_BY_DRIVER; + break; + case NL80211_REGDOM_SET_BY_COUNTRY_IE: + cmd->initiator = QLINK_REGDOM_SET_BY_COUNTRY_IE; + break; + } + + switch (req->user_reg_hint_type) { + case NL80211_USER_REG_HINT_USER: + cmd->user_reg_hint_type = QLINK_USER_REG_HINT_USER; + break; + case NL80211_USER_REG_HINT_CELL_BASE: + cmd->user_reg_hint_type = QLINK_USER_REG_HINT_CELL_BASE; + break; + case NL80211_USER_REG_HINT_INDOOR: + cmd->user_reg_hint_type = QLINK_USER_REG_HINT_INDOOR; + break; + } + + qtnf_bus_lock(bus); + + ret = qtnf_cmd_send(bus, cmd_skb, &res_code); + if (ret) + goto out; + + switch (res_code) { + case QLINK_CMD_RESULT_ENOTSUPP: + pr_warn("reg update not supported\n"); + ret = -EOPNOTSUPP; + break; + case QLINK_CMD_RESULT_EALREADY: + pr_info("regulatory domain is already set to %c%c", + req->alpha2[0], req->alpha2[1]); + ret = -EALREADY; + break; + case QLINK_CMD_RESULT_OK: + ret = 0; + break; + default: + ret = -EFAULT; + break; + } + +out: + qtnf_bus_unlock(bus); + + return ret; +} + +int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel, + struct qtnf_chan_stats *stats) +{ + struct sk_buff *cmd_skb, *resp_skb = NULL; + struct qlink_cmd_get_chan_stats *cmd; + struct qlink_resp_get_chan_stats *resp; + size_t var_data_len; + u16 res_code = QLINK_CMD_RESULT_OK; + int ret = 0; + + cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD, + QLINK_CMD_CHAN_STATS, + sizeof(*cmd)); + if (!cmd_skb) + return -ENOMEM; + + qtnf_bus_lock(mac->bus); + + cmd = (struct qlink_cmd_get_chan_stats *)cmd_skb->data; + cmd->channel = cpu_to_le16(channel); + + ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, &res_code, + sizeof(*resp), &var_data_len); + if (unlikely(ret)) { + qtnf_bus_unlock(mac->bus); + return ret; + } + + if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { + switch (res_code) { + case QLINK_CMD_RESULT_ENOTFOUND: + ret = -ENOENT; + break; + default: + pr_err("cmd exec failed: 0x%.4X\n", res_code); + ret = -EFAULT; + break; + } + goto out; + } + + resp = (struct qlink_resp_get_chan_stats *)resp_skb->data; + ret = qtnf_cmd_resp_proc_chan_stat_info(stats, resp->info, + var_data_len); + +out: + qtnf_bus_unlock(mac->bus); + consume_skb(resp_skb); + return ret; +} + +int qtnf_cmd_send_chan_switch(struct qtnf_wmac *mac, + struct cfg80211_csa_settings *params) +{ + struct qlink_cmd_chan_switch *cmd; + struct sk_buff *cmd_skb; + u16 res_code = QLINK_CMD_RESULT_OK; + int ret; + + cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, 0x0, + QLINK_CMD_CHAN_SWITCH, + sizeof(*cmd)); + + if (unlikely(!cmd_skb)) + return -ENOMEM; + + qtnf_bus_lock(mac->bus); + + cmd = (struct qlink_cmd_chan_switch *)cmd_skb->data; + cmd->channel = cpu_to_le16(params->chandef.chan->hw_value); + cmd->radar_required = params->radar_required; + cmd->block_tx = params->block_tx; + cmd->beacon_count = params->count; + + ret = qtnf_cmd_send(mac->bus, cmd_skb, &res_code); + + if (unlikely(ret)) + goto out; + + switch (res_code) { + case QLINK_CMD_RESULT_OK: + memcpy(&mac->csa_chandef, ¶ms->chandef, + sizeof(mac->csa_chandef)); + mac->status |= QTNF_MAC_CSA_ACTIVE; + ret = 0; + break; + case QLINK_CMD_RESULT_ENOTFOUND: + ret = -ENOENT; + break; + case QLINK_CMD_RESULT_ENOTSUPP: + ret = -EOPNOTSUPP; + break; + case QLINK_CMD_RESULT_EALREADY: + ret = -EALREADY; + break; + case QLINK_CMD_RESULT_INVALID: + default: + ret = -EFAULT; + break; + } + +out: + 
qtnf_bus_unlock(mac->bus); return ret; } diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h index 6c51854ef5e7..783b20364296 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.h +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h @@ -70,5 +70,10 @@ int qtnf_cmd_send_disconnect(struct qtnf_vif *vif, u16 reason_code); int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif, bool up); +int qtnf_cmd_reg_notify(struct qtnf_bus *bus, struct regulatory_request *req); +int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel, + struct qtnf_chan_stats *stats); +int qtnf_cmd_send_chan_switch(struct qtnf_wmac *mac, + struct cfg80211_csa_settings *params); #endif /* QLINK_COMMANDS_H_ */ diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c index f053532c0e87..5e60180482d1 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.c +++ b/drivers/net/wireless/quantenna/qtnfmac/core.c @@ -288,6 +288,8 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus, mac->iflist[i].mac = mac; mac->iflist[i].vifid = i; qtnf_sta_list_init(&mac->iflist[i].sta_list); + mutex_init(&mac->mac_lock); + init_timer(&mac->scan_timeout); } qtnf_mac_init_primary_intf(mac); @@ -549,6 +551,9 @@ void qtnf_core_detach(struct qtnf_bus *bus) destroy_workqueue(bus->workqueue); } + kfree(bus->hw_info.rd); + bus->hw_info.rd = NULL; + qtnf_trans_free(bus); } EXPORT_SYMBOL_GPL(qtnf_core_detach); diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h index a616434281cf..066fcd1095a0 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.h +++ b/drivers/net/wireless/quantenna/qtnfmac/core.h @@ -42,11 +42,11 @@ #define QTNF_MAX_SSID_LIST_LENGTH 2 #define QTNF_MAX_VSIE_LEN 255 -#define QTNF_MAX_ALPHA_LEN 2 #define QTNF_MAX_INTF 8 #define QTNF_MAX_EVENT_QUEUE_LEN 255 #define QTNF_DEFAULT_BG_SCAN_PERIOD 300 #define QTNF_MAX_BG_SCAN_PERIOD 0xffff +#define QTNF_SCAN_TIMEOUT_SEC 15 #define QTNF_DEF_BSS_PRIORITY 0 #define QTNF_DEF_WDOG_TIMEOUT 5 @@ -68,7 +68,6 @@ struct qtnf_bss_config { u16 auth_type; bool privacy; enum nl80211_mfp mfp; - struct cfg80211_chan_def chandef; struct cfg80211_crypto_settings crypto; u16 bg_scan_period; u32 connect_flags; @@ -90,6 +89,10 @@ enum qtnf_sta_state { QTNF_STA_CONNECTED }; +enum qtnf_mac_status { + QTNF_MAC_CSA_ACTIVE = BIT(0) +}; + struct qtnf_vif { struct wireless_dev wdev; u8 vifid; @@ -125,25 +128,39 @@ struct qtnf_mac_info { size_t n_limits; }; +struct qtnf_chan_stats { + u32 chan_num; + u32 cca_tx; + u32 cca_rx; + u32 cca_busy; + u32 cca_try; + s8 chan_noise; +}; + struct qtnf_wmac { u8 macid; u8 wiphy_registered; u8 macaddr[ETH_ALEN]; + u32 status; struct qtnf_bus *bus; struct qtnf_mac_info macinfo; struct qtnf_vif iflist[QTNF_MAX_INTF]; struct cfg80211_scan_request *scan_req; + struct cfg80211_chan_def chandef; + struct cfg80211_chan_def csa_chandef; + struct mutex mac_lock; /* lock during wmac specific ops */ + struct timer_list scan_timeout; }; struct qtnf_hw_info { + u16 ql_proto_ver; u8 num_mac; u8 mac_bitmap; - u8 alpha2_code[QTNF_MAX_ALPHA_LEN]; u32 fw_ver; - u16 ql_proto_ver; + u32 hw_capab; + struct ieee80211_regdomain *rd; u8 total_tx_chain; u8 total_rx_chain; - u32 hw_capab; }; struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac); diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c index 9b61e9a83670..0fc2814eafad 100644 ---
a/drivers/net/wireless/quantenna/qtnfmac/event.c +++ b/drivers/net/wireless/quantenna/qtnfmac/event.c @@ -211,8 +211,8 @@ qtnf_event_handle_bss_leave(struct qtnf_vif *vif, pr_debug("VIF%u.%u: disconnected\n", vif->mac->macid, vif->vifid); - cfg80211_disconnected(vif->netdev, leave_info->reason, NULL, 0, 0, - GFP_KERNEL); + cfg80211_disconnected(vif->netdev, le16_to_cpu(leave_info->reason), + NULL, 0, 0, GFP_KERNEL); vif->sta_state = QTNF_STA_DISCONNECTED; netif_carrier_off(vif->netdev); @@ -345,11 +345,70 @@ qtnf_event_handle_scan_complete(struct qtnf_wmac *mac, return -EINVAL; } + if (timer_pending(&mac->scan_timeout)) + del_timer_sync(&mac->scan_timeout); qtnf_scan_done(mac, le32_to_cpu(status->flags) & QLINK_SCAN_ABORTED); return 0; } +static int +qtnf_event_handle_freq_change(struct qtnf_wmac *mac, + const struct qlink_event_freq_change *data, + u16 len) +{ + struct wiphy *wiphy = priv_to_wiphy(mac); + struct cfg80211_chan_def chandef; + struct ieee80211_channel *chan; + struct qtnf_vif *vif; + int freq; + int i; + + if (len < sizeof(*data)) { + pr_err("payload is too short\n"); + return -EINVAL; + } + + freq = le32_to_cpu(data->freq); + chan = ieee80211_get_channel(wiphy, freq); + if (!chan) { + pr_err("channel at %d MHz not found\n", freq); + return -EINVAL; + } + + pr_debug("MAC%d switch to new channel %u MHz\n", mac->macid, freq); + + if (mac->status & QTNF_MAC_CSA_ACTIVE) { + mac->status &= ~QTNF_MAC_CSA_ACTIVE; + if (chan->hw_value != mac->csa_chandef.chan->hw_value) + pr_warn("unexpected switch to %u during CSA to %u\n", + chan->hw_value, + mac->csa_chandef.chan->hw_value); + } + + /* FIXME: need to figure out proper nl80211_channel_type value */ + cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20); + /* fall-back to minimal safe chandef description */ + if (!cfg80211_chandef_valid(&chandef)) + cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20); + + memcpy(&mac->chandef, &chandef, sizeof(mac->chandef)); + + for (i = 0; i < QTNF_MAX_INTF; i++) { + vif = &mac->iflist[i]; + if (vif->wdev.iftype == NL80211_IFTYPE_UNSPECIFIED) + continue; + + if (vif->netdev) { + mutex_lock(&vif->wdev.mtx); + cfg80211_ch_switch_notify(vif->netdev, &chandef); + mutex_unlock(&vif->wdev.mtx); + } + } + + return 0; +} + static int qtnf_event_parse(struct qtnf_wmac *mac, const struct sk_buff *event_skb) { @@ -400,6 +459,10 @@ static int qtnf_event_parse(struct qtnf_wmac *mac, ret = qtnf_event_handle_bss_leave(vif, (const void *)event, event_len); break; + case QLINK_EVENT_FREQ_CHANGE: + ret = qtnf_event_handle_freq_change(mac, (const void *)event, + event_len); + break; default: pr_warn("unknown event type: %x\n", event_id); break; diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c index 7fc4f0d6a9ad..ae8acc1bf291 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c @@ -274,32 +274,6 @@ static int qtnf_pcie_init_memory(struct qtnf_pcie_bus_priv *priv) return 0; } -static int -qtnf_pcie_init_dma_mask(struct qtnf_pcie_bus_priv *priv, u64 dma_mask) -{ - int ret; - - ret = dma_supported(&priv->pdev->dev, dma_mask); - if (!ret) { - pr_err("DMA mask %llu not supported\n", dma_mask); - return ret; - } - - ret = pci_set_dma_mask(priv->pdev, dma_mask); - if (ret) { - pr_err("failed to set DMA mask %llu\n", dma_mask); - return ret; - } - - ret = pci_set_consistent_dma_mask(priv->pdev, dma_mask); - if (ret) { - pr_err("failed to set consistent DMA mask %llu\n", 
dma_mask); - return ret; - } - - return ret; -} - static void qtnf_tune_pcie_mps(struct qtnf_pcie_bus_priv *priv) { struct pci_dev *pdev = priv->pdev; @@ -1212,7 +1186,7 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_base; } - ret = qtnf_pcie_init_dma_mask(pcie_priv, DMA_BIT_MASK(32)); + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (ret) { pr_err("PCIE DMA mask init failed\n"); goto err_base; @@ -1336,7 +1310,7 @@ static SIMPLE_DEV_PM_OPS(qtnf_pcie_pm_ops, qtnf_pcie_suspend, qtnf_pcie_resume); #endif -static struct pci_device_id qtnf_pcie_devid_table[] = { +static const struct pci_device_id qtnf_pcie_devid_table[] = { { PCIE_VENDOR_ID_QUANTENNA, PCIE_DEVICE_ID_QTN_PEARL, PCI_ANY_ID, PCI_ANY_ID, 0, 0, diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h index 6eafc15e0065..a8242f678496 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h @@ -19,7 +19,7 @@ #include <linux/ieee80211.h> -#define QLINK_PROTO_VER 3 +#define QLINK_PROTO_VER 5 #define QLINK_MACID_RSVD 0xFF #define QLINK_VIFID_RSVD 0xFF @@ -77,6 +77,7 @@ enum qlink_iface_type { QLINK_IFTYPE_ADHOC = 3, QLINK_IFTYPE_MONITOR = 4, QLINK_IFTYPE_WDS = 5, + QLINK_IFTYPE_AP_VLAN = 6, }; /** @@ -85,12 +86,12 @@ enum qlink_iface_type { * Data describing a single virtual interface. * * @if_type: Mode of interface operation, one of &enum qlink_iface_type - * @flags: interface flagsmap. + * @vlanid: VLAN ID for AP_VLAN interface type * @mac_addr: MAC address of virtual interface. */ struct qlink_intf_info { __le16 if_type; - __le16 flags; + __le16 vlanid; u8 mac_addr[ETH_ALEN]; u8 rsvd[2]; } __packed; @@ -133,6 +134,9 @@ enum qlink_channel_width { * number of operational channels and information on each of the channel. * This command is generic to a specified MAC, interface index must be set * to QLINK_VIFID_RSVD in command header. + * @QLINK_CMD_REG_NOTIFY: notify device about regulatory domain change. This + * command is supported only if device reports QLINK_HW_SUPPORTS_REG_UPDATE + * capability. */ enum qlink_cmd_type { QLINK_CMD_FW_INIT = 0x0001, @@ -148,8 +152,9 @@ enum qlink_cmd_type { QLINK_CMD_DEL_INTF = 0x0016, QLINK_CMD_CHANGE_INTF = 0x0017, QLINK_CMD_UPDOWN_INTF = 0x0018, - QLINK_CMD_REG_REGION = 0x0019, + QLINK_CMD_REG_NOTIFY = 0x0019, QLINK_CMD_CHANS_INFO_GET = 0x001A, + QLINK_CMD_CHAN_SWITCH = 0x001B, QLINK_CMD_CONFIG_AP = 0x0020, QLINK_CMD_START_AP = 0x0021, QLINK_CMD_STOP_AP = 0x0022, @@ -161,6 +166,7 @@ enum qlink_cmd_type { QLINK_CMD_CHANGE_STA = 0x0051, QLINK_CMD_DEL_STA = 0x0052, QLINK_CMD_SCAN = 0x0053, + QLINK_CMD_CHAN_STATS = 0x0054, QLINK_CMD_CONNECT = 0x0060, QLINK_CMD_DISCONNECT = 0x0061, }; @@ -287,6 +293,7 @@ struct qlink_cmd_get_sta_info { * @pairwise: whether to use pairwise key. * @addr: MAC address of a STA key is being installed to. * @cipher: cipher suite. + * @vlanid: VLAN ID for AP_VLAN interface type * @key_data: key data itself. 
*/ struct qlink_cmd_add_key { @@ -295,6 +302,7 @@ struct qlink_cmd_add_key { u8 pairwise; u8 addr[ETH_ALEN]; __le32 cipher; + __le16 vlanid; u8 key_data[0]; } __packed; @@ -341,12 +349,16 @@ struct qlink_cmd_set_def_mgmt_key { * * @sta_flags_mask: STA flags mask, bitmap of &enum qlink_sta_flags * @sta_flags_set: STA flags values, bitmap of &enum qlink_sta_flags + * @if_type: Mode of interface operation, one of &enum qlink_iface_type + * @vlanid: VLAN ID to assign to specific STA * @sta_addr: address of the STA for which parameters are set. */ struct qlink_cmd_change_sta { struct qlink_cmd chdr; __le32 sta_flags_mask; __le32 sta_flags_set; + __le16 if_type; + __le16 vlanid; u8 sta_addr[ETH_ALEN]; } __packed; @@ -380,7 +392,7 @@ enum qlink_sta_connect_flags { struct qlink_cmd_connect { struct qlink_cmd chdr; __le32 flags; - __le16 freq; + __le16 channel; __le16 bg_scan_period; u8 bssid[ETH_ALEN]; u8 payload[0]; @@ -430,6 +442,70 @@ struct qlink_cmd_chans_info_get { u8 band; } __packed; +/** + * struct qlink_cmd_get_chan_stats - data for QLINK_CMD_CHAN_STATS command + * + * @channel: channel number according to 802.11 17.3.8.3.2 and Annex J + */ +struct qlink_cmd_get_chan_stats { + struct qlink_cmd chdr; + __le16 channel; +} __packed; + +/** + * enum qlink_reg_initiator - Indicates the initiator of a reg domain request + * + * See &enum nl80211_reg_initiator for more info. + */ +enum qlink_reg_initiator { + QLINK_REGDOM_SET_BY_CORE, + QLINK_REGDOM_SET_BY_USER, + QLINK_REGDOM_SET_BY_DRIVER, + QLINK_REGDOM_SET_BY_COUNTRY_IE, +}; + +/** + * enum qlink_user_reg_hint_type - type of user regulatory hint + * + * See &enum nl80211_user_reg_hint_type for more info. + */ +enum qlink_user_reg_hint_type { + QLINK_USER_REG_HINT_USER = 0, + QLINK_USER_REG_HINT_CELL_BASE = 1, + QLINK_USER_REG_HINT_INDOOR = 2, +}; + +/** + * struct qlink_cmd_reg_notify - data for QLINK_CMD_REG_NOTIFY command + * + * @alpha2: the ISO / IEC 3166 alpha2 country code. + * @initiator: which entity sent the request, one of &enum qlink_reg_initiator. + * @user_reg_hint_type: type of hint for QLINK_REGDOM_SET_BY_USER request, one + * of &enum qlink_user_reg_hint_type. + */ +struct qlink_cmd_reg_notify { + struct qlink_cmd chdr; + u8 alpha2[2]; + u8 initiator; + u8 user_reg_hint_type; +} __packed; + +/** + * struct qlink_cmd_chan_switch - data for QLINK_CMD_CHAN_SWITCH command + * + * @channel: channel number according to 802.11 17.3.8.3.2 and Annex J + * @radar_required: whether radar detection is required on the new channel + * @block_tx: whether transmissions should be blocked while changing + * @beacon_count: number of beacons until switch + */ +struct qlink_cmd_chan_switch { + struct qlink_cmd chdr; + __le16 channel; + u8 radar_required; + u8 block_tx; + u8 beacon_count; +} __packed; + /* QLINK Command Responses messages related definitions */ @@ -438,6 +514,7 @@ enum qlink_cmd_result { QLINK_CMD_RESULT_INVALID, QLINK_CMD_RESULT_ENOTSUPP, QLINK_CMD_RESULT_ENOTFOUND, + QLINK_CMD_RESULT_EALREADY, }; /** @@ -497,6 +574,18 @@ struct qlink_resp_get_mac_info { } __packed; /** + * enum qlink_dfs_regions - regulatory DFS regions + * + * Corresponds to &enum nl80211_dfs_regions. + */ +enum qlink_dfs_regions { + QLINK_DFS_UNSET = 0, + QLINK_DFS_FCC = 1, + QLINK_DFS_ETSI = 2, + QLINK_DFS_JP = 3, +}; + +/** * struct qlink_resp_get_hw_info - response for QLINK_CMD_GET_HW_INFO command * * Description of wireless hardware capabilities and features. 
@@ -504,22 +593,29 @@ struct qlink_resp_get_mac_info { * @fw_ver: wireless hardware firmware version. * @hw_capab: Bitmap of capabilities supported by firmware. * @ql_proto_ver: Version of QLINK protocol used by firmware. - * @country_code: country code ID firmware is configured to. * @num_mac: Number of separate physical radio devices provided by hardware. * @mac_bitmap: Bitmap of MAC IDs that are active and can be used in firmware. * @total_tx_chains: total number of transmit chains used by device. * @total_rx_chains: total number of receive chains. + * @alpha2: country code ID firmware is configured to. + * @n_reg_rules: number of regulatory rules TLVs in variable portion of the + * message. + * @dfs_region: regulatory DFS region, one of &enum qlink_dfs_regions. + * @info: variable-length HW info, can contain QTN_TLV_ID_REG_RULE. */ struct qlink_resp_get_hw_info { struct qlink_resp rhdr; __le32 fw_ver; __le32 hw_capab; __le16 ql_proto_ver; - u8 alpha2_code[2]; u8 num_mac; u8 mac_bitmap; u8 total_tx_chain; u8 total_rx_chain; + u8 alpha2[2]; + u8 n_reg_rules; + u8 dfs_region; + u8 info[0]; } __packed; /** @@ -574,6 +670,16 @@ struct qlink_resp_phy_params { u8 info[0]; } __packed; +/** + * struct qlink_resp_get_chan_stats - response for QLINK_CMD_CHAN_STATS cmd + * + * @info: variable-length channel info. + */ +struct qlink_resp_get_chan_stats { + struct qlink_cmd rhdr; + u8 info[0]; +} __packed; + /* QLINK Events messages related definitions */ @@ -585,6 +691,7 @@ enum qlink_event_type { QLINK_EVENT_SCAN_COMPLETE = 0x0025, QLINK_EVENT_BSS_JOIN = 0x0026, QLINK_EVENT_BSS_LEAVE = 0x0027, + QLINK_EVENT_FREQ_CHANGE = 0x0028, }; /** @@ -651,7 +758,17 @@ struct qlink_event_bss_join { */ struct qlink_event_bss_leave { struct qlink_event ehdr; - u16 reason; + __le16 reason; } __packed; + +/** + * struct qlink_event_freq_change - data for QLINK_EVENT_FREQ_CHANGE event + * + * @freq: new operating frequency in MHz + */ +struct qlink_event_freq_change { + struct qlink_event ehdr; + __le32 freq; } __packed; enum qlink_rxmgmt_flags { @@ -741,10 +858,12 @@ enum qlink_tlv_id { QTN_TLV_ID_LRETRY_LIMIT = 0x0204, QTN_TLV_ID_BCN_PERIOD = 0x0205, QTN_TLV_ID_DTIM = 0x0206, + QTN_TLV_ID_REG_RULE = 0x0207, QTN_TLV_ID_CHANNEL = 0x020F, QTN_TLV_ID_COVERAGE_CLASS = 0x0213, QTN_TLV_ID_IFACE_LIMIT = 0x0214, QTN_TLV_ID_NUM_IFACE_COMB = 0x0215, + QTN_TLV_ID_CHANNEL_STATS = 0x0216, QTN_TLV_ID_STA_BASIC_COUNTERS = 0x0300, QTN_TLV_ID_STA_GENERIC_INFO = 0x0301, QTN_TLV_ID_KEY = 0x0302, @@ -761,7 +880,7 @@ struct qlink_tlv_hdr { struct qlink_iface_limit { __le16 max_num; - __le16 type_mask; + __le16 type; } __packed; struct qlink_iface_comb_num { @@ -844,12 +963,54 @@ struct qlink_tlv_cclass { u8 cclass; } __packed; -enum qlink_dfs_state { - QLINK_DFS_USABLE, - QLINK_DFS_UNAVAILABLE, - QLINK_DFS_AVAILABLE, +/** + * enum qlink_reg_rule_flags - regulatory rule flags + * + * See description of &enum nl80211_reg_rule_flags + */ +enum qlink_reg_rule_flags { + QLINK_RRF_NO_OFDM = BIT(0), + QLINK_RRF_NO_CCK = BIT(1), + QLINK_RRF_NO_INDOOR = BIT(2), + QLINK_RRF_NO_OUTDOOR = BIT(3), + QLINK_RRF_DFS = BIT(4), + QLINK_RRF_PTP_ONLY = BIT(5), + QLINK_RRF_PTMP_ONLY = BIT(6), + QLINK_RRF_NO_IR = BIT(7), + QLINK_RRF_AUTO_BW = BIT(8), + QLINK_RRF_IR_CONCURRENT = BIT(9), + QLINK_RRF_NO_HT40MINUS = BIT(10), + QLINK_RRF_NO_HT40PLUS = BIT(11), + QLINK_RRF_NO_80MHZ = BIT(12), + QLINK_RRF_NO_160MHZ = BIT(13), }; +/** + * struct qlink_tlv_reg_rule - data for QTN_TLV_ID_REG_RULE TLV + * + * Regulatory rule description.
+ * + * @start_freq_khz: start frequency of the range the rule is attributed to. + * @end_freq_khz: end frequency of the range the rule is attributed to. + * @max_bandwidth_khz: max bandwidth that channels in specified range can be + * configured to. + * @max_antenna_gain: max antenna gain that can be used in the specified + * frequency range, dBi. + * @max_eirp: maximum EIRP. + * @flags: regulatory rule flags in &enum qlink_reg_rule_flags. + * @dfs_cac_ms: DFS CAC period. + */ +struct qlink_tlv_reg_rule { + struct qlink_tlv_hdr hdr; + __le32 start_freq_khz; + __le32 end_freq_khz; + __le32 max_bandwidth_khz; + __le32 max_antenna_gain; + __le32 max_eirp; + __le32 flags; + __le32 dfs_cac_ms; +} __packed; + enum qlink_channel_flags { QLINK_CHAN_DISABLED = BIT(0), QLINK_CHAN_NO_IR = BIT(1), @@ -865,6 +1026,12 @@ enum qlink_channel_flags { QLINK_CHAN_NO_10MHZ = BIT(12), }; +enum qlink_dfs_state { + QLINK_DFS_USABLE, + QLINK_DFS_UNAVAILABLE, + QLINK_DFS_AVAILABLE, +}; + struct qlink_tlv_channel { struct qlink_tlv_hdr hdr; __le16 hw_value; @@ -898,4 +1065,13 @@ struct qlink_auth_encr { u8 control_port_no_encrypt; } __packed; +struct qlink_chan_stats { + __le32 chan_num; + __le32 cca_tx; + __le32 cca_rx; + __le32 cca_busy; + __le32 cca_try; + s8 chan_noise; +} __packed; + #endif /* _QTN_QLINK_H_ */ diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c index 49ae652ad9a3..cf024c995fd6 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c @@ -17,24 +17,30 @@ #include "qlink_util.h" -u16 qlink_iface_type_mask_to_nl(u16 qlink_mask) +u16 qlink_iface_type_to_nl_mask(u16 qlink_type) { u16 result = 0; - if (qlink_mask & QLINK_IFTYPE_AP) + switch (qlink_type) { + case QLINK_IFTYPE_AP: result |= BIT(NL80211_IFTYPE_AP); - - if (qlink_mask & QLINK_IFTYPE_STATION) + break; + case QLINK_IFTYPE_STATION: result |= BIT(NL80211_IFTYPE_STATION); - - if (qlink_mask & QLINK_IFTYPE_ADHOC) + break; + case QLINK_IFTYPE_ADHOC: result |= BIT(NL80211_IFTYPE_ADHOC); - - if (qlink_mask & QLINK_IFTYPE_MONITOR) + break; + case QLINK_IFTYPE_MONITOR: result |= BIT(NL80211_IFTYPE_MONITOR); - - if (qlink_mask & QLINK_IFTYPE_WDS) + break; + case QLINK_IFTYPE_WDS: result |= BIT(NL80211_IFTYPE_WDS); + break; + case QLINK_IFTYPE_AP_VLAN: + result |= BIT(NL80211_IFTYPE_AP_VLAN); + break; + } return result; } diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h index 90d7d09a6c63..de06c1e20b5b 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h @@ -22,14 +22,6 @@ #include "qlink.h" -static inline void qtnf_cmd_skb_put_action(struct sk_buff *skb, u16 action) -{ - __le16 *buf_ptr; - - buf_ptr = skb_put(skb, sizeof(action)); - *buf_ptr = cpu_to_le16(action); -} - static inline void qtnf_cmd_skb_put_buffer(struct sk_buff *skb, const u8 *buf_src, size_t len) { @@ -68,7 +60,7 @@ static inline void qtnf_cmd_skb_put_tlv_u16(struct sk_buff *skb, memcpy(hdr->val, &tmp, sizeof(tmp)); } -u16 qlink_iface_type_mask_to_nl(u16 qlink_mask); +u16 qlink_iface_type_to_nl_mask(u16 qlink_type); u8 qlink_chan_width_mask_to_nl(u16 qlink_mask); #endif /* _QTN_FMAC_QLINK_UTIL_H_ */ diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index e36ee592c660..ea18aa7afecb 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ 
b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -426,9 +426,8 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw) hw->extra_tx_headroom = RTL_TX_HEADER_SIZE; /* TODO: Correct this value for our hw */ - /* TODO: define these hard code value */ - hw->max_listen_interval = 10; - hw->max_rate_tries = 4; + hw->max_listen_interval = MAX_LISTEN_INTERVAL; + hw->max_rate_tries = MAX_RATE_TRIES; /* hw->max_rates = 1; */ hw->sta_data_size = sizeof(struct rtl_sta_info); @@ -1166,9 +1165,9 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw, } } - if (is_multicast_ether_addr(ieee80211_get_DA(hdr))) + if (is_multicast_ether_addr(hdr->addr1)) tcb_desc->multicast = 1; - else if (is_broadcast_ether_addr(ieee80211_get_DA(hdr))) + else if (is_broadcast_ether_addr(hdr->addr1)) tcb_desc->broadcast = 1; _rtl_txrate_selectmode(hw, sta, tcb_desc); @@ -1408,6 +1407,11 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx, return true; } else if (ETH_P_PAE == ether_type) { + /* EAPOL is seen as in-4way */ + rtlpriv->btcoexist.btc_info.in_4way = true; + rtlpriv->btcoexist.btc_info.in_4way_ts = jiffies; + RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG, "802.1X %s EAPOL pkt!!\n", (is_tx) ? "Tx" : "Rx"); @@ -1735,12 +1739,12 @@ void rtl_scan_list_expire(struct ieee80211_hw *hw) continue; list_del(&entry->list); - kfree(entry); rtlpriv->scan_list.num--; RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, "BSSID=%pM is expire in scan list (total=%d)\n", entry->bssid, rtlpriv->scan_list.num); + kfree(entry); } spin_unlock_irqrestore(&rtlpriv->locks.scan_list_lock, flags); @@ -1959,6 +1963,12 @@ label_lps_done: if (rtlpriv->cfg->ops->get_btc_status()) rtlpriv->btcoexist.btc_ops->btc_periodical(rtlpriv); + if (rtlpriv->btcoexist.btc_info.in_4way) { + if (time_after(jiffies, rtlpriv->btcoexist.btc_info.in_4way_ts + + msecs_to_jiffies(IN_4WAY_TIMEOUT_TIME))) + rtlpriv->btcoexist.btc_info.in_4way = false; + } + rtlpriv->link_info.bcn_rx_inperiod = 0; /* <6> scan list */ diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h index ab7d81904d25..b56d1b7f5567 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.h +++ b/drivers/net/wireless/realtek/rtlwifi/base.h @@ -65,6 +65,8 @@ enum ap_peer { #define FRAME_OFFSET_ADDRESS3 16 #define FRAME_OFFSET_SEQUENCE 22 #define FRAME_OFFSET_ADDRESS4 24 +#define MAX_LISTEN_INTERVAL 10 +#define MAX_RATE_TRIES 4 #define SET_80211_HDR_FRAME_CONTROL(_hdr, _val) \ WRITEEF2BYTE(_hdr, _val) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h index 2ac989a4b2bb..02dff4c3f664 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h @@ -43,22 +43,6 @@ #define RT_SDIO_INTERFACE 3 #define DEV_BUS_TYPE RT_PCI_INTERFACE -/* IC type */ -#define RTL_HW_TYPE(adapter) (rtl_hal((struct rtl_priv *)adapter)->hw_type) - -#define IS_NEW_GENERATION_IC(adapter) \ - (RTL_HW_TYPE(adapter) >= HARDWARE_TYPE_RTL8192EE) -#define IS_HARDWARE_TYPE_8812(adapter) \ - (RTL_HW_TYPE(adapter) == HARDWARE_TYPE_RTL8812AE) -#define IS_HARDWARE_TYPE_8821(adapter) \ - (RTL_HW_TYPE(adapter) == HARDWARE_TYPE_RTL8821AE) -#define IS_HARDWARE_TYPE_8723A(adapter) \ - (RTL_HW_TYPE(adapter) == HARDWARE_TYPE_RTL8723AE) -#define IS_HARDWARE_TYPE_8723B(adapter) \ - (RTL_HW_TYPE(adapter) == HARDWARE_TYPE_RTL8723BE) -#define
IS_HARDWARE_TYPE_8192E(adapter) \ - (RTL_HW_TYPE(adapter) == HARDWARE_TYPE_RTL8192EE) - #include "halbtc8192e2ant.h" #include "halbtc8723b1ant.h" #include "halbtc8723b2ant.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c index e6024b013ca5..c1eacd8352a2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c @@ -327,7 +327,22 @@ static void halbtc_aggregation_check(struct btc_coexist *btcoexist) static u32 halbtc_get_bt_patch_version(struct btc_coexist *btcoexist) { - return 0; + struct rtl_priv *rtlpriv = btcoexist->adapter; + u8 cmd_buffer[4] = {0}; + u8 oper_ver = 0; + u8 req_num = 0x0E; + + if (btcoexist->bt_info.bt_real_fw_ver) + goto label_done; + + cmd_buffer[0] |= (oper_ver & 0x0f); /* Set OperVer */ + cmd_buffer[0] |= ((req_num << 4) & 0xf0); /* Set ReqNum */ + cmd_buffer[1] = 0; /* BT_OP_GET_BT_VERSION = 0 */ + rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, 0x67, 4, + &cmd_buffer[0]); + +label_done: + return btcoexist->bt_info.bt_real_fw_ver; } u32 halbtc_get_wifi_link_status(struct btc_coexist *btcoexist) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c index 4366c9817e1e..7d296a401b6f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c @@ -41,6 +41,7 @@ static struct rtl_btc_ops rtl_btc_operation = { .btc_periodical = rtl_btc_periodical, .btc_halt_notify = rtl_btc_halt_notify, .btc_btinfo_notify = rtl_btc_btinfo_notify, + .btc_btmpinfo_notify = rtl_btc_btmpinfo_notify, .btc_is_limited_dig = rtl_btc_is_limited_dig, .btc_is_disable_edca_turbo = rtl_btc_is_disable_edca_turbo, .btc_is_bt_disabled = rtl_btc_is_bt_disabled, @@ -165,6 +166,33 @@ void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length) exhalbtc_bt_info_notify(&gl_bt_coexist, tmp_buf, length); } +void rtl_btc_btmpinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length) +{ + u8 extid, seq, len; + u16 bt_real_fw_ver; + u8 bt_fw_ver; + + if ((length < 4) || (!tmp_buf)) + return; + + extid = tmp_buf[0]; + /* not response from BT FW then exit*/ + if (extid != 1) /* C2H_TRIG_BY_BT_FW = 1 */ + return; + + len = tmp_buf[1] >> 4; + seq = tmp_buf[2] >> 4; + + /* BT Firmware version response */ + if (seq == 0x0E) { + bt_real_fw_ver = tmp_buf[3] | (tmp_buf[4] << 8); + bt_fw_ver = tmp_buf[5]; + + gl_bt_coexist.bt_info.bt_real_fw_ver = bt_real_fw_ver; + gl_bt_coexist.bt_info.bt_fw_ver = bt_fw_ver; + } +} + bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv) { return gl_bt_coexist.bt_info.limited_dig; diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h index 6fe521cbe7f0..ac1253c46f44 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h @@ -39,6 +39,7 @@ void rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv, void rtl_btc_periodical(struct rtl_priv *rtlpriv); void rtl_btc_halt_notify(void); void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmpbuf, u8 length); +void rtl_btc_btmpinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length); bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv); bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv); bool rtl_btc_is_bt_disabled(struct rtl_priv 
*rtlpriv); diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index b0ad061048c5..c53cbf3d52bd 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c @@ -1505,6 +1505,8 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, u8 mac_addr[ETH_ALEN]; u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + rtlpriv->btcoexist.btc_info.in_4way = false; + if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) { RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "not open hw encryption\n"); diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index 032b6317690d..08dc8919ef60 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -2257,7 +2257,7 @@ int rtl_pci_probe(struct pci_dev *pdev, /* find adapter */ if (!_rtl_pci_find_adapter(pdev, hw)) { err = -ENODEV; - goto fail3; + goto fail2; } /* Init IO handler */ @@ -2318,10 +2318,10 @@ fail3: pci_set_drvdata(pdev, NULL); rtl_deinit_core(hw); +fail2: if (rtlpriv->io.pci_mem_start != 0) pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start); -fail2: pci_release_regions(pdev); complete(&rtlpriv->firmware_loading_complete); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c index 774e72058d24..57e5d5c1d24b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c @@ -175,6 +175,8 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw) rtl_fw_cb); if (err) { pr_info("Failed to request firmware!\n"); + vfree(rtlpriv->rtlhal.pfirmware); + rtlpriv->rtlhal.pfirmware = NULL; return 1; } @@ -376,7 +378,7 @@ static const struct rtl_hal_cfg rtl88ee_hal_cfg = { .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15, }; -static struct pci_device_id rtl88ee_pci_ids[] = { +static const struct pci_device_id rtl88ee_pci_ids[] = { {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8179, rtl88ee_hal_cfg)}, {}, }; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c index bcbb0c60f1f1..38f85bfdf0c7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c @@ -176,6 +176,8 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw) rtl_fw_cb); if (err) { pr_err("Failed to request firmware!\n"); + vfree(rtlpriv->rtlhal.pfirmware); + rtlpriv->rtlhal.pfirmware = NULL; return 1; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c index f95a64507f17..530e80f0ef0b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c @@ -777,10 +777,6 @@ static void _rtl92cu_init_queue_priority(struct ieee80211_hw *hw, queue_sel); } -static void _rtl92cu_init_usb_aggregation(struct ieee80211_hw *hw) -{ -} - static void _rtl92cu_init_wmac_setting(struct ieee80211_hw *hw) { u16 value16; @@ -870,7 +866,6 @@ static int _rtl92cu_init_mac(struct ieee80211_hw *hw) rtl92c_init_edca(hw); rtl92c_init_rate_fallback(hw); rtl92c_init_retry_function(hw); - _rtl92cu_init_usb_aggregation(hw); rtlpriv->cfg->ops->set_bw_mode(hw, NL80211_CHAN_HT20); rtl92c_set_min_space(hw, IS_92C_SERIAL(rtlhal->version)); _rtl92cu_init_beacon_parameters(hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c 
b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c index 1b124eade846..5657b1e34ad0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c @@ -352,11 +352,10 @@ u32 rtl92c_get_txdma_status(struct ieee80211_hw *hw) void rtl92c_enable_interrupt(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); - if (IS_HARDWARE_TYPE_8192CE(rtlhal)) { + if (IS_HARDWARE_TYPE_8192CE(rtlpriv)) { rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c index 96c923b3feb4..dfbbd35bb966 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c @@ -85,6 +85,10 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw) err = request_firmware_nowait(THIS_MODULE, 1, fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); + if (err) { + vfree(rtlpriv->rtlhal.pfirmware); + rtlpriv->rtlhal.pfirmware = NULL; + } return err; } @@ -173,7 +177,7 @@ static struct rtl_hal_usbint_cfg rtl92cu_interface_cfg = { .rx_urb_num = RTL92C_NUM_RX_URBS, .rx_max_size = RTL92C_SIZE_MAX_RX_BUFFER, .usb_rx_hdl = rtl8192cu_rx_hdl, - .usb_rx_segregate_hdl = NULL, /* rtl8192c_rx_segregate_hdl; */ + .usb_rx_segregate_hdl = NULL, /* tx */ .usb_tx_cleanup = rtl8192c_tx_cleanup, .usb_tx_post_hdl = rtl8192c_tx_post_hdl, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c index de6c3428f7c6..ac4a82de40c7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c @@ -436,13 +436,6 @@ void rtl8192cu_rx_hdl(struct ieee80211_hw *hw, struct sk_buff * skb) _rtl_rx_process(hw, skb); } -void rtl8192c_rx_segregate_hdl( - struct ieee80211_hw *hw, - struct sk_buff *skb, - struct sk_buff_head *skb_list) -{ -} - /*---------------------------------------------------------------------- * * Tx handler @@ -675,8 +668,3 @@ void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw, RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, "H2C Tx Cmd Content", pdesc, RTL_TX_DESC_SIZE); } - -bool rtl92cu_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb) -{ - return true; -} diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h index 487eec89bc29..15a66c547287 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h @@ -385,8 +385,6 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw, struct ieee80211_rx_status *rx_status, u8 *p_desc, struct sk_buff *skb); void rtl8192cu_rx_hdl(struct ieee80211_hw *hw, struct sk_buff * skb); -void rtl8192c_rx_segregate_hdl(struct ieee80211_hw *, struct sk_buff *, - struct sk_buff_head *); void rtl8192c_tx_cleanup(struct ieee80211_hw *hw, struct sk_buff *skb); int rtl8192c_tx_post_hdl(struct ieee80211_hw *hw, struct urb *urb, struct sk_buff *skb); @@ -404,6 +402,5 @@ void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc, void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool b_firstseg, bool b_lastseg, struct sk_buff *skb); -bool rtl92cu_cmd_send_packet(struct 
ieee80211_hw *hw, struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c index 16132c66e5e1..a6549f5f6c59 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c @@ -183,6 +183,8 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw) rtl_fw_cb); if (err) { pr_err("Failed to request firmware!\n"); + vfree(rtlpriv->rtlhal.pfirmware); + rtlpriv->rtlhal.pfirmware = NULL; return 1; } @@ -347,7 +349,7 @@ static const struct rtl_hal_cfg rtl92de_hal_cfg = { .maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15, }; -static struct pci_device_id rtl92de_pci_ids[] = { +static const struct pci_device_id rtl92de_pci_ids[] = { {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8193, rtl92de_hal_cfg)}, {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x002B, rtl92de_hal_cfg)}, {}, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c index f5d4df985c37..7eae27f8e173 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c @@ -887,6 +887,7 @@ void rtl92ee_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id, u8 c2h_cmd_len, u8 *tmp_buf) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops; switch (c2h_cmd_id) { case C2H_8192E_DBG: @@ -905,12 +906,16 @@ void rtl92ee_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id, case C2H_8192E_BT_INFO: RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "[C2H], C2H_8723BE_BT_INFO!!\n"); - rtlpriv->btcoexist.btc_ops->btc_btinfo_notify(rtlpriv, tmp_buf, - c2h_cmd_len); + if (rtlpriv->cfg->ops->get_btc_status()) + btc_ops->btc_btinfo_notify(rtlpriv, tmp_buf, + c2h_cmd_len); break; case C2H_8192E_BT_MP: RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "[C2H], C2H_8723BE_BT_MP!!\n"); + if (rtlpriv->cfg->ops->get_btc_status()) + btc_ops->btc_btmpinfo_notify(rtlpriv, tmp_buf, + c2h_cmd_len); break; case C2H_8192E_RA_RPT: _rtl92ee_c2h_ra_report_handler(hw, tmp_buf, c2h_cmd_len); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c index d84ac7adfd82..ef9394be7016 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c @@ -2133,7 +2133,12 @@ static void _rtl92ee_read_adapter_info(struct ieee80211_hw *hw) if ((*(u8 *)&hwinfo[EEPROM_RF_BOARD_OPTION_92E]) == 0xFF) rtlefuse->board_type = 0; + if (rtlpriv->btcoexist.btc_info.btcoexist == 1) + rtlefuse->board_type |= BIT(2); /* ODM_BOARD_BT */ + rtlhal->board_type = rtlefuse->board_type; + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "board_type = 0x%x\n", rtlefuse->board_type); /*parse xtal*/ rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_92E]; if (hwinfo[EEPROM_XTAL_92E] == 0xFF) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c index eaa503b7c4b4..a3490080d066 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c @@ -177,6 +177,8 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw) rtl_fw_cb); if (err) { pr_err("Failed to request firmware!\n"); + vfree(rtlpriv->rtlhal.pfirmware); + rtlpriv->rtlhal.pfirmware = NULL; return 1; } @@ -354,7 +356,7 @@ static const struct rtl_hal_cfg rtl92ee_hal_cfg = { .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15, }; -static struct pci_device_id 
rtl92ee_pci_ids[] = { +static const struct pci_device_id rtl92ee_pci_ids[] = { {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x818B, rtl92ee_hal_cfg)}, {}, }; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c index 55f238a2a310..c58393eab6a1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c @@ -478,7 +478,6 @@ u16 rtl92ee_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw, u8 queue_index) struct rtl_priv *rtlpriv = rtl_priv(hw); u16 read_point = 0, write_point = 0, remind_cnt = 0; u32 tmp_4byte = 0; - static u16 last_read_point; static bool start_rx; tmp_4byte = rtl_read_dword(rtlpriv, REG_RXQ_TXBD_IDX); @@ -506,7 +505,6 @@ u16 rtl92ee_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw, u8 queue_index) rtlpci->rx_ring[queue_index].next_rx_rp = write_point; - last_read_point = read_point; return remind_cnt; } @@ -917,7 +915,6 @@ void rtl92ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, struct rtl_priv *rtlpriv = rtl_priv(hw); u16 cur_tx_rp = 0; u16 cur_tx_wp = 0; - static u16 last_txw_point; static bool over_run; u32 tmp = 0; u8 q_idx = *val; @@ -951,9 +948,6 @@ void rtl92ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, rtl_write_word(rtlpriv, get_desc_addr_fr_q_idx(q_idx), ring->cur_tx_wp); - - if (q_idx == 1) - last_txw_point = cur_tx_wp; } if (ring->avl_desc < (max_tx_desc - 15)) { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c index 2006b09ea74f..d7945b9db493 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c @@ -216,6 +216,8 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw) rtl92se_fw_cb); if (err) { pr_err("Failed to request firmware!\n"); + vfree(rtlpriv->rtlhal.pfirmware); + rtlpriv->rtlhal.pfirmware = NULL; return 1; } @@ -396,7 +398,7 @@ static const struct rtl_hal_cfg rtl92se_hal_cfg = { .maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15, }; -static struct pci_device_id rtl92se_pci_ids[] = { +static const struct pci_device_id rtl92se_pci_ids[] = { {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8192, rtl92se_hal_cfg)}, {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8171, rtl92se_hal_cfg)}, {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8172, rtl92se_hal_cfg)}, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c index 7bf9f2557920..97b8bd294aa8 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c @@ -184,6 +184,8 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw) rtl_fw_cb); if (err) { pr_err("Failed to request firmware!\n"); + vfree(rtlpriv->rtlhal.pfirmware); + rtlpriv->rtlhal.pfirmware = NULL; return 1; } return 0; @@ -367,7 +369,7 @@ static const struct rtl_hal_cfg rtl8723e_hal_cfg = { .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15, }; -static struct pci_device_id rtl8723e_pci_ids[] = { +static const struct pci_device_id rtl8723e_pci_ids[] = { {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8723, rtl8723e_hal_cfg)}, {}, }; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c index dd6f95cfaec9..4b963fd27d64 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c @@ -709,6 +709,7 @@ void rtl8723be_c2h_content_parsing(struct ieee80211_hw 
*hw, u8 c2h_cmd_len, u8 *tmp_buf) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops; switch (c2h_cmd_id) { case C2H_8723B_DBG: @@ -723,12 +724,16 @@ void rtl8723be_c2h_content_parsing(struct ieee80211_hw *hw, case C2H_8723B_BT_INFO: RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "[C2H], C2H_8723BE_BT_INFO!!\n"); - rtlpriv->btcoexist.btc_ops->btc_btinfo_notify(rtlpriv, tmp_buf, - c2h_cmd_len); + if (rtlpriv->cfg->ops->get_btc_status()) + btc_ops->btc_btinfo_notify(rtlpriv, tmp_buf, + c2h_cmd_len); break; case C2H_8723B_BT_MP: RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "[C2H], C2H_8723BE_BT_MP!!\n"); + if (rtlpriv->cfg->ops->get_btc_status()) + btc_ops->btc_btmpinfo_notify(rtlpriv, tmp_buf, + c2h_cmd_len); break; default: RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index cd5dc6dcb19f..4d47b97adfed 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c @@ -2111,6 +2111,13 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw, rtlefuse->autoload_failflag, hwinfo); + if (rtlpriv->btcoexist.btc_info.btcoexist == 1) + rtlefuse->board_type |= BIT(2); /* ODM_BOARD_BT */ + + rtlhal->board_type = rtlefuse->board_type; + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "board_type = 0x%x\n", rtlefuse->board_type); + rtlhal->package_type = _rtl8723be_read_package_type(hw); /* set channel plan from efuse */ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c index 9752175cc466..9606641519e7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c @@ -152,33 +152,86 @@ bool rtl8723be_phy_rf_config(struct ieee80211_hw *hw) return rtl8723be_phy_rf6052_config(hw); } -static bool _rtl8723be_check_condition(struct ieee80211_hw *hw, - const u32 condition) +static bool _rtl8723be_check_positive(struct ieee80211_hw *hw, + const u32 condition1, + const u32 condition2) { - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - u32 _board = rtlefuse->board_type; /*need efuse define*/ - u32 _interface = rtlhal->interface; - u32 _platform = 0x08;/*SupportPlatform */ - u32 cond = condition; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + u32 cut_ver = ((rtlhal->version & CHIP_VER_RTL_MASK) + >> CHIP_VER_RTL_SHIFT); + u32 intf = (rtlhal->interface == INTF_USB ? 
BIT(1) : BIT(0)); + + u8 board_type = ((rtlhal->board_type & BIT(4)) >> 4) << 0 | /* _GLNA */ + ((rtlhal->board_type & BIT(3)) >> 3) << 1 | /* _GPA */ + ((rtlhal->board_type & BIT(7)) >> 7) << 2 | /* _ALNA */ + ((rtlhal->board_type & BIT(6)) >> 6) << 3 | /* _APA */ + ((rtlhal->board_type & BIT(2)) >> 2) << 4; /* _BT */ + + u32 cond1 = condition1, cond2 = condition2; + u32 driver1 = cut_ver << 24 | /* CUT ver */ + 0 << 20 | /* interface 2/2 */ + 0x04 << 16 | /* platform */ + rtlhal->package_type << 12 | + intf << 8 | /* interface 1/2 */ + board_type; + + u32 driver2 = rtlhal->type_glna << 0 | + rtlhal->type_gpa << 8 | + rtlhal->type_alna << 16 | + rtlhal->type_apa << 24; - if (condition == 0xCDCDCDCD) - return true; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "===> [8812A] CheckPositive (cond1, cond2) = (0x%X 0x%X)\n", + cond1, cond2); + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "===> [8812A] CheckPositive (driver1, driver2) = (0x%X 0x%X)\n", + driver1, driver2); - cond = condition & 0xFF; - if ((_board & cond) == 0 && cond != 0x1F) - return false; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + " (Platform, Interface) = (0x%X, 0x%X)\n", 0x04, intf); + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + " (Board, Package) = (0x%X, 0x%X)\n", + rtlhal->board_type, rtlhal->package_type); - cond = condition & 0xFF00; - cond = cond >> 8; - if ((_interface & cond) == 0 && cond != 0x07) - return false; + /*============== Value Defined Check ===============*/ + /*QFN Type [15:12] and Cut Version [27:24] need to do value check*/ - cond = condition & 0xFF0000; - cond = cond >> 16; - if ((_platform & cond) == 0 && cond != 0x0F) + if (((cond1 & 0x0000F000) != 0) && ((cond1 & 0x0000F000) != + (driver1 & 0x0000F000))) return false; - return true; + if (((cond1 & 0x0F000000) != 0) && ((cond1 & 0x0F000000) != + (driver1 & 0x0F000000))) + return false; + + /*=============== Bit Defined Check ================*/ + /* We don't care [31:28] */ + + cond1 &= 0x00FF0FFF; + driver1 &= 0x00FF0FFF; + + if ((cond1 & driver1) == cond1) { + u32 mask = 0; + + if ((cond1 & 0x0F) == 0) /* BoardType is DONTCARE*/ + return true; + + if ((cond1 & BIT(0)) != 0) /*GLNA*/ + mask |= 0x000000FF; + if ((cond1 & BIT(1)) != 0) /*GPA*/ + mask |= 0x0000FF00; + if ((cond1 & BIT(2)) != 0) /*ALNA*/ + mask |= 0x00FF0000; + if ((cond1 & BIT(3)) != 0) /*APA*/ + mask |= 0xFF000000; + + /* BoardType of each RF path is matched*/ + if ((cond2 & mask) == (driver2 & mask)) + return true; + else + return false; + } + return false; } static void _rtl8723be_config_rf_reg(struct ieee80211_hw *hw, u32 addr, @@ -464,6 +517,16 @@ static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw) struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); bool rtstatus; + /* switch ant to BT */ + if (rtlpriv->rtlhal.interface == INTF_USB) { + rtl_write_dword(rtlpriv, 0x948, 0x0); + } else { + if (rtlpriv->btcoexist.btc_info.single_ant_path == 0) + rtl_write_dword(rtlpriv, 0x948, 0x280); + else + rtl_write_dword(rtlpriv, 0x948, 0x0); + } + rtstatus = _rtl8723be_phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_PHY_REG); if (!rtstatus) { @@ -493,142 +556,84 @@ static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw) return true; } +static bool rtl8723be_phy_config_with_headerfile(struct ieee80211_hw *hw, + u32 *array_table, + u16 arraylen, + void (*set_reg)(struct ieee80211_hw *hw, u32 regaddr, u32 data)) +{ + #define COND_ELSE 2 + #define COND_ENDIF 3 + + int i = 0; + u8 cond; + bool matched = true, skipped = false; + + while ((i + 1) < arraylen) { 
+ u32 v1 = array_table[i]; + u32 v2 = array_table[i + 1]; + + if (v1 & (BIT(31) | BIT(30))) {/*positive & negative condition*/ + if (v1 & BIT(31)) {/* positive condition*/ + cond = (u8)((v1 & (BIT(29) | BIT(28))) >> 28); + if (cond == COND_ENDIF) { /*end*/ + matched = true; + skipped = false; + } else if (cond == COND_ELSE) { /*else*/ + matched = skipped ? false : true; + } else {/*if , else if*/ + if (skipped) { + matched = false; + } else { + if (_rtl8723be_check_positive( + hw, v1, v2)) { + matched = true; + skipped = true; + } else { + matched = false; + skipped = false; + } + } + } + } else if (v1 & BIT(30)) { /*negative condition*/ + /*do nothing*/ + } + } else { + if (matched) + set_reg(hw, v1, v2); + } + i = i + 2; + } + + return true; +} + static bool _rtl8723be_phy_config_mac_with_headerfile(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 i; - u32 arraylength; - u32 *ptrarray; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read rtl8723beMACPHY_Array\n"); - arraylength = RTL8723BEMAC_1T_ARRAYLEN; - ptrarray = RTL8723BEMAC_1T_ARRAY; - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "Img:RTL8723bEMAC_1T_ARRAY LEN %d\n", arraylength); - for (i = 0; i < arraylength; i = i + 2) - rtl_write_byte(rtlpriv, ptrarray[i], (u8)ptrarray[i + 1]); - return true; + + return rtl8723be_phy_config_with_headerfile(hw, + RTL8723BEMAC_1T_ARRAY, RTL8723BEMAC_1T_ARRAYLEN, + rtl_write_byte_with_val32); } static bool _rtl8723be_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, u8 configtype) { - #define READ_NEXT_PAIR(v1, v2, i) \ - do { \ - i += 2; \ - v1 = array_table[i];\ - v2 = array_table[i+1]; \ - } while (0) - - int i; - u32 *array_table; - u16 arraylen; - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 v1 = 0, v2 = 0; - - if (configtype == BASEBAND_CONFIG_PHY_REG) { - arraylen = RTL8723BEPHY_REG_1TARRAYLEN; - array_table = RTL8723BEPHY_REG_1TARRAY; - - for (i = 0; i < arraylen; i = i + 2) { - v1 = array_table[i]; - v2 = array_table[i+1]; - if (v1 < 0xcdcdcdcd) { - _rtl8723be_config_bb_reg(hw, v1, v2); - } else {/*This line is the start line of branch.*/ - /* to protect READ_NEXT_PAIR not overrun */ - if (i >= arraylen - 2) - break; - - if (!_rtl8723be_check_condition(hw, - array_table[i])) { - /*Discard the following - *(offset, data) pairs - */ - READ_NEXT_PAIR(v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && - i < arraylen - 2) { - READ_NEXT_PAIR(v1, v2, i); - } - i -= 2; /* prevent from for-loop += 2*/ - /*Configure matched pairs and - *skip to end of if-else. 
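[Editor's note] The shared rtl8723be_phy_config_with_headerfile() introduced above replaces three near-identical READ_NEXT_PAIR walkers with their 0xDEAD/0xCDEF/0xCDCD sentinels: tables stay flat (reg, value) pair arrays, an entry with BIT(31) set is a condition record whose bits [29:28] select if/else-if, else (2) or endif (3), and a callback applies whatever pairs fall inside the matched branch. A standalone sketch of the interpreter with the kernel types and trace calls stripped (illustrative, not part of the patch):

#include <stdbool.h>
#include <stdint.h>

enum { COND_ELSE = 2, COND_ENDIF = 3 };

static void run_table(const uint32_t *tbl, unsigned int len,
		      bool (*check)(uint32_t v1, uint32_t v2),
		      void (*set_reg)(uint32_t reg, uint32_t val))
{
	bool matched = true, skipped = false;

	for (unsigned int i = 0; i + 1 < len; i += 2) {
		uint32_t v1 = tbl[i], v2 = tbl[i + 1];

		if (v1 & (1u << 31)) {		/* positive condition */
			uint8_t cond = (v1 >> 28) & 0x3;

			if (cond == COND_ENDIF) {
				matched = true;
				skipped = false;
			} else if (cond == COND_ELSE) {
				matched = !skipped;
			} else if (!skipped && check(v1, v2)) {
				matched = true;
				skipped = true;	/* later branches are off */
			} else {
				matched = false;
			}
		} else if (v1 & (1u << 30)) {
			/* negative condition: unused, record skipped */
		} else if (matched) {
			set_reg(v1, v2);	/* plain (reg, value) pair */
		}
	}
}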
- */ - } else { - READ_NEXT_PAIR(v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && - i < arraylen - 2) { - _rtl8723be_config_bb_reg(hw, - v1, v2); - READ_NEXT_PAIR(v1, v2, i); - } - while (v2 != 0xDEAD && i < arraylen - 2) - READ_NEXT_PAIR(v1, v2, i); - } - } - } - } else if (configtype == BASEBAND_CONFIG_AGC_TAB) { - arraylen = RTL8723BEAGCTAB_1TARRAYLEN; - array_table = RTL8723BEAGCTAB_1TARRAY; - - for (i = 0; i < arraylen; i = i + 2) { - v1 = array_table[i]; - v2 = array_table[i+1]; - if (v1 < 0xCDCDCDCD) { - rtl_set_bbreg(hw, array_table[i], - MASKDWORD, - array_table[i + 1]); - udelay(1); - continue; - } else {/*This line is the start line of branch.*/ - /* to protect READ_NEXT_PAIR not overrun */ - if (i >= arraylen - 2) - break; - - if (!_rtl8723be_check_condition(hw, - array_table[i])) { - /*Discard the following - *(offset, data) pairs - */ - READ_NEXT_PAIR(v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && - i < arraylen - 2) { - READ_NEXT_PAIR(v1, v2, i); - } - i -= 2; /* prevent from for-loop += 2*/ - /*Configure matched pairs and - *skip to end of if-else. - */ - } else { - READ_NEXT_PAIR(v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && - i < arraylen - 2) { - rtl_set_bbreg(hw, array_table[i], - MASKDWORD, - array_table[i + 1]); - udelay(1); - READ_NEXT_PAIR(v1, v2, i); - } + if (configtype == BASEBAND_CONFIG_PHY_REG) + return rtl8723be_phy_config_with_headerfile(hw, + RTL8723BEPHY_REG_1TARRAY, + RTL8723BEPHY_REG_1TARRAYLEN, + _rtl8723be_config_bb_reg); + else if (configtype == BASEBAND_CONFIG_AGC_TAB) + return rtl8723be_phy_config_with_headerfile(hw, + RTL8723BEAGCTAB_1TARRAY, + RTL8723BEAGCTAB_1TARRAYLEN, + rtl_set_bbreg_with_dwmask); - while (v2 != 0xDEAD && i < arraylen - 2) - READ_NEXT_PAIR(v1, v2, i); - } - } - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "The agctab_array_table[0] is %x Rtl818EEPHY_REGArray[1] is %x\n", - array_table[i], array_table[i + 1]); - } - } - return true; + return false; } static u8 _rtl8723be_get_rate_section_index(u32 regaddr) @@ -761,73 +766,17 @@ static bool _rtl8723be_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, bool rtl8723be_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, enum radio_path rfpath) { - #define READ_NEXT_RF_PAIR(v1, v2, i) \ - do { \ - i += 2; \ - v1 = radioa_array_table[i]; \ - v2 = radioa_array_table[i+1]; \ - } while (0) - - int i; - bool rtstatus = true; - u32 *radioa_array_table; - u16 radioa_arraylen; struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - u32 v1 = 0, v2 = 0; + bool ret = true; - radioa_arraylen = RTL8723BE_RADIOA_1TARRAYLEN; - radioa_array_table = RTL8723BE_RADIOA_1TARRAY; - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "Radio_A:RTL8723BE_RADIOA_1TARRAY %d\n", radioa_arraylen); RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath); - rtstatus = true; switch (rfpath) { case RF90_PATH_A: - for (i = 0; i < radioa_arraylen; i = i + 2) { - v1 = radioa_array_table[i]; - v2 = radioa_array_table[i+1]; - if (v1 < 0xcdcdcdcd) { - _rtl8723be_config_rf_radio_a(hw, v1, v2); - } else {/*This line is the start line of branch.*/ - /* to protect READ_NEXT_PAIR not overrun */ - if (i >= radioa_arraylen - 2) - break; - - if (!_rtl8723be_check_condition(hw, - radioa_array_table[i])) { - /*Discard the following - *(offset, data) pairs - */ - READ_NEXT_RF_PAIR(v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && - i < radioa_arraylen - 2) { - READ_NEXT_RF_PAIR(v1, v2, i); - } - i 
-= 2; /* prevent from for-loop += 2*/ - } else { - /*Configure matched pairs - *and skip to end of if-else. - */ - READ_NEXT_RF_PAIR(v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && - i < radioa_arraylen - 2) { - _rtl8723be_config_rf_radio_a(hw, - v1, v2); - READ_NEXT_RF_PAIR(v1, v2, i); - } - - while (v2 != 0xDEAD && - i < radioa_arraylen - 2) { - READ_NEXT_RF_PAIR(v1, v2, i); - } - } - } - } + ret = rtl8723be_phy_config_with_headerfile(hw, + RTL8723BE_RADIOA_1TARRAY, + RTL8723BE_RADIOA_1TARRAYLEN, + _rtl8723be_config_rf_radio_a); if (rtlhal->oem_id == RT_CID_819X_HP) _rtl8723be_config_rf_radio_a(hw, 0x52, 0x7E4BD); @@ -840,7 +789,7 @@ bool rtl8723be_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, "switch case %#x not processed\n", rfpath); break; } - return true; + return ret; } void rtl8723be_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw) @@ -1350,7 +1299,7 @@ void rtl8723be_phy_sw_chnl_callback(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_phy *rtlphy = &rtlpriv->phy; - u32 delay; + u32 delay = 0; RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "switch to channel%d\n", rtlphy->current_channel); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c index f9d10f1e7cf8..2b16a1467e78 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c @@ -187,16 +187,10 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { - /* Failed to get firmware. Check if old version available */ - fw_name = "rtlwifi/rtl8723befw.bin"; - pr_info("Using firmware %s\n", fw_name); - err = request_firmware_nowait(THIS_MODULE, 1, fw_name, - rtlpriv->io.dev, GFP_KERNEL, hw, - rtl_fw_cb); - if (err) { - pr_err("Failed to request firmware!\n"); - return 1; - } + pr_err("Failed to request firmware!\n"); + vfree(rtlpriv->rtlhal.pfirmware); + rtlpriv->rtlhal.pfirmware = NULL; + return 1; } return 0; } @@ -287,6 +281,7 @@ static const struct rtl_hal_cfg rtl8723be_hal_cfg = { .bar_id = 2, .write_readback = true, .name = "rtl8723be_pci", + .alt_fw_name = "rtlwifi/rtl8723befw.bin", .ops = &rtl8723be_hal_ops, .mod_params = &rtl8723be_mod_params, .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL, @@ -380,7 +375,7 @@ static const struct rtl_hal_cfg rtl8723be_hal_cfg = { .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15, }; -static struct pci_device_id rtl8723be_pci_ids[] = { +static const struct pci_device_id rtl8723be_pci_ids[] = { {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xB723, rtl8723be_hal_cfg)}, {}, }; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c index a180761e8810..381c16b9b3a9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c @@ -26,6 +26,7 @@ *****************************************************************************/ #include "table.h" + u32 RTL8723BEPHY_REG_1TARRAY[] = { 0x800, 0x80040000, 0x804, 0x00000003, @@ -36,7 +37,7 @@ u32 RTL8723BEPHY_REG_1TARRAY[] = { 0x818, 0x02200385, 0x81C, 0x00000000, 0x820, 0x01000100, - 0x824, 0x00390204, + 0x824, 0x00190204, 0x828, 0x00000000, 0x82C, 0x00000000, 0x830, 0x00000000, @@ -73,9 +74,8 @@ u32 RTL8723BEPHY_REG_1TARRAY[] = { 0x90C, 0x81121111, 0x910, 0x00000002, 0x914, 0x00000201, - 0x948, 0x00000280, 0xA00, 0x00D047C8, - 0xA04, 0x80FF000C, + 
0xA04, 0x80FF800C, 0xA08, 0x8C838300, 0xA0C, 0x2E7F120F, 0xA10, 0x9500BB78, @@ -114,7 +114,7 @@ u32 RTL8723BEPHY_REG_1TARRAY[] = { 0xC4C, 0x007F037F, 0xC50, 0x69553420, 0xC54, 0x43BC0094, - 0xC58, 0x00023169, + 0xC58, 0x00013147, 0xC5C, 0x00250492, 0xC60, 0x00000000, 0xC64, 0x7112848B, @@ -125,7 +125,7 @@ u32 RTL8723BEPHY_REG_1TARRAY[] = { 0xC78, 0x0000001F, 0xC7C, 0x00B91612, 0xC80, 0x390000E4, - 0xC84, 0x20F60000, + 0xC84, 0x21F60000, 0xC88, 0x40000100, 0xC8C, 0x20200000, 0xC90, 0x00020E1A, @@ -224,15 +224,21 @@ u32 RTL8723BEPHY_REG_1TARRAY[] = { }; +u32 RTL8723BEPHY_REG_1TARRAYLEN = + sizeof(RTL8723BEPHY_REG_1TARRAY) / sizeof(u32); + u32 RTL8723BEPHY_REG_ARRAY_PG[] = { - 0, 0, 0, 0x00000e08, 0x0000ff00, 0x00004000, - 0, 0, 0, 0x0000086c, 0xffffff00, 0x34363800, - 0, 0, 0, 0x00000e00, 0xffffffff, 0x42444646, - 0, 0, 0, 0x00000e04, 0xffffffff, 0x30343840, + 0, 0, 0, 0x00000e08, 0x0000ff00, 0x00003800, + 0, 0, 0, 0x0000086c, 0xffffff00, 0x32343600, + 0, 0, 0, 0x00000e00, 0xffffffff, 0x40424444, + 0, 0, 0, 0x00000e04, 0xffffffff, 0x28323638, 0, 0, 0, 0x00000e10, 0xffffffff, 0x38404244, 0, 0, 0, 0x00000e14, 0xffffffff, 0x26303436 }; +u32 RTL8723BEPHY_REG_ARRAY_PGLEN = + sizeof(RTL8723BEPHY_REG_ARRAY_PG) / sizeof(u32); + u32 RTL8723BE_RADIOA_1TARRAY[] = { 0x000, 0x00010000, 0x0B0, 0x000DFFE0, @@ -257,15 +263,37 @@ u32 RTL8723BE_RADIOA_1TARRAY[] = { 0x01E, 0x00000000, 0x0DF, 0x00000780, 0x050, 0x00067435, + 0x80002000, 0x00000000, 0x40000000, 0x00000000, + 0x051, 0x0006F10E, + 0x052, 0x000007D3, + 0x90003000, 0x00000000, 0x40000000, 0x00000000, + 0x051, 0x0006F10E, + 0x052, 0x000007D3, + 0x90004000, 0x00000000, 0x40000000, 0x00000000, + 0x051, 0x0006F10E, + 0x052, 0x000007D3, + 0xA0000000, 0x00000000, 0x051, 0x0006B04E, 0x052, 0x000007D2, + 0xB0000000, 0x00000000, 0x053, 0x00000000, 0x054, 0x00050400, 0x055, 0x0004026E, 0x0DD, 0x0000004C, 0x070, 0x00067435, + 0x80002000, 0x00000000, 0x40000000, 0x00000000, + 0x071, 0x0006F10E, + 0x072, 0x000007D3, + 0x90003000, 0x00000000, 0x40000000, 0x00000000, + 0x071, 0x0006F10E, + 0x072, 0x000007D3, + 0x90004000, 0x00000000, 0x40000000, 0x00000000, + 0x071, 0x0006F10E, + 0x072, 0x000007D3, + 0xA0000000, 0x00000000, 0x071, 0x0006B04E, 0x072, 0x000007D2, + 0xB0000000, 0x00000000, 0x073, 0x00000000, 0x074, 0x00050400, 0x075, 0x0004026E, @@ -308,6 +336,7 @@ u32 RTL8723BE_RADIOA_1TARRAY[] = { 0x044, 0x00000051, 0x0EF, 0x00000000, 0x0ED, 0x00000000, + 0x07F, 0x00020080, 0x0EF, 0x00002000, 0x03B, 0x000380EF, 0x03B, 0x000302FE, @@ -336,14 +365,24 @@ u32 RTL8723BE_RADIOA_1TARRAY[] = { 0x0A3, 0x00008000, 0x0A4, 0x00048D80, 0x0A5, 0x00068000, - 0x000, 0x00033D80, + 0x0ED, 0x00000002, + 0x0EF, 0x00000002, + 0x056, 0x00000032, + 0x076, 0x00000032, + 0x001, 0x00000780, }; +u32 RTL8723BE_RADIOA_1TARRAYLEN = + sizeof(RTL8723BE_RADIOA_1TARRAY) / sizeof(u32); + u32 RTL8723BEMAC_1T_ARRAY[] = { 0x02F, 0x00000030, 0x035, 0x00000000, + 0x039, 0x00000008, + 0x064, 0x00000000, 0x067, 0x00000020, + 0x421, 0x0000000F, 0x428, 0x0000000A, 0x429, 0x00000010, 0x430, 0x00000000, @@ -439,9 +478,13 @@ u32 RTL8723BEMAC_1T_ARRAY[] = { 0x709, 0x00000043, 0x70A, 0x00000065, 0x70B, 0x00000087, + 0x765, 0x00000018, + 0x76E, 0x00000004, }; +u32 RTL8723BEMAC_1T_ARRAYLEN = sizeof(RTL8723BEMAC_1T_ARRAY) / sizeof(u32); + u32 RTL8723BEAGCTAB_1TARRAY[] = { 0xC78, 0xFD000001, 0xC78, 0xFC010001, @@ -466,21 +509,21 @@ u32 RTL8723BEAGCTAB_1TARRAY[] = { 0xC78, 0xE9140001, 0xC78, 0xE8150001, 0xC78, 0xE7160001, - 0xC78, 0xAA170001, - 0xC78, 0xA9180001, - 0xC78, 0xA8190001, - 0xC78, 0xA71A0001, - 0xC78, 
0xA61B0001, - 0xC78, 0xA51C0001, - 0xC78, 0xA41D0001, - 0xC78, 0xA31E0001, - 0xC78, 0x671F0001, - 0xC78, 0x66200001, - 0xC78, 0x65210001, - 0xC78, 0x64220001, - 0xC78, 0x63230001, - 0xC78, 0x62240001, - 0xC78, 0x61250001, + 0xC78, 0xE6170001, + 0xC78, 0xE5180001, + 0xC78, 0xE4190001, + 0xC78, 0xE31A0001, + 0xC78, 0xA51B0001, + 0xC78, 0xA41C0001, + 0xC78, 0xA31D0001, + 0xC78, 0x671E0001, + 0xC78, 0x661F0001, + 0xC78, 0x65200001, + 0xC78, 0x64210001, + 0xC78, 0x63220001, + 0xC78, 0x4A230001, + 0xC78, 0x49240001, + 0xC78, 0x48250001, 0xC78, 0x47260001, 0xC78, 0x46270001, 0xC78, 0x45280001, @@ -491,22 +534,22 @@ u32 RTL8723BEAGCTAB_1TARRAY[] = { 0xC78, 0x282D0001, 0xC78, 0x272E0001, 0xC78, 0x262F0001, - 0xC78, 0x25300001, - 0xC78, 0x24310001, - 0xC78, 0x09320001, - 0xC78, 0x08330001, - 0xC78, 0x07340001, - 0xC78, 0x06350001, - 0xC78, 0x05360001, - 0xC78, 0x04370001, - 0xC78, 0x03380001, - 0xC78, 0x02390001, + 0xC78, 0x0A300001, + 0xC78, 0x09310001, + 0xC78, 0x08320001, + 0xC78, 0x07330001, + 0xC78, 0x06340001, + 0xC78, 0x05350001, + 0xC78, 0x04360001, + 0xC78, 0x03370001, + 0xC78, 0x02380001, + 0xC78, 0x01390001, 0xC78, 0x013A0001, - 0xC78, 0x003B0001, - 0xC78, 0x003C0001, - 0xC78, 0x003D0001, - 0xC78, 0x003E0001, - 0xC78, 0x003F0001, + 0xC78, 0x013B0001, + 0xC78, 0x013C0001, + 0xC78, 0x013D0001, + 0xC78, 0x013E0001, + 0xC78, 0x013F0001, 0xC78, 0xFC400001, 0xC78, 0xFB410001, 0xC78, 0xFA420001, @@ -531,47 +574,50 @@ u32 RTL8723BEAGCTAB_1TARRAY[] = { 0xC78, 0xE7550001, 0xC78, 0xE6560001, 0xC78, 0xE5570001, - 0xC78, 0xAA580001, - 0xC78, 0xA9590001, - 0xC78, 0xA85A0001, - 0xC78, 0xA75B0001, - 0xC78, 0xA65C0001, - 0xC78, 0xA55D0001, - 0xC78, 0xA45E0001, - 0xC78, 0x675F0001, - 0xC78, 0x66600001, - 0xC78, 0x65610001, - 0xC78, 0x64620001, - 0xC78, 0x63630001, - 0xC78, 0x62640001, - 0xC78, 0x61650001, + 0xC78, 0xE4580001, + 0xC78, 0xE3590001, + 0xC78, 0xA65A0001, + 0xC78, 0xA55B0001, + 0xC78, 0xA45C0001, + 0xC78, 0xA35D0001, + 0xC78, 0x675E0001, + 0xC78, 0x665F0001, + 0xC78, 0x65600001, + 0xC78, 0x64610001, + 0xC78, 0x63620001, + 0xC78, 0x62630001, + 0xC78, 0x61640001, + 0xC78, 0x48650001, 0xC78, 0x47660001, 0xC78, 0x46670001, 0xC78, 0x45680001, 0xC78, 0x44690001, 0xC78, 0x436A0001, 0xC78, 0x426B0001, - 0xC78, 0x296C0001, - 0xC78, 0x286D0001, - 0xC78, 0x276E0001, - 0xC78, 0x266F0001, - 0xC78, 0x25700001, - 0xC78, 0x24710001, - 0xC78, 0x09720001, - 0xC78, 0x08730001, - 0xC78, 0x07740001, - 0xC78, 0x06750001, - 0xC78, 0x05760001, - 0xC78, 0x04770001, - 0xC78, 0x03780001, - 0xC78, 0x02790001, + 0xC78, 0x286C0001, + 0xC78, 0x276D0001, + 0xC78, 0x266E0001, + 0xC78, 0x256F0001, + 0xC78, 0x24700001, + 0xC78, 0x09710001, + 0xC78, 0x08720001, + 0xC78, 0x07730001, + 0xC78, 0x06740001, + 0xC78, 0x05750001, + 0xC78, 0x04760001, + 0xC78, 0x03770001, + 0xC78, 0x02780001, + 0xC78, 0x01790001, 0xC78, 0x017A0001, - 0xC78, 0x007B0001, - 0xC78, 0x007C0001, - 0xC78, 0x007D0001, - 0xC78, 0x007E0001, - 0xC78, 0x007F0001, + 0xC78, 0x017B0001, + 0xC78, 0x017C0001, + 0xC78, 0x017D0001, + 0xC78, 0x017E0001, + 0xC78, 0x017F0001, 0xC50, 0x69553422, 0xC50, 0x69553420, + 0x824, 0x00390204, }; + +u32 RTL8723BEAGCTAB_1TARRAYLEN = sizeof(RTL8723BEAGCTAB_1TARRAY) / sizeof(u32); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h index dc17001632f7..1deaffe22251 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h @@ -29,15 +29,15 @@ #define __RTL8723BE_TABLE__H_ #include <linux/types.h> 
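[Editor's note] The table.h hunk continuing below drops the hard-coded lengths (388, 36, 206, 196, 260) in favour of the exported u32 lengths that table.c now computes as sizeof(array)/sizeof(u32), so adding rows to a table can no longer leave its length constant stale. The idiomatic kernel spelling of that computation is ARRAY_SIZE(); a minimal kernel-style sketch (illustrative names, not part of the patch):

#include <linux/kernel.h>	/* ARRAY_SIZE() */

u32 example_array[] = { 0x800, 0x80040000, 0x804, 0x00000003 };
u32 example_arraylen = ARRAY_SIZE(example_array);
/* the header then only declares:
 *   extern u32 example_array[];
 *   extern u32 example_arraylen;
 */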
-#define RTL8723BEPHY_REG_1TARRAYLEN 388 +extern u32 RTL8723BEPHY_REG_1TARRAYLEN; extern u32 RTL8723BEPHY_REG_1TARRAY[]; -#define RTL8723BEPHY_REG_ARRAY_PGLEN 36 +extern u32 RTL8723BEPHY_REG_ARRAY_PGLEN; extern u32 RTL8723BEPHY_REG_ARRAY_PG[]; -#define RTL8723BE_RADIOA_1TARRAYLEN 206 +extern u32 RTL8723BE_RADIOA_1TARRAYLEN; extern u32 RTL8723BE_RADIOA_1TARRAY[]; -#define RTL8723BEMAC_1T_ARRAYLEN 196 +extern u32 RTL8723BEMAC_1T_ARRAYLEN; extern u32 RTL8723BEMAC_1T_ARRAY[]; -#define RTL8723BEAGCTAB_1TARRAYLEN 260 +extern u32 RTL8723BEAGCTAB_1TARRAYLEN; extern u32 RTL8723BEAGCTAB_1TARRAY[]; #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c index 03259aa150fd..b84b4fa7b71c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c @@ -1923,6 +1923,7 @@ void rtl8821ae_c2h_content_parsing(struct ieee80211_hw *hw, u8 *tmp_buf) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops; switch (c2h_cmd_id) { case C2H_8812_DBG: @@ -1938,9 +1939,15 @@ void rtl8821ae_c2h_content_parsing(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "[C2H], C2H_8812_BT_INFO!!\n"); if (rtlpriv->cfg->ops->get_btc_status()) - rtlpriv->btcoexist.btc_ops->btc_btinfo_notify(rtlpriv, - tmp_buf, - c2h_cmd_len); + btc_ops->btc_btinfo_notify(rtlpriv, tmp_buf, + c2h_cmd_len); + break; + case C2H_8812_BT_MP: + RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, + "[C2H], C2H_8812_BT_MP!!\n"); + if (rtlpriv->cfg->ops->get_btc_status()) + btc_ops->btc_btmpinfo_notify(rtlpriv, tmp_buf, + c2h_cmd_len); break; default: break; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index 2bc6bace069c..4f73012978e9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c @@ -779,7 +779,7 @@ void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) _rtl8821ae_resume_tx_beacon(hw); break; } case HW_VAR_NAV_UPPER: { - u32 us_nav_upper = ((u32)*val); + u32 us_nav_upper = *(u32 *)val; if (us_nav_upper > HAL_92C_NAV_UPPER_UNIT * 0xFF) { RT_TRACE(rtlpriv, COMP_INIT , DBG_WARNING, @@ -2966,6 +2966,44 @@ static void _rtl8812ae_read_pa_type(struct ieee80211_hw *hw, u8 *hwinfo, } } +static void _rtl8812ae_read_amplifier_type(struct ieee80211_hw *hw, u8 *hwinfo, + bool autoload_fail) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + + u8 ext_type_pa_2g_a = (hwinfo[0xBD] & BIT(2)) >> 2; /* 0xBD[2] */ + u8 ext_type_pa_2g_b = (hwinfo[0xBD] & BIT(6)) >> 6; /* 0xBD[6] */ + u8 ext_type_pa_5g_a = (hwinfo[0xBF] & BIT(2)) >> 2; /* 0xBF[2] */ + u8 ext_type_pa_5g_b = (hwinfo[0xBF] & BIT(6)) >> 6; /* 0xBF[6] */ + /* 0xBD[1:0] */ + u8 ext_type_lna_2g_a = (hwinfo[0xBD] & (BIT(1) | BIT(0))) >> 0; + /* 0xBD[5:4] */ + u8 ext_type_lna_2g_b = (hwinfo[0xBD] & (BIT(5) | BIT(4))) >> 4; + /* 0xBF[1:0] */ + u8 ext_type_lna_5g_a = (hwinfo[0xBF] & (BIT(1) | BIT(0))) >> 0; + /* 0xBF[5:4] */ + u8 ext_type_lna_5g_b = (hwinfo[0xBF] & (BIT(5) | BIT(4))) >> 4; + + _rtl8812ae_read_pa_type(hw, hwinfo, autoload_fail); + + /* [2.4G] Path A and B are both extPA */ + if ((rtlhal->pa_type_2g & (BIT(5) | BIT(4))) == (BIT(5) | BIT(4))) + rtlhal->type_gpa = ext_type_pa_2g_b << 2 | ext_type_pa_2g_a; + + /* [5G] Path A and B are both extPA */ + if ((rtlhal->pa_type_5g & (BIT(1) | BIT(0))) == (BIT(1) | BIT(0))) + 
rtlhal->type_apa = ext_type_pa_5g_b << 2 | ext_type_pa_5g_a; + + /* [2.4G] Path A and B are both extLNA */ + if ((rtlhal->lna_type_2g & (BIT(7) | BIT(3))) == (BIT(7) | BIT(3))) + rtlhal->type_glna = ext_type_lna_2g_b << 2 | ext_type_lna_2g_a; + + /* [5G] Path A and B are both extLNA */ + if ((rtlhal->lna_type_5g & (BIT(7) | BIT(3))) == (BIT(7) | BIT(3))) + rtlhal->type_alna = ext_type_lna_5g_b << 2 | ext_type_lna_5g_a; +} + static void _rtl8821ae_read_pa_type(struct ieee80211_hw *hw, u8 *hwinfo, bool autoload_fail) { @@ -3114,7 +3152,8 @@ static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_ hwinfo); if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { - _rtl8812ae_read_pa_type(hw, hwinfo, rtlefuse->autoload_failflag); + _rtl8812ae_read_amplifier_type(hw, hwinfo, + rtlefuse->autoload_failflag); _rtl8812ae_read_bt_coexist_info_from_hwpg(hw, rtlefuse->autoload_failflag, hwinfo); } else { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c index d71d2776ca03..0894ef48ab87 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c @@ -196,6 +196,8 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->rtlhal.wowlan_firmware = vzalloc(0x8000); if (!rtlpriv->rtlhal.wowlan_firmware) { pr_err("Can't alloc buffer for wowlan fw.\n"); + vfree(rtlpriv->rtlhal.pfirmware); + rtlpriv->rtlhal.pfirmware = NULL; return 1; } @@ -214,16 +216,10 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { - /* Failed to get firmware. Check if old version available */ - fw_name = "rtlwifi/rtl8821aefw.bin"; - pr_info("Using firmware %s\n", fw_name); - err = request_firmware_nowait(THIS_MODULE, 1, fw_name, - rtlpriv->io.dev, GFP_KERNEL, hw, - rtl_fw_cb); - if (err) { - pr_err("Failed to request normal firmware!\n"); - return 1; - } + pr_err("Failed to request normal firmware!\n"); + vfree(rtlpriv->rtlhal.wowlan_firmware); + vfree(rtlpriv->rtlhal.pfirmware); + return 1; } /*load wowlan firmware*/ pr_info("Using firmware %s\n", wowlan_fw_name); @@ -233,6 +229,8 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) rtl_wowlan_fw_cb); if (err) { pr_err("Failed to request wowlan firmware!\n"); + vfree(rtlpriv->rtlhal.wowlan_firmware); + vfree(rtlpriv->rtlhal.pfirmware); return 1; } return 0; @@ -325,6 +323,7 @@ static const struct rtl_hal_cfg rtl8821ae_hal_cfg = { .bar_id = 2, .write_readback = true, .name = "rtl8821ae_pci", + .alt_fw_name = "rtlwifi/rtl8821aefw.bin", .ops = &rtl8821ae_hal_ops, .mod_params = &rtl8821ae_mod_params, .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL, @@ -424,7 +423,7 @@ static const struct rtl_hal_cfg rtl8821ae_hal_cfg = { .maps[RTL_RC_VHT_RATE_2SS_MCS9] = DESC_RATEVHT2SS_MCS9, }; -static struct pci_device_id rtl8821ae_pci_ids[] = { +static const struct pci_device_id rtl8821ae_pci_ids[] = { {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8812, rtl8821ae_hal_cfg)}, {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8821, rtl8821ae_hal_cfg)}, {}, diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 70723e67b7d7..1ab1024330fb 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -314,35 +314,29 @@ enum hardware_type { HARDWARE_TYPE_RTL8192EE, HARDWARE_TYPE_RTL8821AE, HARDWARE_TYPE_RTL8812AE, + HARDWARE_TYPE_RTL8822BE, /* keep it last */ HARDWARE_TYPE_NUM }; -#define IS_HARDWARE_TYPE_8192SU(rtlhal) 
\ - (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SU) -#define IS_HARDWARE_TYPE_8192SE(rtlhal) \ - (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) -#define IS_HARDWARE_TYPE_8192CE(rtlhal) \ - (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CE) -#define IS_HARDWARE_TYPE_8192CU(rtlhal) \ - (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CU) -#define IS_HARDWARE_TYPE_8192DE(rtlhal) \ - (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE) -#define IS_HARDWARE_TYPE_8192DU(rtlhal) \ - (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DU) -#define IS_HARDWARE_TYPE_8723E(rtlhal) \ - (rtlhal->hw_type == HARDWARE_TYPE_RTL8723E) -#define IS_HARDWARE_TYPE_8723U(rtlhal) \ - (rtlhal->hw_type == HARDWARE_TYPE_RTL8723U) -#define IS_HARDWARE_TYPE_8192S(rtlhal) \ -(IS_HARDWARE_TYPE_8192SE(rtlhal) || IS_HARDWARE_TYPE_8192SU(rtlhal)) -#define IS_HARDWARE_TYPE_8192C(rtlhal) \ -(IS_HARDWARE_TYPE_8192CE(rtlhal) || IS_HARDWARE_TYPE_8192CU(rtlhal)) -#define IS_HARDWARE_TYPE_8192D(rtlhal) \ -(IS_HARDWARE_TYPE_8192DE(rtlhal) || IS_HARDWARE_TYPE_8192DU(rtlhal)) -#define IS_HARDWARE_TYPE_8723(rtlhal) \ -(IS_HARDWARE_TYPE_8723E(rtlhal) || IS_HARDWARE_TYPE_8723U(rtlhal)) +#define RTL_HW_TYPE(rtlpriv) (rtl_hal((struct rtl_priv *)rtlpriv)->hw_type) +#define IS_NEW_GENERATION_IC(rtlpriv) \ + (RTL_HW_TYPE(rtlpriv) >= HARDWARE_TYPE_RTL8192EE) +#define IS_HARDWARE_TYPE_8192CE(rtlpriv) \ + (RTL_HW_TYPE(rtlpriv) == HARDWARE_TYPE_RTL8192CE) +#define IS_HARDWARE_TYPE_8812(rtlpriv) \ + (RTL_HW_TYPE(rtlpriv) == HARDWARE_TYPE_RTL8812AE) +#define IS_HARDWARE_TYPE_8821(rtlpriv) \ + (RTL_HW_TYPE(rtlpriv) == HARDWARE_TYPE_RTL8821AE) +#define IS_HARDWARE_TYPE_8723A(rtlpriv) \ + (RTL_HW_TYPE(rtlpriv) == HARDWARE_TYPE_RTL8723AE) +#define IS_HARDWARE_TYPE_8723B(rtlpriv) \ + (RTL_HW_TYPE(rtlpriv) == HARDWARE_TYPE_RTL8723BE) +#define IS_HARDWARE_TYPE_8192E(rtlpriv) \ + (RTL_HW_TYPE(rtlpriv) == HARDWARE_TYPE_RTL8192EE) +#define IS_HARDWARE_TYPE_8822B(rtlpriv) \ + (RTL_HW_TYPE(rtlpriv) == HARDWARE_TYPE_RTL8822BE) #define RX_HAL_IS_CCK_RATE(rxmcs) \ ((rxmcs) == DESC_RATE1M || \ @@ -592,7 +586,7 @@ enum rtl_hal_state { _HAL_STATE_START = 1, }; -enum rtl_desc92_rate { +enum rtl_desc_rate { DESC_RATE1M = 0x00, DESC_RATE2M = 0x01, DESC_RATE5_5M = 0x02, @@ -2477,6 +2471,8 @@ struct rtl_global_var { spinlock_t glb_list_lock; }; +#define IN_4WAY_TIMEOUT_TIME (30 * MSEC_PER_SEC) /* 30 seconds */ + struct rtl_btc_info { u8 bt_type; u8 btcoexist; @@ -2485,6 +2481,7 @@ struct rtl_btc_info { u8 ap_num; bool in_4way; + unsigned long in_4way_ts; }; struct bt_coexist_info { @@ -2558,6 +2555,8 @@ struct rtl_btc_ops { void (*btc_halt_notify) (void); void (*btc_btinfo_notify) (struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length); + void (*btc_btmpinfo_notify)(struct rtl_priv *rtlpriv, + u8 *tmp_buf, u8 length); bool (*btc_is_limited_dig) (struct rtl_priv *rtlpriv); bool (*btc_is_disable_edca_turbo) (struct rtl_priv *rtlpriv); bool (*btc_is_bt_disabled) (struct rtl_priv *rtlpriv); diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c index 68f04a76769e..88a1a56a20ab 100644 --- a/drivers/net/wireless/rsi/rsi_91x_core.c +++ b/drivers/net/wireless/rsi/rsi_91x_core.c @@ -268,11 +268,11 @@ void rsi_core_qos_processor(struct rsi_common *common) break; } - mutex_lock(&common->tx_rxlock); + mutex_lock(&common->tx_lock); status = adapter->check_hw_queue_status(adapter, q_num); if ((status <= 0)) { - mutex_unlock(&common->tx_rxlock); + mutex_unlock(&common->tx_lock); break; } @@ -287,7 +287,7 @@ void rsi_core_qos_processor(struct rsi_common *common) skb = 
rsi_core_dequeue_pkt(common, q_num); if (skb == NULL) { rsi_dbg(ERR_ZONE, "skb null\n"); - mutex_unlock(&common->tx_rxlock); + mutex_unlock(&common->tx_lock); break; } @@ -297,14 +297,14 @@ void rsi_core_qos_processor(struct rsi_common *common) status = rsi_send_data_pkt(common, skb); if (status) { - mutex_unlock(&common->tx_rxlock); + mutex_unlock(&common->tx_lock); break; } common->tx_stats.total_tx_pkt_send[q_num]++; tstamp_2 = jiffies; - mutex_unlock(&common->tx_rxlock); + mutex_unlock(&common->tx_lock); if (time_after(tstamp_2, tstamp_1 + (300 * HZ) / 1000)) schedule(); diff --git a/drivers/net/wireless/rsi/rsi_91x_debugfs.c b/drivers/net/wireless/rsi/rsi_91x_debugfs.c index 4c0a493bd44e..e98eb55c26cc 100644 --- a/drivers/net/wireless/rsi/rsi_91x_debugfs.c +++ b/drivers/net/wireless/rsi/rsi_91x_debugfs.c @@ -130,6 +130,7 @@ static int rsi_stats_read(struct seq_file *seq, void *data) "FSM_COMMON_DEV_PARAMS_SENT", "FSM_BOOT_PARAMS_SENT", "FSM_EEPROM_READ_MAC_ADDR", + "FSM_EEPROM_READ_RF_TYPE", "FSM_RESET_MAC_SENT", "FSM_RADIO_CAPS_SENT", "FSM_BB_RF_PROG_SENT", @@ -138,6 +139,8 @@ static int rsi_stats_read(struct seq_file *seq, void *data) seq_puts(seq, "==> RSI STA DRIVER STATUS <==\n"); seq_puts(seq, "DRIVER_FSM_STATE: "); + BUILD_BUG_ON(ARRAY_SIZE(fsm_state) != NUM_FSM_STATES); + if (common->fsm_state <= FSM_MAC_INIT_DONE) seq_printf(seq, "%s", fsm_state[common->fsm_state]); diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index c2303599c12e..b0a7a1511aee 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -25,54 +25,135 @@ static struct ta_metadata metadata_flash_content[] = { {"rsi/rs9113_wlan_qspi.rps", 0x00010000}, }; -/** - * rsi_send_data_pkt() - This function sends the recieved data packet from - * driver to device. - * @common: Pointer to the driver private structure. - * @skb: Pointer to the socket buffer structure. - * - * Return: status: 0 on success, -1 on failure. 
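[Editor's note] Two smaller changes above are easy to miss: rsi's single tx_rxlock becomes separate tx_lock/rx_lock mutexes, so the transmit and receive paths stop serializing each other, and rsi_stats_read() gains BUILD_BUG_ON(ARRAY_SIZE(fsm_state) != NUM_FSM_STATES) now that FSM_EEPROM_READ_RF_TYPE joins the name table. The latter is a useful pattern whenever a string table shadows an enum; a toy sketch (illustrative names, not part of the patch; the removed rsi_send_data_pkt() kernel-doc resumes below):

#include <linux/bug.h>		/* BUILD_BUG_ON() */
#include <linux/kernel.h>	/* ARRAY_SIZE() */

enum toy_fsm { FSM_CARD_NOT_READY, FSM_MAC_INIT_DONE, NUM_TOY_STATES };

static const char *toy_names[] = {
	"FSM_CARD_NOT_READY",
	"FSM_MAC_INIT_DONE",
};

static const char *toy_state_name(enum toy_fsm s)
{
	/* Fails the build if the table and the enum ever drift apart. */
	BUILD_BUG_ON(ARRAY_SIZE(toy_names) != NUM_TOY_STATES);
	return toy_names[s];
}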
- */ -int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) +/*This function prepares descriptor for given management packet*/ + +static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb) { struct rsi_hw *adapter = common->priv; - struct ieee80211_hdr *tmp_hdr; + struct ieee80211_hdr *wh = NULL; + struct ieee80211_tx_info *info; + struct ieee80211_conf *conf = &adapter->hw->conf; + struct ieee80211_vif *vif = NULL; + struct rsi_mgmt_desc *mgmt_desc; + struct skb_info *tx_params; + struct ieee80211_bss_conf *bss = NULL; + struct xtended_desc *xtend_desc = NULL; + u8 header_size; + u32 dword_align_bytes = 0; + + info = IEEE80211_SKB_CB(skb); + tx_params = (struct skb_info *)info->driver_data; + + /* Update header size */ + header_size = FRAME_DESC_SZ + sizeof(struct xtended_desc); + if (header_size > skb_headroom(skb)) { + rsi_dbg(ERR_ZONE, + "%s: Failed to add extended descriptor\n", + __func__); + return -ENOSPC; + } + skb_push(skb, header_size); + dword_align_bytes = ((unsigned long)skb->data & 0x3f); + if (dword_align_bytes > skb_headroom(skb)) { + rsi_dbg(ERR_ZONE, + "%s: Failed to add dword align\n", __func__); + return -ENOSPC; + } + skb_push(skb, dword_align_bytes); + header_size += dword_align_bytes; + + tx_params->internal_hdr_size = header_size; + memset(&skb->data[0], 0, header_size); + bss = &info->control.vif->bss_conf; + wh = (struct ieee80211_hdr *)&skb->data[header_size]; + vif = adapter->vifs[0]; + + mgmt_desc = (struct rsi_mgmt_desc *)skb->data; + xtend_desc = (struct xtended_desc *)&skb->data[FRAME_DESC_SZ]; + + if (skb->len > MAX_MGMT_PKT_SIZE) { + rsi_dbg(INFO_ZONE, "%s: Dropping mgmt pkt > 512\n", __func__); + return -EINVAL; + } + rsi_set_len_qno(&mgmt_desc->len_qno, (skb->len - FRAME_DESC_SZ), + RSI_WIFI_MGMT_Q); + mgmt_desc->frame_type = TX_DOT11_MGMT; + mgmt_desc->header_len = MIN_802_11_HDR_LEN; + mgmt_desc->xtend_desc_size = header_size - FRAME_DESC_SZ; + mgmt_desc->frame_info |= cpu_to_le16(RATE_INFO_ENABLE); + if (is_broadcast_ether_addr(wh->addr1)) + mgmt_desc->frame_info |= cpu_to_le16(RSI_BROADCAST_PKT); + + mgmt_desc->seq_ctrl = + cpu_to_le16(IEEE80211_SEQ_TO_SN(le16_to_cpu(wh->seq_ctrl))); + if (common->band == NL80211_BAND_2GHZ) + mgmt_desc->rate_info = RSI_RATE_1; + else + mgmt_desc->rate_info = RSI_RATE_6; + + if (conf_is_ht40(conf)) + mgmt_desc->bbp_info = cpu_to_le16(FULL40M_ENABLE); + + if (ieee80211_is_probe_req(wh->frame_control)) { + if (!bss->assoc) { + rsi_dbg(INFO_ZONE, + "%s: blocking mgmt queue\n", __func__); + mgmt_desc->misc_flags = RSI_DESC_REQUIRE_CFM_TO_HOST; + xtend_desc->confirm_frame_type = PROBEREQ_CONFIRM; + common->mgmt_q_block = true; + rsi_dbg(INFO_ZONE, "Mgmt queue blocked\n"); + } + } + + return 0; +} + +/* This function prepares descriptor for given data packet */ +static int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) +{ + struct ieee80211_hdr *wh = NULL; struct ieee80211_tx_info *info; struct skb_info *tx_params; struct ieee80211_bss_conf *bss; - int status; + struct rsi_data_desc *data_desc; + struct xtended_desc *xtend_desc; u8 ieee80211_size = MIN_802_11_HDR_LEN; - u8 extnd_size; - __le16 *frame_desc; + u8 header_size; + u8 vap_id = 0; + u8 dword_align_bytes; u16 seq_num; info = IEEE80211_SKB_CB(skb); bss = &info->control.vif->bss_conf; tx_params = (struct skb_info *)info->driver_data; - if (!bss->assoc) { - status = -EINVAL; - goto err; + header_size = FRAME_DESC_SZ + sizeof(struct xtended_desc); + if (header_size > skb_headroom(skb)) { + rsi_dbg(ERR_ZONE, 
"%s: Unable to send pkt\n", __func__); + return -ENOSPC; } + skb_push(skb, header_size); + dword_align_bytes = ((unsigned long)skb->data & 0x3f); + if (header_size > skb_headroom(skb)) { + rsi_dbg(ERR_ZONE, "%s: Not enough headroom\n", __func__); + return -ENOSPC; + } + skb_push(skb, dword_align_bytes); + header_size += dword_align_bytes; - tmp_hdr = (struct ieee80211_hdr *)&skb->data[0]; - seq_num = (le16_to_cpu(tmp_hdr->seq_ctrl) >> 4); - - extnd_size = ((uintptr_t)skb->data & 0x3); + tx_params->internal_hdr_size = header_size; + data_desc = (struct rsi_data_desc *)skb->data; + memset(data_desc, 0, header_size); - if ((FRAME_DESC_SZ + extnd_size) > skb_headroom(skb)) { - rsi_dbg(ERR_ZONE, "%s: Unable to send pkt\n", __func__); - status = -ENOSPC; - goto err; - } + xtend_desc = (struct xtended_desc *)&skb->data[FRAME_DESC_SZ]; + wh = (struct ieee80211_hdr *)&skb->data[header_size]; + seq_num = (le16_to_cpu(wh->seq_ctrl) >> 4); - skb_push(skb, (FRAME_DESC_SZ + extnd_size)); - frame_desc = (__le16 *)&skb->data[0]; - memset((u8 *)frame_desc, 0, FRAME_DESC_SZ); + data_desc->xtend_desc_size = header_size - FRAME_DESC_SZ; - if (ieee80211_is_data_qos(tmp_hdr->frame_control)) { + if (ieee80211_is_data_qos(wh->frame_control)) { ieee80211_size += 2; - frame_desc[6] |= cpu_to_le16(BIT(12)); + data_desc->mac_flags |= cpu_to_le16(RSI_QOS_ENABLE); } if ((!(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) && @@ -81,43 +162,85 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) ieee80211_size += 4; else ieee80211_size += 8; - frame_desc[6] |= cpu_to_le16(BIT(15)); + data_desc->mac_flags |= cpu_to_le16(RSI_ENCRYPT_PKT); } + rsi_set_len_qno(&data_desc->len_qno, (skb->len - FRAME_DESC_SZ), + RSI_WIFI_DATA_Q); + data_desc->header_len = ieee80211_size; - frame_desc[0] = cpu_to_le16((skb->len - FRAME_DESC_SZ) | - (RSI_WIFI_DATA_Q << 12)); - frame_desc[2] = cpu_to_le16((extnd_size) | (ieee80211_size) << 8); - - if (common->min_rate != 0xffff) { + if (common->min_rate != RSI_RATE_AUTO) { /* Send fixed rate */ - frame_desc[3] = cpu_to_le16(RATE_INFO_ENABLE); - frame_desc[4] = cpu_to_le16(common->min_rate); + data_desc->frame_info = cpu_to_le16(RATE_INFO_ENABLE); + data_desc->rate_info = cpu_to_le16(common->min_rate); if (conf_is_ht40(&common->priv->hw->conf)) - frame_desc[5] = cpu_to_le16(FULL40M_ENABLE); + data_desc->bbp_info = cpu_to_le16(FULL40M_ENABLE); if (common->vif_info[0].sgi) { if (common->min_rate & 0x100) /* Only MCS rates */ - frame_desc[4] |= + data_desc->rate_info |= cpu_to_le16(ENABLE_SHORTGI_RATE); } } - frame_desc[6] |= cpu_to_le16(seq_num & 0xfff); - frame_desc[7] = cpu_to_le16(((tx_params->tid & 0xf) << 4) | - (skb->priority & 0xf) | - (tx_params->sta_id << 8)); + if (skb->protocol == cpu_to_be16(ETH_P_PAE)) { + rsi_dbg(INFO_ZONE, "*** Tx EAPOL ***\n"); + + data_desc->frame_info = cpu_to_le16(RATE_INFO_ENABLE); + if (common->band == NL80211_BAND_5GHZ) + data_desc->rate_info = cpu_to_le16(RSI_RATE_6); + else + data_desc->rate_info = cpu_to_le16(RSI_RATE_1); + data_desc->mac_flags |= cpu_to_le16(RSI_REKEY_PURPOSE); + data_desc->misc_flags |= RSI_FETCH_RETRY_CNT_FRM_HST; +#define EAPOL_RETRY_CNT 15 + xtend_desc->retry_cnt = EAPOL_RETRY_CNT; + } + + data_desc->mac_flags = cpu_to_le16(seq_num & 0xfff); + data_desc->qid_tid = ((skb->priority & 0xf) | + ((tx_params->tid & 0xf) << 4)); + data_desc->sta_id = tx_params->sta_id; + + if ((is_broadcast_ether_addr(wh->addr1)) || + (is_multicast_ether_addr(wh->addr1))) { + data_desc->frame_info = cpu_to_le16(RATE_INFO_ENABLE); + 
data_desc->frame_info |= cpu_to_le16(RSI_BROADCAST_PKT); + data_desc->sta_id = vap_id; + } + + return 0; +} + +/* This function sends received data packet from driver to device */ +int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) +{ + struct rsi_hw *adapter = common->priv; + struct ieee80211_tx_info *info; + struct ieee80211_bss_conf *bss; + int status = -EIO; + + info = IEEE80211_SKB_CB(skb); + bss = &info->control.vif->bss_conf; + + if (!bss->assoc) { + status = -EINVAL; + goto err; + } + + status = rsi_prepare_data_desc(common, skb); + if (status) + goto err; status = adapter->host_intf_ops->write_pkt(common->priv, skb->data, skb->len); if (status) - rsi_dbg(ERR_ZONE, "%s: Failed to write pkt\n", - __func__); + rsi_dbg(ERR_ZONE, "%s: Failed to write pkt\n", __func__); err: ++common->tx_stats.total_tx_pkt_freed[skb->priority]; - rsi_indicate_tx_status(common->priv, skb, status); + rsi_indicate_tx_status(adapter, skb, status); return status; } @@ -133,22 +256,17 @@ int rsi_send_mgmt_pkt(struct rsi_common *common, struct sk_buff *skb) { struct rsi_hw *adapter = common->priv; - struct ieee80211_hdr *wh; struct ieee80211_tx_info *info; - struct ieee80211_bss_conf *bss; - struct ieee80211_hw *hw = adapter->hw; - struct ieee80211_conf *conf = &hw->conf; struct skb_info *tx_params; int status = -E2BIG; - __le16 *msg; u8 extnd_size; - u8 vap_id = 0; info = IEEE80211_SKB_CB(skb); tx_params = (struct skb_info *)info->driver_data; extnd_size = ((uintptr_t)skb->data & 0x3); if (tx_params->flags & INTERNAL_MGMT_PKT) { + skb->data[1] |= BIT(7); /* Immediate Wakeup bit*/ if ((extnd_size) > skb_headroom(skb)) { rsi_dbg(ERR_ZONE, "%s: Unable to send pkt\n", __func__); dev_kfree_skb(skb); @@ -167,52 +285,12 @@ int rsi_send_mgmt_pkt(struct rsi_common *common, return status; } - bss = &info->control.vif->bss_conf; - wh = (struct ieee80211_hdr *)&skb->data[0]; - if (FRAME_DESC_SZ > skb_headroom(skb)) goto err; - skb_push(skb, FRAME_DESC_SZ); - memset(skb->data, 0, FRAME_DESC_SZ); - msg = (__le16 *)skb->data; - - if (skb->len > MAX_MGMT_PKT_SIZE) { - rsi_dbg(INFO_ZONE, "%s: Dropping mgmt pkt > 512\n", __func__); - goto err; - } - - msg[0] = cpu_to_le16((skb->len - FRAME_DESC_SZ) | - (RSI_WIFI_MGMT_Q << 12)); - msg[1] = cpu_to_le16(TX_DOT11_MGMT); - msg[2] = cpu_to_le16(MIN_802_11_HDR_LEN << 8); - msg[3] = cpu_to_le16(RATE_INFO_ENABLE); - msg[6] = cpu_to_le16(le16_to_cpu(wh->seq_ctrl) >> 4); - - if (wh->addr1[0] & BIT(0)) - msg[3] |= cpu_to_le16(RSI_BROADCAST_PKT); - - if (common->band == NL80211_BAND_2GHZ) - msg[4] = cpu_to_le16(RSI_11B_MODE); - else - msg[4] = cpu_to_le16((RSI_RATE_6 & 0x0f) | RSI_11G_MODE); - - if (conf_is_ht40(conf)) { - msg[4] = cpu_to_le16(0xB | RSI_11G_MODE); - msg[5] = cpu_to_le16(0x6); - } - - /* Indicate to firmware to give cfm */ - if ((skb->data[16] == IEEE80211_STYPE_PROBE_REQ) && (!bss->assoc)) { - msg[1] |= cpu_to_le16(BIT(10)); - msg[7] = cpu_to_le16(PROBEREQ_CONFIRM); - common->mgmt_q_block = true; - } - - msg[7] |= cpu_to_le16(vap_id << 8); - - status = adapter->host_intf_ops->write_pkt(common->priv, (u8 *)msg, - skb->len); + rsi_prepare_mgmt_desc(common, skb); + status = adapter->host_intf_ops->write_pkt(common->priv, + (u8 *)skb->data, skb->len); if (status) rsi_dbg(ERR_ZONE, "%s: Failed to write the packet\n", __func__); diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index 021e5ac5f107..c91d6efa7c84 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ 
b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -229,12 +229,20 @@ void rsi_indicate_tx_status(struct rsi_hw *adapter, int status) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct skb_info *tx_params; - memset(info->driver_data, 0, IEEE80211_TX_INFO_DRIVER_DATA_SIZE); + if (!adapter->hw) { + rsi_dbg(ERR_ZONE, "##### No MAC #####\n"); + return; + } if (!status) info->flags |= IEEE80211_TX_STAT_ACK; + tx_params = (struct skb_info *)info->driver_data; + skb_pull(skb, tx_params->internal_hdr_size); + memset(info->driver_data, 0, IEEE80211_TX_INFO_DRIVER_DATA_SIZE); + ieee80211_tx_status_irqsafe(adapter->hw, skb); } @@ -293,6 +301,10 @@ static void rsi_mac80211_stop(struct ieee80211_hw *hw) mutex_lock(&common->mutex); common->iface_down = true; + + /* Block all rx frames */ + rsi_send_rx_filter_frame(common, 0xffff); + mutex_unlock(&common->mutex); } @@ -1151,6 +1163,21 @@ static int rsi_mac80211_get_antenna(struct ieee80211_hw *hw, return 0; } +static int rsi_map_region_code(enum nl80211_dfs_regions region_code) +{ + switch (region_code) { + case NL80211_DFS_FCC: + return RSI_REGION_FCC; + case NL80211_DFS_ETSI: + return RSI_REGION_ETSI; + case NL80211_DFS_JP: + return RSI_REGION_TELEC; + case NL80211_DFS_UNSET: + return RSI_REGION_WORLD; + } + return RSI_REGION_WORLD; +} + static void rsi_reg_notify(struct wiphy *wiphy, struct regulatory_request *request) { @@ -1158,23 +1185,33 @@ static void rsi_reg_notify(struct wiphy *wiphy, struct ieee80211_channel *ch; struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct rsi_hw * adapter = hw->priv; + struct rsi_common *common = adapter->priv; int i; - - sband = wiphy->bands[NL80211_BAND_5GHZ]; - for (i = 0; i < sband->n_channels; i++) { - ch = &sband->channels[i]; - if (ch->flags & IEEE80211_CHAN_DISABLED) - continue; + mutex_lock(&common->mutex); + + rsi_dbg(INFO_ZONE, "country = %s dfs_region = %d\n", + request->alpha2, request->dfs_region); + + if (common->num_supp_bands > 1) { + sband = wiphy->bands[NL80211_BAND_5GHZ]; + + for (i = 0; i < sband->n_channels; i++) { + ch = &sband->channels[i]; + if (ch->flags & IEEE80211_CHAN_DISABLED) + continue; - if (ch->flags & IEEE80211_CHAN_RADAR) - ch->flags |= IEEE80211_CHAN_NO_IR; + if (ch->flags & IEEE80211_CHAN_RADAR) + ch->flags |= IEEE80211_CHAN_NO_IR; + } } + adapter->dfs_region = rsi_map_region_code(request->dfs_region); + rsi_dbg(INFO_ZONE, "RSI region code = %d\n", adapter->dfs_region); - rsi_dbg(INFO_ZONE, - "country = %s dfs_region = %d\n", - request->alpha2, request->dfs_region); - adapter->dfs_region = request->dfs_region; + adapter->country[0] = request->alpha2[0]; + adapter->country[1] = request->alpha2[1]; + + mutex_unlock(&common->mutex); } static struct ieee80211_ops mac80211_ops = { diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c index f1cde0ca81f9..bb0febb17be0 100644 --- a/drivers/net/wireless/rsi/rsi_91x_main.c +++ b/drivers/net/wireless/rsi/rsi_91x_main.c @@ -220,7 +220,8 @@ struct rsi_hw *rsi_91x_init(void) rsi_init_event(&common->tx_thread.event); mutex_init(&common->mutex); - mutex_init(&common->tx_rxlock); + mutex_init(&common->tx_lock); + mutex_init(&common->rx_lock); if (rsi_create_kthread(common, &common->tx_thread, diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index d4d365b5d2d6..1fba7bba3a10 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -230,6 +230,8 @@ static void rsi_set_default_parameters(struct 
rsi_common *common) common->rf_power_val = 0; /* Default 1.9V */ common->wlan_rf_power_mode = 0; common->obm_ant_sel_val = 2; + common->beacon_interval = RSI_BEACON_INTERVAL; + common->dtim_cnt = RSI_DTIM_COUNT; } /** @@ -266,11 +268,14 @@ static int rsi_send_internal_mgmt_frame(struct rsi_common *common, struct sk_buff *skb) { struct skb_info *tx_params; + struct rsi_cmd_desc *desc; if (skb == NULL) { rsi_dbg(ERR_ZONE, "%s: Unable to allocate skb\n", __func__); return -ENOMEM; } + desc->desc_dword0.len_qno |= cpu_to_le16(DESC_IMMEDIATE_WAKEUP); + skb->priority = MGMT_SOFT_Q; tx_params = (struct skb_info *)&IEEE80211_SKB_CB(skb)->driver_data; tx_params->flags |= INTERNAL_MGMT_PKT; skb_queue_tail(&common->tx_queue[MGMT_SOFT_Q], skb); @@ -298,10 +303,11 @@ static int rsi_load_radio_caps(struct rsi_common *common) 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0}; struct sk_buff *skb; + u16 frame_len = sizeof(struct rsi_radio_caps); rsi_dbg(INFO_ZONE, "%s: Sending rate symbol req frame\n", __func__); - skb = dev_alloc_skb(sizeof(struct rsi_radio_caps)); + skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", @@ -309,37 +315,40 @@ static int rsi_load_radio_caps(struct rsi_common *common) return -ENOMEM; } - memset(skb->data, 0, sizeof(struct rsi_radio_caps)); + memset(skb->data, 0, frame_len); radio_caps = (struct rsi_radio_caps *)skb->data; - radio_caps->desc_word[1] = cpu_to_le16(RADIO_CAPABILITIES); - radio_caps->desc_word[4] = cpu_to_le16(RSI_RF_TYPE << 8); + radio_caps->desc_dword0.frame_type = RADIO_CAPABILITIES; + radio_caps->channel_num = common->channel; + radio_caps->rf_model = RSI_RF_TYPE; if (common->channel_width == BW_40MHZ) { - radio_caps->desc_word[7] |= cpu_to_le16(RSI_LMAC_CLOCK_80MHZ); - radio_caps->desc_word[7] |= cpu_to_le16(RSI_ENABLE_40MHZ); + radio_caps->radio_cfg_info = RSI_LMAC_CLOCK_80MHZ; + radio_caps->radio_cfg_info |= RSI_ENABLE_40MHZ; if (common->fsm_state == FSM_MAC_INIT_DONE) { struct ieee80211_hw *hw = adapter->hw; struct ieee80211_conf *conf = &hw->conf; + if (conf_is_ht40_plus(conf)) { - radio_caps->desc_word[5] = - cpu_to_le16(LOWER_20_ENABLE); - radio_caps->desc_word[5] |= - cpu_to_le16(LOWER_20_ENABLE >> 12); + radio_caps->radio_cfg_info = + RSI_CMDDESC_LOWER_20_ENABLE; + radio_caps->radio_info = + RSI_CMDDESC_LOWER_20_ENABLE; } else if (conf_is_ht40_minus(conf)) { - radio_caps->desc_word[5] = - cpu_to_le16(UPPER_20_ENABLE); - radio_caps->desc_word[5] |= - cpu_to_le16(UPPER_20_ENABLE >> 12); + radio_caps->radio_cfg_info = + RSI_CMDDESC_UPPER_20_ENABLE; + radio_caps->radio_info = + RSI_CMDDESC_UPPER_20_ENABLE; } else { - radio_caps->desc_word[5] = - cpu_to_le16(BW_40MHZ << 12); - radio_caps->desc_word[5] |= - cpu_to_le16(FULL40M_ENABLE); + radio_caps->radio_cfg_info = + RSI_CMDDESC_40MHZ; + radio_caps->radio_info = + RSI_CMDDESC_FULL_40_ENABLE; } } } + radio_caps->radio_info |= radio_id; radio_caps->sifs_tx_11n = cpu_to_le16(SIFS_TX_11N_VALUE); radio_caps->sifs_tx_11b = cpu_to_le16(SIFS_TX_11B_VALUE); @@ -348,8 +357,6 @@ static int rsi_load_radio_caps(struct rsi_common *common) radio_caps->cck_ack_tout = cpu_to_le16(CCK_ACK_TOUT_VALUE); radio_caps->preamble_type = cpu_to_le16(LONG_PREAMBLE); - radio_caps->desc_word[7] |= cpu_to_le16(radio_id << 8); - for (ii = 0; ii < MAX_HW_QUEUES; ii++) { radio_caps->qos_params[ii].cont_win_min_q = cpu_to_le16(3); radio_caps->qos_params[ii].cont_win_max_q = cpu_to_le16(0x3f); @@ -357,7 +364,7 @@ static int rsi_load_radio_caps(struct rsi_common *common) 
radio_caps->qos_params[ii].txop_q = 0; } - for (ii = 0; ii < MAX_HW_QUEUES - 4; ii++) { + for (ii = 0; ii < NUM_EDCA_QUEUES; ii++) { radio_caps->qos_params[ii].cont_win_min_q = cpu_to_le16(common->edca_params[ii].cw_min); radio_caps->qos_params[ii].cont_win_max_q = @@ -368,17 +375,19 @@ static int rsi_load_radio_caps(struct rsi_common *common) cpu_to_le16(common->edca_params[ii].txop); } + radio_caps->qos_params[BROADCAST_HW_Q].txop_q = cpu_to_le16(0xffff); + radio_caps->qos_params[MGMT_HW_Q].txop_q = 0; + radio_caps->qos_params[BEACON_HW_Q].txop_q = cpu_to_le16(0xffff); + memcpy(&common->rate_pwr[0], &gc[0], 40); for (ii = 0; ii < 20; ii++) radio_caps->gcpd_per_rate[inx++] = cpu_to_le16(common->rate_pwr[ii] & 0x00FF); - radio_caps->desc_word[0] = cpu_to_le16((sizeof(struct rsi_radio_caps) - - FRAME_DESC_SZ) | - (RSI_WIFI_MGMT_Q << 12)); - + rsi_set_len_qno(&radio_caps->desc_dword0.len_qno, + (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); - skb_put(skb, (sizeof(struct rsi_radio_caps))); + skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } @@ -394,8 +403,7 @@ static int rsi_load_radio_caps(struct rsi_common *common) */ static int rsi_mgmt_pkt_to_core(struct rsi_common *common, u8 *msg, - s32 msg_len, - u8 type) + s32 msg_len) { struct rsi_hw *adapter = common->priv; struct ieee80211_tx_info *info; @@ -403,37 +411,30 @@ static int rsi_mgmt_pkt_to_core(struct rsi_common *common, u8 pad_bytes = msg[4]; struct sk_buff *skb; - if (type == RX_DOT11_MGMT) { - if (!adapter->sc_nvifs) - return -ENOLINK; + if (!adapter->sc_nvifs) + return -ENOLINK; - msg_len -= pad_bytes; - if (msg_len <= 0) { - rsi_dbg(MGMT_RX_ZONE, - "%s: Invalid rx msg of len = %d\n", - __func__, msg_len); - return -EINVAL; - } + msg_len -= pad_bytes; + if (msg_len <= 0) { + rsi_dbg(MGMT_RX_ZONE, + "%s: Invalid rx msg of len = %d\n", + __func__, msg_len); + return -EINVAL; + } - skb = dev_alloc_skb(msg_len); - if (!skb) { - rsi_dbg(ERR_ZONE, "%s: Failed to allocate skb\n", - __func__); - return -ENOMEM; - } + skb = dev_alloc_skb(msg_len); + if (!skb) + return -ENOMEM; - skb_put_data(skb, - (u8 *)(msg + FRAME_DESC_SZ + pad_bytes), - msg_len); + skb_put_data(skb, + (u8 *)(msg + FRAME_DESC_SZ + pad_bytes), + msg_len); - info = IEEE80211_SKB_CB(skb); - rx_params = (struct skb_info *)info->driver_data; - rx_params->rssi = rsi_get_rssi(msg); - rx_params->channel = rsi_get_channel(msg); - rsi_indicate_pkt_to_os(common, skb); - } else { - rsi_dbg(MGMT_TX_ZONE, "%s: Internal Packet\n", __func__); - } + info = IEEE80211_SKB_CB(skb); + rx_params = (struct skb_info *)info->driver_data; + rx_params->rssi = rsi_get_rssi(msg); + rx_params->channel = rsi_get_channel(msg); + rsi_indicate_pkt_to_os(common, skb); return 0; } @@ -461,10 +462,11 @@ static int rsi_hal_send_sta_notify_frame(struct rsi_common *common, struct rsi_peer_notify *peer_notify; u16 vap_id = 0; int status; + u16 frame_len = sizeof(struct rsi_peer_notify); rsi_dbg(MGMT_TX_ZONE, "%s: Sending sta notify frame\n", __func__); - skb = dev_alloc_skb(sizeof(struct rsi_peer_notify)); + skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", @@ -472,7 +474,7 @@ static int rsi_hal_send_sta_notify_frame(struct rsi_common *common, return -ENOMEM; } - memset(skb->data, 0, sizeof(struct rsi_peer_notify)); + memset(skb->data, 0, frame_len); peer_notify = (struct rsi_peer_notify *)skb->data; peer_notify->command = cpu_to_le16(opmode << 1); @@ -490,16 +492,16 @@ static int rsi_hal_send_sta_notify_frame(struct rsi_common *common, 
peer_notify->command |= cpu_to_le16((aid & 0xfff) << 4); ether_addr_copy(peer_notify->mac_addr, bssid); - + peer_notify->mpdu_density = cpu_to_le16(RSI_MPDU_DENSITY); peer_notify->sta_flags = cpu_to_le32((qos_enable) ? 1 : 0); - peer_notify->desc_word[0] = - cpu_to_le16((sizeof(struct rsi_peer_notify) - FRAME_DESC_SZ) | - (RSI_WIFI_MGMT_Q << 12)); - peer_notify->desc_word[1] = cpu_to_le16(PEER_NOTIFY); - peer_notify->desc_word[7] |= cpu_to_le16(vap_id << 8); + rsi_set_len_qno(&peer_notify->desc.desc_dword0.len_qno, + (frame_len - FRAME_DESC_SZ), + RSI_WIFI_MGMT_Q); + peer_notify->desc.desc_dword0.frame_type = PEER_NOTIFY; + peer_notify->desc.desc_dword3.sta_id = vap_id; - skb_put(skb, sizeof(struct rsi_peer_notify)); + skb_put(skb, frame_len); status = rsi_send_internal_mgmt_frame(common, skb); @@ -528,10 +530,11 @@ int rsi_send_aggregation_params_frame(struct rsi_common *common, u8 event) { struct sk_buff *skb = NULL; - struct rsi_mac_frame *mgmt_frame; + struct rsi_aggr_params *aggr_params; u8 peer_id = 0; + u16 frame_len = sizeof(struct rsi_aggr_params); - skb = dev_alloc_skb(FRAME_DESC_SZ); + skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", @@ -539,37 +542,29 @@ int rsi_send_aggregation_params_frame(struct rsi_common *common, return -ENOMEM; } - memset(skb->data, 0, FRAME_DESC_SZ); - mgmt_frame = (struct rsi_mac_frame *)skb->data; + memset(skb->data, 0, frame_len); + aggr_params = (struct rsi_aggr_params *)skb->data; rsi_dbg(MGMT_TX_ZONE, "%s: Sending AMPDU indication frame\n", __func__); - mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12); - mgmt_frame->desc_word[1] = cpu_to_le16(AMPDU_IND); + rsi_set_len_qno(&aggr_params->desc_dword0.len_qno, 0, RSI_WIFI_MGMT_Q); + aggr_params->desc_dword0.frame_type = AMPDU_IND; + aggr_params->aggr_params = tid & RSI_AGGR_PARAMS_TID_MASK; + aggr_params->peer_id = peer_id; if (event == STA_TX_ADDBA_DONE) { - mgmt_frame->desc_word[4] = cpu_to_le16(ssn); - mgmt_frame->desc_word[5] = cpu_to_le16(buf_size); - mgmt_frame->desc_word[7] = - cpu_to_le16((tid | (START_AMPDU_AGGR << 4) | (peer_id << 8))); + aggr_params->seq_start = cpu_to_le16(ssn); + aggr_params->baw_size = cpu_to_le16(buf_size); + aggr_params->aggr_params |= RSI_AGGR_PARAMS_START; } else if (event == STA_RX_ADDBA_DONE) { - mgmt_frame->desc_word[4] = cpu_to_le16(ssn); - mgmt_frame->desc_word[7] = cpu_to_le16(tid | - (START_AMPDU_AGGR << 4) | - (RX_BA_INDICATION << 5) | - (peer_id << 8)); - } else if (event == STA_TX_DELBA) { - mgmt_frame->desc_word[7] = cpu_to_le16(tid | - (STOP_AMPDU_AGGR << 4) | - (peer_id << 8)); + aggr_params->seq_start = cpu_to_le16(ssn); + aggr_params->aggr_params |= (RSI_AGGR_PARAMS_START | + RSI_AGGR_PARAMS_RX_AGGR); } else if (event == STA_RX_DELBA) { - mgmt_frame->desc_word[7] = cpu_to_le16(tid | - (STOP_AMPDU_AGGR << 4) | - (RX_BA_INDICATION << 5) | - (peer_id << 8)); + aggr_params->aggr_params |= RSI_AGGR_PARAMS_RX_AGGR; } - skb_put(skb, FRAME_DESC_SZ); + skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } @@ -584,34 +579,36 @@ int rsi_send_aggregation_params_frame(struct rsi_common *common, static int rsi_program_bb_rf(struct rsi_common *common) { struct sk_buff *skb; - struct rsi_mac_frame *mgmt_frame; + struct rsi_bb_rf_prog *bb_rf_prog; + u16 frame_len = sizeof(struct rsi_bb_rf_prog); rsi_dbg(MGMT_TX_ZONE, "%s: Sending program BB/RF frame\n", __func__); - skb = dev_alloc_skb(FRAME_DESC_SZ); + skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in 
allocation of skb\n", __func__); return -ENOMEM; } - memset(skb->data, 0, FRAME_DESC_SZ); - mgmt_frame = (struct rsi_mac_frame *)skb->data; + memset(skb->data, 0, frame_len); + bb_rf_prog = (struct rsi_bb_rf_prog *)skb->data; - mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12); - mgmt_frame->desc_word[1] = cpu_to_le16(BBP_PROG_IN_TA); - mgmt_frame->desc_word[4] = cpu_to_le16(common->endpoint); + rsi_set_len_qno(&bb_rf_prog->desc_dword0.len_qno, 0, RSI_WIFI_MGMT_Q); + bb_rf_prog->desc_dword0.frame_type = BBP_PROG_IN_TA; + bb_rf_prog->endpoint = common->endpoint; + bb_rf_prog->rf_power_mode = common->wlan_rf_power_mode; if (common->rf_reset) { - mgmt_frame->desc_word[7] = cpu_to_le16(RF_RESET_ENABLE); + bb_rf_prog->flags = cpu_to_le16(RF_RESET_ENABLE); rsi_dbg(MGMT_TX_ZONE, "%s: ===> RF RESET REQUEST SENT <===\n", __func__); common->rf_reset = 0; } common->bb_rf_prog_count = 1; - mgmt_frame->desc_word[7] |= cpu_to_le16(PUT_BBP_RESET | - BBP_REG_WRITE | (RSI_RF_TYPE << 4)); - skb_put(skb, FRAME_DESC_SZ); + bb_rf_prog->flags |= cpu_to_le16(PUT_BBP_RESET | BBP_REG_WRITE | + (RSI_RF_TYPE << 4)); + skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } @@ -632,59 +629,61 @@ int rsi_set_vap_capabilities(struct rsi_common *common, struct rsi_hw *adapter = common->priv; struct ieee80211_hw *hw = adapter->hw; struct ieee80211_conf *conf = &hw->conf; + u16 frame_len = sizeof(struct rsi_vap_caps); u16 vap_id = 0; rsi_dbg(MGMT_TX_ZONE, "%s: Sending VAP capabilities frame\n", __func__); - skb = dev_alloc_skb(sizeof(struct rsi_vap_caps)); + skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } - memset(skb->data, 0, sizeof(struct rsi_vap_caps)); + memset(skb->data, 0, frame_len); vap_caps = (struct rsi_vap_caps *)skb->data; - vap_caps->desc_word[0] = cpu_to_le16((sizeof(struct rsi_vap_caps) - - FRAME_DESC_SZ) | - (RSI_WIFI_MGMT_Q << 12)); - vap_caps->desc_word[1] = cpu_to_le16(VAP_CAPABILITIES); - vap_caps->desc_word[2] = cpu_to_le16(vap_status << 8); - vap_caps->desc_word[4] = cpu_to_le16(mode | - (common->channel_width << 8)); - vap_caps->desc_word[7] = cpu_to_le16((vap_id << 8) | - (common->mac_id << 4) | - common->radio_id); + rsi_set_len_qno(&vap_caps->desc_dword0.len_qno, + (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); + vap_caps->desc_dword0.frame_type = VAP_CAPABILITIES; + vap_caps->status = vap_status; + vap_caps->vif_type = mode; + vap_caps->channel_bw = common->channel_width; + vap_caps->vap_id = vap_id; + vap_caps->radioid_macid = ((common->mac_id & 0xf) << 4) | + (common->radio_id & 0xf); memcpy(vap_caps->mac_addr, common->mac_addr, IEEE80211_ADDR_LEN); vap_caps->keep_alive_period = cpu_to_le16(90); vap_caps->frag_threshold = cpu_to_le16(IEEE80211_MAX_FRAG_THRESHOLD); vap_caps->rts_threshold = cpu_to_le16(common->rts_threshold); - vap_caps->default_mgmt_rate = cpu_to_le32(RSI_RATE_6); if (common->band == NL80211_BAND_5GHZ) { - vap_caps->default_ctrl_rate = cpu_to_le32(RSI_RATE_6); - if (conf_is_ht40(&common->priv->hw->conf)) { - vap_caps->default_ctrl_rate |= - cpu_to_le32(FULL40M_ENABLE << 16); - } + vap_caps->default_ctrl_rate = cpu_to_le16(RSI_RATE_6); + vap_caps->default_mgmt_rate = cpu_to_le32(RSI_RATE_6); } else { - vap_caps->default_ctrl_rate = cpu_to_le32(RSI_RATE_1); + vap_caps->default_ctrl_rate = cpu_to_le16(RSI_RATE_1); + vap_caps->default_mgmt_rate = cpu_to_le32(RSI_RATE_1); + } + if (conf_is_ht40(conf)) { if (conf_is_ht40_minus(conf)) - vap_caps->default_ctrl_rate |= - 
cpu_to_le32(UPPER_20_ENABLE << 16); + vap_caps->ctrl_rate_flags = + cpu_to_le16(UPPER_20_ENABLE); else if (conf_is_ht40_plus(conf)) - vap_caps->default_ctrl_rate |= - cpu_to_le32(LOWER_20_ENABLE << 16); + vap_caps->ctrl_rate_flags = + cpu_to_le16(LOWER_20_ENABLE); + else + vap_caps->ctrl_rate_flags = + cpu_to_le16(FULL40M_ENABLE); } vap_caps->default_data_rate = 0; - vap_caps->beacon_interval = cpu_to_le16(200); - vap_caps->dtim_period = cpu_to_le16(4); + vap_caps->beacon_interval = cpu_to_le16(common->beacon_interval); + vap_caps->dtim_period = cpu_to_le16(common->dtim_cnt); - skb_put(skb, sizeof(*vap_caps)); + skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } @@ -710,53 +709,55 @@ int rsi_hal_load_key(struct rsi_common *common, struct sk_buff *skb = NULL; struct rsi_set_key *set_key; u16 key_descriptor = 0; + u16 frame_len = sizeof(struct rsi_set_key); rsi_dbg(MGMT_TX_ZONE, "%s: Sending load key frame\n", __func__); - skb = dev_alloc_skb(sizeof(struct rsi_set_key)); + skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } - memset(skb->data, 0, sizeof(struct rsi_set_key)); + memset(skb->data, 0, frame_len); set_key = (struct rsi_set_key *)skb->data; + if (key_type == RSI_GROUP_KEY) + key_descriptor = RSI_KEY_TYPE_BROADCAST; if ((cipher == WLAN_CIPHER_SUITE_WEP40) || (cipher == WLAN_CIPHER_SUITE_WEP104)) { - key_len += 1; - key_descriptor |= BIT(2); + key_id = 0; + key_descriptor |= RSI_WEP_KEY; if (key_len >= 13) - key_descriptor |= BIT(3); + key_descriptor |= RSI_WEP_KEY_104; } else if (cipher != KEY_TYPE_CLEAR) { - key_descriptor |= BIT(4); - if (key_type == RSI_PAIRWISE_KEY) - key_id = 0; + key_descriptor |= RSI_CIPHER_WPA; if (cipher == WLAN_CIPHER_SUITE_TKIP) - key_descriptor |= BIT(5); + key_descriptor |= RSI_CIPHER_TKIP; } - key_descriptor |= (key_type | BIT(13) | (key_id << 14)); - - set_key->desc_word[0] = cpu_to_le16((sizeof(struct rsi_set_key) - - FRAME_DESC_SZ) | - (RSI_WIFI_MGMT_Q << 12)); - set_key->desc_word[1] = cpu_to_le16(SET_KEY_REQ); - set_key->desc_word[4] = cpu_to_le16(key_descriptor); - - if ((cipher == WLAN_CIPHER_SUITE_WEP40) || - (cipher == WLAN_CIPHER_SUITE_WEP104)) { - memcpy(&set_key->key[key_id][1], - data, - key_len * 2); + key_descriptor |= RSI_PROTECT_DATA_FRAMES; + key_descriptor |= ((key_id << RSI_KEY_ID_OFFSET) & RSI_KEY_ID_MASK); + + rsi_set_len_qno(&set_key->desc_dword0.len_qno, + (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); + set_key->desc_dword0.frame_type = SET_KEY_REQ; + set_key->key_desc = cpu_to_le16(key_descriptor); + + if (data) { + if ((cipher == WLAN_CIPHER_SUITE_WEP40) || + (cipher == WLAN_CIPHER_SUITE_WEP104)) { + memcpy(&set_key->key[key_id][1], data, key_len * 2); + } else { + memcpy(&set_key->key[0][0], data, key_len); + } + memcpy(set_key->tx_mic_key, &data[16], 8); + memcpy(set_key->rx_mic_key, &data[24], 8); } else { - memcpy(&set_key->key[0][0], data, key_len); + memset(&set_key[FRAME_DESC_SZ], 0, frame_len - FRAME_DESC_SZ); } - memcpy(set_key->tx_mic_key, &data[16], 8); - memcpy(set_key->rx_mic_key, &data[24], 8); - - skb_put(skb, sizeof(struct rsi_set_key)); + skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } @@ -970,12 +971,13 @@ int rsi_set_channel(struct rsi_common *common, struct ieee80211_channel *channel) { struct sk_buff *skb = NULL; - struct rsi_mac_frame *mgmt_frame; + struct rsi_chan_config *chan_cfg; + u16 frame_len = sizeof(struct rsi_chan_config); rsi_dbg(MGMT_TX_ZONE, "%s: Sending scan req 
frame\n", __func__); - skb = dev_alloc_skb(FRAME_DESC_SZ); + skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); @@ -986,37 +988,33 @@ int rsi_set_channel(struct rsi_common *common, dev_kfree_skb(skb); return 0; } - memset(skb->data, 0, FRAME_DESC_SZ); - mgmt_frame = (struct rsi_mac_frame *)skb->data; - - mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12); - mgmt_frame->desc_word[1] = cpu_to_le16(SCAN_REQUEST); - mgmt_frame->desc_word[4] = cpu_to_le16(channel->hw_value); - - mgmt_frame->desc_word[4] |= - cpu_to_le16(((char)(channel->max_antenna_gain)) << 8); - mgmt_frame->desc_word[5] = - cpu_to_le16((char)(channel->max_antenna_gain)); - - mgmt_frame->desc_word[7] = cpu_to_le16(PUT_BBP_RESET | - BBP_REG_WRITE | - (RSI_RF_TYPE << 4)); - - if (!(channel->flags & IEEE80211_CHAN_NO_IR) && - !(channel->flags & IEEE80211_CHAN_RADAR)) { + memset(skb->data, 0, frame_len); + chan_cfg = (struct rsi_chan_config *)skb->data; + + rsi_set_len_qno(&chan_cfg->desc_dword0.len_qno, 0, RSI_WIFI_MGMT_Q); + chan_cfg->desc_dword0.frame_type = SCAN_REQUEST; + chan_cfg->channel_number = channel->hw_value; + chan_cfg->antenna_gain_offset_2g = channel->max_antenna_gain; + chan_cfg->antenna_gain_offset_5g = channel->max_antenna_gain; + chan_cfg->region_rftype = (RSI_RF_TYPE & 0xf) << 4; + + if ((channel->flags & IEEE80211_CHAN_NO_IR) || + (channel->flags & IEEE80211_CHAN_RADAR)) { + chan_cfg->antenna_gain_offset_2g |= RSI_CHAN_RADAR; + } else { if (common->tx_power < channel->max_power) - mgmt_frame->desc_word[6] = cpu_to_le16(common->tx_power); + chan_cfg->tx_power = cpu_to_le16(common->tx_power); else - mgmt_frame->desc_word[6] = cpu_to_le16(channel->max_power); + chan_cfg->tx_power = cpu_to_le16(channel->max_power); } - mgmt_frame->desc_word[7] = cpu_to_le16(common->priv->dfs_region); + chan_cfg->region_rftype |= (common->priv->dfs_region & 0xf); if (common->channel_width == BW_40MHZ) - mgmt_frame->desc_word[5] |= cpu_to_le16(0x1 << 8); + chan_cfg->channel_width = 0x1; common->channel = channel->hw_value; - skb_put(skb, FRAME_DESC_SZ); + skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } @@ -1201,6 +1199,9 @@ static int rsi_send_auto_rate_request(struct rsi_common *common) conf_is_ht40(&common->priv->hw->conf)) auto_rate->supported_rates[ii++] = cpu_to_le16(rsi_mcsrates[kk] | BIT(9)); + else + auto_rate->supported_rates[ii++] = + cpu_to_le16(rsi_mcsrates[kk]); auto_rate->supported_rates[ii] = cpu_to_le16(rsi_mcsrates[kk--]); } @@ -1249,6 +1250,7 @@ void rsi_inform_bss_status(struct rsi_common *common, u16 aid) { if (status) { + common->hw_data_qs_blocked = true; rsi_hal_send_sta_notify_frame(common, RSI_IFTYPE_STATION, STA_CONNECTED, @@ -1257,13 +1259,17 @@ void rsi_inform_bss_status(struct rsi_common *common, aid); if (common->min_rate == 0xffff) rsi_send_auto_rate_request(common); + if (!rsi_send_block_unblock_frame(common, false)) + common->hw_data_qs_blocked = false; } else { + common->hw_data_qs_blocked = true; rsi_hal_send_sta_notify_frame(common, RSI_IFTYPE_STATION, STA_DISCONNECTED, bssid, qos_enable, aid); + rsi_send_block_unblock_frame(common, true); } } @@ -1276,7 +1282,8 @@ void rsi_inform_bss_status(struct rsi_common *common, */ static int rsi_eeprom_read(struct rsi_common *common) { - struct rsi_mac_frame *mgmt_frame; + struct rsi_eeprom_read_frame *mgmt_frame; + struct rsi_hw *adapter = common->priv; struct sk_buff *skb; rsi_dbg(MGMT_TX_ZONE, "%s: Sending EEPROM read req frame\n", __func__); @@ -1289,18 
+1296,21 @@ static int rsi_eeprom_read(struct rsi_common *common) } memset(skb->data, 0, FRAME_DESC_SZ); - mgmt_frame = (struct rsi_mac_frame *)skb->data; + mgmt_frame = (struct rsi_eeprom_read_frame *)skb->data; /* FrameType */ - mgmt_frame->desc_word[1] = cpu_to_le16(EEPROM_READ_TYPE); - mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12); + rsi_set_len_qno(&mgmt_frame->len_qno, 0, RSI_WIFI_MGMT_Q); + mgmt_frame->pkt_type = EEPROM_READ; + /* Number of bytes to read */ - mgmt_frame->desc_word[3] = cpu_to_le16(ETH_ALEN + - WLAN_MAC_MAGIC_WORD_LEN + - WLAN_HOST_MODE_LEN + - WLAN_FW_VERSION_LEN); + mgmt_frame->pkt_info = + cpu_to_le32((adapter->eeprom.length << RSI_EEPROM_LEN_OFFSET) & + RSI_EEPROM_LEN_MASK); + mgmt_frame->pkt_info |= cpu_to_le32((3 << RSI_EEPROM_HDR_SIZE_OFFSET) & + RSI_EEPROM_HDR_SIZE_MASK); + /* Address to read */ - mgmt_frame->desc_word[4] = cpu_to_le16(WLAN_MAC_EEPROM_ADDR); + mgmt_frame->eeprom_offset = cpu_to_le32(adapter->eeprom.offset); skb_put(skb, FRAME_DESC_SZ); @@ -1317,7 +1327,7 @@ static int rsi_eeprom_read(struct rsi_common *common) */ int rsi_send_block_unblock_frame(struct rsi_common *common, bool block_event) { - struct rsi_mac_frame *mgmt_frame; + struct rsi_block_unblock_data *mgmt_frame; struct sk_buff *skb; rsi_dbg(MGMT_TX_ZONE, "%s: Sending block/unblock frame\n", __func__); @@ -1330,23 +1340,25 @@ int rsi_send_block_unblock_frame(struct rsi_common *common, bool block_event) } memset(skb->data, 0, FRAME_DESC_SZ); - mgmt_frame = (struct rsi_mac_frame *)skb->data; + mgmt_frame = (struct rsi_block_unblock_data *)skb->data; - mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12); - mgmt_frame->desc_word[1] = cpu_to_le16(BLOCK_HW_QUEUE); + rsi_set_len_qno(&mgmt_frame->desc_dword0.len_qno, 0, RSI_WIFI_MGMT_Q); + mgmt_frame->desc_dword0.frame_type = BLOCK_HW_QUEUE; + mgmt_frame->host_quiet_info = QUIET_INFO_VALID; if (block_event) { rsi_dbg(INFO_ZONE, "blocking the data qs\n"); - mgmt_frame->desc_word[4] = cpu_to_le16(0xf); + mgmt_frame->block_q_bitmap = cpu_to_le16(0xf); + mgmt_frame->block_q_bitmap |= cpu_to_le16(0xf << 4); } else { rsi_dbg(INFO_ZONE, "unblocking the data qs\n"); - mgmt_frame->desc_word[5] = cpu_to_le16(0xf); + mgmt_frame->unblock_q_bitmap = cpu_to_le16(0xf); + mgmt_frame->unblock_q_bitmap |= cpu_to_le16(0xf << 4); } skb_put(skb, FRAME_DESC_SZ); return rsi_send_internal_mgmt_frame(common, skb); - } /** @@ -1426,19 +1438,25 @@ int rsi_set_antenna(struct rsi_common *common, u8 antenna) static int rsi_handle_ta_confirm_type(struct rsi_common *common, u8 *msg) { + struct rsi_hw *adapter = common->priv; u8 sub_type = (msg[15] & 0xff); + u16 msg_len = ((u16 *)msg)[0] & 0xfff; + u8 offset; switch (sub_type) { case BOOTUP_PARAMS_REQUEST: rsi_dbg(FSM_ZONE, "%s: Boot up params confirm received\n", __func__); if (common->fsm_state == FSM_BOOT_PARAMS_SENT) { + adapter->eeprom.length = (IEEE80211_ADDR_LEN + + WLAN_MAC_MAGIC_WORD_LEN + + WLAN_HOST_MODE_LEN); + adapter->eeprom.offset = WLAN_MAC_EEPROM_ADDR; if (rsi_eeprom_read(common)) { common->fsm_state = FSM_CARD_NOT_READY; goto out; - } else { - common->fsm_state = FSM_EEPROM_READ_MAC_ADDR; } + common->fsm_state = FSM_EEPROM_READ_MAC_ADDR; } else { rsi_dbg(INFO_ZONE, "%s: Received bootup params cfm in %d state\n", @@ -1447,30 +1465,52 @@ static int rsi_handle_ta_confirm_type(struct rsi_common *common, } break; - case EEPROM_READ_TYPE: + case EEPROM_READ: + rsi_dbg(FSM_ZONE, "EEPROM READ confirm received\n"); + if (msg_len <= 0) { + rsi_dbg(FSM_ZONE, + "%s: [EEPROM_READ] Invalid len 
%d\n", + __func__, msg_len); + goto out; + } + if (msg[16] != MAGIC_WORD) { + rsi_dbg(FSM_ZONE, + "%s: [EEPROM_READ] Invalid token\n", __func__); + common->fsm_state = FSM_CARD_NOT_READY; + goto out; + } if (common->fsm_state == FSM_EEPROM_READ_MAC_ADDR) { - if (msg[16] == MAGIC_WORD) { - u8 offset = (FRAME_DESC_SZ + WLAN_HOST_MODE_LEN - + WLAN_MAC_MAGIC_WORD_LEN); - memcpy(common->mac_addr, - &msg[offset], - ETH_ALEN); - memcpy(&common->fw_ver, - &msg[offset + ETH_ALEN], - sizeof(struct version_info)); - - } else { + offset = (FRAME_DESC_SZ + WLAN_HOST_MODE_LEN + + WLAN_MAC_MAGIC_WORD_LEN); + memcpy(common->mac_addr, &msg[offset], ETH_ALEN); + adapter->eeprom.length = + ((WLAN_MAC_MAGIC_WORD_LEN + 3) & (~3)); + adapter->eeprom.offset = WLAN_EEPROM_RFTYPE_ADDR; + if (rsi_eeprom_read(common)) { + rsi_dbg(ERR_ZONE, + "%s: Failed reading RF band\n", + __func__); common->fsm_state = FSM_CARD_NOT_READY; - break; + goto out; + } + common->fsm_state = FSM_EEPROM_READ_RF_TYPE; + } else if (common->fsm_state == FSM_EEPROM_READ_RF_TYPE) { + if ((msg[17] & 0x3) == 0x3) { + rsi_dbg(INIT_ZONE, "Dual band supported\n"); + common->band = NL80211_BAND_5GHZ; + common->num_supp_bands = 2; + } else if ((msg[17] & 0x3) == 0x1) { + rsi_dbg(INIT_ZONE, + "Only 2.4Ghz band supported\n"); + common->band = NL80211_BAND_2GHZ; + common->num_supp_bands = 1; } if (rsi_send_reset_mac(common)) goto out; - else - common->fsm_state = FSM_RESET_MAC_SENT; + common->fsm_state = FSM_RESET_MAC_SENT; } else { - rsi_dbg(ERR_ZONE, - "%s: Received eeprom mac addr in %d state\n", - __func__, common->fsm_state); + rsi_dbg(ERR_ZONE, "%s: Invalid EEPROM read type\n", + __func__); return 0; } break; @@ -1602,8 +1642,10 @@ int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg) rsi_dbg(FSM_ZONE, "%s: Probe confirm received\n", __func__); } + } else if (msg_type == RX_DOT11_MGMT) { + return rsi_mgmt_pkt_to_core(common, msg, msg_len); } else { - return rsi_mgmt_pkt_to_core(common, msg, msg_len, msg_type); + rsi_dbg(INFO_ZONE, "Received packet type: 0x%x\n", msg_type); } return 0; } diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c index e5ea99bb2dd8..42d558b61721 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c @@ -138,12 +138,15 @@ static int rsi_issue_sdiocommand(struct sdio_func *func, static void rsi_handle_interrupt(struct sdio_func *function) { struct rsi_hw *adapter = sdio_get_drvdata(function); + struct rsi_91x_sdiodev *dev = + (struct rsi_91x_sdiodev *)adapter->rsi_dev; if (adapter->priv->fsm_state == FSM_FW_NOT_LOADED) return; - sdio_release_host(function); + + dev->sdio_irq_task = current; rsi_interrupt_handler(adapter); - sdio_claim_host(function); + dev->sdio_irq_task = NULL; } /** @@ -219,26 +222,18 @@ static void rsi_reset_card(struct sdio_func *pfunction) if (err) rsi_dbg(ERR_ZONE, "%s: CMD0 failed : %d\n", __func__, err); - if (!host->ocr_avail) { - /* Issue CMD5, arg = 0 */ - err = rsi_issue_sdiocommand(pfunction, - SD_IO_SEND_OP_COND, - 0, - (MMC_RSP_R4 | MMC_CMD_BCR), - &resp); - if (err) - rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n", - __func__, err); - host->ocr_avail = resp; - } + /* Issue CMD5, arg = 0 */ + err = rsi_issue_sdiocommand(pfunction, SD_IO_SEND_OP_COND, 0, + (MMC_RSP_R4 | MMC_CMD_BCR), &resp); + if (err) + rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n", __func__, err); + card->ocr = resp; /* Issue CMD5, arg = ocr. 
Wait till card is ready */ for (i = 0; i < 100; i++) { - err = rsi_issue_sdiocommand(pfunction, - SD_IO_SEND_OP_COND, - host->ocr_avail, - (MMC_RSP_R4 | MMC_CMD_BCR), - &resp); + err = rsi_issue_sdiocommand(pfunction, SD_IO_SEND_OP_COND, + card->ocr, + (MMC_RSP_R4 | MMC_CMD_BCR), &resp); if (err) { rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n", __func__, err); @@ -415,14 +410,16 @@ int rsi_sdio_read_register(struct rsi_hw *adapter, u8 fun_num = 0; int status; - sdio_claim_host(dev->pfunction); + if (likely(dev->sdio_irq_task != current)) + sdio_claim_host(dev->pfunction); if (fun_num == 0) *data = sdio_f0_readb(dev->pfunction, addr, &status); else *data = sdio_readb(dev->pfunction, addr, &status); - sdio_release_host(dev->pfunction); + if (likely(dev->sdio_irq_task != current)) + sdio_release_host(dev->pfunction); return status; } @@ -446,14 +443,16 @@ int rsi_sdio_write_register(struct rsi_hw *adapter, (struct rsi_91x_sdiodev *)adapter->rsi_dev; int status = 0; - sdio_claim_host(dev->pfunction); + if (likely(dev->sdio_irq_task != current)) + sdio_claim_host(dev->pfunction); if (function == 0) sdio_f0_writeb(dev->pfunction, *data, addr, &status); else sdio_writeb(dev->pfunction, *data, addr, &status); - sdio_release_host(dev->pfunction); + if (likely(dev->sdio_irq_task != current)) + sdio_release_host(dev->pfunction); return status; } @@ -498,11 +497,13 @@ static int rsi_sdio_read_register_multiple(struct rsi_hw *adapter, (struct rsi_91x_sdiodev *)adapter->rsi_dev; u32 status; - sdio_claim_host(dev->pfunction); + if (likely(dev->sdio_irq_task != current)) + sdio_claim_host(dev->pfunction); status = sdio_readsb(dev->pfunction, data, addr, count); - sdio_release_host(dev->pfunction); + if (likely(dev->sdio_irq_task != current)) + sdio_release_host(dev->pfunction); if (status != 0) rsi_dbg(ERR_ZONE, "%s: Synch Cmd53 read failed\n", __func__); @@ -540,11 +541,13 @@ int rsi_sdio_write_register_multiple(struct rsi_hw *adapter, dev->write_fail++; } - sdio_claim_host(dev->pfunction); + if (likely(dev->sdio_irq_task != current)) + sdio_claim_host(dev->pfunction); status = sdio_writesb(dev->pfunction, addr, data, count); - sdio_release_host(dev->pfunction); + if (likely(dev->sdio_irq_task != current)) + sdio_release_host(dev->pfunction); if (status) { rsi_dbg(ERR_ZONE, "%s: Synch Cmd53 write failed %d\n", @@ -941,6 +944,84 @@ fail: return 1; } +static void ulp_read_write(struct rsi_hw *adapter, u16 addr, u32 data, + u16 len_in_bits) +{ + rsi_sdio_master_reg_write(adapter, RSI_GSPI_DATA_REG1, + ((addr << 6) | ((data >> 16) & 0xffff)), 2); + rsi_sdio_master_reg_write(adapter, RSI_GSPI_DATA_REG0, + (data & 0xffff), 2); + rsi_sdio_master_reg_write(adapter, RSI_GSPI_CTRL_REG0, + RSI_GSPI_CTRL_REG0_VALUE, 2); + rsi_sdio_master_reg_write(adapter, RSI_GSPI_CTRL_REG1, + ((len_in_bits - 1) | RSI_GSPI_TRIG), 2); + msleep(20); +} + +/*This function resets and re-initializes the chip.*/ +static void rsi_reset_chip(struct rsi_hw *adapter) +{ + __le32 data; + u8 sdio_interrupt_status = 0; + u8 request = 1; + int ret; + + rsi_dbg(INFO_ZONE, "Writing disable to wakeup register\n"); + ret = rsi_sdio_write_register(adapter, 0, SDIO_WAKEUP_REG, &request); + if (ret < 0) { + rsi_dbg(ERR_ZONE, + "%s: Failed to write SDIO wakeup register\n", __func__); + return; + } + msleep(20); + ret = rsi_sdio_read_register(adapter, RSI_FN1_INT_REGISTER, + &sdio_interrupt_status); + if (ret < 0) { + rsi_dbg(ERR_ZONE, "%s: Failed to Read Intr Status Register\n", + __func__); + return; + } + rsi_dbg(INFO_ZONE, "%s: Intr Status Register 
value = %d\n", + __func__, sdio_interrupt_status); + + /* Put Thread-Arch processor on hold */ + if (rsi_sdio_master_access_msword(adapter, TA_BASE_ADDR)) { + rsi_dbg(ERR_ZONE, + "%s: Unable to set ms word to common reg\n", + __func__); + return; + } + + data = TA_HOLD_THREAD_VALUE; + if (rsi_sdio_write_register_multiple(adapter, TA_HOLD_THREAD_REG | + RSI_SD_REQUEST_MASTER, + (u8 *)&data, 4)) { + rsi_dbg(ERR_ZONE, + "%s: Unable to hold Thread-Arch processor threads\n", + __func__); + return; + } + + /* This msleep will ensure Thread-Arch processor to go to hold + * and any pending dma transfers to rf spi in device to finish. + */ + msleep(100); + + ulp_read_write(adapter, RSI_ULP_RESET_REG, RSI_ULP_WRITE_0, 32); + ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_1, RSI_ULP_WRITE_2, 32); + ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_2, RSI_ULP_WRITE_0, 32); + ulp_read_write(adapter, RSI_WATCH_DOG_DELAY_TIMER_1, RSI_ULP_WRITE_50, + 32); + ulp_read_write(adapter, RSI_WATCH_DOG_DELAY_TIMER_2, RSI_ULP_WRITE_0, + 32); + ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_ENABLE, + RSI_ULP_TIMER_ENABLE, 32); + /* This msleep will be sufficient for the ulp + * read write operations to complete for chip reset. + */ + msleep(500); +} + /** * rsi_disconnect() - This function performs the reverse of the probe function. * @pfunction: Pointer to the sdio_func structure. @@ -956,17 +1037,26 @@ static void rsi_disconnect(struct sdio_func *pfunction) return; dev = (struct rsi_91x_sdiodev *)adapter->rsi_dev; + sdio_claim_host(pfunction); + sdio_release_irq(pfunction); + sdio_release_host(pfunction); + mdelay(10); - dev->write_fail = 2; rsi_mac80211_detach(adapter); + mdelay(10); + + /* Reset Chip */ + rsi_reset_chip(adapter); - sdio_claim_host(pfunction); - sdio_release_irq(pfunction); - sdio_disable_func(pfunction); - rsi_91x_deinit(adapter); /* Resetting to take care of the case, where-in driver is re-loaded */ + sdio_claim_host(pfunction); rsi_reset_card(pfunction); + sdio_disable_func(pfunction); sdio_release_host(pfunction); + dev->write_fail = 2; + rsi_91x_deinit(adapter); + rsi_dbg(ERR_ZONE, "##### RSI SDIO device disconnected #####\n"); + } #ifdef CONFIG_PM diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c index df2a63b1f15c..b3f7adc9d085 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c @@ -69,20 +69,37 @@ int rsi_sdio_master_access_msword(struct rsi_hw *adapter, u16 ms_word) static int rsi_process_pkt(struct rsi_common *common) { struct rsi_hw *adapter = common->priv; + struct rsi_91x_sdiodev *dev = + (struct rsi_91x_sdiodev *)adapter->rsi_dev; u8 num_blks = 0; u32 rcv_pkt_len = 0; int status = 0; + u8 value = 0; - status = rsi_sdio_read_register(adapter, - SDIO_RX_NUM_BLOCKS_REG, - &num_blks); + num_blks = ((adapter->interrupt_status & 1) | + ((adapter->interrupt_status >> RECV_NUM_BLOCKS) << 1)); - if (status) { - rsi_dbg(ERR_ZONE, - "%s: Failed to read pkt length from the card:\n", - __func__); - return status; + if (!num_blks) { + status = rsi_sdio_read_register(adapter, + SDIO_RX_NUM_BLOCKS_REG, + &value); + if (status) { + rsi_dbg(ERR_ZONE, + "%s: Failed to read pkt length from the card:\n", + __func__); + return status; + } + num_blks = value & 0x1f; } + + if (dev->write_fail == 2) + rsi_sdio_ack_intr(common->priv, (1 << MSDU_PKT_PENDING)); + + if (unlikely(!num_blks)) { + dev->write_fail = 2; + return -1; + } + rcv_pkt_len = (num_blks * 256); common->rx_data_pkt = kmalloc(rcv_pkt_len, 
GFP_KERNEL); @@ -213,7 +230,7 @@ void rsi_interrupt_handler(struct rsi_hw *adapter) dev->rx_info.sdio_int_counter++; do { - mutex_lock(&common->tx_rxlock); + mutex_lock(&common->rx_lock); status = rsi_sdio_read_register(common->priv, RSI_FN1_INT_REGISTER, &isr_status); @@ -221,14 +238,15 @@ void rsi_interrupt_handler(struct rsi_hw *adapter) rsi_dbg(ERR_ZONE, "%s: Failed to Read Intr Status Register\n", __func__); - mutex_unlock(&common->tx_rxlock); + mutex_unlock(&common->rx_lock); return; } + adapter->interrupt_status = isr_status; if (isr_status == 0) { rsi_set_event(&common->tx_thread.event); dev->rx_info.sdio_intr_status_zero++; - mutex_unlock(&common->tx_rxlock); + mutex_unlock(&common->rx_lock); return; } @@ -286,7 +304,7 @@ void rsi_interrupt_handler(struct rsi_hw *adapter) rsi_dbg(ERR_ZONE, "%s: Failed to read pkt\n", __func__); - mutex_unlock(&common->tx_rxlock); + mutex_unlock(&common->rx_lock); return; } break; @@ -301,7 +319,7 @@ void rsi_interrupt_handler(struct rsi_hw *adapter) } isr_status ^= BIT(isr_type - 1); } while (isr_status); - mutex_unlock(&common->tx_rxlock); + mutex_unlock(&common->rx_lock); } while (1); } diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index bcd7f454ef30..9097f7e6229e 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -29,19 +29,24 @@ * Return: status: 0 on success, a negative error code on failure. */ static int rsi_usb_card_write(struct rsi_hw *adapter, - void *buf, + u8 *buf, u16 len, u8 endpoint) { struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; int status; - s32 transfer; + u8 *seg = dev->tx_buffer; + int transfer; + int ep = dev->bulkout_endpoint_addr[endpoint - 1]; + memset(seg, 0, len + RSI_USB_TX_HEAD_ROOM); + memcpy(seg + RSI_USB_TX_HEAD_ROOM, buf, len); + len += RSI_USB_TX_HEAD_ROOM; + transfer = len; status = usb_bulk_msg(dev->usbdev, - usb_sndbulkpipe(dev->usbdev, - dev->bulkout_endpoint_addr[endpoint - 1]), - buf, - len, + usb_sndbulkpipe(dev->usbdev, ep), + (void *)seg, + (int)len, &transfer, HZ * 5); @@ -68,23 +73,19 @@ static int rsi_write_multiple(struct rsi_hw *adapter, u8 *data, u32 count) { - struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; - u8 *seg = dev->tx_buffer; + struct rsi_91x_usbdev *dev = + (struct rsi_91x_usbdev *)adapter->rsi_dev; - if (dev->write_fail) - return 0; + if (!adapter) + return -ENODEV; - if (endpoint == MGMT_EP) { - memset(seg, 0, RSI_USB_TX_HEAD_ROOM); - memcpy(seg + RSI_USB_TX_HEAD_ROOM, data, count); - } else { - seg = ((u8 *)data - RSI_USB_TX_HEAD_ROOM); - } + if (endpoint == 0) + return -EINVAL; - return rsi_usb_card_write(adapter, - seg, - count + RSI_USB_TX_HEAD_ROOM, - endpoint); + if (dev->write_fail) + return -ENETDOWN; + + return rsi_usb_card_write(adapter, data, count, endpoint); } /** @@ -161,10 +162,13 @@ static int rsi_usb_reg_read(struct usb_device *usbdev, u8 *buf; int status = -ENOMEM; - buf = kmalloc(0x04, GFP_KERNEL); + buf = kmalloc(RSI_USB_CTRL_BUF_SIZE, GFP_KERNEL); if (!buf) return status; + if (len > RSI_USB_CTRL_BUF_SIZE) + return -EINVAL; + status = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), USB_VENDOR_REGISTER_READ, @@ -203,10 +207,13 @@ static int rsi_usb_reg_write(struct usb_device *usbdev, u8 *usb_reg_buf; int status = -ENOMEM; - usb_reg_buf = kmalloc(0x04, GFP_KERNEL); + usb_reg_buf = kmalloc(RSI_USB_CTRL_BUF_SIZE, GFP_KERNEL); if (!usb_reg_buf) return status; + if (len > RSI_USB_CTRL_BUF_SIZE) + return -EINVAL; + 
 	usb_reg_buf[0] = (value & 0x00ff);
 	usb_reg_buf[1] = (value & 0xff00) >> 8;
 	usb_reg_buf[2] = 0x0;
@@ -380,10 +387,11 @@ static int rsi_usb_host_intf_write_pkt(struct rsi_hw *adapter,
 					u8 *pkt,
 					u32 len)
 {
-	u32 queueno = ((pkt[1] >> 4) & 0xf);
+	u32 queueno = ((pkt[1] >> 4) & 0x7);
 	u8 endpoint;
 
-	endpoint = ((queueno == RSI_WIFI_MGMT_Q) ? MGMT_EP : DATA_EP);
+	endpoint = ((queueno == RSI_WIFI_MGMT_Q || queueno == RSI_WIFI_DATA_Q ||
+		     queueno == RSI_COEX_Q) ? WLAN_EP : BT_EP);
 
 	return rsi_write_multiple(adapter,
 				  endpoint,
@@ -396,8 +404,15 @@ static int rsi_usb_master_reg_read(struct rsi_hw *adapter, u32 reg,
 {
 	struct usb_device *usbdev =
 		((struct rsi_91x_usbdev *)adapter->rsi_dev)->usbdev;
+	u16 temp;
+	int ret;
+
+	ret = rsi_usb_reg_read(usbdev, reg, &temp, len);
+	if (ret < 0)
+		return ret;
+	*value = temp;
 
-	return rsi_usb_reg_read(usbdev, reg, (u16 *)value, len);
+	return 0;
 }
 
 static int rsi_usb_master_reg_write(struct rsi_hw *adapter,
@@ -558,6 +573,77 @@ fail_tx:
 	return status;
 }
 
+static int usb_ulp_read_write(struct rsi_hw *adapter, u16 addr, u32 data,
+			      u16 len_in_bits)
+{
+	int ret;
+
+	ret = rsi_usb_master_reg_write
+			(adapter, RSI_GSPI_DATA_REG1,
+			 ((addr << 6) | ((data >> 16) & 0xffff)), 2);
+	if (ret < 0)
+		return ret;
+
+	ret = rsi_usb_master_reg_write(adapter, RSI_GSPI_DATA_REG0,
+				       (data & 0xffff), 2);
+	if (ret < 0)
+		return ret;
+
+	/* Initializing GSPI for ULP read/writes */
+	rsi_usb_master_reg_write(adapter, RSI_GSPI_CTRL_REG0,
+				 RSI_GSPI_CTRL_REG0_VALUE, 2);
+
+	ret = rsi_usb_master_reg_write(adapter, RSI_GSPI_CTRL_REG1,
+				       ((len_in_bits - 1) | RSI_GSPI_TRIG), 2);
+	if (ret < 0)
+		return ret;
+
+	msleep(20);
+
+	return 0;
+}
+
+static int rsi_reset_card(struct rsi_hw *adapter)
+{
+	int ret;
+
+	rsi_dbg(INFO_ZONE, "Resetting Card...\n");
+	rsi_usb_master_reg_write(adapter, RSI_TA_HOLD_REG, 0xE, 4);
+
+	/* This msleep ensures that the Thread-Arch processor goes on hold
+	 * and any pending DMA transfers to the RF in the device finish.
+	 */
+	msleep(100);
+
+	ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_1,
+				 RSI_ULP_WRITE_2, 32);
+	if (ret < 0)
+		goto fail;
+	ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_2,
+				 RSI_ULP_WRITE_0, 32);
+	if (ret < 0)
+		goto fail;
+	ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_DELAY_TIMER_1,
+				 RSI_ULP_WRITE_50, 32);
+	if (ret < 0)
+		goto fail;
+	ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_DELAY_TIMER_2,
+				 RSI_ULP_WRITE_0, 32);
+	if (ret < 0)
+		goto fail;
+	ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_ENABLE,
+				 RSI_ULP_TIMER_ENABLE, 32);
+	if (ret < 0)
+		goto fail;
+
+	rsi_dbg(INFO_ZONE, "Reset card done\n");
+	return ret;
+
+fail:
+	rsi_dbg(ERR_ZONE, "Reset card failed\n");
+	return ret;
+}
+
 /**
  * rsi_probe() - This function is called by the kernel when the driver provided
  *		 Vendor and device IDs are matched.
All the initialization @@ -641,6 +727,7 @@ static void rsi_disconnect(struct usb_interface *pfunction) return; rsi_mac80211_detach(adapter); + rsi_reset_card(adapter); rsi_deinit_usb_interface(adapter); rsi_91x_deinit(adapter); diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c index d3e0a07604a6..465692b3c351 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c @@ -37,14 +37,14 @@ void rsi_usb_rx_thread(struct rsi_common *common) if (atomic_read(&dev->rx_thread.thread_done)) goto out; - mutex_lock(&common->tx_rxlock); + mutex_lock(&common->rx_lock); status = rsi_read_pkt(common, 0); if (status) { rsi_dbg(ERR_ZONE, "%s: Failed To read data", __func__); - mutex_unlock(&common->tx_rxlock); + mutex_unlock(&common->rx_lock); return; } - mutex_unlock(&common->tx_rxlock); + mutex_unlock(&common->rx_lock); rsi_reset_event(&dev->rx_thread.event); if (adapter->rx_urb_submit(adapter)) { rsi_dbg(ERR_ZONE, diff --git a/drivers/net/wireless/rsi/rsi_hal.h b/drivers/net/wireless/rsi/rsi_hal.h index 902dc540849c..00c6a0c5a891 100644 --- a/drivers/net/wireless/rsi/rsi_hal.h +++ b/drivers/net/wireless/rsi/rsi_hal.h @@ -52,6 +52,39 @@ #define FW_LOADING_SUCCESSFUL 'S' #define LOADING_INITIATED '1' +#define RSI_ULP_RESET_REG 0x161 +#define RSI_WATCH_DOG_TIMER_1 0x16c +#define RSI_WATCH_DOG_TIMER_2 0x16d +#define RSI_WATCH_DOG_DELAY_TIMER_1 0x16e +#define RSI_WATCH_DOG_DELAY_TIMER_2 0x16f +#define RSI_WATCH_DOG_TIMER_ENABLE 0x170 + +#define RSI_ULP_WRITE_0 00 +#define RSI_ULP_WRITE_2 02 +#define RSI_ULP_WRITE_50 50 + +#define RSI_RESTART_WDT BIT(11) +#define RSI_BYPASS_ULP_ON_WDT BIT(1) + +#define RSI_ULP_TIMER_ENABLE ((0xaa000) | RSI_RESTART_WDT | \ + RSI_BYPASS_ULP_ON_WDT) +#define RSI_RF_SPI_PROG_REG_BASE_ADDR 0x40080000 + +#define RSI_GSPI_CTRL_REG0 (RSI_RF_SPI_PROG_REG_BASE_ADDR) +#define RSI_GSPI_CTRL_REG1 (RSI_RF_SPI_PROG_REG_BASE_ADDR + 0x2) +#define RSI_GSPI_DATA_REG0 (RSI_RF_SPI_PROG_REG_BASE_ADDR + 0x4) +#define RSI_GSPI_DATA_REG1 (RSI_RF_SPI_PROG_REG_BASE_ADDR + 0x6) +#define RSI_GSPI_DATA_REG2 (RSI_RF_SPI_PROG_REG_BASE_ADDR + 0x8) + +#define RSI_GSPI_CTRL_REG0_VALUE 0x340 + +#define RSI_GSPI_DMA_MODE BIT(13) + +#define RSI_GSPI_2_ULP BIT(12) +#define RSI_GSPI_TRIG BIT(7) +#define RSI_GSPI_READ BIT(6) +#define RSI_GSPI_RF_SPI_ACTIVE BIT(8) + /* Boot loader commands */ #define SEND_RPS_FILE '2' @@ -66,6 +99,8 @@ #define RSI_DEV_OPMODE_WIFI_ALONE 1 #define RSI_DEV_COEX_MODE_WIFI_ALONE 1 +#define BBP_INFO_40MHZ 0x6 + struct bl_header { __le32 flags; __le32 image_no; @@ -79,6 +114,35 @@ struct ta_metadata { unsigned int address; }; +struct rsi_mgmt_desc { + __le16 len_qno; + u8 frame_type; + u8 misc_flags; + u8 xtend_desc_size; + u8 header_len; + __le16 frame_info; + u8 rate_info; + u8 reserved1; + __le16 bbp_info; + __le16 seq_ctrl; + u8 reserved2; + u8 vap_info; +} __packed; + +struct rsi_data_desc { + __le16 len_qno; + u8 cfm_frame_type; + u8 misc_flags; + u8 xtend_desc_size; + u8 header_len; + __le16 frame_info; + __le16 rate_info; + __le16 bbp_info; + __le16 mac_flags; + u8 qid_tid; + u8 sta_id; +} __packed; + int rsi_hal_device_init(struct rsi_hw *adapter); #endif diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h index f3985250b593..6a8e8e7ed1fb 100644 --- a/drivers/net/wireless/rsi/rsi_main.h +++ b/drivers/net/wireless/rsi/rsi_main.h @@ -37,10 +37,13 @@ enum RSI_FSM_STATES { FSM_COMMON_DEV_PARAMS_SENT, FSM_BOOT_PARAMS_SENT, FSM_EEPROM_READ_MAC_ADDR, + 
FSM_EEPROM_READ_RF_TYPE, FSM_RESET_MAC_SENT, FSM_RADIO_CAPS_SENT, FSM_BB_RF_PROG_SENT, - FSM_MAC_INIT_DONE + FSM_MAC_INIT_DONE, + + NUM_FSM_STATES }; extern u32 rsi_zone_enabled; @@ -58,11 +61,16 @@ extern __printf(2, 3) void rsi_dbg(u32 zone, const char *fmt, ...); #define MAC_80211_HDR_FRAME_CONTROL 0 #define WME_NUM_AC 4 #define NUM_SOFT_QUEUES 5 -#define MAX_HW_QUEUES 8 +#define MAX_HW_QUEUES 12 #define INVALID_QUEUE 0xff #define MAX_CONTINUOUS_VO_PKTS 8 #define MAX_CONTINUOUS_VI_PKTS 4 +/* Hardware queue info */ +#define BROADCAST_HW_Q 9 +#define MGMT_HW_Q 10 +#define BEACON_HW_Q 11 + /* Queue information */ #define RSI_COEX_Q 0x0 #define RSI_WIFI_MGMT_Q 0x4 @@ -102,6 +110,7 @@ struct skb_info { u16 channel; s8 tid; s8 sta_id; + u8 internal_hdr_size; }; enum edca_queue { @@ -155,6 +164,19 @@ struct cqm_info { u32 rssi_hyst; }; +struct xtended_desc { + u8 confirm_frame_type; + u8 retry_cnt; + u16 reserved; +}; + +enum rsi_dfs_regions { + RSI_REGION_FCC = 0, + RSI_REGION_ETSI, + RSI_REGION_TELEC, + RSI_REGION_WORLD +}; + struct rsi_hw; struct rsi_common { @@ -169,12 +191,15 @@ struct rsi_common { struct sk_buff_head tx_queue[NUM_EDCA_QUEUES + 1]; /* Mutex declaration */ struct mutex mutex; - /* Mutex used between tx/rx threads */ - struct mutex tx_rxlock; + /* Mutex used for tx thread */ + struct mutex tx_lock; + /* Mutex used for rx thread */ + struct mutex rx_lock; u8 endpoint; /* Channel/band related */ u8 band; + u8 num_supp_bands; u8 channel_width; u16 rts_threshold; @@ -221,6 +246,9 @@ struct rsi_common { u8 obm_ant_sel_val; int tx_power; u8 ant_in_use; + + u16 beacon_interval; + u8 dtim_cnt; }; enum host_intf { @@ -228,6 +256,19 @@ enum host_intf { RSI_HOST_INTF_USB }; +struct eepromrw_info { + u32 offset; + u32 length; + u8 write; + u16 eeprom_erase; + u8 data[480]; +}; + +struct eeprom_read { + u16 length; + u16 off_set; +}; + struct rsi_hw { struct rsi_common *priv; u8 device_model; @@ -250,7 +291,10 @@ struct rsi_hw { struct timer_list bl_cmd_timer; bool blcmd_timer_expired; u32 flash_capacity; + struct eepromrw_info eeprom; + u32 interrupt_status; u8 dfs_region; + char country[2]; void *rsi_dev; struct rsi_host_intf_ops *host_intf_ops; int (*check_hw_queue_status)(struct rsi_hw *adapter, u8 q_num); diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index dcb6db728cbd..cb0b17ec48d0 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -43,6 +43,7 @@ #define WLAN_HOST_MODE_LEN 0x04 #define WLAN_FW_VERSION_LEN 0x08 #define MAGIC_WORD 0x5A +#define WLAN_EEPROM_RFTYPE_ADDR 424 /* Receive Frame Types */ #define TA_CONFIRM_TYPE 0x01 @@ -62,7 +63,17 @@ #define RF_RESET_ENABLE BIT(3) #define RATE_INFO_ENABLE BIT(0) #define RSI_BROADCAST_PKT BIT(9) - +#define RSI_DESC_REQUIRE_CFM_TO_HOST BIT(2) +#define RSI_ADD_DELTA_TSF_VAP_ID BIT(3) +#define RSI_FETCH_RETRY_CNT_FRM_HST BIT(4) +#define RSI_QOS_ENABLE BIT(12) +#define RSI_REKEY_PURPOSE BIT(13) +#define RSI_ENCRYPT_PKT BIT(15) + +#define RSI_CMDDESC_40MHZ BIT(4) +#define RSI_CMDDESC_UPPER_20_ENABLE BIT(5) +#define RSI_CMDDESC_LOWER_20_ENABLE BIT(6) +#define RSI_CMDDESC_FULL_40_ENABLE (BIT(5) | BIT(6)) #define UPPER_20_ENABLE (0x2 << 12) #define LOWER_20_ENABLE (0x4 << 12) #define FULL40M_ENABLE 0x6 @@ -120,6 +131,7 @@ #define RSI_RATE_MCS6 0x106 #define RSI_RATE_MCS7 0x107 #define RSI_RATE_MCS7_SG 0x307 +#define RSI_RATE_AUTO 0xffff #define BW_20MHZ 0 #define BW_40MHZ 1 @@ -153,6 +165,11 @@ #define ALLOW_CONN_PEER_MGMT_WHILE_BUF_FULL BIT(5) #define 
DISALLOW_BROADCAST_DATA BIT(6) +#define RSI_MPDU_DENSITY 0x8 +#define RSI_CHAN_RADAR BIT(7) +#define RSI_BEACON_INTERVAL 200 +#define RSI_DTIM_COUNT 2 + enum opmode { STA_OPMODE = 1, AP_OPMODE = 2 @@ -192,7 +209,7 @@ enum cmd_frame_type { AUTO_RATE_IND, BOOTUP_PARAMS_REQUEST, VAP_CAPABILITIES, - EEPROM_READ_TYPE , + EEPROM_READ, EEPROM_WRITE, GPIO_PIN_CONFIG , SET_RX_FILTER, @@ -213,13 +230,52 @@ struct rsi_mac_frame { __le16 desc_word[8]; } __packed; +#define PWR_SAVE_WAKEUP_IND BIT(0) +#define TCP_CHECK_SUM_OFFLOAD BIT(1) +#define CONFIRM_REQUIRED_TO_HOST BIT(2) +#define ADD_DELTA_TSF BIT(3) +#define FETCH_RETRY_CNT_FROM_HOST_DESC BIT(4) +#define EOSP_INDICATION BIT(5) +#define REQUIRE_TSF_SYNC_CONFIRM BIT(6) +#define ENCAP_MGMT_PKT BIT(7) +#define DESC_IMMEDIATE_WAKEUP BIT(15) + +struct rsi_cmd_desc_dword0 { + __le16 len_qno; + u8 frame_type; + u8 misc_flags; +}; + +struct rsi_cmd_desc_dword1 { + u8 xtend_desc_size; + u8 reserved1; + __le16 reserved2; +}; + +struct rsi_cmd_desc_dword2 { + __le32 pkt_info; /* Packet specific data */ +}; + +struct rsi_cmd_desc_dword3 { + __le16 token; + u8 qid_tid; + u8 sta_id; +}; + +struct rsi_cmd_desc { + struct rsi_cmd_desc_dword0 desc_dword0; + struct rsi_cmd_desc_dword1 desc_dword1; + struct rsi_cmd_desc_dword2 desc_dword2; + struct rsi_cmd_desc_dword3 desc_dword3; +}; + struct rsi_boot_params { __le16 desc_word[8]; struct bootup_params bootup_params; } __packed; struct rsi_peer_notify { - __le16 desc_word[8]; + struct rsi_cmd_desc desc; u8 mac_addr[6]; __le16 command; __le16 mpdu_density; @@ -227,24 +283,87 @@ struct rsi_peer_notify { __le32 sta_flags; } __packed; +/* Aggregation params flags */ +#define RSI_AGGR_PARAMS_TID_MASK 0xf +#define RSI_AGGR_PARAMS_START BIT(4) +#define RSI_AGGR_PARAMS_RX_AGGR BIT(5) +struct rsi_aggr_params { + struct rsi_cmd_desc_dword0 desc_dword0; + struct rsi_cmd_desc_dword0 desc_dword1; + __le16 seq_start; + __le16 baw_size; + __le16 token; + u8 aggr_params; + u8 peer_id; +} __packed; + +struct rsi_bb_rf_prog { + struct rsi_cmd_desc_dword0 desc_dword0; + __le16 reserved1; + u8 rf_power_mode; + u8 reserved2; + u8 endpoint; + u8 reserved3; + __le16 reserved4; + __le16 reserved5; + __le16 flags; +} __packed; + +struct rsi_chan_config { + struct rsi_cmd_desc_dword0 desc_dword0; + struct rsi_cmd_desc_dword1 desc_dword1; + u8 channel_number; + u8 antenna_gain_offset_2g; + u8 antenna_gain_offset_5g; + u8 channel_width; + __le16 tx_power; + u8 region_rftype; + u8 flags; +} __packed; + struct rsi_vap_caps { - __le16 desc_word[8]; + struct rsi_cmd_desc_dword0 desc_dword0; + u8 reserved1; + u8 status; + __le16 reserved2; + u8 vif_type; + u8 channel_bw; + __le16 antenna_info; + u8 radioid_macid; + u8 vap_id; + __le16 reserved3; u8 mac_addr[6]; __le16 keep_alive_period; u8 bssid[6]; - __le16 reserved; + __le16 reserved4; __le32 flags; __le16 frag_threshold; __le16 rts_threshold; __le32 default_mgmt_rate; - __le32 default_ctrl_rate; + __le16 default_ctrl_rate; + __le16 ctrl_rate_flags; __le32 default_data_rate; __le16 beacon_interval; __le16 dtim_period; + __le16 beacon_miss_threshold; } __packed; +/* Key descriptor flags */ +#define RSI_KEY_TYPE_BROADCAST BIT(1) +#define RSI_WEP_KEY BIT(2) +#define RSI_WEP_KEY_104 BIT(3) +#define RSI_CIPHER_WPA BIT(4) +#define RSI_CIPHER_TKIP BIT(5) +#define RSI_PROTECT_DATA_FRAMES BIT(13) +#define RSI_KEY_ID_MASK 0xC0 +#define RSI_KEY_ID_OFFSET 14 struct rsi_set_key { - __le16 desc_word[8]; + struct rsi_cmd_desc_dword0 desc_dword0; + struct rsi_cmd_desc_dword1 desc_dword1; + __le16 key_desc; + 
__le32 bpn; + u8 sta_id; + u8 vap_id; u8 key[4][32]; u8 tx_mic_key[8]; u8 rx_mic_key[8]; @@ -262,6 +381,19 @@ struct rsi_auto_rate { __le16 supported_rates[40]; } __packed; +#define QUIET_INFO_VALID BIT(0) +#define QUIET_ENABLE BIT(1) +struct rsi_block_unblock_data { + struct rsi_cmd_desc_dword0 desc_dword0; + u8 xtend_desc_size; + u8 host_quiet_info; + __le16 reserved; + __le16 block_q_bitmap; + __le16 unblock_q_bitmap; + __le16 token; + __le16 flush_q_bitmap; +} __packed; + struct qos_params { __le16 cont_win_min_q; __le16 cont_win_max_q; @@ -270,7 +402,14 @@ struct qos_params { } __packed; struct rsi_radio_caps { - __le16 desc_word[8]; + struct rsi_cmd_desc_dword0 desc_dword0; + struct rsi_cmd_desc_dword0 desc_dword1; + u8 channel_num; + u8 rf_model; + __le16 ppe_ack_rate; + __le16 mode_11j; + u8 radio_cfg_info; + u8 radio_info; struct qos_params qos_params[MAX_HW_QUEUES]; u8 num_11n_rates; u8 num_11ac_rates; @@ -353,6 +492,22 @@ struct rsi_config_vals { u8 reserved2[16]; } __packed; +/* Packet info flags */ +#define RSI_EEPROM_HDR_SIZE_OFFSET 8 +#define RSI_EEPROM_HDR_SIZE_MASK 0x300 +#define RSI_EEPROM_LEN_OFFSET 20 +#define RSI_EEPROM_LEN_MASK 0xFFF00000 + +struct rsi_eeprom_read_frame { + __le16 len_qno; + u8 pkt_type; + u8 misc_flags; + __le32 pkt_info; + __le32 eeprom_offset; + __le16 delay_ms; + __le16 reserved3; +} __packed; + static inline u32 rsi_get_queueno(u8 *addr, u16 offset) { return (le16_to_cpu(*(__le16 *)&addr[offset]) & 0x7000) >> 12; diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h index 9fb73f68282a..3cf67565feb1 100644 --- a/drivers/net/wireless/rsi/rsi_sdio.h +++ b/drivers/net/wireless/rsi/rsi_sdio.h @@ -41,6 +41,7 @@ enum sdio_interrupt_type { #define PKT_BUFF_FULL 1 #define PKT_MGMT_BUFF_FULL 2 #define MSDU_PKT_PENDING 3 +#define RECV_NUM_BLOCKS 4 /* Interrupt Bit Related Macros */ #define PKT_BUFF_AVAILABLE 1 #define FW_ASSERT_IND 2 @@ -58,6 +59,7 @@ enum sdio_interrupt_type { #define SDIO_READ_START_LVL 0x000FC #define SDIO_READ_FIFO_CTL 0x000FD #define SDIO_WRITE_FIFO_CTL 0x000FE +#define SDIO_WAKEUP_REG 0x000FF #define SDIO_FUN1_INTR_CLR_REG 0x0008 #define SDIO_REG_HIGH_SPEED 0x0013 @@ -103,7 +105,7 @@ struct receive_info { struct rsi_91x_sdiodev { struct sdio_func *pfunction; - struct task_struct *in_sdio_litefi_irq; + struct task_struct *sdio_irq_task; struct receive_info rx_info; u32 next_read_delay; u32 sdio_high_speed_enable; diff --git a/drivers/net/wireless/rsi/rsi_usb.h b/drivers/net/wireless/rsi/rsi_usb.h index 59513ac61fb3..891daea2d932 100644 --- a/drivers/net/wireless/rsi/rsi_usb.h +++ b/drivers/net/wireless/rsi/rsi_usb.h @@ -25,6 +25,7 @@ #define USB_INTERNAL_REG_1 0x25000 #define RSI_USB_READY_MAGIC_NUM 0xab #define FW_STATUS_REG 0x41050012 +#define RSI_TA_HOLD_REG 0x22000844 #define USB_VENDOR_REGISTER_READ 0x15 #define USB_VENDOR_REGISTER_WRITE 0x16 @@ -32,10 +33,11 @@ #define MAX_RX_URBS 1 #define MAX_BULK_EP 8 -#define MGMT_EP 1 -#define DATA_EP 2 +#define WLAN_EP 1 +#define BT_EP 2 #define RSI_USB_BUF_SIZE 4096 +#define RSI_USB_CTRL_BUF_SIZE 0x04 struct rsi_91x_usbdev { struct rsi_thread rx_thread; diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c index 08f0477f78d9..9915d83a4a30 100644 --- a/drivers/net/wireless/ti/wl1251/main.c +++ b/drivers/net/wireless/ti/wl1251/main.c @@ -1571,6 +1571,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void) wl->state = WL1251_STATE_OFF; mutex_init(&wl->mutex); + spin_lock_init(&wl->wl_lock); wl->tx_mgmt_frm_rate = 
DEFAULT_HW_GEN_TX_RATE; wl->tx_mgmt_frm_mod = DEFAULT_HW_GEN_MODULATION_TYPE; diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c index acec0d9ec422..da62220b9c01 100644 --- a/drivers/net/wireless/wl3501_cs.c +++ b/drivers/net/wireless/wl3501_cs.c @@ -965,7 +965,7 @@ static inline void wl3501_md_ind_interrupt(struct net_device *dev, &addr4, sizeof(addr4)); if (!(addr4[0] == 0xAA && addr4[1] == 0xAA && addr4[2] == 0x03 && addr4[4] == 0x00)) { - printk(KERN_INFO "Insupported packet type!\n"); + printk(KERN_INFO "Unsupported packet type!\n"); return; } pkt_len = sig.size + 12 - 24 - 4 - 6; diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_rf_rf2959.c b/drivers/net/wireless/zydas/zd1211rw/zd_rf_rf2959.c index a93f657a41c7..d4e512f50945 100644 --- a/drivers/net/wireless/zydas/zd1211rw/zd_rf_rf2959.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_rf_rf2959.c @@ -61,7 +61,7 @@ static void dump_regwrite(u32 rw) switch (reg) { case 0: - PDEBUG("reg0 CFG1 ref_sel %d hybernate %d rf_vco_reg_en %d" + PDEBUG("reg0 CFG1 ref_sel %d hibernate %d rf_vco_reg_en %d" " if_vco_reg_en %d if_vga_en %d", bits(rw, 14, 15), bit(rw, 3), bit(rw, 2), bit(rw, 1), bit(rw, 0)); diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index e0dbd6e48a98..94ca3470e943 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -421,14 +421,12 @@ int of_phy_register_fixed_link(struct device_node *np) { struct fixed_phy_status status = {}; struct device_node *fixed_link_node; - const __be32 *fixed_link_prop; - int link_gpio; - int len, err; + u32 fixed_link_prop[5]; struct phy_device *phy; const char *managed; + int link_gpio; - err = of_property_read_string(np, "managed", &managed); - if (err == 0) { + if (of_property_read_string(np, "managed", &managed) == 0) { if (strcmp(managed, "in-band-status") == 0) { /* status is zeroed, namely its .link member */ phy = fixed_phy_register(PHY_POLL, &status, -1, np); @@ -461,13 +459,13 @@ int of_phy_register_fixed_link(struct device_node *np) } /* Old binding */ - fixed_link_prop = of_get_property(np, "fixed-link", &len); - if (fixed_link_prop && len == (5 * sizeof(__be32))) { + if (of_property_read_u32_array(np, "fixed-link", fixed_link_prop, + ARRAY_SIZE(fixed_link_prop)) == 0) { status.link = 1; - status.duplex = be32_to_cpu(fixed_link_prop[1]); - status.speed = be32_to_cpu(fixed_link_prop[2]); - status.pause = be32_to_cpu(fixed_link_prop[3]); - status.asym_pause = be32_to_cpu(fixed_link_prop[4]); + status.duplex = fixed_link_prop[1]; + status.speed = fixed_link_prop[2]; + status.pause = fixed_link_prop[3]; + status.asym_pause = fixed_link_prop[4]; phy = fixed_phy_register(PHY_POLL, &status, -1, np); return PTR_ERR_OR_ZERO(phy); } diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 06d044862e58..ba08b78ed630 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -533,6 +533,7 @@ static void handle_tx(struct vhost_net *net) ubuf->callback = vhost_zerocopy_callback; ubuf->ctx = nvq->ubufs; ubuf->desc = nvq->upend_idx; + atomic_set(&ubuf->refcnt, 1); msg.msg_control = ubuf; msg.msg_controllen = sizeof(ubuf); ubufs = nvq->ubufs; diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 5e1b548828e6..9aaa177e8209 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -391,7 +391,7 @@ static inline int virtqueue_add(struct virtqueue *_vq, vq->desc_state[head].data = data; if (indirect) vq->desc_state[head].indir_desc = desc; - if (ctx) + else vq->desc_state[head].indir_desc = ctx; /* Put entry 
in available array (but don't update avail->idx until they |