Diffstat (limited to 'drivers')
549 files changed, 29396 insertions, 7026 deletions
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 0c13cac903de..9e4bd751db79 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -1784,12 +1784,6 @@ set_tct(struct idt77252_dev *card, struct vc_map *vc)
 /*****************************************************************************/
 
 static __inline__ int
-idt77252_fbq_level(struct idt77252_dev *card, int queue)
-{
-	return (readl(SAR_REG_STAT) >> (16 + (queue << 2))) & 0x0f;
-}
-
-static __inline__ int
 idt77252_fbq_full(struct idt77252_dev *card, int queue)
 {
 	return (readl(SAR_REG_STAT) >> (16 + (queue << 2))) == 0x0f;
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index e2cf3b29123e..37a5e5f8b221 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -292,14 +292,16 @@ int driver_deferred_probe_check_state(struct device *dev)
 
 static void deferred_probe_timeout_work_func(struct work_struct *work)
 {
-	struct device_private *private, *p;
+	struct device_private *p;
 
 	driver_deferred_probe_timeout = 0;
 	driver_deferred_probe_trigger();
 	flush_work(&deferred_probe_work);
 
-	list_for_each_entry_safe(private, p, &deferred_probe_pending_list, deferred_probe)
-		dev_info(private->device, "deferred probe pending\n");
+	mutex_lock(&deferred_probe_mutex);
+	list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe)
+		dev_info(p->device, "deferred probe pending\n");
+	mutex_unlock(&deferred_probe_mutex);
 	wake_up_all(&probe_timeout_waitqueue);
 }
 static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index 87760aa60446..12aca34e8db0 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -52,13 +52,6 @@ static inline u32 mips_read32(struct bcma_drv_mips *mcore,
 	return bcma_read32(mcore->core, offset);
 }
 
-static inline void mips_write32(struct bcma_drv_mips *mcore,
-				u16 offset,
-				u32 value)
-{
-	bcma_write32(mcore->core, offset, value);
-}
-
 static u32 bcma_core_mips_irqflag(struct bcma_device *dev)
 {
 	u32 flag;
diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
index b20fdcbd035b..fd87a59837fa 100644
--- a/drivers/bus/moxtet.c
+++ b/drivers/bus/moxtet.c
@@ -2,7 +2,7 @@
 /*
  * Turris Mox module configuration bus driver
  *
- * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz>
+ * Copyright (C) 2019 Marek Behún <kabel@kernel.org>
  */
 
 #include <dt-bindings/bus/moxtet.h>
@@ -879,6 +879,6 @@ static void __exit moxtet_exit(void)
 }
 module_exit(moxtet_exit);
 
-MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
+MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
 MODULE_DESCRIPTION("CZ.NIC's Turris Mox module configuration bus");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index 4f7bf3929d6d..4e4b6d367612 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -66,7 +66,14 @@ EXPORT_SYMBOL_GPL(clk_fixed_factor_ops);
 
 static void devm_clk_hw_register_fixed_factor_release(struct device *dev, void *res)
 {
-	clk_hw_unregister_fixed_factor(&((struct clk_fixed_factor *)res)->hw);
+	struct clk_fixed_factor *fix = res;
+
+	/*
+	 * We can not use clk_hw_unregister_fixed_factor, since it will kfree()
+	 * the hw, resulting in double free. Just unregister the hw and let
+	 * devres code kfree() it.
+	 */
+	clk_hw_unregister(&fix->hw);
 }
 
 static struct clk_hw *
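The release callback in the clk-fixed-factor hunk above illustrates a general devres rule: memory handed out by devres_alloc() is freed by the devres core itself, so a release callback must only undo side effects and never kfree() the object it is passed. A minimal sketch of the idiom, with hypothetical my_obj names that are not part of the patch:

	/* devres-managed object: devres_alloc() owns the memory */
	static void my_obj_release(struct device *dev, void *res)
	{
		struct my_obj *obj = res;

		my_obj_unregister(obj);	/* undo registration only; no kfree(obj) */
	}

	static struct my_obj *devm_my_obj_create(struct device *dev)
	{
		struct my_obj *obj;

		obj = devres_alloc(my_obj_release, sizeof(*obj), GFP_KERNEL);
		if (!obj)
			return NULL;
		my_obj_register(obj);
		devres_add(dev, obj);	/* freed by devres after release runs */
		return obj;
	}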
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 5052541a0986..39cfc6c6a8d2 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -4357,20 +4357,19 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
 	/* search the list of notifiers for this clk */
 	list_for_each_entry(cn, &clk_notifier_list, node)
 		if (cn->clk == clk)
-			break;
+			goto found;
 
 	/* if clk wasn't in the notifier list, allocate new clk_notifier */
-	if (cn->clk != clk) {
-		cn = kzalloc(sizeof(*cn), GFP_KERNEL);
-		if (!cn)
-			goto out;
+	cn = kzalloc(sizeof(*cn), GFP_KERNEL);
+	if (!cn)
+		goto out;
 
-		cn->clk = clk;
-		srcu_init_notifier_head(&cn->notifier_head);
+	cn->clk = clk;
+	srcu_init_notifier_head(&cn->notifier_head);
 
-		list_add(&cn->node, &clk_notifier_list);
-	}
+	list_add(&cn->node, &clk_notifier_list);
 
+found:
 	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
 
 	clk->core->notifier_count++;
@@ -4395,32 +4394,28 @@ EXPORT_SYMBOL_GPL(clk_notifier_register);
  */
 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
 {
-	struct clk_notifier *cn = NULL;
-	int ret = -EINVAL;
+	struct clk_notifier *cn;
+	int ret = -ENOENT;
 
 	if (!clk || !nb)
 		return -EINVAL;
 
 	clk_prepare_lock();
 
-	list_for_each_entry(cn, &clk_notifier_list, node)
-		if (cn->clk == clk)
-			break;
-
-	if (cn->clk == clk) {
-		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
+	list_for_each_entry(cn, &clk_notifier_list, node) {
+		if (cn->clk == clk) {
+			ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
 
-		clk->core->notifier_count--;
+			clk->core->notifier_count--;
 
-		/* XXX the notifier code should handle this better */
-		if (!cn->notifier_head.head) {
-			srcu_cleanup_notifier_head(&cn->notifier_head);
-			list_del(&cn->node);
-			kfree(cn);
+			/* XXX the notifier code should handle this better */
+			if (!cn->notifier_head.head) {
+				srcu_cleanup_notifier_head(&cn->notifier_head);
+				list_del(&cn->node);
+				kfree(cn);
+			}
+			break;
 		}
-
-	} else {
-		ret = -ENOENT;
 	}
 
 	clk_prepare_unlock();
diff --git a/drivers/clk/qcom/camcc-sc7180.c b/drivers/clk/qcom/camcc-sc7180.c
index dbac5651ab85..9bcf2f8ed4de 100644
--- a/drivers/clk/qcom/camcc-sc7180.c
+++ b/drivers/clk/qcom/camcc-sc7180.c
@@ -304,7 +304,7 @@ static struct clk_rcg2 cam_cc_bps_clk_src = {
 		.name = "cam_cc_bps_clk_src",
 		.parent_data = cam_cc_parent_data_2,
 		.num_parents = 5,
-		.ops = &clk_rcg2_ops,
+		.ops = &clk_rcg2_shared_ops,
 	},
 };
@@ -325,7 +325,7 @@ static struct clk_rcg2 cam_cc_cci_0_clk_src = {
 		.name = "cam_cc_cci_0_clk_src",
 		.parent_data = cam_cc_parent_data_5,
 		.num_parents = 3,
-		.ops = &clk_rcg2_ops,
+		.ops = &clk_rcg2_shared_ops,
 	},
 };
@@ -339,7 +339,7 @@ static struct clk_rcg2 cam_cc_cci_1_clk_src = {
 		.name = "cam_cc_cci_1_clk_src",
 		.parent_data = cam_cc_parent_data_5,
 		.num_parents = 3,
-		.ops = &clk_rcg2_ops,
+		.ops = &clk_rcg2_shared_ops,
 	},
 };
@@ -360,7 +360,7 @@ static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
 		.name = "cam_cc_cphy_rx_clk_src",
 		.parent_data = cam_cc_parent_data_3,
 		.num_parents = 6,
-		.ops = &clk_rcg2_ops,
+		.ops = &clk_rcg2_shared_ops,
 	},
 };
@@ -379,7 +379,7 @@ static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
 		.name = "cam_cc_csi0phytimer_clk_src",
 		.parent_data = cam_cc_parent_data_0,
 		.num_parents = 4,
-		.ops = &clk_rcg2_ops,
+		.ops = &clk_rcg2_shared_ops,
 	},
 };
@@ -393,7 +393,7 @@ static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
 		.name = "cam_cc_csi1phytimer_clk_src",
 		.parent_data = cam_cc_parent_data_0,
 		.num_parents = 4,
-		.ops = &clk_rcg2_ops,
+		.ops = &clk_rcg2_shared_ops,
 	},
 };
@@ -407,7 +407,7 @@ static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
 		.name = "cam_cc_csi2phytimer_clk_src",
 		.parent_data = cam_cc_parent_data_0,
 		.num_parents = 4,
-		.ops = &clk_rcg2_ops,
+		.ops = &clk_rcg2_shared_ops,
 	},
 };
@@ -421,7 +421,7 @@ static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
 		.name = "cam_cc_csi3phytimer_clk_src",
 		.parent_data = cam_cc_parent_data_0,
 		.num_parents = 4,
-		.ops = &clk_rcg2_ops,
+		.ops = &clk_rcg2_shared_ops,
 	},
 };
@@ -443,7 +443,7 @@ static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
 		.name = "cam_cc_fast_ahb_clk_src",
 		.parent_data = cam_cc_parent_data_0,
 		.num_parents = 4,
-		.ops = &clk_rcg2_ops,
+		.ops = &clk_rcg2_shared_ops,
 	},
 };
@@ -466,7 +466,7 @@ static struct clk_rcg2 cam_cc_icp_clk_src = {
 		.name = "cam_cc_icp_clk_src",
 		.parent_data = cam_cc_parent_data_2,
 		.num_parents = 5,
-		.ops = &clk_rcg2_ops,
+		.ops = &clk_rcg2_shared_ops,
 	},
 };
@@ -488,7 +488,7 @@ static struct clk_rcg2 cam_cc_ife_0_clk_src = {
 		.name = "cam_cc_ife_0_clk_src",
 		.parent_data = cam_cc_parent_data_4,
 		.num_parents = 4,
-		.ops = &clk_rcg2_ops,
+		.ops = &clk_rcg2_shared_ops,
 	},
 };
@@ -510,7 +510,7 @@ static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
 		.name = "cam_cc_ife_0_csid_clk_src",
 		.parent_data = cam_cc_parent_data_3,
 		.num_parents = 6,
-		.ops = &clk_rcg2_ops,
+		.ops = &clk_rcg2_shared_ops,
 	},
 };
@@ -524,7 +524,7 @@ static struct clk_rcg2 cam_cc_ife_1_clk_src = {
 		.name = "cam_cc_ife_1_clk_src",
 		.parent_data = cam_cc_parent_data_4,
 		.num_parents = 4,
-		.ops = &clk_rcg2_ops,
+		.ops = &clk_rcg2_shared_ops,
 	},
 };
@@ -538,7 +538,7 @@ static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
 		.name = "cam_cc_ife_1_csid_clk_src",
 		.parent_data = cam_cc_parent_data_3,
 		.num_parents = 6,
-		.ops = &clk_rcg2_ops,
+		.ops = &clk_rcg2_shared_ops,
 	},
 };
@@ -553,7 +553,7 @@ static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
 		.parent_data = cam_cc_parent_data_4,
 		.num_parents = 4,
 		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_rcg2_ops,
+		.ops = &clk_rcg2_shared_ops,
 	},
 };
@@ -567,7 +567,7 @@ static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
 		.name = "cam_cc_ife_lite_csid_clk_src",
 		.parent_data = cam_cc_parent_data_3,
 		.num_parents = 6,
-		.ops = &clk_rcg2_ops,
+		.ops = &clk_rcg2_shared_ops,
 	},
 };
@@ -590,7 +590,7 @@ static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
 		.name = "cam_cc_ipe_0_clk_src",
 		.parent_data = cam_cc_parent_data_2,
 		.num_parents = 5,
-		.ops = &clk_rcg2_ops,
+		.ops = &clk_rcg2_shared_ops,
 	},
 };
@@ -613,7 +613,7 @@ static struct clk_rcg2 cam_cc_jpeg_clk_src = {
 		.name = "cam_cc_jpeg_clk_src",
 		.parent_data = cam_cc_parent_data_2,
 		.num_parents = 5,
-		.ops = &clk_rcg2_ops,
+		.ops = &clk_rcg2_shared_ops,
 	},
 };
@@ -635,7 +635,7 @@ static struct clk_rcg2 cam_cc_lrme_clk_src = {
 		.name = "cam_cc_lrme_clk_src",
 		.parent_data = cam_cc_parent_data_6,
 		.num_parents = 5,
-		.ops = &clk_rcg2_ops,
+		.ops = &clk_rcg2_shared_ops,
 	},
 };
@@ -656,7 +656,7 @@ static struct clk_rcg2 cam_cc_mclk0_clk_src = {
 		.name = "cam_cc_mclk0_clk_src",
 		.parent_data = cam_cc_parent_data_1,
 		.num_parents = 3,
-		.ops = &clk_rcg2_ops,
+		.ops = &clk_rcg2_shared_ops,
 	},
 };
@@ -670,7 +670,7 @@ static struct clk_rcg2 cam_cc_mclk1_clk_src = {
 		.name = "cam_cc_mclk1_clk_src",
 		.parent_data = cam_cc_parent_data_1,
 		.num_parents = 3,
-		.ops = &clk_rcg2_ops,
+		.ops = &clk_rcg2_shared_ops,
 	},
 };
@@ -684,7 +684,7 @@ static struct clk_rcg2 cam_cc_mclk2_clk_src = {
 		.name = "cam_cc_mclk2_clk_src",
 		.parent_data = cam_cc_parent_data_1,
 		.num_parents = 3,
-		.ops = &clk_rcg2_ops,
+		.ops = &clk_rcg2_shared_ops,
 	},
 };
@@ -698,7 +698,7 @@ static struct clk_rcg2 cam_cc_mclk3_clk_src = {
 		.name = "cam_cc_mclk3_clk_src",
 		.parent_data = cam_cc_parent_data_1,
 		.num_parents = 3,
-		.ops = &clk_rcg2_ops,
+		.ops = &clk_rcg2_shared_ops,
 	},
 };
@@ -712,7 +712,7 @@ static struct clk_rcg2 cam_cc_mclk4_clk_src = {
 		.name = "cam_cc_mclk4_clk_src",
 		.parent_data = cam_cc_parent_data_1,
 		.num_parents = 3,
-		.ops = &clk_rcg2_ops,
+		.ops = &clk_rcg2_shared_ops,
 	},
 };
@@ -732,7 +732,7 @@ static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
 		.parent_data = cam_cc_parent_data_0,
 		.num_parents = 4,
 		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
-		.ops = &clk_rcg2_ops,
+		.ops = &clk_rcg2_shared_ops,
 	},
 };
diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
index 43ecd507bf83..cf94a12459ea 100644
--- a/drivers/clk/socfpga/clk-gate.c
+++ b/drivers/clk/socfpga/clk-gate.c
@@ -99,7 +99,7 @@ static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk,
 		val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift;
 		val &= GENMASK(socfpgaclk->width - 1, 0);
 		/* Check for GPIO_DB_CLK by its offset */
-		if ((int) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET)
+		if ((uintptr_t) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET)
 			div = val + 1;
 		else
 			div = (1 << val);
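The clk-gate.c change swaps an (int) cast of an __iomem pointer for (uintptr_t). On LP64 targets a pointer is 64 bits while int is 32, so the old cast truncated the address (and triggers pointer-to-int-cast warnings); uintptr_t is defined to be wide enough to round-trip a pointer. A standalone illustration with hypothetical names:

	#include <stdint.h>

	/* Test an address bit: go through uintptr_t, never int. */
	static int addr_has_offset_bit(const volatile void *reg, uintptr_t bit)
	{
		/* (int)reg would drop the high 32 bits on a 64-bit build */
		return ((uintptr_t)reg & bit) != 0;
	}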
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index 244cb7d89678..2acc6173da36 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -4,6 +4,7 @@
 #include <linux/security.h>
 #include <linux/debugfs.h>
 #include <linux/module.h>
+#include <linux/sizes.h>
 #include <linux/mutex.h>
 #include <linux/cdev.h>
 #include <linux/idr.h>
@@ -96,21 +97,18 @@ struct mbox_cmd {
  * @dev: driver core device object
  * @cdev: char dev core object for ioctl operations
 * @cxlm: pointer to the parent device driver data
- * @ops_active: active user of @cxlm in ops handlers
- * @ops_dead: completion when all @cxlm ops users have exited
 * @id: id number of this memdev instance.
 */
 struct cxl_memdev {
 	struct device dev;
 	struct cdev cdev;
 	struct cxl_mem *cxlm;
-	struct percpu_ref ops_active;
-	struct completion ops_dead;
 	int id;
 };
 
 static int cxl_mem_major;
 static DEFINE_IDA(cxl_memdev_ida);
+static DECLARE_RWSEM(cxl_memdev_rwsem);
 static struct dentry *cxl_debugfs;
 static bool cxl_raw_allow_all;
 
@@ -169,7 +167,7 @@ struct cxl_mem_command {
 * table will be validated against the user's input. For example, if size_in is
 * 0, and the user passed in 1, it is an error.
 */
-static struct cxl_mem_command mem_commands[] = {
+static struct cxl_mem_command mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
 	CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
 #ifdef CONFIG_CXL_MEM_RAW_COMMANDS
 	CXL_CMD(RAW, ~0, ~0, 0),
@@ -776,26 +774,43 @@ static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd,
 static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
 			     unsigned long arg)
 {
-	struct cxl_memdev *cxlmd;
-	struct inode *inode;
-	int rc = -ENOTTY;
+	struct cxl_memdev *cxlmd = file->private_data;
+	int rc = -ENXIO;
 
-	inode = file_inode(file);
-	cxlmd = container_of(inode->i_cdev, typeof(*cxlmd), cdev);
+	down_read(&cxl_memdev_rwsem);
+	if (cxlmd->cxlm)
+		rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
+	up_read(&cxl_memdev_rwsem);
 
-	if (!percpu_ref_tryget_live(&cxlmd->ops_active))
-		return -ENXIO;
+	return rc;
+}
 
-	rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
+static int cxl_memdev_open(struct inode *inode, struct file *file)
+{
+	struct cxl_memdev *cxlmd =
+		container_of(inode->i_cdev, typeof(*cxlmd), cdev);
 
-	percpu_ref_put(&cxlmd->ops_active);
+	get_device(&cxlmd->dev);
+	file->private_data = cxlmd;
 
-	return rc;
+	return 0;
+}
+
+static int cxl_memdev_release_file(struct inode *inode, struct file *file)
+{
+	struct cxl_memdev *cxlmd =
+		container_of(inode->i_cdev, typeof(*cxlmd), cdev);
+
+	put_device(&cxlmd->dev);
+
+	return 0;
 }
 
 static const struct file_operations cxl_memdev_fops = {
 	.owner = THIS_MODULE,
 	.unlocked_ioctl = cxl_memdev_ioctl,
+	.open = cxl_memdev_open,
+	.release = cxl_memdev_release_file,
 	.compat_ioctl = compat_ptr_ioctl,
 	.llseek = noop_llseek,
 };
@@ -984,7 +999,7 @@ static struct cxl_mem *cxl_mem_create(struct pci_dev *pdev, u32 reg_lo,
 		return NULL;
 	}
 
-	offset = ((u64)reg_hi << 32) | FIELD_GET(CXL_REGLOC_ADDR_MASK, reg_lo);
+	offset = ((u64)reg_hi << 32) | (reg_lo & CXL_REGLOC_ADDR_MASK);
 	bar = FIELD_GET(CXL_REGLOC_BIR_MASK, reg_lo);
 
 	/* Basic sanity check that BAR is big enough */
@@ -1049,7 +1064,6 @@ static void cxl_memdev_release(struct device *dev)
 {
 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
 
-	percpu_ref_exit(&cxlmd->ops_active);
 	ida_free(&cxl_memdev_ida, cxlmd->id);
 	kfree(cxlmd);
 }
@@ -1066,7 +1080,7 @@ static ssize_t firmware_version_show(struct device *dev,
 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
 	struct cxl_mem *cxlm = cxlmd->cxlm;
 
-	return sprintf(buf, "%.16s\n", cxlm->firmware_version);
+	return sysfs_emit(buf, "%.16s\n", cxlm->firmware_version);
 }
 static DEVICE_ATTR_RO(firmware_version);
 
@@ -1076,7 +1090,7 @@ static ssize_t payload_max_show(struct device *dev,
 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
 	struct cxl_mem *cxlm = cxlmd->cxlm;
 
-	return sprintf(buf, "%zu\n", cxlm->payload_size);
+	return sysfs_emit(buf, "%zu\n", cxlm->payload_size);
 }
 static DEVICE_ATTR_RO(payload_max);
 
@@ -1087,7 +1101,7 @@ static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
 	struct cxl_mem *cxlm = cxlmd->cxlm;
 	unsigned long long len = range_len(&cxlm->ram_range);
 
-	return sprintf(buf, "%#llx\n", len);
+	return sysfs_emit(buf, "%#llx\n", len);
 }
 
 static struct device_attribute dev_attr_ram_size =
@@ -1100,7 +1114,7 @@ static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
 	struct cxl_mem *cxlm = cxlmd->cxlm;
 	unsigned long long len = range_len(&cxlm->pmem_range);
 
-	return sprintf(buf, "%#llx\n", len);
+	return sysfs_emit(buf, "%#llx\n", len);
 }
 
 static struct device_attribute dev_attr_pmem_size =
@@ -1150,27 +1164,24 @@ static const struct device_type cxl_memdev_type = {
 	.groups = cxl_memdev_attribute_groups,
 };
 
-static void cxlmdev_unregister(void *_cxlmd)
+static void cxl_memdev_shutdown(struct cxl_memdev *cxlmd)
 {
-	struct cxl_memdev *cxlmd = _cxlmd;
-	struct device *dev = &cxlmd->dev;
-
-	percpu_ref_kill(&cxlmd->ops_active);
-	cdev_device_del(&cxlmd->cdev, dev);
-	wait_for_completion(&cxlmd->ops_dead);
+	down_write(&cxl_memdev_rwsem);
 	cxlmd->cxlm = NULL;
-	put_device(dev);
+	up_write(&cxl_memdev_rwsem);
 }
 
-static void cxlmdev_ops_active_release(struct percpu_ref *ref)
+static void cxl_memdev_unregister(void *_cxlmd)
 {
-	struct cxl_memdev *cxlmd =
-		container_of(ref, typeof(*cxlmd), ops_active);
+	struct cxl_memdev *cxlmd = _cxlmd;
+	struct device *dev = &cxlmd->dev;
 
-	complete(&cxlmd->ops_dead);
+	cdev_device_del(&cxlmd->cdev, dev);
+	cxl_memdev_shutdown(cxlmd);
+	put_device(dev);
 }
 
-static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
+static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm)
 {
 	struct pci_dev *pdev = cxlm->pdev;
 	struct cxl_memdev *cxlmd;
@@ -1180,22 +1191,11 @@ static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
 
 	cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
 	if (!cxlmd)
-		return -ENOMEM;
-	init_completion(&cxlmd->ops_dead);
-
-	/*
-	 * @cxlm is deallocated when the driver unbinds so operations
-	 * that are using it need to hold a live reference.
-	 */
-	cxlmd->cxlm = cxlm;
-	rc = percpu_ref_init(&cxlmd->ops_active, cxlmdev_ops_active_release, 0,
-			     GFP_KERNEL);
-	if (rc)
-		goto err_ref;
+		return ERR_PTR(-ENOMEM);
 
 	rc = ida_alloc_range(&cxl_memdev_ida, 0, CXL_MEM_MAX_DEVS, GFP_KERNEL);
 	if (rc < 0)
-		goto err_id;
+		goto err;
 	cxlmd->id = rc;
 
 	dev = &cxlmd->dev;
@@ -1204,30 +1204,54 @@ static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
 	dev->bus = &cxl_bus_type;
 	dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
 	dev->type = &cxl_memdev_type;
-	dev_set_name(dev, "mem%d", cxlmd->id);
+	device_set_pm_not_required(dev);
 
 	cdev = &cxlmd->cdev;
 	cdev_init(cdev, &cxl_memdev_fops);
+	return cxlmd;
+
+err:
+	kfree(cxlmd);
+	return ERR_PTR(rc);
+}
+
+static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
+{
+	struct cxl_memdev *cxlmd;
+	struct device *dev;
+	struct cdev *cdev;
+	int rc;
+
+	cxlmd = cxl_memdev_alloc(cxlm);
+	if (IS_ERR(cxlmd))
+		return PTR_ERR(cxlmd);
+
+	dev = &cxlmd->dev;
+	rc = dev_set_name(dev, "mem%d", cxlmd->id);
+	if (rc)
+		goto err;
 
+	/*
+	 * Activate ioctl operations, no cxl_memdev_rwsem manipulation
+	 * needed as this is ordered with cdev_add() publishing the device.
+	 */
+	cxlmd->cxlm = cxlm;
 
+	cdev = &cxlmd->cdev;
 	rc = cdev_device_add(cdev, dev);
 	if (rc)
-		goto err_add;
+		goto err;
 
-	return devm_add_action_or_reset(dev->parent, cxlmdev_unregister, cxlmd);
+	return devm_add_action_or_reset(dev->parent, cxl_memdev_unregister,
+					cxlmd);
 
-err_add:
-	ida_free(&cxl_memdev_ida, cxlmd->id);
-err_id:
+err:
 	/*
-	 * Theoretically userspace could have already entered the fops,
-	 * so flush ops_active.
+	 * The cdev was briefly live, shutdown any ioctl operations that
+	 * saw that state.
 	 */
-	percpu_ref_kill(&cxlmd->ops_active);
-	wait_for_completion(&cxlmd->ops_dead);
-	percpu_ref_exit(&cxlmd->ops_active);
-err_ref:
-	kfree(cxlmd);
-
+	cxl_memdev_shutdown(cxlmd);
+	put_device(dev);
 	return rc;
 }
 
@@ -1396,6 +1420,7 @@ out:
 */
 static int cxl_mem_identify(struct cxl_mem *cxlm)
 {
+	/* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
 	struct cxl_mbox_identify {
 		char fw_revision[0x10];
 		__le64 total_capacity;
@@ -1424,10 +1449,11 @@ static int cxl_mem_identify(struct cxl_mem *cxlm)
 	 * For now, only the capacity is exported in sysfs
 	 */
 	cxlm->ram_range.start = 0;
-	cxlm->ram_range.end = le64_to_cpu(id.volatile_capacity) - 1;
+	cxlm->ram_range.end = le64_to_cpu(id.volatile_capacity) * SZ_256M - 1;
 
 	cxlm->pmem_range.start = 0;
-	cxlm->pmem_range.end = le64_to_cpu(id.persistent_capacity) - 1;
+	cxlm->pmem_range.end =
+		le64_to_cpu(id.persistent_capacity) * SZ_256M - 1;
 
 	memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision));
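The mem.c rework above replaces the per-device percpu_ref/completion pair with one global rwsem: ioctl paths take the semaphore shared and check cxlmd->cxlm before dereferencing it, while unregistration takes it exclusive and clears the pointer, so an ioctl can never race with driver teardown. A minimal sketch of this revoke idiom, with hypothetical my_dev/do_op names:

	static DECLARE_RWSEM(my_rwsem);

	static long my_ioctl(struct my_dev *d, unsigned int cmd)
	{
		long rc = -ENXIO;

		down_read(&my_rwsem);		/* many concurrent users */
		if (d->driver_data)		/* still bound to the driver? */
			rc = do_op(d->driver_data, cmd);
		up_read(&my_rwsem);
		return rc;
	}

	static void my_shutdown(struct my_dev *d)
	{
		down_write(&my_rwsem);		/* waits for all readers */
		d->driver_data = NULL;		/* revoke */
		up_write(&my_rwsem);
	}

The capacity fix in the same file follows from the spec comment it adds: the Identify payload reports volatile and persistent capacity in multiples of 256MB, hence the new * SZ_256M scaling.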
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index 452e85ae87a8..5aee26e1bbd6 100644
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -90,13 +90,11 @@ static ssize_t do_id_store(struct device_driver *drv, const char *buf,
 				list_add(&dax_id->list, &dax_drv->ids);
 			} else
 				rc = -ENOMEM;
-		} else
-			/* nothing to remove */;
+		}
 	} else if (action == ID_REMOVE) {
 		list_del(&dax_id->list);
 		kfree(dax_id);
-	} else
-		/* dax_id already added */;
+	}
 	mutex_unlock(&dax_bus_lock);
 
 	if (rc < 0)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index fe6a460c4373..af3ee288bc11 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1086,6 +1086,7 @@ static int __dma_async_device_channel_register(struct dma_device *device,
 	kfree(chan->dev);
 err_free_local:
 	free_percpu(chan->local);
+	chan->local = NULL;
 	return rc;
 }
diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
index e5162690de8f..db25f9b7778c 100644
--- a/drivers/dma/dw/Kconfig
+++ b/drivers/dma/dw/Kconfig
@@ -10,6 +10,7 @@ config DW_DMAC_CORE
 
 config DW_DMAC
 	tristate "Synopsys DesignWare AHB DMA platform driver"
+	depends on HAS_IOMEM
 	select DW_DMAC_CORE
 	help
 	  Support the Synopsys DesignWare AHB DMA controller.  This
@@ -18,6 +19,7 @@ config DW_DMAC
 config DW_DMAC_PCI
 	tristate "Synopsys DesignWare AHB DMA PCI driver"
 	depends on PCI
+	depends on HAS_IOMEM
 	select DW_DMAC_CORE
 	help
 	  Support the Synopsys DesignWare AHB DMA controller on the
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 84a6ea60ecf0..31c819544a22 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -282,6 +282,22 @@ void idxd_wq_drain(struct idxd_wq *wq)
 	idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
 }
 
+void idxd_wq_reset(struct idxd_wq *wq)
+{
+	struct idxd_device *idxd = wq->idxd;
+	struct device *dev = &idxd->pdev->dev;
+	u32 operand;
+
+	if (wq->state != IDXD_WQ_ENABLED) {
+		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
+		return;
+	}
+
+	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
+	idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
+	wq->state = IDXD_WQ_DISABLED;
+}
+
 int idxd_wq_map_portal(struct idxd_wq *wq)
 {
 	struct idxd_device *idxd = wq->idxd;
@@ -363,8 +379,6 @@ int idxd_wq_disable_pasid(struct idxd_wq *wq)
 void idxd_wq_disable_cleanup(struct idxd_wq *wq)
 {
 	struct idxd_device *idxd = wq->idxd;
-	struct device *dev = &idxd->pdev->dev;
-	int i, wq_offset;
 
 	lockdep_assert_held(&idxd->dev_lock);
 	memset(wq->wqcfg, 0, idxd->wqcfg_size);
@@ -376,14 +390,6 @@ void idxd_wq_disable_cleanup(struct idxd_wq *wq)
 	wq->ats_dis = 0;
 	clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
 	memset(wq->name, 0, WQ_NAME_SIZE);
-
-	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
-		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
-		iowrite32(0, idxd->reg_base + wq_offset);
-		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
-			wq->id, i, wq_offset,
-			ioread32(idxd->reg_base + wq_offset));
-	}
 }
 
 /* Device control bits */
@@ -574,6 +580,36 @@ void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
 }
 
 /* Device configuration bits */
+void idxd_msix_perm_setup(struct idxd_device *idxd)
+{
+	union msix_perm mperm;
+	int i, msixcnt;
+
+	msixcnt = pci_msix_vec_count(idxd->pdev);
+	if (msixcnt < 0)
+		return;
+
+	mperm.bits = 0;
+	mperm.pasid = idxd->pasid;
+	mperm.pasid_en = device_pasid_enabled(idxd);
+	for (i = 1; i < msixcnt; i++)
+		iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
+}
+
+void idxd_msix_perm_clear(struct idxd_device *idxd)
+{
+	union msix_perm mperm;
+	int i, msixcnt;
+
+	msixcnt = pci_msix_vec_count(idxd->pdev);
+	if (msixcnt < 0)
+		return;
+
+	mperm.bits = 0;
+	for (i = 1; i < msixcnt; i++)
+		iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
+}
+
 static void idxd_group_config_write(struct idxd_group *group)
 {
 	struct idxd_device *idxd = group->idxd;
@@ -642,7 +678,14 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
 	if (!wq->group)
 		return 0;
 
-	memset(wq->wqcfg, 0, idxd->wqcfg_size);
+	/*
+	 * Instead of memset the entire shadow copy of WQCFG, copy from the hardware after
+	 * wq reset. This will copy back the sticky values that are present on some devices.
+	 */
+	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
+		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
+		wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset);
+	}
 
 	/* byte 0-3 */
 	wq->wqcfg->wq_size = wq->size;
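idxd_wq_reset() above packs the WQ index into the command operand as BIT(wq->id % 16) | ((wq->id / 16) << 16): the low half is a one-hot mask selecting a WQ within a group of 16, and the high half selects the group. A worked check of that expression, using an illustrative id only:

	#include <stdint.h>
	#include <stdio.h>

	#define BIT(n) (1u << (n))

	int main(void)
	{
		uint32_t id = 19;	/* hypothetical WQ index */
		uint32_t operand = BIT(id % 16) | ((id / 16) << 16);

		printf("%#x\n", operand);	/* 0x10008: bit 3, group 1 */
		return 0;
	}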
disabled\n", dev_name(&wq->conf_dev)); } static int idxd_config_bus_remove(struct device *dev) @@ -989,7 +984,7 @@ static ssize_t wq_size_store(struct device *dev, if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) return -EPERM; - if (wq->state != IDXD_WQ_DISABLED) + if (idxd->state == IDXD_DEV_ENABLED) return -EPERM; if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size) @@ -1449,8 +1444,14 @@ static ssize_t op_cap_show(struct device *dev, { struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev); + int i, rc = 0; + + for (i = 0; i < 4; i++) + rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]); - return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]); + rc--; + rc += sysfs_emit_at(buf, rc, "\n"); + return rc; } static DEVICE_ATTR_RO(op_cap); diff --git a/drivers/dma/plx_dma.c b/drivers/dma/plx_dma.c index f387c5bbc170..166934544161 100644 --- a/drivers/dma/plx_dma.c +++ b/drivers/dma/plx_dma.c @@ -507,10 +507,8 @@ static int plx_dma_create(struct pci_dev *pdev) rc = request_irq(pci_irq_vector(pdev, 0), plx_dma_isr, 0, KBUILD_MODNAME, plxdev); - if (rc) { - kfree(plxdev); - return rc; - } + if (rc) + goto free_plx; spin_lock_init(&plxdev->ring_lock); tasklet_setup(&plxdev->desc_task, plx_dma_desc_task); @@ -540,14 +538,20 @@ static int plx_dma_create(struct pci_dev *pdev) rc = dma_async_device_register(dma); if (rc) { pci_err(pdev, "Failed to register dma device: %d\n", rc); - free_irq(pci_irq_vector(pdev, 0), plxdev); - kfree(plxdev); - return rc; + goto put_device; } pci_set_drvdata(pdev, plxdev); return 0; + +put_device: + put_device(&pdev->dev); + free_irq(pci_irq_vector(pdev, 0), plxdev); +free_plx: + kfree(plxdev); + + return rc; } static int plx_dma_probe(struct pci_dev *pdev, diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 71827d9b0aa1..b7260749e8ee 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -723,7 +723,7 @@ static void tegra_dma_issue_pending(struct dma_chan *dc) goto end; } if (!tdc->busy) { - err = pm_runtime_get_sync(tdc->tdma->dev); + err = pm_runtime_resume_and_get(tdc->tdma->dev); if (err < 0) { dev_err(tdc2dev(tdc), "Failed to enable DMA\n"); goto end; @@ -818,7 +818,7 @@ static void tegra_dma_synchronize(struct dma_chan *dc) struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); int err; - err = pm_runtime_get_sync(tdc->tdma->dev); + err = pm_runtime_resume_and_get(tdc->tdma->dev); if (err < 0) { dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d\n", err); return; diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c index 55df63dead8d..70b29bd079c9 100644 --- a/drivers/dma/xilinx/xilinx_dpdma.c +++ b/drivers/dma/xilinx/xilinx_dpdma.c @@ -839,6 +839,7 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan) struct xilinx_dpdma_tx_desc *desc; struct virt_dma_desc *vdesc; u32 reg, channels; + bool first_frame; lockdep_assert_held(&chan->lock); @@ -852,14 +853,6 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan) chan->running = true; } - if (chan->video_group) - channels = xilinx_dpdma_chan_video_group_ready(chan); - else - channels = BIT(chan->id); - - if (!channels) - return; - vdesc = vchan_next_desc(&chan->vchan); if (!vdesc) return; @@ -884,13 +877,26 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan) FIELD_PREP(XILINX_DPDMA_CH_DESC_START_ADDRE_MASK, upper_32_bits(sw_desc->dma_addr))); - if (chan->first_frame) + first_frame = 
chan->first_frame; + chan->first_frame = false; + + if (chan->video_group) { + channels = xilinx_dpdma_chan_video_group_ready(chan); + /* + * Trigger the transfer only when all channels in the group are + * ready. + */ + if (!channels) + return; + } else { + channels = BIT(chan->id); + } + + if (first_frame) reg = XILINX_DPDMA_GBL_TRIG_MASK(channels); else reg = XILINX_DPDMA_GBL_RETRIG_MASK(channels); - chan->first_frame = false; - dpdma_write(xdev->reg, XILINX_DPDMA_GBL, reg); } @@ -1042,13 +1048,14 @@ static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan) */ static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan) { - struct xilinx_dpdma_tx_desc *active = chan->desc.active; + struct xilinx_dpdma_tx_desc *active; unsigned long flags; spin_lock_irqsave(&chan->lock, flags); xilinx_dpdma_debugfs_desc_done_irq(chan); + active = chan->desc.active; if (active) vchan_cyclic_callback(&active->vdesc); else diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c index 50bb2a6d6ccf..62f0d1a5dd32 100644 --- a/drivers/firmware/turris-mox-rwtm.c +++ b/drivers/firmware/turris-mox-rwtm.c @@ -2,7 +2,7 @@ /* * Turris Mox rWTM firmware driver * - * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz> + * Copyright (C) 2019 Marek BehĂșn <kabel@kernel.org> */ #include <linux/armada-37xx-rwtm-mailbox.h> @@ -547,4 +547,4 @@ module_platform_driver(turris_mox_rwtm_driver); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Turris Mox rWTM firmware driver"); -MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>"); +MODULE_AUTHOR("Marek Behun <kabel@kernel.org>"); diff --git a/drivers/gpio/gpio-moxtet.c b/drivers/gpio/gpio-moxtet.c index 8299909318f4..61f9efd6c64f 100644 --- a/drivers/gpio/gpio-moxtet.c +++ b/drivers/gpio/gpio-moxtet.c @@ -2,7 +2,7 @@ /* * Turris Mox Moxtet GPIO expander * - * Copyright (C) 2018 Marek Behun <marek.behun@nic.cz> + * Copyright (C) 2018 Marek BehĂșn <kabel@kernel.org> */ #include <linux/bitops.h> @@ -174,6 +174,6 @@ static struct moxtet_driver moxtet_gpio_driver = { }; module_moxtet_driver(moxtet_gpio_driver); -MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>"); +MODULE_AUTHOR("Marek Behun <kabel@kernel.org>"); MODULE_DESCRIPTION("Turris Mox Moxtet GPIO expander"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c index 26c5466b8179..ae49bb23c6ed 100644 --- a/drivers/gpio/gpiolib-sysfs.c +++ b/drivers/gpio/gpiolib-sysfs.c @@ -458,6 +458,8 @@ static ssize_t export_store(struct class *class, long gpio; struct gpio_desc *desc; int status; + struct gpio_chip *gc; + int offset; status = kstrtol(buf, 0, &gpio); if (status < 0) @@ -469,6 +471,12 @@ static ssize_t export_store(struct class *class, pr_warn("%s: invalid GPIO %ld\n", __func__, gpio); return -EINVAL; } + gc = desc->gdev->chip; + offset = gpio_chip_hwgpio(desc); + if (!gpiochip_line_is_valid(gc, offset)) { + pr_warn("%s: GPIO %ld masked\n", __func__, gpio); + return -EINVAL; + } /* No extra locking here; FLAG_SYSFS just signifies that the * request and export were done by on behalf of userspace, so diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c index 651884390137..4f8337c7fd2e 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c @@ -646,7 +646,6 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector) break; case INTEL_BACKLIGHT_DISPLAY_DDI: try_intel_interface = true; - 
try_vesa_interface = true; break; default: return -ENODEV; diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index f94025ec603a..a9a8ba1d3aba 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -992,14 +992,14 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state, * FIXME As we do with eDP, just make a note of the time here * and perform the wait before the next panel power on. */ - intel_dsi_msleep(intel_dsi, intel_dsi->panel_pwr_cycle_delay); + msleep(intel_dsi->panel_pwr_cycle_delay); } static void intel_dsi_shutdown(struct intel_encoder *encoder) { struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); - intel_dsi_msleep(intel_dsi, intel_dsi->panel_pwr_cycle_delay); + msleep(intel_dsi->panel_pwr_cycle_delay); } static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 97b57acc02e2..4b4d8d034782 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -5471,12 +5471,12 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state, struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id]; int ret; - memset(wm, 0, sizeof(*wm)); - /* Watermarks calculated in master */ if (plane_state->planar_slave) return 0; + memset(wm, 0, sizeof(*wm)); + if (plane_state->planar_linked_plane) { const struct drm_framebuffer *fb = plane_state->hw.fb; enum plane_id y_plane_id = plane_state->planar_linked_plane->id; diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c index dbac16641662..ddecc84fd6f0 100644 --- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c +++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c @@ -10,6 +10,7 @@ #include <linux/bitops.h> #include <linux/delay.h> #include <linux/dma-mapping.h> +#include <linux/dmi.h> #include <linux/interrupt.h> #include <linux/io-64-nonatomic-lo-hi.h> #include <linux/module.h> @@ -22,9 +23,13 @@ #define ACEL_EN BIT(0) #define GYRO_EN BIT(1) -#define MAGNO_EN BIT(2) +#define MAGNO_EN BIT(2) #define ALS_EN BIT(19) +static int sensor_mask_override = -1; +module_param_named(sensor_mask, sensor_mask_override, int, 0444); +MODULE_PARM_DESC(sensor_mask, "override the detected sensors mask"); + void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info) { union sfh_cmd_param cmd_param; @@ -73,12 +78,41 @@ void amd_stop_all_sensors(struct amd_mp2_dev *privdata) writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0); } +static const struct dmi_system_id dmi_sensor_mask_overrides[] = { + { + .matches = { + DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY x360 Convertible 13-ag0xxx"), + }, + .driver_data = (void *)(ACEL_EN | MAGNO_EN), + }, + { + .matches = { + DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY x360 Convertible 15-cp0xxx"), + }, + .driver_data = (void *)(ACEL_EN | MAGNO_EN), + }, + { } +}; + int amd_mp2_get_sensor_num(struct amd_mp2_dev *privdata, u8 *sensor_id) { int activestatus, num_of_sensors = 0; + const struct dmi_system_id *dmi_id; + u32 activecontrolstatus; + + if (sensor_mask_override == -1) { + dmi_id = dmi_first_match(dmi_sensor_mask_overrides); + if (dmi_id) + sensor_mask_override = (long)dmi_id->driver_data; + } + + if (sensor_mask_override >= 0) { + activestatus = sensor_mask_override; + } else { + activecontrolstatus = readl(privdata->mmio + AMD_P2C_MSG3); + activestatus = activecontrolstatus >> 4; + } - privdata->activecontrolstatus = readl(privdata->mmio + AMD_P2C_MSG3); - 
activestatus = privdata->activecontrolstatus >> 4; if (ACEL_EN & activestatus) sensor_id[num_of_sensors++] = accel_idx; diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h index 8f8d19b2cfe5..489415f7c22c 100644 --- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h +++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h @@ -61,7 +61,6 @@ struct amd_mp2_dev { struct pci_dev *pdev; struct amdtp_cl_data *cl_data; void __iomem *mmio; - u32 activecontrolstatus; }; struct amd_mp2_sensor_info { diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c index 3feaece13ade..6b665931147d 100644 --- a/drivers/hid/hid-alps.c +++ b/drivers/hid/hid-alps.c @@ -761,6 +761,7 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi) if (input_register_device(data->input2)) { input_free_device(input2); + ret = -ENOENT; goto exit; } } diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c index 1dfe184ebf5a..2ab22b925941 100644 --- a/drivers/hid/hid-asus.c +++ b/drivers/hid/hid-asus.c @@ -1222,6 +1222,9 @@ static const struct hid_device_id asus_devices[] = { USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD), QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD }, { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, + USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2), + QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD }, + { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100TA_KEYBOARD), QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES }, { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c index 21e15627a461..477baa30889c 100644 --- a/drivers/hid/hid-cp2112.c +++ b/drivers/hid/hid-cp2112.c @@ -161,6 +161,7 @@ struct cp2112_device { atomic_t read_avail; atomic_t xfer_avail; struct gpio_chip gc; + struct irq_chip irq; u8 *in_out_buffer; struct mutex lock; @@ -1175,16 +1176,6 @@ static int cp2112_gpio_irq_type(struct irq_data *d, unsigned int type) return 0; } -static struct irq_chip cp2112_gpio_irqchip = { - .name = "cp2112-gpio", - .irq_startup = cp2112_gpio_irq_startup, - .irq_shutdown = cp2112_gpio_irq_shutdown, - .irq_ack = cp2112_gpio_irq_ack, - .irq_mask = cp2112_gpio_irq_mask, - .irq_unmask = cp2112_gpio_irq_unmask, - .irq_set_type = cp2112_gpio_irq_type, -}; - static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev, int pin) { @@ -1339,8 +1330,17 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id) dev->gc.can_sleep = 1; dev->gc.parent = &hdev->dev; + dev->irq.name = "cp2112-gpio"; + dev->irq.irq_startup = cp2112_gpio_irq_startup; + dev->irq.irq_shutdown = cp2112_gpio_irq_shutdown; + dev->irq.irq_ack = cp2112_gpio_irq_ack; + dev->irq.irq_mask = cp2112_gpio_irq_mask; + dev->irq.irq_unmask = cp2112_gpio_irq_unmask; + dev->irq.irq_set_type = cp2112_gpio_irq_type; + dev->irq.flags = IRQCHIP_MASK_ON_SUSPEND; + girq = &dev->gc.irq; - girq->chip = &cp2112_gpio_irqchip; + girq->chip = &dev->irq; /* The event comes from the outside so no parent handler */ girq->parent_handler = NULL; girq->num_parents = 0; diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c index d9319622da44..e60c31dd05ff 100644 --- a/drivers/hid/hid-google-hammer.c +++ b/drivers/hid/hid-google-hammer.c @@ -574,6 +574,8 @@ static void hammer_remove(struct hid_device *hdev) static const struct hid_device_id hammer_devices[] = { { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, + USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_DON) }, + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_GOOGLE, 
USB_DEVICE_ID_GOOGLE_HAMMER) }, { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MAGNEMITE) }, diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index e42aaae3138f..67fd8a2f5aba 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -194,6 +194,7 @@ #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2 0x1837 #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3 0x1822 #define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD 0x1866 +#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2 0x19b6 #define USB_DEVICE_ID_ASUSTEK_FX503VD_KEYBOARD 0x1869 #define USB_VENDOR_ID_ATEN 0x0557 @@ -493,6 +494,7 @@ #define USB_DEVICE_ID_GOOGLE_MASTERBALL 0x503c #define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d #define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044 +#define USB_DEVICE_ID_GOOGLE_DON 0x5050 #define USB_VENDOR_ID_GOTOP 0x08f2 #define USB_DEVICE_ID_SUPER_Q2 0x007f diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 44d715c12f6a..2d70dc4bea65 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -2533,7 +2533,7 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac, !wacom_wac->shared->is_touch_on) { if (!wacom_wac->shared->touch_down) return; - prox = 0; + prox = false; } wacom_wac->hid_data.num_received++; @@ -3574,8 +3574,6 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev, { struct wacom_features *features = &wacom_wac->features; - input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); - if (!(features->device_type & WACOM_DEVICETYPE_PEN)) return -ENODEV; @@ -3590,6 +3588,7 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev, return 0; } + input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); __set_bit(BTN_TOUCH, input_dev->keybit); __set_bit(ABS_MISC, input_dev->absbit); @@ -3742,8 +3741,6 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev, { struct wacom_features *features = &wacom_wac->features; - input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); - if (!(features->device_type & WACOM_DEVICETYPE_TOUCH)) return -ENODEV; @@ -3756,6 +3753,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev, /* setup has already been done */ return 0; + input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); __set_bit(BTN_TOUCH, input_dev->keybit); if (features->touch_max == 1) { diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index dd27b9dbe931..873ef38eb1c8 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -129,6 +129,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK) != DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH) { dev_err(dev->dev, "High Speed not supported!\n"); + t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ; dev->master_cfg &= ~DW_IC_CON_SPEED_MASK; dev->master_cfg |= DW_IC_CON_SPEED_FAST; dev->hs_hcnt = 0; diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c index 5ac30d95650c..97d4f3ac0abd 100644 --- a/drivers/i2c/busses/i2c-exynos5.c +++ b/drivers/i2c/busses/i2c-exynos5.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * i2c-exynos5.c - Samsung Exynos5 I2C Controller Driver * * Copyright (C) 2013 Samsung Electronics Co., Ltd. 
diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c index c45f226c2b85..aa00ba8bcb70 100644 --- a/drivers/i2c/busses/i2c-hix5hd2.c +++ b/drivers/i2c/busses/i2c-hix5hd2.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2014 Linaro Ltd. - * Copyright (c) 2014 Hisilicon Limited. + * Copyright (c) 2014 HiSilicon Limited. * * Now only support 7 bit address. */ diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c index 8509c5f11356..55177eb21d7b 100644 --- a/drivers/i2c/busses/i2c-jz4780.c +++ b/drivers/i2c/busses/i2c-jz4780.c @@ -525,8 +525,8 @@ static irqreturn_t jz4780_i2c_irq(int irqno, void *dev_id) i2c_sta = jz4780_i2c_readw(i2c, JZ4780_I2C_STA); data = *i2c->wbuf; data &= ~JZ4780_I2C_DC_READ; - if ((!i2c->stop_hold) && (i2c->cdata->version >= - ID_X1000)) + if ((i2c->wt_len == 1) && (!i2c->stop_hold) && + (i2c->cdata->version >= ID_X1000)) data |= X1000_I2C_DC_STOP; jz4780_i2c_writew(i2c, JZ4780_I2C_DC, data); i2c->wbuf++; diff --git a/drivers/i2c/busses/i2c-stm32f4.c b/drivers/i2c/busses/i2c-stm32f4.c index 937c2c8fd349..4933fc8ce3fd 100644 --- a/drivers/i2c/busses/i2c-stm32f4.c +++ b/drivers/i2c/busses/i2c-stm32f4.c @@ -534,7 +534,7 @@ static void stm32f4_i2c_handle_rx_addr(struct stm32f4_i2c_dev *i2c_dev) default: /* * N-byte reception: - * Enable ACK, reset POS (ACK postion) and clear ADDR flag. + * Enable ACK, reset POS (ACK position) and clear ADDR flag. * In that way, ACK will be sent as soon as the current byte * will be received in the shift register */ diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 63ebf722a424..f21362355973 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -378,7 +378,7 @@ static int i2c_gpio_init_recovery(struct i2c_adapter *adap) static int i2c_init_recovery(struct i2c_adapter *adap) { struct i2c_bus_recovery_info *bri = adap->bus_recovery_info; - char *err_str; + char *err_str, *err_level = KERN_ERR; if (!bri) return 0; @@ -387,7 +387,8 @@ static int i2c_init_recovery(struct i2c_adapter *adap) return -EPROBE_DEFER; if (!bri->recover_bus) { - err_str = "no recover_bus() found"; + err_str = "no suitable method provided"; + err_level = KERN_DEBUG; goto err; } @@ -414,7 +415,7 @@ static int i2c_init_recovery(struct i2c_adapter *adap) return 0; err: - dev_err(&adap->dev, "Not using recovery: %s\n", err_str); + dev_printk(err_level, &adap->dev, "Not using recovery: %s\n", err_str); adap->bus_recovery_info = NULL; return -EINVAL; diff --git a/drivers/input/joystick/n64joy.c b/drivers/input/joystick/n64joy.c index 8bcc529942bc..9dbca366613e 100644 --- a/drivers/input/joystick/n64joy.c +++ b/drivers/input/joystick/n64joy.c @@ -252,8 +252,8 @@ static int __init n64joy_probe(struct platform_device *pdev) mutex_init(&priv->n64joy_mutex); priv->reg_base = devm_platform_ioremap_resource(pdev, 0); - if (!priv->reg_base) { - err = -EINVAL; + if (IS_ERR(priv->reg_base)) { + err = PTR_ERR(priv->reg_base); goto fail; } diff --git a/drivers/input/keyboard/nspire-keypad.c b/drivers/input/keyboard/nspire-keypad.c index 63d5e488137d..e9fa1423f136 100644 --- a/drivers/input/keyboard/nspire-keypad.c +++ b/drivers/input/keyboard/nspire-keypad.c @@ -93,9 +93,15 @@ static irqreturn_t nspire_keypad_irq(int irq, void *dev_id) return IRQ_HANDLED; } -static int nspire_keypad_chip_init(struct nspire_keypad *keypad) +static int nspire_keypad_open(struct input_dev *input) { + struct nspire_keypad *keypad = input_get_drvdata(input); unsigned long val 
= 0, cycles_per_us, delay_cycles, row_delay_cycles; + int error; + + error = clk_prepare_enable(keypad->clk); + if (error) + return error; cycles_per_us = (clk_get_rate(keypad->clk) / 1000000); if (cycles_per_us == 0) @@ -121,30 +127,6 @@ static int nspire_keypad_chip_init(struct nspire_keypad *keypad) keypad->int_mask = 1 << 1; writel(keypad->int_mask, keypad->reg_base + KEYPAD_INTMSK); - /* Disable GPIO interrupts to prevent hanging on touchpad */ - /* Possibly used to detect touchpad events */ - writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT); - /* Acknowledge existing interrupts */ - writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS); - - return 0; -} - -static int nspire_keypad_open(struct input_dev *input) -{ - struct nspire_keypad *keypad = input_get_drvdata(input); - int error; - - error = clk_prepare_enable(keypad->clk); - if (error) - return error; - - error = nspire_keypad_chip_init(keypad); - if (error) { - clk_disable_unprepare(keypad->clk); - return error; - } - return 0; } @@ -152,6 +134,11 @@ static void nspire_keypad_close(struct input_dev *input) { struct nspire_keypad *keypad = input_get_drvdata(input); + /* Disable interrupts */ + writel(0, keypad->reg_base + KEYPAD_INTMSK); + /* Acknowledge existing interrupts */ + writel(~0, keypad->reg_base + KEYPAD_INT); + clk_disable_unprepare(keypad->clk); } @@ -210,6 +197,25 @@ static int nspire_keypad_probe(struct platform_device *pdev) return -ENOMEM; } + error = clk_prepare_enable(keypad->clk); + if (error) { + dev_err(&pdev->dev, "failed to enable clock\n"); + return error; + } + + /* Disable interrupts */ + writel(0, keypad->reg_base + KEYPAD_INTMSK); + /* Acknowledge existing interrupts */ + writel(~0, keypad->reg_base + KEYPAD_INT); + + /* Disable GPIO interrupts to prevent hanging on touchpad */ + /* Possibly used to detect touchpad events */ + writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT); + /* Acknowledge existing GPIO interrupts */ + writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS); + + clk_disable_unprepare(keypad->clk); + input_set_drvdata(input, keypad); input->id.bustype = BUS_HOST; diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 9119e12a5778..a5a003553646 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -588,6 +588,7 @@ static const struct dmi_system_id i8042_dmi_noselftest_table[] = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */ }, + }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */ diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c index 4c2b579f6c8b..5f7706febcb0 100644 --- a/drivers/input/touchscreen/elants_i2c.c +++ b/drivers/input/touchscreen/elants_i2c.c @@ -1441,7 +1441,7 @@ static int elants_i2c_probe(struct i2c_client *client, touchscreen_parse_properties(ts->input, true, &ts->prop); - if (ts->chip_id == EKTF3624) { + if (ts->chip_id == EKTF3624 && ts->phy_x && ts->phy_y) { /* calculate resolution from size */ ts->x_res = DIV_ROUND_CLOSEST(ts->prop.max_x, ts->phy_x); ts->y_res = DIV_ROUND_CLOSEST(ts->prop.max_y, ts->phy_y); @@ -1449,8 +1449,7 @@ static int elants_i2c_probe(struct i2c_client *client, input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->x_res); input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->y_res); - if (ts->major_res > 0) - input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, ts->major_res); + 
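The s6sy761 fix shifts the high byte by 4 rather than 3: x and y are 12-bit values packed as a full byte plus one nibble of event[3], so the byte must move up a whole nibble. Worked on illustrative bytes, not captured from hardware:

	/* event[1] = 0xAB, high nibble of event[3] = 0xC */
	uint16_t x   = (0xAB << 4) | 0xC;	/* 0xABC, correct 12-bit value */
	uint16_t bad = (0xAB << 3) | 0xC;	/* 0x55C: nibbles misaligned */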
diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c
index 7b2f4d0ae3fe..2f9a289ab245 100644
--- a/drivers/leds/leds-turris-omnia.c
+++ b/drivers/leds/leds-turris-omnia.c
@@ -2,7 +2,7 @@
 /*
 * CZ.NIC's Turris Omnia LEDs driver
 *
- * 2020 by Marek Behun <marek.behun@nic.cz>
+ * 2020 by Marek Behún <kabel@kernel.org>
 */
 
 #include <linux/i2c.h>
@@ -287,6 +287,6 @@ static struct i2c_driver omnia_leds_driver = {
 };
 module_i2c_driver(omnia_leds_driver);
 
-MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
+MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
 MODULE_DESCRIPTION("CZ.NIC's Turris Omnia LEDs");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/armada-37xx-rwtm-mailbox.c b/drivers/mailbox/armada-37xx-rwtm-mailbox.c
index 9f2ce7f03c67..456a117a65fd 100644
--- a/drivers/mailbox/armada-37xx-rwtm-mailbox.c
+++ b/drivers/mailbox/armada-37xx-rwtm-mailbox.c
@@ -2,7 +2,7 @@
 /*
 * rWTM BIU Mailbox driver for Armada 37xx
 *
- * Author: Marek Behun <marek.behun@nic.cz>
+ * Author: Marek Behún <kabel@kernel.org>
 */
 
 #include <linux/device.h>
@@ -203,4 +203,4 @@ module_platform_driver(armada_37xx_mbox_driver);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("rWTM BIU Mailbox driver for Armada 37xx");
-MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
+MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 66f4c6398f67..cea2b3789736 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -65,7 +65,7 @@ static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
 	u8 *res;
 
 	position = (index + rsb) * v->fec->roots;
-	block = div64_u64_rem(position, v->fec->roots << SECTOR_SHIFT, &rem);
+	block = div64_u64_rem(position, v->fec->io_size, &rem);
 	*offset = (unsigned)rem;
 
 	res = dm_bufio_read(v->fec->bufio, block, buf);
@@ -154,7 +154,7 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
 
 		/* read the next block when we run out of parity bytes */
 		offset += v->fec->roots;
-		if (offset >= v->fec->roots << SECTOR_SHIFT) {
+		if (offset >= v->fec->io_size) {
 			dm_bufio_release(buf);
 
 			par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
@@ -742,8 +742,13 @@ int verity_fec_ctr(struct dm_verity *v)
 		return -E2BIG;
 	}
 
+	if ((f->roots << SECTOR_SHIFT) & ((1 << v->data_dev_block_bits) - 1))
+		f->io_size = 1 << v->data_dev_block_bits;
+	else
+		f->io_size = v->fec->roots << SECTOR_SHIFT;
+
 	f->bufio = dm_bufio_client_create(f->dev->bdev,
-					  f->roots << SECTOR_SHIFT,
+					  f->io_size,
 					  1, 0, NULL, NULL);
 	if (IS_ERR(f->bufio)) {
 		ti->error = "Cannot initialize FEC bufio client";
diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h
index 42fbd3a7fc9f..3c46c8d61883 100644
--- a/drivers/md/dm-verity-fec.h
+++ b/drivers/md/dm-verity-fec.h
@@ -36,6 +36,7 @@ struct dm_verity_fec {
 	struct dm_dev *dev;	/* parity data device */
 	struct dm_bufio_client *data_bufio;	/* for data dev access */
 	struct dm_bufio_client *bufio;		/* for parity data access */
+	size_t io_size;		/* IO size for roots */
 	sector_t start;		/* parity data start in blocks */
 	sector_t blocks;	/* number of blocks covered */
 	sector_t rounds;	/* number of interleaving rounds */
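verity_fec_ctr() above sizes the parity bufio client: roots << SECTOR_SHIFT (roots x 512 bytes of parity per stripe) is used directly only when it divides evenly into data blocks; otherwise a full data block is used so parity reads never straddle block boundaries. A worked check of the new expression, assuming 4096-byte data blocks (data_dev_block_bits = 12):

	#include <stdio.h>

	#define SECTOR_SHIFT 9

	int main(void)
	{
		unsigned int block_bits = 12;		/* assumed 4096-byte blocks */
		unsigned int roots[] = { 2, 8, 16 };

		for (int i = 0; i < 3; i++) {
			unsigned int rs = roots[i] << SECTOR_SHIFT;
			unsigned int io = (rs & ((1u << block_bits) - 1)) ?
					  (1u << block_bits) : rs;
			printf("roots=%u io_size=%u\n", roots[i], io);
		}
		return 0;	/* prints 4096, 4096, 8192 */
	}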
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index 57f1f1708994..5c5c92132287 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -488,8 +488,8 @@ static int mtk_nfc_exec_instr(struct nand_chip *chip,
 		return 0;
 	case NAND_OP_WAITRDY_INSTR:
 		return readl_poll_timeout(nfc->regs + NFI_STA, status,
-					  status & STA_BUSY, 20,
-					  instr->ctx.waitrdy.timeout_ms);
+					  !(status & STA_BUSY), 20,
+					  instr->ctx.waitrdy.timeout_ms * 1000);
 	default:
 		break;
 	}
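The mtk_nand fix corrects both arguments of the poll: readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) spins until cond is true, so the condition must name the ready state, !(status & STA_BUSY), and the last parameter is in microseconds, hence timeout_ms * 1000. A sketch of the corrected usage with hypothetical register and bit names:

	#include <linux/iopoll.h>

	/* Poll until the BUSY bit clears; timeout_ms is converted to us. */
	static int wait_controller_ready(void __iomem *sta_reg, u32 busy_bit,
					 unsigned int timeout_ms)
	{
		u32 status;

		return readl_poll_timeout(sta_reg, status, !(status & busy_bit),
					  20, timeout_ms * 1000);
	}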
+ config CAN_GS_USB tristate "Geschwister Schneider UG interfaces" help diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile index aa0f17c0b2ed..748cf31a0d53 100644 --- a/drivers/net/can/usb/Makefile +++ b/drivers/net/can/usb/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o +obj-$(CONFIG_CAN_ETAS_ES58X) += etas_es58x/ obj-$(CONFIG_CAN_GS_USB) += gs_usb.o obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb/ obj-$(CONFIG_CAN_MCBA_USB) += mcba_usb.o diff --git a/drivers/net/can/usb/etas_es58x/Makefile b/drivers/net/can/usb/etas_es58x/Makefile new file mode 100644 index 000000000000..a129b4aa0215 --- /dev/null +++ b/drivers/net/can/usb/etas_es58x/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_CAN_ETAS_ES58X) += etas_es58x.o +etas_es58x-y = es58x_core.o es581_4.o es58x_fd.o diff --git a/drivers/net/can/usb/etas_es58x/es581_4.c b/drivers/net/can/usb/etas_es58x/es581_4.c new file mode 100644 index 000000000000..1985f772fc3c --- /dev/null +++ b/drivers/net/can/usb/etas_es58x/es581_4.c @@ -0,0 +1,507 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Driver for ETAS GmbH ES58X USB CAN(-FD) Bus Interfaces. + * + * File es581_4.c: Adds support to ETAS ES581.4. + * + * Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved. + * Copyright (c) 2020 ETAS K.K.. All rights reserved. + * Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr> + */ + +#include <linux/kernel.h> +#include <asm/unaligned.h> + +#include "es58x_core.h" +#include "es581_4.h" + +/** + * es581_4_sizeof_rx_tx_msg() - Calculate the actual length of the + * structure of a rx or tx message. + * @msg: message of variable length, must have a dlc field. + * + * Even if RTR frames have actually no payload, the ES58X devices + * still expect it. Must be a macro in order to accept several types + * (struct es581_4_tx_can_msg and struct es581_4_rx_can_msg) as an + * input. + * + * Return: length of the message. 
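+ * + * For example, a classical CAN frame with dlc = 8 gives + * can_cc_dlc2len(8) == 8, so the macro expands to + * offsetof(typeof(msg), data[8]): the fixed header fields plus an + * 8 byte payload. An RTR frame keeps its dlc, so its (unused) + * payload bytes are still counted, just as the device expects.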
+ */ +#define es581_4_sizeof_rx_tx_msg(msg) \ + offsetof(typeof(msg), data[can_cc_dlc2len((msg).dlc)]) + +static u16 es581_4_get_msg_len(const union es58x_urb_cmd *urb_cmd) +{ + return get_unaligned_le16(&urb_cmd->es581_4_urb_cmd.msg_len); +} + +static int es581_4_echo_msg(struct es58x_device *es58x_dev, + const struct es581_4_urb_cmd *es581_4_urb_cmd) +{ + struct net_device *netdev; + const struct es581_4_bulk_echo_msg *bulk_echo_msg; + const struct es581_4_echo_msg *echo_msg; + u64 *tstamps = es58x_dev->timestamps; + u16 msg_len; + u32 first_packet_idx, packet_idx; + unsigned int dropped = 0; + int i, num_element, ret; + + bulk_echo_msg = &es581_4_urb_cmd->bulk_echo_msg; + msg_len = get_unaligned_le16(&es581_4_urb_cmd->msg_len) - + sizeof(bulk_echo_msg->channel_no); + num_element = es58x_msg_num_element(es58x_dev->dev, + bulk_echo_msg->echo_msg, msg_len); + if (num_element <= 0) + return num_element; + + ret = es58x_get_netdev(es58x_dev, bulk_echo_msg->channel_no, + ES581_4_CHANNEL_IDX_OFFSET, &netdev); + if (ret) + return ret; + + echo_msg = &bulk_echo_msg->echo_msg[0]; + first_packet_idx = get_unaligned_le32(&echo_msg->packet_idx); + packet_idx = first_packet_idx; + for (i = 0; i < num_element; i++) { + u32 tmp_idx; + + echo_msg = &bulk_echo_msg->echo_msg[i]; + tmp_idx = get_unaligned_le32(&echo_msg->packet_idx); + if (tmp_idx == packet_idx - 1) { + if (net_ratelimit()) + netdev_warn(netdev, + "Received echo packet idx %u twice\n", + packet_idx - 1); + dropped++; + continue; + } + if (tmp_idx != packet_idx) { + netdev_err(netdev, "Echo packet idx jumped from %u to %u\n", + packet_idx - 1, echo_msg->packet_idx); + return -EBADMSG; + } + + tstamps[i] = get_unaligned_le64(&echo_msg->timestamp); + packet_idx++; + } + + netdev->stats.tx_dropped += dropped; + return es58x_can_get_echo_skb(netdev, first_packet_idx, + tstamps, num_element - dropped); +} + +static int es581_4_rx_can_msg(struct es58x_device *es58x_dev, + const struct es581_4_urb_cmd *es581_4_urb_cmd, + u16 msg_len) +{ + const struct device *dev = es58x_dev->dev; + struct net_device *netdev; + int pkts, num_element, channel_no, ret; + + num_element = es58x_msg_num_element(dev, es581_4_urb_cmd->rx_can_msg, + msg_len); + if (num_element <= 0) + return num_element; + + channel_no = es581_4_urb_cmd->rx_can_msg[0].channel_no; + ret = es58x_get_netdev(es58x_dev, channel_no, + ES581_4_CHANNEL_IDX_OFFSET, &netdev); + if (ret) + return ret; + + if (!netif_running(netdev)) { + if (net_ratelimit()) + netdev_info(netdev, + "%s: %s is down, dropping %d rx packets\n", + __func__, netdev->name, num_element); + netdev->stats.rx_dropped += num_element; + return 0; + } + + for (pkts = 0; pkts < num_element; pkts++) { + const struct es581_4_rx_can_msg *rx_can_msg = + &es581_4_urb_cmd->rx_can_msg[pkts]; + u64 tstamp = get_unaligned_le64(&rx_can_msg->timestamp); + canid_t can_id = get_unaligned_le32(&rx_can_msg->can_id); + + if (channel_no != rx_can_msg->channel_no) + return -EBADMSG; + + ret = es58x_rx_can_msg(netdev, tstamp, rx_can_msg->data, + can_id, rx_can_msg->flags, + rx_can_msg->dlc); + if (ret) + break; + } + + return ret; +} + +static int es581_4_rx_err_msg(struct es58x_device *es58x_dev, + const struct es581_4_rx_err_msg *rx_err_msg) +{ + struct net_device *netdev; + enum es58x_err error = get_unaligned_le32(&rx_err_msg->error); + int ret; + + ret = es58x_get_netdev(es58x_dev, rx_err_msg->channel_no, + ES581_4_CHANNEL_IDX_OFFSET, &netdev); + if (ret) + return ret; + + return es58x_rx_err_msg(netdev, error, 0, + 
get_unaligned_le64(&rx_err_msg->timestamp)); +} + +static int es581_4_rx_event_msg(struct es58x_device *es58x_dev, + const struct es581_4_rx_event_msg *rx_event_msg) +{ + struct net_device *netdev; + enum es58x_event event = get_unaligned_le32(&rx_event_msg->event); + int ret; + + ret = es58x_get_netdev(es58x_dev, rx_event_msg->channel_no, + ES581_4_CHANNEL_IDX_OFFSET, &netdev); + if (ret) + return ret; + + return es58x_rx_err_msg(netdev, 0, event, + get_unaligned_le64(&rx_event_msg->timestamp)); +} + +static int es581_4_rx_cmd_ret_u32(struct es58x_device *es58x_dev, + const struct es581_4_urb_cmd *es581_4_urb_cmd, + enum es58x_ret_type ret_type) +{ + struct net_device *netdev; + const struct es581_4_rx_cmd_ret *rx_cmd_ret; + u16 msg_len = get_unaligned_le16(&es581_4_urb_cmd->msg_len); + int ret; + + ret = es58x_check_msg_len(es58x_dev->dev, + es581_4_urb_cmd->rx_cmd_ret, msg_len); + if (ret) + return ret; + + rx_cmd_ret = &es581_4_urb_cmd->rx_cmd_ret; + + ret = es58x_get_netdev(es58x_dev, rx_cmd_ret->channel_no, + ES581_4_CHANNEL_IDX_OFFSET, &netdev); + if (ret) + return ret; + + return es58x_rx_cmd_ret_u32(netdev, ret_type, + get_unaligned_le32(&rx_cmd_ret->rx_cmd_ret_le32)); +} + +static int es581_4_tx_ack_msg(struct es58x_device *es58x_dev, + const struct es581_4_urb_cmd *es581_4_urb_cmd) +{ + struct net_device *netdev; + const struct es581_4_tx_ack_msg *tx_ack_msg; + u16 msg_len = get_unaligned_le16(&es581_4_urb_cmd->msg_len); + int ret; + + tx_ack_msg = &es581_4_urb_cmd->tx_ack_msg; + ret = es58x_check_msg_len(es58x_dev->dev, *tx_ack_msg, msg_len); + if (ret) + return ret; + + if (tx_ack_msg->rx_cmd_ret_u8 != ES58X_RET_U8_OK) + return es58x_rx_cmd_ret_u8(es58x_dev->dev, + ES58X_RET_TYPE_TX_MSG, + tx_ack_msg->rx_cmd_ret_u8); + + ret = es58x_get_netdev(es58x_dev, tx_ack_msg->channel_no, + ES581_4_CHANNEL_IDX_OFFSET, &netdev); + if (ret) + return ret; + + return es58x_tx_ack_msg(netdev, + get_unaligned_le16(&tx_ack_msg->tx_free_entries), + ES58X_RET_U32_OK); +} + +static int es581_4_dispatch_rx_cmd(struct es58x_device *es58x_dev, + const struct es581_4_urb_cmd *es581_4_urb_cmd) +{ + const struct device *dev = es58x_dev->dev; + u16 msg_len = get_unaligned_le16(&es581_4_urb_cmd->msg_len); + enum es581_4_rx_type rx_type = es581_4_urb_cmd->rx_can_msg[0].rx_type; + int ret = 0; + + switch (rx_type) { + case ES581_4_RX_TYPE_MESSAGE: + return es581_4_rx_can_msg(es58x_dev, es581_4_urb_cmd, msg_len); + + case ES581_4_RX_TYPE_ERROR: + ret = es58x_check_msg_len(dev, es581_4_urb_cmd->rx_err_msg, + msg_len); + if (ret < 0) + return ret; + return es581_4_rx_err_msg(es58x_dev, + &es581_4_urb_cmd->rx_err_msg); + + case ES581_4_RX_TYPE_EVENT: + ret = es58x_check_msg_len(dev, es581_4_urb_cmd->rx_event_msg, + msg_len); + if (ret < 0) + return ret; + return es581_4_rx_event_msg(es58x_dev, + &es581_4_urb_cmd->rx_event_msg); + + default: + dev_err(dev, "%s: Unknown rx_type 0x%02X\n", __func__, rx_type); + return -EBADRQC; + } +} + +static int es581_4_handle_urb_cmd(struct es58x_device *es58x_dev, + const union es58x_urb_cmd *urb_cmd) +{ + const struct es581_4_urb_cmd *es581_4_urb_cmd; + struct device *dev = es58x_dev->dev; + u16 msg_len = es581_4_get_msg_len(urb_cmd); + int ret; + + es581_4_urb_cmd = &urb_cmd->es581_4_urb_cmd; + + if (es581_4_urb_cmd->cmd_type != ES581_4_CAN_COMMAND_TYPE) { + dev_err(dev, "%s: Unknown command type (0x%02X)\n", + __func__, es581_4_urb_cmd->cmd_type); + return -EBADRQC; + } + + switch ((enum es581_4_cmd_id)es581_4_urb_cmd->cmd_id) { + case ES581_4_CMD_ID_SET_BITTIMING: + 
return es581_4_rx_cmd_ret_u32(es58x_dev, es581_4_urb_cmd, + ES58X_RET_TYPE_SET_BITTIMING); + + case ES581_4_CMD_ID_ENABLE_CHANNEL: + return es581_4_rx_cmd_ret_u32(es58x_dev, es581_4_urb_cmd, + ES58X_RET_TYPE_ENABLE_CHANNEL); + + case ES581_4_CMD_ID_TX_MSG: + return es581_4_tx_ack_msg(es58x_dev, es581_4_urb_cmd); + + case ES581_4_CMD_ID_RX_MSG: + return es581_4_dispatch_rx_cmd(es58x_dev, es581_4_urb_cmd); + + case ES581_4_CMD_ID_RESET_RX: + ret = es581_4_rx_cmd_ret_u32(es58x_dev, es581_4_urb_cmd, + ES58X_RET_TYPE_RESET_RX); + return ret; + + case ES581_4_CMD_ID_RESET_TX: + ret = es581_4_rx_cmd_ret_u32(es58x_dev, es581_4_urb_cmd, + ES58X_RET_TYPE_RESET_TX); + return ret; + + case ES581_4_CMD_ID_DISABLE_CHANNEL: + return es581_4_rx_cmd_ret_u32(es58x_dev, es581_4_urb_cmd, + ES58X_RET_TYPE_DISABLE_CHANNEL); + + case ES581_4_CMD_ID_TIMESTAMP: + ret = es58x_check_msg_len(dev, es581_4_urb_cmd->timestamp, + msg_len); + if (ret < 0) + return ret; + es58x_rx_timestamp(es58x_dev, + get_unaligned_le64(&es581_4_urb_cmd->timestamp)); + return 0; + + case ES581_4_CMD_ID_ECHO: + return es581_4_echo_msg(es58x_dev, es581_4_urb_cmd); + + case ES581_4_CMD_ID_DEVICE_ERR: + ret = es58x_check_msg_len(dev, es581_4_urb_cmd->rx_cmd_ret_u8, + msg_len); + if (ret) + return ret; + return es58x_rx_cmd_ret_u8(dev, ES58X_RET_TYPE_DEVICE_ERR, + es581_4_urb_cmd->rx_cmd_ret_u8); + + default: + dev_warn(dev, "%s: Unexpected command ID: 0x%02X\n", + __func__, es581_4_urb_cmd->cmd_id); + return -EBADRQC; + } +} + +static void es581_4_fill_urb_header(union es58x_urb_cmd *urb_cmd, u8 cmd_type, + u8 cmd_id, u8 channel_idx, u16 msg_len) +{ + struct es581_4_urb_cmd *es581_4_urb_cmd = &urb_cmd->es581_4_urb_cmd; + + es581_4_urb_cmd->SOF = cpu_to_le16(es581_4_param.tx_start_of_frame); + es581_4_urb_cmd->cmd_type = cmd_type; + es581_4_urb_cmd->cmd_id = cmd_id; + es581_4_urb_cmd->msg_len = cpu_to_le16(msg_len); +} + +static int es581_4_tx_can_msg(struct es58x_priv *priv, + const struct sk_buff *skb) +{ + struct es58x_device *es58x_dev = priv->es58x_dev; + union es58x_urb_cmd *urb_cmd = priv->tx_urb->transfer_buffer; + struct es581_4_urb_cmd *es581_4_urb_cmd = &urb_cmd->es581_4_urb_cmd; + struct can_frame *cf = (struct can_frame *)skb->data; + struct es581_4_tx_can_msg *tx_can_msg; + u16 msg_len; + int ret; + + if (can_is_canfd_skb(skb)) + return -EMSGSIZE; + + if (priv->tx_can_msg_cnt == 0) { + msg_len = 1; /* struct es581_4_bulk_tx_can_msg:num_can_msg */ + es581_4_fill_urb_header(urb_cmd, ES581_4_CAN_COMMAND_TYPE, + ES581_4_CMD_ID_TX_MSG, + priv->channel_idx, msg_len); + es581_4_urb_cmd->bulk_tx_can_msg.num_can_msg = 0; + } else { + msg_len = es581_4_get_msg_len(urb_cmd); + } + + ret = es58x_check_msg_max_len(es58x_dev->dev, + es581_4_urb_cmd->bulk_tx_can_msg, + msg_len + sizeof(*tx_can_msg)); + if (ret) + return ret; + + /* Fill message contents. */ + tx_can_msg = (struct es581_4_tx_can_msg *) + &es581_4_urb_cmd->bulk_tx_can_msg.tx_can_msg_buf[msg_len - 1]; + put_unaligned_le32(es58x_get_raw_can_id(cf), &tx_can_msg->can_id); + put_unaligned_le32(priv->tx_head, &tx_can_msg->packet_idx); + put_unaligned_le16((u16)es58x_get_flags(skb), &tx_can_msg->flags); + tx_can_msg->channel_no = priv->channel_idx + ES581_4_CHANNEL_IDX_OFFSET; + tx_can_msg->dlc = can_get_cc_dlc(cf, priv->can.ctrlmode); + + memcpy(tx_can_msg->data, cf->data, cf->len); + + /* Calculate new sizes. 
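+ * With the bookkeeping below, msg_len always equals 1 (for the + * num_can_msg byte) plus the packed size of every frame queued so + * far, which is why the next frame was appended at + * tx_can_msg_buf[msg_len - 1] above.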
*/ + es581_4_urb_cmd->bulk_tx_can_msg.num_can_msg++; + msg_len += es581_4_sizeof_rx_tx_msg(*tx_can_msg); + priv->tx_urb->transfer_buffer_length = es58x_get_urb_cmd_len(es58x_dev, + msg_len); + es581_4_urb_cmd->msg_len = cpu_to_le16(msg_len); + + return 0; +} + +static int es581_4_set_bittiming(struct es58x_priv *priv) +{ + struct es581_4_tx_conf_msg tx_conf_msg = { 0 }; + struct can_bittiming *bt = &priv->can.bittiming; + + tx_conf_msg.bitrate = cpu_to_le32(bt->bitrate); + /* bt->sample_point is in tenth of percent. Convert it to percent. */ + tx_conf_msg.sample_point = cpu_to_le32(bt->sample_point / 10U); + tx_conf_msg.samples_per_bit = cpu_to_le32(ES58X_SAMPLES_PER_BIT_ONE); + tx_conf_msg.bit_time = cpu_to_le32(can_bit_time(bt)); + tx_conf_msg.sjw = cpu_to_le32(bt->sjw); + tx_conf_msg.sync_edge = cpu_to_le32(ES58X_SYNC_EDGE_SINGLE); + tx_conf_msg.physical_layer = + cpu_to_le32(ES58X_PHYSICAL_LAYER_HIGH_SPEED); + tx_conf_msg.echo_mode = cpu_to_le32(ES58X_ECHO_ON); + tx_conf_msg.channel_no = priv->channel_idx + ES581_4_CHANNEL_IDX_OFFSET; + + return es58x_send_msg(priv->es58x_dev, ES581_4_CAN_COMMAND_TYPE, + ES581_4_CMD_ID_SET_BITTIMING, &tx_conf_msg, + sizeof(tx_conf_msg), priv->channel_idx); +} + +static int es581_4_enable_channel(struct es58x_priv *priv) +{ + int ret; + u8 msg = priv->channel_idx + ES581_4_CHANNEL_IDX_OFFSET; + + ret = es581_4_set_bittiming(priv); + if (ret) + return ret; + + return es58x_send_msg(priv->es58x_dev, ES581_4_CAN_COMMAND_TYPE, + ES581_4_CMD_ID_ENABLE_CHANNEL, &msg, sizeof(msg), + priv->channel_idx); +} + +static int es581_4_disable_channel(struct es58x_priv *priv) +{ + u8 msg = priv->channel_idx + ES581_4_CHANNEL_IDX_OFFSET; + + return es58x_send_msg(priv->es58x_dev, ES581_4_CAN_COMMAND_TYPE, + ES581_4_CMD_ID_DISABLE_CHANNEL, &msg, sizeof(msg), + priv->channel_idx); +} + +static int es581_4_reset_device(struct es58x_device *es58x_dev) +{ + return es58x_send_msg(es58x_dev, ES581_4_CAN_COMMAND_TYPE, + ES581_4_CMD_ID_RESET_DEVICE, + ES58X_EMPTY_MSG, 0, ES58X_CHANNEL_IDX_NA); +} + +static int es581_4_get_timestamp(struct es58x_device *es58x_dev) +{ + return es58x_send_msg(es58x_dev, ES581_4_CAN_COMMAND_TYPE, + ES581_4_CMD_ID_TIMESTAMP, + ES58X_EMPTY_MSG, 0, ES58X_CHANNEL_IDX_NA); +} + +/* Nominal bittiming constants for ES581.4 as specified in the + * microcontroller datasheet: "Stellaris(R) LM3S5B91 Microcontroller" + * table 17-4 "CAN Protocol Ranges" from Texas Instruments. + */ +static const struct can_bittiming_const es581_4_bittiming_const = { + .name = "ES581.4", + .tseg1_min = 1, + .tseg1_max = 8, + .tseg2_min = 1, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 128, + .brp_inc = 1 +}; + +const struct es58x_parameters es581_4_param = { + .bittiming_const = &es581_4_bittiming_const, + .data_bittiming_const = NULL, + .tdc_const = NULL, + .bitrate_max = 1 * CAN_MBPS, + .clock = {.freq = 50 * CAN_MHZ}, + .ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC, + .tx_start_of_frame = 0xAFAF, + .rx_start_of_frame = 0xFAFA, + .tx_urb_cmd_max_len = ES581_4_TX_URB_CMD_MAX_LEN, + .rx_urb_cmd_max_len = ES581_4_RX_URB_CMD_MAX_LEN, + /* Size of internal device TX queue is 330. + * + * However, we witnessed some ES58X_ERR_PROT_CRC errors from + * the device and thus, echo_skb_max was lowered to the + * empirical value of 75 which seems stable and then rounded + * down to become a power of two. + * + * Root cause of those ES58X_ERR_PROT_CRC errors is still + * unclear. 
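+ * + * Keeping echo_skb_max at a power of two lets the driver index + * its echo FIFO by simply masking the free running counters, + * e.g. skb_idx = priv->tx_head & fifo_mask, instead of using a + * modulo.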
+ */ + .fifo_mask = 63, /* echo_skb_max = 64 */ + .dql_min_limit = CAN_FRAME_LEN_MAX * 50, /* Empirical value. */ + .tx_bulk_max = ES581_4_TX_BULK_MAX, + .urb_cmd_header_len = ES581_4_URB_CMD_HEADER_LEN, + .rx_urb_max = ES58X_RX_URBS_MAX, + .tx_urb_max = ES58X_TX_URBS_MAX +}; + +const struct es58x_operators es581_4_ops = { + .get_msg_len = es581_4_get_msg_len, + .handle_urb_cmd = es581_4_handle_urb_cmd, + .fill_urb_header = es581_4_fill_urb_header, + .tx_can_msg = es581_4_tx_can_msg, + .enable_channel = es581_4_enable_channel, + .disable_channel = es581_4_disable_channel, + .reset_device = es581_4_reset_device, + .get_timestamp = es581_4_get_timestamp +}; diff --git a/drivers/net/can/usb/etas_es58x/es581_4.h b/drivers/net/can/usb/etas_es58x/es581_4.h new file mode 100644 index 000000000000..4bc60a6df697 --- /dev/null +++ b/drivers/net/can/usb/etas_es58x/es581_4.h @@ -0,0 +1,207 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* Driver for ETAS GmbH ES58X USB CAN(-FD) Bus Interfaces. + * + * File es581_4.h: Definitions and declarations specific to ETAS + * ES581.4. + * + * Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved. + * Copyright (c) 2020 ETAS K.K.. All rights reserved. + * Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr> + */ + +#ifndef __ES581_4_H__ +#define __ES581_4_H__ + +#include <linux/types.h> + +#define ES581_4_NUM_CAN_CH 2 +#define ES581_4_CHANNEL_IDX_OFFSET 1 + +#define ES581_4_TX_BULK_MAX 25 +#define ES581_4_RX_BULK_MAX 30 +#define ES581_4_ECHO_BULK_MAX 30 + +enum es581_4_cmd_type { + ES581_4_CAN_COMMAND_TYPE = 0x45 +}; + +enum es581_4_cmd_id { + ES581_4_CMD_ID_OPEN_CHANNEL = 0x01, + ES581_4_CMD_ID_CLOSE_CHANNEL = 0x02, + ES581_4_CMD_ID_SET_BITTIMING = 0x03, + ES581_4_CMD_ID_ENABLE_CHANNEL = 0x04, + ES581_4_CMD_ID_TX_MSG = 0x05, + ES581_4_CMD_ID_RX_MSG = 0x06, + ES581_4_CMD_ID_RESET_RX = 0x0A, + ES581_4_CMD_ID_RESET_TX = 0x0B, + ES581_4_CMD_ID_DISABLE_CHANNEL = 0x0C, + ES581_4_CMD_ID_TIMESTAMP = 0x0E, + ES581_4_CMD_ID_RESET_DEVICE = 0x28, + ES581_4_CMD_ID_ECHO = 0x71, + ES581_4_CMD_ID_DEVICE_ERR = 0x72 +}; + +enum es581_4_rx_type { + ES581_4_RX_TYPE_MESSAGE = 1, + ES581_4_RX_TYPE_ERROR = 3, + ES581_4_RX_TYPE_EVENT = 4 +}; + +/** + * struct es581_4_tx_conf_msg - Channel configuration. + * @bitrate: Bitrate. + * @sample_point: Sample point is in percent [0..100]. + * @samples_per_bit: type enum es58x_samples_per_bit. + * @bit_time: Number of time quanta in one bit. + * @sjw: Synchronization Jump Width. + * @sync_edge: type enum es58x_sync_edge. + * @physical_layer: type enum es58x_physical_layer. + * @echo_mode: type enum es58x_echo_mode. + * @channel_no: Channel number, starting from 1. Not to be confused + * with channel_idx of the ES58X FD which starts from 0. + */ +struct es581_4_tx_conf_msg { + __le32 bitrate; + __le32 sample_point; + __le32 samples_per_bit; + __le32 bit_time; + __le32 sjw; + __le32 sync_edge; + __le32 physical_layer; + __le32 echo_mode; + u8 channel_no; +} __packed; + +struct es581_4_tx_can_msg { + __le32 can_id; + __le32 packet_idx; + __le16 flags; + u8 channel_no; + u8 dlc; + u8 data[CAN_MAX_DLEN]; +} __packed; + +/* The ES581.4 allows bulk transfer. */ +struct es581_4_bulk_tx_can_msg { + u8 num_can_msg; + /* Using type "u8[]" instead of "struct es581_4_tx_can_msg[]" + * for tx_can_msg_buf because each member has a flexible size.
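+ * Each message is truncated right after its payload (see + * es581_4_sizeof_rx_tx_msg()), so consecutive messages are packed + * back to back and regular array indexing would not land on the + * real message boundaries.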
+ */ + u8 tx_can_msg_buf[ES581_4_TX_BULK_MAX * + sizeof(struct es581_4_tx_can_msg)]; +} __packed; + +struct es581_4_echo_msg { + __le64 timestamp; + __le32 packet_idx; +} __packed; + +struct es581_4_bulk_echo_msg { + u8 channel_no; + struct es581_4_echo_msg echo_msg[ES581_4_ECHO_BULK_MAX]; +} __packed; + +/* Normal Rx CAN Message */ +struct es581_4_rx_can_msg { + __le64 timestamp; + u8 rx_type; /* type enum es581_4_rx_type */ + u8 flags; /* type enum es58x_flag */ + u8 channel_no; + u8 dlc; + __le32 can_id; + u8 data[CAN_MAX_DLEN]; +} __packed; + +struct es581_4_rx_err_msg { + __le64 timestamp; + __le16 rx_type; /* type enum es581_4_rx_type */ + __le16 flags; /* type enum es58x_flag */ + u8 channel_no; + u8 __padding[2]; + u8 dlc; + __le32 tag; /* Related to the CAN filtering. Unused in this module */ + __le32 can_id; + __le32 error; /* type enum es58x_error */ + __le32 destination; /* Unused in this module */ +} __packed; + +struct es581_4_rx_event_msg { + __le64 timestamp; + __le16 rx_type; /* type enum es581_4_rx_type */ + u8 channel_no; + u8 __padding; + __le32 tag; /* Related to the CAN filtering. Unused in this module */ + __le32 event; /* type enum es58x_event */ + __le32 destination; /* Unused in this module */ +} __packed; + +struct es581_4_tx_ack_msg { + __le16 tx_free_entries; /* Number of remaining free entries in the device TX queue */ + u8 channel_no; + u8 rx_cmd_ret_u8; /* type enum es58x_cmd_ret_code_u8 */ +} __packed; + +struct es581_4_rx_cmd_ret { + __le32 rx_cmd_ret_le32; + u8 channel_no; + u8 __padding[3]; +} __packed; + +/** + * struct es581_4_urb_cmd - Commands received from or sent to the + * ES581.4 device. + * @SOF: Start of Frame. + * @cmd_type: Command Type (type: enum es581_4_cmd_type). The CRC + * calculation starts at this position. + * @cmd_id: Command ID (type: enum es581_4_cmd_id). + * @msg_len: Length of the message, excluding CRC (i.e. length of the + * union). + * @tx_conf_msg: Channel configuration. + * @bulk_tx_can_msg: Tx messages. + * @rx_can_msg: Array of Rx messages. + * @bulk_echo_msg: Tx message being looped back. + * @rx_err_msg: Error message. + * @rx_event_msg: Event message. + * @tx_ack_msg: Tx acknowledgment message. + * @rx_cmd_ret: Command return code. + * @timestamp: Timestamp reply. + * @rx_cmd_ret_u8: Rx 8 bits return code (type: enum + * es58x_cmd_ret_code_u8). + * @raw_msg: Message raw payload. + * @reserved_for_crc16_do_not_use: The structure ends with a + * CRC16. Because the structures in above union are of variable + * lengths, we can not predict the offset of the CRC in + * advance. Use functions es58x_get_crc() and es58x_set_crc() to + * manipulate it. 
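+ * + * In other words, for a command of total length urb_cmd_len, the + * CRC16 always occupies the last two bytes, at byte offset + * urb_cmd_len - 2, which is exactly where es58x_get_crc() and + * es58x_set_crc() read and write it.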
+ */ +struct es581_4_urb_cmd { + __le16 SOF; + u8 cmd_type; + u8 cmd_id; + __le16 msg_len; + + union { + struct es581_4_tx_conf_msg tx_conf_msg; + struct es581_4_bulk_tx_can_msg bulk_tx_can_msg; + struct es581_4_rx_can_msg rx_can_msg[ES581_4_RX_BULK_MAX]; + struct es581_4_bulk_echo_msg bulk_echo_msg; + struct es581_4_rx_err_msg rx_err_msg; + struct es581_4_rx_event_msg rx_event_msg; + struct es581_4_tx_ack_msg tx_ack_msg; + struct es581_4_rx_cmd_ret rx_cmd_ret; + __le64 timestamp; + u8 rx_cmd_ret_u8; + u8 raw_msg[0]; + } __packed; + + __le16 reserved_for_crc16_do_not_use; +} __packed; + +#define ES581_4_URB_CMD_HEADER_LEN (offsetof(struct es581_4_urb_cmd, raw_msg)) +#define ES581_4_TX_URB_CMD_MAX_LEN \ + ES58X_SIZEOF_URB_CMD(struct es581_4_urb_cmd, bulk_tx_can_msg) +#define ES581_4_RX_URB_CMD_MAX_LEN \ + ES58X_SIZEOF_URB_CMD(struct es581_4_urb_cmd, rx_can_msg) + +#endif /* __ES581_4_H__ */ diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c new file mode 100644 index 000000000000..57e5f94468e9 --- /dev/null +++ b/drivers/net/can/usb/etas_es58x/es58x_core.c @@ -0,0 +1,2301 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Driver for ETAS GmbH ES58X USB CAN(-FD) Bus Interfaces. + * + * File es58x_core.c: Core logic to manage the network devices and the + * USB interface. + * + * Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved. + * Copyright (c) 2020 ETAS K.K.. All rights reserved. + * Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/usb.h> +#include <linux/crc16.h> +#include <asm/unaligned.h> + +#include "es58x_core.h" + +#define DRV_VERSION "1.00" +MODULE_AUTHOR("Mailhol Vincent <mailhol.vincent@wanadoo.fr>"); +MODULE_AUTHOR("Arunachalam Santhanam <arunachalam.santhanam@in.bosch.com>"); +MODULE_DESCRIPTION("Socket CAN driver for ETAS ES58X USB adapters"); +MODULE_VERSION(DRV_VERSION); +MODULE_LICENSE("GPL v2"); + +#define ES58X_MODULE_NAME "etas_es58x" +#define ES58X_VENDOR_ID 0x108C +#define ES581_4_PRODUCT_ID 0x0159 +#define ES582_1_PRODUCT_ID 0x0168 +#define ES584_1_PRODUCT_ID 0x0169 + +/* ES58X FD has some interface protocols unsupported by this driver. */ +#define ES58X_FD_INTERFACE_PROTOCOL 0 + +/* Table of devices which work with this driver. */ +static const struct usb_device_id es58x_id_table[] = { + { + /* ETAS GmbH ES581.4 USB dual-channel CAN Bus Interface module. */ + USB_DEVICE(ES58X_VENDOR_ID, ES581_4_PRODUCT_ID), + .driver_info = ES58X_DUAL_CHANNEL + }, { + /* ETAS GmbH ES582.1 USB dual-channel CAN FD Bus Interface module. */ + USB_DEVICE_INTERFACE_PROTOCOL(ES58X_VENDOR_ID, ES582_1_PRODUCT_ID, + ES58X_FD_INTERFACE_PROTOCOL), + .driver_info = ES58X_DUAL_CHANNEL | ES58X_FD_FAMILY + }, { + /* ETAS GmbH ES584.1 USB single-channel CAN FD Bus Interface module. */ + USB_DEVICE_INTERFACE_PROTOCOL(ES58X_VENDOR_ID, ES584_1_PRODUCT_ID, + ES58X_FD_INTERFACE_PROTOCOL), + .driver_info = ES58X_FD_FAMILY + }, { + /* Terminating entry */ + } +}; + +MODULE_DEVICE_TABLE(usb, es58x_id_table); + +#define es58x_print_hex_dump(buf, len) \ + print_hex_dump(KERN_DEBUG, \ + ES58X_MODULE_NAME " " __stringify(buf) ": ", \ + DUMP_PREFIX_NONE, 16, 1, buf, len, false) + +#define es58x_print_hex_dump_debug(buf, len) \ + print_hex_dump_debug(ES58X_MODULE_NAME " " __stringify(buf) ": ",\ + DUMP_PREFIX_NONE, 16, 1, buf, len, false) + +/* The last two bytes of an ES58X command are a CRC16.
The first two + * bytes (the start of frame) are skipped and the CRC calculation + * starts on the third byte. + */ +#define ES58X_CRC_CALC_OFFSET 2 + +/** + * es58x_calculate_crc() - Compute the crc16 of a given URB. + * @urb_cmd: The URB command for which we want to calculate the CRC. + * @urb_len: Length of @urb_cmd. Must be greater than 4 + * (ES58X_CRC_CALC_OFFSET + sizeof(crc)) + * + * Return: crc16 value. + */ +static u16 es58x_calculate_crc(const union es58x_urb_cmd *urb_cmd, u16 urb_len) +{ + u16 crc; + ssize_t len = urb_len - ES58X_CRC_CALC_OFFSET - sizeof(crc); + + crc = crc16(0, &urb_cmd->raw_cmd[ES58X_CRC_CALC_OFFSET], len); + return crc; +} + +/** + * es58x_get_crc() - Get the CRC value of a given URB. + * @urb_cmd: The URB command for which we want to get the CRC. + * @urb_len: Length of @urb_cmd. Must be greater than 4 + * (ES58X_CRC_CALC_OFFSET + sizeof(crc)) + * + * Return: crc16 value. + */ +static u16 es58x_get_crc(const union es58x_urb_cmd *urb_cmd, u16 urb_len) +{ + u16 crc; + const __le16 *crc_addr; + + crc_addr = (__le16 *)&urb_cmd->raw_cmd[urb_len - sizeof(crc)]; + crc = get_unaligned_le16(crc_addr); + return crc; +} + +/** + * es58x_set_crc() - Set the CRC value of a given URB. + * @urb_cmd: The URB command for which we want to set the CRC. + * @urb_len: Length of @urb_cmd. Must be greater than 4 + * (ES58X_CRC_CALC_OFFSET + sizeof(crc)) + */ +static void es58x_set_crc(union es58x_urb_cmd *urb_cmd, u16 urb_len) +{ + u16 crc; + __le16 *crc_addr; + + crc = es58x_calculate_crc(urb_cmd, urb_len); + crc_addr = (__le16 *)&urb_cmd->raw_cmd[urb_len - sizeof(crc)]; + put_unaligned_le16(crc, crc_addr); +} + +/** + * es58x_check_crc() - Validate the CRC value of a given URB. + * @es58x_dev: ES58X device. + * @urb_cmd: The URB command for which we want to check the CRC. + * @urb_len: Length of @urb_cmd. Must be greater than 4 + * (ES58X_CRC_CALC_OFFSET + sizeof(crc)) + * + * Return: zero on success, -EBADMSG if the CRC check fails. + */ +static int es58x_check_crc(struct es58x_device *es58x_dev, + const union es58x_urb_cmd *urb_cmd, u16 urb_len) +{ + u16 calculated_crc = es58x_calculate_crc(urb_cmd, urb_len); + u16 expected_crc = es58x_get_crc(urb_cmd, urb_len); + + if (expected_crc != calculated_crc) { + dev_err_ratelimited(es58x_dev->dev, + "%s: Bad CRC, urb_len: %d\n", + __func__, urb_len); + return -EBADMSG; + } + + return 0; +} + +/** + * es58x_timestamp_to_ns() - Convert a timestamp value received from an + * ES58X device to nanoseconds. + * @timestamp: Timestamp received from an ES58X device. + * + * The timestamp received from ES58X is expressed in multiples of 0.5 + * microseconds. This function converts it into nanoseconds. + * + * Return: Timestamp value in nanoseconds. + */ +static u64 es58x_timestamp_to_ns(u64 timestamp) +{ + const u64 es58x_timestamp_ns_mult_coef = 500ULL; + + return es58x_timestamp_ns_mult_coef * timestamp; +} + +/** + * es58x_set_skb_timestamp() - Set the hardware timestamp of an skb. + * @netdev: CAN network device. + * @skb: socket buffer of a CAN message. + * @timestamp: Timestamp received from an ES58X device. + * + * Used for both received and echo messages.
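+ * + * For example, a raw device timestamp of 2000000 (half microsecond + * ticks) becomes 2000000 * 500 = 1000000000 ns, i.e. one second, + * before realtime_diff_ns is added to align it with the kernel + * realtime clock.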
+ */ +static void es58x_set_skb_timestamp(struct net_device *netdev, + struct sk_buff *skb, u64 timestamp) +{ + struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; + struct skb_shared_hwtstamps *hwts; + + hwts = skb_hwtstamps(skb); + /* Ignoring overflow (overflow on 64 bits timestamp with nano + * second precision would occur after more than 500 years). + */ + hwts->hwtstamp = ns_to_ktime(es58x_timestamp_to_ns(timestamp) + + es58x_dev->realtime_diff_ns); +} + +/** + * es58x_rx_timestamp() - Handle a received timestamp. + * @es58x_dev: ES58X device. + * @timestamp: Timestamp received from an ES58X device. + * + * Calculate the difference between the ES58X device and the kernel + * internal clocks. This difference will later be used as an offset to + * convert the timestamps of RX and echo messages to match the kernel + * system time (e.g. convert to UNIX time). + */ +void es58x_rx_timestamp(struct es58x_device *es58x_dev, u64 timestamp) +{ + u64 ktime_real_ns = ktime_get_real_ns(); + u64 device_timestamp = es58x_timestamp_to_ns(timestamp); + + dev_dbg(es58x_dev->dev, "%s: request round-trip time: %llu ns\n", + __func__, ktime_real_ns - es58x_dev->ktime_req_ns); + + es58x_dev->realtime_diff_ns = + (es58x_dev->ktime_req_ns + ktime_real_ns) / 2 - device_timestamp; + es58x_dev->ktime_req_ns = 0; + + dev_dbg(es58x_dev->dev, + "%s: Device timestamp: %llu, diff with kernel: %llu\n", + __func__, device_timestamp, es58x_dev->realtime_diff_ns); +} + +/** + * es58x_set_realtime_diff_ns() - Calculate difference between the + * clocks of the ES58X device and the kernel + * @es58x_dev: ES58X device. + * + * Request a timestamp from the ES58X device. Once the answer is + * received, the timestamp difference will be set by the callback + * function es58x_rx_timestamp(). + * + * Return: zero on success, errno when any error occurs. + */ +static int es58x_set_realtime_diff_ns(struct es58x_device *es58x_dev) +{ + if (es58x_dev->ktime_req_ns) { + dev_warn(es58x_dev->dev, + "%s: Previous request to set timestamp has not completed yet\n", + __func__); + return -EBUSY; + } + + es58x_dev->ktime_req_ns = ktime_get_real_ns(); + return es58x_dev->ops->get_timestamp(es58x_dev); +} + +/** + * es58x_is_can_state_active() - Is the network device in an active + * CAN state? + * @netdev: CAN network device. + * + * The device is considered active if it is able to send or receive + * CAN frames, that is to say if it is in any of + * CAN_STATE_ERROR_ACTIVE, CAN_STATE_ERROR_WARNING or + * CAN_STATE_ERROR_PASSIVE states. + * + * Caution: when recovering from a bus-off, + * drivers/net/can/dev.c:can_restart() will call + * drivers/net/can/dev.c:can_flush_echo_skb() without using any kind of + * locks. For this reason, it is critical to guarantee that no TX or + * echo operations (i.e. any access to priv->echo_skb[]) can be done + * while this function is returning false. + * + * Return: true if the device is active, else returns false. + */ +static bool es58x_is_can_state_active(struct net_device *netdev) +{ + return es58x_priv(netdev)->can.state < CAN_STATE_BUS_OFF; +} + +/** + * es58x_is_echo_skb_threshold_reached() - Determine the limit of how + * many skb slots can be taken before we should stop the network + * queue. + * @priv: ES58X private parameters related to the network device. + * + * We need to save enough free skb slots in order to be able to do + * bulk send. This function can be used to determine when to wake or + * stop the network queue in regard to the number of skb slots already + * taken in the echo FIFO.
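+ * + * For example, with the ES581.4 parameters echo_skb_max = 64 and + * tx_bulk_max = 25, the threshold evaluates to 64 - 25 + 1 = 40, + * so the queue is considered full once 40 or more echo skb slots + * are in use.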
+ * + * Return: boolean. + */ +static bool es58x_is_echo_skb_threshold_reached(struct es58x_priv *priv) +{ + u32 num_echo_skb = priv->tx_head - priv->tx_tail; + u32 threshold = priv->can.echo_skb_max - + priv->es58x_dev->param->tx_bulk_max + 1; + + return num_echo_skb >= threshold; +} + +/** + * es58x_can_free_echo_skb_tail() - Remove the oldest echo skb of the + * echo FIFO. + * @netdev: CAN network device. + * + * Naming convention: the tail is the beginning of the FIFO, i.e. the + * first skb to have entered the FIFO. + */ +static void es58x_can_free_echo_skb_tail(struct net_device *netdev) +{ + struct es58x_priv *priv = es58x_priv(netdev); + u16 fifo_mask = priv->es58x_dev->param->fifo_mask; + unsigned int frame_len = 0; + + can_free_echo_skb(netdev, priv->tx_tail & fifo_mask, &frame_len); + netdev_completed_queue(netdev, 1, frame_len); + + priv->tx_tail++; + + netdev->stats.tx_dropped++; +} + +/** + * es58x_can_get_echo_skb_recovery() - Try to re-sync the echo FIFO. + * @netdev: CAN network device. + * @rcv_packet_idx: Index of the echo packet received from the device. + * + * This function should not be called under normal circumstances. In + * the unlikely case that one or several URB packets get dropped by + * the device, the index will get out of sync. Try to recover by + * dropping the echo skb packets with older indexes. + * + * Return: zero if recovery was successful, -EINVAL otherwise. + */ +static int es58x_can_get_echo_skb_recovery(struct net_device *netdev, + u32 rcv_packet_idx) +{ + struct es58x_priv *priv = es58x_priv(netdev); + int ret = 0; + + netdev->stats.tx_errors++; + + if (net_ratelimit()) + netdev_warn(netdev, + "Bad echo packet index: %u. First index: %u, end index %u, num_echo_skb: %02u/%02u\n", + rcv_packet_idx, priv->tx_tail, priv->tx_head, + priv->tx_head - priv->tx_tail, + priv->can.echo_skb_max); + + if ((s32)(rcv_packet_idx - priv->tx_tail) < 0) { + if (net_ratelimit()) + netdev_warn(netdev, + "Received echo index is from the past. Ignoring it\n"); + ret = -EINVAL; + } else if ((s32)(rcv_packet_idx - priv->tx_head) >= 0) { + if (net_ratelimit()) + netdev_err(netdev, + "Received echo index is from the future. Ignoring it\n"); + ret = -EINVAL; + } else { + if (net_ratelimit()) + netdev_warn(netdev, + "Recovery: dropping %u echo skb from index %u to %u\n", + rcv_packet_idx - priv->tx_tail, + priv->tx_tail, rcv_packet_idx - 1); + while (priv->tx_tail != rcv_packet_idx) { + if (priv->tx_tail == priv->tx_head) + return -EINVAL; + es58x_can_free_echo_skb_tail(netdev); + } + } + return ret; +} + +/** + * es58x_can_get_echo_skb() - Get the skb from the echo FIFO and loop + * it back locally. + * @netdev: CAN network device. + * @rcv_packet_idx: Index of the first packet received from the device. + * @tstamps: Array of hardware timestamps received from an ES58X device. + * @pkts: Number of packets (and so, length of @tstamps). + * + * Callback function for when we receive a self reception + * acknowledgment. Retrieves the skb from the echo FIFO, sets its + * hardware timestamp (the actual time it was sent) and loops it back + * locally. + * + * The device has to be active (i.e. network interface UP and not in + * bus off state or restarting). + * + * Packet indexes must be consecutive (i.e. index of first packet is + * @rcv_packet_idx, index of second packet is @rcv_packet_idx + 1 and + * index of last packet is @rcv_packet_idx + @pkts - 1). + * + * Return: zero on success.
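+ * + * For example, rcv_packet_idx = 100 with pkts = 3 acknowledges the + * echo skbs of packets 100, 101 and 102, and leaves priv->tx_tail + * at 103.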
+ */ +int es58x_can_get_echo_skb(struct net_device *netdev, u32 rcv_packet_idx, + u64 *tstamps, unsigned int pkts) +{ + struct es58x_priv *priv = es58x_priv(netdev); + unsigned int rx_total_frame_len = 0; + unsigned int num_echo_skb = priv->tx_head - priv->tx_tail; + int i; + u16 fifo_mask = priv->es58x_dev->param->fifo_mask; + + if (!netif_running(netdev)) { + if (net_ratelimit()) + netdev_info(netdev, + "%s: %s is down, dropping %d echo packets\n", + __func__, netdev->name, pkts); + netdev->stats.tx_dropped += pkts; + return 0; + } else if (!es58x_is_can_state_active(netdev)) { + if (net_ratelimit()) + netdev_dbg(netdev, + "Bus is off or device is restarting. Ignoring %u echo packets from index %u\n", + pkts, rcv_packet_idx); + /* stats.tx_dropped will be (or was already) + * incremented by + * drivers/net/can/dev.c:can_flush_echo_skb(). + */ + return 0; + } else if (num_echo_skb == 0) { + if (net_ratelimit()) + netdev_warn(netdev, + "Received %u echo packets from index: %u but echo skb queue is empty.\n", + pkts, rcv_packet_idx); + netdev->stats.tx_dropped += pkts; + return 0; + } + + if (priv->tx_tail != rcv_packet_idx) { + if (es58x_can_get_echo_skb_recovery(netdev, rcv_packet_idx) < 0) { + if (net_ratelimit()) + netdev_warn(netdev, + "Could not find echo skb for echo packet index: %u\n", + rcv_packet_idx); + return 0; + } + } + if (num_echo_skb < pkts) { + int pkts_drop = pkts - num_echo_skb; + + if (net_ratelimit()) + netdev_err(netdev, + "Received %u echo packets but have only %d echo skb. Dropping %d echo skb\n", + pkts, num_echo_skb, pkts_drop); + netdev->stats.tx_dropped += pkts_drop; + pkts -= pkts_drop; + } + + for (i = 0; i < pkts; i++) { + unsigned int skb_idx = priv->tx_tail & fifo_mask; + struct sk_buff *skb = priv->can.echo_skb[skb_idx]; + unsigned int frame_len = 0; + + if (skb) + es58x_set_skb_timestamp(netdev, skb, tstamps[i]); + + netdev->stats.tx_bytes += can_get_echo_skb(netdev, skb_idx, + &frame_len); + rx_total_frame_len += frame_len; + + priv->tx_tail++; + } + + netdev_completed_queue(netdev, pkts, rx_total_frame_len); + netdev->stats.tx_packets += pkts; + + priv->err_passive_before_rtx_success = 0; + if (!es58x_is_echo_skb_threshold_reached(priv)) + netif_wake_queue(netdev); + + return 0; +} + +/** + * es58x_can_reset_echo_fifo() - Reset the echo FIFO. + * @netdev: CAN network device. + * + * The echo_skb array of struct can_priv will be flushed by + * drivers/net/can/dev.c:can_flush_echo_skb(). This function resets + * the parameters of the struct es58x_priv of our device and resets the + * queue (c.f. BQL). + */ +static void es58x_can_reset_echo_fifo(struct net_device *netdev) +{ + struct es58x_priv *priv = es58x_priv(netdev); + + priv->tx_tail = 0; + priv->tx_head = 0; + priv->tx_urb = NULL; + priv->err_passive_before_rtx_success = 0; + netdev_reset_queue(netdev); +} + +/** + * es58x_flush_pending_tx_msg() - Reset the buffer for transmission messages. + * @netdev: CAN network device. + * + * es58x_start_xmit() will queue up to tx_bulk_max messages in + * &tx_urb buffer and do a bulk send of all messages in one single URB + * (c.f. xmit_more flag). When the device recovers from a bus off + * state or when the device stops, the tx_urb buffer might still have + * pending messages in it and thus needs to be flushed.
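+ * + * Note that each can_free_echo_skb() below is paired with a + * netdev_completed_queue() call so that the byte queue limit (BQL) + * accounting stays balanced with what was credited at transmit + * time.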
+ */ +static void es58x_flush_pending_tx_msg(struct net_device *netdev) +{ + struct es58x_priv *priv = es58x_priv(netdev); + struct es58x_device *es58x_dev = priv->es58x_dev; + + if (priv->tx_urb) { + netdev_warn(netdev, "%s: dropping %d TX messages\n", + __func__, priv->tx_can_msg_cnt); + netdev->stats.tx_dropped += priv->tx_can_msg_cnt; + while (priv->tx_can_msg_cnt > 0) { + unsigned int frame_len = 0; + u16 fifo_mask = priv->es58x_dev->param->fifo_mask; + + priv->tx_head--; + priv->tx_can_msg_cnt--; + can_free_echo_skb(netdev, priv->tx_head & fifo_mask, + &frame_len); + netdev_completed_queue(netdev, 1, frame_len); + } + usb_anchor_urb(priv->tx_urb, &priv->es58x_dev->tx_urbs_idle); + atomic_inc(&es58x_dev->tx_urbs_idle_cnt); + usb_free_urb(priv->tx_urb); + } + priv->tx_urb = NULL; +} + +/** + * es58x_tx_ack_msg() - Handle acknowledgment messages. + * @netdev: CAN network device. + * @tx_free_entries: Number of free entries in the device transmit FIFO. + * @rx_cmd_ret_u32: error code as returned by the ES58X device. + * + * ES58X sends an acknowledgment message after a transmission request + * is done. This is mandatory for the ES581.4 but is optional (and + * deactivated in this driver) for the ES58X_FD family. + * + * Under normal circumstances, this function should never throw an + * error message. + * + * Return: zero on success, errno when any error occurs. + */ +int es58x_tx_ack_msg(struct net_device *netdev, u16 tx_free_entries, + enum es58x_ret_u32 rx_cmd_ret_u32) +{ + struct es58x_priv *priv = es58x_priv(netdev); + + if (tx_free_entries <= priv->es58x_dev->param->tx_bulk_max) { + if (net_ratelimit()) + netdev_err(netdev, + "Only %d entries left in device queue, num_echo_skb: %d/%d\n", + tx_free_entries, + priv->tx_head - priv->tx_tail, + priv->can.echo_skb_max); + netif_stop_queue(netdev); + } + + return es58x_rx_cmd_ret_u32(netdev, ES58X_RET_TYPE_TX_MSG, + rx_cmd_ret_u32); +} + +/** + * es58x_rx_can_msg() - Handle a received a CAN message. + * @netdev: CAN network device. + * @timestamp: Hardware time stamp (only relevant in rx branches). + * @data: CAN payload. + * @can_id: CAN ID. + * @es58x_flags: Please refer to enum es58x_flag. + * @dlc: Data Length Code (raw value). + * + * Fill up a CAN skb and post it. + * + * This function handles the case where the DLC of a classical CAN + * frame is greater than CAN_MAX_DLEN (c.f. the len8_dlc field of + * struct can_frame). + * + * Return: zero on success. 
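+ * + * For example, a classical CAN frame with a raw dlc of 0xF still + * gets an 8 byte payload (can_cc_dlc2len() caps the length at + * CAN_MAX_DLEN) while can_frame_set_cc_len() preserves the raw + * value in the len8_dlc field when CAN_CTRLMODE_CC_LEN8_DLC is + * enabled.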
+ */ +int es58x_rx_can_msg(struct net_device *netdev, u64 timestamp, const u8 *data, + canid_t can_id, enum es58x_flag es58x_flags, u8 dlc) +{ + struct canfd_frame *cfd; + struct can_frame *ccf; + struct sk_buff *skb; + u8 len; + bool is_can_fd = !!(es58x_flags & ES58X_FLAG_FD_DATA); + + if (dlc > CAN_MAX_RAW_DLC) { + netdev_err(netdev, + "%s: DLC is %d but maximum should be %d\n", + __func__, dlc, CAN_MAX_RAW_DLC); + return -EMSGSIZE; + } + + if (is_can_fd) { + len = can_fd_dlc2len(dlc); + skb = alloc_canfd_skb(netdev, &cfd); + } else { + len = can_cc_dlc2len(dlc); + skb = alloc_can_skb(netdev, &ccf); + cfd = (struct canfd_frame *)ccf; + } + if (!skb) { + netdev->stats.rx_dropped++; + return 0; + } + + cfd->can_id = can_id; + if (es58x_flags & ES58X_FLAG_EFF) + cfd->can_id |= CAN_EFF_FLAG; + if (is_can_fd) { + cfd->len = len; + if (es58x_flags & ES58X_FLAG_FD_BRS) + cfd->flags |= CANFD_BRS; + if (es58x_flags & ES58X_FLAG_FD_ESI) + cfd->flags |= CANFD_ESI; + } else { + can_frame_set_cc_len(ccf, dlc, es58x_priv(netdev)->can.ctrlmode); + if (es58x_flags & ES58X_FLAG_RTR) { + ccf->can_id |= CAN_RTR_FLAG; + len = 0; + } + } + memcpy(cfd->data, data, len); + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += len; + + es58x_set_skb_timestamp(netdev, skb, timestamp); + netif_rx(skb); + + es58x_priv(netdev)->err_passive_before_rtx_success = 0; + + return 0; +} + +/** + * es58x_rx_err_msg() - Handle a received CAN event or error message. + * @netdev: CAN network device. + * @error: Error code. + * @event: Event code. + * @timestamp: Timestamp received from an ES58X device. + * + * Handle the errors and events received by the ES58X device, create + * a CAN error skb and post it. + * + * In some rare cases the devices might get stuck alternating between + * CAN_STATE_ERROR_PASSIVE and CAN_STATE_ERROR_WARNING. To prevent + * this behavior, we force a bus off state if the device stays in + * CAN_STATE_ERROR_PASSIVE for ES58X_CONSECUTIVE_ERR_PASSIVE_MAX + * consecutive events with no successful transmission or reception in + * between. + * + * Once the device is in bus off state, the only way to restart it is + * through the drivers/net/can/dev.c:can_restart() function. The + * device is technically capable of recovering by itself under certain + * circumstances, however, allowing self recovery would create + * complex race conditions with drivers/net/can/dev.c:can_restart() + * and thus was not implemented. To activate automatic restart, please + * set the restart-ms parameter (e.g. ip link set can0 type can + * restart-ms 100). + * + * If the bus is really unstable, this function will try to send a + * lot of log messages. Those are rate limited (i.e. you will see + * messages such as "net_ratelimit: XXX callbacks suppressed" in + * dmesg). + * + * Return: zero on success, errno when any error occurs.
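+ * + * Note that alloc_can_err_skb() may fail: every access to the + * error frame below is therefore guarded by an "if (cf)" check, + * and the CAN state bookkeeping is performed even when no skb + * could be allocated.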
+ */ +int es58x_rx_err_msg(struct net_device *netdev, enum es58x_err error, + enum es58x_event event, u64 timestamp) +{ + struct es58x_priv *priv = es58x_priv(netdev); + struct can_priv *can = netdev_priv(netdev); + struct can_device_stats *can_stats = &can->can_stats; + struct can_frame *cf = NULL; + struct sk_buff *skb; + int ret; + + if (!netif_running(netdev)) { + if (net_ratelimit()) + netdev_info(netdev, "%s: %s is down, dropping packet\n", + __func__, netdev->name); + netdev->stats.rx_dropped++; + return 0; + } + + if (error == ES58X_ERR_OK && event == ES58X_EVENT_OK) { + netdev_err(netdev, "%s: Both error and event are zero\n", + __func__); + return -EINVAL; + } + + skb = alloc_can_err_skb(netdev, &cf); + + switch (error) { + case ES58X_ERR_OK: /* 0: No error */ + break; + + case ES58X_ERR_PROT_STUFF: + if (net_ratelimit()) + netdev_dbg(netdev, "Error BITSTUFF\n"); + if (cf) + cf->data[2] |= CAN_ERR_PROT_STUFF; + break; + + case ES58X_ERR_PROT_FORM: + if (net_ratelimit()) + netdev_dbg(netdev, "Error FORMAT\n"); + if (cf) + cf->data[2] |= CAN_ERR_PROT_FORM; + break; + + case ES58X_ERR_ACK: + if (net_ratelimit()) + netdev_dbg(netdev, "Error ACK\n"); + if (cf) + cf->can_id |= CAN_ERR_ACK; + break; + + case ES58X_ERR_PROT_BIT: + if (net_ratelimit()) + netdev_dbg(netdev, "Error BIT\n"); + if (cf) + cf->data[2] |= CAN_ERR_PROT_BIT; + break; + + case ES58X_ERR_PROT_CRC: + if (net_ratelimit()) + netdev_dbg(netdev, "Error CRC\n"); + if (cf) + cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; + break; + + case ES58X_ERR_PROT_BIT1: + if (net_ratelimit()) + netdev_dbg(netdev, + "Error: expected a recessive bit but monitored a dominant one\n"); + if (cf) + cf->data[2] |= CAN_ERR_PROT_BIT1; + break; + + case ES58X_ERR_PROT_BIT0: + if (net_ratelimit()) + netdev_dbg(netdev, + "Error: expected a dominant bit but monitored a recessive one\n"); + if (cf) + cf->data[2] |= CAN_ERR_PROT_BIT0; + break; + + case ES58X_ERR_PROT_OVERLOAD: + if (net_ratelimit()) + netdev_dbg(netdev, "Error OVERLOAD\n"); + if (cf) + cf->data[2] |= CAN_ERR_PROT_OVERLOAD; + break; + + case ES58X_ERR_PROT_UNSPEC: + if (net_ratelimit()) + netdev_dbg(netdev, "Unspecified error\n"); + if (cf) + cf->can_id |= CAN_ERR_PROT; + break; + + default: + if (net_ratelimit()) + netdev_err(netdev, + "%s: Unspecified error code 0x%04X\n", + __func__, (int)error); + if (cf) + cf->can_id |= CAN_ERR_PROT; + break; + } + + switch (event) { + case ES58X_EVENT_OK: /* 0: No event */ + break; + + case ES58X_EVENT_CRTL_ACTIVE: + if (can->state == CAN_STATE_BUS_OFF) { + netdev_err(netdev, + "%s: state transition: BUS OFF -> ACTIVE\n", + __func__); + } + if (net_ratelimit()) + netdev_dbg(netdev, "Event CAN BUS ACTIVE\n"); + if (cf) + cf->data[1] |= CAN_ERR_CRTL_ACTIVE; + can->state = CAN_STATE_ERROR_ACTIVE; + break; + + case ES58X_EVENT_CRTL_PASSIVE: + if (net_ratelimit()) + netdev_dbg(netdev, "Event CAN BUS PASSIVE\n"); + /* Either TX or RX error count reached passive state + * but we do not know which. Setting both flags by + * default. + */ + if (cf) { + cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; + cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE; + } + if (can->state < CAN_STATE_BUS_OFF) + can->state = CAN_STATE_ERROR_PASSIVE; + can_stats->error_passive++; + if (priv->err_passive_before_rtx_success < U8_MAX) + priv->err_passive_before_rtx_success++; + break; + + case ES58X_EVENT_CRTL_WARNING: + if (net_ratelimit()) + netdev_dbg(netdev, "Event CAN BUS WARNING\n"); + /* Either TX or RX error count reached warning state + * but we do not know which.
Setting both flags by + * default. + */ + if (cf) { + cf->data[1] |= CAN_ERR_CRTL_RX_WARNING; + cf->data[1] |= CAN_ERR_CRTL_TX_WARNING; + } + if (can->state < CAN_STATE_BUS_OFF) + can->state = CAN_STATE_ERROR_WARNING; + can_stats->error_warning++; + break; + + case ES58X_EVENT_BUSOFF: + if (net_ratelimit()) + netdev_dbg(netdev, "Event CAN BUS OFF\n"); + if (cf) + cf->can_id |= CAN_ERR_BUSOFF; + can_stats->bus_off++; + netif_stop_queue(netdev); + if (can->state != CAN_STATE_BUS_OFF) { + can->state = CAN_STATE_BUS_OFF; + can_bus_off(netdev); + ret = can->do_set_mode(netdev, CAN_MODE_STOP); + if (ret) + return ret; + } + break; + + case ES58X_EVENT_SINGLE_WIRE: + if (net_ratelimit()) + netdev_warn(netdev, + "Lost connection on either CAN high or CAN low\n"); + /* Lost connection on either CAN high or CAN + * low. Setting both flags by default. + */ + if (cf) { + cf->data[4] |= CAN_ERR_TRX_CANH_NO_WIRE; + cf->data[4] |= CAN_ERR_TRX_CANL_NO_WIRE; + } + break; + + default: + if (net_ratelimit()) + netdev_err(netdev, + "%s: Unspecified event code 0x%04X\n", + __func__, (int)event); + if (cf) + cf->can_id |= CAN_ERR_CRTL; + break; + } + + /* drivers/net/can/dev.c:can_restart() takes into account error + * messages in the RX stats. Doing the same here for + * consistency. + */ + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += CAN_ERR_DLC; + + if (cf) { + if (cf->data[1]) + cf->can_id |= CAN_ERR_CRTL; + if (cf->data[2] || cf->data[3]) { + cf->can_id |= CAN_ERR_PROT; + can_stats->bus_error++; + } + if (cf->data[4]) + cf->can_id |= CAN_ERR_TRX; + + es58x_set_skb_timestamp(netdev, skb, timestamp); + netif_rx(skb); + } + + if ((event & ES58X_EVENT_CRTL_PASSIVE) && + priv->err_passive_before_rtx_success == ES58X_CONSECUTIVE_ERR_PASSIVE_MAX) { + netdev_info(netdev, + "Got %d consecutive error passive events with no successful RX or TX. Forcing bus-off\n", + priv->err_passive_before_rtx_success); + return es58x_rx_err_msg(netdev, ES58X_ERR_OK, + ES58X_EVENT_BUSOFF, timestamp); + } + + return 0; +} + +/** + * es58x_cmd_ret_desc() - Convert a command type to a string. + * @cmd_ret_type: Type of the command which triggered the return code. + * + * The final line (return "<unknown>") should not be reached. If this + * is the case, there is an implementation bug. + * + * Return: a readable description of the @cmd_ret_type. + */ +static const char *es58x_cmd_ret_desc(enum es58x_ret_type cmd_ret_type) +{ + switch (cmd_ret_type) { + case ES58X_RET_TYPE_SET_BITTIMING: + return "Set bittiming"; + case ES58X_RET_TYPE_ENABLE_CHANNEL: + return "Enable channel"; + case ES58X_RET_TYPE_DISABLE_CHANNEL: + return "Disable channel"; + case ES58X_RET_TYPE_TX_MSG: + return "Transmit message"; + case ES58X_RET_TYPE_RESET_RX: + return "Reset RX"; + case ES58X_RET_TYPE_RESET_TX: + return "Reset TX"; + case ES58X_RET_TYPE_DEVICE_ERR: + return "Device error"; + } + + return "<unknown>"; +}; + +/** + * es58x_rx_cmd_ret_u8() - Handle the command's return code received + * from the ES58X device. + * @dev: Device, only used for the dev_XXX() print functions. + * @cmd_ret_type: Type of the command which triggered the return code. + * @rx_cmd_ret_u8: Command error code as returned by the ES58X device. + * + * Handles the 8 bit command return code. Those are specific to the + * ES581.4 device. The return value will eventually be used by + * es58x_handle_urb_cmd() function which will take proper actions in + * case of critical issues such as memory errors or bad CRC values.
+ * + * In contrast with es58x_rx_cmd_ret_u32(), the network device is + * unknown. + * + * Return: zero on success, errno when any error occurs. + */ +int es58x_rx_cmd_ret_u8(struct device *dev, + enum es58x_ret_type cmd_ret_type, + enum es58x_ret_u8 rx_cmd_ret_u8) +{ + const char *ret_desc = es58x_cmd_ret_desc(cmd_ret_type); + + switch (rx_cmd_ret_u8) { + case ES58X_RET_U8_OK: + dev_dbg_ratelimited(dev, "%s: OK\n", ret_desc); + return 0; + + case ES58X_RET_U8_ERR_UNSPECIFIED_FAILURE: + dev_err(dev, "%s: unspecified failure\n", ret_desc); + return -EBADMSG; + + case ES58X_RET_U8_ERR_NO_MEM: + dev_err(dev, "%s: device ran out of memory\n", ret_desc); + return -ENOMEM; + + case ES58X_RET_U8_ERR_BAD_CRC: + dev_err(dev, "%s: CRC of previous command is incorrect\n", + ret_desc); + return -EIO; + + default: + dev_err(dev, "%s: returned unknown value: 0x%02X\n", + ret_desc, rx_cmd_ret_u8); + return -EBADMSG; + } +} + +/** + * es58x_rx_cmd_ret_u32() - Handle the command return code received + * from the ES58X device. + * @netdev: CAN network device. + * @cmd_ret_type: Type of the command which triggered the return code. + * @rx_cmd_ret_u32: error code as returned by the ES58X device. + * + * Handles the 32 bit command return code. The return value will + * eventually be used by es58x_handle_urb_cmd() function which will + * take proper actions in case of critical issues such as memory + * errors or bad CRC values. + * + * Return: zero on success, errno when any error occurs. + */ +int es58x_rx_cmd_ret_u32(struct net_device *netdev, + enum es58x_ret_type cmd_ret_type, + enum es58x_ret_u32 rx_cmd_ret_u32) +{ + struct es58x_priv *priv = es58x_priv(netdev); + const struct es58x_operators *ops = priv->es58x_dev->ops; + const char *ret_desc = es58x_cmd_ret_desc(cmd_ret_type); + + switch (rx_cmd_ret_u32) { + case ES58X_RET_U32_OK: + switch (cmd_ret_type) { + case ES58X_RET_TYPE_ENABLE_CHANNEL: + es58x_can_reset_echo_fifo(netdev); + priv->can.state = CAN_STATE_ERROR_ACTIVE; + netif_wake_queue(netdev); + netdev_info(netdev, + "%s: %s (Serial Number %s): CAN%d channel becomes ready\n", + ret_desc, priv->es58x_dev->udev->product, + priv->es58x_dev->udev->serial, + priv->channel_idx + 1); + break; + + case ES58X_RET_TYPE_TX_MSG: + if (IS_ENABLED(CONFIG_VERBOSE_DEBUG) && net_ratelimit()) + netdev_vdbg(netdev, "%s: OK\n", ret_desc); + break; + + default: + netdev_dbg(netdev, "%s: OK\n", ret_desc); + break; + } + return 0; + + case ES58X_RET_U32_ERR_UNSPECIFIED_FAILURE: + if (cmd_ret_type == ES58X_RET_TYPE_ENABLE_CHANNEL) { + int ret; + + netdev_warn(netdev, + "%s: channel is already opened, closing and re-opening it to reflect new configuration\n", + ret_desc); + ret = ops->disable_channel(es58x_priv(netdev)); + if (ret) + return ret; + return ops->enable_channel(es58x_priv(netdev)); + } + if (cmd_ret_type == ES58X_RET_TYPE_DISABLE_CHANNEL) { + netdev_info(netdev, + "%s: channel is already closed\n", ret_desc); + return 0; + } + netdev_err(netdev, + "%s: unspecified failure\n", ret_desc); + return -EBADMSG; + + case ES58X_RET_U32_ERR_NO_MEM: + netdev_err(netdev, "%s: device ran out of memory\n", ret_desc); + return -ENOMEM; + + case ES58X_RET_U32_WARN_PARAM_ADJUSTED: + netdev_warn(netdev, + "%s: some incompatible parameters have been adjusted\n", + ret_desc); + return 0; + + case ES58X_RET_U32_WARN_TX_MAYBE_REORDER: + netdev_warn(netdev, + "%s: TX messages might have been reordered\n", + ret_desc); + return 0; + + case ES58X_RET_U32_ERR_TIMEDOUT: + netdev_err(netdev, "%s: command timed out\n",
ret_desc); + return -ETIMEDOUT; + + case ES58X_RET_U32_ERR_FIFO_FULL: + netdev_warn(netdev, "%s: fifo is full\n", ret_desc); + return 0; + + case ES58X_RET_U32_ERR_BAD_CONFIG: + netdev_err(netdev, "%s: bad configuration\n", ret_desc); + return -EINVAL; + + case ES58X_RET_U32_ERR_NO_RESOURCE: + netdev_err(netdev, "%s: no resource available\n", ret_desc); + return -EBUSY; + + default: + netdev_err(netdev, "%s returned unknown value: 0x%08X\n", + ret_desc, rx_cmd_ret_u32); + return -EBADMSG; + } +} + +/** + * es58x_increment_rx_errors() - Increment the network devices' error + * count. + * @es58x_dev: ES58X device. + * + * If an error occurs in the early stages of receiving an URB command, + * we might not be able to figure out on which network device the + * error occurred. In such a case, we arbitrarily increment the error + * count of all the network devices attached to our ES58X device. + */ +static void es58x_increment_rx_errors(struct es58x_device *es58x_dev) +{ + int i; + + for (i = 0; i < es58x_dev->num_can_ch; i++) + if (es58x_dev->netdev[i]) + es58x_dev->netdev[i]->stats.rx_errors++; +} + +/** + * es58x_handle_urb_cmd() - Handle the URB command + * @es58x_dev: ES58X device. + * @urb_cmd: The URB command received from the ES58X device, might not + * be aligned. + * + * Sends the URB command to the device specific function. Manages the + * errors thrown back by those functions. + */ +static void es58x_handle_urb_cmd(struct es58x_device *es58x_dev, + const union es58x_urb_cmd *urb_cmd) +{ + const struct es58x_operators *ops = es58x_dev->ops; + size_t cmd_len; + int i, ret; + + ret = ops->handle_urb_cmd(es58x_dev, urb_cmd); + switch (ret) { + case 0: /* OK */ + return; + + case -ENODEV: + dev_err_ratelimited(es58x_dev->dev, "Device is not ready\n"); + break; + + case -EINVAL: + case -EMSGSIZE: + case -EBADRQC: + case -EBADMSG: + case -ECHRNG: + case -ETIMEDOUT: + cmd_len = es58x_get_urb_cmd_len(es58x_dev, + ops->get_msg_len(urb_cmd)); + dev_err(es58x_dev->dev, + "ops->handle_urb_cmd() returned error %pe", + ERR_PTR(ret)); + es58x_print_hex_dump(urb_cmd, cmd_len); + break; + + case -EFAULT: + case -ENOMEM: + case -EIO: + default: + dev_crit(es58x_dev->dev, + "ops->handle_urb_cmd() returned error %pe, detaching all network devices\n", + ERR_PTR(ret)); + for (i = 0; i < es58x_dev->num_can_ch; i++) + if (es58x_dev->netdev[i]) + netif_device_detach(es58x_dev->netdev[i]); + if (es58x_dev->ops->reset_device) + es58x_dev->ops->reset_device(es58x_dev); + break; + } + + /* Because the urb command could not fully be parsed, + * channel_id is not confirmed. Incrementing rx_errors count + * of all channels. + */ + es58x_increment_rx_errors(es58x_dev); +} + +/** + * es58x_check_rx_urb() - Check the length and format of the URB command. + * @es58x_dev: ES58X device. + * @urb_cmd: The URB command received from the ES58X device, might not + * be aligned. + * @urb_actual_len: The actual length of the URB command. + * + * Check if the first message of the received urb is valid, that is to + * say that both the header and the length are coherent. + * + * Return: + * the length of the first message of the URB on success. + * + * -ENODATA if the URB command is incomplete (in which case, the URB + * command should be buffered and combined with the next URB to try to + * reconstitute the URB command). + * + * -EOVERFLOW if the length is bigger than the maximum expected one. + * + * -EBADRQC if the start of frame does not match the expected value.
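+ * + * In practice, -ENODATA makes the caller buffer the fragment until + * the next URB arrives (see es58x_handle_incomplete_cmd()), while a + * bad start of frame is dealt with by scanning forward for the next + * valid one in es58x_split_urb_try_recovery().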
+ */
+static signed int es58x_check_rx_urb(struct es58x_device *es58x_dev,
+				     const union es58x_urb_cmd *urb_cmd,
+				     u32 urb_actual_len)
+{
+	const struct device *dev = es58x_dev->dev;
+	const struct es58x_parameters *param = es58x_dev->param;
+	u16 sof, msg_len;
+	signed int urb_cmd_len, ret;
+
+	if (urb_actual_len < param->urb_cmd_header_len) {
+		dev_vdbg(dev,
+			 "%s: Received %d bytes [%*ph]: header incomplete\n",
+			 __func__, urb_actual_len, urb_actual_len,
+			 urb_cmd->raw_cmd);
+		return -ENODATA;
+	}
+
+	sof = get_unaligned_le16(&urb_cmd->sof);
+	if (sof != param->rx_start_of_frame) {
+		dev_err_ratelimited(es58x_dev->dev,
+				    "%s: Expected sequence 0x%04X for start of frame but got 0x%04X.\n",
+				    __func__, param->rx_start_of_frame, sof);
+		return -EBADRQC;
+	}
+
+	msg_len = es58x_dev->ops->get_msg_len(urb_cmd);
+	urb_cmd_len = es58x_get_urb_cmd_len(es58x_dev, msg_len);
+	if (urb_cmd_len > param->rx_urb_cmd_max_len) {
+		dev_err_ratelimited(es58x_dev->dev,
+				    "%s: Biggest expected size for rx urb_cmd is %u but received a command of size %d\n",
+				    __func__,
+				    param->rx_urb_cmd_max_len, urb_cmd_len);
+		return -EOVERFLOW;
+	} else if (urb_actual_len < urb_cmd_len) {
+		dev_vdbg(dev, "%s: Received %02d/%02d bytes\n",
+			 __func__, urb_actual_len, urb_cmd_len);
+		return -ENODATA;
+	}
+
+	ret = es58x_check_crc(es58x_dev, urb_cmd, urb_cmd_len);
+	if (ret)
+		return ret;
+
+	return urb_cmd_len;
+}
+
+/**
+ * es58x_copy_to_cmd_buf() - Copy an array to the URB command buffer.
+ * @es58x_dev: ES58X device.
+ * @raw_cmd: the buffer we want to copy.
+ * @raw_cmd_len: length of @raw_cmd.
+ *
+ * Concatenates @raw_cmd_len bytes of @raw_cmd to the end of the URB
+ * command buffer.
+ *
+ * Return: zero on success, -EMSGSIZE if not enough space is available
+ * to do the copy.
+ */
+static int es58x_copy_to_cmd_buf(struct es58x_device *es58x_dev,
+				 u8 *raw_cmd, int raw_cmd_len)
+{
+	if (es58x_dev->rx_cmd_buf_len + raw_cmd_len >
+	    es58x_dev->param->rx_urb_cmd_max_len)
+		return -EMSGSIZE;
+
+	memcpy(&es58x_dev->rx_cmd_buf.raw_cmd[es58x_dev->rx_cmd_buf_len],
+	       raw_cmd, raw_cmd_len);
+	es58x_dev->rx_cmd_buf_len += raw_cmd_len;
+
+	return 0;
+}
+
+/**
+ * es58x_split_urb_try_recovery() - Try to recover bad URB sequences.
+ * @es58x_dev: ES58X device.
+ * @raw_cmd: pointer to the buffer we want to copy.
+ * @raw_cmd_len: length of @raw_cmd.
+ *
+ * Under some rare conditions, we might get incorrect URBs from the
+ * device. From our observations, one of the valid URBs gets replaced
+ * by one from the past. The full root cause is not identified.
+ *
+ * This function looks for the next start of frame in the urb buffer
+ * in order to try to recover.
+ *
+ * Such behavior was not observed on the devices of the ES58X FD
+ * family and only seems to impact the ES581.4.
+ *
+ * Return: the number of bytes dropped on success, -EBADMSG if
+ * recovery failed.
+ */
+static int es58x_split_urb_try_recovery(struct es58x_device *es58x_dev,
+					u8 *raw_cmd, size_t raw_cmd_len)
+{
+	union es58x_urb_cmd *urb_cmd;
+	signed int urb_cmd_len;
+	u16 sof;
+	int dropped_bytes = 0;
+
+	es58x_increment_rx_errors(es58x_dev);
+
+	while (raw_cmd_len > sizeof(sof)) {
+		urb_cmd = (union es58x_urb_cmd *)raw_cmd;
+		sof = get_unaligned_le16(&urb_cmd->sof);
+
+		if (sof == es58x_dev->param->rx_start_of_frame) {
+			urb_cmd_len = es58x_check_rx_urb(es58x_dev,
+							 urb_cmd, raw_cmd_len);
+			if ((urb_cmd_len == -ENODATA) || urb_cmd_len > 0) {
+				dev_info_ratelimited(es58x_dev->dev,
+						     "Recovery successful! Dropped %d bytes (urb_cmd_len: %d)\n",
+						     dropped_bytes,
+						     urb_cmd_len);
+				return dropped_bytes;
+			}
+		}
+		raw_cmd++;
+		raw_cmd_len--;
+		dropped_bytes++;
+	}
+
+	dev_warn_ratelimited(es58x_dev->dev, "%s: Recovery failed\n", __func__);
+	return -EBADMSG;
+}
+
+/**
+ * es58x_handle_incomplete_cmd() - Reconstitute an URB command from
+ *	different URB pieces.
+ * @es58x_dev: ES58X device.
+ * @urb: last urb buffer received.
+ *
+ * The device might split an URB command into an arbitrary number of
+ * pieces. This function concatenates those in an URB buffer until a
+ * full URB command is reconstituted, then consumes it.
+ *
+ * Return:
+ * number of bytes consumed from @urb if successful.
+ *
+ * -ENODATA if the URB command is still incomplete.
+ *
+ * -EBADMSG if the URB command is incorrect.
+ */
+static signed int es58x_handle_incomplete_cmd(struct es58x_device *es58x_dev,
+					      struct urb *urb)
+{
+	size_t cpy_len;
+	signed int urb_cmd_len, tmp_cmd_buf_len, ret;
+
+	tmp_cmd_buf_len = es58x_dev->rx_cmd_buf_len;
+	cpy_len = min_t(int, es58x_dev->param->rx_urb_cmd_max_len -
+			es58x_dev->rx_cmd_buf_len, urb->actual_length);
+	ret = es58x_copy_to_cmd_buf(es58x_dev, urb->transfer_buffer, cpy_len);
+	if (ret < 0)
+		return ret;
+
+	urb_cmd_len = es58x_check_rx_urb(es58x_dev, &es58x_dev->rx_cmd_buf,
+					 es58x_dev->rx_cmd_buf_len);
+	if (urb_cmd_len == -ENODATA) {
+		return -ENODATA;
+	} else if (urb_cmd_len < 0) {
+		dev_err_ratelimited(es58x_dev->dev,
+				    "Could not reconstitute incomplete command from previous URB, dropping %d bytes\n",
+				    tmp_cmd_buf_len + urb->actual_length);
+		dev_err_ratelimited(es58x_dev->dev,
+				    "Error code: %pe, es58x_dev->rx_cmd_buf_len: %d, urb->actual_length: %u\n",
+				    ERR_PTR(urb_cmd_len),
+				    tmp_cmd_buf_len, urb->actual_length);
+		es58x_print_hex_dump(&es58x_dev->rx_cmd_buf, tmp_cmd_buf_len);
+		es58x_print_hex_dump(urb->transfer_buffer, urb->actual_length);
+		return urb->actual_length;
+	}
+
+	es58x_handle_urb_cmd(es58x_dev, &es58x_dev->rx_cmd_buf);
+	return urb_cmd_len - tmp_cmd_buf_len;	/* consumed length */
+}
+
+/**
+ * es58x_split_urb() - Cut the received URB in individual URB commands.
+ * @es58x_dev: ES58X device.
+ * @urb: last urb buffer received.
+ *
+ * The device might send URBs in bulk format (i.e. several URB
+ * commands concatenated together). This function splits all the
+ * commands contained in the urb.
+ *
+ * Return:
+ * number of bytes consumed from @urb if successful.
+ *
+ * -ENODATA if the URB command is incomplete.
+ *
+ * -EBADMSG if the URB command is incorrect.
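+ *
+ * Illustration (sketch) of a bulk urb carrying two complete commands,
+ * one heartbeat byte and the beginning of a third command whose
+ * remainder will arrive in the next urb:
+ *
+ *	| cmd 1 | 0x11 | cmd 2 | beginning of cmd 3...
+ *
+ * cmd 1 and cmd 2 are consumed by es58x_handle_urb_cmd(), the
+ * heartbeat byte is skipped and the trailing fragment is saved in
+ * rx_cmd_buf through es58x_copy_to_cmd_buf() (-ENODATA case).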
+ */ +static signed int es58x_split_urb(struct es58x_device *es58x_dev, + struct urb *urb) +{ + union es58x_urb_cmd *urb_cmd; + u8 *raw_cmd = urb->transfer_buffer; + s32 raw_cmd_len = urb->actual_length; + int ret; + + if (es58x_dev->rx_cmd_buf_len != 0) { + ret = es58x_handle_incomplete_cmd(es58x_dev, urb); + if (ret != -ENODATA) + es58x_dev->rx_cmd_buf_len = 0; + if (ret < 0) + return ret; + + raw_cmd += ret; + raw_cmd_len -= ret; + } + + while (raw_cmd_len > 0) { + if (raw_cmd[0] == ES58X_HEARTBEAT) { + raw_cmd++; + raw_cmd_len--; + continue; + } + urb_cmd = (union es58x_urb_cmd *)raw_cmd; + ret = es58x_check_rx_urb(es58x_dev, urb_cmd, raw_cmd_len); + if (ret > 0) { + es58x_handle_urb_cmd(es58x_dev, urb_cmd); + } else if (ret == -ENODATA) { + es58x_copy_to_cmd_buf(es58x_dev, raw_cmd, raw_cmd_len); + return -ENODATA; + } else if (ret < 0) { + ret = es58x_split_urb_try_recovery(es58x_dev, raw_cmd, + raw_cmd_len); + if (ret < 0) + return ret; + } + raw_cmd += ret; + raw_cmd_len -= ret; + } + + return 0; +} + +/** + * es58x_read_bulk_callback() - Callback for reading data from device. + * @urb: last urb buffer received. + * + * This function gets eventually called each time an URB is received + * from the ES58X device. + * + * Checks urb status, calls read function and resubmits urb read + * operation. + */ +static void es58x_read_bulk_callback(struct urb *urb) +{ + struct es58x_device *es58x_dev = urb->context; + const struct device *dev = es58x_dev->dev; + int i, ret; + + switch (urb->status) { + case 0: /* success */ + break; + + case -EOVERFLOW: + dev_err_ratelimited(dev, "%s: error %pe\n", + __func__, ERR_PTR(urb->status)); + es58x_print_hex_dump_debug(urb->transfer_buffer, + urb->transfer_buffer_length); + goto resubmit_urb; + + case -EPROTO: + dev_warn_ratelimited(dev, "%s: error %pe. Device unplugged?\n", + __func__, ERR_PTR(urb->status)); + goto free_urb; + + case -ENOENT: + case -EPIPE: + dev_err_ratelimited(dev, "%s: error %pe\n", + __func__, ERR_PTR(urb->status)); + goto free_urb; + + case -ESHUTDOWN: + dev_dbg_ratelimited(dev, "%s: error %pe\n", + __func__, ERR_PTR(urb->status)); + goto free_urb; + + default: + dev_err_ratelimited(dev, "%s: error %pe\n", + __func__, ERR_PTR(urb->status)); + goto resubmit_urb; + } + + ret = es58x_split_urb(es58x_dev, urb); + if ((ret != -ENODATA) && ret < 0) { + dev_err(es58x_dev->dev, "es58x_split_urb() returned error %pe", + ERR_PTR(ret)); + es58x_print_hex_dump_debug(urb->transfer_buffer, + urb->actual_length); + + /* Because the urb command could not be parsed, + * channel_id is not confirmed. Incrementing rx_errors + * count of all channels. + */ + es58x_increment_rx_errors(es58x_dev); + } + + resubmit_urb: + usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->rx_pipe, + urb->transfer_buffer, urb->transfer_buffer_length, + es58x_read_bulk_callback, es58x_dev); + + ret = usb_submit_urb(urb, GFP_ATOMIC); + if (ret == -ENODEV) { + for (i = 0; i < es58x_dev->num_can_ch; i++) + if (es58x_dev->netdev[i]) + netif_device_detach(es58x_dev->netdev[i]); + } else if (ret) + dev_err_ratelimited(dev, + "Failed resubmitting read bulk urb: %pe\n", + ERR_PTR(ret)); + return; + + free_urb: + usb_free_coherent(urb->dev, urb->transfer_buffer_length, + urb->transfer_buffer, urb->transfer_dma); +} + +/** + * es58x_write_bulk_callback() - Callback after writing data to the device. + * @urb: urb buffer which was previously submitted. + * + * This function gets eventually called each time an URB was sent to + * the ES58X device. 
+ *
+ * Puts the @urb back into the idle urbs anchor and tries to restart
+ * the network queue.
+ */
+static void es58x_write_bulk_callback(struct urb *urb)
+{
+	struct net_device *netdev = urb->context;
+	struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev;
+
+	switch (urb->status) {
+	case 0:		/* success */
+		break;
+
+	case -EOVERFLOW:
+		if (net_ratelimit())
+			netdev_err(netdev, "%s: error %pe\n",
+				   __func__, ERR_PTR(urb->status));
+		es58x_print_hex_dump(urb->transfer_buffer,
+				     urb->transfer_buffer_length);
+		break;
+
+	case -ENOENT:
+		if (net_ratelimit())
+			netdev_dbg(netdev, "%s: error %pe\n",
+				   __func__, ERR_PTR(urb->status));
+		usb_free_coherent(urb->dev,
+				  es58x_dev->param->tx_urb_cmd_max_len,
+				  urb->transfer_buffer, urb->transfer_dma);
+		return;
+
+	default:
+		if (net_ratelimit())
+			netdev_info(netdev, "%s: error %pe\n",
+				    __func__, ERR_PTR(urb->status));
+		break;
+	}
+
+	usb_anchor_urb(urb, &es58x_dev->tx_urbs_idle);
+	atomic_inc(&es58x_dev->tx_urbs_idle_cnt);
+}
+
+/**
+ * es58x_alloc_urb() - Allocate memory for an URB and its transfer
+ *	buffer.
+ * @es58x_dev: ES58X device.
+ * @urb: URB to be allocated.
+ * @buf: used to return DMA address of buffer.
+ * @buf_len: requested buffer size.
+ * @mem_flags: affect whether allocation may block.
+ *
+ * Allocates an URB and its @transfer_buffer and sets its
+ * @transfer_dma address.
+ *
+ * This function is used at start-up to allocate all RX URBs at once
+ * and during run time for TX URBs.
+ *
+ * Return: zero on success, -ENOMEM if no memory is available.
+ */
+static int es58x_alloc_urb(struct es58x_device *es58x_dev, struct urb **urb,
+			   u8 **buf, size_t buf_len, gfp_t mem_flags)
+{
+	*urb = usb_alloc_urb(0, mem_flags);
+	if (!*urb) {
+		dev_err(es58x_dev->dev, "No memory left for URBs\n");
+		return -ENOMEM;
+	}
+
+	*buf = usb_alloc_coherent(es58x_dev->udev, buf_len,
+				  mem_flags, &(*urb)->transfer_dma);
+	if (!*buf) {
+		dev_err(es58x_dev->dev, "No memory left for USB buffer\n");
+		usb_free_urb(*urb);
+		return -ENOMEM;
+	}
+
+	(*urb)->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+	return 0;
+}
+
+/**
+ * es58x_get_tx_urb() - Get an URB for transmission.
+ * @es58x_dev: ES58X device.
+ *
+ * Gets an URB from the idle urbs anchor or allocates a new one if the
+ * anchor is empty.
+ *
+ * If there are more than ES58X_TX_URBS_MAX in the idle anchor, this
+ * function does some garbage collection. The garbage collection is
+ * done here instead of within es58x_write_bulk_callback() because
+ * usb_free_coherent() should not be used in IRQ context:
+ * c.f. WARN_ON(irqs_disabled()) in dma_free_attrs().
+ *
+ * Return: a pointer to an URB on success, NULL if no memory is
+ * available.
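+ *
+ * Sketch of the typical TX URB life cycle:
+ *
+ *	urb = es58x_get_tx_urb(es58x_dev);
+ *	(fill urb->transfer_buffer)
+ *	es58x_submit_urb(es58x_dev, urb, netdev);
+ *	(on completion, es58x_write_bulk_callback() puts the urb
+ *	back into tx_urbs_idle)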
+ */ +static struct urb *es58x_get_tx_urb(struct es58x_device *es58x_dev) +{ + atomic_t *idle_cnt = &es58x_dev->tx_urbs_idle_cnt; + struct urb *urb = usb_get_from_anchor(&es58x_dev->tx_urbs_idle); + + if (!urb) { + size_t tx_buf_len; + u8 *buf; + + tx_buf_len = es58x_dev->param->tx_urb_cmd_max_len; + if (es58x_alloc_urb(es58x_dev, &urb, &buf, tx_buf_len, + GFP_ATOMIC)) + return NULL; + + usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->tx_pipe, + buf, tx_buf_len, NULL, NULL); + return urb; + } + + while (atomic_dec_return(idle_cnt) > ES58X_TX_URBS_MAX) { + /* Garbage collector */ + struct urb *tmp = usb_get_from_anchor(&es58x_dev->tx_urbs_idle); + + if (!tmp) + break; + usb_free_coherent(tmp->dev, + es58x_dev->param->tx_urb_cmd_max_len, + tmp->transfer_buffer, tmp->transfer_dma); + usb_free_urb(tmp); + } + + return urb; +} + +/** + * es58x_submit_urb() - Send data to the device. + * @es58x_dev: ES58X device. + * @urb: URB to be sent. + * @netdev: CAN network device. + * + * Return: zero on success, errno when any error occurs. + */ +static int es58x_submit_urb(struct es58x_device *es58x_dev, struct urb *urb, + struct net_device *netdev) +{ + int ret; + + es58x_set_crc(urb->transfer_buffer, urb->transfer_buffer_length); + usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->tx_pipe, + urb->transfer_buffer, urb->transfer_buffer_length, + es58x_write_bulk_callback, netdev); + usb_anchor_urb(urb, &es58x_dev->tx_urbs_busy); + ret = usb_submit_urb(urb, GFP_ATOMIC); + if (ret) { + netdev_err(netdev, "%s: USB send urb failure: %pe\n", + __func__, ERR_PTR(ret)); + usb_unanchor_urb(urb); + usb_free_coherent(urb->dev, + es58x_dev->param->tx_urb_cmd_max_len, + urb->transfer_buffer, urb->transfer_dma); + } + usb_free_urb(urb); + + return ret; +} + +/** + * es58x_send_msg() - Prepare an URB and submit it. + * @es58x_dev: ES58X device. + * @cmd_type: Command type. + * @cmd_id: Command ID. + * @msg: ES58X message to be sent. + * @msg_len: Length of @msg. + * @channel_idx: Index of the network device. + * + * Creates an URB command from a given message, sets the header and the + * CRC and then submits it. + * + * Return: zero on success, errno when any error occurs. + */ +int es58x_send_msg(struct es58x_device *es58x_dev, u8 cmd_type, u8 cmd_id, + const void *msg, u16 msg_len, int channel_idx) +{ + struct net_device *netdev; + union es58x_urb_cmd *urb_cmd; + struct urb *urb; + int urb_cmd_len; + + if (channel_idx == ES58X_CHANNEL_IDX_NA) + netdev = es58x_dev->netdev[0]; /* Default to first channel */ + else + netdev = es58x_dev->netdev[channel_idx]; + + urb_cmd_len = es58x_get_urb_cmd_len(es58x_dev, msg_len); + if (urb_cmd_len > es58x_dev->param->tx_urb_cmd_max_len) + return -EOVERFLOW; + + urb = es58x_get_tx_urb(es58x_dev); + if (!urb) + return -ENOMEM; + + urb_cmd = urb->transfer_buffer; + es58x_dev->ops->fill_urb_header(urb_cmd, cmd_type, cmd_id, + channel_idx, msg_len); + memcpy(&urb_cmd->raw_cmd[es58x_dev->param->urb_cmd_header_len], + msg, msg_len); + urb->transfer_buffer_length = urb_cmd_len; + + return es58x_submit_urb(es58x_dev, urb, netdev); +} + +/** + * es58x_alloc_rx_urbs() - Allocate RX URBs. + * @es58x_dev: ES58X device. + * + * Allocate URBs for reception and anchor them. + * + * Return: zero on success, errno when any error occurs. 
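+ *
+ * Once submitted, each RX URB loops between the USB core and
+ * es58x_read_bulk_callback() which resubmits it, until the device
+ * disconnects or a fatal USB error occurs.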
+ */ +static int es58x_alloc_rx_urbs(struct es58x_device *es58x_dev) +{ + const struct device *dev = es58x_dev->dev; + const struct es58x_parameters *param = es58x_dev->param; + size_t rx_buf_len = es58x_dev->rx_max_packet_size; + struct urb *urb; + u8 *buf; + int i; + int ret = -EINVAL; + + for (i = 0; i < param->rx_urb_max; i++) { + ret = es58x_alloc_urb(es58x_dev, &urb, &buf, rx_buf_len, + GFP_KERNEL); + if (ret) + break; + + usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->rx_pipe, + buf, rx_buf_len, es58x_read_bulk_callback, + es58x_dev); + usb_anchor_urb(urb, &es58x_dev->rx_urbs); + + ret = usb_submit_urb(urb, GFP_KERNEL); + if (ret) { + usb_unanchor_urb(urb); + usb_free_coherent(es58x_dev->udev, rx_buf_len, + buf, urb->transfer_dma); + usb_free_urb(urb); + break; + } + usb_free_urb(urb); + } + + if (i == 0) { + dev_err(dev, "%s: Could not setup any rx URBs\n", __func__); + return ret; + } + dev_dbg(dev, "%s: Allocated %d rx URBs each of size %zu\n", + __func__, i, rx_buf_len); + + return ret; +} + +/** + * es58x_free_urbs() - Free all the TX and RX URBs. + * @es58x_dev: ES58X device. + */ +static void es58x_free_urbs(struct es58x_device *es58x_dev) +{ + struct urb *urb; + + if (!usb_wait_anchor_empty_timeout(&es58x_dev->tx_urbs_busy, 1000)) { + dev_err(es58x_dev->dev, "%s: Timeout, some TX urbs still remain\n", + __func__); + usb_kill_anchored_urbs(&es58x_dev->tx_urbs_busy); + } + + while ((urb = usb_get_from_anchor(&es58x_dev->tx_urbs_idle)) != NULL) { + usb_free_coherent(urb->dev, es58x_dev->param->tx_urb_cmd_max_len, + urb->transfer_buffer, urb->transfer_dma); + usb_free_urb(urb); + atomic_dec(&es58x_dev->tx_urbs_idle_cnt); + } + if (atomic_read(&es58x_dev->tx_urbs_idle_cnt)) + dev_err(es58x_dev->dev, + "All idle urbs were freed but tx_urb_idle_cnt is %d\n", + atomic_read(&es58x_dev->tx_urbs_idle_cnt)); + + usb_kill_anchored_urbs(&es58x_dev->rx_urbs); +} + +/** + * es58x_open() - Enable the network device. + * @netdev: CAN network device. + * + * Called when the network transitions to the up state. Allocate the + * URB resources if needed and open the channel. + * + * Return: zero on success, errno when any error occurs. + */ +static int es58x_open(struct net_device *netdev) +{ + struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; + int ret; + + if (atomic_inc_return(&es58x_dev->opened_channel_cnt) == 1) { + ret = es58x_alloc_rx_urbs(es58x_dev); + if (ret) + return ret; + + ret = es58x_set_realtime_diff_ns(es58x_dev); + if (ret) + goto free_urbs; + } + + ret = open_candev(netdev); + if (ret) + goto free_urbs; + + ret = es58x_dev->ops->enable_channel(es58x_priv(netdev)); + if (ret) + goto free_urbs; + + netif_start_queue(netdev); + + return ret; + + free_urbs: + if (atomic_dec_and_test(&es58x_dev->opened_channel_cnt)) + es58x_free_urbs(es58x_dev); + netdev_err(netdev, "%s: Could not open the network device: %pe\n", + __func__, ERR_PTR(ret)); + + return ret; +} + +/** + * es58x_stop() - Disable the network device. + * @netdev: CAN network device. + * + * Called when the network transitions to the down state. If all the + * channels of the device are closed, free the URB resources which are + * not needed anymore. + * + * Return: zero on success, errno when any error occurs. 
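+ *
+ * The URB resources are shared by all the channels: the last channel
+ * to close (atomic_dec_and_test() on @opened_channel_cnt) frees them,
+ * mirroring es58x_open() in which the first channel to open allocated
+ * them.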
+ */
+static int es58x_stop(struct net_device *netdev)
+{
+	struct es58x_priv *priv = es58x_priv(netdev);
+	struct es58x_device *es58x_dev = priv->es58x_dev;
+	int ret;
+
+	netif_stop_queue(netdev);
+	ret = es58x_dev->ops->disable_channel(priv);
+	if (ret)
+		return ret;
+
+	priv->can.state = CAN_STATE_STOPPED;
+	es58x_can_reset_echo_fifo(netdev);
+	close_candev(netdev);
+
+	es58x_flush_pending_tx_msg(netdev);
+
+	if (atomic_dec_and_test(&es58x_dev->opened_channel_cnt))
+		es58x_free_urbs(es58x_dev);
+
+	return 0;
+}
+
+/**
+ * es58x_xmit_commit() - Send the bulk urb.
+ * @netdev: CAN network device.
+ *
+ * Do the bulk send. This function should be called only once per
+ * bulk transmission.
+ *
+ * Return: zero on success, errno when any error occurs.
+ */
+static int es58x_xmit_commit(struct net_device *netdev)
+{
+	struct es58x_priv *priv = es58x_priv(netdev);
+	int ret;
+
+	if (!es58x_is_can_state_active(netdev))
+		return -ENETDOWN;
+
+	if (es58x_is_echo_skb_threshold_reached(priv))
+		netif_stop_queue(netdev);
+
+	ret = es58x_submit_urb(priv->es58x_dev, priv->tx_urb, netdev);
+	if (ret == 0)
+		priv->tx_urb = NULL;
+
+	return ret;
+}
+
+/**
+ * es58x_xmit_more() - Can we put more packets?
+ * @priv: ES58X private parameters related to the network device.
+ *
+ * Return: true if we can put more, false if it is time to send.
+ */
+static bool es58x_xmit_more(struct es58x_priv *priv)
+{
+	unsigned int free_slots =
+		priv->can.echo_skb_max - (priv->tx_head - priv->tx_tail);
+
+	return netdev_xmit_more() && free_slots > 0 &&
+		priv->tx_can_msg_cnt < priv->es58x_dev->param->tx_bulk_max;
+}
+
+/**
+ * es58x_start_xmit() - Transmit an skb.
+ * @skb: socket buffer of a CAN message.
+ * @netdev: CAN network device.
+ *
+ * Called when a packet needs to be transmitted.
+ *
+ * This function relies on Byte Queue Limits (BQL). The main benefit
+ * is to increase the throughput by allowing bulk transfers
+ * (c.f. xmit_more flag).
+ *
+ * Queues up to tx_bulk_max messages in &tx_urb buffer and does a
+ * bulk send of all messages in one single URB.
+ *
+ * Return: NETDEV_TX_OK regardless of whether we could transmit the
+ * @skb or had to drop it.
+ */
+static netdev_tx_t es58x_start_xmit(struct sk_buff *skb,
+				    struct net_device *netdev)
+{
+	struct es58x_priv *priv = es58x_priv(netdev);
+	struct es58x_device *es58x_dev = priv->es58x_dev;
+	unsigned int frame_len;
+	int ret;
+
+	if (can_dropped_invalid_skb(netdev, skb)) {
+		if (priv->tx_urb)
+			goto xmit_commit;
+		return NETDEV_TX_OK;
+	}
+
+	if (priv->tx_urb && priv->tx_can_msg_is_fd != can_is_canfd_skb(skb)) {
+		/* Cannot do a bulk send with mixed Classical CAN and
+		 * CAN FD frames.
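+		 * The ES58X FD devices do not allow mixing both frame
+		 * types in one single bulk transmission (c.f. struct
+		 * es58x_priv documentation), so commit the pending
+		 * URB first.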
+		 */
+		ret = es58x_xmit_commit(netdev);
+		if (ret)
+			goto drop_skb;
+	}
+
+	if (!priv->tx_urb) {
+		priv->tx_urb = es58x_get_tx_urb(es58x_dev);
+		if (!priv->tx_urb) {
+			ret = -ENOMEM;
+			goto drop_skb;
+		}
+		priv->tx_can_msg_cnt = 0;
+		priv->tx_can_msg_is_fd = can_is_canfd_skb(skb);
+	}
+
+	ret = es58x_dev->ops->tx_can_msg(priv, skb);
+	if (ret)
+		goto drop_skb;
+
+	frame_len = can_skb_get_frame_len(skb);
+	ret = can_put_echo_skb(skb, netdev,
+			       priv->tx_head & es58x_dev->param->fifo_mask,
+			       frame_len);
+	if (ret)
+		goto xmit_failure;
+	netdev_sent_queue(netdev, frame_len);
+
+	priv->tx_head++;
+	priv->tx_can_msg_cnt++;
+
+ xmit_commit:
+	if (!es58x_xmit_more(priv)) {
+		ret = es58x_xmit_commit(netdev);
+		if (ret)
+			goto xmit_failure;
+	}
+
+	return NETDEV_TX_OK;
+
+ drop_skb:
+	dev_kfree_skb(skb);
+	netdev->stats.tx_dropped++;
+ xmit_failure:
+	netdev_warn(netdev, "%s: send message failure: %pe\n",
+		    __func__, ERR_PTR(ret));
+	netdev->stats.tx_errors++;
+	es58x_flush_pending_tx_msg(netdev);
+	return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops es58x_netdev_ops = {
+	.ndo_open = es58x_open,
+	.ndo_stop = es58x_stop,
+	.ndo_start_xmit = es58x_start_xmit
+};
+
+/**
+ * es58x_set_mode() - Change network device mode.
+ * @netdev: CAN network device.
+ * @mode: either %CAN_MODE_START, %CAN_MODE_STOP or %CAN_MODE_SLEEP
+ *
+ * Currently, this function is only used to stop and restart the
+ * channel during a bus off event (c.f. es58x_rx_err_msg() and
+ * drivers/net/can/dev.c:can_restart() which are the only two
+ * callers).
+ *
+ * Return: zero on success, errno when any error occurs.
+ */
+static int es58x_set_mode(struct net_device *netdev, enum can_mode mode)
+{
+	struct es58x_priv *priv = es58x_priv(netdev);
+
+	switch (mode) {
+	case CAN_MODE_START:
+		switch (priv->can.state) {
+		case CAN_STATE_BUS_OFF:
+			return priv->es58x_dev->ops->enable_channel(priv);
+
+		case CAN_STATE_STOPPED:
+			return es58x_open(netdev);
+
+		case CAN_STATE_ERROR_ACTIVE:
+		case CAN_STATE_ERROR_WARNING:
+		case CAN_STATE_ERROR_PASSIVE:
+		default:
+			return 0;
+		}
+
+	case CAN_MODE_STOP:
+		switch (priv->can.state) {
+		case CAN_STATE_STOPPED:
+			return 0;
+
+		case CAN_STATE_ERROR_ACTIVE:
+		case CAN_STATE_ERROR_WARNING:
+		case CAN_STATE_ERROR_PASSIVE:
+		case CAN_STATE_BUS_OFF:
+		default:
+			return priv->es58x_dev->ops->disable_channel(priv);
+		}
+
+	case CAN_MODE_SLEEP:
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+/**
+ * es58x_init_priv() - Initialize private parameters.
+ * @es58x_dev: ES58X device.
+ * @priv: ES58X private parameters related to the network device.
+ * @channel_idx: Index of the network device.
+ */
+static void es58x_init_priv(struct es58x_device *es58x_dev,
+			    struct es58x_priv *priv, int channel_idx)
+{
+	const struct es58x_parameters *param = es58x_dev->param;
+	struct can_priv *can = &priv->can;
+
+	priv->es58x_dev = es58x_dev;
+	priv->channel_idx = channel_idx;
+	priv->tx_urb = NULL;
+	priv->tx_can_msg_cnt = 0;
+
+	can->bittiming_const = param->bittiming_const;
+	if (param->ctrlmode_supported & CAN_CTRLMODE_FD) {
+		can->data_bittiming_const = param->data_bittiming_const;
+		can->tdc_const = param->tdc_const;
+	}
+	can->bitrate_max = param->bitrate_max;
+	can->clock = param->clock;
+	can->state = CAN_STATE_STOPPED;
+	can->ctrlmode_supported = param->ctrlmode_supported;
+	can->do_set_mode = es58x_set_mode;
+}
+
+/**
+ * es58x_init_netdev() - Initialize the network device.
+ * @es58x_dev: ES58X device.
+ * @channel_idx: Index of the network device.
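+ *
+ * The echo skb FIFO is dimensioned from the constant parameters:
+ * alloc_candev() is called with param->fifo_mask + 1 echo skb entries
+ * (c.f. struct es58x_parameters).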
+ *
+ * Return: zero on success, errno when any error occurs.
+ */
+static int es58x_init_netdev(struct es58x_device *es58x_dev, int channel_idx)
+{
+	struct net_device *netdev;
+	struct device *dev = es58x_dev->dev;
+	int ret;
+
+	netdev = alloc_candev(sizeof(struct es58x_priv),
+			      es58x_dev->param->fifo_mask + 1);
+	if (!netdev) {
+		dev_err(dev, "Could not allocate candev\n");
+		return -ENOMEM;
+	}
+	SET_NETDEV_DEV(netdev, dev);
+	es58x_dev->netdev[channel_idx] = netdev;
+	es58x_init_priv(es58x_dev, es58x_priv(netdev), channel_idx);
+
+	netdev->netdev_ops = &es58x_netdev_ops;
+	netdev->flags |= IFF_ECHO;	/* We support local echo */
+
+	ret = register_candev(netdev);
+	if (ret)
+		return ret;
+
+	netdev_queue_set_dql_min_limit(netdev_get_tx_queue(netdev, 0),
+				       es58x_dev->param->dql_min_limit);
+
+	return ret;
+}
+
+/**
+ * es58x_get_product_info() - Get the product information and print it.
+ * @es58x_dev: ES58X device.
+ *
+ * Do a synchronous call to get the product information.
+ *
+ * Return: zero on success, errno when any error occurs.
+ */
+static int es58x_get_product_info(struct es58x_device *es58x_dev)
+{
+	struct usb_device *udev = es58x_dev->udev;
+	const int es58x_prod_info_idx = 6;
+	/* Empirical tests show a prod_info length of at most 83
+	 * bytes; the size below should be more than enough.
+	 */
+	const size_t prod_info_len = 127;
+	char *prod_info;
+	int ret;
+
+	prod_info = kmalloc(prod_info_len, GFP_KERNEL);
+	if (!prod_info)
+		return -ENOMEM;
+
+	ret = usb_string(udev, es58x_prod_info_idx, prod_info, prod_info_len);
+	if (ret < 0) {
+		dev_err(es58x_dev->dev,
+			"%s: Could not read the product info: %pe\n",
+			__func__, ERR_PTR(ret));
+		goto out_free;
+	}
+	if (ret >= prod_info_len - 1) {
+		dev_warn(es58x_dev->dev,
+			 "%s: Buffer is too small, result might be truncated\n",
+			 __func__);
+	}
+	dev_info(es58x_dev->dev, "Product info: %s\n", prod_info);
+
+ out_free:
+	kfree(prod_info);
+	return ret < 0 ? ret : 0;
+}
+
+/**
+ * es58x_init_es58x_dev() - Initialize the ES58X device.
+ * @intf: USB interface.
+ * @p_es58x_dev: pointer to the address of the ES58X device.
+ * @driver_info: Quirks of the device.
+ *
+ * Return: zero on success, errno when any error occurs.
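+ *
+ * @driver_info is a bitfield of enum es58x_driver_info:
+ * %ES58X_FD_FAMILY selects the parameters and the operators,
+ * %ES58X_DUAL_CHANNEL sets the number of channels to two.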
+ */ +static int es58x_init_es58x_dev(struct usb_interface *intf, + struct es58x_device **p_es58x_dev, + kernel_ulong_t driver_info) +{ + struct device *dev = &intf->dev; + struct es58x_device *es58x_dev; + const struct es58x_parameters *param; + const struct es58x_operators *ops; + struct usb_device *udev = interface_to_usbdev(intf); + struct usb_endpoint_descriptor *ep_in, *ep_out; + int ret; + + dev_info(dev, + "Starting %s %s (Serial Number %s) driver version %s\n", + udev->manufacturer, udev->product, udev->serial, DRV_VERSION); + + ret = usb_find_common_endpoints(intf->cur_altsetting, &ep_in, &ep_out, + NULL, NULL); + if (ret) + return ret; + + if (driver_info & ES58X_FD_FAMILY) { + param = &es58x_fd_param; + ops = &es58x_fd_ops; + } else { + param = &es581_4_param; + ops = &es581_4_ops; + } + + es58x_dev = kzalloc(es58x_sizeof_es58x_device(param), GFP_KERNEL); + if (!es58x_dev) + return -ENOMEM; + + es58x_dev->param = param; + es58x_dev->ops = ops; + es58x_dev->dev = dev; + es58x_dev->udev = udev; + + if (driver_info & ES58X_DUAL_CHANNEL) + es58x_dev->num_can_ch = 2; + else + es58x_dev->num_can_ch = 1; + + init_usb_anchor(&es58x_dev->rx_urbs); + init_usb_anchor(&es58x_dev->tx_urbs_idle); + init_usb_anchor(&es58x_dev->tx_urbs_busy); + atomic_set(&es58x_dev->tx_urbs_idle_cnt, 0); + atomic_set(&es58x_dev->opened_channel_cnt, 0); + usb_set_intfdata(intf, es58x_dev); + + es58x_dev->rx_pipe = usb_rcvbulkpipe(es58x_dev->udev, + ep_in->bEndpointAddress); + es58x_dev->tx_pipe = usb_sndbulkpipe(es58x_dev->udev, + ep_out->bEndpointAddress); + es58x_dev->rx_max_packet_size = le16_to_cpu(ep_in->wMaxPacketSize); + + *p_es58x_dev = es58x_dev; + + return 0; +} + +/** + * es58x_probe() - Initialize the USB device. + * @intf: USB interface. + * @id: USB device ID. + * + * Return: zero on success, -ENODEV if the interface is not supported + * or errno when any other error occurs. + */ +static int es58x_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct es58x_device *es58x_dev; + int ch_idx, ret; + + ret = es58x_init_es58x_dev(intf, &es58x_dev, id->driver_info); + if (ret) + return ret; + + ret = es58x_get_product_info(es58x_dev); + if (ret) + goto cleanup_es58x_dev; + + for (ch_idx = 0; ch_idx < es58x_dev->num_can_ch; ch_idx++) { + ret = es58x_init_netdev(es58x_dev, ch_idx); + if (ret) + goto cleanup_candev; + } + + return ret; + + cleanup_candev: + for (ch_idx = 0; ch_idx < es58x_dev->num_can_ch; ch_idx++) + if (es58x_dev->netdev[ch_idx]) { + unregister_candev(es58x_dev->netdev[ch_idx]); + free_candev(es58x_dev->netdev[ch_idx]); + } + cleanup_es58x_dev: + kfree(es58x_dev); + + return ret; +} + +/** + * es58x_disconnect() - Disconnect the USB device. + * @intf: USB interface + * + * Called by the usb core when driver is unloaded or device is + * removed. 
+ */ +static void es58x_disconnect(struct usb_interface *intf) +{ + struct es58x_device *es58x_dev = usb_get_intfdata(intf); + struct net_device *netdev; + int i; + + dev_info(&intf->dev, "Disconnecting %s %s\n", + es58x_dev->udev->manufacturer, es58x_dev->udev->product); + + for (i = 0; i < es58x_dev->num_can_ch; i++) { + netdev = es58x_dev->netdev[i]; + if (!netdev) + continue; + unregister_candev(netdev); + es58x_dev->netdev[i] = NULL; + free_candev(netdev); + } + + es58x_free_urbs(es58x_dev); + + kfree(es58x_dev); + usb_set_intfdata(intf, NULL); +} + +static struct usb_driver es58x_driver = { + .name = ES58X_MODULE_NAME, + .probe = es58x_probe, + .disconnect = es58x_disconnect, + .id_table = es58x_id_table +}; + +module_usb_driver(es58x_driver); diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.h b/drivers/net/can/usb/etas_es58x/es58x_core.h new file mode 100644 index 000000000000..5f4e7dc5be35 --- /dev/null +++ b/drivers/net/can/usb/etas_es58x/es58x_core.h @@ -0,0 +1,700 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* Driver for ETAS GmbH ES58X USB CAN(-FD) Bus Interfaces. + * + * File es58x_core.h: All common definitions and declarations. + * + * Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved. + * Copyright (c) 2020 ETAS K.K.. All rights reserved. + * Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr> + */ + +#ifndef __ES58X_COMMON_H__ +#define __ES58X_COMMON_H__ + +#include <linux/types.h> +#include <linux/usb.h> +#include <linux/netdevice.h> +#include <linux/can.h> +#include <linux/can/dev.h> + +#include "es581_4.h" +#include "es58x_fd.h" + +/* Driver constants */ +#define ES58X_RX_URBS_MAX 5 /* Empirical value */ +#define ES58X_TX_URBS_MAX 6 /* Empirical value */ + +#define ES58X_MAX(param) \ + (ES581_4_##param > ES58X_FD_##param ? \ + ES581_4_##param : ES58X_FD_##param) +#define ES58X_TX_BULK_MAX ES58X_MAX(TX_BULK_MAX) +#define ES58X_RX_BULK_MAX ES58X_MAX(RX_BULK_MAX) +#define ES58X_ECHO_BULK_MAX ES58X_MAX(ECHO_BULK_MAX) +#define ES58X_NUM_CAN_CH_MAX ES58X_MAX(NUM_CAN_CH) + +/* Use this when channel index is irrelevant (e.g. device + * timestamp). + */ +#define ES58X_CHANNEL_IDX_NA 0xFF +#define ES58X_EMPTY_MSG NULL + +/* Threshold on consecutive CAN_STATE_ERROR_PASSIVE. If we receive + * ES58X_CONSECUTIVE_ERR_PASSIVE_MAX times the event + * ES58X_ERR_CRTL_PASSIVE in a row without any successful RX or TX, + * we force the device to switch to CAN_STATE_BUS_OFF state. + */ +#define ES58X_CONSECUTIVE_ERR_PASSIVE_MAX 254 + +/* A magic number sent by the ES581.4 to inform it is alive. */ +#define ES58X_HEARTBEAT 0x11 + +/** + * enum es58x_driver_info - Quirks of the device. + * @ES58X_DUAL_CHANNEL: Device has two CAN channels. If this flag is + * not set, it is implied that the device has only one CAN + * channel. + * @ES58X_FD_FAMILY: Device is CAN-FD capable. If this flag is not + * set, the device only supports classical CAN. + */ +enum es58x_driver_info { + ES58X_DUAL_CHANNEL = BIT(0), + ES58X_FD_FAMILY = BIT(1) +}; + +enum es58x_echo { + ES58X_ECHO_OFF = 0, + ES58X_ECHO_ON = 1 +}; + +/** + * enum es58x_physical_layer - Type of the physical layer. + * @ES58X_PHYSICAL_LAYER_HIGH_SPEED: High-speed CAN (c.f. ISO + * 11898-2). + * + * Some products of the ETAS portfolio also support low-speed CAN + * (c.f. ISO 11898-3). However, all the devices in scope of this + * driver do not support the option, thus, the enum has only one + * member. 
+ */
+enum es58x_physical_layer {
+	ES58X_PHYSICAL_LAYER_HIGH_SPEED = 1
+};
+
+enum es58x_samples_per_bit {
+	ES58X_SAMPLES_PER_BIT_ONE = 1,
+	ES58X_SAMPLES_PER_BIT_THREE = 2
+};
+
+/**
+ * enum es58x_sync_edge - Synchronization method.
+ * @ES58X_SYNC_EDGE_SINGLE: ISO CAN specification defines the use of a
+ *	single edge synchronization. The synchronization should be
+ *	done on recessive to dominant level change.
+ *
+ * For information, ES582.1 and ES584.1 also support a double
+ * synchronization, requiring both recessive to dominant then dominant
+ * to recessive level change. However, this is not supported in
+ * SocketCAN framework, thus, the enum has only one member.
+ */
+enum es58x_sync_edge {
+	ES58X_SYNC_EDGE_SINGLE = 1
+};
+
+/**
+ * enum es58x_flag - CAN flags for RX/TX messages.
+ * @ES58X_FLAG_EFF: Extended Frame Format (EFF).
+ * @ES58X_FLAG_RTR: Remote Transmission Request (RTR).
+ * @ES58X_FLAG_FD_BRS: Bit rate switch (BRS): second bitrate for
+ *	payload data.
+ * @ES58X_FLAG_FD_ESI: Error State Indicator (ESI): tell if the
+ *	transmitting node is in error passive mode.
+ * @ES58X_FLAG_FD_DATA: CAN FD frame.
+ */
+enum es58x_flag {
+	ES58X_FLAG_EFF = BIT(0),
+	ES58X_FLAG_RTR = BIT(1),
+	ES58X_FLAG_FD_BRS = BIT(3),
+	ES58X_FLAG_FD_ESI = BIT(5),
+	ES58X_FLAG_FD_DATA = BIT(6)
+};
+
+/**
+ * enum es58x_err - CAN error detection.
+ * @ES58X_ERR_OK: No errors.
+ * @ES58X_ERR_PROT_STUFF: Bit stuffing error: more than 5 consecutive
+ *	equal bits.
+ * @ES58X_ERR_PROT_FORM: Frame format error.
+ * @ES58X_ERR_ACK: Received no ACK on transmission.
+ * @ES58X_ERR_PROT_BIT: Single bit error.
+ * @ES58X_ERR_PROT_CRC: Incorrect 15, 17 or 21 bit CRC.
+ * @ES58X_ERR_PROT_BIT1: Unable to send recessive bit: tried to send
+ *	recessive bit 1 but monitored dominant bit 0.
+ * @ES58X_ERR_PROT_BIT0: Unable to send dominant bit: tried to send
+ *	dominant bit 0 but monitored recessive bit 1.
+ * @ES58X_ERR_PROT_OVERLOAD: Bus overload.
+ * @ES58X_ERR_PROT_UNSPEC: Unspecified.
+ *
+ * Please refer to ISO 11898-1:2015, section 10.11 "Error detection"
+ * and section 10.13 "Overload signaling" for additional details.
+ */
+enum es58x_err {
+	ES58X_ERR_OK = 0,
+	ES58X_ERR_PROT_STUFF = BIT(0),
+	ES58X_ERR_PROT_FORM = BIT(1),
+	ES58X_ERR_ACK = BIT(2),
+	ES58X_ERR_PROT_BIT = BIT(3),
+	ES58X_ERR_PROT_CRC = BIT(4),
+	ES58X_ERR_PROT_BIT1 = BIT(5),
+	ES58X_ERR_PROT_BIT0 = BIT(6),
+	ES58X_ERR_PROT_OVERLOAD = BIT(7),
+	ES58X_ERR_PROT_UNSPEC = BIT(31)
+};
+
+/**
+ * enum es58x_event - CAN error codes returned by the device.
+ * @ES58X_EVENT_OK: No errors.
+ * @ES58X_EVENT_CRTL_ACTIVE: Active state: both TX and RX error counts
+ *	are less than 128.
+ * @ES58X_EVENT_CRTL_PASSIVE: Passive state: either TX or RX error
+ *	count is greater than 127.
+ * @ES58X_EVENT_CRTL_WARNING: Warning state: either TX or RX error
+ *	count is greater than 96.
+ * @ES58X_EVENT_BUSOFF: Bus off.
+ * @ES58X_EVENT_SINGLE_WIRE: Lost connection on either CAN high or CAN
+ *	low.
+ *
+ * Please refer to ISO 11898-1:2015, section 12.1.4 "Rules of fault
+ * confinement" for additional details.
+ */
+enum es58x_event {
+	ES58X_EVENT_OK = 0,
+	ES58X_EVENT_CRTL_ACTIVE = BIT(0),
+	ES58X_EVENT_CRTL_PASSIVE = BIT(1),
+	ES58X_EVENT_CRTL_WARNING = BIT(2),
+	ES58X_EVENT_BUSOFF = BIT(3),
+	ES58X_EVENT_SINGLE_WIRE = BIT(4)
+};
+
+/* enum es58x_ret_u8 - Device return error codes, 8 bit format.
+ *
+ * Specific to ES581.4.
+ */
+enum es58x_ret_u8 {
+	ES58X_RET_U8_OK = 0x00,
+	ES58X_RET_U8_ERR_UNSPECIFIED_FAILURE = 0x80,
+	ES58X_RET_U8_ERR_NO_MEM = 0x81,
+	ES58X_RET_U8_ERR_BAD_CRC = 0x99
+};
+
+/* enum es58x_ret_u32 - Device return error codes, 32 bit format.
+ */
+enum es58x_ret_u32 {
+	ES58X_RET_U32_OK = 0x00000000UL,
+	ES58X_RET_U32_ERR_UNSPECIFIED_FAILURE = 0x80000000UL,
+	ES58X_RET_U32_ERR_NO_MEM = 0x80004001UL,
+	ES58X_RET_U32_WARN_PARAM_ADJUSTED = 0x40004000UL,
+	ES58X_RET_U32_WARN_TX_MAYBE_REORDER = 0x40004001UL,
+	ES58X_RET_U32_ERR_TIMEDOUT = 0x80000008UL,
+	ES58X_RET_U32_ERR_FIFO_FULL = 0x80003002UL,
+	ES58X_RET_U32_ERR_BAD_CONFIG = 0x80004000UL,
+	ES58X_RET_U32_ERR_NO_RESOURCE = 0x80004002UL
+};
+
+/* enum es58x_ret_type - Type of the command returned by the ES58X
+ * device.
+ */
+enum es58x_ret_type {
+	ES58X_RET_TYPE_SET_BITTIMING,
+	ES58X_RET_TYPE_ENABLE_CHANNEL,
+	ES58X_RET_TYPE_DISABLE_CHANNEL,
+	ES58X_RET_TYPE_TX_MSG,
+	ES58X_RET_TYPE_RESET_RX,
+	ES58X_RET_TYPE_RESET_TX,
+	ES58X_RET_TYPE_DEVICE_ERR
+};
+
+union es58x_urb_cmd {
+	struct es581_4_urb_cmd es581_4_urb_cmd;
+	struct es58x_fd_urb_cmd es58x_fd_urb_cmd;
+	struct {	/* Common header parts of all variants */
+		__le16 sof;
+		u8 cmd_type;
+		u8 cmd_id;
+	} __packed;
+	u8 raw_cmd[0];
+};
+
+/**
+ * struct es58x_priv - All information specific to a CAN channel.
+ * @can: struct can_priv must be the first member (Socket CAN relies
+ *	on the fact that function netdev_priv() returns a pointer to
+ *	a struct can_priv).
+ * @es58x_dev: pointer to the corresponding ES58X device.
+ * @tx_urb: Used as a buffer to concatenate the TX messages and to do
+ *	a bulk send. Please refer to es58x_start_xmit() for more
+ *	details.
+ * @tx_tail: Index of the oldest packet still pending for
+ *	completion. @tx_tail & fifo_mask represents the beginning of
+ *	the echo skb FIFO, i.e. index of the first element.
+ * @tx_head: Index of the next packet to be sent to the
+ *	device. @tx_head & fifo_mask represents the end of the echo
+ *	skb FIFO plus one, i.e. the first free index.
+ * @tx_can_msg_cnt: Number of messages in @tx_urb.
+ * @tx_can_msg_is_fd: false: all messages in @tx_urb are Classical
+ *	CAN, true: all messages in @tx_urb are CAN FD. Rationale:
+ *	ES58X FD devices do not allow mixing Classical CAN and CAN FD
+ *	frames in one single bulk transmission.
+ * @err_passive_before_rtx_success: The ES58X device might enter a
+ *	state in which it keeps alternating between error passive and
+ *	active states. This counter keeps track of the number of
+ *	consecutive error passive events; if it gets bigger than
+ *	ES58X_CONSECUTIVE_ERR_PASSIVE_MAX, es58x_rx_err_msg() will
+ *	force the status to bus off.
+ * @channel_idx: Channel index, starts at zero.
+ */
+struct es58x_priv {
+	struct can_priv can;
+	struct es58x_device *es58x_dev;
+	struct urb *tx_urb;
+
+	u32 tx_tail;
+	u32 tx_head;
+
+	u8 tx_can_msg_cnt;
+	bool tx_can_msg_is_fd;
+
+	u8 err_passive_before_rtx_success;
+
+	u8 channel_idx;
+};
+
+/**
+ * struct es58x_parameters - Constant parameters of a given hardware
+ *	variant.
+ * @bittiming_const: Nominal bittiming constant parameters.
+ * @data_bittiming_const: Data bittiming constant parameters.
+ * @tdc_const: Transmission Delay Compensation constant parameters.
+ * @bitrate_max: Maximum bitrate supported by the device.
+ * @clock: CAN clock parameters.
+ * @ctrlmode_supported: List of supported modes. Please refer to
+ *	can/netlink.h file for additional details.
+ * @tx_start_of_frame: Magic number at the beginning of each TX URB
+ *	command.
+ * @rx_start_of_frame: Magic number at the beginning of each RX URB
+ *	command.
+ * @tx_urb_cmd_max_len: Maximum length of a TX URB command.
+ * @rx_urb_cmd_max_len: Maximum length of a RX URB command.
+ * @fifo_mask: Bit mask to quickly convert the tx_tail and tx_head
+ *	field of the struct es58x_priv into echo_skb
+ *	indexes. Properties: @fifo_mask = echo_skb_max - 1 where
+ *	echo_skb_max must be a power of two. Also, echo_skb_max must
+ *	not exceed the length of the device's internal TX FIFO. This
+ *	parameter is used to control the network queue wake/stop
+ *	logic.
+ * @dql_min_limit: Dynamic Queue Limits (DQL) absolute minimum limit
+ *	of bytes allowed to be queued on this network device transmit
+ *	queue. Used by the Byte Queue Limits (BQL) to determine how
+ *	frequently the xmit_more flag will be set to true in
+ *	es58x_start_xmit(). Set this value higher to optimize for
+ *	throughput but be aware that it might have a negative impact
+ *	on the latency! This value can also be set dynamically. Please
+ *	refer to Documentation/ABI/testing/sysfs-class-net-queues for
+ *	more details.
+ * @tx_bulk_max: Maximum number of TX messages that can be sent in one
+ *	single URB packet.
+ * @urb_cmd_header_len: Length of the URB command header.
+ * @rx_urb_max: Number of RX URB to be allocated during device probe.
+ * @tx_urb_max: Number of TX URB to be allocated during device probe.
+ */
+struct es58x_parameters {
+	const struct can_bittiming_const *bittiming_const;
+	const struct can_bittiming_const *data_bittiming_const;
+	const struct can_tdc_const *tdc_const;
+	u32 bitrate_max;
+	struct can_clock clock;
+	u32 ctrlmode_supported;
+	u16 tx_start_of_frame;
+	u16 rx_start_of_frame;
+	u16 tx_urb_cmd_max_len;
+	u16 rx_urb_cmd_max_len;
+	u16 fifo_mask;
+	u16 dql_min_limit;
+	u8 tx_bulk_max;
+	u8 urb_cmd_header_len;
+	u8 rx_urb_max;
+	u8 tx_urb_max;
+};
+
+/**
+ * struct es58x_operators - Function pointers used to encode/decode
+ *	the TX/RX messages.
+ * @get_msg_len: Get field msg_len of the urb_cmd. The offset of
+ *	msg_len inside urb_cmd depends on the device model.
+ * @handle_urb_cmd: Decode the URB command received from the device
+ *	and dispatch it to the relevant sub function.
+ * @fill_urb_header: Fill the header of urb_cmd.
+ * @tx_can_msg: Encode a TX CAN message and add it to the bulk buffer
+ *	cmd_buf of es58x_dev.
+ * @enable_channel: Start the CAN channel.
+ * @disable_channel: Stop the CAN channel.
+ * @reset_device: Full reset of the device. N.B: this feature is only
+ *	present on the ES581.4. For ES58X FD devices, this field is
+ *	set to NULL.
+ * @get_timestamp: Request a timestamp from the ES58X device.
+ */
+struct es58x_operators {
+	u16 (*get_msg_len)(const union es58x_urb_cmd *urb_cmd);
+	int (*handle_urb_cmd)(struct es58x_device *es58x_dev,
+			      const union es58x_urb_cmd *urb_cmd);
+	void (*fill_urb_header)(union es58x_urb_cmd *urb_cmd, u8 cmd_type,
+				u8 cmd_id, u8 channel_idx, u16 cmd_len);
+	int (*tx_can_msg)(struct es58x_priv *priv, const struct sk_buff *skb);
+	int (*enable_channel)(struct es58x_priv *priv);
+	int (*disable_channel)(struct es58x_priv *priv);
+	int (*reset_device)(struct es58x_device *es58x_dev);
+	int (*get_timestamp)(struct es58x_device *es58x_dev);
+};
+
+/**
+ * struct es58x_device - All information specific to an ES58X device.
+ * @dev: Device information.
+ * @udev: USB device information.
+ * @netdev: Array of our CAN channels.
+ * @param: The constant parameters.
+ * @ops: Operators.
+ * @rx_pipe: USB reception pipe.
+ * @tx_pipe: USB transmission pipe.
+ * @rx_urbs: Anchor for received URBs.
+ * @tx_urbs_busy: Anchor for TX URBs which were sent to the device.
+ * @tx_urbs_idle: Anchor for TX URBs which are idle. This driver
+ *	allocates the memory for the URBs during the probe. When a TX
+ *	URB is needed, it can be taken from this anchor. The network
+ *	queue wake/stop logic should prevent this anchor from getting
+ *	empty. Please refer to es58x_get_tx_urb() for more details.
+ * @tx_urbs_idle_cnt: number of urbs in @tx_urbs_idle.
+ * @opened_channel_cnt: number of channels opened (c.f. es58x_open()
+ *	and es58x_stop()).
+ * @ktime_req_ns: kernel timestamp when es58x_set_realtime_diff_ns()
+ *	was called.
+ * @realtime_diff_ns: difference in nanoseconds between the clocks of
+ *	the ES58X device and the kernel.
+ * @timestamps: a temporary buffer to store the time stamps before
+ *	feeding them to es58x_can_get_echo_skb(). Can only be used
+ *	in RX branches.
+ * @rx_max_packet_size: Maximum length of bulk-in URB.
+ * @num_can_ch: Number of CAN channels (i.e. number of elements of
+ *	@netdev).
+ * @rx_cmd_buf_len: Length of @rx_cmd_buf.
+ * @rx_cmd_buf: The device might split the URB commands in an
+ *	arbitrary number of pieces. This buffer is used to concatenate
+ *	all those pieces. Can only be used in RX branches. This field
+ *	has to be the last one of the structure because it has a
+ *	flexible size (c.f. es58x_sizeof_es58x_device() function).
+ */
+struct es58x_device {
+	struct device *dev;
+	struct usb_device *udev;
+	struct net_device *netdev[ES58X_NUM_CAN_CH_MAX];
+
+	const struct es58x_parameters *param;
+	const struct es58x_operators *ops;
+
+	int rx_pipe;
+	int tx_pipe;
+
+	struct usb_anchor rx_urbs;
+	struct usb_anchor tx_urbs_busy;
+	struct usb_anchor tx_urbs_idle;
+	atomic_t tx_urbs_idle_cnt;
+	atomic_t opened_channel_cnt;
+
+	u64 ktime_req_ns;
+	s64 realtime_diff_ns;
+
+	u64 timestamps[ES58X_ECHO_BULK_MAX];
+
+	u16 rx_max_packet_size;
+	u8 num_can_ch;
+
+	u16 rx_cmd_buf_len;
+	union es58x_urb_cmd rx_cmd_buf;
+};
+
+/**
+ * es58x_sizeof_es58x_device() - Calculate the maximum length of
+ *	struct es58x_device.
+ * @es58x_dev_param: The constant parameters of the device.
+ *
+ * The length of struct es58x_device depends on the length of its last
+ * field: rx_cmd_buf. This function allows optimizing the memory
+ * allocation.
+ *
+ * Return: length of struct es58x_device.
+ */
+static inline size_t es58x_sizeof_es58x_device(const struct es58x_parameters
+					       *es58x_dev_param)
+{
+	return offsetof(struct es58x_device, rx_cmd_buf) +
+		es58x_dev_param->rx_urb_cmd_max_len;
+}
+
+static inline int __es58x_check_msg_len(const struct device *dev,
+					const char *stringified_msg,
+					size_t actual_len, size_t expected_len)
+{
+	if (expected_len != actual_len) {
+		dev_err(dev,
+			"Length of %s is %zu but received command is %zu.\n",
+			stringified_msg, expected_len, actual_len);
+		return -EMSGSIZE;
+	}
+	return 0;
+}
+
+/**
+ * es58x_check_msg_len() - Check the size of a received message.
+ * @dev: Device, used to print error messages.
+ * @msg: Received message, must not be a pointer.
+ * @actual_len: Length of the message as advertised in the command header.
+ *
+ * Must be a macro in order to accept the different types of messages
+ * as an input. Can be used with any of the messages which have a
+ * fixed length. Checks for an exact match of the size.
+ *
+ * Return: zero on success, -EMSGSIZE if @actual_len differs from the
+ * expected length.
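+ *
+ * Example of use (c.f. es58x_fd_rx_event_msg()):
+ *
+ *	ret = es58x_check_msg_len(es58x_dev->dev, *rx_event_msg, msg_len);
+ *	if (ret)
+ *		return ret;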
+ */
+#define es58x_check_msg_len(dev, msg, actual_len)		\
+	__es58x_check_msg_len(dev, __stringify(msg),		\
+			      actual_len, sizeof(msg))
+
+static inline int __es58x_check_msg_max_len(const struct device *dev,
+					    const char *stringified_msg,
+					    size_t actual_len,
+					    size_t expected_len)
+{
+	if (actual_len > expected_len) {
+		dev_err(dev,
+			"Maximum length for %s is %zu but received command is %zu.\n",
+			stringified_msg, expected_len, actual_len);
+		return -EOVERFLOW;
+	}
+	return 0;
+}
+
+/**
+ * es58x_check_msg_max_len() - Check the maximum size of a received message.
+ * @dev: Device, used to print error messages.
+ * @msg: Received message, must not be a pointer.
+ * @actual_len: Length of the message as advertised in the command header.
+ *
+ * Must be a macro in order to accept the different types of messages
+ * as an input. To be used with the messages of variable sizes. Only
+ * checks that the message is not bigger than the maximum expected
+ * size.
+ *
+ * Return: zero on success, -EOVERFLOW if @actual_len is greater than
+ * the expected length.
+ */
+#define es58x_check_msg_max_len(dev, msg, actual_len)		\
+	__es58x_check_msg_max_len(dev, __stringify(msg),	\
+				  actual_len, sizeof(msg))
+
+static inline int __es58x_msg_num_element(const struct device *dev,
+					  const char *stringified_msg,
+					  size_t actual_len, size_t msg_len,
+					  size_t elem_len)
+{
+	size_t actual_num_elem = actual_len / elem_len;
+	size_t expected_num_elem = msg_len / elem_len;
+
+	if (actual_num_elem == 0) {
+		dev_err(dev,
+			"Minimum length for %s is %zu but received command is %zu.\n",
+			stringified_msg, elem_len, actual_len);
+		return -EMSGSIZE;
+	} else if ((actual_len % elem_len) != 0) {
+		dev_err(dev,
+			"Received command length: %zu is not a multiple of %s[0]: %zu\n",
+			actual_len, stringified_msg, elem_len);
+		return -EMSGSIZE;
+	} else if (actual_num_elem > expected_num_elem) {
+		dev_err(dev,
+			"Array %s is supposed to have %zu elements each of size %zu...\n",
+			stringified_msg, expected_num_elem, elem_len);
+		dev_err(dev,
+			"... But received command has %zu elements (total length %zu).\n",
+			actual_num_elem, actual_len);
+		return -EOVERFLOW;
+	}
+	return actual_num_elem;
+}
+
+/**
+ * es58x_msg_num_element() - Check size and give the number of
+ *	elements in a message of array type.
+ * @dev: Device, used to print error messages.
+ * @msg: Received message, must be an array.
+ * @actual_len: Length of the message as advertised in the command
+ *	header.
+ *
+ * Must be a macro in order to accept the different types of messages
+ * as an input. To be used on message of array type. The array's
+ * elements have to be of fixed size (else use
+ * es58x_check_msg_max_len()). Checks that the total length is an
+ * exact multiple of the length of a single element.
+ *
+ * Return: number of elements in the array on success, -EOVERFLOW if
+ * @actual_len is greater than the expected length, -EMSGSIZE if
+ * @actual_len is not a multiple of a single element.
+ */
+#define es58x_msg_num_element(dev, msg, actual_len)			\
+({									\
+	size_t __elem_len = sizeof((msg)[0]) + __must_be_array(msg);	\
+	__es58x_msg_num_element(dev, __stringify(msg), actual_len,	\
+				sizeof(msg), __elem_len);		\
+})
+
+/**
+ * es58x_priv() - Get the priv member and cast it to struct es58x_priv.
+ * @netdev: CAN network device.
+ *
+ * Return: ES58X private parameters of the network device.
+ */
+static inline struct es58x_priv *es58x_priv(struct net_device *netdev)
+{
+	return (struct es58x_priv *)netdev_priv(netdev);
+}
+
+/**
+ * ES58X_SIZEOF_URB_CMD() - Calculate the maximum length of an urb
+ *	command for a given message field name.
+ * @es58x_urb_cmd_type: type (either "struct es581_4_urb_cmd" or
+ *	"struct es58x_fd_urb_cmd").
+ * @msg_field: name of the message field.
+ *
+ * Must be a macro in order to accept the different command types as
+ * an input.
+ *
+ * Return: length of the urb command.
+ */
+#define ES58X_SIZEOF_URB_CMD(es58x_urb_cmd_type, msg_field)		\
+	(offsetof(es58x_urb_cmd_type, raw_msg)				\
+		+ sizeof_field(es58x_urb_cmd_type, msg_field)		\
+		+ sizeof_field(es58x_urb_cmd_type,			\
+			       reserved_for_crc16_do_not_use))
+
+/**
+ * es58x_get_urb_cmd_len() - Calculate the actual length of an urb
+ *	command for a given message length.
+ * @es58x_dev: ES58X device.
+ * @msg_len: Length of the message.
+ *
+ * Add the header and CRC lengths to the message length.
+ *
+ * Return: length of the urb command.
+ */
+static inline size_t es58x_get_urb_cmd_len(struct es58x_device *es58x_dev,
+					   u16 msg_len)
+{
+	return es58x_dev->param->urb_cmd_header_len + msg_len + sizeof(u16);
+}
+
+/**
+ * es58x_get_netdev() - Get the network device.
+ * @es58x_dev: ES58X device.
+ * @channel_no: The channel number as advertised in the urb command.
+ * @channel_idx_offset: Some of the ES58X devices start channel
+ *	numbering from 0 (ES58X FD), others from 1 (ES581.4).
+ * @netdev: CAN network device.
+ *
+ * Do a sanity check on the index provided by the device.
+ *
+ * Return: zero on success, -ECHRNG if the received channel number is
+ * out of range and -ENODEV if the network device is not yet
+ * configured.
+ */
+static inline int es58x_get_netdev(struct es58x_device *es58x_dev,
+				   int channel_no, int channel_idx_offset,
+				   struct net_device **netdev)
+{
+	int channel_idx = channel_no - channel_idx_offset;
+
+	*netdev = NULL;
+	if (channel_idx < 0 || channel_idx >= es58x_dev->num_can_ch)
+		return -ECHRNG;
+
+	*netdev = es58x_dev->netdev[channel_idx];
+	if (!*netdev || !netif_device_present(*netdev))
+		return -ENODEV;
+
+	return 0;
+}
+
+/**
+ * es58x_get_raw_can_id() - Get the CAN ID.
+ * @cf: CAN frame.
+ *
+ * Mask the CAN ID in order to only keep the significant bits.
+ *
+ * Return: the raw value of the CAN ID.
+ */
+static inline int es58x_get_raw_can_id(const struct can_frame *cf)
+{
+	if (cf->can_id & CAN_EFF_FLAG)
+		return cf->can_id & CAN_EFF_MASK;
+	else
+		return cf->can_id & CAN_SFF_MASK;
+}
+
+/**
+ * es58x_get_flags() - Get the CAN flags.
+ * @skb: socket buffer of a CAN message.
+ *
+ * Return: the CAN flags as an enum es58x_flag.
+ */
+static inline enum es58x_flag es58x_get_flags(const struct sk_buff *skb)
+{
+	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+	enum es58x_flag es58x_flags = 0;
+
+	if (cf->can_id & CAN_EFF_FLAG)
+		es58x_flags |= ES58X_FLAG_EFF;
+
+	if (can_is_canfd_skb(skb)) {
+		es58x_flags |= ES58X_FLAG_FD_DATA;
+		if (cf->flags & CANFD_BRS)
+			es58x_flags |= ES58X_FLAG_FD_BRS;
+		if (cf->flags & CANFD_ESI)
+			es58x_flags |= ES58X_FLAG_FD_ESI;
+	} else if (cf->can_id & CAN_RTR_FLAG)
+		/* Remote frames are only defined in Classical CAN frames */
+		es58x_flags |= ES58X_FLAG_RTR;
+
+	return es58x_flags;
+}
+
+int es58x_can_get_echo_skb(struct net_device *netdev, u32 packet_idx,
+			   u64 *tstamps, unsigned int pkts);
+int es58x_tx_ack_msg(struct net_device *netdev, u16 tx_free_entries,
+		     enum es58x_ret_u32 rx_cmd_ret_u32);
+int es58x_rx_can_msg(struct net_device *netdev, u64 timestamp, const u8 *data,
+		     canid_t can_id, enum es58x_flag es58x_flags, u8 dlc);
+int es58x_rx_err_msg(struct net_device *netdev, enum es58x_err error,
+		     enum es58x_event event, u64 timestamp);
+void es58x_rx_timestamp(struct es58x_device *es58x_dev, u64 timestamp);
+int es58x_rx_cmd_ret_u8(struct device *dev, enum es58x_ret_type cmd_ret_type,
+			enum es58x_ret_u8 rx_cmd_ret_u8);
+int es58x_rx_cmd_ret_u32(struct net_device *netdev,
+			 enum es58x_ret_type cmd_ret_type,
+			 enum es58x_ret_u32 rx_cmd_ret_u32);
+int es58x_send_msg(struct es58x_device *es58x_dev, u8 cmd_type, u8 cmd_id,
+		   const void *msg, u16 cmd_len, int channel_idx);
+
+extern const struct es58x_parameters es581_4_param;
+extern const struct es58x_operators es581_4_ops;
+
+extern const struct es58x_parameters es58x_fd_param;
+extern const struct es58x_operators es58x_fd_ops;
+
+#endif /* __ES58X_COMMON_H__ */
diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.c b/drivers/net/can/usb/etas_es58x/es58x_fd.c
new file mode 100644
index 000000000000..1a2779d383a4
--- /dev/null
+++ b/drivers/net/can/usb/etas_es58x/es58x_fd.c
@@ -0,0 +1,562 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Driver for ETAS GmbH ES58X USB CAN(-FD) Bus Interfaces.
+ *
+ * File es58x_fd.c: Adds support for ETAS ES582.1 and ES584.1 (naming
+ * convention: we use the term "ES58X FD" when referring to those two
+ * variants together).
+ *
+ * Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved.
+ * Copyright (c) 2020 ETAS K.K.. All rights reserved.
+ * Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ */
+
+#include <linux/kernel.h>
+#include <asm/unaligned.h>
+
+#include "es58x_core.h"
+#include "es58x_fd.h"
+
+/**
+ * es58x_fd_sizeof_rx_tx_msg() - Calculate the actual length of the
+ *	structure of a rx or tx message.
+ * @msg: message of variable length, must have dlc and len fields.
+ *
+ * Even though RTR frames actually have no payload, the ES58X devices
+ * still expect one. Must be a macro in order to accept several types
+ * (struct es58x_fd_tx_can_msg and struct es58x_fd_rx_can_msg) as an
+ * input.
+ *
+ * Return: length of the message.
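+ *
+ * Example of use (c.f. es58x_fd_rx_can_msg()):
+ *
+ *	u16 rx_can_msg_len = es58x_fd_sizeof_rx_tx_msg(*rx_can_msg);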
+ */ +#define es58x_fd_sizeof_rx_tx_msg(msg) \ +({ \ + typeof(msg) __msg = (msg); \ + size_t __msg_len; \ + \ + if (__msg.flags & ES58X_FLAG_FD_DATA) \ + __msg_len = canfd_sanitize_len(__msg.len); \ + else \ + __msg_len = can_cc_dlc2len(__msg.dlc); \ + \ + offsetof(typeof(__msg), data[__msg_len]); \ +}) + +static enum es58x_fd_cmd_type es58x_fd_cmd_type(struct net_device *netdev) +{ + u32 ctrlmode = es58x_priv(netdev)->can.ctrlmode; + + if (ctrlmode & (CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO)) + return ES58X_FD_CMD_TYPE_CANFD; + else + return ES58X_FD_CMD_TYPE_CAN; +} + +static u16 es58x_fd_get_msg_len(const union es58x_urb_cmd *urb_cmd) +{ + return get_unaligned_le16(&urb_cmd->es58x_fd_urb_cmd.msg_len); +} + +static int es58x_fd_echo_msg(struct net_device *netdev, + const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd) +{ + struct es58x_priv *priv = es58x_priv(netdev); + const struct es58x_fd_echo_msg *echo_msg; + struct es58x_device *es58x_dev = priv->es58x_dev; + u64 *tstamps = es58x_dev->timestamps; + u16 msg_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len); + int i, num_element; + u32 rcv_packet_idx; + + const u32 mask = GENMASK(31, sizeof(echo_msg->packet_idx) * 8); + + num_element = es58x_msg_num_element(es58x_dev->dev, + es58x_fd_urb_cmd->echo_msg, + msg_len); + if (num_element < 0) + return num_element; + echo_msg = es58x_fd_urb_cmd->echo_msg; + + rcv_packet_idx = (priv->tx_tail & mask) | echo_msg[0].packet_idx; + for (i = 0; i < num_element; i++) { + if ((u8)rcv_packet_idx != echo_msg[i].packet_idx) { + netdev_err(netdev, "Packet idx jumped from %u to %u\n", + (u8)rcv_packet_idx - 1, + echo_msg[i].packet_idx); + return -EBADMSG; + } + + tstamps[i] = get_unaligned_le64(&echo_msg[i].timestamp); + rcv_packet_idx++; + } + + return es58x_can_get_echo_skb(netdev, priv->tx_tail, tstamps, num_element); +} + +static int es58x_fd_rx_can_msg(struct net_device *netdev, + const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd) +{ + struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; + const u8 *rx_can_msg_buf = es58x_fd_urb_cmd->rx_can_msg_buf; + u16 rx_can_msg_buf_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len); + int pkts, ret; + + ret = es58x_check_msg_max_len(es58x_dev->dev, + es58x_fd_urb_cmd->rx_can_msg_buf, + rx_can_msg_buf_len); + if (ret) + return ret; + + for (pkts = 0; rx_can_msg_buf_len > 0; pkts++) { + const struct es58x_fd_rx_can_msg *rx_can_msg = + (const struct es58x_fd_rx_can_msg *)rx_can_msg_buf; + bool is_can_fd = !!(rx_can_msg->flags & ES58X_FLAG_FD_DATA); + /* rx_can_msg_len is the length of the rx_can_msg + * buffer. Not to be confused with rx_can_msg->len + * which is the length of the CAN payload + * rx_can_msg->data. 
+ */ + u16 rx_can_msg_len = es58x_fd_sizeof_rx_tx_msg(*rx_can_msg); + + if (rx_can_msg_len > rx_can_msg_buf_len) { + netdev_err(netdev, + "%s: Expected a rx_can_msg of size %d but only %d bytes are left in rx_can_msg_buf\n", + __func__, + rx_can_msg_len, rx_can_msg_buf_len); + return -EMSGSIZE; + } + if (rx_can_msg->len > CANFD_MAX_DLEN) { + netdev_err(netdev, + "%s: Data length is %d but maximum should be %d\n", + __func__, rx_can_msg->len, CANFD_MAX_DLEN); + return -EMSGSIZE; + } + + if (netif_running(netdev)) { + u64 tstamp = get_unaligned_le64(&rx_can_msg->timestamp); + canid_t can_id = get_unaligned_le32(&rx_can_msg->can_id); + u8 dlc; + + if (is_can_fd) + dlc = can_fd_len2dlc(rx_can_msg->len); + else + dlc = rx_can_msg->dlc; + + ret = es58x_rx_can_msg(netdev, tstamp, rx_can_msg->data, + can_id, rx_can_msg->flags, dlc); + if (ret) + break; + } + + rx_can_msg_buf_len -= rx_can_msg_len; + rx_can_msg_buf += rx_can_msg_len; + } + + if (!netif_running(netdev)) { + if (net_ratelimit()) + netdev_info(netdev, + "%s: %s is down, dropping %d rx packets\n", + __func__, netdev->name, pkts); + netdev->stats.rx_dropped += pkts; + } + + return ret; +} + +static int es58x_fd_rx_event_msg(struct net_device *netdev, + const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd) +{ + struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; + u16 msg_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len); + const struct es58x_fd_rx_event_msg *rx_event_msg; + int ret; + + ret = es58x_check_msg_len(es58x_dev->dev, *rx_event_msg, msg_len); + if (ret) + return ret; + + rx_event_msg = &es58x_fd_urb_cmd->rx_event_msg; + + return es58x_rx_err_msg(netdev, rx_event_msg->error_code, + rx_event_msg->event_code, + get_unaligned_le64(&rx_event_msg->timestamp)); +} + +static int es58x_fd_rx_cmd_ret_u32(struct net_device *netdev, + const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd, + enum es58x_ret_type cmd_ret_type) +{ + struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; + u16 msg_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len); + int ret; + + ret = es58x_check_msg_len(es58x_dev->dev, + es58x_fd_urb_cmd->rx_cmd_ret_le32, msg_len); + if (ret) + return ret; + + return es58x_rx_cmd_ret_u32(netdev, cmd_ret_type, + get_unaligned_le32(&es58x_fd_urb_cmd->rx_cmd_ret_le32)); +} + +static int es58x_fd_tx_ack_msg(struct net_device *netdev, + const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd) +{ + struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; + const struct es58x_fd_tx_ack_msg *tx_ack_msg; + u16 msg_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len); + int ret; + + tx_ack_msg = &es58x_fd_urb_cmd->tx_ack_msg; + ret = es58x_check_msg_len(es58x_dev->dev, *tx_ack_msg, msg_len); + if (ret) + return ret; + + return es58x_tx_ack_msg(netdev, + get_unaligned_le16(&tx_ack_msg->tx_free_entries), + get_unaligned_le32(&tx_ack_msg->rx_cmd_ret_le32)); +} + +static int es58x_fd_can_cmd_id(struct es58x_device *es58x_dev, + const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd) +{ + struct net_device *netdev; + int ret; + + ret = es58x_get_netdev(es58x_dev, es58x_fd_urb_cmd->channel_idx, + ES58X_FD_CHANNEL_IDX_OFFSET, &netdev); + if (ret) + return ret; + + switch ((enum es58x_fd_can_cmd_id)es58x_fd_urb_cmd->cmd_id) { + case ES58X_FD_CAN_CMD_ID_ENABLE_CHANNEL: + return es58x_fd_rx_cmd_ret_u32(netdev, es58x_fd_urb_cmd, + ES58X_RET_TYPE_ENABLE_CHANNEL); + + case ES58X_FD_CAN_CMD_ID_DISABLE_CHANNEL: + return es58x_fd_rx_cmd_ret_u32(netdev, es58x_fd_urb_cmd, + ES58X_RET_TYPE_DISABLE_CHANNEL); + + case ES58X_FD_CAN_CMD_ID_TX_MSG: + 
return es58x_fd_tx_ack_msg(netdev, es58x_fd_urb_cmd); + + case ES58X_FD_CAN_CMD_ID_ECHO_MSG: + return es58x_fd_echo_msg(netdev, es58x_fd_urb_cmd); + + case ES58X_FD_CAN_CMD_ID_RX_MSG: + return es58x_fd_rx_can_msg(netdev, es58x_fd_urb_cmd); + + case ES58X_FD_CAN_CMD_ID_RESET_RX: + return es58x_fd_rx_cmd_ret_u32(netdev, es58x_fd_urb_cmd, + ES58X_RET_TYPE_RESET_RX); + + case ES58X_FD_CAN_CMD_ID_RESET_TX: + return es58x_fd_rx_cmd_ret_u32(netdev, es58x_fd_urb_cmd, + ES58X_RET_TYPE_RESET_TX); + + case ES58X_FD_CAN_CMD_ID_ERROR_OR_EVENT_MSG: + return es58x_fd_rx_event_msg(netdev, es58x_fd_urb_cmd); + + default: + return -EBADRQC; + } +} + +static int es58x_fd_device_cmd_id(struct es58x_device *es58x_dev, + const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd) +{ + u16 msg_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len); + int ret; + + switch ((enum es58x_fd_dev_cmd_id)es58x_fd_urb_cmd->cmd_id) { + case ES58X_FD_DEV_CMD_ID_TIMESTAMP: + ret = es58x_check_msg_len(es58x_dev->dev, + es58x_fd_urb_cmd->timestamp, msg_len); + if (ret) + return ret; + es58x_rx_timestamp(es58x_dev, + get_unaligned_le64(&es58x_fd_urb_cmd->timestamp)); + return 0; + + default: + return -EBADRQC; + } +} + +static int es58x_fd_handle_urb_cmd(struct es58x_device *es58x_dev, + const union es58x_urb_cmd *urb_cmd) +{ + const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd; + int ret; + + es58x_fd_urb_cmd = &urb_cmd->es58x_fd_urb_cmd; + + switch ((enum es58x_fd_cmd_type)es58x_fd_urb_cmd->cmd_type) { + case ES58X_FD_CMD_TYPE_CAN: + case ES58X_FD_CMD_TYPE_CANFD: + ret = es58x_fd_can_cmd_id(es58x_dev, es58x_fd_urb_cmd); + break; + + case ES58X_FD_CMD_TYPE_DEVICE: + ret = es58x_fd_device_cmd_id(es58x_dev, es58x_fd_urb_cmd); + break; + + default: + ret = -EBADRQC; + break; + } + + if (ret == -EBADRQC) + dev_err(es58x_dev->dev, + "%s: Unknown command type (0x%02X) and command ID (0x%02X) combination\n", + __func__, es58x_fd_urb_cmd->cmd_type, + es58x_fd_urb_cmd->cmd_id); + + return ret; +} + +static void es58x_fd_fill_urb_header(union es58x_urb_cmd *urb_cmd, u8 cmd_type, + u8 cmd_id, u8 channel_idx, u16 msg_len) +{ + struct es58x_fd_urb_cmd *es58x_fd_urb_cmd = &urb_cmd->es58x_fd_urb_cmd; + + es58x_fd_urb_cmd->SOF = cpu_to_le16(es58x_fd_param.tx_start_of_frame); + es58x_fd_urb_cmd->cmd_type = cmd_type; + es58x_fd_urb_cmd->cmd_id = cmd_id; + es58x_fd_urb_cmd->channel_idx = channel_idx; + es58x_fd_urb_cmd->msg_len = cpu_to_le16(msg_len); +} + +static int es58x_fd_tx_can_msg(struct es58x_priv *priv, + const struct sk_buff *skb) +{ + struct es58x_device *es58x_dev = priv->es58x_dev; + union es58x_urb_cmd *urb_cmd = priv->tx_urb->transfer_buffer; + struct es58x_fd_urb_cmd *es58x_fd_urb_cmd = &urb_cmd->es58x_fd_urb_cmd; + struct can_frame *cf = (struct can_frame *)skb->data; + struct es58x_fd_tx_can_msg *tx_can_msg; + bool is_fd = can_is_canfd_skb(skb); + u16 msg_len; + int ret; + + if (priv->tx_can_msg_cnt == 0) { + msg_len = 0; + es58x_fd_fill_urb_header(urb_cmd, + is_fd ? ES58X_FD_CMD_TYPE_CANFD + : ES58X_FD_CMD_TYPE_CAN, + ES58X_FD_CAN_CMD_ID_TX_MSG_NO_ACK, + priv->channel_idx, msg_len); + } else { + msg_len = es58x_fd_get_msg_len(urb_cmd); + } + + ret = es58x_check_msg_max_len(es58x_dev->dev, + es58x_fd_urb_cmd->tx_can_msg_buf, + msg_len + sizeof(*tx_can_msg)); + if (ret) + return ret; + + /* Fill message contents. 
*/ + tx_can_msg = (struct es58x_fd_tx_can_msg *) + &es58x_fd_urb_cmd->tx_can_msg_buf[msg_len]; + tx_can_msg->packet_idx = (u8)priv->tx_head; + put_unaligned_le32(es58x_get_raw_can_id(cf), &tx_can_msg->can_id); + tx_can_msg->flags = (u8)es58x_get_flags(skb); + if (is_fd) + tx_can_msg->len = cf->len; + else + tx_can_msg->dlc = can_get_cc_dlc(cf, priv->can.ctrlmode); + memcpy(tx_can_msg->data, cf->data, cf->len); + + /* Calculate new sizes */ + msg_len += es58x_fd_sizeof_rx_tx_msg(*tx_can_msg); + priv->tx_urb->transfer_buffer_length = es58x_get_urb_cmd_len(es58x_dev, + msg_len); + put_unaligned_le16(msg_len, &es58x_fd_urb_cmd->msg_len); + + return 0; +} + +static void es58x_fd_convert_bittiming(struct es58x_fd_bittiming *es58x_fd_bt, + struct can_bittiming *bt) +{ + /* The actual value set in the hardware registers is one less + * than the functional value. + */ + const int offset = 1; + + es58x_fd_bt->bitrate = cpu_to_le32(bt->bitrate); + es58x_fd_bt->tseg1 = + cpu_to_le16(bt->prop_seg + bt->phase_seg1 - offset); + es58x_fd_bt->tseg2 = cpu_to_le16(bt->phase_seg2 - offset); + es58x_fd_bt->brp = cpu_to_le16(bt->brp - offset); + es58x_fd_bt->sjw = cpu_to_le16(bt->sjw - offset); +} + +static int es58x_fd_enable_channel(struct es58x_priv *priv) +{ + struct es58x_device *es58x_dev = priv->es58x_dev; + struct net_device *netdev = es58x_dev->netdev[priv->channel_idx]; + struct es58x_fd_tx_conf_msg tx_conf_msg = { 0 }; + u32 ctrlmode; + size_t conf_len = 0; + + es58x_fd_convert_bittiming(&tx_conf_msg.nominal_bittiming, + &priv->can.bittiming); + ctrlmode = priv->can.ctrlmode; + + if (ctrlmode & CAN_CTRLMODE_3_SAMPLES) + tx_conf_msg.samples_per_bit = ES58X_SAMPLES_PER_BIT_THREE; + else + tx_conf_msg.samples_per_bit = ES58X_SAMPLES_PER_BIT_ONE; + tx_conf_msg.sync_edge = ES58X_SYNC_EDGE_SINGLE; + tx_conf_msg.physical_layer = ES58X_PHYSICAL_LAYER_HIGH_SPEED; + tx_conf_msg.echo_mode = ES58X_ECHO_ON; + if (ctrlmode & CAN_CTRLMODE_LISTENONLY) + tx_conf_msg.ctrlmode |= ES58X_FD_CTRLMODE_PASSIVE; + else + tx_conf_msg.ctrlmode |= ES58X_FD_CTRLMODE_ACTIVE; + + if (ctrlmode & CAN_CTRLMODE_FD_NON_ISO) { + tx_conf_msg.ctrlmode |= ES58X_FD_CTRLMODE_FD_NON_ISO; + tx_conf_msg.canfd_enabled = 1; + } else if (ctrlmode & CAN_CTRLMODE_FD) { + tx_conf_msg.ctrlmode |= ES58X_FD_CTRLMODE_FD; + tx_conf_msg.canfd_enabled = 1; + } + + if (tx_conf_msg.canfd_enabled) { + es58x_fd_convert_bittiming(&tx_conf_msg.data_bittiming, + &priv->can.data_bittiming); + + if (priv->can.tdc.tdco) { + tx_conf_msg.tdc_enabled = 1; + tx_conf_msg.tdco = cpu_to_le16(priv->can.tdc.tdco); + tx_conf_msg.tdcf = cpu_to_le16(priv->can.tdc.tdcf); + } + + conf_len = ES58X_FD_CANFD_CONF_LEN; + } else { + conf_len = ES58X_FD_CAN_CONF_LEN; + } + + return es58x_send_msg(es58x_dev, es58x_fd_cmd_type(netdev), + ES58X_FD_CAN_CMD_ID_ENABLE_CHANNEL, + &tx_conf_msg, conf_len, priv->channel_idx); +} + +static int es58x_fd_disable_channel(struct es58x_priv *priv) +{ + /* The type (ES58X_FD_CMD_TYPE_CAN or ES58X_FD_CMD_TYPE_CANFD) does + * not matter here. 
+ */ + return es58x_send_msg(priv->es58x_dev, ES58X_FD_CMD_TYPE_CAN, + ES58X_FD_CAN_CMD_ID_DISABLE_CHANNEL, + ES58X_EMPTY_MSG, 0, priv->channel_idx); +} + +static int es58x_fd_get_timestamp(struct es58x_device *es58x_dev) +{ + return es58x_send_msg(es58x_dev, ES58X_FD_CMD_TYPE_DEVICE, + ES58X_FD_DEV_CMD_ID_TIMESTAMP, ES58X_EMPTY_MSG, + 0, ES58X_CHANNEL_IDX_NA); +} + +/* Nominal bittiming constants for ES582.1 and ES584.1 as specified in + * the microcontroller datasheet: "SAM E701/S70/V70/V71 Family" + * section 49.6.8 "MCAN Nominal Bit Timing and Prescaler Register" + * from Microchip. + * + * The values from the specification are the hardware register + * values. To convert them to the functional values, all ranges were + * incremented by 1 (e.g. range [0..n-1] changed to [1..n]). + */ +static const struct can_bittiming_const es58x_fd_nom_bittiming_const = { + .name = "ES582.1/ES584.1", + .tseg1_min = 2, + .tseg1_max = 256, + .tseg2_min = 2, + .tseg2_max = 128, + .sjw_max = 128, + .brp_min = 1, + .brp_max = 512, + .brp_inc = 1 +}; + +/* Data bittiming constants for ES582.1 and ES584.1 as specified in + * the microcontroller datasheet: "SAM E701/S70/V70/V71 Family" + * section 49.6.4 "MCAN Data Bit Timing and Prescaler Register" from + * Microchip. + */ +static const struct can_bittiming_const es58x_fd_data_bittiming_const = { + .name = "ES582.1/ES584.1", + .tseg1_min = 2, + .tseg1_max = 32, + .tseg2_min = 1, + .tseg2_max = 16, + .sjw_max = 8, + .brp_min = 1, + .brp_max = 32, + .brp_inc = 1 +}; + +/* Transmission Delay Compensation constants for ES582.1 and ES584.1 + * as specified in the microcontroller datasheet: "SAM + * E701/S70/V70/V71 Family" section 49.6.15 "MCAN Transmitter Delay + * Compensation Register" from Microchip. + */ +static const struct can_tdc_const es58x_tdc_const = { + .tdcv_max = 0, /* Manual mode not supported. */ + .tdco_max = 127, + .tdcf_max = 127 +}; + +const struct es58x_parameters es58x_fd_param = { + .bittiming_const = &es58x_fd_nom_bittiming_const, + .data_bittiming_const = &es58x_fd_data_bittiming_const, + .tdc_const = &es58x_tdc_const, + /* The devices use NXP TJA1044G transceivers which guarantee + * the timing for data rates up to 5 Mbps. Bitrates up to 8 + * Mbps work in an optimal environment but are not recommended + * for production environments. + */ + .bitrate_max = 8 * CAN_MBPS, + .clock = {.freq = 80 * CAN_MHZ}, + .ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO | + CAN_CTRLMODE_CC_LEN8_DLC, + .tx_start_of_frame = 0xCEFA, /* FACE in little endian */ + .rx_start_of_frame = 0xFECA, /* CAFE in little endian */ + .tx_urb_cmd_max_len = ES58X_FD_TX_URB_CMD_MAX_LEN, + .rx_urb_cmd_max_len = ES58X_FD_RX_URB_CMD_MAX_LEN, + /* Size of the internal device TX queue is 500. + * + * However, when reaching a value of around 278, the device's + * busy LED turns on and thus the maximum value of 500 is never + * reached in practice. Also, when this value is too high, some + * errors on the echo_msg were witnessed when the device is + * recovering from bus off. + * + * For the above reasons, a value that would prevent the device + * from becoming busy was chosen. In practice, BQL would + * prevent the value from ever getting close to the maximum, + * so no impact on performance was measured. + */ + .fifo_mask = 255, /* echo_skb_max = 256 */ + .dql_min_limit = CAN_FRAME_LEN_MAX * 15, /* Empirical value.
*/ + .tx_bulk_max = ES58X_FD_TX_BULK_MAX, + .urb_cmd_header_len = ES58X_FD_URB_CMD_HEADER_LEN, + .rx_urb_max = ES58X_RX_URBS_MAX, + .tx_urb_max = ES58X_TX_URBS_MAX +}; + +const struct es58x_operators es58x_fd_ops = { + .get_msg_len = es58x_fd_get_msg_len, + .handle_urb_cmd = es58x_fd_handle_urb_cmd, + .fill_urb_header = es58x_fd_fill_urb_header, + .tx_can_msg = es58x_fd_tx_can_msg, + .enable_channel = es58x_fd_enable_channel, + .disable_channel = es58x_fd_disable_channel, + .reset_device = NULL, /* Not implemented in the device firmware. */ + .get_timestamp = es58x_fd_get_timestamp +}; diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.h b/drivers/net/can/usb/etas_es58x/es58x_fd.h new file mode 100644 index 000000000000..ee18a87e40c0 --- /dev/null +++ b/drivers/net/can/usb/etas_es58x/es58x_fd.h @@ -0,0 +1,243 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* Driver for ETAS GmbH ES58X USB CAN(-FD) Bus Interfaces. + * + * File es58x_fd.h: Definitions and declarations specific to ETAS + * ES582.1 and ES584.1 (naming convention: we use the term "ES58X FD" + * when referring to those two variants together). + * + * Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved. + * Copyright (c) 2020 ETAS K.K.. All rights reserved. + * Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr> + */ + +#ifndef __ES58X_FD_H__ +#define __ES58X_FD_H__ + +#include <linux/types.h> + +#define ES582_1_NUM_CAN_CH 2 +#define ES584_1_NUM_CAN_CH 1 +#define ES58X_FD_NUM_CAN_CH 2 +#define ES58X_FD_CHANNEL_IDX_OFFSET 0 + +#define ES58X_FD_TX_BULK_MAX 100 +#define ES58X_FD_RX_BULK_MAX 100 +#define ES58X_FD_ECHO_BULK_MAX 100 + +enum es58x_fd_cmd_type { + ES58X_FD_CMD_TYPE_CAN = 0x03, + ES58X_FD_CMD_TYPE_CANFD = 0x04, + ES58X_FD_CMD_TYPE_DEVICE = 0xFF +}; + +/* Command IDs for ES58X_FD_CMD_TYPE_{CAN,CANFD}. */ +enum es58x_fd_can_cmd_id { + ES58X_FD_CAN_CMD_ID_ENABLE_CHANNEL = 0x01, + ES58X_FD_CAN_CMD_ID_DISABLE_CHANNEL = 0x02, + ES58X_FD_CAN_CMD_ID_TX_MSG = 0x05, + ES58X_FD_CAN_CMD_ID_ECHO_MSG = 0x07, + ES58X_FD_CAN_CMD_ID_RX_MSG = 0x10, + ES58X_FD_CAN_CMD_ID_ERROR_OR_EVENT_MSG = 0x11, + ES58X_FD_CAN_CMD_ID_RESET_RX = 0x20, + ES58X_FD_CAN_CMD_ID_RESET_TX = 0x21, + ES58X_FD_CAN_CMD_ID_TX_MSG_NO_ACK = 0x55 +}; + +/* Command IDs for ES58X_FD_CMD_TYPE_DEVICE. */ +enum es58x_fd_dev_cmd_id { + ES58X_FD_DEV_CMD_ID_GETTIMETICKS = 0x01, + ES58X_FD_DEV_CMD_ID_TIMESTAMP = 0x02 +}; + +/** + * enum es58x_fd_ctrlmode - Controller mode. + * @ES58X_FD_CTRLMODE_ACTIVE: send and receive messages. + * @ES58X_FD_CTRLMODE_PASSIVE: only receive messages (monitor). Do not + * send anything, not even the acknowledgment bit. + * @ES58X_FD_CTRLMODE_FD: CAN FD according to ISO11898-1. + * @ES58X_FD_CTRLMODE_FD_NON_ISO: follow Bosch CAN FD Specification + * V1.0 + * @ES58X_FD_CTRLMODE_DISABLE_PROTOCOL_EXCEPTION_HANDLING: How to + * behave when CAN FD reserved bit is monitored as + * dominant. (c.f. ISO 11898-1:2015, section 10.4.2.4 "Control + * field", paragraph "r0 bit"). 0 (not disable = enable): send + * error frame. 1 (disable): goes into bus integration mode + * (c.f. below). + * @ES58X_FD_CTRLMODE_EDGE_FILTER_DURING_BUS_INTEGRATION: 0: Edge + * filtering is disabled. 1: Edge filtering is enabled. Two + * consecutive dominant bits required to detect an edge for hard + * synchronization. 
+ */ +enum es58x_fd_ctrlmode { + ES58X_FD_CTRLMODE_ACTIVE = 0, + ES58X_FD_CTRLMODE_PASSIVE = BIT(0), + ES58X_FD_CTRLMODE_FD = BIT(4), + ES58X_FD_CTRLMODE_FD_NON_ISO = BIT(5), + ES58X_FD_CTRLMODE_DISABLE_PROTOCOL_EXCEPTION_HANDLING = BIT(6), + ES58X_FD_CTRLMODE_EDGE_FILTER_DURING_BUS_INTEGRATION = BIT(7) +}; + +struct es58x_fd_bittiming { + __le32 bitrate; + __le16 tseg1; /* range: [tseg1_min-1..tseg1_max-1] */ + __le16 tseg2; /* range: [tseg2_min-1..tseg2_max-1] */ + __le16 brp; /* range: [brp_min-1..brp_max-1] */ + __le16 sjw; /* range: [0..sjw_max-1] */ +} __packed; + +/** + * struct es58x_fd_tx_conf_msg - Channel configuration. + * @nominal_bittiming: Nominal bittiming. + * @samples_per_bit: type enum es58x_samples_per_bit. + * @sync_edge: type enum es58x_sync_edge. + * @physical_layer: type enum es58x_physical_layer. + * @echo_mode: type enum es58x_echo_mode. + * @ctrlmode: type enum es58x_fd_ctrlmode. + * @canfd_enabled: boolean (0: Classical CAN, 1: CAN and/or CANFD). + * @data_bittiming: Bittiming for flexible data-rate transmission. + * @tdc_enabled: Transmitter Delay Compensation switch (0: disabled, + * 1: enabled). On very high bitrates, the delay between when the + * bit is sent and received on the CANTX and CANRX pins of the + * transceiver starts to be significant enough for errors to occur + * and thus needs to be compensated. + * @tdco: Transmitter Delay Compensation Offset. Offset value, in time + * quanta, defining the delay between the start of the bit + * reception on the CANRX pin of the transceiver and the SSP + * (Secondary Sample Point). Valid values: 0 to 127. + * @tdcf: Transmitter Delay Compensation Filter window. Defines the + * minimum value for the SSP position, in time quanta. The + * feature is enabled when TDCF is configured to a value greater + * than TDCO. Valid values: 0 to 127. + * + * Please refer to the microcontroller datasheet: "SAM + * E701/S70/V70/V71 Family" section 49 "Controller Area Network + * (MCAN)" for additional information.
+ */ +struct es58x_fd_tx_conf_msg { + struct es58x_fd_bittiming nominal_bittiming; + u8 samples_per_bit; + u8 sync_edge; + u8 physical_layer; + u8 echo_mode; + u8 ctrlmode; + u8 canfd_enabled; + struct es58x_fd_bittiming data_bittiming; + u8 tdc_enabled; + __le16 tdco; + __le16 tdcf; +} __packed; + +#define ES58X_FD_CAN_CONF_LEN \ + (offsetof(struct es58x_fd_tx_conf_msg, canfd_enabled)) +#define ES58X_FD_CANFD_CONF_LEN (sizeof(struct es58x_fd_tx_conf_msg)) + +struct es58x_fd_tx_can_msg { + u8 packet_idx; + __le32 can_id; + u8 flags; + union { + u8 dlc; /* Only if cmd_id is ES58X_FD_CMD_TYPE_CAN */ + u8 len; /* Only if cmd_id is ES58X_FD_CMD_TYPE_CANFD */ + } __packed; + u8 data[CANFD_MAX_DLEN]; +} __packed; + +#define ES58X_FD_CAN_TX_LEN \ + (offsetof(struct es58x_fd_tx_can_msg, data[CAN_MAX_DLEN])) +#define ES58X_FD_CANFD_TX_LEN (sizeof(struct es58x_fd_tx_can_msg)) + +struct es58x_fd_rx_can_msg { + __le64 timestamp; + __le32 can_id; + u8 flags; + union { + u8 dlc; /* Only if cmd_id is ES58X_FD_CMD_TYPE_CAN */ + u8 len; /* Only if cmd_id is ES58X_FD_CMD_TYPE_CANFD */ + } __packed; + u8 data[CANFD_MAX_DLEN]; +} __packed; + +#define ES58X_FD_CAN_RX_LEN \ + (offsetof(struct es58x_fd_rx_can_msg, data[CAN_MAX_DLEN])) +#define ES58X_FD_CANFD_RX_LEN (sizeof(struct es58x_fd_rx_can_msg)) + +struct es58x_fd_echo_msg { + __le64 timestamp; + u8 packet_idx; +} __packed; + +struct es58x_fd_rx_event_msg { + __le64 timestamp; + __le32 can_id; + u8 flags; /* type enum es58x_flag */ + u8 error_type; /* 0: event, 1: error */ + u8 error_code; + u8 event_code; +} __packed; + +struct es58x_fd_tx_ack_msg { + __le32 rx_cmd_ret_le32; /* type enum es58x_cmd_ret_code_u32 */ + __le16 tx_free_entries; /* Number of remaining free entries in the device TX queue */ +} __packed; + +/** + * struct es58x_fd_urb_cmd - Commands received from or sent to the + * ES58X FD device. + * @SOF: Start of Frame. + * @cmd_type: Command Type (type: enum es58x_fd_cmd_type). The CRC + * calculation starts at this position. + * @cmd_id: Command ID (type: enum es58x_fd_cmd_id). + * @channel_idx: Channel index starting at 0. + * @msg_len: Length of the message, excluding CRC (i.e. length of the + * union). + * @tx_conf_msg: Channel configuration. + * @tx_can_msg_buf: Concatenation of Tx messages. Type is "u8[]" + * instead of "struct es58x_fd_tx_msg[]" because the structure + * has a flexible size. + * @rx_can_msg_buf: Concatenation of Rx messages. Type is "u8[]" instead + * of "struct es58x_fd_rx_msg[]" because the structure has a + * flexible size. + * @echo_msg: Array of echo messages (e.g. Tx messages being looped + * back). + * @rx_event_msg: Error or event message. + * @tx_ack_msg: Tx acknowledgment message. + * @timestamp: Timestamp reply. + * @rx_cmd_ret_le32: Rx 32 bits return code (type: enum + * es58x_cmd_ret_code_u32). + * @raw_msg: Message raw payload. + * @reserved_for_crc16_do_not_use: The structure ends with a + * CRC16. Because the structures in the above union are of variable + * lengths, we cannot predict the offset of the CRC in + * advance. Use functions es58x_get_crc() and es58x_set_crc() to + * manipulate it.
+ */ +struct es58x_fd_urb_cmd { + __le16 SOF; + u8 cmd_type; + u8 cmd_id; + u8 channel_idx; + __le16 msg_len; + + union { + struct es58x_fd_tx_conf_msg tx_conf_msg; + u8 tx_can_msg_buf[ES58X_FD_TX_BULK_MAX * ES58X_FD_CANFD_TX_LEN]; + u8 rx_can_msg_buf[ES58X_FD_RX_BULK_MAX * ES58X_FD_CANFD_RX_LEN]; + struct es58x_fd_echo_msg echo_msg[ES58X_FD_ECHO_BULK_MAX]; + struct es58x_fd_rx_event_msg rx_event_msg; + struct es58x_fd_tx_ack_msg tx_ack_msg; + __le64 timestamp; + __le32 rx_cmd_ret_le32; + u8 raw_msg[0]; + } __packed; + + __le16 reserved_for_crc16_do_not_use; +} __packed; + +#define ES58X_FD_URB_CMD_HEADER_LEN (offsetof(struct es58x_fd_urb_cmd, raw_msg)) +#define ES58X_FD_TX_URB_CMD_MAX_LEN \ + ES58X_SIZEOF_URB_CMD(struct es58x_fd_urb_cmd, tx_can_msg_buf) +#define ES58X_FD_RX_URB_CMD_MAX_LEN \ + ES58X_SIZEOF_URB_CMD(struct es58x_fd_urb_cmd, rx_can_msg_buf) + +#endif /* __ES58X_FD_H__ */ diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index ba509aed7b4c..1d6f77252f01 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c @@ -365,16 +365,11 @@ static int pcan_usb_get_serial(struct peak_usb_device *dev, u32 *serial_number) int err; err = pcan_usb_wait_rsp(dev, PCAN_USB_CMD_SN, PCAN_USB_GET, args); - if (err) { - netdev_err(dev->netdev, "getting serial failure: %d\n", err); - } else if (serial_number) { - __le32 tmp32; - - memcpy(&tmp32, args, 4); - *serial_number = le32_to_cpu(tmp32); - } + if (err) + return err; + *serial_number = le32_to_cpup((__le32 *)args); - return err; + return 0; } /* @@ -388,8 +383,8 @@ static int pcan_usb_get_device_id(struct peak_usb_device *dev, u32 *device_id) err = pcan_usb_wait_rsp(dev, PCAN_USB_CMD_DEVID, PCAN_USB_GET, args); if (err) netdev_err(dev->netdev, "getting device id failure: %d\n", err); - else if (device_id) - *device_id = args[0]; + + *device_id = args[0]; return err; } @@ -399,14 +394,10 @@ static int pcan_usb_get_device_id(struct peak_usb_device *dev, u32 *device_id) */ static int pcan_usb_update_ts(struct pcan_usb_msg_context *mc) { - __le16 tmp16; - - if ((mc->ptr+2) > mc->end) + if ((mc->ptr + 2) > mc->end) return -EINVAL; - memcpy(&tmp16, mc->ptr, 2); - - mc->ts16 = le16_to_cpu(tmp16); + mc->ts16 = get_unaligned_le16(mc->ptr); if (mc->rec_idx > 0) peak_usb_update_ts_now(&mc->pdev->time_ref, mc->ts16); @@ -423,16 +414,13 @@ static int pcan_usb_decode_ts(struct pcan_usb_msg_context *mc, u8 first_packet) { /* only 1st packet supplies a word timestamp */ if (first_packet) { - __le16 tmp16; - if ((mc->ptr + 2) > mc->end) return -EINVAL; - memcpy(&tmp16, mc->ptr, 2); - mc->ptr += 2; - - mc->ts16 = le16_to_cpu(tmp16); + mc->ts16 = get_unaligned_le16(mc->ptr); mc->prev_ts8 = mc->ts16 & 0x00ff; + + mc->ptr += 2; } else { u8 ts8; @@ -722,25 +710,17 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) return -ENOMEM; if (status_len & PCAN_USB_STATUSLEN_EXT_ID) { - __le32 tmp32; - if ((mc->ptr + 4) > mc->end) goto decode_failed; - memcpy(&tmp32, mc->ptr, 4); + cf->can_id = get_unaligned_le32(mc->ptr) >> 3 | CAN_EFF_FLAG; mc->ptr += 4; - - cf->can_id = (le32_to_cpu(tmp32) >> 3) | CAN_EFF_FLAG; } else { - __le16 tmp16; - if ((mc->ptr + 2) > mc->end) goto decode_failed; - memcpy(&tmp16, mc->ptr, 2); + cf->can_id = get_unaligned_le16(mc->ptr) >> 5; mc->ptr += 2; - - cf->can_id = le16_to_cpu(tmp16) >> 5; } can_frame_set_cc_len(cf, rec_len, mc->pdev->dev.can.ctrlmode); @@ -854,15 +834,15 @@ static int pcan_usb_encode_msg(struct 
peak_usb_device *dev, struct sk_buff *skb, /* can id */ if (cf->can_id & CAN_EFF_FLAG) { - __le32 tmp32 = cpu_to_le32((cf->can_id & CAN_ERR_MASK) << 3); - *pc |= PCAN_USB_STATUSLEN_EXT_ID; - memcpy(++pc, &tmp32, 4); + pc++; + + put_unaligned_le32((cf->can_id & CAN_ERR_MASK) << 3, pc); pc += 4; } else { - __le16 tmp16 = cpu_to_le16((cf->can_id & CAN_ERR_MASK) << 5); + pc++; - memcpy(++pc, &tmp16, 2); + put_unaligned_le16((cf->can_id & CAN_ERR_MASK) << 5, pc); pc += 2; } @@ -1039,7 +1019,7 @@ const struct peak_usb_adapter pcan_usb = { CAN_CTRLMODE_BERR_REPORTING | CAN_CTRLMODE_CC_LEN8_DLC, .clock = { - .freq = PCAN_USB_CRYSTAL_HZ / 2 , + .freq = PCAN_USB_CRYSTAL_HZ / 2, }, .bittiming_const = &pcan_usb_const, @@ -1050,7 +1030,6 @@ const struct peak_usb_adapter pcan_usb = { /* timestamps usage */ .ts_used_bits = 16, - .ts_period = 24575, /* calibration period in ts. */ .us_per_ts_scale = PCAN_USB_TS_US_PER_TICK, /* us=(ts*scale) */ .us_per_ts_shift = PCAN_USB_TS_DIV_SHIFTER, /* >> shift */ diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index e69b005be068..e8f43ed90b72 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -27,28 +27,32 @@ MODULE_DESCRIPTION("CAN driver for PEAK-System USB adapters"); MODULE_LICENSE("GPL v2"); /* Table of devices that work with this driver */ -static struct usb_device_id peak_usb_table[] = { - {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USB_PRODUCT_ID)}, - {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPRO_PRODUCT_ID)}, - {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBFD_PRODUCT_ID)}, - {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPROFD_PRODUCT_ID)}, - {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBCHIP_PRODUCT_ID)}, - {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBX6_PRODUCT_ID)}, - {} /* Terminating entry */ +static const struct usb_device_id peak_usb_table[] = { + { + USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USB_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&pcan_usb, + }, { + USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPRO_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&pcan_usb_pro, + }, { + USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBFD_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&pcan_usb_fd, + }, { + USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPROFD_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&pcan_usb_pro_fd, + }, { + USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBCHIP_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&pcan_usb_chip, + }, { + USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBX6_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&pcan_usb_x6, + }, { + /* Terminating entry */ + } }; MODULE_DEVICE_TABLE(usb, peak_usb_table); -/* List of supported PCAN-USB adapters (NULL terminated list) */ -static const struct peak_usb_adapter *const peak_usb_adapters_list[] = { - &pcan_usb, - &pcan_usb_pro, - &pcan_usb_fd, - &pcan_usb_pro_fd, - &pcan_usb_chip, - &pcan_usb_x6, -}; - /* * dump memory */ @@ -624,6 +628,7 @@ static int peak_usb_ndo_stop(struct net_device *netdev) /* can set bus off now */ if (dev->adapter->dev_set_bus) { int err = dev->adapter->dev_set_bus(dev, 0); + if (err) return err; } @@ -927,24 +932,11 @@ static void peak_usb_disconnect(struct usb_interface *intf) static int peak_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { - struct usb_device *usb_dev = interface_to_usbdev(intf); - const u16 usb_id_product = le16_to_cpu(usb_dev->descriptor.idProduct); - const struct peak_usb_adapter *peak_usb_adapter = NULL; + const struct peak_usb_adapter *peak_usb_adapter; int i, err = 
-ENOMEM; /* get corresponding PCAN-USB adapter */ - for (i = 0; i < ARRAY_SIZE(peak_usb_adapters_list); i++) - if (peak_usb_adapters_list[i]->device_id == usb_id_product) { - peak_usb_adapter = peak_usb_adapters_list[i]; - break; - } - - if (!peak_usb_adapter) { - /* should never come except device_id bad usage in this file */ - pr_err("%s: didn't find device id. 0x%x in devices list\n", - PCAN_USB_DRIVER_NAME, usb_id_product); - return -ENODEV; - } + peak_usb_adapter = (const struct peak_usb_adapter *)id->driver_info; /* got corresponding adapter: check if it handles current interface */ if (peak_usb_adapter->intf_probe) { diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h index e15b4c78f309..b00a4811bf61 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h @@ -31,7 +31,7 @@ /* usb adapters maximum channels per usb interface */ #define PCAN_USB_MAX_CHANNEL 2 -/* maximum length of the usb commands sent to/received from the devices */ +/* maximum length of the usb commands sent to/received from the devices */ #define PCAN_USB_MAX_CMD_LEN 32 struct peak_usb_device; @@ -73,7 +73,6 @@ struct peak_usb_adapter { u8 ep_msg_in; u8 ep_msg_out[PCAN_USB_MAX_CHANNEL]; u8 ts_used_bits; - u32 ts_period; u8 us_per_ts_shift; u32 us_per_ts_scale; @@ -114,8 +113,6 @@ struct peak_usb_device { unsigned int ctrl_idx; u32 state; - struct sk_buff *echo_skb[PCAN_USB_MAX_TX_URBS]; - struct usb_device *udev; struct net_device *netdev; @@ -132,8 +129,6 @@ struct peak_usb_device { u8 ep_msg_in; u8 ep_msg_out; - u16 bus_load; - struct peak_usb_device *prev_siblings; struct peak_usb_device *next_siblings; }; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c index 6f62b6f51051..b11eabad575b 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c @@ -1081,7 +1081,6 @@ const struct peak_usb_adapter pcan_usb_fd = { /* timestamps usage */ .ts_used_bits = 32, - .ts_period = 1000000, /* calibration period in ts. */ .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */ .us_per_ts_shift = 0, @@ -1156,7 +1155,6 @@ const struct peak_usb_adapter pcan_usb_chip = { /* timestamps usage */ .ts_used_bits = 32, - .ts_period = 1000000, /* calibration period in ts. */ .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */ .us_per_ts_shift = 0, @@ -1231,7 +1229,6 @@ const struct peak_usb_adapter pcan_usb_pro_fd = { /* timestamps usage */ .ts_used_bits = 32, - .ts_period = 1000000, /* calibration period in ts. */ .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */ .us_per_ts_shift = 0, @@ -1306,7 +1303,6 @@ const struct peak_usb_adapter pcan_usb_x6 = { /* timestamps usage */ .ts_used_bits = 32, - .ts_period = 1000000, /* calibration period in ts. 
*/ .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */ .us_per_ts_shift = 0, diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index 2d1b645af76c..858ab22708fc 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c @@ -290,7 +290,7 @@ static int pcan_usb_pro_wait_rsp(struct peak_usb_device *dev, pr->data_type); /* check if channel in response corresponds too */ - else if ((req_channel != 0xff) && \ + else if ((req_channel != 0xff) && (pr->bus_act.channel != req_channel)) netdev_err(dev->netdev, "got rsp %xh but on chan%u: ignored\n", @@ -439,8 +439,7 @@ static int pcan_usb_pro_get_device_id(struct peak_usb_device *dev, return err; pdn = (struct pcan_usb_pro_devid *)pc; - if (device_id) - *device_id = le32_to_cpu(pdn->serial_num); + *device_id = le32_to_cpu(pdn->serial_num); return err; } @@ -1058,7 +1057,6 @@ const struct peak_usb_adapter pcan_usb_pro = { /* timestamps usage */ .ts_used_bits = 32, - .ts_period = 1000000, /* calibration period in ts. */ .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */ .us_per_ts_shift = 0, diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h index 6f4504300e23..5d4cf14eb9d9 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h @@ -34,11 +34,11 @@ /* PCAN_USBPRO_INFO_BL vendor request record type */ struct __packed pcan_usb_pro_blinfo { __le32 ctrl_type; - u8 version[4]; - u8 day; - u8 month; - u8 year; - u8 dummy; + u8 version[4]; + u8 day; + u8 month; + u8 year; + u8 dummy; __le32 serial_num_hi; __le32 serial_num_lo; __le32 hw_type; @@ -48,11 +48,11 @@ struct __packed pcan_usb_pro_blinfo { /* PCAN_USBPRO_INFO_FW vendor request record type */ struct __packed pcan_usb_pro_fwinfo { __le32 ctrl_type; - u8 version[4]; - u8 day; - u8 month; - u8 year; - u8 dummy; + u8 version[4]; + u8 day; + u8 month; + u8 year; + u8 dummy; __le32 fw_type; }; @@ -78,39 +78,39 @@ struct __packed pcan_usb_pro_fwinfo { /* record structures */ struct __packed pcan_usb_pro_btr { - u8 data_type; - u8 channel; + u8 data_type; + u8 channel; __le16 dummy; __le32 CCBT; }; struct __packed pcan_usb_pro_busact { - u8 data_type; - u8 channel; + u8 data_type; + u8 channel; __le16 onoff; }; struct __packed pcan_usb_pro_silent { - u8 data_type; - u8 channel; + u8 data_type; + u8 channel; __le16 onoff; }; struct __packed pcan_usb_pro_filter { - u8 data_type; - u8 dummy; + u8 data_type; + u8 dummy; __le16 filter_mode; }; struct __packed pcan_usb_pro_setts { - u8 data_type; - u8 dummy; + u8 data_type; + u8 dummy; __le16 mode; }; struct __packed pcan_usb_pro_devid { - u8 data_type; - u8 channel; + u8 data_type; + u8 channel; __le16 dummy; __le32 serial_num; }; @@ -122,21 +122,21 @@ struct __packed pcan_usb_pro_devid { #define PCAN_USBPRO_LED_OFF 0x04 struct __packed pcan_usb_pro_setled { - u8 data_type; - u8 channel; + u8 data_type; + u8 channel; __le16 mode; __le32 timeout; }; struct __packed pcan_usb_pro_rxmsg { - u8 data_type; - u8 client; - u8 flags; - u8 len; + u8 data_type; + u8 client; + u8 flags; + u8 len; __le32 ts32; __le32 id; - u8 data[8]; + u8 data[8]; }; #define PCAN_USBPRO_STATUS_ERROR 0x0001 @@ -145,26 +145,26 @@ struct __packed pcan_usb_pro_rxmsg { #define PCAN_USBPRO_STATUS_QOVERRUN 0x0008 struct __packed pcan_usb_pro_rxstatus { - u8 data_type; - u8 channel; + u8 data_type; + u8 channel; __le16 status; __le32 ts32; __le32 err_frm; }; struct __packed pcan_usb_pro_rxts { 
- u8 data_type; - u8 dummy[3]; + u8 data_type; + u8 dummy[3]; __le32 ts64[2]; }; struct __packed pcan_usb_pro_txmsg { - u8 data_type; - u8 client; - u8 flags; - u8 len; + u8 data_type; + u8 client; + u8 flags; + u8 len; __le32 id; - u8 data[8]; + u8 data[8]; }; union pcan_usb_pro_rec { diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 2bd1bab71497..96f7c9eede35 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -2570,6 +2570,17 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port, mcr |= PMCR_RX_FC_EN; } + if (mode == MLO_AN_PHY && phydev && phy_init_eee(phydev, 0) >= 0) { + switch (speed) { + case SPEED_1000: + mcr |= PMCR_FORCE_EEE1G; + break; + case SPEED_100: + mcr |= PMCR_FORCE_EEE100; + break; + } + } + mt7530_set(priv, MT7530_PMCR_P(port), mcr); } @@ -2800,6 +2811,36 @@ mt753x_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) return priv->info->phy_write(ds, port, regnum, val); } +static int mt753x_get_mac_eee(struct dsa_switch *ds, int port, + struct ethtool_eee *e) +{ + struct mt7530_priv *priv = ds->priv; + u32 eeecr = mt7530_read(priv, MT7530_PMEEECR_P(port)); + + e->tx_lpi_enabled = !(eeecr & LPI_MODE_EN); + e->tx_lpi_timer = GET_LPI_THRESH(eeecr); + + return 0; +} + +static int mt753x_set_mac_eee(struct dsa_switch *ds, int port, + struct ethtool_eee *e) +{ + struct mt7530_priv *priv = ds->priv; + u32 set, mask = LPI_THRESH_MASK | LPI_MODE_EN; + + if (e->tx_lpi_timer > 0xFFF) + return -EINVAL; + + set = SET_LPI_THRESH(e->tx_lpi_timer); + if (!e->tx_lpi_enabled) + /* Force LPI Mode without a delay */ + set |= LPI_MODE_EN; + mt7530_rmw(priv, MT7530_PMEEECR_P(port), mask, set); + + return 0; +} + static const struct dsa_switch_ops mt7530_switch_ops = { .get_tag_protocol = mtk_get_tag_protocol, .setup = mt753x_setup, @@ -2835,6 +2876,8 @@ static const struct dsa_switch_ops mt7530_switch_ops = { .phylink_mac_an_restart = mt753x_phylink_mac_an_restart, .phylink_mac_link_down = mt753x_phylink_mac_link_down, .phylink_mac_link_up = mt753x_phylink_mac_link_up, + .get_mac_eee = mt753x_get_mac_eee, + .set_mac_eee = mt753x_set_mac_eee, }; static const struct mt753x_info mt753x_table[] = { diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index ec36ea5dfd57..0204da486f3a 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h @@ -257,6 +257,8 @@ enum mt7530_vlan_port_attr { #define PMCR_RX_EN BIT(13) #define PMCR_BACKOFF_EN BIT(9) #define PMCR_BACKPR_EN BIT(8) +#define PMCR_FORCE_EEE1G BIT(7) +#define PMCR_FORCE_EEE100 BIT(6) #define PMCR_TX_FC_EN BIT(5) #define PMCR_RX_FC_EN BIT(4) #define PMCR_FORCE_SPEED_1000 BIT(3) @@ -281,7 +283,8 @@ enum mt7530_vlan_port_attr { #define PMCR_LINK_SETTINGS_MASK (PMCR_TX_EN | PMCR_FORCE_SPEED_1000 | \ PMCR_RX_EN | PMCR_FORCE_SPEED_100 | \ PMCR_TX_FC_EN | PMCR_RX_FC_EN | \ - PMCR_FORCE_FDX | PMCR_FORCE_LNK) + PMCR_FORCE_FDX | PMCR_FORCE_LNK | \ + PMCR_FORCE_EEE1G | PMCR_FORCE_EEE100) #define PMCR_CPU_PORT_SETTING(id) (PMCR_FORCE_MODE_ID((id)) | \ PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \ PMCR_BACKOFF_EN | PMCR_BACKPR_EN | \ @@ -290,6 +293,15 @@ enum mt7530_vlan_port_attr { PMCR_FORCE_SPEED_1000 | \ PMCR_FORCE_FDX | PMCR_FORCE_LNK) +#define MT7530_PMEEECR_P(x) (0x3004 + (x) * 0x100) +#define WAKEUP_TIME_1000(x) (((x) & 0xFF) << 24) +#define WAKEUP_TIME_100(x) (((x) & 0xFF) << 16) +#define LPI_THRESH_MASK GENMASK(15, 4) +#define LPI_THRESH_SHT 4 +#define SET_LPI_THRESH(x) (((x) << LPI_THRESH_SHT) & LPI_THRESH_MASK) +#define GET_LPI_THRESH(x) (((x) & 
LPI_THRESH_MASK) >> LPI_THRESH_SHT) +#define LPI_MODE_EN BIT(0) + #define MT7530_PMSR_P(x) (0x3008 + (x) * 0x100) #define PMSR_EEE1G BIT(7) #define PMSR_EEE100M BIT(6) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 95f07fcd4f85..860dca41cf4b 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3161,10 +3161,17 @@ out_resources: return err; } +/* prod_id for switch families which do not have a PHY model number */ +static const u16 family_prod_id_table[] = { + [MV88E6XXX_FAMILY_6341] = MV88E6XXX_PORT_SWITCH_ID_PROD_6341, + [MV88E6XXX_FAMILY_6390] = MV88E6XXX_PORT_SWITCH_ID_PROD_6390, +}; + static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg) { struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv; struct mv88e6xxx_chip *chip = mdio_bus->chip; + u16 prod_id; u16 val; int err; @@ -3175,23 +3182,12 @@ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg) err = chip->info->ops->phy_read(chip, bus, phy, reg, &val); mv88e6xxx_reg_unlock(chip); - if (reg == MII_PHYSID2) { - /* Some internal PHYs don't have a model number. */ - if (chip->info->family != MV88E6XXX_FAMILY_6165) - /* Then there is the 6165 family. It gets is - * PHYs correct. But it can also have two - * SERDES interfaces in the PHY address - * space. And these don't have a model - * number. But they are not PHYs, so we don't - * want to give them something a PHY driver - * will recognise. - * - * Use the mv88e6390 family model number - * instead, for anything which really could be - * a PHY, - */ - if (!(val & 0x3f0)) - val |= MV88E6XXX_PORT_SWITCH_ID_PROD_6390 >> 4; + /* Some internal PHYs don't have a model number. */ + if (reg == MII_PHYSID2 && !(val & 0x3f0) && + chip->info->family < ARRAY_SIZE(family_prod_id_table)) { + prod_id = family_prod_id_table[chip->info->family]; + if (prod_id) + val |= prod_id >> 4; } return err ? 
err : val; diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 4b85f2b74872..d46460c5b44d 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -82,6 +82,7 @@ source "drivers/net/ethernet/huawei/Kconfig" source "drivers/net/ethernet/i825xx/Kconfig" source "drivers/net/ethernet/ibm/Kconfig" source "drivers/net/ethernet/intel/Kconfig" +source "drivers/net/ethernet/microsoft/Kconfig" source "drivers/net/ethernet/xscale/Kconfig" config JME diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 9394493e8187..cb3f9084a21b 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -45,6 +45,7 @@ obj-$(CONFIG_NET_VENDOR_HUAWEI) += huawei/ obj-$(CONFIG_NET_VENDOR_IBM) += ibm/ obj-$(CONFIG_NET_VENDOR_INTEL) += intel/ obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/ +obj-$(CONFIG_NET_VENDOR_MICROSOFT) += microsoft/ obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/ obj-$(CONFIG_JME) += jme.o obj-$(CONFIG_KORINA) += korina.o diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index 9c5891bbfe61..d77fafbc1530 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -1449,10 +1449,10 @@ static int greth_of_probe(struct platform_device *ofdev) break; } if (i == 6) { - const u8 *addr; + u8 addr[ETH_ALEN]; - addr = of_get_mac_address(ofdev->dev.of_node); - if (!IS_ERR(addr)) { + err = of_get_mac_address(ofdev->dev.of_node, addr); + if (!err) { for (i = 0; i < 6; i++) macaddr[i] = (unsigned int) addr[i]; } else { diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index 5ed80d9a6b9f..f99ae317c188 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -790,7 +790,6 @@ static int emac_probe(struct platform_device *pdev) struct emac_board_info *db; struct net_device *ndev; int ret = 0; - const char *mac_addr; ndev = alloc_etherdev(sizeof(struct emac_board_info)); if (!ndev) { @@ -853,12 +852,9 @@ static int emac_probe(struct platform_device *pdev) } /* Read MAC-address from DT */ - mac_addr = of_get_mac_address(np); - if (!IS_ERR(mac_addr)) - ether_addr_copy(ndev->dev_addr, mac_addr); - - /* Check if the MAC address is valid, if not get a random one */ - if (!is_valid_ether_addr(ndev->dev_addr)) { + ret = of_get_mac_address(np, ndev->dev_addr); + if (ret) { + /* if the MAC address is invalid get a random one */ eth_hw_addr_random(ndev); dev_warn(&pdev->dev, "using random MAC address %pM\n", ndev->dev_addr); diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 907125abef2c..1c00d719e5d7 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -1351,7 +1351,6 @@ static int altera_tse_probe(struct platform_device *pdev) struct resource *control_port; struct resource *dma_res; struct altera_tse_private *priv; - const unsigned char *macaddr; void __iomem *descmap; const struct of_device_id *of_id = NULL; @@ -1525,10 +1524,8 @@ static int altera_tse_probe(struct platform_device *pdev) priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE; /* get default MAC address from device tree */ - macaddr = of_get_mac_address(pdev->dev.of_node); - if (!IS_ERR(macaddr)) - ether_addr_copy(ndev->dev_addr, macaddr); - else + ret = of_get_mac_address(pdev->dev.of_node, ndev->dev_addr); + if (ret) eth_hw_addr_random(ndev); /* get phy addr and create mdio */ 
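The common thread in the Ethernet driver hunks above (greth, sun4i-emac, altera_tse) and in several drivers further below is the migration of of_get_mac_address() from returning a pointer checked with IS_ERR() to filling a caller-provided buffer and returning 0 or a negative errno. A minimal sketch of the resulting probe-time pattern, assuming nothing beyond the API visible in this diff; the example_init_mac() helper name is illustrative and not taken from any of these drivers:

#include <linux/etherdevice.h>
#include <linux/of_net.h>

/* Illustrative helper: fetch the MAC address from the device tree and
 * fall back to a random one. With the new calling convention the
 * address is written directly into ndev->dev_addr; -EPROBE_DEFER
 * (e.g. an NVMEM provider that is not bound yet) must be propagated
 * so the probe is retried, as the macb conversion further below does.
 */
static int example_init_mac(struct net_device *ndev, struct device_node *np)
{
	int err;

	err = of_get_mac_address(np, ndev->dev_addr);
	if (err == -EPROBE_DEFER)
		return err;	/* retry the whole probe later */
	if (err)
		eth_hw_addr_random(ndev);	/* no valid address in DT */

	return 0;
}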
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index b56a9e2aecd9..67b8113a2b53 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -857,7 +857,6 @@ int arc_emac_probe(struct net_device *ndev, int interface) struct device_node *phy_node; struct phy_device *phydev = NULL; struct arc_emac_priv *priv; - const char *mac_addr; unsigned int id, clock_frequency, irq; int err; @@ -942,11 +941,8 @@ int arc_emac_probe(struct net_device *ndev, int interface) } /* Get MAC address from device tree */ - mac_addr = of_get_mac_address(dev->of_node); - - if (!IS_ERR(mac_addr)) - ether_addr_copy(ndev->dev_addr, mac_addr); - else + err = of_get_mac_address(dev->of_node, ndev->dev_addr); + if (err) eth_hw_addr_random(ndev); arc_emac_set_address_internal(ndev); diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c index 7352f98123c7..3a23b92ebfe3 100644 --- a/drivers/net/ethernet/atheros/ag71xx.c +++ b/drivers/net/ethernet/atheros/ag71xx.c @@ -1856,7 +1856,6 @@ static int ag71xx_probe(struct platform_device *pdev) const struct ag71xx_dcfg *dcfg; struct net_device *ndev; struct resource *res; - const void *mac_addr; int tx_size, err, i; struct ag71xx *ag; @@ -1957,10 +1956,8 @@ static int ag71xx_probe(struct platform_device *pdev) ag->stop_desc->ctrl = 0; ag->stop_desc->next = (u32)ag->stop_desc_dma; - mac_addr = of_get_mac_address(np); - if (!IS_ERR(mac_addr)) - memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); - if (IS_ERR(mac_addr) || !is_valid_ether_addr(ndev->dev_addr)) { + err = of_get_mac_address(np, ndev->dev_addr); + if (err) { netif_err(ag, probe, ndev, "invalid MAC address, using random address\n"); eth_random_addr(ndev->dev_addr); } diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h index a0562a90fb6d..28ae5c16831e 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c.h +++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h @@ -367,6 +367,7 @@ struct atl1c_hw { u16 phy_id1; u16 phy_id2; + spinlock_t intr_mask_lock; /* protect the intr_mask */ u32 intr_mask; u8 preamble_len; @@ -506,6 +507,7 @@ struct atl1c_adapter { struct net_device *netdev; struct pci_dev *pdev; struct napi_struct napi; + struct napi_struct tx_napi; struct page *rx_page; unsigned int rx_page_offset; unsigned int rx_frag_size; diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index d54375b255dc..1d17c24e6d75 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -813,6 +813,7 @@ static int atl1c_sw_init(struct atl1c_adapter *adapter) atl1c_set_rxbufsize(adapter, adapter->netdev); atomic_set(&adapter->irq_sem, 1); spin_lock_init(&adapter->mdio_lock); + spin_lock_init(&adapter->hw.intr_mask_lock); set_bit(__AT_DOWN, &adapter->flags); return 0; @@ -1530,20 +1531,19 @@ static inline void atl1c_clear_phy_int(struct atl1c_adapter *adapter) spin_unlock(&adapter->mdio_lock); } -static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter, - enum atl1c_trans_queue type) +static int atl1c_clean_tx(struct napi_struct *napi, int budget) { - struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type]; + struct atl1c_adapter *adapter = + container_of(napi, struct atl1c_adapter, tx_napi); + struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[atl1c_trans_normal]; struct atl1c_buffer *buffer_info; struct pci_dev *pdev = adapter->pdev; u16 next_to_clean = 
atomic_read(&tpd_ring->next_to_clean); u16 hw_next_to_clean; - u16 reg; unsigned int total_bytes = 0, total_packets = 0; + unsigned long flags; - reg = type == atl1c_trans_high ? REG_TPD_PRI1_CIDX : REG_TPD_PRI0_CIDX; - - AT_READ_REGW(&adapter->hw, reg, &hw_next_to_clean); + AT_READ_REGW(&adapter->hw, REG_TPD_PRI0_CIDX, &hw_next_to_clean); while (next_to_clean != hw_next_to_clean) { buffer_info = &tpd_ring->buffer_info[next_to_clean]; @@ -1564,7 +1564,15 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter, netif_wake_queue(adapter->netdev); } - return true; + if (total_packets < budget) { + napi_complete_done(napi, total_packets); + spin_lock_irqsave(&adapter->hw.intr_mask_lock, flags); + adapter->hw.intr_mask |= ISR_TX_PKT; + AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask); + spin_unlock_irqrestore(&adapter->hw.intr_mask_lock, flags); + return total_packets; + } + return budget; } /** @@ -1599,13 +1607,22 @@ static irqreturn_t atl1c_intr(int irq, void *data) AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT); if (status & ISR_RX_PKT) { if (likely(napi_schedule_prep(&adapter->napi))) { + spin_lock(&hw->intr_mask_lock); hw->intr_mask &= ~ISR_RX_PKT; AT_WRITE_REG(hw, REG_IMR, hw->intr_mask); + spin_unlock(&hw->intr_mask_lock); __napi_schedule(&adapter->napi); } } - if (status & ISR_TX_PKT) - atl1c_clean_tx_irq(adapter, atl1c_trans_normal); + if (status & ISR_TX_PKT) { + if (napi_schedule_prep(&adapter->tx_napi)) { + spin_lock(&hw->intr_mask_lock); + hw->intr_mask &= ~ISR_TX_PKT; + AT_WRITE_REG(hw, REG_IMR, hw->intr_mask); + spin_unlock(&hw->intr_mask_lock); + __napi_schedule(&adapter->tx_napi); + } + } handled = IRQ_HANDLED; /* check if PCIE PHY Link down */ @@ -1876,6 +1893,7 @@ static int atl1c_clean(struct napi_struct *napi, int budget) struct atl1c_adapter *adapter = container_of(napi, struct atl1c_adapter, napi); int work_done = 0; + unsigned long flags; /* Keep link state information with original netdev */ if (!netif_carrier_ok(adapter->netdev)) @@ -1886,8 +1904,10 @@ static int atl1c_clean(struct napi_struct *napi, int budget) if (work_done < budget) { quit_polling: napi_complete_done(napi, work_done); + spin_lock_irqsave(&adapter->hw.intr_mask_lock, flags); adapter->hw.intr_mask |= ISR_RX_PKT; AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask); + spin_unlock_irqrestore(&adapter->hw.intr_mask_lock, flags); } return work_done; } @@ -2325,6 +2345,7 @@ static int atl1c_up(struct atl1c_adapter *adapter) atl1c_check_link_status(adapter); clear_bit(__AT_DOWN, &adapter->flags); napi_enable(&adapter->napi); + napi_enable(&adapter->tx_napi); atl1c_irq_enable(adapter); netif_start_queue(netdev); return err; @@ -2345,6 +2366,7 @@ static void atl1c_down(struct atl1c_adapter *adapter) set_bit(__AT_DOWN, &adapter->flags); netif_carrier_off(netdev); napi_disable(&adapter->napi); + napi_disable(&adapter->tx_napi); atl1c_irq_disable(adapter); atl1c_free_irq(adapter); /* disable ASPM if device inactive */ @@ -2593,7 +2615,9 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->mii.mdio_write = atl1c_mdio_write; adapter->mii.phy_id_mask = 0x1f; adapter->mii.reg_num_mask = MDIO_CTRL_REG_MASK; + dev_set_threaded(netdev, true); netif_napi_add(netdev, &adapter->napi, atl1c_clean, 64); + netif_napi_add(netdev, &adapter->tx_napi, atl1c_clean_tx, 64); timer_setup(&adapter->phy_config_timer, atl1c_phy_config, 0); /* setup the private structure */ err = atl1c_sw_init(adapter); diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c 
b/drivers/net/ethernet/broadcom/bcm4908_enet.c index b7afac5c7ca7..60d908507f51 100644 --- a/drivers/net/ethernet/broadcom/bcm4908_enet.c +++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c @@ -686,7 +686,6 @@ static int bcm4908_enet_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct net_device *netdev; struct bcm4908_enet *enet; - const u8 *mac; int err; netdev = devm_alloc_etherdev(dev, sizeof(*enet)); @@ -716,10 +715,8 @@ static int bcm4908_enet_probe(struct platform_device *pdev) return err; SET_NETDEV_DEV(netdev, &pdev->dev); - mac = of_get_mac_address(dev->of_node); - if (!IS_ERR(mac)) - ether_addr_copy(netdev->dev_addr, mac); - else + err = of_get_mac_address(dev->of_node, netdev->dev_addr); + if (err) eth_hw_addr_random(netdev); netdev->netdev_ops = &bcm4908_enet_netdev_ops; netdev->min_mtu = ETH_ZLEN; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 777bbf6d2586..d9f0f0df8f7b 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -2457,7 +2457,6 @@ static int bcm_sysport_probe(struct platform_device *pdev) struct bcm_sysport_priv *priv; struct device_node *dn; struct net_device *dev; - const void *macaddr; u32 txq, rxq; int ret; @@ -2552,12 +2551,10 @@ static int bcm_sysport_probe(struct platform_device *pdev) } /* Initialize netdevice members */ - macaddr = of_get_mac_address(dn); - if (IS_ERR(macaddr)) { + ret = of_get_mac_address(dn, dev->dev_addr); + if (ret) { dev_warn(&pdev->dev, "using random Ethernet MAC\n"); eth_hw_addr_random(dev); - } else { - ether_addr_copy(dev->dev_addr, macaddr); } SET_NETDEV_DEV(dev, &pdev->dev); diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c index a5fd161ab5ee..85fa0ab7201c 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c @@ -115,7 +115,7 @@ static int bgmac_probe(struct bcma_device *core) struct ssb_sprom *sprom = &core->bus->sprom; struct mii_bus *mii_bus; struct bgmac *bgmac; - const u8 *mac = NULL; + const u8 *mac; int err; bgmac = bgmac_alloc(&core->dev); @@ -128,11 +128,10 @@ static int bgmac_probe(struct bcma_device *core) bcma_set_drvdata(core, bgmac); - if (bgmac->dev->of_node) - mac = of_get_mac_address(bgmac->dev->of_node); + err = of_get_mac_address(bgmac->dev->of_node, bgmac->net_dev->dev_addr); /* If no MAC address assigned via device tree, check SPROM */ - if (IS_ERR_OR_NULL(mac)) { + if (err) { switch (core->core_unit) { case 0: mac = sprom->et0mac; @@ -149,10 +148,9 @@ static int bgmac_probe(struct bcma_device *core) err = -ENOTSUPP; goto err; } + ether_addr_copy(bgmac->net_dev->dev_addr, mac); } - ether_addr_copy(bgmac->net_dev->dev_addr, mac); - /* On BCM4706 we need common core to access PHY */ if (core->id.id == BCMA_CORE_4706_MAC_GBIT && !core->bus->drv_gmac_cmn.core) { diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c index f37f1c58f368..9834b77cf4b6 100644 --- a/drivers/net/ethernet/broadcom/bgmac-platform.c +++ b/drivers/net/ethernet/broadcom/bgmac-platform.c @@ -173,7 +173,7 @@ static int bgmac_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; struct bgmac *bgmac; struct resource *regs; - const u8 *mac_addr; + int ret; bgmac = bgmac_alloc(&pdev->dev); if (!bgmac) @@ -192,11 +192,10 @@ static int bgmac_probe(struct platform_device *pdev) bgmac->dev = &pdev->dev; bgmac->dma_dev = &pdev->dev; - 
mac_addr = of_get_mac_address(np); - if (!IS_ERR(mac_addr)) - ether_addr_copy(bgmac->net_dev->dev_addr, mac_addr); - else - dev_warn(&pdev->dev, "MAC address not present in device tree\n"); + ret = of_get_mac_address(np, bgmac->net_dev->dev_addr); + if (ret) + dev_warn(&pdev->dev, + "MAC address not present in device tree\n"); bgmac->irq = platform_get_irq(pdev, 0); if (bgmac->irq < 0) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 2f8b193a772d..832252313b18 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1930,6 +1930,20 @@ static int bnxt_get_fecparam(struct net_device *dev, return 0; } +static void bnxt_get_fec_stats(struct net_device *dev, + struct ethtool_fec_stats *fec_stats) +{ + struct bnxt *bp = netdev_priv(dev); + u64 *rx; + + if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) + return; + + rx = bp->rx_port_stats_ext.sw_stats; + fec_stats->corrected_bits.total = + *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_corrected_bits)); +} + static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info, u32 fec) { @@ -3976,6 +3990,127 @@ ethtool_init_exit: mutex_unlock(&bp->hwrm_cmd_lock); } +static void bnxt_get_eth_phy_stats(struct net_device *dev, + struct ethtool_eth_phy_stats *phy_stats) +{ + struct bnxt *bp = netdev_priv(dev); + u64 *rx; + + if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) + return; + + rx = bp->rx_port_stats_ext.sw_stats; + phy_stats->SymbolErrorDuringCarrier = + *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_pcs_symbol_err)); +} + +static void bnxt_get_eth_mac_stats(struct net_device *dev, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct bnxt *bp = netdev_priv(dev); + u64 *rx, *tx; + + if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS)) + return; + + rx = bp->port_stats.sw_stats; + tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + + mac_stats->FramesReceivedOK = + BNXT_GET_RX_PORT_STATS64(rx, rx_good_frames); + mac_stats->FramesTransmittedOK = + BNXT_GET_TX_PORT_STATS64(tx, tx_good_frames); +} + +static void bnxt_get_eth_ctrl_stats(struct net_device *dev, + struct ethtool_eth_ctrl_stats *ctrl_stats) +{ + struct bnxt *bp = netdev_priv(dev); + u64 *rx; + + if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS)) + return; + + rx = bp->port_stats.sw_stats; + ctrl_stats->MACControlFramesReceived = + BNXT_GET_RX_PORT_STATS64(rx, rx_ctrl_frames); +} + +static const struct ethtool_rmon_hist_range bnxt_rmon_ranges[] = { + { 0, 64 }, + { 65, 127 }, + { 128, 255 }, + { 256, 511 }, + { 512, 1023 }, + { 1024, 1518 }, + { 1519, 2047 }, + { 2048, 4095 }, + { 4096, 9216 }, + { 9217, 16383 }, + {} +}; + +static void bnxt_get_rmon_stats(struct net_device *dev, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **ranges) +{ + struct bnxt *bp = netdev_priv(dev); + u64 *rx, *tx; + + if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS)) + return; + + rx = bp->port_stats.sw_stats; + tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + + rmon_stats->jabbers = + BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames); + rmon_stats->oversize_pkts = + BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames); + rmon_stats->undersize_pkts = + BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames); + + rmon_stats->hist[0] = BNXT_GET_RX_PORT_STATS64(rx, rx_64b_frames); + rmon_stats->hist[1] = BNXT_GET_RX_PORT_STATS64(rx, rx_65b_127b_frames); + rmon_stats->hist[2] = BNXT_GET_RX_PORT_STATS64(rx, 
rx_128b_255b_frames); + rmon_stats->hist[3] = BNXT_GET_RX_PORT_STATS64(rx, rx_256b_511b_frames); + rmon_stats->hist[4] = + BNXT_GET_RX_PORT_STATS64(rx, rx_512b_1023b_frames); + rmon_stats->hist[5] = + BNXT_GET_RX_PORT_STATS64(rx, rx_1024b_1518b_frames); + rmon_stats->hist[6] = + BNXT_GET_RX_PORT_STATS64(rx, rx_1519b_2047b_frames); + rmon_stats->hist[7] = + BNXT_GET_RX_PORT_STATS64(rx, rx_2048b_4095b_frames); + rmon_stats->hist[8] = + BNXT_GET_RX_PORT_STATS64(rx, rx_4096b_9216b_frames); + rmon_stats->hist[9] = + BNXT_GET_RX_PORT_STATS64(rx, rx_9217b_16383b_frames); + + rmon_stats->hist_tx[0] = + BNXT_GET_TX_PORT_STATS64(tx, tx_64b_frames); + rmon_stats->hist_tx[1] = + BNXT_GET_TX_PORT_STATS64(tx, tx_65b_127b_frames); + rmon_stats->hist_tx[2] = + BNXT_GET_TX_PORT_STATS64(tx, tx_128b_255b_frames); + rmon_stats->hist_tx[3] = + BNXT_GET_TX_PORT_STATS64(tx, tx_256b_511b_frames); + rmon_stats->hist_tx[4] = + BNXT_GET_TX_PORT_STATS64(tx, tx_512b_1023b_frames); + rmon_stats->hist_tx[5] = + BNXT_GET_TX_PORT_STATS64(tx, tx_1024b_1518b_frames); + rmon_stats->hist_tx[6] = + BNXT_GET_TX_PORT_STATS64(tx, tx_1519b_2047b_frames); + rmon_stats->hist_tx[7] = + BNXT_GET_TX_PORT_STATS64(tx, tx_2048b_4095b_frames); + rmon_stats->hist_tx[8] = + BNXT_GET_TX_PORT_STATS64(tx, tx_4096b_9216b_frames); + rmon_stats->hist_tx[9] = + BNXT_GET_TX_PORT_STATS64(tx, tx_9217b_16383b_frames); + + *ranges = bnxt_rmon_ranges; +} + void bnxt_ethtool_free(struct bnxt *bp) { kfree(bp->test_info); @@ -3991,6 +4126,7 @@ const struct ethtool_ops bnxt_ethtool_ops = { ETHTOOL_COALESCE_USE_ADAPTIVE_RX, .get_link_ksettings = bnxt_get_link_ksettings, .set_link_ksettings = bnxt_set_link_ksettings, + .get_fec_stats = bnxt_get_fec_stats, .get_fecparam = bnxt_get_fecparam, .set_fecparam = bnxt_set_fecparam, .get_pause_stats = bnxt_get_pause_stats, @@ -4034,4 +4170,8 @@ const struct ethtool_ops bnxt_ethtool_ops = { .set_dump = bnxt_set_dump, .get_dump_flag = bnxt_get_dump_flag, .get_dump_data = bnxt_get_dump_data, + .get_eth_phy_stats = bnxt_get_eth_phy_stats, + .get_eth_mac_stats = bnxt_get_eth_mac_stats, + .get_eth_ctrl_stats = bnxt_get_eth_ctrl_stats, + .get_rmon_stats = bnxt_get_rmon_stats, }; diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index ffd56a23f8b0..0e94db9cd45d 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -3946,6 +3946,7 @@ static int macb_init(struct platform_device *pdev) reg = gem_readl(bp, DCFG8); bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3), GEM_BFEXT(T2SCR, reg)); + INIT_LIST_HEAD(&bp->rx_fs_list.list); if (bp->max_tuples > 0) { /* also needs one ethtype match to check IPv4 */ if (GEM_BFEXT(SCR2ETH, reg) > 0) { @@ -3956,7 +3957,6 @@ static int macb_init(struct platform_device *pdev) /* Filtering is supported in hw but don't enable it in kernel now */ dev->hw_features |= NETIF_F_NTUPLE; /* init Rx flow definitions */ - INIT_LIST_HEAD(&bp->rx_fs_list.list); bp->rx_fs_list.count = 0; spin_lock_init(&bp->rx_fs_lock); } else @@ -4649,7 +4649,6 @@ static int macb_probe(struct platform_device *pdev) struct net_device *dev; struct resource *regs; void __iomem *mem; - const char *mac; struct macb *bp; int err, val; @@ -4764,15 +4763,11 @@ static int macb_probe(struct platform_device *pdev) if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR) bp->rx_intr_mask |= MACB_BIT(RXUBR); - mac = of_get_mac_address(np); - if (PTR_ERR(mac) == -EPROBE_DEFER) { - err = -EPROBE_DEFER; + err = of_get_mac_address(np, bp->dev->dev_addr); + if (err 
== -EPROBE_DEFER) goto err_out_free_netdev; - } else if (!IS_ERR_OR_NULL(mac)) { - ether_addr_copy(bp->dev->dev_addr, mac); - } else { + else if (err) macb_get_hwaddr(bp); - } err = of_get_phy_mode(np, &interface); if (err) diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h index b248966837b4..7aad40b2aa73 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h +++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h @@ -412,7 +412,7 @@ | CN6XXX_INTR_M0UNWI_ERR \ | CN6XXX_INTR_M1UPB0_ERR \ | CN6XXX_INTR_M1UPWI_ERR \ - | CN6XXX_INTR_M1UPB0_ERR \ + | CN6XXX_INTR_M1UNB0_ERR \ | CN6XXX_INTR_M1UNWI_ERR \ | CN6XXX_INTR_INSTR_DB_OF_ERR \ | CN6XXX_INTR_SLIST_DB_OF_ERR \ diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index ecffebd513be..48ff6fb0eed9 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -1385,7 +1385,6 @@ static int octeon_mgmt_probe(struct platform_device *pdev) struct net_device *netdev; struct octeon_mgmt *p; const __be32 *data; - const u8 *mac; struct resource *res_mix; struct resource *res_agl; struct resource *res_agl_prt_ctl; @@ -1502,11 +1501,8 @@ static int octeon_mgmt_probe(struct platform_device *pdev) netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM; netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM - VLAN_HLEN; - mac = of_get_mac_address(pdev->dev.of_node); - - if (!IS_ERR(mac)) - ether_addr_copy(netdev->dev_addr, mac); - else + result = of_get_mac_address(pdev->dev.of_node, netdev->dev_addr); + if (result) eth_hw_addr_random(netdev); p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 8ff28ed04b7f..0c783aadf393 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -1474,7 +1474,6 @@ static int bgx_init_of_phy(struct bgx *bgx) device_for_each_child_node(&bgx->pdev->dev, fwn) { struct phy_device *pd; struct device_node *phy_np; - const char *mac; /* Should always be an OF node. But if it is not, we * cannot handle it, so exit the loop. @@ -1483,9 +1482,7 @@ static int bgx_init_of_phy(struct bgx *bgx) if (!node) break; - mac = of_get_mac_address(node); - if (!IS_ERR(mac)) - ether_addr_copy(bgx->lmac[lmac].mac, mac); + of_get_mac_address(node, bgx->lmac[lmac].mac); SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev); bgx->lmac[lmac].lmacid = lmac; diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c index 1115b8f9ea4e..a3f5b80888e5 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c @@ -350,18 +350,6 @@ static int chcr_set_tcb_field(struct chcr_ktls_info *tx_info, u16 word, } /* - * chcr_ktls_mark_tcb_close: mark tcb state to CLOSE - * @tx_info - driver specific tls info. - * return: NET_TX_OK/NET_XMIT_DROP. - */ -static int chcr_ktls_mark_tcb_close(struct chcr_ktls_info *tx_info) -{ - return chcr_set_tcb_field(tx_info, TCB_T_STATE_W, - TCB_T_STATE_V(TCB_T_STATE_M), - CHCR_TCB_STATE_CLOSED, 1); -} - -/* * chcr_ktls_dev_del: call back for tls_dev_del. * Remove the tid and l2t entry and close the connection. * it per connection basis. 
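The one-line cn66xx_regs.h fix above deserves a note: the interrupt mask ORed CN6XXX_INTR_M1UPB0_ERR twice, so CN6XXX_INTR_M1UNB0_ERR was never part of the mask, and x | x == x makes the duplicate invisible to the compiler. A contrived illustration of the failure mode (bit positions made up for the example):

	#define INTR_M1UPB0_ERR	BIT(10)
	#define INTR_M1UNB0_ERR	BIT(12)

	/* buggy: BIT(10) ORed twice, BIT(12) silently missing */
	#define INTR_MASK	(INTR_M1UPB0_ERR | INTR_M1UPB0_ERR)

	/* fixed: both error sources can now raise the interrupt */
	#define INTR_MASK	(INTR_M1UPB0_ERR | INTR_M1UNB0_ERR)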
@@ -395,8 +383,6 @@ static void chcr_ktls_dev_del(struct net_device *netdev, /* clear tid */ if (tx_info->tid != -1) { - /* clear tcb state and then release tid */ - chcr_ktls_mark_tcb_close(tx_info); cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan, tx_info->tid, tx_info->ip_family); } @@ -574,7 +560,6 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk, return 0; free_tid: - chcr_ktls_mark_tcb_close(tx_info); #if IS_ENABLED(CONFIG_IPV6) /* clear clip entry */ if (tx_info->ip_family == AF_INET6) @@ -672,10 +657,6 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, if (tx_info->pending_close) { spin_unlock(&tx_info->lock); if (!status) { - /* it's a late success, tcb status is established, - * mark it close. - */ - chcr_ktls_mark_tcb_close(tx_info); cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan, tid, tx_info->ip_family); } @@ -1664,54 +1645,6 @@ static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb, } /* - * chcr_ktls_update_snd_una: Reset the SEND_UNA. It will be done to avoid - * sending the same segment again. It will discard the segment which is before - * the current tx max. - * @tx_info - driver specific tls info. - * @q - TX queue. - * return: NET_TX_OK/NET_XMIT_DROP. - */ -static int chcr_ktls_update_snd_una(struct chcr_ktls_info *tx_info, - struct sge_eth_txq *q) -{ - struct fw_ulptx_wr *wr; - unsigned int ndesc; - int credits; - void *pos; - u32 len; - - len = sizeof(*wr) + roundup(CHCR_SET_TCB_FIELD_LEN, 16); - ndesc = DIV_ROUND_UP(len, 64); - - credits = chcr_txq_avail(&q->q) - ndesc; - if (unlikely(credits < 0)) { - chcr_eth_txq_stop(q); - return NETDEV_TX_BUSY; - } - - pos = &q->q.desc[q->q.pidx]; - - wr = pos; - /* ULPTX wr */ - wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR)); - wr->cookie = 0; - /* fill len in wr field */ - wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16))); - - pos += sizeof(*wr); - - pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos, - TCB_SND_UNA_RAW_W, - TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M), - TCB_SND_UNA_RAW_V(0), 0); - - chcr_txq_advance(&q->q, ndesc); - cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc); - - return 0; -} - -/* * chcr_end_part_handler: This handler will handle the record which * is complete or if record's end part is received. T6 adapter has a issue that * it can't send out TAG with partial record so if its an end part then we have @@ -1735,7 +1668,9 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info, struct sge_eth_txq *q, u32 skb_offset, u32 tls_end_offset, bool last_wr) { + bool free_skb_if_tx_fails = false; struct sk_buff *nskb = NULL; + /* check if it is a complete record */ if (tls_end_offset == record->len) { nskb = skb; @@ -1758,6 +1693,8 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info, if (last_wr) dev_kfree_skb_any(skb); + else + free_skb_if_tx_fails = true; last_wr = true; @@ -1769,6 +1706,8 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info, record->num_frags, (last_wr && tcp_push_no_fin), mss)) { + if (free_skb_if_tx_fails) + dev_kfree_skb_any(skb); goto out; } tx_info->prev_seq = record->end_seq; @@ -1905,11 +1844,6 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info, /* reset tcp_seq as per the prior_data_required len */ tcp_seq -= prior_data_len; } - /* reset snd una, so the middle record won't send the already - * sent part. 
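The chcr_ktls_xmit() hunks below are the core of this change: instead of taking tx_ctx->base.lock once per loop iteration and pinning the record pages with __skb_frag_ref()/__skb_frag_unref() across the unlocked window, the driver now holds the lock for the whole record walk. A compressed sketch of the resulting shape, reusing the driver's names:

	spin_lock_irqsave(&tx_ctx->base.lock, flags);
	do {
		cxgb4_reclaim_completed_tx(adap, &q->q, true);
		record = tls_get_record(&tx_ctx->base, tcp_seq, &tx_info->record_no);
		/* ... build and ring the work requests for this record ... */
		if (ret) {
			/* every error path drops the lock exactly once */
			spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
			goto out;
		}
	} while (data_len > 0);
	spin_unlock_irqrestore(&tx_ctx->base.lock, flags);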
- */ - if (chcr_ktls_update_snd_una(tx_info, q)) - goto out; atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_middle_pkts); } else { atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_start_pkts); @@ -2010,12 +1944,11 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) * we will send the complete record again. */ + spin_lock_irqsave(&tx_ctx->base.lock, flags); + do { - int i; cxgb4_reclaim_completed_tx(adap, &q->q, true); - /* lock taken */ - spin_lock_irqsave(&tx_ctx->base.lock, flags); /* fetch the tls record */ record = tls_get_record(&tx_ctx->base, tcp_seq, &tx_info->record_no); @@ -2074,11 +2007,11 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) tls_end_offset, skb_offset, 0); - spin_unlock_irqrestore(&tx_ctx->base.lock, flags); if (ret) { /* free the refcount taken earlier */ if (tls_end_offset < data_len) dev_kfree_skb_any(skb); + spin_unlock_irqrestore(&tx_ctx->base.lock, flags); goto out; } @@ -2088,16 +2021,6 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) continue; } - /* increase page reference count of the record, so that there - * won't be any chance of page free in middle if in case stack - * receives ACK and try to delete the record. - */ - for (i = 0; i < record->num_frags; i++) - __skb_frag_ref(&record->frags[i]); - /* lock cleared */ - spin_unlock_irqrestore(&tx_ctx->base.lock, flags); - - /* if a tls record is finishing in this SKB */ if (tls_end_offset <= data_len) { ret = chcr_end_part_handler(tx_info, skb, record, @@ -2122,13 +2045,9 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) data_len = 0; } - /* clear the frag ref count which increased locally before */ - for (i = 0; i < record->num_frags; i++) { - /* clear the frag ref count */ - __skb_frag_unref(&record->frags[i]); - } /* if any failure, come out from the loop. 
*/ if (ret) { + spin_unlock_irqrestore(&tx_ctx->base.lock, flags); if (th->fin) dev_kfree_skb_any(skb); @@ -2143,6 +2062,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) } while (data_len > 0); + spin_unlock_irqrestore(&tx_ctx->base.lock, flags); atomic64_inc(&port_stats->ktls_tx_encrypted_packets); atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes); diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 252adfa5d837..e7f7121821be 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1385,7 +1385,7 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev) { struct dm9000_plat_data *pdata; struct device_node *np = dev->of_node; - const void *mac_addr; + int ret; if (!IS_ENABLED(CONFIG_OF) || !np) return ERR_PTR(-ENXIO); @@ -1399,11 +1399,9 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev) if (of_find_property(np, "davicom,no-eeprom", NULL)) pdata->flags |= DM9000_PLATF_NO_EEPROM; - mac_addr = of_get_mac_address(np); - if (!IS_ERR(mac_addr)) - ether_addr_copy(pdata->dev_addr, mac_addr); - else if (PTR_ERR(mac_addr) == -EPROBE_DEFER) - return ERR_CAST(mac_addr); + ret = of_get_mac_address(np, pdata->dev_addr); + if (ret == -EPROBE_DEFER) + return ERR_PTR(ret); return pdata; } @@ -1471,8 +1469,10 @@ dm9000_probe(struct platform_device *pdev) /* Init network device */ ndev = alloc_etherdev(sizeof(struct board_info)); - if (!ndev) - return -ENOMEM; + if (!ndev) { + ret = -ENOMEM; + goto out_regulator_disable; + } SET_NETDEV_DEV(ndev, &pdev->dev); diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index 3d9b0b161e24..e1b43b07755b 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -1151,11 +1151,7 @@ static int ethoc_probe(struct platform_device *pdev) ether_addr_copy(netdev->dev_addr, pdata->hwaddr); priv->phy_id = pdata->phy_id; } else { - const void *mac; - - mac = of_get_mac_address(pdev->dev.of_node); - if (!IS_ERR(mac)) - ether_addr_copy(netdev->dev_addr, mac); + of_get_mac_address(pdev->dev.of_node, netdev->dev_addr); priv->phy_id = -1; } diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index 815fb62c4b02..e3954d8835e7 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c @@ -575,7 +575,6 @@ static s32 nps_enet_probe(struct platform_device *pdev) struct net_device *ndev; struct nps_enet_priv *priv; s32 err = 0; - const char *mac_addr; if (!dev->of_node) return -ENODEV; @@ -602,10 +601,8 @@ static s32 nps_enet_probe(struct platform_device *pdev) dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs_base); /* set kernel MAC address to dev */ - mac_addr = of_get_mac_address(dev->of_node); - if (!IS_ERR(mac_addr)) - ether_addr_copy(ndev->dev_addr, mac_addr); - else + err = of_get_mac_address(dev->of_node, ndev->dev_addr); + if (err) eth_hw_addr_random(ndev); /* Get IRQ number */ diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile index 644ef9ae02a3..c2ef74052ef8 100644 --- a/drivers/net/ethernet/freescale/dpaa2/Makefile +++ b/drivers/net/ethernet/freescale/dpaa2/Makefile @@ -11,7 +11,7 @@ fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o dpa fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DCB} += dpaa2-eth-dcb.o fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o 
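The dm9000 hunk above also fixes its probe error path: once the board regulator has been enabled, a failing alloc_etherdev() must not simply return, or the enable count is leaked. A sketch of the unwind shape, assuming the regulator obtained earlier in probe is held in a variable named power as in the driver:

	ndev = alloc_etherdev(sizeof(struct board_info));
	if (!ndev) {
		ret = -ENOMEM;
		goto out_regulator_disable;	/* was: return -ENOMEM */
	}
	/* ... rest of probe ... */

out_regulator_disable:
	if (!IS_ERR(power))
		regulator_disable(power);
	return ret;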
-fsl-dpaa2-switch-objs := dpaa2-switch.o dpaa2-switch-ethtool.o dpsw.o +fsl-dpaa2-switch-objs := dpaa2-switch.o dpaa2-switch-ethtool.o dpsw.o dpaa2-switch-flower.o # Needed by the tracing framework CFLAGS_dpaa2-eth.o := -I$(src) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c new file mode 100644 index 000000000000..f9451ec5f2cb --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c @@ -0,0 +1,492 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DPAA2 Ethernet Switch flower support + * + * Copyright 2021 NXP + * + */ + +#include "dpaa2-switch.h" + +static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls, + struct dpsw_acl_key *acl_key) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct flow_dissector *dissector = rule->match.dissector; + struct netlink_ext_ack *extack = cls->common.extack; + struct dpsw_acl_fields *acl_h, *acl_m; + + if (dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_VLAN) | + BIT(FLOW_DISSECTOR_KEY_PORTS) | + BIT(FLOW_DISSECTOR_KEY_IP) | + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS))) { + NL_SET_ERR_MSG_MOD(extack, + "Unsupported keys used"); + return -EOPNOTSUPP; + } + + acl_h = &acl_key->match; + acl_m = &acl_key->mask; + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + + flow_rule_match_basic(rule, &match); + acl_h->l3_protocol = match.key->ip_proto; + acl_h->l2_ether_type = be16_to_cpu(match.key->n_proto); + acl_m->l3_protocol = match.mask->ip_proto; + acl_m->l2_ether_type = be16_to_cpu(match.mask->n_proto); + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + + flow_rule_match_eth_addrs(rule, &match); + ether_addr_copy(acl_h->l2_dest_mac, &match.key->dst[0]); + ether_addr_copy(acl_h->l2_source_mac, &match.key->src[0]); + ether_addr_copy(acl_m->l2_dest_mac, &match.mask->dst[0]); + ether_addr_copy(acl_m->l2_source_mac, &match.mask->src[0]); + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + + flow_rule_match_vlan(rule, &match); + acl_h->l2_vlan_id = match.key->vlan_id; + acl_h->l2_tpid = be16_to_cpu(match.key->vlan_tpid); + acl_h->l2_pcp_dei = match.key->vlan_priority << 1 | + match.key->vlan_dei; + + acl_m->l2_vlan_id = match.mask->vlan_id; + acl_m->l2_tpid = be16_to_cpu(match.mask->vlan_tpid); + acl_m->l2_pcp_dei = match.mask->vlan_priority << 1 | + match.mask->vlan_dei; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { + struct flow_match_ipv4_addrs match; + + flow_rule_match_ipv4_addrs(rule, &match); + acl_h->l3_source_ip = be32_to_cpu(match.key->src); + acl_h->l3_dest_ip = be32_to_cpu(match.key->dst); + acl_m->l3_source_ip = be32_to_cpu(match.mask->src); + acl_m->l3_dest_ip = be32_to_cpu(match.mask->dst); + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; + + flow_rule_match_ports(rule, &match); + acl_h->l4_source_port = be16_to_cpu(match.key->src); + acl_h->l4_dest_port = be16_to_cpu(match.key->dst); + acl_m->l4_source_port = be16_to_cpu(match.mask->src); + acl_m->l4_dest_port = be16_to_cpu(match.mask->dst); + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { + struct flow_match_ip match; + + flow_rule_match_ip(rule, &match); + if (match.mask->ttl != 0) 
{ + NL_SET_ERR_MSG_MOD(extack, + "Matching on TTL not supported"); + return -EOPNOTSUPP; + } + + if ((match.mask->tos & 0x3) != 0) { + NL_SET_ERR_MSG_MOD(extack, + "Matching on ECN not supported, only DSCP"); + return -EOPNOTSUPP; + } + + acl_h->l3_dscp = match.key->tos >> 2; + acl_m->l3_dscp = match.mask->tos >> 2; + } + + return 0; +} + +int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl, + struct dpaa2_switch_acl_entry *entry) +{ + struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg; + struct ethsw_core *ethsw = acl_tbl->ethsw; + struct dpsw_acl_key *acl_key = &entry->key; + struct device *dev = ethsw->dev; + u8 *cmd_buff; + int err; + + cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL); + if (!cmd_buff) + return -ENOMEM; + + dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff); + + acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff, + DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) { + dev_err(dev, "DMA mapping failed\n"); + return -EFAULT; + } + + err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle, + acl_tbl->id, acl_entry_cfg); + + dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff), + DMA_TO_DEVICE); + if (err) { + dev_err(dev, "dpsw_acl_add_entry() failed %d\n", err); + return err; + } + + kfree(cmd_buff); + + return 0; +} + +static int dpaa2_switch_acl_entry_remove(struct dpaa2_switch_acl_tbl *acl_tbl, + struct dpaa2_switch_acl_entry *entry) +{ + struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg; + struct dpsw_acl_key *acl_key = &entry->key; + struct ethsw_core *ethsw = acl_tbl->ethsw; + struct device *dev = ethsw->dev; + u8 *cmd_buff; + int err; + + cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL); + if (!cmd_buff) + return -ENOMEM; + + dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff); + + acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff, + DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) { + dev_err(dev, "DMA mapping failed\n"); + return -EFAULT; + } + + err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle, + acl_tbl->id, acl_entry_cfg); + + dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff), + DMA_TO_DEVICE); + if (err) { + dev_err(dev, "dpsw_acl_remove_entry() failed %d\n", err); + return err; + } + + kfree(cmd_buff); + + return 0; +} + +static int +dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_acl_tbl *acl_tbl, + struct dpaa2_switch_acl_entry *entry) +{ + struct dpaa2_switch_acl_entry *tmp; + struct list_head *pos, *n; + int index = 0; + + if (list_empty(&acl_tbl->entries)) { + list_add(&entry->list, &acl_tbl->entries); + return index; + } + + list_for_each_safe(pos, n, &acl_tbl->entries) { + tmp = list_entry(pos, struct dpaa2_switch_acl_entry, list); + if (entry->prio < tmp->prio) + break; + index++; + } + list_add(&entry->list, pos->prev); + return index; +} + +static struct dpaa2_switch_acl_entry* +dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_acl_tbl *acl_tbl, + int index) +{ + struct dpaa2_switch_acl_entry *tmp; + int i = 0; + + list_for_each_entry(tmp, &acl_tbl->entries, list) { + if (i == index) + return tmp; + ++i; + } + + return NULL; +} + +static int +dpaa2_switch_acl_entry_set_precedence(struct dpaa2_switch_acl_tbl *acl_tbl, + struct dpaa2_switch_acl_entry *entry, + int precedence) +{ + int err; + + err = dpaa2_switch_acl_entry_remove(acl_tbl, entry); + if (err) + return err; + + entry->cfg.precedence = 
precedence; + return dpaa2_switch_acl_entry_add(acl_tbl, entry); +} + +static int dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_acl_tbl *acl_tbl, + struct dpaa2_switch_acl_entry *entry) +{ + struct dpaa2_switch_acl_entry *tmp; + int index, i, precedence, err; + + /* Add the new ACL entry to the linked list and get its index */ + index = dpaa2_switch_acl_entry_add_to_list(acl_tbl, entry); + + /* Move up in priority the ACL entries to make space + * for the new filter. + */ + precedence = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES - acl_tbl->num_rules - 1; + for (i = 0; i < index; i++) { + tmp = dpaa2_switch_acl_entry_get_by_index(acl_tbl, i); + + err = dpaa2_switch_acl_entry_set_precedence(acl_tbl, tmp, + precedence); + if (err) + return err; + + precedence++; + } + + /* Add the new entry to hardware */ + entry->cfg.precedence = precedence; + err = dpaa2_switch_acl_entry_add(acl_tbl, entry); + acl_tbl->num_rules++; + + return err; +} + +static struct dpaa2_switch_acl_entry * +dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_acl_tbl *acl_tbl, + unsigned long cookie) +{ + struct dpaa2_switch_acl_entry *tmp, *n; + + list_for_each_entry_safe(tmp, n, &acl_tbl->entries, list) { + if (tmp->cookie == cookie) + return tmp; + } + return NULL; +} + +static int +dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_acl_tbl *acl_tbl, + struct dpaa2_switch_acl_entry *entry) +{ + struct dpaa2_switch_acl_entry *tmp, *n; + int index = 0; + + list_for_each_entry_safe(tmp, n, &acl_tbl->entries, list) { + if (tmp->cookie == entry->cookie) + return index; + index++; + } + return -ENOENT; +} + +static int +dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_acl_tbl *acl_tbl, + struct dpaa2_switch_acl_entry *entry) +{ + struct dpaa2_switch_acl_entry *tmp; + int index, i, precedence, err; + + index = dpaa2_switch_acl_entry_get_index(acl_tbl, entry); + + /* Remove from hardware the ACL entry */ + err = dpaa2_switch_acl_entry_remove(acl_tbl, entry); + if (err) + return err; + + acl_tbl->num_rules--; + + /* Remove it from the list also */ + list_del(&entry->list); + + /* Move down in priority the entries over the deleted one */ + precedence = entry->cfg.precedence; + for (i = index - 1; i >= 0; i--) { + tmp = dpaa2_switch_acl_entry_get_by_index(acl_tbl, i); + err = dpaa2_switch_acl_entry_set_precedence(acl_tbl, tmp, + precedence); + if (err) + return err; + + precedence--; + } + + kfree(entry); + + return 0; +} + +static int dpaa2_switch_tc_parse_action(struct ethsw_core *ethsw, + struct flow_action_entry *cls_act, + struct dpsw_acl_result *dpsw_act, + struct netlink_ext_ack *extack) +{ + int err = 0; + + switch (cls_act->id) { + case FLOW_ACTION_TRAP: + dpsw_act->action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF; + break; + case FLOW_ACTION_REDIRECT: + if (!dpaa2_switch_port_dev_check(cls_act->dev)) { + NL_SET_ERR_MSG_MOD(extack, + "Destination not a DPAA2 switch port"); + return -EOPNOTSUPP; + } + + dpsw_act->if_id = dpaa2_switch_get_index(ethsw, cls_act->dev); + dpsw_act->action = DPSW_ACL_ACTION_REDIRECT; + break; + case FLOW_ACTION_DROP: + dpsw_act->action = DPSW_ACL_ACTION_DROP; + break; + default: + NL_SET_ERR_MSG_MOD(extack, + "Action not supported"); + err = -EOPNOTSUPP; + goto out; + } + +out: + return err; +} + +int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_acl_tbl *acl_tbl, + struct flow_cls_offload *cls) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct netlink_ext_ack *extack = cls->common.extack; + struct ethsw_core *ethsw = acl_tbl->ethsw; + struct 
dpaa2_switch_acl_entry *acl_entry; + struct flow_action_entry *act; + int err; + + if (!flow_offload_has_one_action(&rule->action)) { + NL_SET_ERR_MSG(extack, "Only singular actions are supported"); + return -EOPNOTSUPP; + } + + if (dpaa2_switch_acl_tbl_is_full(acl_tbl)) { + NL_SET_ERR_MSG(extack, "Maximum filter capacity reached"); + return -ENOMEM; + } + + acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL); + if (!acl_entry) + return -ENOMEM; + + err = dpaa2_switch_flower_parse_key(cls, &acl_entry->key); + if (err) + goto free_acl_entry; + + act = &rule->action.entries[0]; + err = dpaa2_switch_tc_parse_action(ethsw, act, + &acl_entry->cfg.result, extack); + if (err) + goto free_acl_entry; + + acl_entry->prio = cls->common.prio; + acl_entry->cookie = cls->cookie; + + err = dpaa2_switch_acl_tbl_add_entry(acl_tbl, acl_entry); + if (err) + goto free_acl_entry; + + return 0; + +free_acl_entry: + kfree(acl_entry); + + return err; +} + +int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_acl_tbl *acl_tbl, + struct flow_cls_offload *cls) +{ + struct dpaa2_switch_acl_entry *entry; + + entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(acl_tbl, cls->cookie); + if (!entry) + return 0; + + return dpaa2_switch_acl_tbl_remove_entry(acl_tbl, entry); +} + +int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_acl_tbl *acl_tbl, + struct tc_cls_matchall_offload *cls) +{ + struct netlink_ext_ack *extack = cls->common.extack; + struct ethsw_core *ethsw = acl_tbl->ethsw; + struct dpaa2_switch_acl_entry *acl_entry; + struct flow_action_entry *act; + int err; + + if (!flow_offload_has_one_action(&cls->rule->action)) { + NL_SET_ERR_MSG(extack, "Only singular actions are supported"); + return -EOPNOTSUPP; + } + + if (dpaa2_switch_acl_tbl_is_full(acl_tbl)) { + NL_SET_ERR_MSG(extack, "Maximum filter capacity reached"); + return -ENOMEM; + } + + acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL); + if (!acl_entry) + return -ENOMEM; + + act = &cls->rule->action.entries[0]; + err = dpaa2_switch_tc_parse_action(ethsw, act, + &acl_entry->cfg.result, extack); + if (err) + goto free_acl_entry; + + acl_entry->prio = cls->common.prio; + acl_entry->cookie = cls->cookie; + + err = dpaa2_switch_acl_tbl_add_entry(acl_tbl, acl_entry); + if (err) + goto free_acl_entry; + + return 0; + +free_acl_entry: + kfree(acl_entry); + + return err; +} + +int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_acl_tbl *acl_tbl, + struct tc_cls_matchall_offload *cls) +{ + struct dpaa2_switch_acl_entry *entry; + + entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(acl_tbl, cls->cookie); + if (!entry) + return 0; + + return dpaa2_switch_acl_tbl_remove_entry(acl_tbl, entry); +} diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c index 80efc8116963..05de37c3b64c 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c @@ -14,6 +14,7 @@ #include <linux/kthread.h> #include <linux/workqueue.h> #include <linux/iommu.h> +#include <net/pkt_cls.h> #include <linux/fsl/mc.h> @@ -40,6 +41,17 @@ static struct dpaa2_switch_fdb *dpaa2_switch_fdb_get_unused(struct ethsw_core *e return NULL; } +static struct dpaa2_switch_acl_tbl * +dpaa2_switch_acl_tbl_get_unused(struct ethsw_core *ethsw) +{ + int i; + + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) + if (!ethsw->acls[i].in_use) + return &ethsw->acls[i]; + return NULL; +} + static u16 dpaa2_switch_port_set_fdb(struct ethsw_port_priv *port_priv, struct net_device *bridge_dev)
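Both the flower and matchall entry points above funnel into dpaa2_switch_acl_tbl_add_entry(), where the hardware precedence is derived from the entry's position in the prio-sorted list. A worked example under the constants in this patch:

	/* DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES = 16, two rules installed
	 * (precedences 14 and 15, list sorted by tc prio):
	 *
	 *   base = 16 - num_rules - 1 = 16 - 2 - 1 = 13
	 *
	 * A new rule that sorts to list index 1 re-installs the rule at
	 * index 0 with precedence 13 and lands itself at precedence 14.
	 * Lower precedence values match first (the STP trap sits at 0),
	 * so list order and hardware match order stay in sync.
	 */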
{ @@ -1114,6 +1126,259 @@ err_exit: return NETDEV_TX_OK; } +static int +dpaa2_switch_setup_tc_cls_flower(struct dpaa2_switch_acl_tbl *acl_tbl, + struct flow_cls_offload *f) +{ + switch (f->command) { + case FLOW_CLS_REPLACE: + return dpaa2_switch_cls_flower_replace(acl_tbl, f); + case FLOW_CLS_DESTROY: + return dpaa2_switch_cls_flower_destroy(acl_tbl, f); + default: + return -EOPNOTSUPP; + } +} + +static int +dpaa2_switch_setup_tc_cls_matchall(struct dpaa2_switch_acl_tbl *acl_tbl, + struct tc_cls_matchall_offload *f) +{ + switch (f->command) { + case TC_CLSMATCHALL_REPLACE: + return dpaa2_switch_cls_matchall_replace(acl_tbl, f); + case TC_CLSMATCHALL_DESTROY: + return dpaa2_switch_cls_matchall_destroy(acl_tbl, f); + default: + return -EOPNOTSUPP; + } +} + +static int dpaa2_switch_port_setup_tc_block_cb_ig(enum tc_setup_type type, + void *type_data, + void *cb_priv) +{ + switch (type) { + case TC_SETUP_CLSFLOWER: + return dpaa2_switch_setup_tc_cls_flower(cb_priv, type_data); + case TC_SETUP_CLSMATCHALL: + return dpaa2_switch_setup_tc_cls_matchall(cb_priv, type_data); + default: + return -EOPNOTSUPP; + } +} + +static LIST_HEAD(dpaa2_switch_block_cb_list); + +static int dpaa2_switch_port_acl_tbl_bind(struct ethsw_port_priv *port_priv, + struct dpaa2_switch_acl_tbl *acl_tbl) +{ + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct net_device *netdev = port_priv->netdev; + struct dpsw_acl_if_cfg acl_if_cfg; + int err; + + if (port_priv->acl_tbl) + return -EINVAL; + + acl_if_cfg.if_id[0] = port_priv->idx; + acl_if_cfg.num_ifs = 1; + err = dpsw_acl_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, + acl_tbl->id, &acl_if_cfg); + if (err) { + netdev_err(netdev, "dpsw_acl_add_if err %d\n", err); + return err; + } + + acl_tbl->ports |= BIT(port_priv->idx); + port_priv->acl_tbl = acl_tbl; + + return 0; +} + +static int +dpaa2_switch_port_acl_tbl_unbind(struct ethsw_port_priv *port_priv, + struct dpaa2_switch_acl_tbl *acl_tbl) +{ + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct net_device *netdev = port_priv->netdev; + struct dpsw_acl_if_cfg acl_if_cfg; + int err; + + if (port_priv->acl_tbl != acl_tbl) + return -EINVAL; + + acl_if_cfg.if_id[0] = port_priv->idx; + acl_if_cfg.num_ifs = 1; + err = dpsw_acl_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle, + acl_tbl->id, &acl_if_cfg); + if (err) { + netdev_err(netdev, "dpsw_acl_add_if err %d\n", err); + return err; + } + + acl_tbl->ports &= ~BIT(port_priv->idx); + port_priv->acl_tbl = NULL; + return 0; +} + +static int dpaa2_switch_port_block_bind(struct ethsw_port_priv *port_priv, + struct dpaa2_switch_acl_tbl *acl_tbl) +{ + struct dpaa2_switch_acl_tbl *old_acl_tbl = port_priv->acl_tbl; + int err; + + /* If the port is already bound to this ACL table then do nothing. This + * can happen when this port is the first one to join a tc block + */ + if (port_priv->acl_tbl == acl_tbl) + return 0; + + err = dpaa2_switch_port_acl_tbl_unbind(port_priv, old_acl_tbl); + if (err) + return err; + + /* Mark the previous ACL table as being unused if this was the last + * port that was using it. + */ + if (old_acl_tbl->ports == 0) + old_acl_tbl->in_use = false; + + return dpaa2_switch_port_acl_tbl_bind(port_priv, acl_tbl); +} + +static int dpaa2_switch_port_block_unbind(struct ethsw_port_priv *port_priv, + struct dpaa2_switch_acl_tbl *acl_tbl) +{ + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct dpaa2_switch_acl_tbl *new_acl_tbl; + int err; + + /* We are the last port that leaves a block (an ACL table). + * We'll continue to use this table. 
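The bind path just below is the standard shared tc block idiom: flow_block_cb_lookup() tells a port whether another port of the same switch has already joined this block; only the first joiner allocates and registers the callback, later ones just look up the shared ACL table and take a reference. Skeleton with error handling elided (cb abbreviates dpaa2_switch_port_setup_tc_block_cb_ig):

	block_cb = flow_block_cb_lookup(f->block, cb, ethsw);
	if (!block_cb) {
		/* first port: keep using our private table and publish it */
		block_cb = flow_block_cb_alloc(cb, ethsw, acl_tbl, NULL);
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &dpaa2_switch_block_cb_list);
	} else {
		/* later ports: join the table the block already uses */
		acl_tbl = flow_block_cb_priv(block_cb);
	}
	flow_block_cb_incref(block_cb);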
+ */ + if (acl_tbl->ports == BIT(port_priv->idx)) + return 0; + + err = dpaa2_switch_port_acl_tbl_unbind(port_priv, acl_tbl); + if (err) + return err; + + if (acl_tbl->ports == 0) + acl_tbl->in_use = false; + + new_acl_tbl = dpaa2_switch_acl_tbl_get_unused(ethsw); + new_acl_tbl->in_use = true; + return dpaa2_switch_port_acl_tbl_bind(port_priv, new_acl_tbl); +} + +static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev, + struct flow_block_offload *f) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct dpaa2_switch_acl_tbl *acl_tbl; + struct flow_block_cb *block_cb; + bool register_block = false; + int err; + + block_cb = flow_block_cb_lookup(f->block, + dpaa2_switch_port_setup_tc_block_cb_ig, + ethsw); + + if (!block_cb) { + /* If the ACL table is not already known, then this port must + * be the first to join it. In this case, we can just continue + * to use our private table + */ + acl_tbl = port_priv->acl_tbl; + + block_cb = flow_block_cb_alloc(dpaa2_switch_port_setup_tc_block_cb_ig, + ethsw, acl_tbl, NULL); + if (IS_ERR(block_cb)) + return PTR_ERR(block_cb); + + register_block = true; + } else { + acl_tbl = flow_block_cb_priv(block_cb); + } + + flow_block_cb_incref(block_cb); + err = dpaa2_switch_port_block_bind(port_priv, acl_tbl); + if (err) + goto err_block_bind; + + if (register_block) { + flow_block_cb_add(block_cb, f); + list_add_tail(&block_cb->driver_list, + &dpaa2_switch_block_cb_list); + } + + return 0; + +err_block_bind: + if (!flow_block_cb_decref(block_cb)) + flow_block_cb_free(block_cb); + return err; +} + +static void dpaa2_switch_setup_tc_block_unbind(struct net_device *netdev, + struct flow_block_offload *f) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct dpaa2_switch_acl_tbl *acl_tbl; + struct flow_block_cb *block_cb; + int err; + + block_cb = flow_block_cb_lookup(f->block, + dpaa2_switch_port_setup_tc_block_cb_ig, + ethsw); + if (!block_cb) + return; + + acl_tbl = flow_block_cb_priv(block_cb); + err = dpaa2_switch_port_block_unbind(port_priv, acl_tbl); + if (!err && !flow_block_cb_decref(block_cb)) { + flow_block_cb_remove(block_cb, f); + list_del(&block_cb->driver_list); + } +} + +static int dpaa2_switch_setup_tc_block(struct net_device *netdev, + struct flow_block_offload *f) +{ + if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + f->driver_block_list = &dpaa2_switch_block_cb_list; + + switch (f->command) { + case FLOW_BLOCK_BIND: + return dpaa2_switch_setup_tc_block_bind(netdev, f); + case FLOW_BLOCK_UNBIND: + dpaa2_switch_setup_tc_block_unbind(netdev, f); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int dpaa2_switch_port_setup_tc(struct net_device *netdev, + enum tc_setup_type type, + void *type_data) +{ + switch (type) { + case TC_SETUP_BLOCK: { + return dpaa2_switch_setup_tc_block(netdev, type_data); + } + default: + return -EOPNOTSUPP; + } + + return 0; +} + static const struct net_device_ops dpaa2_switch_port_ops = { .ndo_open = dpaa2_switch_port_open, .ndo_stop = dpaa2_switch_port_stop, @@ -1130,6 +1395,7 @@ static const struct net_device_ops dpaa2_switch_port_ops = { .ndo_start_xmit = dpaa2_switch_port_tx, .ndo_get_port_parent_id = dpaa2_switch_port_parent_id, .ndo_get_phys_port_name = dpaa2_switch_port_get_phys_name, + .ndo_setup_tc = dpaa2_switch_port_setup_tc, }; bool dpaa2_switch_port_dev_check(const struct net_device *netdev) @@ -1832,7 
+2098,7 @@ static void dpaa2_switch_event_work(struct work_struct *work) switch (switchdev_work->event) { case SWITCHDEV_FDB_ADD_TO_DEVICE: - if (!fdb_info->added_by_user) + if (!fdb_info->added_by_user || fdb_info->is_local) break; if (is_unicast_ether_addr(fdb_info->addr)) err = dpaa2_switch_port_fdb_add_uc(netdev_priv(dev), @@ -1847,7 +2113,7 @@ static void dpaa2_switch_event_work(struct work_struct *work) &fdb_info->info, NULL); break; case SWITCHDEV_FDB_DEL_TO_DEVICE: - if (!fdb_info->added_by_user) + if (!fdb_info->added_by_user || fdb_info->is_local) break; if (is_unicast_ether_addr(fdb_info->addr)) dpaa2_switch_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr); @@ -2676,61 +2942,17 @@ err_close: static int dpaa2_switch_port_trap_mac_addr(struct ethsw_port_priv *port_priv, const char *mac) { - struct net_device *netdev = port_priv->netdev; - struct dpsw_acl_entry_cfg acl_entry_cfg; - struct dpsw_acl_fields *acl_h; - struct dpsw_acl_fields *acl_m; - struct dpsw_acl_key acl_key; - struct device *dev; - u8 *cmd_buff; - int err; - - dev = port_priv->netdev->dev.parent; - acl_h = &acl_key.match; - acl_m = &acl_key.mask; - - if (port_priv->acl_num_rules >= DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES) { - netdev_err(netdev, "ACL full\n"); - return -ENOMEM; - } - - memset(&acl_entry_cfg, 0, sizeof(acl_entry_cfg)); - memset(&acl_key, 0, sizeof(acl_key)); + struct dpaa2_switch_acl_entry acl_entry = {0}; /* Match on the destination MAC address */ - ether_addr_copy(acl_h->l2_dest_mac, mac); - eth_broadcast_addr(acl_m->l2_dest_mac); + ether_addr_copy(acl_entry.key.match.l2_dest_mac, mac); + eth_broadcast_addr(acl_entry.key.mask.l2_dest_mac); - cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL); - if (!cmd_buff) - return -ENOMEM; - dpsw_acl_prepare_entry_cfg(&acl_key, cmd_buff); - - memset(&acl_entry_cfg, 0, sizeof(acl_entry_cfg)); - acl_entry_cfg.precedence = port_priv->acl_num_rules; - acl_entry_cfg.result.action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF; - acl_entry_cfg.key_iova = dma_map_single(dev, cmd_buff, - DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, - DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, acl_entry_cfg.key_iova))) { - netdev_err(netdev, "DMA mapping failed\n"); - return -EFAULT; - } + /* Trap to CPU */ + acl_entry.cfg.precedence = 0; + acl_entry.cfg.result.action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF; - err = dpsw_acl_add_entry(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->acl_tbl, &acl_entry_cfg); - - dma_unmap_single(dev, acl_entry_cfg.key_iova, sizeof(cmd_buff), - DMA_TO_DEVICE); - if (err) { - netdev_err(netdev, "dpsw_acl_add_entry() failed %d\n", err); - return err; - } - - port_priv->acl_num_rules++; - - return 0; + return dpaa2_switch_acl_entry_add(port_priv->acl_tbl, &acl_entry); } static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port) @@ -2743,12 +2965,12 @@ static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port) }; struct net_device *netdev = port_priv->netdev; struct ethsw_core *ethsw = port_priv->ethsw_data; + struct dpaa2_switch_acl_tbl *acl_tbl; struct dpsw_fdb_cfg fdb_cfg = {0}; - struct dpsw_acl_if_cfg acl_if_cfg; struct dpsw_if_attr dpsw_if_attr; struct dpaa2_switch_fdb *fdb; struct dpsw_acl_cfg acl_cfg; - u16 fdb_id; + u16 fdb_id, acl_tbl_id; int err; /* Get the Tx queue for this specific port */ @@ -2792,21 +3014,22 @@ static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port) /* Create an ACL table to be used by this switch port */ acl_cfg.max_entries = 
DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES; err = dpsw_acl_add(ethsw->mc_io, 0, ethsw->dpsw_handle, - &port_priv->acl_tbl, &acl_cfg); + &acl_tbl_id, &acl_cfg); if (err) { netdev_err(netdev, "dpsw_acl_add err %d\n", err); return err; } - acl_if_cfg.if_id[0] = port_priv->idx; - acl_if_cfg.num_ifs = 1; - err = dpsw_acl_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, - port_priv->acl_tbl, &acl_if_cfg); - if (err) { - netdev_err(netdev, "dpsw_acl_add_if err %d\n", err); - dpsw_acl_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, - port_priv->acl_tbl); - } + acl_tbl = dpaa2_switch_acl_tbl_get_unused(ethsw); + acl_tbl->ethsw = ethsw; + acl_tbl->id = acl_tbl_id; + acl_tbl->in_use = true; + acl_tbl->num_rules = 0; + INIT_LIST_HEAD(&acl_tbl->entries); + + err = dpaa2_switch_port_acl_tbl_bind(port_priv, acl_tbl); + if (err) + return err; err = dpaa2_switch_port_trap_mac_addr(port_priv, stpa); if (err) @@ -2858,6 +3081,7 @@ static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev) } kfree(ethsw->fdbs); + kfree(ethsw->acls); kfree(ethsw->ports); dpaa2_switch_takedown(sw_dev); @@ -2915,7 +3139,9 @@ static int dpaa2_switch_probe_port(struct ethsw_core *ethsw, /* The DPAA2 switch's ingress path depends on the VLAN table, * thus we are not able to disable VLAN filtering. */ - port_netdev->features = NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER; + port_netdev->features = NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_STAG_FILTER | + NETIF_F_HW_TC; err = dpaa2_switch_port_init(port_priv, port_idx); if (err) @@ -2983,6 +3209,13 @@ static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev) goto err_free_ports; } + ethsw->acls = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->acls), + GFP_KERNEL); + if (!ethsw->acls) { + err = -ENOMEM; + goto err_free_fdbs; + } + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { err = dpaa2_switch_probe_port(ethsw, i); if (err) @@ -3031,6 +3264,8 @@ err_stop: err_free_netdev: for (i--; i >= 0; i--) free_netdev(ethsw->ports[i]->netdev); + kfree(ethsw->acls); +err_free_fdbs: kfree(ethsw->fdbs); err_free_ports: kfree(ethsw->ports); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h index 0ae1d27c811e..bdef71f234cb 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h @@ -18,6 +18,7 @@ #include <net/switchdev.h> #include <linux/if_bridge.h> #include <linux/fsl/mc.h> +#include <net/pkt_cls.h> #include <soc/fsl/dpaa2-io.h> #include "dpsw.h" @@ -80,6 +81,8 @@ (DPAA2_SWITCH_TX_DATA_OFFSET + DPAA2_SWITCH_TX_BUF_ALIGN) #define DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES 16 +#define DPAA2_ETHSW_PORT_DEFAULT_TRAPS 1 + #define DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE 256 extern const struct ethtool_ops dpaa2_switch_port_ethtool_ops; @@ -101,6 +104,34 @@ struct dpaa2_switch_fdb { bool in_use; }; +struct dpaa2_switch_acl_entry { + struct list_head list; + u16 prio; + unsigned long cookie; + + struct dpsw_acl_entry_cfg cfg; + struct dpsw_acl_key key; +}; + +struct dpaa2_switch_acl_tbl { + struct list_head entries; + struct ethsw_core *ethsw; + u64 ports; + + u16 id; + u8 num_rules; + bool in_use; +}; + +static inline bool +dpaa2_switch_acl_tbl_is_full(struct dpaa2_switch_acl_tbl *acl_tbl) +{ + if ((acl_tbl->num_rules + DPAA2_ETHSW_PORT_DEFAULT_TRAPS) >= + DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES) + return true; + return false; +} + /* Per port private data */ struct ethsw_port_priv { struct net_device *netdev; @@ -118,8 +149,7 @@ struct ethsw_port_priv { bool ucast_flood; bool learn_ena; - 
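A subtlety in the capacity check: dpaa2_switch_acl_tbl_is_full() in the header hunk above counts DPAA2_ETHSW_PORT_DEFAULT_TRAPS on top of num_rules, reserving one of the 16 hardware entries for the always-installed STP trap rule, so tc can place at most 15 filters in a table. Usage sketch mirroring the flower/matchall paths:

	if (dpaa2_switch_acl_tbl_is_full(acl_tbl)) {
		NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
		return -ENOMEM;	/* 15 tc rules + 1 reserved trap = 16 */
	}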
u16 acl_tbl; - u8 acl_num_rules; + struct dpaa2_switch_acl_tbl *acl_tbl; }; /* Switch data */ @@ -145,8 +175,21 @@ struct ethsw_core { int napi_users; struct dpaa2_switch_fdb *fdbs; + struct dpaa2_switch_acl_tbl *acls; }; +static inline int dpaa2_switch_get_index(struct ethsw_core *ethsw, + struct net_device *netdev) +{ + int i; + + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) + if (ethsw->ports[i]->netdev == netdev) + return ethsw->ports[i]->idx; + + return -EINVAL; +} + static inline bool dpaa2_switch_supports_cpu_traffic(struct ethsw_core *ethsw) { if (ethsw->sw_attr.options & DPSW_OPT_CTRL_IF_DIS) { @@ -183,4 +226,21 @@ int dpaa2_switch_port_vlans_del(struct net_device *netdev, typedef int dpaa2_switch_fdb_cb_t(struct ethsw_port_priv *port_priv, struct fdb_dump_entry *fdb_entry, void *data); + +/* TC offload */ + +int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_acl_tbl *acl_tbl, + struct flow_cls_offload *cls); + +int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_acl_tbl *acl_tbl, + struct flow_cls_offload *cls); + +int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_acl_tbl *acl_tbl, + struct tc_cls_matchall_offload *cls); + +int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_acl_tbl *acl_tbl, + struct tc_cls_matchall_offload *cls); + +int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl, + struct dpaa2_switch_acl_entry *entry); #endif /* __ETHSW_H */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h index 1747cee19a72..cb13e740f72b 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h @@ -77,6 +77,7 @@ #define DPSW_CMDID_ACL_ADD DPSW_CMD_ID(0x090) #define DPSW_CMDID_ACL_REMOVE DPSW_CMD_ID(0x091) #define DPSW_CMDID_ACL_ADD_ENTRY DPSW_CMD_ID(0x092) +#define DPSW_CMDID_ACL_REMOVE_ENTRY DPSW_CMD_ID(0x093) #define DPSW_CMDID_ACL_ADD_IF DPSW_CMD_ID(0x094) #define DPSW_CMDID_ACL_REMOVE_IF DPSW_CMD_ID(0x095) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw.c b/drivers/net/ethernet/freescale/dpaa2/dpsw.c index 6704efe89bc1..6352d6d1ecba 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpsw.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpsw.c @@ -1544,3 +1544,38 @@ int dpsw_acl_add_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, return mc_send_command(mc_io, &cmd); } + +/** + * dpsw_acl_remove_entry() - Removes an entry from ACL. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @acl_id: ACL ID + * @cfg: Entry configuration + * + * warning: This function has to be called after dpsw_acl_set_entry_cfg() + * + * Return: '0' on Success; Error code otherwise. 
+ */ +int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 acl_id, const struct dpsw_acl_entry_cfg *cfg) +{ + struct dpsw_cmd_acl_entry *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_ENTRY, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params; + cmd_params->acl_id = cpu_to_le16(acl_id); + cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id); + cmd_params->precedence = cpu_to_le32(cfg->precedence); + cmd_params->key_iova = cpu_to_le64(cfg->key_iova); + dpsw_set_field(cmd_params->result_action, + RESULT_ACTION, + cfg->result.action); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw.h b/drivers/net/ethernet/freescale/dpaa2/dpsw.h index 08e37c475ae8..5ef221a25b02 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpsw.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpsw.h @@ -749,4 +749,7 @@ void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key, int dpsw_acl_add_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 acl_id, const struct dpsw_acl_entry_cfg *cfg); + +int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 acl_id, const struct dpsw_acl_entry_cfg *cfg); #endif /* __FSL_DPSW_H */ diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig index ab92382c399a..d88f60c2bb82 100644 --- a/drivers/net/ethernet/freescale/enetc/Kconfig +++ b/drivers/net/ethernet/freescale/enetc/Kconfig @@ -2,6 +2,7 @@ config FSL_ENETC tristate "ENETC PF driver" depends on PCI && PCI_MSI + depends on FSL_ENETC_IERB || FSL_ENETC_IERB=n select FSL_ENETC_MDIO select PHYLINK select PCS_LYNX @@ -25,6 +26,14 @@ config FSL_ENETC_VF If compiled as module (M), the module name is fsl-enetc-vf. +config FSL_ENETC_IERB + tristate "ENETC IERB driver" + help + This driver configures the Integrated Endpoint Register Block on NXP + LS1028A. + + If compiled as module (M), the module name is fsl-enetc-ierb. 
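The new dependency line in the FSL_ENETC entry above, depends on FSL_ENETC_IERB || FSL_ENETC_IERB=n, is the usual Kconfig idiom for an optional helper: IERB may be disabled outright, but if enabled it must not be more modular than its user, since built-in code cannot call symbols that live in a module. Spelled out:

	FSL_ENETC_IERB=n  ->  FSL_ENETC may be y or m
	FSL_ENETC_IERB=y  ->  FSL_ENETC may be y or m
	FSL_ENETC_IERB=m  ->  FSL_ENETC may only be m (y would need module symbols at built-in link time)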
+ config FSL_ENETC_MDIO tristate "ENETC MDIO driver" depends on PCI && MDIO_DEVRES && MDIO_BUS diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile index 74f7ac253b8b..a139f2e9d59f 100644 --- a/drivers/net/ethernet/freescale/enetc/Makefile +++ b/drivers/net/ethernet/freescale/enetc/Makefile @@ -11,6 +11,9 @@ obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o fsl-enetc-vf-y := enetc_vf.o $(common-objs) fsl-enetc-vf-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o +obj-$(CONFIG_FSL_ENETC_IERB) += fsl-enetc-ierb.o +fsl-enetc-ierb-y := enetc_ierb.o + obj-$(CONFIG_FSL_ENETC_MDIO) += fsl-enetc-mdio.o fsl-enetc-mdio-y := enetc_pci_mdio.o enetc_mdio.o diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 4a0adb0b8bd7..4f23829e7317 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -9,6 +9,26 @@ #include <linux/ptp_classify.h> #include <net/pkt_sched.h> +static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv) +{ + int num_tx_rings = priv->num_tx_rings; + int i; + + for (i = 0; i < priv->num_rx_rings; i++) + if (priv->rx_ring[i]->xdp.prog) + return num_tx_rings - num_possible_cpus(); + + return num_tx_rings; +} + +static struct enetc_bdr *enetc_rx_ring_from_xdp_tx_ring(struct enetc_ndev_priv *priv, + struct enetc_bdr *tx_ring) +{ + int index = &priv->tx_ring[tx_ring->index] - priv->xdp_tx_ring; + + return priv->rx_ring[index]; +} + static struct sk_buff *enetc_tx_swbd_get_skb(struct enetc_tx_swbd *tx_swbd) { if (tx_swbd->is_xdp_tx || tx_swbd->is_xdp_redirect) @@ -468,7 +488,6 @@ static void enetc_recycle_xdp_tx_buff(struct enetc_bdr *tx_ring, struct enetc_tx_swbd *tx_swbd) { struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev); - struct enetc_bdr *rx_ring = priv->rx_ring[tx_ring->index]; struct enetc_rx_swbd rx_swbd = { .dma = tx_swbd->dma, .page = tx_swbd->page, @@ -476,6 +495,9 @@ static void enetc_recycle_xdp_tx_buff(struct enetc_bdr *tx_ring, .dir = tx_swbd->dir, .len = tx_swbd->len, }; + struct enetc_bdr *rx_ring; + + rx_ring = enetc_rx_ring_from_xdp_tx_ring(priv, tx_ring); if (likely(enetc_swbd_unused(rx_ring))) { enetc_reuse_page(rx_ring, &rx_swbd); @@ -544,7 +566,6 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget) if (xdp_frame) { xdp_return_frame(xdp_frame); - tx_swbd->xdp_frame = NULL; } else if (skb) { if (unlikely(tx_swbd->skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) { @@ -552,13 +573,12 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget) * timestamping packet. And send one skb in * tx_skbs queue if has. 
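enetc_num_stack_tx_queues() above encodes the TX ring split used by the XDP hunks that follow: as soon as any RX ring has an XDP program, the last num_possible_cpus() TX rings are withdrawn from the network stack and dedicated to XDP_TX/XDP_REDIRECT, one ring per CPU so the XDP transmit path needs no locking. The indexing, condensed from this patch:

	/* set up once, in enetc_alloc_msix() */
	first_xdp_tx_ring = priv->num_tx_rings - num_possible_cpus();
	priv->xdp_tx_ring = &priv->tx_ring[first_xdp_tx_ring];

	/* per-CPU, lock-free ring selection on the XDP path */
	tx_ring = priv->xdp_tx_ring[smp_processor_id()];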
*/ - queue_work(system_wq, &priv->tx_onestep_tstamp); + schedule_work(&priv->tx_onestep_tstamp); } else if (unlikely(do_twostep_tstamp)) { enetc_tstamp_tx(skb, tstamp); do_twostep_tstamp = false; } napi_consume_skb(skb, napi_budget); - tx_swbd->skb = NULL; } tx_byte_cnt += tx_swbd->len; @@ -753,27 +773,35 @@ static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring, return rx_swbd; } +/* Reuse the current page without performing half-page buffer flipping */ static void enetc_put_rx_buff(struct enetc_bdr *rx_ring, struct enetc_rx_swbd *rx_swbd) { - if (likely(enetc_page_reusable(rx_swbd->page))) { - size_t buffer_size = ENETC_RXB_TRUESIZE - rx_ring->buffer_offset; + size_t buffer_size = ENETC_RXB_TRUESIZE - rx_ring->buffer_offset; + + enetc_reuse_page(rx_ring, rx_swbd); + dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma, + rx_swbd->page_offset, + buffer_size, rx_swbd->dir); + + rx_swbd->page = NULL; +} + +/* Reuse the current page by performing half-page buffer flipping */ +static void enetc_flip_rx_buff(struct enetc_bdr *rx_ring, + struct enetc_rx_swbd *rx_swbd) +{ + if (likely(enetc_page_reusable(rx_swbd->page))) { rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE; page_ref_inc(rx_swbd->page); - enetc_reuse_page(rx_ring, rx_swbd); - - /* sync for use by the device */ - dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma, - rx_swbd->page_offset, - buffer_size, rx_swbd->dir); + enetc_put_rx_buff(rx_ring, rx_swbd); } else { dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE, rx_swbd->dir); + rx_swbd->page = NULL; } - - rx_swbd->page = NULL; } static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring, @@ -793,7 +821,7 @@ static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring, skb_reserve(skb, rx_ring->buffer_offset); __skb_put(skb, size); - enetc_put_rx_buff(rx_ring, rx_swbd); + enetc_flip_rx_buff(rx_ring, rx_swbd); return skb; } @@ -806,7 +834,7 @@ static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i, skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page, rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE); - enetc_put_rx_buff(rx_ring, rx_swbd); + enetc_flip_rx_buff(rx_ring, rx_swbd); } static bool enetc_check_bd_errors_and_consume(struct enetc_bdr *rx_ring, @@ -816,12 +844,14 @@ static bool enetc_check_bd_errors_and_consume(struct enetc_bdr *rx_ring, if (likely(!(bd_status & ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK)))) return false; + enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]); enetc_rxbd_next(rx_ring, rxbd, i); while (!(bd_status & ENETC_RXBD_LSTATUS_F)) { dma_rmb(); bd_status = le32_to_cpu((*rxbd)->r.lstatus); + enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]); enetc_rxbd_next(rx_ring, rxbd, i); } @@ -1051,7 +1081,9 @@ int enetc_xdp_xmit(struct net_device *ndev, int num_frames, int xdp_tx_bd_cnt, i, k; int xdp_tx_frm_cnt = 0; - tx_ring = priv->tx_ring[smp_processor_id()]; + enetc_lock_mdio(); + + tx_ring = priv->xdp_tx_ring[smp_processor_id()]; prefetchw(ENETC_TXBD(*tx_ring, tx_ring->next_to_use)); @@ -1079,6 +1111,8 @@ int enetc_xdp_xmit(struct net_device *ndev, int num_frames, tx_ring->stats.xdp_tx += xdp_tx_frm_cnt; + enetc_unlock_mdio(); + return xdp_tx_frm_cnt; } @@ -1144,24 +1178,8 @@ static void enetc_build_xdp_buff(struct enetc_bdr *rx_ring, u32 bd_status, } } -/* Reuse the current page without performing half-page buffer flipping */ -static void enetc_put_xdp_buff(struct enetc_bdr *rx_ring, - struct enetc_rx_swbd *rx_swbd) -{ - enetc_reuse_page(rx_ring, rx_swbd); - - 
dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma, - rx_swbd->page_offset, - ENETC_RXB_DMA_SIZE_XDP, - rx_swbd->dir); - - rx_swbd->page = NULL; -} - /* Convert RX buffer descriptors to TX buffer descriptors. These will be - * recycled back into the RX ring in enetc_clean_tx_ring. We need to scrub the - * RX software BDs because the ownership of the buffer no longer belongs to the - * RX ring, so enetc_refill_rx_ring may not reuse rx_swbd->page. + * recycled back into the RX ring in enetc_clean_tx_ring. */ static int enetc_rx_swbd_to_xdp_tx_swbd(struct enetc_tx_swbd *xdp_tx_arr, struct enetc_bdr *rx_ring, @@ -1183,7 +1201,6 @@ static int enetc_rx_swbd_to_xdp_tx_swbd(struct enetc_tx_swbd *xdp_tx_arr, tx_swbd->is_dma_page = true; tx_swbd->is_xdp_tx = true; tx_swbd->is_eof = false; - memset(rx_swbd, 0, sizeof(*rx_swbd)); } /* We rely on caller providing an rx_ring_last > rx_ring_first */ @@ -1196,8 +1213,8 @@ static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first, int rx_ring_last) { while (rx_ring_first != rx_ring_last) { - enetc_put_xdp_buff(rx_ring, - &rx_ring->rx_swbd[rx_ring_first]); + enetc_put_rx_buff(rx_ring, + &rx_ring->rx_swbd[rx_ring_first]); enetc_bdr_idx_inc(rx_ring, &rx_ring_first); } rx_ring->stats.xdp_drops++; @@ -1227,8 +1244,8 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, int xdp_tx_bd_cnt, xdp_tx_frm_cnt = 0, xdp_redirect_frm_cnt = 0; struct enetc_tx_swbd xdp_tx_arr[ENETC_MAX_SKB_FRAGS] = {0}; struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev); - struct enetc_bdr *tx_ring = priv->tx_ring[rx_ring->index]; int rx_frm_cnt = 0, rx_byte_cnt = 0; + struct enetc_bdr *tx_ring; int cleaned_cnt, i; u32 xdp_act; @@ -1266,6 +1283,9 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, xdp_act = bpf_prog_run_xdp(prog, &xdp_buff); switch (xdp_act) { + default: + bpf_warn_invalid_xdp_action(xdp_act); + fallthrough; case XDP_ABORTED: trace_xdp_exception(rx_ring->ndev, prog, xdp_act); fallthrough; @@ -1281,12 +1301,12 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, &i, &cleaned_cnt, ENETC_RXB_DMA_SIZE_XDP); if (unlikely(!skb)) - /* Exit the switch/case, not the loop */ - break; + goto out; napi_gro_receive(napi, skb); break; case XDP_TX: + tx_ring = priv->xdp_tx_ring[rx_ring->index]; xdp_tx_bd_cnt = enetc_rx_swbd_to_xdp_tx_swbd(xdp_tx_arr, rx_ring, orig_i, i); @@ -1298,6 +1318,17 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, tx_ring->stats.xdp_tx += xdp_tx_bd_cnt; rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt; xdp_tx_frm_cnt++; + /* The XDP_TX enqueue was successful, so we + * need to scrub the RX software BDs because + * the ownership of the buffers no longer + * belongs to the RX ring, and we must prevent + * enetc_refill_rx_ring() from reusing + * rx_swbd->page. 
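The enetc_put_rx_buff()/enetc_flip_rx_buff() split above separates the two reuse strategies: put re-arms the same half of the page (error descriptors, XDP drops), while flip is the usual half-page scheme for buffers that were actually handed up. The flip arithmetic, assuming ENETC_RXB_TRUESIZE is the 2048-byte half of a 4 KiB page:

	if (likely(enetc_page_reusable(rx_swbd->page))) {
		rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;	/* 0 <-> 2048 */
		page_ref_inc(rx_swbd->page);	/* other half is still in flight */
		enetc_put_rx_buff(rx_ring, rx_swbd);	/* re-arm the free half */
	}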
+ */ + while (orig_i != i) { + rx_ring->rx_swbd[orig_i].page = NULL; + enetc_bdr_idx_inc(rx_ring, &orig_i); + } } break; case XDP_REDIRECT: @@ -1318,8 +1349,8 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, tmp_orig_i = orig_i; while (orig_i != i) { - enetc_put_rx_buff(rx_ring, - &rx_ring->rx_swbd[orig_i]); + enetc_flip_rx_buff(rx_ring, + &rx_ring->rx_swbd[orig_i]); enetc_bdr_idx_inc(rx_ring, &orig_i); } @@ -1330,20 +1361,12 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, xdp_redirect_frm_cnt++; rx_ring->stats.xdp_redirect++; } - - if (unlikely(xdp_redirect_frm_cnt > ENETC_DEFAULT_TX_WORK)) { - xdp_do_flush_map(); - xdp_redirect_frm_cnt = 0; - } - - break; - default: - bpf_warn_invalid_xdp_action(xdp_act); } rx_frm_cnt++; } +out: rx_ring->next_to_clean = i; rx_ring->stats.packets += rx_frm_cnt; @@ -2033,6 +2056,7 @@ void enetc_start(struct net_device *ndev) int enetc_open(struct net_device *ndev) { struct enetc_ndev_priv *priv = netdev_priv(ndev); + int num_stack_tx_queues; int err; err = enetc_setup_irqs(priv); @@ -2051,7 +2075,9 @@ int enetc_open(struct net_device *ndev) if (err) goto err_alloc_rx; - err = netif_set_real_num_tx_queues(ndev, priv->num_tx_rings); + num_stack_tx_queues = enetc_num_stack_tx_queues(priv); + + err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues); if (err) goto err_set_queues; @@ -2124,15 +2150,17 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data) struct enetc_ndev_priv *priv = netdev_priv(ndev); struct tc_mqprio_qopt *mqprio = type_data; struct enetc_bdr *tx_ring; + int num_stack_tx_queues; u8 num_tc; int i; + num_stack_tx_queues = enetc_num_stack_tx_queues(priv); mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; num_tc = mqprio->num_tc; if (!num_tc) { netdev_reset_tc(ndev); - netif_set_real_num_tx_queues(ndev, priv->num_tx_rings); + netif_set_real_num_tx_queues(ndev, num_stack_tx_queues); /* Reset all ring priorities to 0 */ for (i = 0; i < priv->num_tx_rings; i++) { @@ -2144,7 +2172,7 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data) } /* Check if we have enough BD rings available to accommodate all TCs */ - if (num_tc > priv->num_tx_rings) { + if (num_tc > num_stack_tx_queues) { netdev_err(ndev, "Max %d traffic classes supported\n", priv->num_tx_rings); return -EINVAL; @@ -2432,8 +2460,9 @@ int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) int enetc_alloc_msix(struct enetc_ndev_priv *priv) { struct pci_dev *pdev = priv->si->pdev; - int v_tx_rings; + int first_xdp_tx_ring; int i, n, err, nvec; + int v_tx_rings; nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num; /* allocate MSIX for both messaging and Rx/Tx interrupts */ @@ -2508,6 +2537,9 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv) } } + first_xdp_tx_ring = priv->num_tx_rings - num_possible_cpus(); + priv->xdp_tx_ring = &priv->tx_ring[first_xdp_tx_ring]; + return 0; fail: diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h index d52717bc73c7..08b283347d9c 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -79,7 +79,7 @@ struct enetc_xdp_data { }; #define ENETC_RX_RING_DEFAULT_SIZE 2048 -#define ENETC_TX_RING_DEFAULT_SIZE 256 +#define ENETC_TX_RING_DEFAULT_SIZE 2048 #define ENETC_DEFAULT_TX_WORK (ENETC_TX_RING_DEFAULT_SIZE / 2) struct enetc_bdr { @@ -237,6 +237,22 @@ static inline bool enetc_si_is_pf(struct enetc_si *si) return !!(si->hw.port); } +static inline int 
enetc_pf_to_port(struct pci_dev *pf_pdev) +{ + switch (pf_pdev->devfn) { + case 0: + return 0; + case 1: + return 1; + case 2: + return 2; + case 6: + return 3; + default: + return -1; + } +} + #define ENETC_MAX_NUM_TXQS 8 #define ENETC_INT_NAME_MAX (IFNAMSIZ + 8) @@ -317,6 +333,7 @@ struct enetc_ndev_priv { u32 speed; /* store speed for compare update pspeed */ + struct enetc_bdr **xdp_tx_ring; struct enetc_bdr *tx_ring[16]; struct enetc_bdr *rx_ring[16]; diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c index 49835e878bbb..ebccaf02411c 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -708,6 +708,22 @@ static int enetc_set_wol(struct net_device *dev, return ret; } +static void enetc_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct enetc_ndev_priv *priv = netdev_priv(dev); + + phylink_ethtool_get_pauseparam(priv->phylink, pause); +} + +static int enetc_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct enetc_ndev_priv *priv = netdev_priv(dev); + + return phylink_ethtool_set_pauseparam(priv->phylink, pause); +} + static int enetc_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { @@ -754,6 +770,8 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = { .get_ts_info = enetc_get_ts_info, .get_wol = enetc_get_wol, .set_wol = enetc_set_wol, + .get_pauseparam = enetc_get_pauseparam, + .set_pauseparam = enetc_set_pauseparam, }; static const struct ethtool_ops enetc_vf_ethtool_ops = { diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h index 04ac7fc23ead..0f5f081a5baf 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h +++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h @@ -109,6 +109,7 @@ enum enetc_bdr_type {TX, RX}; /* RX BDR reg offsets */ #define ENETC_RBMR 0 #define ENETC_RBMR_BDS BIT(2) +#define ENETC_RBMR_CM BIT(4) #define ENETC_RBMR_VTE BIT(5) #define ENETC_RBMR_EN BIT(31) #define ENETC_RBSR 0x4 @@ -180,6 +181,8 @@ enum enetc_bdr_type {TX, RX}; #define ENETC_PSIVLANR(n) (0x0240 + (n) * 4) /* n = SI index */ #define ENETC_PSIVLAN_EN BIT(31) #define ENETC_PSIVLAN_SET_QOS(val) ((u32)(val) << 12) +#define ENETC_PPAUONTR 0x0410 +#define ENETC_PPAUOFFTR 0x0414 #define ENETC_PTXMBAR 0x0608 #define ENETC_PCAPR0 0x0900 #define ENETC_PCAPR0_RXBDR(val) ((val) >> 24) @@ -227,6 +230,7 @@ enum enetc_bdr_type {TX, RX}; #define ENETC_PM0_TX_EN BIT(0) #define ENETC_PM0_RX_EN BIT(1) #define ENETC_PM0_PROMISC BIT(4) +#define ENETC_PM0_PAUSE_IGN BIT(8) #define ENETC_PM0_CMD_XGLP BIT(10) #define ENETC_PM0_CMD_TXP BIT(11) #define ENETC_PM0_CMD_PHY_TX_EN BIT(15) @@ -239,6 +243,11 @@ enum enetc_bdr_type {TX, RX}; #define ENETC_PM_IMDIO_BASE 0x8030 +#define ENETC_PM0_PAUSE_QUANTA 0x8054 +#define ENETC_PM0_PAUSE_THRESH 0x8064 +#define ENETC_PM1_PAUSE_QUANTA 0x9054 +#define ENETC_PM1_PAUSE_THRESH 0x9064 + #define ENETC_PM0_SINGLE_STEP 0x80c0 #define ENETC_PM1_SINGLE_STEP 0x90c0 #define ENETC_PM0_SINGLE_STEP_CH BIT(7) diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ierb.c b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c new file mode 100644 index 000000000000..8b356c485507 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2021 NXP Semiconductors + * + * The Integrated Endpoint 
Register Block (IERB) is configured by pre-boot + * software and is supposed to be to ENETC what a NVRAM is to a 'real' PCIe + * card. Upon FLR, values from the IERB are transferred to the ENETC PFs, and + * are read-only in the PF memory space. + * + * This driver fixes up the power-on reset values for the ENETC shared FIFO, + * such that the TX and RX allocations are sufficient for jumbo frames, and + * that intelligent FIFO dropping is enabled before the internal data + * structures are corrupted. + * + * Even though not all ports might be used on a given board, we are not + * concerned with partitioning the FIFO, because the default values configure + * no strict reservations, so the entire FIFO can be used by the RX of a single + * port, or the TX of a single port. + */ + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include "enetc.h" +#include "enetc_ierb.h" + +/* IERB registers */ +#define ENETC_IERB_TXMBAR(port) (((port) * 0x100) + 0x8080) +#define ENETC_IERB_RXMBER(port) (((port) * 0x100) + 0x8090) +#define ENETC_IERB_RXMBLR(port) (((port) * 0x100) + 0x8094) +#define ENETC_IERB_RXBCR(port) (((port) * 0x100) + 0x80a0) +#define ENETC_IERB_TXBCR(port) (((port) * 0x100) + 0x80a8) +#define ENETC_IERB_FMBDTR 0xa000 + +#define ENETC_RESERVED_FOR_ICM 1024 + +struct enetc_ierb { + void __iomem *regs; +}; + +static void enetc_ierb_write(struct enetc_ierb *ierb, u32 offset, u32 val) +{ + iowrite32(val, ierb->regs + offset); +} + +int enetc_ierb_register_pf(struct platform_device *pdev, + struct pci_dev *pf_pdev) +{ + struct enetc_ierb *ierb = platform_get_drvdata(pdev); + int port = enetc_pf_to_port(pf_pdev); + u16 tx_credit, rx_credit, tx_alloc; + + if (port < 0) + return -ENODEV; + + if (!ierb) + return -EPROBE_DEFER; + + /* By default, it is recommended to set the Host Transfer Agent + * per port transmit byte credit to "1000 + max_frame_size/2". + * The power-on reset value (1800 bytes) is rounded up to the nearest + * 100 assuming a maximum frame size of 1536 bytes. + */ + tx_credit = roundup(1000 + ENETC_MAC_MAXFRM_SIZE / 2, 100); + + /* Internal memory allocated for transmit buffering is guaranteed but + * not reserved; i.e. if the total transmit allocation is not used, + * then the unused portion is not left idle, it can be used for receive + * buffering but it will be reclaimed, if required, from receive by + * intelligently dropping already stored receive frames in the internal + * memory to ensure that the transmit allocation is respected. + * + * PaTXMBAR must be set to a value larger than + * PaTXBCR + 2 * max_frame_size + 32 + * if frame preemption is not enabled, or to + * 2 * PaTXBCR + 2 * p_max_frame_size (pMAC maximum frame size) + + * 2 * np_max_frame_size (eMAC maximum frame size) + 64 + * if frame preemption is enabled. + */ + tx_alloc = roundup(2 * tx_credit + 4 * ENETC_MAC_MAXFRM_SIZE + 64, 16); + + /* Initial credits, in units of 8 bytes, to the Ingress Congestion + * Manager for the maximum amount of bytes the port is allocated for + * pending traffic. + * It is recommended to set the initial credits to 2 times the maximum + * frame size (2 frames of maximum size). 
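+ *
+ * A worked example (editorial sketch, using the 1536 byte frame size
+ * quoted above for the power-on reset value; the driver itself
+ * computes with ENETC_MAC_MAXFRM_SIZE):
+ *   tx_credit = roundup(1000 + 1536 / 2, 100)         = 1800
+ *   tx_alloc  = roundup(2 * 1800 + 4 * 1536 + 64, 16) = 9808
+ *   rx_credit = DIV_ROUND_UP(2 * 1536, 8)             = 384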
+ */ + rx_credit = DIV_ROUND_UP(ENETC_MAC_MAXFRM_SIZE * 2, 8); + + enetc_ierb_write(ierb, ENETC_IERB_TXBCR(port), tx_credit); + enetc_ierb_write(ierb, ENETC_IERB_TXMBAR(port), tx_alloc); + enetc_ierb_write(ierb, ENETC_IERB_RXBCR(port), rx_credit); + + return 0; +} +EXPORT_SYMBOL(enetc_ierb_register_pf); + +static int enetc_ierb_probe(struct platform_device *pdev) +{ + struct enetc_ierb *ierb; + struct resource *res; + void __iomem *regs; + + ierb = devm_kzalloc(&pdev->dev, sizeof(*ierb), GFP_KERNEL); + if (!ierb) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(regs)) + return PTR_ERR(regs); + + ierb->regs = regs; + + /* Free buffer depletion threshold in bytes. + * This sets the minimum amount of free buffer memory that should be + * maintained in the datapath sub system, and when the amount of free + * buffer memory falls below this threshold, a depletion indication is + * asserted, which may trigger "intelligent drop" frame releases from + * the ingress queues in the ICM. + * It is recommended to set the free buffer depletion threshold to 1024 + * bytes, since the ICM needs some FIFO memory for its own use. + */ + enetc_ierb_write(ierb, ENETC_IERB_FMBDTR, ENETC_RESERVED_FOR_ICM); + + platform_set_drvdata(pdev, ierb); + + return 0; +} + +static int enetc_ierb_remove(struct platform_device *pdev) +{ + return 0; +} + +static const struct of_device_id enetc_ierb_match[] = { + { .compatible = "fsl,ls1028a-enetc-ierb", }, + {}, +}; +MODULE_DEVICE_TABLE(of, enetc_ierb_match); + +static struct platform_driver enetc_ierb_driver = { + .driver = { + .name = "fsl-enetc-ierb", + .of_match_table = enetc_ierb_match, + }, + .probe = enetc_ierb_probe, + .remove = enetc_ierb_remove, +}; + +module_platform_driver(enetc_ierb_driver); + +MODULE_DESCRIPTION("NXP ENETC IERB"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ierb.h b/drivers/net/ethernet/freescale/enetc/enetc_ierb.h new file mode 100644 index 000000000000..b3b774e0998a --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_ierb.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2021 NXP Semiconductors */ + +#include <linux/pci.h> +#include <linux/platform_device.h> + +#if IS_ENABLED(CONFIG_FSL_ENETC_IERB) + +int enetc_ierb_register_pf(struct platform_device *pdev, + struct pci_dev *pf_pdev); + +#else + +static inline int enetc_ierb_register_pf(struct platform_device *pdev, + struct pci_dev *pf_pdev) +{ + return -EOPNOTSUPP; +} + +#endif diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index f61fedf462e5..31274325159a 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -4,8 +4,10 @@ #include <linux/mdio.h> #include <linux/module.h> #include <linux/fsl/enetc_mdio.h> +#include <linux/of_platform.h> #include <linux/of_mdio.h> #include <linux/of_net.h> +#include "enetc_ierb.h" #include "enetc_pf.h" #define ENETC_DRV_NAME_STR "ENETC PF driver" @@ -390,23 +392,54 @@ static int enetc_pf_set_vf_spoofchk(struct net_device *ndev, int vf, bool en) return 0; } -static void enetc_port_setup_primary_mac_address(struct enetc_si *si) +static int enetc_setup_mac_address(struct device_node *np, struct enetc_pf *pf, + int si) { - unsigned char mac_addr[MAX_ADDR_LEN]; - struct enetc_pf *pf = enetc_si_priv(si); - struct enetc_hw *hw = &si->hw; - int i; + struct 
device *dev = &pf->si->pdev->dev; + struct enetc_hw *hw = &pf->si->hw; + u8 mac_addr[ETH_ALEN] = { 0 }; + int err; - /* check MAC addresses for PF and all VFs, if any is 0 set it ro rand */ - for (i = 0; i < pf->total_vfs + 1; i++) { - enetc_pf_get_primary_mac_addr(hw, i, mac_addr); - if (!is_zero_ether_addr(mac_addr)) - continue; + /* (1) try to get the MAC address from the device tree */ + if (np) { + err = of_get_mac_address(np, mac_addr); + if (err == -EPROBE_DEFER) + return err; + } + + /* (2) bootloader supplied MAC address */ + if (is_zero_ether_addr(mac_addr)) + enetc_pf_get_primary_mac_addr(hw, si, mac_addr); + + /* (3) choose a random one */ + if (is_zero_ether_addr(mac_addr)) { eth_random_addr(mac_addr); - dev_info(&si->pdev->dev, "no MAC address specified for SI%d, using %pM\n", - i, mac_addr); - enetc_pf_set_primary_mac_addr(hw, i, mac_addr); + dev_info(dev, "no MAC address specified for SI%d, using %pM\n", + si, mac_addr); + } + + enetc_pf_set_primary_mac_addr(hw, si, mac_addr); + + return 0; +} + +static int enetc_setup_mac_addresses(struct device_node *np, + struct enetc_pf *pf) +{ + int err, i; + + /* The PF might take its MAC from the device tree */ + err = enetc_setup_mac_address(np, pf, 0); + if (err) + return err; + + for (i = 0; i < pf->total_vfs; i++) { + err = enetc_setup_mac_address(NULL, pf, i + 1); + if (err) + return err; } + + return 0; } static void enetc_port_assign_rfs_entries(struct enetc_si *si) @@ -487,7 +520,6 @@ static void enetc_configure_port_mac(struct enetc_hw *hw) ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE)); enetc_port_wr(hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE); - enetc_port_wr(hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE); enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN | ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC); @@ -562,9 +594,6 @@ static void enetc_configure_port(struct enetc_pf *pf) /* split up RFS entries */ enetc_port_assign_rfs_entries(pf->si); - /* fix-up primary MAC addresses, if not set already */ - enetc_port_setup_primary_mac_address(pf->si); - /* enforce VLAN promisc mode for all SIs */ pf->vlan_promisc_simap = ENETC_VLAN_PROMISC_MAP_ALL; enetc_set_vlan_promisc(hw, pf->vlan_promisc_simap); @@ -985,7 +1014,12 @@ static void enetc_pl_mac_link_up(struct phylink_config *config, int duplex, bool tx_pause, bool rx_pause) { struct enetc_pf *pf = phylink_to_enetc_pf(config); + u32 pause_off_thresh = 0, pause_on_thresh = 0; + u32 init_quanta = 0, refresh_quanta = 0; + struct enetc_hw *hw = &pf->si->hw; struct enetc_ndev_priv *priv; + u32 rbmr, cmd_cfg; + int idx; priv = netdev_priv(pf->si->ndev); if (priv->active_offloads & ENETC_F_QBV) @@ -993,9 +1027,60 @@ static void enetc_pl_mac_link_up(struct phylink_config *config, if (!phylink_autoneg_inband(mode) && phy_interface_mode_is_rgmii(interface)) - enetc_force_rgmii_mac(&pf->si->hw, speed, duplex); + enetc_force_rgmii_mac(hw, speed, duplex); + + /* Flow control */ + for (idx = 0; idx < priv->num_rx_rings; idx++) { + rbmr = enetc_rxbdr_rd(hw, idx, ENETC_RBMR); + + if (tx_pause) + rbmr |= ENETC_RBMR_CM; + else + rbmr &= ~ENETC_RBMR_CM; + + enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr); + } + + if (tx_pause) { + /* When the port first enters congestion, send a PAUSE request + * with the maximum number of quanta. When the port exits + * congestion, it will automatically send a PAUSE frame with + * zero quanta. + */ + init_quanta = 0xffff; + + /* Also, set up the refresh timer to send follow-up PAUSE + * frames at half the quanta value, in case the congestion + * condition persists. 
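+ *
+ * (Editorial note: one PAUSE quantum is 512 bit times in IEEE 802.3,
+ * so the initial 0xffff quanta asks the link partner to hold off for
+ * roughly 33.5 ms at 1 Gbps, and the refresh then repeats about every
+ * 16.8 ms for as long as congestion persists.)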
+ */ + refresh_quanta = 0xffff / 2; + + /* Start emitting PAUSE frames when 3 large frames (or more + * smaller frames) have accumulated in the FIFO waiting to be + * DMAed to the RX ring. + */ + pause_on_thresh = 3 * ENETC_MAC_MAXFRM_SIZE; + pause_off_thresh = 1 * ENETC_MAC_MAXFRM_SIZE; + } + + enetc_port_wr(hw, ENETC_PM0_PAUSE_QUANTA, init_quanta); + enetc_port_wr(hw, ENETC_PM1_PAUSE_QUANTA, init_quanta); + enetc_port_wr(hw, ENETC_PM0_PAUSE_THRESH, refresh_quanta); + enetc_port_wr(hw, ENETC_PM1_PAUSE_THRESH, refresh_quanta); + enetc_port_wr(hw, ENETC_PPAUONTR, pause_on_thresh); + enetc_port_wr(hw, ENETC_PPAUOFFTR, pause_off_thresh); - enetc_mac_enable(&pf->si->hw, true); + cmd_cfg = enetc_port_rd(hw, ENETC_PM0_CMD_CFG); + + if (rx_pause) + cmd_cfg &= ~ENETC_PM0_PAUSE_IGN; + else + cmd_cfg |= ENETC_PM0_PAUSE_IGN; + + enetc_port_wr(hw, ENETC_PM0_CMD_CFG, cmd_cfg); + enetc_port_wr(hw, ENETC_PM1_CMD_CFG, cmd_cfg); + + enetc_mac_enable(hw, true); } static void enetc_pl_mac_link_down(struct phylink_config *config, @@ -1087,6 +1172,30 @@ static int enetc_init_port_rss_memory(struct enetc_si *si) return err; } +static int enetc_pf_register_with_ierb(struct pci_dev *pdev) +{ + struct device_node *node = pdev->dev.of_node; + struct platform_device *ierb_pdev; + struct device_node *ierb_node; + + /* Don't register with the IERB if the PF itself is disabled */ + if (!node || !of_device_is_available(node)) + return 0; + + ierb_node = of_find_compatible_node(NULL, NULL, + "fsl,ls1028a-enetc-ierb"); + if (!ierb_node || !of_device_is_available(ierb_node)) + return -ENODEV; + + ierb_pdev = of_find_device_by_node(ierb_node); + of_node_put(ierb_node); + + if (!ierb_pdev) + return -EPROBE_DEFER; + + return enetc_ierb_register_pf(ierb_pdev, pdev); +} + static int enetc_pf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -1097,6 +1206,14 @@ static int enetc_pf_probe(struct pci_dev *pdev, struct enetc_pf *pf; int err; + err = enetc_pf_register_with_ierb(pdev); + if (err == -EPROBE_DEFER) + return err; + if (err) + dev_warn(&pdev->dev, + "Could not register with IERB driver: %pe, please update the device tree\n", + ERR_PTR(err)); + err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf)); if (err) { dev_err(&pdev->dev, "PCI probing failed\n"); @@ -1137,6 +1254,10 @@ static int enetc_pf_probe(struct pci_dev *pdev, pf->si = si; pf->total_vfs = pci_sriov_get_totalvfs(pdev); + err = enetc_setup_mac_addresses(node, pf); + if (err) + goto err_setup_mac_addresses; + enetc_configure_port(pf); enetc_get_si_caps(si); @@ -1204,6 +1325,7 @@ err_alloc_netdev: err_init_port_rss: err_init_port_rfs: err_device_disabled: +err_setup_mac_addresses: enetc_teardown_cbdr(&si->cbd_ring); err_setup_cbdr: err_map_pf_space: diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index cb7fa4bceaf2..af699f2ad095 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -455,11 +455,6 @@ static struct enetc_psfp epsfp = { static LIST_HEAD(enetc_block_cb_list); -static inline int enetc_get_port(struct enetc_ndev_priv *priv) -{ - return priv->si->pdev->devfn & 0x7; -} - /* Stream Identity Entry Set Descriptor */ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv, struct enetc_streamid *sid, @@ -504,7 +499,7 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv, si_conf = &cbd.sid_set; /* Only one port supported for one entry, set itself */ - si_conf->iports = cpu_to_le32(1 << 
enetc_get_port(priv)); + si_conf->iports = cpu_to_le32(1 << enetc_pf_to_port(priv->si->pdev)); si_conf->id_type = 1; si_conf->oui[2] = 0x0; si_conf->oui[1] = 0x80; @@ -529,7 +524,7 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv, si_conf->en = 0x80; si_conf->stream_handle = cpu_to_le32(sid->handle); - si_conf->iports = cpu_to_le32(1 << enetc_get_port(priv)); + si_conf->iports = cpu_to_le32(1 << enetc_pf_to_port(priv->si->pdev)); si_conf->id_type = sid->filtertype; si_conf->oui[2] = 0x0; si_conf->oui[1] = 0x80; @@ -591,7 +586,8 @@ static int enetc_streamfilter_hw_set(struct enetc_ndev_priv *priv, } sfi_config->sg_inst_table_index = cpu_to_le16(sfi->gate_id); - sfi_config->input_ports = cpu_to_le32(1 << enetc_get_port(priv)); + sfi_config->input_ports = + cpu_to_le32(1 << enetc_pf_to_port(priv->si->pdev)); /* The priority value which may be matched against the * frame's priority value to determine a match for this entry. @@ -1562,10 +1558,10 @@ int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data) switch (f->command) { case FLOW_BLOCK_BIND: - set_bit(enetc_get_port(priv), &epsfp.dev_bitmap); + set_bit(enetc_pf_to_port(priv->si->pdev), &epsfp.dev_bitmap); break; case FLOW_BLOCK_UNBIND: - clear_bit(enetc_get_port(priv), &epsfp.dev_bitmap); + clear_bit(enetc_pf_to_port(priv->si->pdev), &epsfp.dev_bitmap); if (!epsfp.dev_bitmap) clean_psfp_all(); break; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 70aea9c274fe..aecc111fbe73 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1665,6 +1665,7 @@ static void fec_get_mac(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); unsigned char *iap, tmpaddr[ETH_ALEN]; + int ret; /* * try to get mac address in following order: @@ -1680,9 +1681,9 @@ static void fec_get_mac(struct net_device *ndev) if (!is_valid_ether_addr(iap)) { struct device_node *np = fep->pdev->dev.of_node; if (np) { - const char *mac = of_get_mac_address(np); - if (!IS_ERR(mac)) - iap = (unsigned char *) mac; + ret = of_get_mac_address(np, tmpaddr); + if (!ret) + iap = tmpaddr; } } diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index b3bad429e03b..02c47658a215 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -813,7 +813,6 @@ static int mpc52xx_fec_probe(struct platform_device *op) const u32 *prop; int prop_size; struct device_node *np = op->dev.of_node; - const char *mac_addr; phys_addr_t rx_fifo; phys_addr_t tx_fifo; @@ -891,10 +890,8 @@ static int mpc52xx_fec_probe(struct platform_device *op) * * First try to read MAC address from DT */ - mac_addr = of_get_mac_address(np); - if (!IS_ERR(mac_addr)) { - ether_addr_copy(ndev->dev_addr, mac_addr); - } else { + rv = of_get_mac_address(np, ndev->dev_addr); + if (rv) { struct mpc52xx_fec __iomem *fec = priv->fec; /* diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 901749a7a318..46ecb42f2ef8 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -605,7 +605,6 @@ static int mac_probe(struct platform_device *_of_dev) struct platform_device *of_dev; struct resource res; struct mac_priv_s *priv; - const u8 *mac_addr; u32 val; u8 fman_id; phy_interface_t phy_if; @@ -723,11 +722,9 @@ static int mac_probe(struct platform_device *_of_dev) priv->cell_index =
(u8)val; /* Get the MAC address */ - mac_addr = of_get_mac_address(mac_node); - if (IS_ERR(mac_addr)) + err = of_get_mac_address(mac_node, mac_dev->addr); + if (err) dev_warn(dev, "of_get_mac_address(%pOF) failed\n", mac_node); - else - ether_addr_copy(mac_dev->addr, mac_addr); /* Get the port handles */ nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL); @@ -853,7 +850,7 @@ static int mac_probe(struct platform_device *_of_dev) if (err < 0) dev_err(dev, "fman_set_mac_active_pause() = %d\n", err); - if (!IS_ERR(mac_addr)) + if (!is_zero_ether_addr(mac_dev->addr)) dev_info(dev, "FMan MAC address: %pM\n", mac_dev->addr); priv->eth_dev = dpaa_eth_add_device(fman_id, mac_dev); diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 78e008b81374..6ee325ad35c5 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -918,7 +918,6 @@ static int fs_enet_probe(struct platform_device *ofdev) const u32 *data; struct clk *clk; int err; - const u8 *mac_addr; const char *phy_connection_type; int privsize, len, ret = -ENODEV; @@ -1006,9 +1005,7 @@ static int fs_enet_probe(struct platform_device *ofdev) spin_lock_init(&fep->lock); spin_lock_init(&fep->tx_lock); - mac_addr = of_get_mac_address(ofdev->dev.of_node); - if (!IS_ERR(mac_addr)) - ether_addr_copy(ndev->dev_addr, mac_addr); + of_get_mac_address(ofdev->dev.of_node, ndev->dev_addr); ret = fep->ops->allocate_bd(ndev); if (ret) diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 3ec4d9fddd52..f2945abdb041 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -175,10 +175,7 @@ static void gfar_mac_rx_config(struct gfar_private *priv) if (priv->rx_filer_enable) { rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT; /* Program the RIR0 reg with the required distribution */ - if (priv->poll_mode == GFAR_SQ_POLLING) - gfar_write(®s->rir0, DEFAULT_2RXQ_RIR0); - else /* GFAR_MQ_POLLING */ - gfar_write(®s->rir0, DEFAULT_8RXQ_RIR0); + gfar_write(®s->rir0, DEFAULT_2RXQ_RIR0); } /* Restore PROMISC mode */ @@ -521,29 +518,9 @@ static int gfar_parse_group(struct device_node *np, grp->priv = priv; spin_lock_init(&grp->grplock); if (priv->mode == MQ_MG_MODE) { - u32 rxq_mask, txq_mask; - int ret; - + /* One Q per interrupt group: Q0 to G0, Q1 to G1 */ grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); - - ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask); - if (!ret) { - grp->rx_bit_map = rxq_mask ? - rxq_mask : (DEFAULT_MAPPING >> priv->num_grps); - } - - ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask); - if (!ret) { - grp->tx_bit_map = txq_mask ? 
- txq_mask : (DEFAULT_MAPPING >> priv->num_grps); - } - - if (priv->poll_mode == GFAR_SQ_POLLING) { - /* One Q per interrupt group: Q0 to G0, Q1 to G1 */ - grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); - grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); - } } else { grp->rx_bit_map = 0xFF; grp->tx_bit_map = 0xFF; @@ -640,7 +617,6 @@ static phy_interface_t gfar_get_interface(struct net_device *dev) static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) { const char *model; - const void *mac_addr; int err = 0, i; phy_interface_t interface; struct net_device *dev = NULL; @@ -650,18 +626,15 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) u32 stash_len = 0; u32 stash_idx = 0; unsigned int num_tx_qs, num_rx_qs; - unsigned short mode, poll_mode; + unsigned short mode; if (!np) return -ENODEV; - if (of_device_is_compatible(np, "fsl,etsec2")) { + if (of_device_is_compatible(np, "fsl,etsec2")) mode = MQ_MG_MODE; - poll_mode = GFAR_SQ_POLLING; - } else { + else mode = SQ_SG_MODE; - poll_mode = GFAR_SQ_POLLING; - } if (mode == SQ_SG_MODE) { num_tx_qs = 1; @@ -677,22 +650,8 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) return -EINVAL; } - if (poll_mode == GFAR_SQ_POLLING) { - num_tx_qs = num_grps; /* one txq per int group */ - num_rx_qs = num_grps; /* one rxq per int group */ - } else { /* GFAR_MQ_POLLING */ - u32 tx_queues, rx_queues; - int ret; - - /* parse the num of HW tx and rx queues */ - ret = of_property_read_u32(np, "fsl,num_tx_queues", - &tx_queues); - num_tx_qs = ret ? 1 : tx_queues; - - ret = of_property_read_u32(np, "fsl,num_rx_queues", - &rx_queues); - num_rx_qs = ret ? 1 : rx_queues; - } + num_tx_qs = num_grps; /* one txq per int group */ + num_rx_qs = num_grps; /* one rxq per int group */ } if (num_tx_qs > MAX_TX_QS) { @@ -718,7 +677,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) priv->ndev = dev; priv->mode = mode; - priv->poll_mode = poll_mode; priv->num_tx_queues = num_tx_qs; netif_set_real_num_rx_queues(dev, num_rx_qs); @@ -782,11 +740,8 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) if (stash_len || stash_idx) priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING; - mac_addr = of_get_mac_address(np); - - if (!IS_ERR(mac_addr)) { - ether_addr_copy(dev->dev_addr, mac_addr); - } else { + err = of_get_mac_address(np, dev->dev_addr); + if (err) { eth_hw_addr_random(dev); dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr); } @@ -2695,106 +2650,6 @@ static int gfar_poll_tx_sq(struct napi_struct *napi, int budget) return 0; } -static int gfar_poll_rx(struct napi_struct *napi, int budget) -{ - struct gfar_priv_grp *gfargrp = - container_of(napi, struct gfar_priv_grp, napi_rx); - struct gfar_private *priv = gfargrp->priv; - struct gfar __iomem *regs = gfargrp->regs; - struct gfar_priv_rx_q *rx_queue = NULL; - int work_done = 0, work_done_per_q = 0; - int i, budget_per_q = 0; - unsigned long rstat_rxf; - int num_act_queues; - - /* Clear IEVENT, so interrupts aren't called again - * because of the packets that have already arrived - */ - gfar_write(®s->ievent, IEVENT_RX_MASK); - - rstat_rxf = gfar_read(®s->rstat) & RSTAT_RXF_MASK; - - num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS); - if (num_act_queues) - budget_per_q = budget/num_act_queues; - - for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { - /* skip queue if not active */ - if (!(rstat_rxf & 
(RSTAT_CLEAR_RXF0 >> i))) - continue; - - rx_queue = priv->rx_queue[i]; - work_done_per_q = - gfar_clean_rx_ring(rx_queue, budget_per_q); - work_done += work_done_per_q; - - /* finished processing this queue */ - if (work_done_per_q < budget_per_q) { - /* clear active queue hw indication */ - gfar_write(®s->rstat, - RSTAT_CLEAR_RXF0 >> i); - num_act_queues--; - - if (!num_act_queues) - break; - } - } - - if (!num_act_queues) { - u32 imask; - napi_complete_done(napi, work_done); - - /* Clear the halt bit in RSTAT */ - gfar_write(®s->rstat, gfargrp->rstat); - - spin_lock_irq(&gfargrp->grplock); - imask = gfar_read(®s->imask); - imask |= IMASK_RX_DEFAULT; - gfar_write(®s->imask, imask); - spin_unlock_irq(&gfargrp->grplock); - } - - return work_done; -} - -static int gfar_poll_tx(struct napi_struct *napi, int budget) -{ - struct gfar_priv_grp *gfargrp = - container_of(napi, struct gfar_priv_grp, napi_tx); - struct gfar_private *priv = gfargrp->priv; - struct gfar __iomem *regs = gfargrp->regs; - struct gfar_priv_tx_q *tx_queue = NULL; - int has_tx_work = 0; - int i; - - /* Clear IEVENT, so interrupts aren't called again - * because of the packets that have already arrived - */ - gfar_write(®s->ievent, IEVENT_TX_MASK); - - for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) { - tx_queue = priv->tx_queue[i]; - /* run Tx cleanup to completion */ - if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) { - gfar_clean_tx_ring(tx_queue); - has_tx_work = 1; - } - } - - if (!has_tx_work) { - u32 imask; - napi_complete(napi); - - spin_lock_irq(&gfargrp->grplock); - imask = gfar_read(®s->imask); - imask |= IMASK_TX_DEFAULT; - gfar_write(®s->imask, imask); - spin_unlock_irq(&gfargrp->grplock); - } - - return 0; -} - /* GFAR error interrupt handler */ static irqreturn_t gfar_error(int irq, void *grp_id) { @@ -3352,17 +3207,10 @@ static int gfar_probe(struct platform_device *ofdev) /* Register for napi ...We are registering NAPI for each grp */ for (i = 0; i < priv->num_grps; i++) { - if (priv->poll_mode == GFAR_SQ_POLLING) { - netif_napi_add(dev, &priv->gfargrp[i].napi_rx, - gfar_poll_rx_sq, GFAR_DEV_WEIGHT); - netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx, - gfar_poll_tx_sq, 2); - } else { - netif_napi_add(dev, &priv->gfargrp[i].napi_rx, - gfar_poll_rx, GFAR_DEV_WEIGHT); - netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx, - gfar_poll_tx, 2); - } + netif_napi_add(dev, &priv->gfargrp[i].napi_rx, + gfar_poll_rx_sq, GFAR_DEV_WEIGHT); + netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx, + gfar_poll_tx_sq, 2); } if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index 8ced783f5302..5ea47df93e5e 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -909,22 +909,6 @@ enum { MQ_MG_MODE }; -/* GFAR_SQ_POLLING: Single Queue NAPI polling mode - * The driver supports a single pair of RX/Tx queues - * per interrupt group (Rx/Tx int line). MQ_MG mode - * devices have 2 interrupt groups, so the device will - * have a total of 2 Tx and 2 Rx queues in this case. - * GFAR_MQ_POLLING: Multi Queue NAPI polling mode - * The driver supports all the 8 Rx and Tx HW queues - * each queue mapped by the Device Tree to one of - * the 2 interrupt groups. This mode implies significant - * processing overhead (CPU and controller level). 
- */ -enum gfar_poll_mode { - GFAR_SQ_POLLING = 0, - GFAR_MQ_POLLING -}; - /* * Per TX queue stats */ @@ -1105,7 +1089,6 @@ struct gfar_private { unsigned long state; unsigned short mode; - unsigned short poll_mode; unsigned int num_tx_queues; unsigned int num_rx_queues; unsigned int num_grps; diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index ef4e2febeb5b..e0936510fa34 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -3562,7 +3562,6 @@ static int ucc_geth_probe(struct platform_device* ofdev) struct resource res; int err, ucc_num, max_speed = 0; const unsigned int *prop; - const void *mac_addr; phy_interface_t phy_interface; static const int enet_to_speed[] = { SPEED_10, SPEED_10, SPEED_10, @@ -3733,9 +3732,7 @@ static int ucc_geth_probe(struct platform_device* ofdev) goto err_free_netdev; } - mac_addr = of_get_mac_address(np); - if (!IS_ERR(mac_addr)) - ether_addr_copy(dev->dev_addr, mac_addr); + of_get_mac_address(np, dev->dev_addr); ugeth->ug_info = ug_info; ugeth->dev = device; diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c index 57c3bc4f7089..3c4db4a6b431 100644 --- a/drivers/net/ethernet/hisilicon/hisi_femac.c +++ b/drivers/net/ethernet/hisilicon/hisi_femac.c @@ -772,7 +772,6 @@ static int hisi_femac_drv_probe(struct platform_device *pdev) struct net_device *ndev; struct hisi_femac_priv *priv; struct phy_device *phy; - const char *mac_addr; int ret; ndev = alloc_etherdev(sizeof(*priv)); @@ -842,10 +841,8 @@ static int hisi_femac_drv_probe(struct platform_device *pdev) (unsigned long)phy->phy_id, phy_modes(phy->interface)); - mac_addr = of_get_mac_address(node); - if (!IS_ERR(mac_addr)) - ether_addr_copy(ndev->dev_addr, mac_addr); - if (!is_valid_ether_addr(ndev->dev_addr)) { + ret = of_get_mac_address(node, ndev->dev_addr); + if (ret) { eth_hw_addr_random(ndev); dev_warn(dev, "using random MAC address %pM\n", ndev->dev_addr); diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c index 8b2bf85039f1..c1aae0fca5e9 100644 --- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c @@ -1098,7 +1098,6 @@ static int hix5hd2_dev_probe(struct platform_device *pdev) struct net_device *ndev; struct hix5hd2_priv *priv; struct mii_bus *bus; - const char *mac_addr; int ret; ndev = alloc_etherdev(sizeof(struct hix5hd2_priv)); @@ -1220,10 +1219,8 @@ static int hix5hd2_dev_probe(struct platform_device *pdev) goto out_phy_node; } - mac_addr = of_get_mac_address(node); - if (!IS_ERR(mac_addr)) - ether_addr_copy(ndev->dev_addr, mac_addr); - if (!is_valid_ether_addr(ndev->dev_addr)) { + ret = of_get_mac_address(node, ndev->dev_addr); + if (ret) { eth_hw_addr_random(ndev); netdev_warn(ndev, "using random MAC address %pM\n", ndev->dev_addr); diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index 33defa4c180a..a2c17af57fde 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -172,4 +172,7 @@ struct hclgevf_mbx_arq_ring { (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM) #define hclge_mbx_head_ptr_move_arq(arq) \ (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM) + +/* PF immediately push link status to VFs when link status changed */ +#define HCLGE_MBX_PUSH_LINK_STATUS_EN BIT(0) #endif diff --git 
a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 1c17fdc780e9..c296ab64fb0a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -2880,6 +2880,28 @@ static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status) return hclge_get_mac_link_status(hdev, link_status); } +static void hclge_push_link_status(struct hclge_dev *hdev) +{ + struct hclge_vport *vport; + int ret; + u16 i; + + for (i = 0; i < pci_num_vf(hdev->pdev); i++) { + vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM]; + + if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) || + vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO) + continue; + + ret = hclge_push_vf_link_status(vport); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to push link status to vf%u, ret = %d\n", + i, ret); + } + } +} + static void hclge_update_link_status(struct hclge_dev *hdev) { struct hnae3_handle *rhandle = &hdev->vport[0].roce; @@ -2908,6 +2930,7 @@ static void hclge_update_link_status(struct hclge_dev *hdev) rclient->ops->link_status_change(rhandle, state); hdev->hw.mac.link = state; + hclge_push_link_status(hdev); } clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); @@ -3246,14 +3269,24 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf, { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; + int link_state_old; + int ret; vport = hclge_get_vf_vport(hdev, vf); if (!vport) return -EINVAL; + link_state_old = vport->vf_info.link_state; vport->vf_info.link_state = link_state; - return 0; + ret = hclge_push_vf_link_status(vport); + if (ret) { + vport->vf_info.link_state = link_state_old; + dev_err(&hdev->pdev->dev, + "failed to push vf%d link status, ret = %d\n", vf, ret); + } + + return ret; } static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) @@ -11093,8 +11126,6 @@ retry: if (hdev->reset_type == HNAE3_FLR_RESET) hdev->rst_stats.flr_rst_cnt++; - else if (hdev->reset_type == HNAE3_FUNC_RESET) - hdev->rst_stats.pf_rst_cnt++; } static void hclge_reset_done(struct hnae3_ae_dev *ae_dev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index c1aaf7c534c9..ff1d47308c2d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -1089,4 +1089,5 @@ void hclge_report_hw_error(struct hclge_dev *hdev, enum hnae3_hw_error_type type); void hclge_inform_vf_promisc_info(struct hclge_vport *vport); void hclge_dbg_dump_rst_info(struct hclge_dev *hdev); +int hclge_push_vf_link_status(struct hclge_vport *vport); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index c88607bdda59..5512ffe0a149 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -490,16 +490,14 @@ static void hclge_get_vf_media_type(struct hclge_vport *vport, resp_msg->len = HCLGE_VF_MEDIA_TYPE_LENGTH; } -static int hclge_get_link_info(struct hclge_vport *vport, - struct hclge_mbx_vf_to_pf_cmd *mbx_req) +int hclge_push_vf_link_status(struct hclge_vport *vport) { #define HCLGE_VF_LINK_STATE_UP 1U #define HCLGE_VF_LINK_STATE_DOWN 0U struct hclge_dev *hdev = vport->back; u16 link_status; - u8 msg_data[8]; - u8 dest_vfid; + u8 msg_data[9]; u16 
duplex; /* mac.link can only be 0 or 1 */ @@ -520,11 +518,11 @@ static int hclge_get_link_info(struct hclge_vport *vport, memcpy(&msg_data[0], &link_status, sizeof(u16)); memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32)); memcpy(&msg_data[6], &duplex, sizeof(u16)); - dest_vfid = mbx_req->mbx_src_vfid; + msg_data[8] = HCLGE_MBX_PUSH_LINK_STATUS_EN; /* send this requested info to VF */ return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data), - HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid); + HCLGE_MBX_LINK_STAT_CHANGE, vport->vport_id); } static void hclge_get_link_mode(struct hclge_vport *vport, @@ -794,7 +792,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev) hclge_get_vf_tcinfo(vport, &resp_msg); break; case HCLGE_MBX_GET_LINK_STATUS: - ret = hclge_get_link_info(vport, req); + ret = hclge_push_vf_link_status(vport); if (ret) dev_err(&hdev->pdev->dev, "failed to inform link stat to VF, ret = %d\n", diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 07aa26ba0966..0db51ef15ef6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -2340,10 +2340,11 @@ static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev) if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL)) hclgevf_tqps_update_stats(handle); - /* request the link status from the PF. PF would be able to tell VF - * about such updates in future so we might remove this later + /* VF does not need to request link status when this bit is set, because + * PF will push its link status to VFs when link status changed. */ - hclgevf_request_link_info(hdev); + if (!test_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state)) + hclgevf_request_link_info(hdev); hclgevf_update_link_mode(hdev); @@ -2657,6 +2658,7 @@ static int hclgevf_ae_start(struct hnae3_handle *handle) struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); + clear_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state); hclgevf_reset_tqp_stats(handle); @@ -3644,7 +3646,7 @@ static void hclgevf_get_link_mode(struct hnae3_handle *handle, } #define MAX_SEPARATE_NUM 4 -#define SEPARATOR_VALUE 0xFFFFFFFF +#define SEPARATOR_VALUE 0xFDFCFBFA #define REG_NUM_PER_LINE 4 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index ade6e7f5be5b..265c9b0b4728 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -152,6 +152,7 @@ enum hclgevf_states { HCLGEVF_STATE_LINK_UPDATING, HCLGEVF_STATE_PROMISC_CHANGED, HCLGEVF_STATE_RST_FAIL, + HCLGEVF_STATE_PF_PUSH_LINK_STATUS, }; struct hclgevf_mac { @@ -176,9 +177,9 @@ struct hclgevf_hw { /* TQP stats */ struct hlcgevf_tqp_stats { - /* query_tqp_tx_queue_statistics ,opcode id: 0x0B03 */ + /* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */ u64 rcb_tx_ring_pktnum_rcd; /* 32bit */ - /* query_tqp_rx_queue_statistics ,opcode id: 0x0B13 */ + /* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */ u64 rcb_rx_ring_pktnum_rcd; /* 32bit */ }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index 5b2dcd97c107..9b17735b9f4c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ 
b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -276,6 +276,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) u8 duplex; u32 speed; u32 tail; + u8 flag; u8 idx; /* we can safely clear it now as we are at start of the async message @@ -300,11 +301,16 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) link_status = msg_q[1]; memcpy(&speed, &msg_q[2], sizeof(speed)); duplex = (u8)msg_q[4]; + flag = (u8)msg_q[5]; /* update upper layer with new link link status */ hclgevf_update_link_status(hdev, link_status); hclgevf_update_speed_duplex(hdev, speed, duplex); + if (flag & HCLGE_MBX_PUSH_LINK_STATUS_EN) + set_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, + &hdev->state); + break; case HCLGE_MBX_LINK_STAT_MODE: idx = (u8)msg_q[1]; diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index ee9bf18c597f..5788bb956d73 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1173,19 +1173,13 @@ static int __ibmvnic_open(struct net_device *netdev) rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP); if (rc) { - for (i = 0; i < adapter->req_rx_queues; i++) - napi_disable(&adapter->napi[i]); + ibmvnic_napi_disable(adapter); release_resources(adapter); return rc; } netif_tx_start_all_queues(netdev); - if (prev_state == VNIC_CLOSED) { - for (i = 0; i < adapter->req_rx_queues; i++) - napi_schedule(&adapter->napi[i]); - } - adapter->state = VNIC_OPEN; return rc; } @@ -1967,7 +1961,7 @@ static int do_reset(struct ibmvnic_adapter *adapter, u64 old_num_rx_queues, old_num_tx_queues; u64 old_num_rx_slots, old_num_tx_slots; struct net_device *netdev = adapter->netdev; - int i, rc; + int rc; netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Reset reason: %s, reset_state: %s\n", @@ -2158,10 +2152,6 @@ static int do_reset(struct ibmvnic_adapter *adapter, /* refresh device's multicast list */ ibmvnic_set_multi(netdev); - /* kick napi */ - for (i = 0; i < adapter->req_rx_queues; i++) - napi_schedule(&adapter->napi[i]); - if (adapter->reset_reason == VNIC_RESET_FAILOVER || adapter->reset_reason == VNIC_RESET_MOBILITY) __netdev_notify_peers(netdev); @@ -2292,8 +2282,9 @@ static void __ibmvnic_reset(struct work_struct *work) adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset); if (test_and_set_bit_lock(0, &adapter->resetting)) { - schedule_delayed_work(&adapter->ibmvnic_delayed_reset, - IBMVNIC_RESET_DELAY); + queue_delayed_work(system_long_wq, + &adapter->ibmvnic_delayed_reset, + IBMVNIC_RESET_DELAY); return; } @@ -2437,7 +2428,7 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter, list_add_tail(&rwi->list, &adapter->rwi_list); netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n", reset_reason_to_string(reason)); - schedule_work(&adapter->ibmvnic_reset); + queue_work(system_long_wq, &adapter->ibmvnic_reset); ret = 0; err: @@ -3255,9 +3246,6 @@ restart_loop: next = ibmvnic_next_scrq(adapter, scrq); for (i = 0; i < next->tx_comp.num_comps; i++) { - if (next->tx_comp.rcs[i]) - dev_err(dev, "tx error %x\n", - next->tx_comp.rcs[i]); index = be32_to_cpu(next->tx_comp.correlators[i]); if (index & IBMVNIC_TSO_POOL_MASK) { tx_pool = &adapter->tso_pool[pool]; @@ -3271,7 +3259,13 @@ restart_loop: num_entries += txbuff->num_entries; if (txbuff->skb) { total_bytes += txbuff->skb->len; - dev_consume_skb_irq(txbuff->skb); + if (next->tx_comp.rcs[i]) { + dev_err(dev, "tx error %x\n", + next->tx_comp.rcs[i]); + dev_kfree_skb_irq(txbuff->skb); + } else { + dev_consume_skb_irq(txbuff->skb); + } txbuff->skb = NULL; } 
else { netdev_warn(adapter->netdev, @@ -5503,7 +5497,7 @@ static ssize_t failover_store(struct device *dev, struct device_attribute *attr, if (rc) { netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n", rc); - return -EINVAL; + goto last_resort; } session_token = (__be64)retbuf[0]; @@ -5511,15 +5505,17 @@ static ssize_t failover_store(struct device *dev, struct device_attribute *attr, be64_to_cpu(session_token)); rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, H_SESSION_ERR_DETECTED, session_token, 0, 0); - if (rc) { - netdev_err(netdev, "Client initiated failover failed, rc %ld\n", + if (rc) + netdev_err(netdev, + "H_VIOCTL initiated failover failed, rc %ld\n", rc); - return -EINVAL; - } + +last_resort: + netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n"); + ibmvnic_reset(adapter, VNIC_RESET_FAILOVER); return count; } - static DEVICE_ATTR_WO(failover); static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev) diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 5aa86318ed3e..c1d155690341 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -294,6 +294,7 @@ config ICE tristate "Intel(R) Ethernet Connection E800 Series Support" default n depends on PCI_MSI + select DIMLIB select NET_DEVLINK select PLDMFW help diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 9502e043a0b7..687ef52a8116 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -12357,6 +12357,7 @@ static int i40e_sw_init(struct i40e_pf *pf) { int err = 0; int size; + u16 pow; /* Set default capability flags */ pf->flags = I40E_FLAG_RX_CSUM_ENABLED | @@ -12375,6 +12376,11 @@ static int i40e_sw_init(struct i40e_pf *pf) pf->rss_table_size = pf->hw.func_caps.rss_table_size; pf->rss_size_max = min_t(int, pf->rss_size_max, pf->hw.func_caps.num_tx_qp); + + /* find the next higher power-of-2 of num cpus */ + pow = roundup_pow_of_two(num_online_cpus()); + pf->rss_size_max = min_t(int, pf->rss_size_max, pow); + if (pf->hw.func_caps.rss) { pf->flags |= I40E_FLAG_RSS_ENABLED; pf->alloc_rss_size = min_t(int, pf->rss_size_max, diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 07777ac4f098..7ae10fd87265 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -36,6 +36,7 @@ #include <linux/bpf.h> #include <linux/avf/virtchnl.h> #include <linux/cpu_rmap.h> +#include <linux/dim.h> #include <net/devlink.h> #include <net/ipv6.h> #include <net/xdp_sock.h> @@ -44,6 +45,9 @@ #include <net/gre.h> #include <net/udp_tunnel.h> #include <net/vxlan.h> +#if IS_ENABLED(CONFIG_DCB) +#include <scsi/iscsi_proto.h> +#endif /* CONFIG_DCB */ #include "ice_devids.h" #include "ice_type.h" #include "ice_txrx.h" @@ -194,50 +198,52 @@ struct ice_sw { }; enum ice_pf_state { - __ICE_TESTING, - __ICE_DOWN, - __ICE_NEEDS_RESTART, - __ICE_PREPARED_FOR_RESET, /* set by driver when prepared */ - __ICE_RESET_OICR_RECV, /* set by driver after rcv reset OICR */ - __ICE_PFR_REQ, /* set by driver and peers */ - __ICE_CORER_REQ, /* set by driver and peers */ - __ICE_GLOBR_REQ, /* set by driver and peers */ - __ICE_CORER_RECV, /* set by OICR handler */ - __ICE_GLOBR_RECV, /* set by OICR handler */ - __ICE_EMPR_RECV, /* set by OICR handler */ - __ICE_SUSPENDED, /* set on module remove path */ - __ICE_RESET_FAILED, /* set by reset/rebuild */ + ICE_TESTING, + ICE_DOWN, 
+ ICE_NEEDS_RESTART, + ICE_PREPARED_FOR_RESET, /* set by driver when prepared */ + ICE_RESET_OICR_RECV, /* set by driver after rcv reset OICR */ + ICE_PFR_REQ, /* set by driver and peers */ + ICE_CORER_REQ, /* set by driver and peers */ + ICE_GLOBR_REQ, /* set by driver and peers */ + ICE_CORER_RECV, /* set by OICR handler */ + ICE_GLOBR_RECV, /* set by OICR handler */ + ICE_EMPR_RECV, /* set by OICR handler */ + ICE_SUSPENDED, /* set on module remove path */ + ICE_RESET_FAILED, /* set by reset/rebuild */ /* When checking for the PF to be in a nominal operating state, the * bits that are grouped at the beginning of the list need to be - * checked. Bits occurring before __ICE_STATE_NOMINAL_CHECK_BITS will + * checked. Bits occurring before ICE_STATE_NOMINAL_CHECK_BITS will * be checked. If you need to add a bit into consideration for nominal * operating state, it must be added before - * __ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position + * ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position * without appropriate consideration. */ - __ICE_STATE_NOMINAL_CHECK_BITS, - __ICE_ADMINQ_EVENT_PENDING, - __ICE_MAILBOXQ_EVENT_PENDING, - __ICE_MDD_EVENT_PENDING, - __ICE_VFLR_EVENT_PENDING, - __ICE_FLTR_OVERFLOW_PROMISC, - __ICE_VF_DIS, - __ICE_CFG_BUSY, - __ICE_SERVICE_SCHED, - __ICE_SERVICE_DIS, - __ICE_FD_FLUSH_REQ, - __ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */ - __ICE_MDD_VF_PRINT_PENDING, /* set when MDD event handle */ - __ICE_VF_RESETS_DISABLED, /* disable resets during ice_remove */ - __ICE_LINK_DEFAULT_OVERRIDE_PENDING, - __ICE_PHY_INIT_COMPLETE, - __ICE_FD_VF_FLUSH_CTX, /* set at FD Rx IRQ or timeout */ - __ICE_STATE_NBITS /* must be last */ + ICE_STATE_NOMINAL_CHECK_BITS, + ICE_ADMINQ_EVENT_PENDING, + ICE_MAILBOXQ_EVENT_PENDING, + ICE_MDD_EVENT_PENDING, + ICE_VFLR_EVENT_PENDING, + ICE_FLTR_OVERFLOW_PROMISC, + ICE_VF_DIS, + ICE_CFG_BUSY, + ICE_SERVICE_SCHED, + ICE_SERVICE_DIS, + ICE_FD_FLUSH_REQ, + ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */ + ICE_MDD_VF_PRINT_PENDING, /* set when MDD event handle */ + ICE_VF_RESETS_DISABLED, /* disable resets during ice_remove */ + ICE_LINK_DEFAULT_OVERRIDE_PENDING, + ICE_PHY_INIT_COMPLETE, + ICE_FD_VF_FLUSH_CTX, /* set at FD Rx IRQ or timeout */ + ICE_STATE_NBITS /* must be last */ }; enum ice_vsi_state { ICE_VSI_DOWN, ICE_VSI_NEEDS_RESTART, + ICE_VSI_NETDEV_ALLOCD, + ICE_VSI_NETDEV_REGISTERED, ICE_VSI_UMAC_FLTR_CHANGED, ICE_VSI_MMAC_FLTR_CHANGED, ICE_VSI_VLAN_FLTR_CHANGED, @@ -346,7 +352,7 @@ struct ice_q_vector { u16 reg_idx; u8 num_ring_rx; /* total number of Rx rings in vector */ u8 num_ring_tx; /* total number of Tx rings in vector */ - u8 itr_countdown; /* when 0 should adjust adaptive ITR */ + u8 wb_on_itr:1; /* if true, WB on ITR is enabled */ /* in usecs, need to use ice_intrl_to_usecs_reg() before writing this * value to the device */ @@ -361,6 +367,8 @@ struct ice_q_vector { struct irq_affinity_notify affinity_notify; char name[ICE_INT_NAME_STR_LEN]; + + u16 total_events; /* net_dim(): number of interrupts processed */ } ____cacheline_internodealigned_in_smp; enum ice_pf_flags { @@ -418,7 +426,7 @@ struct ice_pf { u16 num_msix_per_vf; /* used to ratelimit the MDD event logging */ unsigned long last_printed_mdd_jiffies; - DECLARE_BITMAP(state, __ICE_STATE_NBITS); + DECLARE_BITMAP(state, ICE_STATE_NBITS); DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS); unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */ unsigned long *avail_rxqs; /* bitmap to track PF Rx queue usage */ diff --git 
a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index be26775a7dfe..5985a7e5ca8a 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -113,6 +113,9 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx) q_vector->v_idx = v_idx; q_vector->tx.itr_setting = ICE_DFLT_TX_ITR; q_vector->rx.itr_setting = ICE_DFLT_RX_ITR; + q_vector->tx.itr_mode = ITR_DYNAMIC; + q_vector->rx.itr_mode = ITR_DYNAMIC; + if (vsi->type == ICE_VSI_VF) goto out; /* only set affinity_mask if the CPU is online */ @@ -740,25 +743,13 @@ void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector) { ice_cfg_itr_gran(hw); - if (q_vector->num_ring_rx) { - struct ice_ring_container *rc = &q_vector->rx; - - rc->target_itr = ITR_TO_REG(rc->itr_setting); - rc->next_update = jiffies + 1; - rc->current_itr = rc->target_itr; - wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx), - ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S); - } + if (q_vector->num_ring_rx) + ice_write_itr(&q_vector->rx, q_vector->rx.itr_setting); - if (q_vector->num_ring_tx) { - struct ice_ring_container *rc = &q_vector->tx; + if (q_vector->num_ring_tx) + ice_write_itr(&q_vector->tx, q_vector->tx.itr_setting); - rc->target_itr = ITR_TO_REG(rc->itr_setting); - rc->next_update = jiffies + 1; - rc->current_itr = rc->target_itr; - wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx), - ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S); - } + ice_write_intrl(q_vector, q_vector->intrl); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index 0f207a42ea77..87b33bdd4960 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -1097,6 +1097,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_rq_event_info *e, u16 *pending) { u16 ntc = cq->rq.next_to_clean; + enum ice_aq_err rq_last_status; enum ice_status ret_code = 0; struct ice_aq_desc *desc; struct ice_dma_mem *bi; @@ -1130,13 +1131,12 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, desc = ICE_CTL_Q_DESC(cq->rq, ntc); desc_idx = ntc; - cq->rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval); + rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval); flags = le16_to_cpu(desc->flags); if (flags & ICE_AQ_FLAG_ERR) { ret_code = ICE_ERR_AQ_ERROR; ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n", - le16_to_cpu(desc->opcode), - cq->rq_last_status); + le16_to_cpu(desc->opcode), rq_last_status); } memcpy(&e->desc, desc, sizeof(e->desc)); datalen = le16_to_cpu(desc->datalen); diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h index 77c2307d4fb8..fe75871e48ca 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.h +++ b/drivers/net/ethernet/intel/ice/ice_controlq.h @@ -83,7 +83,6 @@ struct ice_rq_event_info { /* Control Queue information */ struct ice_ctl_q_info { enum ice_ctl_q qtype; - enum ice_aq_err rq_last_status; /* last status on receive queue */ struct ice_ctl_q_ring rq; /* receive queue */ struct ice_ctl_q_ring sq; /* send queue */ u32 sq_cmd_timeout; /* send queue cmd write back timeout */ diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c index 43c6af42de8a..849fcf605479 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb.c @@ -747,8 +747,8 
@@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg, struct ice_port_info *pi) { u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status); - u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift; - u8 i, j, err, sync, oper, app_index, ice_app_sel_type; + u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift, j; + u8 i, err, sync, oper, app_index, ice_app_sel_type; u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio); u16 ice_aqc_cee_app_mask, ice_aqc_cee_app_shift; struct ice_dcbx_cfg *cmp_dcbcfg, *dcbcfg; @@ -804,7 +804,7 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg, ice_aqc_cee_app_mask = ICE_AQC_CEE_APP_FCOE_M; ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_FCOE_S; ice_app_sel_type = ICE_APP_SEL_ETHTYPE; - ice_app_prot_id_type = ICE_APP_PROT_ID_FCOE; + ice_app_prot_id_type = ETH_P_FCOE; } else if (i == 1) { /* iSCSI APP */ ice_aqc_cee_status_mask = ICE_AQC_CEE_ISCSI_STATUS_M; @@ -812,14 +812,14 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg, ice_aqc_cee_app_mask = ICE_AQC_CEE_APP_ISCSI_M; ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_ISCSI_S; ice_app_sel_type = ICE_APP_SEL_TCPIP; - ice_app_prot_id_type = ICE_APP_PROT_ID_ISCSI; + ice_app_prot_id_type = ISCSI_LISTEN_PORT; for (j = 0; j < cmp_dcbcfg->numapps; j++) { u16 prot_id = cmp_dcbcfg->app[j].prot_id; u8 sel = cmp_dcbcfg->app[j].selector; if (sel == ICE_APP_SEL_TCPIP && - (prot_id == ICE_APP_PROT_ID_ISCSI || + (prot_id == ISCSI_LISTEN_PORT || prot_id == ICE_APP_PROT_ID_ISCSI_860)) { ice_app_prot_id_type = prot_id; break; @@ -832,7 +832,7 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg, ice_aqc_cee_app_mask = ICE_AQC_CEE_APP_FIP_M; ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_FIP_S; ice_app_sel_type = ICE_APP_SEL_ETHTYPE; - ice_app_prot_id_type = ICE_APP_PROT_ID_FIP; + ice_app_prot_id_type = ETH_P_FIP; } status = (tlv_status & ice_aqc_cee_status_mask) >> diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index 1e8f71ffc8ce..df02cffdf209 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -563,7 +563,7 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked) dcbcfg->numapps = 1; dcbcfg->app[0].selector = ICE_APP_SEL_ETHTYPE; dcbcfg->app[0].priority = 3; - dcbcfg->app[0].prot_id = ICE_APP_PROT_ID_FCOE; + dcbcfg->app[0].prot_id = ETH_P_FCOE; ret = ice_pf_dcb_cfg(pf, dcbcfg, locked); kfree(dcbcfg); diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index a39e890100d9..d9ddd0bcf65f 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -806,7 +806,7 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test, if (eth_test->flags == ETH_TEST_FL_OFFLINE) { netdev_info(netdev, "offline testing starting\n"); - set_bit(__ICE_TESTING, pf->state); + set_bit(ICE_TESTING, pf->state); if (ice_active_vfs(pf)) { dev_warn(dev, "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n"); @@ -816,7 +816,7 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test, data[ICE_ETH_TEST_LOOP] = 1; data[ICE_ETH_TEST_LINK] = 1; eth_test->flags |= ETH_TEST_FL_FAILED; - clear_bit(__ICE_TESTING, pf->state); + clear_bit(ICE_TESTING, pf->state); goto skip_ol_tests; } /* If the device is online then take it offline */ @@ -837,7 +837,7 @@ ice_self_test(struct net_device *netdev, struct 
ethtool_test *eth_test, data[ICE_ETH_TEST_REG]) eth_test->flags |= ETH_TEST_FL_FAILED; - clear_bit(__ICE_TESTING, pf->state); + clear_bit(ICE_TESTING, pf->state); if (if_running) { int status = ice_open(netdev); @@ -1097,7 +1097,7 @@ static int ice_nway_reset(struct net_device *netdev) int err; /* If VSI state is up, then restart autoneg with link up */ - if (!test_bit(__ICE_DOWN, vsi->back->state)) + if (!test_bit(ICE_DOWN, vsi->back->state)) err = ice_set_link(vsi, true); else err = ice_set_link(vsi, false); @@ -2282,7 +2282,7 @@ ice_set_link_ksettings(struct net_device *netdev, goto done; } - while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) { + while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) { timeout--; if (!timeout) { err = -EBUSY; @@ -2392,7 +2392,7 @@ ice_set_link_ksettings(struct net_device *netdev, pi->phy.curr_user_speed_req = adv_link_speed; done: kfree(phy_caps); - clear_bit(__ICE_CFG_BUSY, pf->state); + clear_bit(ICE_CFG_BUSY, pf->state); return err; } @@ -2748,7 +2748,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) if (ice_xsk_any_rx_ring_ena(vsi)) return -EBUSY; - while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) { + while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) { timeout--; if (!timeout) return -EBUSY; @@ -2927,7 +2927,7 @@ free_tx: } done: - clear_bit(__ICE_CFG_BUSY, pf->state); + clear_bit(ICE_CFG_BUSY, pf->state); return err; } @@ -3046,7 +3046,7 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) } /* If we have link and don't have autoneg */ - if (!test_bit(__ICE_DOWN, pf->state) && + if (!test_bit(ICE_DOWN, pf->state) && !(hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) { /* Send message that it might not necessarily work*/ netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n"); @@ -3510,13 +3510,13 @@ ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type, switch (c_type) { case ICE_RX_CONTAINER: - ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc->itr_setting); - ec->rx_coalesce_usecs = rc->itr_setting & ~ICE_ITR_DYNAMIC; + ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc); + ec->rx_coalesce_usecs = rc->itr_setting; ec->rx_coalesce_usecs_high = rc->ring->q_vector->intrl; break; case ICE_TX_CONTAINER: - ec->use_adaptive_tx_coalesce = ITR_IS_DYNAMIC(rc->itr_setting); - ec->tx_coalesce_usecs = rc->itr_setting & ~ICE_ITR_DYNAMIC; + ec->use_adaptive_tx_coalesce = ITR_IS_DYNAMIC(rc); + ec->tx_coalesce_usecs = rc->itr_setting; break; default: dev_dbg(ice_pf_to_dev(pf), "Invalid c_type %d\n", c_type); @@ -3634,11 +3634,16 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec, ICE_MAX_INTRL); return -EINVAL; } + if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl && + (ec->use_adaptive_rx_coalesce || ec->use_adaptive_tx_coalesce)) { + netdev_info(vsi->netdev, "Invalid value, %s-usecs-high cannot be changed if adaptive-tx or adaptive-rx is enabled\n", + c_type_str); + return -EINVAL; + } if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl) { rc->ring->q_vector->intrl = ec->rx_coalesce_usecs_high; - wr32(&pf->hw, GLINT_RATE(rc->ring->q_vector->reg_idx), - ice_intrl_usec_to_reg(ec->rx_coalesce_usecs_high, - pf->hw.intrl_gran)); + ice_write_intrl(rc->ring->q_vector, + ec->rx_coalesce_usecs_high); } use_adaptive_coalesce = ec->use_adaptive_rx_coalesce; @@ -3656,7 +3661,7 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec, return -EINVAL; } - itr_setting = 
rc->itr_setting & ~ICE_ITR_DYNAMIC; + itr_setting = rc->itr_setting; if (coalesce_usecs != itr_setting && use_adaptive_coalesce) { netdev_info(vsi->netdev, "%s interrupt throttling cannot be changed if adaptive-%s is enabled\n", c_type_str, c_type_str); @@ -3670,12 +3675,18 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec, } if (use_adaptive_coalesce) { - rc->itr_setting |= ICE_ITR_DYNAMIC; + rc->itr_mode = ITR_DYNAMIC; } else { - /* save the user set usecs */ + rc->itr_mode = ITR_STATIC; + /* store user facing value how it was set */ rc->itr_setting = coalesce_usecs; - /* device ITR granularity is in 2 usec increments */ - rc->target_itr = ITR_REG_ALIGN(rc->itr_setting); + /* write the change to the register */ + ice_write_itr(rc, coalesce_usecs); + /* force writes to take effect immediately, the flush shouldn't + * be done in the functions above because the intent is for + * them to do lazy writes. + */ + ice_flush(&pf->hw); } return 0; @@ -3737,8 +3748,6 @@ ice_print_if_odd_usecs(struct net_device *netdev, u16 itr_setting, if (use_adaptive_coalesce) return; - itr_setting = ITR_TO_REG(itr_setting); - if (itr_setting != coalesce_usecs && (coalesce_usecs % 2)) netdev_info(netdev, "User set %s-usecs to %d, device only supports even values. Rounding down and attempting to set %s-usecs to %d\n", c_type_str, coalesce_usecs, c_type_str, @@ -3793,7 +3802,6 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, return -EINVAL; set_complete: - return 0; } @@ -3906,30 +3914,33 @@ ice_get_module_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, u8 *data) { struct ice_netdev_priv *np = netdev_priv(netdev); +#define SFF_READ_BLOCK_SIZE 8 + u8 value[SFF_READ_BLOCK_SIZE] = { 0 }; u8 addr = ICE_I2C_EEPROM_DEV_ADDR; struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; enum ice_status status; bool is_sfp = false; - unsigned int i; + unsigned int i, j; u16 offset = 0; - u8 value = 0; u8 page = 0; if (!ee || !ee->len || !data) return -EINVAL; - status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0, &value, 1, 0, + status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0, value, 1, 0, NULL); if (status) return -EIO; - if (value == ICE_MODULE_TYPE_SFP) + if (value[0] == ICE_MODULE_TYPE_SFP) is_sfp = true; - for (i = 0; i < ee->len; i++) { + memset(data, 0, ee->len); + for (i = 0; i < ee->len; i += SFF_READ_BLOCK_SIZE) { offset = i + ee->offset; + page = 0; /* Check if we need to access the other memory page */ if (is_sfp) { @@ -3945,11 +3956,37 @@ ice_get_module_eeprom(struct net_device *netdev, } } - status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, !is_sfp, - &value, 1, 0, NULL); - if (status) - value = 0; - data[i] = value; + /* Bit 2 of EEPROM address 0x02 declares upper + * pages are disabled on QSFP modules. + * SFP modules only ever use page 0. + */ + if (page == 0 || !(data[0x2] & 0x4)) { + /* If i2c bus is busy due to slow page change or + * link management access, call can fail. This is normal. + * So we retry this a few times. 
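The loop just below bounds this retry at four attempts with a short sleep between them, and the driver zero-fills the block and moves on if every attempt fails rather than erroring out. Condensed into a helper (sketch_ name is hypothetical; the ice_aq_sff_eeprom() call shape matches the code below):

static int sketch_sff_read_block(struct ice_hw *hw, u8 addr, u16 offset,
				 u8 page, bool is_sfp, u8 *buf, u8 len)
{
	int tries = 4;

	while (tries--) {
		/* nonzero status here usually means the i2c bus is busy */
		if (!ice_aq_sff_eeprom(hw, 0, addr, offset, page, !is_sfp,
				       buf, len, 0, NULL))
			return 0;
		usleep_range(1500, 2500);	/* let the page change settle */
	}
	memset(buf, 0, len);
	return -EIO;
}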
+ */ + for (j = 0; j < 4; j++) { + status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, + !is_sfp, value, + SFF_READ_BLOCK_SIZE, + 0, NULL); + netdev_dbg(netdev, "SFF %02X %02X %02X %X = %02X%02X%02X%02X.%02X%02X%02X%02X (%X)\n", + addr, offset, page, is_sfp, + value[0], value[1], value[2], value[3], + value[4], value[5], value[6], value[7], + status); + if (status) { + usleep_range(1500, 2500); + memset(value, 0, SFF_READ_BLOCK_SIZE); + continue; + } + break; + } + + /* Make sure we have enough room for the new block */ + if ((i + SFF_READ_BLOCK_SIZE) < ee->len) + memcpy(data + i, value, SFF_READ_BLOCK_SIZE); + } } return 0; } diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c index 440964defa4a..16de603b280c 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c @@ -1452,7 +1452,7 @@ int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd) return -EBUSY; } - if (test_bit(__ICE_FD_FLUSH_REQ, pf->state)) + if (test_bit(ICE_FD_FLUSH_REQ, pf->state)) return -EBUSY; mutex_lock(&hw->fdir_fltr_lock); diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index 4b83960876f4..06ac9badee77 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -334,6 +334,7 @@ ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset) if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM) return NULL; + /* cppcheck-suppress nullPointer */ if (index > ICE_MAX_BST_TCAMS_IN_BUF) return NULL; @@ -404,6 +405,7 @@ ice_label_enum_handler(u32 __always_unused sect_type, void *section, u32 index, if (!section) return NULL; + /* cppcheck-suppress nullPointer */ if (index > ICE_MAX_LABELS_IN_BUF) return NULL; @@ -2067,6 +2069,7 @@ ice_match_prop_lst(struct list_head *list1, struct list_head *list2) count++; list_for_each_entry(tmp2, list2, list) chk_count++; + /* cppcheck-suppress knownConditionTrueFalse */ if (!count || count != chk_count) return false; diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 67b5b9b9d009..de38a0fc9665 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -130,6 +130,7 @@ #define GLINT_DYN_CTL_ITR_INDX_M ICE_M(0x3, 3) #define GLINT_DYN_CTL_INTERVAL_S 5 #define GLINT_DYN_CTL_INTERVAL_M ICE_M(0xFFF, 5) +#define GLINT_DYN_CTL_SW_ITR_INDX_ENA_M BIT(24) #define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, 25) #define GLINT_DYN_CTL_WB_ON_ITR_M BIT(30) #define GLINT_DYN_CTL_INTENA_MSK_M BIT(31) diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 16d0ee5b48a5..82e2ce23df3d 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -158,6 +158,8 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) if (vsi->type == ICE_VSI_VF) vsi->vf_id = vf_id; + else + vsi->vf_id = ICE_INVAL_VFID; switch (vsi->type) { case ICE_VSI_PF: @@ -385,6 +387,8 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data) if (!q_vector->tx.ring && !q_vector->rx.ring) return IRQ_HANDLED; + q_vector->total_events++; + napi_schedule(&q_vector->napi); return IRQ_HANDLED; @@ -1309,14 +1313,13 @@ err_out: * LUT, while in the event of enable request for RSS, it will reconfigure RSS * LUT. 
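Returning to the ethtool coalesce rework earlier in this patch: itr_setting now holds only the user's value in whole microseconds, and the adaptive flag lives in a separate itr_mode field, so ITR_IS_DYNAMIC() becomes a plain mode check instead of masking flag bits out of the setting. A minimal model of the split (sketch_ names are illustrative, not the driver's):

#include <linux/types.h>

enum sketch_itr_mode {
	SKETCH_ITR_STATIC,
	SKETCH_ITR_DYNAMIC,
};

struct sketch_ring_container {
	u16 itr_setting;	/* plain microseconds, no flag bits */
	enum sketch_itr_mode itr_mode;
};

#define SKETCH_ITR_IS_DYNAMIC(rc) ((rc)->itr_mode == SKETCH_ITR_DYNAMIC)

/* the ethtool get path above therefore no longer masks anything:
 *   ec->rx_coalesce_usecs = rc->itr_setting;
 *   ec->use_adaptive_rx_coalesce = SKETCH_ITR_IS_DYNAMIC(rc);
 */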
*/ -int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena) +void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena) { - int err = 0; u8 *lut; lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); if (!lut) - return -ENOMEM; + return; if (ena) { if (vsi->rss_lut_user) @@ -1326,9 +1329,8 @@ int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena) vsi->rss_size); } - err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size); + ice_set_rss_lut(vsi, lut, vsi->rss_table_size); kfree(lut); - return err; } /** @@ -1502,13 +1504,13 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi) */ bool ice_pf_state_is_nominal(struct ice_pf *pf) { - DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 }; + DECLARE_BITMAP(check_bits, ICE_STATE_NBITS) = { 0 }; if (!pf) return false; - bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS); - if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS)) + bitmap_set(check_bits, 0, ICE_STATE_NOMINAL_CHECK_BITS); + if (bitmap_intersects(pf->state, check_bits, ICE_STATE_NBITS)) return false; return true; @@ -1773,7 +1775,7 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi) * This function converts a decimal interrupt rate limit in usecs to the format * expected by firmware. */ -u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran) +static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran) { u32 val = intrl / gran; @@ -1783,6 +1785,51 @@ u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran) } /** + * ice_write_intrl - write throttle rate limit to interrupt specific register + * @q_vector: pointer to interrupt specific structure + * @intrl: throttle rate limit in microseconds to write + */ +void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl) +{ + struct ice_hw *hw = &q_vector->vsi->back->hw; + + wr32(hw, GLINT_RATE(q_vector->reg_idx), + ice_intrl_usec_to_reg(intrl, ICE_INTRL_GRAN_ABOVE_25)); +} + +/** + * __ice_write_itr - write throttle rate to register + * @q_vector: pointer to interrupt data structure + * @rc: pointer to ring container + * @itr: throttle rate in microseconds to write + */ +static void __ice_write_itr(struct ice_q_vector *q_vector, + struct ice_ring_container *rc, u16 itr) +{ + struct ice_hw *hw = &q_vector->vsi->back->hw; + + wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx), + ITR_REG_ALIGN(itr) >> ICE_ITR_GRAN_S); +} + +/** + * ice_write_itr - write throttle rate to queue specific register + * @rc: pointer to ring container + * @itr: throttle rate in microseconds to write + */ +void ice_write_itr(struct ice_ring_container *rc, u16 itr) +{ + struct ice_q_vector *q_vector; + + if (!rc->ring) + return; + + q_vector = rc->ring->q_vector; + + __ice_write_itr(q_vector, rc, itr); +} + +/** * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW * @vsi: the VSI being configured * @@ -1802,9 +1849,6 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi) ice_cfg_itr(hw, q_vector); - wr32(hw, GLINT_RATE(reg_idx), - ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran)); - /* Both Transmit Queue Interrupt Cause Control register * and Receive Queue Interrupt Cause control register * expects MSIX_INDX field to be the vector index @@ -2492,11 +2536,10 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi) for (i = 0; i < vsi->num_q_vectors; i++) { struct ice_q_vector *q_vector = vsi->q_vectors[i]; - u16 reg_idx = q_vector->reg_idx; - wr32(hw, GLINT_ITR(ICE_IDX_ITR0, reg_idx), 0); - wr32(hw, GLINT_ITR(ICE_IDX_ITR1, reg_idx), 0); + ice_write_intrl(q_vector, 0); for (q = 0; q < q_vector->num_ring_tx; q++) { + ice_write_itr(&q_vector->tx, 0); wr32(hw, 
QINT_TQCTL(vsi->txq_map[txq]), 0); if (ice_is_xdp_ena_vsi(vsi)) { u32 xdp_txq = txq + vsi->num_xdp_txq; @@ -2507,6 +2550,7 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi) } for (q = 0; q < q_vector->num_ring_rx; q++) { + ice_write_itr(&q_vector->rx, 0); wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0); rxq++; } @@ -2752,11 +2796,14 @@ int ice_vsi_release(struct ice_vsi *vsi) * PF that is running the work queue items currently. This is done to * avoid check_flush_dependency() warning on this wq */ - if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) { + if (vsi->netdev && !ice_is_reset_in_progress(pf->state) && + (test_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state))) { unregister_netdev(vsi->netdev); - ice_devlink_destroy_port(vsi); + clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); } + ice_devlink_destroy_port(vsi); + if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) ice_rss_clean(vsi); @@ -2811,10 +2858,16 @@ int ice_vsi_release(struct ice_vsi *vsi) ice_vsi_delete(vsi); ice_vsi_free_q_vectors(vsi); - /* make sure unregister_netdev() was called by checking __ICE_DOWN */ - if (vsi->netdev && test_bit(ICE_VSI_DOWN, vsi->state)) { - free_netdev(vsi->netdev); - vsi->netdev = NULL; + if (vsi->netdev) { + if (test_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state)) { + unregister_netdev(vsi->netdev); + clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); + } + if (test_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state)) { + free_netdev(vsi->netdev); + vsi->netdev = NULL; + clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); + } } if (vsi->type == ICE_VSI_VF && @@ -2835,47 +2888,6 @@ int ice_vsi_release(struct ice_vsi *vsi) } /** - * ice_vsi_rebuild_update_coalesce_intrl - set interrupt rate limit for a q_vector - * @q_vector: pointer to q_vector which is being updated - * @stored_intrl_setting: original INTRL setting - * - * Set coalesce param in q_vector and update these parameters in HW. - */ -static void -ice_vsi_rebuild_update_coalesce_intrl(struct ice_q_vector *q_vector, - u16 stored_intrl_setting) -{ - struct ice_hw *hw = &q_vector->vsi->back->hw; - - q_vector->intrl = stored_intrl_setting; - wr32(hw, GLINT_RATE(q_vector->reg_idx), - ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran)); -} - -/** - * ice_vsi_rebuild_update_coalesce_itr - set coalesce for a q_vector - * @q_vector: pointer to q_vector which is being updated - * @rc: pointer to ring container - * @stored_itr_setting: original ITR setting - * - * Set coalesce param in q_vector and update these parameters in HW. 
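Both of these deleted rebuild helpers are subsumed by the new ice_write_itr()/ice_write_intrl() pair. What those helpers ultimately write to GLINT_ITR is, roughly, the microsecond setting aligned down to the hardware's 2 us granularity and scaled into 2 us units (ICE_ITR_GRAN_S is 1 here); as a sketch:

static inline u32 sketch_glint_itr_val(u16 itr_usecs)
{
	/* align down to an even number of usecs, then convert to the
	 * 2 us units the INTERVAL field expects */
	return (u32)(itr_usecs & ~0x1U) >> 1;
}

/* e.g. a 50 us setting writes 25; an odd 51 us request also lands on 25,
 * which is why ice_print_if_odd_usecs() warns about odd values */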
- */ -static void -ice_vsi_rebuild_update_coalesce_itr(struct ice_q_vector *q_vector, - struct ice_ring_container *rc, - u16 stored_itr_setting) -{ - struct ice_hw *hw = &q_vector->vsi->back->hw; - - rc->itr_setting = stored_itr_setting; - - /* dynamic ITR values will be updated during Tx/Rx */ - if (!ITR_IS_DYNAMIC(rc->itr_setting)) - wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx), - ITR_REG_ALIGN(rc->itr_setting) >> ICE_ITR_GRAN_S); -} - -/** * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors * @vsi: VSI connected with q_vectors * @coalesce: array of struct with stored coalesce @@ -2918,6 +2930,7 @@ static void ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi, struct ice_coalesce_stored *coalesce, int size) { + struct ice_ring_container *rc; int i; if ((size && !coalesce) || !vsi) @@ -2940,41 +2953,51 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi, * rings is less than are allocated (this means the number of * rings increased from previously), then write out the * values in the first element + * + * Also, always write the ITR, even if in ITR_IS_DYNAMIC + * as there is no harm because the dynamic algorithm + * will just overwrite. */ - if (i < vsi->alloc_rxq && coalesce[i].rx_valid) - ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i], - &vsi->q_vectors[i]->rx, - coalesce[i].itr_rx); - else if (i < vsi->alloc_rxq) - ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i], - &vsi->q_vectors[i]->rx, - coalesce[0].itr_rx); - - if (i < vsi->alloc_txq && coalesce[i].tx_valid) - ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i], - &vsi->q_vectors[i]->tx, - coalesce[i].itr_tx); - else if (i < vsi->alloc_txq) - ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i], - &vsi->q_vectors[i]->tx, - coalesce[0].itr_tx); - - ice_vsi_rebuild_update_coalesce_intrl(vsi->q_vectors[i], - coalesce[i].intrl); + if (i < vsi->alloc_rxq && coalesce[i].rx_valid) { + rc = &vsi->q_vectors[i]->rx; + rc->itr_setting = coalesce[i].itr_rx; + ice_write_itr(rc, rc->itr_setting); + } else if (i < vsi->alloc_rxq) { + rc = &vsi->q_vectors[i]->rx; + rc->itr_setting = coalesce[0].itr_rx; + ice_write_itr(rc, rc->itr_setting); + } + + if (i < vsi->alloc_txq && coalesce[i].tx_valid) { + rc = &vsi->q_vectors[i]->tx; + rc->itr_setting = coalesce[i].itr_tx; + ice_write_itr(rc, rc->itr_setting); + } else if (i < vsi->alloc_txq) { + rc = &vsi->q_vectors[i]->tx; + rc->itr_setting = coalesce[0].itr_tx; + ice_write_itr(rc, rc->itr_setting); + } + + vsi->q_vectors[i]->intrl = coalesce[i].intrl; + ice_write_intrl(vsi->q_vectors[i], coalesce[i].intrl); } /* the number of queue vectors increased so write whatever is in * the first element */ for (; i < vsi->num_q_vectors; i++) { - ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i], - &vsi->q_vectors[i]->tx, - coalesce[0].itr_tx); - ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i], - &vsi->q_vectors[i]->rx, - coalesce[0].itr_rx); - ice_vsi_rebuild_update_coalesce_intrl(vsi->q_vectors[i], - coalesce[0].intrl); + /* transmit */ + rc = &vsi->q_vectors[i]->tx; + rc->itr_setting = coalesce[0].itr_tx; + ice_write_itr(rc, rc->itr_setting); + + /* receive */ + rc = &vsi->q_vectors[i]->rx; + rc->itr_setting = coalesce[0].itr_rx; + ice_write_itr(rc, rc->itr_setting); + + vsi->q_vectors[i]->intrl = coalesce[0].intrl; + ice_write_intrl(vsi->q_vectors[i], coalesce[0].intrl); } } @@ -2991,6 +3014,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) struct ice_coalesce_stored *coalesce; int prev_num_q_vectors = 0; struct ice_vf *vf = NULL; + enum ice_vsi_type 
vtype; enum ice_status status; struct ice_pf *pf; int ret, i; @@ -2999,7 +3023,8 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) return -EINVAL; pf = vsi->back; - if (vsi->type == ICE_VSI_VF) + vtype = vsi->type; + if (vtype == ICE_VSI_VF) vf = &pf->vf[vsi->vf_id]; coalesce = kcalloc(vsi->num_q_vectors, @@ -3017,7 +3042,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) * many interrupts each VF needs. SR-IOV MSIX resources are also * cleared in the same manner. */ - if (vsi->type != ICE_VSI_VF) { + if (vtype != ICE_VSI_VF) { /* reclaim SW interrupts back to the common pool */ ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); pf->num_avail_sw_msix += vsi->num_q_vectors; @@ -3032,7 +3057,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) ice_vsi_put_qs(vsi); ice_vsi_clear_rings(vsi); ice_vsi_free_arrays(vsi); - if (vsi->type == ICE_VSI_VF) + if (vtype == ICE_VSI_VF) ice_vsi_set_num_qs(vsi, vf->vf_id); else ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID); @@ -3051,7 +3076,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) if (ret < 0) goto err_vsi; - switch (vsi->type) { + switch (vtype) { case ICE_VSI_CTRL: case ICE_VSI_PF: ret = ice_vsi_alloc_q_vectors(vsi); @@ -3078,7 +3103,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) goto err_vectors; } /* ICE_VSI_CTRL does not need RSS so skip RSS processing */ - if (vsi->type != ICE_VSI_CTRL) + if (vtype != ICE_VSI_CTRL) /* Do not exit if configuring RSS had an issue, at * least receive traffic on first queue. Hence no * need to capture return value @@ -3140,7 +3165,7 @@ err_rings: } err_vsi: ice_vsi_clear(vsi); - set_bit(__ICE_RESET_FAILED, pf->state); + set_bit(ICE_RESET_FAILED, pf->state); kfree(coalesce); return ret; } @@ -3151,10 +3176,10 @@ err_vsi: */ bool ice_is_reset_in_progress(unsigned long *state) { - return test_bit(__ICE_RESET_OICR_RECV, state) || - test_bit(__ICE_PFR_REQ, state) || - test_bit(__ICE_CORER_REQ, state) || - test_bit(__ICE_GLOBR_REQ, state); + return test_bit(ICE_RESET_OICR_RECV, state) || + test_bit(ICE_PFR_REQ, state) || + test_bit(ICE_CORER_REQ, state) || + test_bit(ICE_GLOBR_REQ, state); } #ifdef CONFIG_DCB @@ -3242,20 +3267,15 @@ out: /** * ice_update_ring_stats - Update ring statistics * @ring: ring to update - * @cont: used to increment per-vector counters * @pkts: number of processed packets * @bytes: number of processed bytes * * This function assumes that caller has acquired a u64_stats_sync lock. 
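The lock that comment refers to is u64_stats_sync, the seqcount-style mechanism that lets 32-bit readers see consistent 64-bit counters. A generic writer/reader pairing (standard kernel pattern, not ice-specific):

#include <linux/u64_stats_sync.h>

struct sketch_stats {
	u64 pkts;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static void sketch_write(struct sketch_stats *s, u64 pkts, u64 bytes)
{
	u64_stats_update_begin(&s->syncp);
	s->pkts += pkts;
	s->bytes += bytes;
	u64_stats_update_end(&s->syncp);
}

static void sketch_read(struct sketch_stats *s, u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*pkts = s->pkts;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}

In ice the begin/end calls stay in ice_update_tx_ring_stats() and ice_update_rx_ring_stats(), which is why the inner helper only assumes the lock is already held.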
*/ -static void -ice_update_ring_stats(struct ice_ring *ring, struct ice_ring_container *cont, - u64 pkts, u64 bytes) +static void ice_update_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes) { ring->stats.bytes += bytes; ring->stats.pkts += pkts; - cont->total_bytes += bytes; - cont->total_pkts += pkts; } /** @@ -3267,7 +3287,7 @@ ice_update_ring_stats(struct ice_ring *ring, struct ice_ring_container *cont, void ice_update_tx_ring_stats(struct ice_ring *tx_ring, u64 pkts, u64 bytes) { u64_stats_update_begin(&tx_ring->syncp); - ice_update_ring_stats(tx_ring, &tx_ring->q_vector->tx, pkts, bytes); + ice_update_ring_stats(tx_ring, pkts, bytes); u64_stats_update_end(&tx_ring->syncp); } @@ -3280,7 +3300,7 @@ void ice_update_tx_ring_stats(struct ice_ring *tx_ring, u64 pkts, u64 bytes) void ice_update_rx_ring_stats(struct ice_ring *rx_ring, u64 pkts, u64 bytes) { u64_stats_update_begin(&rx_ring->syncp); - ice_update_ring_stats(rx_ring, &rx_ring->q_vector->rx, pkts, bytes); + ice_update_ring_stats(rx_ring, pkts, bytes); u64_stats_update_end(&rx_ring->syncp); } diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 462c3ab7abad..511c2316c40c 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -85,7 +85,7 @@ void ice_vsi_free_rx_rings(struct ice_vsi *vsi); void ice_vsi_free_tx_rings(struct ice_vsi *vsi); -int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena); +void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena); void ice_update_tx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes); @@ -95,7 +95,8 @@ void ice_vsi_cfg_frame_size(struct ice_vsi *vsi); int ice_status_to_errno(enum ice_status err); -u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran); +void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl); +void ice_write_itr(struct ice_ring_container *rc, u16 itr); enum ice_status ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 1b2f1e258e5c..6dbaa9099fdf 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -257,7 +257,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) if (!vsi->netdev) return -EINVAL; - while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state)) + while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) usleep_range(1000, 2000); changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags; @@ -307,7 +307,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) * space reserved for promiscuous filters. 
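The ICE_CFG_BUSY bit taken with test_and_set_bit() at the top of ice_vsi_sync_fltr() above acts as a sleeping try-lock over the state bitmap; the ethtool and queue-reconfiguration paths in this patch use a timed variant of the same idiom, sketched here (hypothetical helper name):

static int sketch_cfg_lock(unsigned long *state, int timeout)
{
	while (test_and_set_bit(ICE_CFG_BUSY, state)) {
		if (!timeout--)
			return -EBUSY;
		usleep_range(1000, 2000);
	}
	return 0;	/* release with clear_bit(ICE_CFG_BUSY, state) */
}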
*/ if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC && - !test_and_set_bit(__ICE_FLTR_OVERFLOW_PROMISC, + !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC, vsi->state)) { promisc_forced_on = true; netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n", @@ -391,7 +391,7 @@ out: set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); exit: - clear_bit(__ICE_CFG_BUSY, vsi->state); + clear_bit(ICE_CFG_BUSY, vsi->state); return err; } @@ -451,7 +451,7 @@ ice_prepare_for_reset(struct ice_pf *pf) unsigned int i; /* already prepared for reset */ - if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) + if (test_bit(ICE_PREPARED_FOR_RESET, pf->state)) return; /* Notify VFs of impending reset */ @@ -472,7 +472,7 @@ ice_prepare_for_reset(struct ice_pf *pf) ice_shutdown_all_ctrlq(hw); - set_bit(__ICE_PREPARED_FOR_RESET, pf->state); + set_bit(ICE_PREPARED_FOR_RESET, pf->state); } /** @@ -493,12 +493,12 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type) /* trigger the reset */ if (ice_reset(hw, reset_type)) { dev_err(dev, "reset %d failed\n", reset_type); - set_bit(__ICE_RESET_FAILED, pf->state); - clear_bit(__ICE_RESET_OICR_RECV, pf->state); - clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); - clear_bit(__ICE_PFR_REQ, pf->state); - clear_bit(__ICE_CORER_REQ, pf->state); - clear_bit(__ICE_GLOBR_REQ, pf->state); + set_bit(ICE_RESET_FAILED, pf->state); + clear_bit(ICE_RESET_OICR_RECV, pf->state); + clear_bit(ICE_PREPARED_FOR_RESET, pf->state); + clear_bit(ICE_PFR_REQ, pf->state); + clear_bit(ICE_CORER_REQ, pf->state); + clear_bit(ICE_GLOBR_REQ, pf->state); return; } @@ -509,8 +509,8 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type) if (reset_type == ICE_RESET_PFR) { pf->pfr_count++; ice_rebuild(pf, reset_type); - clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); - clear_bit(__ICE_PFR_REQ, pf->state); + clear_bit(ICE_PREPARED_FOR_RESET, pf->state); + clear_bit(ICE_PFR_REQ, pf->state); ice_reset_all_vfs(pf, true); } } @@ -526,20 +526,20 @@ static void ice_reset_subtask(struct ice_pf *pf) /* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an * OICR interrupt. The OICR handler (ice_misc_intr) determines what type * of reset is pending and sets bits in pf->state indicating the reset - * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set + * type and ICE_RESET_OICR_RECV. So, if the latter bit is set * prepare for pending reset if not already (for PF software-initiated * global resets the software should already be prepared for it as - * indicated by __ICE_PREPARED_FOR_RESET; for global resets initiated + * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated * by firmware or software on other PFs, that bit is not set so prepare * for the reset now), poll for reset done, rebuild and return. 
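Every flag touched by this rename indexes a single atomic bitmap, which is why the reset paths above can test, set, and clear them from interrupt and task context without extra locking. The shape of that state word (sketch only; the real enums live in ice.h):

enum sketch_pf_state {
	SKETCH_DOWN,
	SKETCH_CFG_BUSY,
	SKETCH_RESET_OICR_RECV,
	/* ... one bit per flag ... */
	SKETCH_STATE_NBITS,	/* must be last */
};

struct sketch_pf {
	DECLARE_BITMAP(state, SKETCH_STATE_NBITS);
};

/* set_bit()/clear_bit()/test_and_*_bit() are atomic on this bitmap,
 * so a reader like ice_is_reset_in_progress() needs no lock */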
*/ - if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) { + if (test_bit(ICE_RESET_OICR_RECV, pf->state)) { /* Perform the largest reset requested */ - if (test_and_clear_bit(__ICE_CORER_RECV, pf->state)) + if (test_and_clear_bit(ICE_CORER_RECV, pf->state)) reset_type = ICE_RESET_CORER; - if (test_and_clear_bit(__ICE_GLOBR_RECV, pf->state)) + if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state)) reset_type = ICE_RESET_GLOBR; - if (test_and_clear_bit(__ICE_EMPR_RECV, pf->state)) + if (test_and_clear_bit(ICE_EMPR_RECV, pf->state)) reset_type = ICE_RESET_EMPR; /* return if no valid reset type requested */ if (reset_type == ICE_RESET_INVAL) @@ -548,7 +548,7 @@ static void ice_reset_subtask(struct ice_pf *pf) /* make sure we are ready to rebuild */ if (ice_check_reset(&pf->hw)) { - set_bit(__ICE_RESET_FAILED, pf->state); + set_bit(ICE_RESET_FAILED, pf->state); } else { /* done with reset. start rebuild */ pf->hw.reset_ongoing = false; @@ -556,11 +556,11 @@ static void ice_reset_subtask(struct ice_pf *pf) /* clear bit to resume normal operations, but * ICE_NEEDS_RESTART bit is set in case rebuild failed */ - clear_bit(__ICE_RESET_OICR_RECV, pf->state); - clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); - clear_bit(__ICE_PFR_REQ, pf->state); - clear_bit(__ICE_CORER_REQ, pf->state); - clear_bit(__ICE_GLOBR_REQ, pf->state); + clear_bit(ICE_RESET_OICR_RECV, pf->state); + clear_bit(ICE_PREPARED_FOR_RESET, pf->state); + clear_bit(ICE_PFR_REQ, pf->state); + clear_bit(ICE_CORER_REQ, pf->state); + clear_bit(ICE_GLOBR_REQ, pf->state); ice_reset_all_vfs(pf, true); } @@ -568,19 +568,19 @@ static void ice_reset_subtask(struct ice_pf *pf) } /* No pending resets to finish processing. Check for new resets */ - if (test_bit(__ICE_PFR_REQ, pf->state)) + if (test_bit(ICE_PFR_REQ, pf->state)) reset_type = ICE_RESET_PFR; - if (test_bit(__ICE_CORER_REQ, pf->state)) + if (test_bit(ICE_CORER_REQ, pf->state)) reset_type = ICE_RESET_CORER; - if (test_bit(__ICE_GLOBR_REQ, pf->state)) + if (test_bit(ICE_GLOBR_REQ, pf->state)) reset_type = ICE_RESET_GLOBR; /* If no valid reset type requested just return */ if (reset_type == ICE_RESET_INVAL) return; /* reset if not already down or busy */ - if (!test_bit(__ICE_DOWN, pf->state) && - !test_bit(__ICE_CFG_BUSY, pf->state)) { + if (!test_bit(ICE_DOWN, pf->state) && + !test_bit(ICE_CFG_BUSY, pf->state)) { ice_do_reset(pf, reset_type); } } @@ -937,8 +937,8 @@ static void ice_watchdog_subtask(struct ice_pf *pf) int i; /* if interface is down do nothing */ - if (test_bit(__ICE_DOWN, pf->state) || - test_bit(__ICE_CFG_BUSY, pf->state)) + if (test_bit(ICE_DOWN, pf->state) || + test_bit(ICE_CFG_BUSY, pf->state)) return; /* make sure we don't do these things too often */ @@ -1182,7 +1182,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) u32 oldval, val; /* Do not clean control queue if/when PF reset fails */ - if (test_bit(__ICE_RESET_FAILED, pf->state)) + if (test_bit(ICE_RESET_FAILED, pf->state)) return 0; switch (q_type) { @@ -1317,13 +1317,13 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf) { struct ice_hw *hw = &pf->hw; - if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state)) + if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) return; if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN)) return; - clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state); + clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); /* There might be a situation where new messages arrive to a control * queue between processing the last message and clearing the @@ -1344,13 +1344,13 @@ static void 
ice_clean_mailboxq_subtask(struct ice_pf *pf) { struct ice_hw *hw = &pf->hw; - if (!test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state)) + if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state)) return; if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX)) return; - clear_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state); + clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state); if (ice_ctrlq_pending(hw, &hw->mailboxq)) __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX); @@ -1366,9 +1366,9 @@ static void ice_clean_mailboxq_subtask(struct ice_pf *pf) */ void ice_service_task_schedule(struct ice_pf *pf) { - if (!test_bit(__ICE_SERVICE_DIS, pf->state) && - !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) && - !test_bit(__ICE_NEEDS_RESTART, pf->state)) + if (!test_bit(ICE_SERVICE_DIS, pf->state) && + !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) && + !test_bit(ICE_NEEDS_RESTART, pf->state)) queue_work(ice_wq, &pf->serv_task); } @@ -1378,32 +1378,32 @@ void ice_service_task_schedule(struct ice_pf *pf) */ static void ice_service_task_complete(struct ice_pf *pf) { - WARN_ON(!test_bit(__ICE_SERVICE_SCHED, pf->state)); + WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state)); /* force memory (pf->state) to sync before next service task */ smp_mb__before_atomic(); - clear_bit(__ICE_SERVICE_SCHED, pf->state); + clear_bit(ICE_SERVICE_SCHED, pf->state); } /** * ice_service_task_stop - stop service task and cancel works * @pf: board private structure * - * Return 0 if the __ICE_SERVICE_DIS bit was not already set, + * Return 0 if the ICE_SERVICE_DIS bit was not already set, * 1 otherwise. */ static int ice_service_task_stop(struct ice_pf *pf) { int ret; - ret = test_and_set_bit(__ICE_SERVICE_DIS, pf->state); + ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state); if (pf->serv_tmr.function) del_timer_sync(&pf->serv_tmr); if (pf->serv_task.func) cancel_work_sync(&pf->serv_task); - clear_bit(__ICE_SERVICE_SCHED, pf->state); + clear_bit(ICE_SERVICE_SCHED, pf->state); return ret; } @@ -1415,7 +1415,7 @@ static int ice_service_task_stop(struct ice_pf *pf) */ static void ice_service_task_restart(struct ice_pf *pf) { - clear_bit(__ICE_SERVICE_DIS, pf->state); + clear_bit(ICE_SERVICE_DIS, pf->state); ice_service_task_schedule(pf); } @@ -1448,7 +1448,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf) unsigned int i; u32 reg; - if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state)) { + if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) { /* Since the VF MDD event logging is rate limited, check if * there are pending MDD events. 
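Each per-VF MDD source handled below follows one idiom: read the VP_MDET_* register, write 0xFFFF to clear the latched event, bump the VF's counter, and mark the PF for a rate-limited print. Condensed (hypothetical helper; the real code open-codes this once per register):

static bool sketch_clear_vf_mdd(struct ice_hw *hw, u32 reg_addr,
				u32 valid_mask, u32 *event_count)
{
	u32 reg = rd32(hw, reg_addr);

	if (!(reg & valid_mask))
		return false;

	wr32(hw, reg_addr, 0xFFFF);	/* clear the latched event */
	(*event_count)++;
	return true;	/* caller sets ICE_MDD_VF_PRINT_PENDING */
}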
*/ @@ -1540,7 +1540,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (reg & VP_MDET_TX_PQM_VALID_M) { wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF); vf->mdd_tx_events.count++; - set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state); + set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n", i); @@ -1550,7 +1550,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (reg & VP_MDET_TX_TCLAN_VALID_M) { wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF); vf->mdd_tx_events.count++; - set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state); + set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n", i); @@ -1560,7 +1560,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (reg & VP_MDET_TX_TDPU_VALID_M) { wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF); vf->mdd_tx_events.count++; - set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state); + set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n", i); @@ -1570,7 +1570,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (reg & VP_MDET_RX_VALID_M) { wr32(hw, VP_MDET_RX(i), 0xFFFF); vf->mdd_rx_events.count++; - set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state); + set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); if (netif_msg_rx_err(pf)) dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n", i); @@ -1735,7 +1735,7 @@ static void ice_init_link_dflt_override(struct ice_port_info *pi) * settings using the default override mask from the NVM. * * The PHY should only be configured with the default override settings the - * first time media is available. The __ICE_LINK_DEFAULT_OVERRIDE_PENDING state + * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state * is used to indicate that the user PHY cfg default override is initialized * and the PHY has not been configured with the default override settings. The * state is set here, and cleared in ice_configure_phy the first time the PHY is @@ -1767,7 +1767,7 @@ static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi) cfg->link_fec_opt = ldo->fec_options; phy->curr_user_fec_req = ICE_FEC_AUTO; - set_bit(__ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state); + set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state); } /** @@ -1839,7 +1839,7 @@ static int ice_init_phy_user_cfg(struct ice_port_info *pi) out: phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M; - set_bit(__ICE_PHY_INIT_COMPLETE, pf->state); + set_bit(ICE_PHY_INIT_COMPLETE, pf->state); err_out: kfree(pcaps); return err; @@ -1923,7 +1923,7 @@ static int ice_configure_phy(struct ice_vsi *vsi) /* Speed - If default override pending, use curr_user_phy_cfg set in * ice_init_phy_user_cfg_ldo. 
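ICE_LINK_DEFAULT_OVERRIDE_PENDING is a one-shot handshake: it is set when the user PHY configuration is seeded from the NVM link-default override, and the consumer below claims it with an atomic read-and-clear so the override is applied exactly once. The pattern, reduced to its two halves (sketch_apply_override() is hypothetical):

/* producer, at init time */
set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);

/* consumer, on first PHY configure after the seed; test_and_clear_bit()
 * guarantees only one path applies the override */
if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state))
	sketch_apply_override(cfg, &phy->curr_user_phy_cfg);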
*/ - if (test_and_clear_bit(__ICE_LINK_DEFAULT_OVERRIDE_PENDING, + if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, vsi->back->state)) { cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low; cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high; @@ -2002,7 +2002,7 @@ static void ice_check_media_subtask(struct ice_pf *pf) return; if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { - if (!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state)) + if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) ice_init_phy_user_cfg(pi); /* PHY settings are reset on media insertion, reconfigure @@ -2038,8 +2038,8 @@ static void ice_service_task(struct work_struct *work) /* bail if a reset/recovery cycle is pending or rebuild failed */ if (ice_is_reset_in_progress(pf->state) || - test_bit(__ICE_SUSPENDED, pf->state) || - test_bit(__ICE_NEEDS_RESTART, pf->state)) { + test_bit(ICE_SUSPENDED, pf->state) || + test_bit(ICE_NEEDS_RESTART, pf->state)) { ice_service_task_complete(pf); return; } @@ -2060,7 +2060,8 @@ static void ice_service_task(struct work_struct *work) ice_clean_mailboxq_subtask(pf); ice_sync_arfs_fltrs(pf); ice_flush_fdir_ctx(pf); - /* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */ + + /* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */ ice_service_task_complete(pf); /* If the tasks have taken longer than one service timer period @@ -2068,11 +2069,11 @@ static void ice_service_task(struct work_struct *work) * schedule the service task now. */ if (time_after(jiffies, (start_time + pf->serv_tmr_period)) || - test_bit(__ICE_MDD_EVENT_PENDING, pf->state) || - test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) || - test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state) || - test_bit(__ICE_FD_VF_FLUSH_CTX, pf->state) || - test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state)) + test_bit(ICE_MDD_EVENT_PENDING, pf->state) || + test_bit(ICE_VFLR_EVENT_PENDING, pf->state) || + test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) || + test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) || + test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) mod_timer(&pf->serv_tmr, jiffies); } @@ -2102,7 +2103,7 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset) struct device *dev = ice_pf_to_dev(pf); /* bail out if earlier reset has failed */ - if (test_bit(__ICE_RESET_FAILED, pf->state)) { + if (test_bit(ICE_RESET_FAILED, pf->state)) { dev_dbg(dev, "earlier reset has failed\n"); return -EIO; } @@ -2114,13 +2115,13 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset) switch (reset) { case ICE_RESET_PFR: - set_bit(__ICE_PFR_REQ, pf->state); + set_bit(ICE_PFR_REQ, pf->state); break; case ICE_RESET_CORER: - set_bit(__ICE_CORER_REQ, pf->state); + set_bit(ICE_CORER_REQ, pf->state); break; case ICE_RESET_GLOBR: - set_bit(__ICE_GLOBR_REQ, pf->state); + set_bit(ICE_GLOBR_REQ, pf->state); break; default: return -EINVAL; @@ -2625,8 +2626,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) u32 oicr, ena_mask; dev = ice_pf_to_dev(pf); - set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state); - set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state); + set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); + set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state); oicr = rd32(hw, PFINT_OICR); ena_mask = rd32(hw, PFINT_OICR_ENA); @@ -2638,18 +2639,18 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) if (oicr & PFINT_OICR_MAL_DETECT_M) { ena_mask &= ~PFINT_OICR_MAL_DETECT_M; - set_bit(__ICE_MDD_EVENT_PENDING, pf->state); + set_bit(ICE_MDD_EVENT_PENDING, pf->state); } if (oicr & 
PFINT_OICR_VFLR_M) { /* disable any further VFLR event notifications */ - if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) { + if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) { u32 reg = rd32(hw, PFINT_OICR_ENA); reg &= ~PFINT_OICR_VFLR_M; wr32(hw, PFINT_OICR_ENA, reg); } else { ena_mask &= ~PFINT_OICR_VFLR_M; - set_bit(__ICE_VFLR_EVENT_PENDING, pf->state); + set_bit(ICE_VFLR_EVENT_PENDING, pf->state); } } @@ -2675,13 +2676,13 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) * We also make note of which reset happened so that peer * devices/drivers can be informed. */ - if (!test_and_set_bit(__ICE_RESET_OICR_RECV, pf->state)) { + if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) { if (reset == ICE_RESET_CORER) - set_bit(__ICE_CORER_RECV, pf->state); + set_bit(ICE_CORER_RECV, pf->state); else if (reset == ICE_RESET_GLOBR) - set_bit(__ICE_GLOBR_RECV, pf->state); + set_bit(ICE_GLOBR_RECV, pf->state); else - set_bit(__ICE_EMPR_RECV, pf->state); + set_bit(ICE_EMPR_RECV, pf->state); /* There are couple of different bits at play here. * hw->reset_ongoing indicates whether the hardware is @@ -2689,7 +2690,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) * is received and set back to false after the driver * has determined that the hardware is out of reset. * - * __ICE_RESET_OICR_RECV in pf->state indicates + * ICE_RESET_OICR_RECV in pf->state indicates * that a post reset rebuild is required before the * driver is operational again. This is set above. * @@ -2717,7 +2718,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) if (oicr & (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_PCI_EXCEPTION_M | PFINT_OICR_ECC_ERR_M)) { - set_bit(__ICE_PFR_REQ, pf->state); + set_bit(ICE_PFR_REQ, pf->state); ice_service_task_schedule(pf); } } @@ -2976,6 +2977,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi) if (!netdev) return -ENOMEM; + set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); vsi->netdev = netdev; np = netdev_priv(netdev); np->vsi = vsi; @@ -3188,6 +3190,7 @@ unroll_napi_add: if (vsi) { ice_napi_del(vsi); if (vsi->netdev) { + clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); free_netdev(vsi->netdev); vsi->netdev = NULL; } @@ -3321,7 +3324,7 @@ static int ice_init_pf(struct ice_pf *pf) timer_setup(&pf->serv_tmr, ice_service_timer, 0); pf->serv_tmr_period = HZ; INIT_WORK(&pf->serv_task, ice_service_task); - clear_bit(__ICE_SERVICE_SCHED, pf->state); + clear_bit(ICE_SERVICE_SCHED, pf->state); mutex_init(&pf->avail_q_mutex); pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); @@ -3530,7 +3533,7 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx) if (!new_rx && !new_tx) return -EINVAL; - while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) { + while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) { timeout--; if (!timeout) return -EBUSY; @@ -3554,7 +3557,7 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx) ice_pf_dcb_recfg(pf); ice_vsi_open(vsi); done: - clear_bit(__ICE_CFG_BUSY, pf->state); + clear_bit(ICE_CFG_BUSY, pf->state); return err; } @@ -3957,6 +3960,7 @@ static int ice_register_netdev(struct ice_pf *pf) if (err) goto err_register_netdev; + set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); netif_carrier_off(vsi->netdev); netif_tx_stop_all_queues(vsi->netdev); err = ice_devlink_create_port(vsi); @@ -3968,9 +3972,11 @@ static int ice_register_netdev(struct ice_pf *pf) return 0; err_devlink_create: unregister_netdev(vsi->netdev); + clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); err_register_netdev: 
free_netdev(vsi->netdev); vsi->netdev = NULL; + clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); return err; } @@ -3996,7 +4002,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) if (err) return err; - err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev)); + err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev)); if (err) { dev_err(dev, "BAR0 I/O map error %d\n", err); return err; @@ -4020,9 +4026,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) pf->pdev = pdev; pci_set_drvdata(pdev, pf); - set_bit(__ICE_DOWN, pf->state); + set_bit(ICE_DOWN, pf->state); /* Disable service task until DOWN bit is cleared */ - set_bit(__ICE_SERVICE_DIS, pf->state); + set_bit(ICE_SERVICE_DIS, pf->state); hw = &pf->hw; hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; @@ -4162,7 +4168,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) goto err_alloc_sw_unroll; } - clear_bit(__ICE_SERVICE_DIS, pf->state); + clear_bit(ICE_SERVICE_DIS, pf->state); /* tell the firmware we are up */ err = ice_send_version(pf); @@ -4256,16 +4262,15 @@ probe_done: goto err_netdev_reg; /* ready to go, so clear down state bit */ - clear_bit(__ICE_DOWN, pf->state); - + clear_bit(ICE_DOWN, pf->state); return 0; err_netdev_reg: err_send_version_unroll: ice_vsi_release_all(pf); err_alloc_sw_unroll: - set_bit(__ICE_SERVICE_DIS, pf->state); - set_bit(__ICE_DOWN, pf->state); + set_bit(ICE_SERVICE_DIS, pf->state); + set_bit(ICE_DOWN, pf->state); devm_kfree(dev, pf->first_sw); err_msix_misc_unroll: ice_free_irq_msix_misc(pf); @@ -4365,11 +4370,11 @@ static void ice_remove(struct pci_dev *pdev) } if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { - set_bit(__ICE_VF_RESETS_DISABLED, pf->state); + set_bit(ICE_VF_RESETS_DISABLED, pf->state); ice_free_vfs(pf); } - set_bit(__ICE_DOWN, pf->state); + set_bit(ICE_DOWN, pf->state); ice_service_task_stop(pf); ice_aq_cancel_waiting_tasks(pf); @@ -4529,13 +4534,13 @@ static int __maybe_unused ice_suspend(struct device *dev) disabled = ice_service_task_stop(pf); /* Already suspended?, then there is nothing to do */ - if (test_and_set_bit(__ICE_SUSPENDED, pf->state)) { + if (test_and_set_bit(ICE_SUSPENDED, pf->state)) { if (!disabled) ice_service_task_restart(pf); return 0; } - if (test_bit(__ICE_DOWN, pf->state) || + if (test_bit(ICE_DOWN, pf->state) || ice_is_reset_in_progress(pf->state)) { dev_err(dev, "can't suspend device in reset or already down\n"); if (!disabled) @@ -4607,16 +4612,16 @@ static int __maybe_unused ice_resume(struct device *dev) if (ret) dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret); - clear_bit(__ICE_DOWN, pf->state); + clear_bit(ICE_DOWN, pf->state); /* Now perform PF reset and rebuild */ reset_type = ICE_RESET_PFR; /* re-enable service task for reset, but allow reset to schedule it */ - clear_bit(__ICE_SERVICE_DIS, pf->state); + clear_bit(ICE_SERVICE_DIS, pf->state); if (ice_schedule_reset(pf, reset_type)) dev_err(dev, "Reset during resume failed.\n"); - clear_bit(__ICE_SUSPENDED, pf->state); + clear_bit(ICE_SUSPENDED, pf->state); ice_service_task_restart(pf); /* Restart the service task */ @@ -4645,11 +4650,11 @@ ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err) return PCI_ERS_RESULT_DISCONNECT; } - if (!test_bit(__ICE_SUSPENDED, pf->state)) { + if (!test_bit(ICE_SUSPENDED, pf->state)) { ice_service_task_stop(pf); - if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) { - set_bit(__ICE_PFR_REQ, pf->state); + if 
(!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { + set_bit(ICE_PFR_REQ, pf->state); ice_prepare_for_reset(pf); } } @@ -4716,7 +4721,7 @@ static void ice_pci_err_resume(struct pci_dev *pdev) return; } - if (test_bit(__ICE_SUSPENDED, pf->state)) { + if (test_bit(ICE_SUSPENDED, pf->state)) { dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n", __func__); return; @@ -4737,11 +4742,11 @@ static void ice_pci_err_reset_prepare(struct pci_dev *pdev) { struct ice_pf *pf = pci_get_drvdata(pdev); - if (!test_bit(__ICE_SUSPENDED, pf->state)) { + if (!test_bit(ICE_SUSPENDED, pf->state)) { ice_service_task_stop(pf); - if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) { - set_bit(__ICE_PFR_REQ, pf->state); + if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { + set_bit(ICE_PFR_REQ, pf->state); ice_prepare_for_reset(pf); } } @@ -4888,7 +4893,7 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) return 0; } - if (test_bit(__ICE_DOWN, pf->state) || + if (test_bit(ICE_DOWN, pf->state) || ice_is_reset_in_progress(pf->state)) { netdev_err(netdev, "can't set mac %pM. device not ready\n", mac); @@ -5107,10 +5112,10 @@ ice_set_features(struct net_device *netdev, netdev_features_t features) * separate if/else statements to guarantee each feature is checked */ if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH)) - ret = ice_vsi_manage_rss_lut(vsi, true); + ice_vsi_manage_rss_lut(vsi, true); else if (!(features & NETIF_F_RXHASH) && netdev->features & NETIF_F_RXHASH) - ret = ice_vsi_manage_rss_lut(vsi, false); + ice_vsi_manage_rss_lut(vsi, false); if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) @@ -5191,6 +5196,105 @@ int ice_vsi_cfg(struct ice_vsi *vsi) return err; } +/* THEORY OF MODERATION: + * The below code creates custom DIM profiles for use by this driver, because + * the ice driver hardware works differently than the hardware that DIMLIB was + * originally made for. ice hardware doesn't have packet count limits that + * can trigger an interrupt, but it *does* have interrupt rate limit support, + * and this code adds that capability to be used by the driver when it's using + * DIMLIB. The DIMLIB code was always designed to be a suggestion to the driver + * for how to "respond" to traffic and interrupts, so this driver uses a + * slightly different set of moderation parameters to get best performance. + */ +struct ice_dim { + /* the throttle rate for interrupts, basically worst case delay before + * an initial interrupt fires, value is stored in microseconds. + */ + u16 itr; + /* the rate limit for interrupts, which can cap a delay from a small + * ITR at a certain amount of interrupts per second. f.e. a 2us ITR + * could yield as much as 500,000 interrupts per second, but with a + * 10us rate limit, it limits to 100,000 interrupts per second. Value + * is stored in microseconds. + */ + u16 intrl; +}; + +/* Make a different profile for Rx that doesn't allow quite so aggressive + * moderation at the high end (it maxes out at 128us or about 8k interrupts a + * second. The INTRL/rate parameters here are only useful to cap small ITR + * values, which is why for larger ITR's - like 128, which can only generate + * 8k interrupts per second, there is no point to rate limit and the values + * are set to zero. The rate limit values do affect latency, and so must + * be reasonably small so to not impact latency sensitive tests. 
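The comment's arithmetic generalizes: the achievable interrupt rate is bounded by one second divided by the larger of the ITR and INTRL values, since whichever is bigger sets the minimum gap between interrupts. A quick sketch (hypothetical helper):

static inline u32 sketch_max_irq_rate(u16 itr_us, u16 intrl_us)
{
	u32 floor_us = max_t(u32, itr_us, intrl_us);

	/* zero means unthrottled in both registers */
	return floor_us ? USEC_PER_SEC / floor_us : U32_MAX;
}

/* rx_profile[0] below is {2, 10}: 1000000 / max(2, 10) = 100000
 * interrupts/sec, the comment's 10 us rate-limit example; by
 * rx_profile[4], {128, 0}, the ITR alone caps things near 8k/sec */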
+ */ +static const struct ice_dim rx_profile[] = { + {2, 10}, + {8, 16}, + {32, 0}, + {96, 0}, + {128, 0} +}; + +/* The transmit profile, which has the same sorts of values + * as the previous struct + */ +static const struct ice_dim tx_profile[] = { + {2, 10}, + {8, 16}, + {64, 0}, + {128, 0}, + {256, 0} +}; + +static void ice_tx_dim_work(struct work_struct *work) +{ + struct ice_ring_container *rc; + struct ice_q_vector *q_vector; + struct dim *dim; + u16 itr, intrl; + + dim = container_of(work, struct dim, work); + rc = container_of(dim, struct ice_ring_container, dim); + q_vector = container_of(rc, struct ice_q_vector, tx); + + if (dim->profile_ix >= ARRAY_SIZE(tx_profile)) + dim->profile_ix = ARRAY_SIZE(tx_profile) - 1; + + /* look up the values in our local table */ + itr = tx_profile[dim->profile_ix].itr; + intrl = tx_profile[dim->profile_ix].intrl; + + ice_write_itr(rc, itr); + ice_write_intrl(q_vector, intrl); + + dim->state = DIM_START_MEASURE; +} + +static void ice_rx_dim_work(struct work_struct *work) +{ + struct ice_ring_container *rc; + struct ice_q_vector *q_vector; + struct dim *dim; + u16 itr, intrl; + + dim = container_of(work, struct dim, work); + rc = container_of(dim, struct ice_ring_container, dim); + q_vector = container_of(rc, struct ice_q_vector, rx); + + if (dim->profile_ix >= ARRAY_SIZE(rx_profile)) + dim->profile_ix = ARRAY_SIZE(rx_profile) - 1; + + /* look up the values in our local table */ + itr = rx_profile[dim->profile_ix].itr; + intrl = rx_profile[dim->profile_ix].intrl; + + ice_write_itr(rc, itr); + ice_write_intrl(q_vector, intrl); + + dim->state = DIM_START_MEASURE; +} + /** * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI * @vsi: the VSI being configured @@ -5205,6 +5309,12 @@ static void ice_napi_enable_all(struct ice_vsi *vsi) ice_for_each_q_vector(vsi, q_idx) { struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; + INIT_WORK(&q_vector->tx.dim.work, ice_tx_dim_work); + q_vector->tx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + + INIT_WORK(&q_vector->rx.dim.work, ice_rx_dim_work); + q_vector->rx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + if (q_vector->rx.ring || q_vector->tx.ring) napi_enable(&q_vector->napi); } @@ -5373,7 +5483,7 @@ void ice_update_vsi_stats(struct ice_vsi *vsi) struct ice_pf *pf = vsi->back; if (test_bit(ICE_VSI_DOWN, vsi->state) || - test_bit(__ICE_CFG_BUSY, pf->state)) + test_bit(ICE_CFG_BUSY, pf->state)) return; /* get stats as recorded by Tx/Rx rings */ @@ -5613,6 +5723,9 @@ static void ice_napi_disable_all(struct ice_vsi *vsi) if (q_vector->rx.ring || q_vector->tx.ring) napi_disable(&q_vector->napi); + + cancel_work_sync(&q_vector->tx.dim.work); + cancel_work_sync(&q_vector->rx.dim.work); } } @@ -5625,7 +5738,7 @@ int ice_down(struct ice_vsi *vsi) int i, tx_err, rx_err, link_err = 0; /* Caller of this function is expected to set the - * vsi->state __ICE_DOWN bit + * vsi->state ICE_DOWN bit */ if (vsi->netdev) { netif_carrier_off(vsi->netdev); @@ -5973,7 +6086,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) enum ice_status ret; int err; - if (test_bit(__ICE_DOWN, pf->state)) + if (test_bit(ICE_DOWN, pf->state)) goto clear_recovery; dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type); @@ -6089,7 +6202,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) ice_replay_post(hw); /* if we get here, reset flow is successful */ - clear_bit(__ICE_RESET_FAILED, pf->state); + clear_bit(ICE_RESET_FAILED, pf->state); return; err_vsi_rebuild: @@ 
-6097,10 +6210,10 @@ err_sched_init_port: ice_sched_cleanup_all(hw); err_init_ctrlq: ice_shutdown_all_ctrlq(hw); - set_bit(__ICE_RESET_FAILED, pf->state); + set_bit(ICE_RESET_FAILED, pf->state); clear_recovery: /* set this bit in PF state to control service task scheduling */ - set_bit(__ICE_NEEDS_RESTART, pf->state); + set_bit(ICE_NEEDS_RESTART, pf->state); dev_err(dev, "Rebuild failed, unload and reload driver\n"); } @@ -6622,19 +6735,19 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) switch (pf->tx_timeout_recovery_level) { case 1: - set_bit(__ICE_PFR_REQ, pf->state); + set_bit(ICE_PFR_REQ, pf->state); break; case 2: - set_bit(__ICE_CORER_REQ, pf->state); + set_bit(ICE_CORER_REQ, pf->state); break; case 3: - set_bit(__ICE_GLOBR_REQ, pf->state); + set_bit(ICE_GLOBR_REQ, pf->state); break; default: netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n"); - set_bit(__ICE_DOWN, pf->state); + set_bit(ICE_DOWN, pf->state); set_bit(ICE_VSI_NEEDS_RESTART, vsi->state); - set_bit(__ICE_SERVICE_DIS, pf->state); + set_bit(ICE_SERVICE_DIS, pf->state); break; } @@ -6685,7 +6798,7 @@ int ice_open_internal(struct net_device *netdev) enum ice_status status; int err; - if (test_bit(__ICE_NEEDS_RESTART, pf->state)) { + if (test_bit(ICE_NEEDS_RESTART, pf->state)) { netdev_err(netdev, "driver needs to be unloaded and reloaded\n"); return -EIO; } @@ -6703,7 +6816,7 @@ int ice_open_internal(struct net_device *netdev) /* Set PHY if there is media, otherwise, turn off PHY */ if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); - if (!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state)) { + if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) { err = ice_init_phy_user_cfg(pi); if (err) { netdev_err(netdev, "Failed to initialize PHY settings, error %d\n", diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index 75ccbfc07f99..fee37a5844cf 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -644,6 +644,7 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank, /* Verify that the simple checksum is zero */ for (i = 0; i < sizeof(tmp); i++) + /* cppcheck-suppress objectIndex */ sum += ((u8 *)&tmp)[i]; if (sum) { diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 97562051fe14..2f097637e405 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -988,6 +988,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi, *num_nodes_added = 0; while (*num_nodes_added < num_nodes) { u16 max_child_nodes, num_added = 0; + /* cppcheck-suppress unusedVariable */ u32 temp; status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent, diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index dfdf2c1fa9d3..e2b4b29ea207 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -1223,216 +1223,50 @@ construct_skb: } /** - * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic - * @port_info: port_info structure containing the current link speed - * @avg_pkt_size: average size of Tx or Rx packets based on clean routine - * @itr: ITR value to update + * ice_net_dim - Update net DIM algorithm + * @q_vector: the vector associated with the interrupt * - * Calculate how big of an increment should be applied to 
the ITR value passed - * in based on wmem_default, SKB overhead, ethernet overhead, and the current - * link speed. + * Create a DIM sample and notify net_dim() so that it can possibly decide + * a new ITR value based on incoming packets, bytes, and interrupts. * - * The following is a calculation derived from: - * wmem_default / (size + overhead) = desired_pkts_per_int - * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate - * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value - * - * Assuming wmem_default is 212992 and overhead is 640 bytes per - * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the - * formula down to: - * - * wmem_default * bits_per_byte * usecs_per_sec pkt_size + 24 - * ITR = -------------------------------------------- * -------------- - * rate pkt_size + 640 + * This function is a no-op if the ring is not configured to dynamic ITR. */ -static unsigned int -ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info, - unsigned int avg_pkt_size, - unsigned int itr) +static void ice_net_dim(struct ice_q_vector *q_vector) { - switch (port_info->phy.link_info.link_speed) { - case ICE_AQ_LINK_SPEED_100GB: - itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24), - avg_pkt_size + 640); - break; - case ICE_AQ_LINK_SPEED_50GB: - itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24), - avg_pkt_size + 640); - break; - case ICE_AQ_LINK_SPEED_40GB: - itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24), - avg_pkt_size + 640); - break; - case ICE_AQ_LINK_SPEED_25GB: - itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24), - avg_pkt_size + 640); - break; - case ICE_AQ_LINK_SPEED_20GB: - itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24), - avg_pkt_size + 640); - break; - case ICE_AQ_LINK_SPEED_10GB: - default: - itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24), - avg_pkt_size + 640); - break; - } - - if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) { - itr &= ICE_ITR_ADAPTIVE_LATENCY; - itr += ICE_ITR_ADAPTIVE_MAX_USECS; - } + struct ice_ring_container *tx = &q_vector->tx; + struct ice_ring_container *rx = &q_vector->rx; - return itr; -} + if (ITR_IS_DYNAMIC(tx)) { + struct dim_sample dim_sample = {}; + u64 packets = 0, bytes = 0; + struct ice_ring *ring; -/** - * ice_update_itr - update the adaptive ITR value based on statistics - * @q_vector: structure containing interrupt and ring information - * @rc: structure containing ring performance data - * - * Stores a new ITR value based on packets and byte - * counts during the last interrupt. The advantage of per interrupt - * computation is faster updates and more accurate ITR for the current - * traffic pattern. Constants in this function were computed - * based on theoretical maximum wire speed and thresholds were set based - * on testing data as well as attempting to minimize response time - * while increasing bulk throughput. - */ -static void -ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc) -{ - unsigned long next_update = jiffies; - unsigned int packets, bytes, itr; - bool container_is_rx; + ice_for_each_ring(ring, q_vector->tx) { + packets += ring->stats.pkts; + bytes += ring->stats.bytes; + } - if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting)) - return; + dim_update_sample(q_vector->total_events, packets, bytes, + &dim_sample); - /* If itr_countdown is set it means we programmed an ITR within - * the last 4 interrupt cycles. This has a side effect of us - * potentially firing an early interrupt. 
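The added ice_net_dim() earlier in this hunk follows the standard DIMLIB contract: the driver feeds samples in, and DIMLIB asks for changes via a work item. A condensed sketch of that contract, with hypothetical names (my_apply_moderation, my_poll_done) standing in for driver specifics:

#include <linux/dim.h>
#include <linux/workqueue.h>

static void my_apply_moderation(u8 profile_ix)
{
        /* driver-specific: translate profile_ix into ITR/INTRL writes */
}

/* net_dim() never touches hardware itself; when it wants a change it
 * schedules dim->work, and the handler applies the chosen profile row.
 */
static void my_dim_work(struct work_struct *work)
{
        struct dim *dim = container_of(work, struct dim, work);

        my_apply_moderation(dim->profile_ix);
        dim->state = DIM_START_MEASURE; /* re-arm for the next measurement */
}

/* Called from the poll/interrupt path with running counters. */
static void my_poll_done(struct dim *dim, u16 events, u64 pkts, u64 bytes)
{
        struct dim_sample sample = {};

        dim_update_sample(events, pkts, bytes, &sample);
        net_dim(dim, sample); /* may decide to move profile_ix */
}

Setup pairs this with INIT_WORK(&dim->work, my_dim_work) and a mode such as DIM_CQ_PERIOD_MODE_START_FROM_EQE, exactly as the ice code does in ice_napi_enable_all() above.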
In order to work around - * this we need to throw out any data received for a few - * interrupts following the update. - */ - if (q_vector->itr_countdown) { - itr = rc->target_itr; - goto clear_counts; + net_dim(&tx->dim, dim_sample); } - container_is_rx = (&q_vector->rx == rc); - /* For Rx we want to push the delay up and default to low latency. - * for Tx we want to pull the delay down and default to high latency. - */ - itr = container_is_rx ? - ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY : - ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY; - - /* If we didn't update within up to 1 - 2 jiffies we can assume - * that either packets are coming in so slow there hasn't been - * any work, or that there is so much work that NAPI is dealing - * with interrupt moderation and we don't need to do anything. - */ - if (time_after(next_update, rc->next_update)) - goto clear_counts; - - prefetch(q_vector->vsi->port_info); - - packets = rc->total_pkts; - bytes = rc->total_bytes; - - if (container_is_rx) { - /* If Rx there are 1 to 4 packets and bytes are less than - * 9000 assume insufficient data to use bulk rate limiting - * approach unless Tx is already in bulk rate limiting. We - * are likely latency driven. - */ - if (packets && packets < 4 && bytes < 9000 && - (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) { - itr = ICE_ITR_ADAPTIVE_LATENCY; - goto adjust_by_size_and_speed; - } - } else if (packets < 4) { - /* If we have Tx and Rx ITR maxed and Tx ITR is running in - * bulk mode and we are receiving 4 or fewer packets just - * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so - * that the Rx can relax. - */ - if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS && - (q_vector->rx.target_itr & ICE_ITR_MASK) == - ICE_ITR_ADAPTIVE_MAX_USECS) - goto clear_counts; - } else if (packets > 32) { - /* If we have processed over 32 packets in a single interrupt - * for Tx assume we need to switch over to "bulk" mode. - */ - rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY; - } + if (ITR_IS_DYNAMIC(rx)) { + struct dim_sample dim_sample = {}; + u64 packets = 0, bytes = 0; + struct ice_ring *ring; - /* We have no packets to actually measure against. This means - * either one of the other queues on this vector is active or - * we are a Tx queue doing TSO with too high of an interrupt rate. - * - * Between 4 and 56 we can assume that our current interrupt delay - * is only slightly too low. As such we should increase it by a small - * fixed amount. - */ - if (packets < 56) { - itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC; - if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) { - itr &= ICE_ITR_ADAPTIVE_LATENCY; - itr += ICE_ITR_ADAPTIVE_MAX_USECS; + ice_for_each_ring(ring, q_vector->rx) { + packets += ring->stats.pkts; + bytes += ring->stats.bytes; } - goto clear_counts; - } - - if (packets <= 256) { - itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr); - itr &= ICE_ITR_MASK; - - /* Between 56 and 112 is our "goldilocks" zone where we are - * working out "just right". Just report that our current - * ITR is good for us. - */ - if (packets <= 112) - goto clear_counts; - /* If packet count is 128 or greater we are likely looking - * at a slight overrun of the delay we want. Try halving - * our delay to see if that will cut the number of packets - * in half per interrupt. 
- */ - itr >>= 1; - itr &= ICE_ITR_MASK; - if (itr < ICE_ITR_ADAPTIVE_MIN_USECS) - itr = ICE_ITR_ADAPTIVE_MIN_USECS; + dim_update_sample(q_vector->total_events, packets, bytes, + &dim_sample); - goto clear_counts; + net_dim(&rx->dim, dim_sample); } - - /* The paths below assume we are dealing with a bulk ITR since - * number of packets is greater than 256. We are just going to have - * to compute a value and try to bring the count under control, - * though for smaller packet sizes there isn't much we can do as - * NAPI polling will likely be kicking in sooner rather than later. - */ - itr = ICE_ITR_ADAPTIVE_BULK; - -adjust_by_size_and_speed: - - /* based on checks above packets cannot be 0 so division is safe */ - itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info, - bytes / packets, itr); - -clear_counts: - /* write back value */ - rc->target_itr = itr; - - /* next update should occur within next jiffy */ - rc->next_update = next_update + 1; - - rc->total_bytes = 0; - rc->total_pkts = 0; } /** @@ -1456,72 +1290,46 @@ static u32 ice_buildreg_itr(u16 itr_idx, u16 itr) (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)); } -/* The act of updating the ITR will cause it to immediately trigger. In order - * to prevent this from throwing off adaptive update statistics we defer the - * update so that it can only happen so often. So after either Tx or Rx are - * updated we make the adaptive scheme wait until either the ITR completely - * expires via the next_update expiration or we have been through at least - * 3 interrupts. - */ -#define ITR_COUNTDOWN_START 3 - /** - * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt - * @q_vector: q_vector for which ITR is being updated and interrupt enabled + * ice_update_ena_itr - Update ITR moderation and re-enable MSI-X interrupt + * @q_vector: the vector associated with the interrupt to enable + * + * Update the net_dim() algorithm and re-enable the interrupt associated with + * this vector. + * + * If the VSI is down, the interrupt will not be re-enabled. */ static void ice_update_ena_itr(struct ice_q_vector *q_vector) { - struct ice_ring_container *tx = &q_vector->tx; - struct ice_ring_container *rx = &q_vector->rx; struct ice_vsi *vsi = q_vector->vsi; + bool wb_en = q_vector->wb_on_itr; u32 itr_val; - /* when exiting WB_ON_ITR just reset the countdown and let ITR - * resume it's normal "interrupts-enabled" path - */ - if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) - q_vector->itr_countdown = 0; - - /* This will do nothing if dynamic updates are not enabled */ - ice_update_itr(q_vector, tx); - ice_update_itr(q_vector, rx); + if (test_bit(ICE_DOWN, vsi->state)) + return; - /* This block of logic allows us to get away with only updating - * one ITR value with each interrupt. The idea is to perform a - * pseudo-lazy update with the following criteria. - * - * 1. Rx is given higher priority than Tx if both are in same state - * 2. If we must reduce an ITR that is given highest priority. - * 3. We then give priority to increasing ITR based on amount. + /* When exiting WB_ON_ITR, let ITR resume its normal + * interrupts-enabled path. 
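Distilling the re-enable path that follows: a single register write both re-enables the vector and, when leaving WB_ON_ITR, fires a software interrupt to catch cleanups missed while busy polling. A schematic restatement, where the bit macros are the driver's and the wrapper function is illustrative only:

static u32 my_build_reenable(bool leaving_wb_on_itr)
{
        /* ICE_ITR_NONE: re-enable only, update no ITR interval */
        u32 val = ice_buildreg_itr(ICE_ITR_NONE, 0);

        if (leaving_wb_on_itr)
                val |= GLINT_DYN_CTL_SWINT_TRIG_M |
                       GLINT_DYN_CTL_SW_ITR_INDX_M |
                       GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;

        return val; /* written to GLINT_DYN_CTL(q_vector->reg_idx) */
}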
*/ - if (rx->target_itr < rx->current_itr) { - /* Rx ITR needs to be reduced, this is highest priority */ - itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr); - rx->current_itr = rx->target_itr; - q_vector->itr_countdown = ITR_COUNTDOWN_START; - } else if ((tx->target_itr < tx->current_itr) || - ((rx->target_itr - rx->current_itr) < - (tx->target_itr - tx->current_itr))) { - /* Tx ITR needs to be reduced, this is second priority - * Tx ITR needs to be increased more than Rx, fourth priority - */ - itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr); - tx->current_itr = tx->target_itr; - q_vector->itr_countdown = ITR_COUNTDOWN_START; - } else if (rx->current_itr != rx->target_itr) { - /* Rx ITR needs to be increased, third priority */ - itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr); - rx->current_itr = rx->target_itr; - q_vector->itr_countdown = ITR_COUNTDOWN_START; - } else { - /* Still have to re-enable the interrupts */ - itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0); - if (q_vector->itr_countdown) - q_vector->itr_countdown--; + if (wb_en) + q_vector->wb_on_itr = false; + + /* This will do nothing if dynamic updates are not enabled. */ + ice_net_dim(q_vector); + + /* net_dim() updates ITR out-of-band using a work item */ + itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0); + /* trigger an immediate software interrupt when exiting + * busy poll, to make sure to catch any pending cleanups + * that might have been missed due to interrupt state + * transition. + */ + if (wb_en) { + itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M | + GLINT_DYN_CTL_SW_ITR_INDX_M | + GLINT_DYN_CTL_SW_ITR_INDX_ENA_M; } - - if (!test_bit(ICE_VSI_DOWN, vsi->state)) - wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val); + wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val); } /** @@ -1543,7 +1351,7 @@ static void ice_set_wb_on_itr(struct ice_q_vector *q_vector) struct ice_vsi *vsi = q_vector->vsi; /* already in wb_on_itr mode no need to change it */ - if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) + if (q_vector->wb_on_itr) return; /* use previously set ITR values for all of the ITR indices by @@ -1555,7 +1363,7 @@ static void ice_set_wb_on_itr(struct ice_q_vector *q_vector) GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | GLINT_DYN_CTL_WB_ON_ITR_M); - q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE; + q_vector->wb_on_itr = true; } /** diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index 701552d88bea..c5a92ac787d6 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -223,23 +223,20 @@ enum ice_rx_dtype { #define ICE_TX_ITR ICE_IDX_ITR1 #define ICE_ITR_8K 124 #define ICE_ITR_20K 50 -#define ICE_ITR_MAX 8160 -#define ICE_DFLT_TX_ITR (ICE_ITR_20K | ICE_ITR_DYNAMIC) -#define ICE_DFLT_RX_ITR (ICE_ITR_20K | ICE_ITR_DYNAMIC) -#define ICE_ITR_DYNAMIC 0x8000 /* used as flag for itr_setting */ -#define ITR_IS_DYNAMIC(setting) (!!((setting) & ICE_ITR_DYNAMIC)) -#define ITR_TO_REG(setting) ((setting) & ~ICE_ITR_DYNAMIC) +#define ICE_ITR_MAX 8160 /* 0x1FE0 */ +#define ICE_DFLT_TX_ITR ICE_ITR_20K +#define ICE_DFLT_RX_ITR ICE_ITR_20K +enum ice_dynamic_itr { + ITR_STATIC = 0, + ITR_DYNAMIC = 1 +}; + +#define ITR_IS_DYNAMIC(rc) ((rc)->itr_mode == ITR_DYNAMIC) #define ICE_ITR_GRAN_S 1 /* ITR granularity is always 2us */ #define ICE_ITR_GRAN_US BIT(ICE_ITR_GRAN_S) #define ICE_ITR_MASK 0x1FFE /* ITR register value alignment mask */ #define ITR_REG_ALIGN(setting) ((setting) & ICE_ITR_MASK) 
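To make the granularity defines above concrete: the hardware interval field counts 2us units, so a usec value is masked to even alignment and shifted right once. A self-contained sketch of just that arithmetic (the real driver programs it through helpers like ice_write_itr()):

#include <stdio.h>

#define ICE_ITR_GRAN_S  1       /* ITR granularity is always 2us */
#define ICE_ITR_MASK    0x1FFE  /* bit 0 is ignored */

static unsigned int itr_usecs_to_interval(unsigned int usecs)
{
        return (usecs & ICE_ITR_MASK) >> ICE_ITR_GRAN_S;
}

int main(void)
{
        /* ICE_ITR_20K is 50us: interval field 25, i.e. 25 * 2us */
        printf("%u\n", itr_usecs_to_interval(50));
        return 0;
}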
-#define ICE_ITR_ADAPTIVE_MIN_INC 0x0002 -#define ICE_ITR_ADAPTIVE_MIN_USECS 0x0002 -#define ICE_ITR_ADAPTIVE_MAX_USECS 0x00FA -#define ICE_ITR_ADAPTIVE_LATENCY 0x8000 -#define ICE_ITR_ADAPTIVE_BULK 0x0000 - #define ICE_DFLT_INTRL 0 #define ICE_MAX_INTRL 236 @@ -339,17 +336,14 @@ static inline bool ice_ring_is_xdp(struct ice_ring *ring) struct ice_ring_container { /* head of linked-list of rings */ struct ice_ring *ring; - unsigned long next_update; /* jiffies value of next queue update */ - unsigned int total_bytes; /* total bytes processed this int */ - unsigned int total_pkts; /* total packets processed this int */ + struct dim dim; /* data for net_dim algorithm */ u16 itr_idx; /* index in the interrupt vector */ - u16 target_itr; /* value in usecs divided by the hw->itr_gran */ - u16 current_itr; /* value in usecs divided by the hw->itr_gran */ - /* high bit set means dynamic ITR, rest is used to store user - * readable ITR value in usecs and must be converted before programming - * to a register. + /* this matches the maximum number of ITR bits, but in usec + * values, so it is shifted left one bit (bit zero is ignored) */ - u16 itr_setting; + u16 itr_setting:13; + u16 itr_reserved:2; + u16 itr_mode:1; }; struct ice_coalesce_stored { diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 7ead1c13f16f..9b80962ff92f 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -551,10 +551,7 @@ struct ice_dcb_app_priority_table { #define ICE_TLV_STATUS_OPER 0x1 #define ICE_TLV_STATUS_SYNC 0x2 #define ICE_TLV_STATUS_ERR 0x4 -#define ICE_APP_PROT_ID_FCOE 0x8906 -#define ICE_APP_PROT_ID_ISCSI 0x0cbc #define ICE_APP_PROT_ID_ISCSI_860 0x035c -#define ICE_APP_PROT_ID_FIP 0x8914 #define ICE_APP_SEL_ETHTYPE 0x1 #define ICE_APP_SEL_TCPIP 0x2 #define ICE_CEE_APP_SEL_ETHTYPE 0x0 diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c index 1f4ba38b1599..eee180d8c024 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c @@ -1548,7 +1548,7 @@ static void ice_vf_fdir_timer(struct timer_list *t) ctx_done->v_opcode = ctx_irq->v_opcode; spin_unlock_irqrestore(&fdir->ctx_lock, flags); - set_bit(__ICE_FD_VF_FLUSH_CTX, pf->state); + set_bit(ICE_FD_VF_FLUSH_CTX, pf->state); ice_service_task_schedule(pf); } @@ -1596,7 +1596,7 @@ ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi, if (!ret) dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id); - set_bit(__ICE_FD_VF_FLUSH_CTX, pf->state); + set_bit(ICE_FD_VF_FLUSH_CTX, pf->state); ice_service_task_schedule(pf); } @@ -1847,7 +1847,7 @@ void ice_flush_fdir_ctx(struct ice_pf *pf) { int i; - if (!test_and_clear_bit(__ICE_FD_VF_FLUSH_CTX, pf->state)) + if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state)) return; ice_for_each_vf(pf, i) { diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index e68d52a6b11d..e38d4adc5b8d 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -371,7 +371,7 @@ void ice_free_vfs(struct ice_pf *pf) if (!pf->vf) return; - while (test_and_set_bit(__ICE_VF_DIS, pf->state)) + while (test_and_set_bit(ICE_VF_DIS, pf->state)) usleep_range(1000, 2000); /* Disable IOV before freeing resources. 
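The loop just above is a common kernel idiom: claim a state bit atomically and sleep while another path holds it. The same pattern appears with ICE_CFG_BUSY elsewhere in this series. In isolation, with invented function names:

#include <linux/bitops.h>
#include <linux/delay.h>

/* Returns with the bit owned; the critical section must end with a
 * clear_bit() on the same bit so that waiters can make progress.
 */
static void my_claim_state_bit(unsigned long *state, int bit)
{
        while (test_and_set_bit(bit, state))
                usleep_range(1000, 2000); /* requires a sleepable context */
}

static void my_release_state_bit(unsigned long *state, int bit)
{
        clear_bit(bit, state);
}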
This lets any VF drivers @@ -424,7 +424,7 @@ void ice_free_vfs(struct ice_pf *pf) wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); } } - clear_bit(__ICE_VF_DIS, pf->state); + clear_bit(ICE_VF_DIS, pf->state); clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags); } @@ -1258,7 +1258,7 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) return false; /* If VFs have been disabled, there is no need to reset */ - if (test_and_set_bit(__ICE_VF_DIS, pf->state)) + if (test_and_set_bit(ICE_VF_DIS, pf->state)) return false; /* Begin reset on all VFs at once */ @@ -1314,7 +1314,7 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) } ice_flush(hw); - clear_bit(__ICE_VF_DIS, pf->state); + clear_bit(ICE_VF_DIS, pf->state); return true; } @@ -1334,7 +1334,7 @@ static bool ice_is_vf_disabled(struct ice_vf *vf) * means something else is resetting the VF, so we shouldn't continue. * Otherwise, set disable VF state bit for actual reset, and continue. */ - return (test_bit(__ICE_VF_DIS, pf->state) || + return (test_bit(ICE_VF_DIS, pf->state) || test_bit(ICE_VF_STATE_DIS, vf->vf_states)); } @@ -1359,7 +1359,7 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) dev = ice_pf_to_dev(pf); - if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) { + if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) { dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n", vf->vf_id); return true; @@ -1651,7 +1651,7 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) /* Disable global interrupt 0 so we don't try to handle the VFLR. */ wr32(hw, GLINT_DYN_CTL(pf->oicr_idx), ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S); - set_bit(__ICE_OICR_INTR_DIS, pf->state); + set_bit(ICE_OICR_INTR_DIS, pf->state); ice_flush(hw); ret = pci_enable_sriov(pf->pdev, num_vfs); @@ -1679,7 +1679,7 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) goto err_unroll_sriov; } - clear_bit(__ICE_VF_DIS, pf->state); + clear_bit(ICE_VF_DIS, pf->state); return 0; err_unroll_sriov: @@ -1691,7 +1691,7 @@ err_pci_disable_sriov: err_unroll_intr: /* rearm interrupts here */ ice_irq_dynamic_ena(hw, NULL, NULL); - clear_bit(__ICE_OICR_INTR_DIS, pf->state); + clear_bit(ICE_OICR_INTR_DIS, pf->state); return ret; } @@ -1809,7 +1809,7 @@ void ice_process_vflr_event(struct ice_pf *pf) unsigned int vf_id; u32 reg; - if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) || + if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) || !pf->num_alloc_vfs) return; @@ -4194,7 +4194,7 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf) int i; /* check that there are pending MDD events to print */ - if (!test_and_clear_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state)) + if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state)) return; /* VF MDD event logs are rate limited to one second intervals */ @@ -4234,7 +4234,6 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf) */ void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) { - struct pci_dev *vfdev; u16 vf_id; int pos; @@ -4243,6 +4242,8 @@ void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); if (pos) { + struct pci_dev *vfdev; + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id); vfdev = pci_get_device(pdev->vendor, vf_id, NULL); diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 17ab8ef024ad..faa7b8d96adb 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -108,9 +108,6 @@ ice_qvec_cfg_msix(struct ice_vsi *vsi, struct 
ice_q_vector *q_vector) ice_cfg_itr(hw, q_vector); - wr32(hw, GLINT_RATE(reg_idx), - ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran)); - ice_for_each_ring(ring, q_vector->tx) ice_cfg_txq_interrupt(vsi, ring->reg_idx, reg_idx, q_vector->tx.itr_idx); @@ -159,7 +156,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx) rx_ring = vsi->rx_rings[q_idx]; q_vector = rx_ring->q_vector; - while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state)) { + while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) { timeout--; if (!timeout) return -EBUSY; @@ -249,7 +246,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx) if (err) goto free_buf; - clear_bit(__ICE_CFG_BUSY, vsi->state); + clear_bit(ICE_CFG_BUSY, vsi->state); ice_qvec_toggle_napi(vsi, q_vector, true); ice_qvec_ena_irq(vsi, q_vector); @@ -758,7 +755,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, struct ice_vsi *vsi = np->vsi; struct ice_ring *ring; - if (test_bit(__ICE_DOWN, vsi->state)) + if (test_bit(ICE_DOWN, vsi->state)) return -ENETDOWN; if (!ice_is_xdp_ena_vsi(vsi)) diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index d2e2c50ce257..ca5429774994 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -340,10 +340,10 @@ #define I210_RXPBSIZE_PB_32KB 0x00000020 #define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ #define I210_TXPBSIZE_MASK 0xC0FFFFFF -#define I210_TXPBSIZE_PB0_8KB (8 << 0) -#define I210_TXPBSIZE_PB1_8KB (8 << 6) -#define I210_TXPBSIZE_PB2_4KB (4 << 12) -#define I210_TXPBSIZE_PB3_4KB (4 << 18) +#define I210_TXPBSIZE_PB0_6KB (6 << 0) +#define I210_TXPBSIZE_PB1_6KB (6 << 6) +#define I210_TXPBSIZE_PB2_6KB (6 << 12) +#define I210_TXPBSIZE_PB3_6KB (6 << 18) #define I210_DTXMXPKTSZ_DEFAULT 0x00000098 diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c index fd8eb2f9ab9d..e63ee3cca5ea 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.c +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -484,6 +484,31 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) } /** + * igb_i21x_hw_doublecheck - double-check a potential HW issue on i21X + * @hw: pointer to the HW structure + * + * Checks if the multicast array was written correctly + * and, if not, writes it to the registers again + **/ +static void igb_i21x_hw_doublecheck(struct e1000_hw *hw) +{ + bool is_failed; + int i; + + do { + is_failed = false; + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) { + if (array_rd32(E1000_MTA, i) != hw->mac.mta_shadow[i]) { + is_failed = true; + array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]); + wrfl(); + break; + } + } + } while (is_failed); +} + +/** * igb_update_mc_addr_list - Update Multicast addresses * @hw: pointer to the HW structure * @mc_addr_list: array of multicast addresses to program @@ -516,6 +541,8 @@ void igb_update_mc_addr_list(struct e1000_hw *hw, for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]); wrfl(); + if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) + igb_i21x_hw_doublecheck(hw); } /** diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index c9e8c65a3cfe..038a9fd1af44 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1921,8 +1921,8 @@ static void igb_setup_tx_mode(struct igb_adapter *adapter) */ val = rd32(E1000_TXPBS); val &= ~I210_TXPBSIZE_MASK; - val |= 
I210_TXPBSIZE_PB0_8KB | I210_TXPBSIZE_PB1_8KB | - I210_TXPBSIZE_PB2_4KB | I210_TXPBSIZE_PB3_4KB; + val |= I210_TXPBSIZE_PB0_6KB | I210_TXPBSIZE_PB1_6KB | + I210_TXPBSIZE_PB2_6KB | I210_TXPBSIZE_PB3_6KB; wr32(E1000_TXPBS, val); val = rd32(E1000_RXPBS); diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 91493a73355d..25871351730b 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -28,6 +28,11 @@ void igc_ethtool_set_ops(struct net_device *); #define MAX_ETYPE_FILTER 8 #define IGC_RETA_SIZE 128 +/* SDP support */ +#define IGC_N_EXTTS 2 +#define IGC_N_PEROUT 2 +#define IGC_N_SDP 4 + enum igc_mac_filter_type { IGC_MAC_FILTER_TYPE_DST = 0, IGC_MAC_FILTER_TYPE_SRC @@ -223,6 +228,14 @@ struct igc_adapter { char fw_version[32]; struct bpf_prog *xdp_prog; + + bool pps_sys_wrap_on; + + struct ptp_pin_desc sdp_config[IGC_N_SDP]; + struct { + struct timespec64 start; + struct timespec64 period; + } perout[IGC_N_PEROUT]; }; void igc_up(struct igc_adapter *adapter); diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index 35ed997af075..0103dda32f39 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -8,6 +8,8 @@ #define REQ_TX_DESCRIPTOR_MULTIPLE 8 #define REQ_RX_DESCRIPTOR_MULTIPLE 8 +#define IGC_CTRL_EXT_SDP2_DIR 0x00000400 /* SDP2 Data direction */ +#define IGC_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */ #define IGC_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */ /* Definitions for power management and wakeup registers */ @@ -96,6 +98,9 @@ #define IGC_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ #define IGC_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define IGC_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ +#define IGC_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ + /* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */ #define MAX_JUMBO_FRAME_SIZE 0x2600 @@ -403,6 +408,64 @@ #define IGC_TSYNCTXCTL_START_SYNC 0x80000000 /* initiate sync */ #define IGC_TSYNCTXCTL_TXSYNSIG 0x00000020 /* Sample TX tstamp in PHY sop */ +/* Timer selection bits */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM0 (0u << 30) /* Select SYSTIM0 for auxiliary time stamp */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM1 (1u << 30) /* Select SYSTIM1 for auxiliary time stamp */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM2 (2u << 30) /* Select SYSTIM2 for auxiliary time stamp */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM3 (3u << 30) /* Select SYSTIM3 for auxiliary time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM0 (0u << 30) /* Select SYSTIM0 for target time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM1 (1u << 30) /* Select SYSTIM1 for target time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM2 (2u << 30) /* Select SYSTIM2 for target time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM3 (3u << 30) /* Select SYSTIM3 for target time stamp */ + +/* TSAUXC Configuration Bits */ +#define IGC_TSAUXC_EN_TT0 BIT(0) /* Enable target time 0. */ +#define IGC_TSAUXC_EN_TT1 BIT(1) /* Enable target time 1. */ +#define IGC_TSAUXC_EN_CLK0 BIT(2) /* Enable Configurable Frequency Clock 0. */ +#define IGC_TSAUXC_EN_CLK1 BIT(5) /* Enable Configurable Frequency Clock 1. */ +#define IGC_TSAUXC_EN_TS0 BIT(8) /* Enable hardware timestamp 0. */ +#define IGC_TSAUXC_AUTT0 BIT(9) /* Auxiliary Timestamp Taken. */ +#define IGC_TSAUXC_EN_TS1 BIT(10) /* Enable hardware timestamp 0. 
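The SYSTIM selector defines above occupy bits 31:30 of the target-time and auxiliary-timestamp registers, so they are OR'd into the same write as the time value. A schematic fragment matching what igc_tsync_interrupt() does further down; register and bit names are the defines from this file, the helper itself is illustrative:

/* Arm target time 0 from a timespec64, keeping SYSTIM0 as the source. */
static void my_arm_tt0(struct igc_hw *hw, const struct timespec64 *ts)
{
        u32 tsauxc;

        wr32(IGC_TRGTTIML0, ts->tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0);
        wr32(IGC_TRGTTIMH0, (u32)ts->tv_sec);

        tsauxc = rd32(IGC_TSAUXC);
        tsauxc |= IGC_TSAUXC_EN_TT0; /* auto-cleared when the target time
                                      * fires, unless IGC_TSAUXC_DIS_TS_CLEAR
                                      * is set
                                      */
        wr32(IGC_TSAUXC, tsauxc);
}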
*/ +#define IGC_TSAUXC_AUTT1 BIT(11) /* Auxiliary Timestamp Taken. */ +#define IGC_TSAUXC_PLSG BIT(17) /* Generate a pulse. */ +#define IGC_TSAUXC_DISABLE1 BIT(27) /* Disable SYSTIM0 Count Operation. */ +#define IGC_TSAUXC_DISABLE2 BIT(28) /* Disable SYSTIM1 Count Operation. */ +#define IGC_TSAUXC_DISABLE3 BIT(29) /* Disable SYSTIM2 Count Operation. */ +#define IGC_TSAUXC_DIS_TS_CLEAR BIT(30) /* Disable EN_TT0/1 auto clear. */ +#define IGC_TSAUXC_DISABLE0 BIT(31) /* Disable SYSTIM0 Count Operation. */ + +/* SDP Configuration Bits */ +#define IGC_AUX0_SEL_SDP0 (0u << 0) /* Assign SDP0 to auxiliary time stamp 0. */ +#define IGC_AUX0_SEL_SDP1 (1u << 0) /* Assign SDP1 to auxiliary time stamp 0. */ +#define IGC_AUX0_SEL_SDP2 (2u << 0) /* Assign SDP2 to auxiliary time stamp 0. */ +#define IGC_AUX0_SEL_SDP3 (3u << 0) /* Assign SDP3 to auxiliary time stamp 0. */ +#define IGC_AUX0_TS_SDP_EN (1u << 2) /* Enable auxiliary time stamp trigger 0. */ +#define IGC_AUX1_SEL_SDP0 (0u << 3) /* Assign SDP0 to auxiliary time stamp 1. */ +#define IGC_AUX1_SEL_SDP1 (1u << 3) /* Assign SDP1 to auxiliary time stamp 1. */ +#define IGC_AUX1_SEL_SDP2 (2u << 3) /* Assign SDP2 to auxiliary time stamp 1. */ +#define IGC_AUX1_SEL_SDP3 (3u << 3) /* Assign SDP3 to auxiliary time stamp 1. */ +#define IGC_AUX1_TS_SDP_EN (1u << 5) /* Enable auxiliary time stamp trigger 1. */ +#define IGC_TS_SDP0_SEL_TT0 (0u << 6) /* Target time 0 is output on SDP0. */ +#define IGC_TS_SDP0_SEL_TT1 (1u << 6) /* Target time 1 is output on SDP0. */ +#define IGC_TS_SDP0_SEL_FC0 (2u << 6) /* Freq clock 0 is output on SDP0. */ +#define IGC_TS_SDP0_SEL_FC1 (3u << 6) /* Freq clock 1 is output on SDP0. */ +#define IGC_TS_SDP0_EN (1u << 8) /* SDP0 is assigned to Tsync. */ +#define IGC_TS_SDP1_SEL_TT0 (0u << 9) /* Target time 0 is output on SDP1. */ +#define IGC_TS_SDP1_SEL_TT1 (1u << 9) /* Target time 1 is output on SDP1. */ +#define IGC_TS_SDP1_SEL_FC0 (2u << 9) /* Freq clock 0 is output on SDP1. */ +#define IGC_TS_SDP1_SEL_FC1 (3u << 9) /* Freq clock 1 is output on SDP1. */ +#define IGC_TS_SDP1_EN (1u << 11) /* SDP1 is assigned to Tsync. */ +#define IGC_TS_SDP2_SEL_TT0 (0u << 12) /* Target time 0 is output on SDP2. */ +#define IGC_TS_SDP2_SEL_TT1 (1u << 12) /* Target time 1 is output on SDP2. */ +#define IGC_TS_SDP2_SEL_FC0 (2u << 12) /* Freq clock 0 is output on SDP2. */ +#define IGC_TS_SDP2_SEL_FC1 (3u << 12) /* Freq clock 1 is output on SDP2. */ +#define IGC_TS_SDP2_EN (1u << 14) /* SDP2 is assigned to Tsync. */ +#define IGC_TS_SDP3_SEL_TT0 (0u << 15) /* Target time 0 is output on SDP3. */ +#define IGC_TS_SDP3_SEL_TT1 (1u << 15) /* Target time 1 is output on SDP3. */ +#define IGC_TS_SDP3_SEL_FC0 (2u << 15) /* Freq clock 0 is output on SDP3. */ +#define IGC_TS_SDP3_SEL_FC1 (3u << 15) /* Freq clock 1 is output on SDP3. */ +#define IGC_TS_SDP3_EN (1u << 17) /* SDP3 is assigned to Tsync. 
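Each SDP pin gets a two-bit routing field plus an enable bit in TSSDP. Note that the *_SEL_FC1 values (3u << shift) happen to cover the whole two-bit field, which is why the driver later reuses them as clear masks. A hedged fragment routing target time 0 to SDP1 (the helper name is invented; the driver's igc_pin_perout() additionally drops any input-timestamp routing for the pin):

static u32 my_route_tt0_to_sdp1(u32 tssdp)
{
        tssdp &= ~IGC_TS_SDP1_SEL_FC1;  /* 3u << 9: clears both field bits */
        tssdp |= IGC_TS_SDP1_SEL_TT0 |  /* 0u << 9: select target time 0 */
                 IGC_TS_SDP1_EN;        /* hand SDP1 to the Tsync block */
        return tssdp;
}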
*/ + /* Transmit Scheduling */ #define IGC_TQAVCTRL_TRANSMIT_MODE_TSN 0x00000001 #define IGC_TQAVCTRL_ENHANCED_QAV 0x00000008 diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index 8722294ab90c..9722449d7633 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -65,6 +65,8 @@ static const struct igc_stats igc_gstrings_stats[] = { IGC_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), IGC_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped), IGC_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), + IGC_STAT("tx_lpi_counter", stats.tlpic), + IGC_STAT("rx_lpi_counter", stats.rlpic), }; #define IGC_NETDEV_STAT(_net_stat) { \ diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c index cc83bb5c15e8..b2ef9fde97b3 100644 --- a/drivers/net/ethernet/intel/igc/igc_i225.c +++ b/drivers/net/ethernet/intel/igc/igc_i225.c @@ -229,10 +229,11 @@ static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words, if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) || words == 0) { hw_dbg("nvm parameter(s) out of bounds\n"); - goto out; + return ret_val; } for (i = 0; i < words; i++) { + ret_val = -IGC_ERR_NVM; eewr = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) | (data[i] << IGC_NVM_RW_REG_DATA) | IGC_NVM_RW_REG_START; @@ -254,7 +255,6 @@ static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words, } } -out: return ret_val; } diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 10765491e357..069471b7ffb0 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -4250,9 +4250,20 @@ igc_features_check(struct sk_buff *skb, struct net_device *dev, static void igc_tsync_interrupt(struct igc_adapter *adapter) { + u32 ack, tsauxc, sec, nsec, tsicr; struct igc_hw *hw = &adapter->hw; - u32 tsicr = rd32(IGC_TSICR); - u32 ack = 0; + struct ptp_clock_event event; + struct timespec64 ts; + + tsicr = rd32(IGC_TSICR); + ack = 0; + + if (tsicr & IGC_TSICR_SYS_WRAP) { + event.type = PTP_CLOCK_PPS; + if (adapter->ptp_caps.pps) + ptp_clock_event(adapter->ptp_clock, &event); + ack |= IGC_TSICR_SYS_WRAP; + } if (tsicr & IGC_TSICR_TXTS) { /* retrieve hardware timestamp */ @@ -4260,6 +4271,54 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter) ack |= IGC_TSICR_TXTS; } + if (tsicr & IGC_TSICR_TT0) { + spin_lock(&adapter->tmreg_lock); + ts = timespec64_add(adapter->perout[0].start, + adapter->perout[0].period); + wr32(IGC_TRGTTIML0, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); + wr32(IGC_TRGTTIMH0, (u32)ts.tv_sec); + tsauxc = rd32(IGC_TSAUXC); + tsauxc |= IGC_TSAUXC_EN_TT0; + wr32(IGC_TSAUXC, tsauxc); + adapter->perout[0].start = ts; + spin_unlock(&adapter->tmreg_lock); + ack |= IGC_TSICR_TT0; + } + + if (tsicr & IGC_TSICR_TT1) { + spin_lock(&adapter->tmreg_lock); + ts = timespec64_add(adapter->perout[1].start, + adapter->perout[1].period); + wr32(IGC_TRGTTIML1, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); + wr32(IGC_TRGTTIMH1, (u32)ts.tv_sec); + tsauxc = rd32(IGC_TSAUXC); + tsauxc |= IGC_TSAUXC_EN_TT1; + wr32(IGC_TSAUXC, tsauxc); + adapter->perout[1].start = ts; + spin_unlock(&adapter->tmreg_lock); + ack |= IGC_TSICR_TT1; + } + + if (tsicr & IGC_TSICR_AUTT0) { + nsec = rd32(IGC_AUXSTMPL0); + sec = rd32(IGC_AUXSTMPH0); + event.type = PTP_CLOCK_EXTTS; + event.index = 0; + event.timestamp = sec * NSEC_PER_SEC + nsec; + 
ptp_clock_event(adapter->ptp_clock, &event); + ack |= IGC_TSICR_AUTT0; + } + + if (tsicr & IGC_TSICR_AUTT1) { + nsec = rd32(IGC_AUXSTMPL1); + sec = rd32(IGC_AUXSTMPH1); + event.type = PTP_CLOCK_EXTTS; + event.index = 1; + event.timestamp = sec * NSEC_PER_SEC + nsec; + ptp_clock_event(adapter->ptp_clock, &event); + ack |= IGC_TSICR_AUTT1; + } + /* acknowledge the interrupts */ wr32(IGC_TSICR, ack); } diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index dfa3b247fcd8..69617d2c1be2 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -120,12 +120,289 @@ static int igc_ptp_settime_i225(struct ptp_clock_info *ptp, return 0; } +static void igc_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext) +{ + u32 *ptr = pin < 2 ? ctrl : ctrl_ext; + static const u32 mask[IGC_N_SDP] = { + IGC_CTRL_SDP0_DIR, + IGC_CTRL_SDP1_DIR, + IGC_CTRL_EXT_SDP2_DIR, + IGC_CTRL_EXT_SDP3_DIR, + }; + + if (input) + *ptr &= ~mask[pin]; + else + *ptr |= mask[pin]; +} + +static void igc_pin_perout(struct igc_adapter *igc, int chan, int pin, int freq) +{ + static const u32 igc_aux0_sel_sdp[IGC_N_SDP] = { + IGC_AUX0_SEL_SDP0, IGC_AUX0_SEL_SDP1, IGC_AUX0_SEL_SDP2, IGC_AUX0_SEL_SDP3, + }; + static const u32 igc_aux1_sel_sdp[IGC_N_SDP] = { + IGC_AUX1_SEL_SDP0, IGC_AUX1_SEL_SDP1, IGC_AUX1_SEL_SDP2, IGC_AUX1_SEL_SDP3, + }; + static const u32 igc_ts_sdp_en[IGC_N_SDP] = { + IGC_TS_SDP0_EN, IGC_TS_SDP1_EN, IGC_TS_SDP2_EN, IGC_TS_SDP3_EN, + }; + static const u32 igc_ts_sdp_sel_tt0[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_TT0, IGC_TS_SDP1_SEL_TT0, + IGC_TS_SDP2_SEL_TT0, IGC_TS_SDP3_SEL_TT0, + }; + static const u32 igc_ts_sdp_sel_tt1[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_TT1, IGC_TS_SDP1_SEL_TT1, + IGC_TS_SDP2_SEL_TT1, IGC_TS_SDP3_SEL_TT1, + }; + static const u32 igc_ts_sdp_sel_fc0[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_FC0, IGC_TS_SDP1_SEL_FC0, + IGC_TS_SDP2_SEL_FC0, IGC_TS_SDP3_SEL_FC0, + }; + static const u32 igc_ts_sdp_sel_fc1[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_FC1, IGC_TS_SDP1_SEL_FC1, + IGC_TS_SDP2_SEL_FC1, IGC_TS_SDP3_SEL_FC1, + }; + static const u32 igc_ts_sdp_sel_clr[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_FC1, IGC_TS_SDP1_SEL_FC1, + IGC_TS_SDP2_SEL_FC1, IGC_TS_SDP3_SEL_FC1, + }; + struct igc_hw *hw = &igc->hw; + u32 ctrl, ctrl_ext, tssdp = 0; + + ctrl = rd32(IGC_CTRL); + ctrl_ext = rd32(IGC_CTRL_EXT); + tssdp = rd32(IGC_TSSDP); + + igc_pin_direction(pin, 0, &ctrl, &ctrl_ext); + + /* Make sure this pin is not enabled as an input. 
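igc_pin_direction() above hides the register split: the SDP0/SDP1 direction bits live in CTRL while SDP2/SDP3 live in CTRL_EXT, so callers always fetch and write back both words. A minimal illustrative caller (function name hypothetical):

static void my_sdp_to_output(struct igc_adapter *igc, int pin)
{
        struct igc_hw *hw = &igc->hw;
        u32 ctrl = rd32(IGC_CTRL);
        u32 ctrl_ext = rd32(IGC_CTRL_EXT);

        igc_pin_direction(pin, 0 /* 0 = output, nonzero = input */,
                          &ctrl, &ctrl_ext);

        wr32(IGC_CTRL, ctrl);
        wr32(IGC_CTRL_EXT, ctrl_ext);
}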
*/ + if ((tssdp & IGC_AUX0_SEL_SDP3) == igc_aux0_sel_sdp[pin]) + tssdp &= ~IGC_AUX0_TS_SDP_EN; + + if ((tssdp & IGC_AUX1_SEL_SDP3) == igc_aux1_sel_sdp[pin]) + tssdp &= ~IGC_AUX1_TS_SDP_EN; + + tssdp &= ~igc_ts_sdp_sel_clr[pin]; + if (freq) { + if (chan == 1) + tssdp |= igc_ts_sdp_sel_fc1[pin]; + else + tssdp |= igc_ts_sdp_sel_fc0[pin]; + } else { + if (chan == 1) + tssdp |= igc_ts_sdp_sel_tt1[pin]; + else + tssdp |= igc_ts_sdp_sel_tt0[pin]; + } + tssdp |= igc_ts_sdp_en[pin]; + + wr32(IGC_TSSDP, tssdp); + wr32(IGC_CTRL, ctrl); + wr32(IGC_CTRL_EXT, ctrl_ext); +} + +static void igc_pin_extts(struct igc_adapter *igc, int chan, int pin) +{ + static const u32 igc_aux0_sel_sdp[IGC_N_SDP] = { + IGC_AUX0_SEL_SDP0, IGC_AUX0_SEL_SDP1, IGC_AUX0_SEL_SDP2, IGC_AUX0_SEL_SDP3, + }; + static const u32 igc_aux1_sel_sdp[IGC_N_SDP] = { + IGC_AUX1_SEL_SDP0, IGC_AUX1_SEL_SDP1, IGC_AUX1_SEL_SDP2, IGC_AUX1_SEL_SDP3, + }; + static const u32 igc_ts_sdp_en[IGC_N_SDP] = { + IGC_TS_SDP0_EN, IGC_TS_SDP1_EN, IGC_TS_SDP2_EN, IGC_TS_SDP3_EN, + }; + struct igc_hw *hw = &igc->hw; + u32 ctrl, ctrl_ext, tssdp = 0; + + ctrl = rd32(IGC_CTRL); + ctrl_ext = rd32(IGC_CTRL_EXT); + tssdp = rd32(IGC_TSSDP); + + igc_pin_direction(pin, 1, &ctrl, &ctrl_ext); + + /* Make sure this pin is not enabled as an output. */ + tssdp &= ~igc_ts_sdp_en[pin]; + + if (chan == 1) { + tssdp &= ~IGC_AUX1_SEL_SDP3; + tssdp |= igc_aux1_sel_sdp[pin] | IGC_AUX1_TS_SDP_EN; + } else { + tssdp &= ~IGC_AUX0_SEL_SDP3; + tssdp |= igc_aux0_sel_sdp[pin] | IGC_AUX0_TS_SDP_EN; + } + + wr32(IGC_TSSDP, tssdp); + wr32(IGC_CTRL, ctrl); + wr32(IGC_CTRL_EXT, ctrl_ext); +} + static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on) { + struct igc_adapter *igc = + container_of(ptp, struct igc_adapter, ptp_caps); + struct igc_hw *hw = &igc->hw; + unsigned long flags; + struct timespec64 ts; + int use_freq = 0, pin = -1; + u32 tsim, tsauxc, tsauxc_mask, tsim_mask, trgttiml, trgttimh, freqout; + s64 ns; + + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + /* Reject requests with unsupported flags */ + if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | + PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS)) + return -EOPNOTSUPP; + + /* Reject requests failing to enable both edges. 
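Seen from userspace, the flag policy here means a strict external-timestamp request succeeds only when both edges are asked for, since the hardware timestamps both edges. A sketch using the standard PTP character-device ABI (opening /dev/ptpN and error handling elided):

#include <linux/ptp_clock.h>
#include <string.h>
#include <sys/ioctl.h>

static int my_enable_extts(int ptp_fd, unsigned int index)
{
        struct ptp_extts_request req;

        memset(&req, 0, sizeof(req));
        req.index = index;
        req.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE |
                    PTP_FALLING_EDGE | PTP_STRICT_FLAGS;

        return ioctl(ptp_fd, PTP_EXTTS_REQUEST2, &req);
}

Without PTP_STRICT_FLAGS, a driver is allowed to ignore edge flags it cannot honor, which is why the strict form is the safer way to probe capabilities.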
*/ + if ((rq->extts.flags & PTP_STRICT_FLAGS) && + (rq->extts.flags & PTP_ENABLE_FEATURE) && + (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igc->ptp_clock, PTP_PF_EXTTS, + rq->extts.index); + if (pin < 0) + return -EBUSY; + } + if (rq->extts.index == 1) { + tsauxc_mask = IGC_TSAUXC_EN_TS1; + tsim_mask = IGC_TSICR_AUTT1; + } else { + tsauxc_mask = IGC_TSAUXC_EN_TS0; + tsim_mask = IGC_TSICR_AUTT0; + } + spin_lock_irqsave(&igc->tmreg_lock, flags); + tsauxc = rd32(IGC_TSAUXC); + tsim = rd32(IGC_TSIM); + if (on) { + igc_pin_extts(igc, rq->extts.index, pin); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } else { + tsauxc &= ~tsauxc_mask; + tsim &= ~tsim_mask; + } + wr32(IGC_TSAUXC, tsauxc); + wr32(IGC_TSIM, tsim); + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PEROUT: + /* Reject requests with unsupported flags */ + if (rq->perout.flags) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igc->ptp_clock, PTP_PF_PEROUT, + rq->perout.index); + if (pin < 0) + return -EBUSY; + } + ts.tv_sec = rq->perout.period.sec; + ts.tv_nsec = rq->perout.period.nsec; + ns = timespec64_to_ns(&ts); + ns = ns >> 1; + if (on && (ns <= 70000000LL || ns == 125000000LL || + ns == 250000000LL || ns == 500000000LL)) { + if (ns < 8LL) + return -EINVAL; + use_freq = 1; + } + ts = ns_to_timespec64(ns); + if (rq->perout.index == 1) { + if (use_freq) { + tsauxc_mask = IGC_TSAUXC_EN_CLK1; + tsim_mask = 0; + } else { + tsauxc_mask = IGC_TSAUXC_EN_TT1; + tsim_mask = IGC_TSICR_TT1; + } + trgttiml = IGC_TRGTTIML1; + trgttimh = IGC_TRGTTIMH1; + freqout = IGC_FREQOUT1; + } else { + if (use_freq) { + tsauxc_mask = IGC_TSAUXC_EN_CLK0; + tsim_mask = 0; + } else { + tsauxc_mask = IGC_TSAUXC_EN_TT0; + tsim_mask = IGC_TSICR_TT0; + } + trgttiml = IGC_TRGTTIML0; + trgttimh = IGC_TRGTTIMH0; + freqout = IGC_FREQOUT0; + } + spin_lock_irqsave(&igc->tmreg_lock, flags); + tsauxc = rd32(IGC_TSAUXC); + tsim = rd32(IGC_TSIM); + if (rq->perout.index == 1) { + tsauxc &= ~(IGC_TSAUXC_EN_TT1 | IGC_TSAUXC_EN_CLK1); + tsim &= ~IGC_TSICR_TT1; + } else { + tsauxc &= ~(IGC_TSAUXC_EN_TT0 | IGC_TSAUXC_EN_CLK0); + tsim &= ~IGC_TSICR_TT0; + } + if (on) { + int i = rq->perout.index; + + igc_pin_perout(igc, i, pin, use_freq); + igc->perout[i].start.tv_sec = rq->perout.start.sec; + igc->perout[i].start.tv_nsec = rq->perout.start.nsec; + igc->perout[i].period.tv_sec = ts.tv_sec; + igc->perout[i].period.tv_nsec = ts.tv_nsec; + wr32(trgttimh, rq->perout.start.sec); + /* For now, always select timer 0 as source. 
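Two quirks of the period handling above are worth restating: the hardware toggles the pin on every expiry, so half the requested period is programmed; and small or specific half-periods (covering common 1 Hz to 4 Hz outputs) go through the free-running frequency-output path instead of re-arming target times from the interrupt handler. A condensed restatement, slightly simplified in that the driver applies the 8 ns floor only inside the frequency branch:

static int my_plan_perout(s64 period_ns, bool *use_freq, s64 *half_ns)
{
        s64 ns = period_ns >> 1; /* one pin toggle per half period */

        if (ns < 8) /* below what FREQOUT can express */
                return -EINVAL;

        *use_freq = ns <= 70000000LL || ns == 125000000LL ||
                    ns == 250000000LL || ns == 500000000LL;
        *half_ns = ns;
        return 0;
}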
*/ + wr32(trgttiml, rq->perout.start.nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); + if (use_freq) + wr32(freqout, ns); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } + wr32(IGC_TSAUXC, tsauxc); + wr32(IGC_TSIM, tsim); + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PPS: + spin_lock_irqsave(&igc->tmreg_lock, flags); + tsim = rd32(IGC_TSIM); + if (on) + tsim |= IGC_TSICR_SYS_WRAP; + else + tsim &= ~IGC_TSICR_SYS_WRAP; + igc->pps_sys_wrap_on = on; + wr32(IGC_TSIM, tsim); + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + return 0; + + default: + break; + } + return -EOPNOTSUPP; } +static int igc_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + switch (func) { + case PTP_PF_NONE: + case PTP_PF_EXTTS: + case PTP_PF_PEROUT: + break; + case PTP_PF_PHYSYNC: + return -1; + } + return 0; +} + /** * igc_ptp_systim_to_hwtstamp - convert system time value to HW timestamp * @adapter: board private structure @@ -486,9 +763,17 @@ void igc_ptp_init(struct igc_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct igc_hw *hw = &adapter->hw; + int i; switch (hw->mac.type) { case igc_i225: + for (i = 0; i < IGC_N_SDP; i++) { + struct ptp_pin_desc *ppd = &adapter->sdp_config[i]; + + snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i); + ppd->index = i; + ppd->func = PTP_PF_NONE; + } snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); adapter->ptp_caps.owner = THIS_MODULE; adapter->ptp_caps.max_adj = 62499999; @@ -497,6 +782,12 @@ void igc_ptp_init(struct igc_adapter *adapter) adapter->ptp_caps.gettimex64 = igc_ptp_gettimex64_i225; adapter->ptp_caps.settime64 = igc_ptp_settime_i225; adapter->ptp_caps.enable = igc_ptp_feature_enable_i225; + adapter->ptp_caps.pps = 1; + adapter->ptp_caps.pin_config = adapter->sdp_config; + adapter->ptp_caps.n_ext_ts = IGC_N_EXTTS; + adapter->ptp_caps.n_per_out = IGC_N_PEROUT; + adapter->ptp_caps.n_pins = IGC_N_SDP; + adapter->ptp_caps.verify = igc_ptp_verify_pin; break; default: adapter->ptp_clock = NULL; @@ -598,7 +889,9 @@ void igc_ptp_reset(struct igc_adapter *adapter) case igc_i225: wr32(IGC_TSAUXC, 0x0); wr32(IGC_TSSDP, 0x0); - wr32(IGC_TSIM, IGC_TSICR_INTERRUPTS); + wr32(IGC_TSIM, + IGC_TSICR_INTERRUPTS | + (adapter->pps_sys_wrap_on ? 
IGC_TSICR_SYS_WRAP : 0)); wr32(IGC_IMS, IGC_IMS_TS); break; default: diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index 3e5cb7aef9da..cc174853554b 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -192,6 +192,16 @@ #define IGC_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ #define IGC_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ #define IGC_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */ +#define IGC_TRGTTIML0 0x0B644 /* Target Time Register 0 Low - RW */ +#define IGC_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */ +#define IGC_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */ +#define IGC_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */ +#define IGC_FREQOUT0 0x0B654 /* Frequency Out 0 Control Register - RW */ +#define IGC_FREQOUT1 0x0B658 /* Frequency Out 1 Control Register - RW */ +#define IGC_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */ +#define IGC_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */ +#define IGC_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */ +#define IGC_AUXSTMPH1 0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */ #define IGC_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ #define IGC_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c index c00332d2e02a..72e6ebffea33 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -361,7 +361,7 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) } #ifdef IXGBE_FCOE - /* Reprogam FCoE hardware offloads when the traffic class + /* Reprogram FCoE hardware offloads when the traffic class * FCoE is using changes. This happens if the APP info * changes or the up2tc mapping is updated. */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 7ba1c2985ef7..c5ec17d19c59 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6536,6 +6536,13 @@ err_setup_tx: return err; } +static int ixgbe_rx_napi_id(struct ixgbe_ring *rx_ring) +{ + struct ixgbe_q_vector *q_vector = rx_ring->q_vector; + + return q_vector ? 
q_vector->napi.napi_id : 0; +} + /** * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) * @adapter: pointer to ixgbe_adapter @@ -6583,7 +6590,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, /* XDP RX-queue info */ if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev, - rx_ring->queue_index, rx_ring->q_vector->napi.napi_id) < 0) + rx_ring->queue_index, ixgbe_rx_napi_id(rx_ring)) < 0) goto err; rx_ring->xdp_prog = adapter->xdp_prog; @@ -6892,6 +6899,11 @@ static int __maybe_unused ixgbe_resume(struct device *dev_d) adapter->hw.hw_addr = adapter->io_addr; + err = pci_enable_device_mem(pdev); + if (err) { + e_dev_err("Cannot enable PCI device from suspend\n"); + return err; + } smp_mb__before_atomic(); clear_bit(__IXGBE_DISABLED, &adapter->state); pci_set_master(pdev); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 73bc170d1ae9..24aa97f993ca 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -380,6 +380,9 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) case X557_PHY_ID2: phy_type = ixgbe_phy_x550em_ext_t; break; + case BCM54616S_E_PHY_ID: + phy_type = ixgbe_phy_ext_1g_t; + break; default: phy_type = ixgbe_phy_unknown; break; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 2be1c4c72435..2647937f7f4d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -1407,6 +1407,7 @@ struct ixgbe_nvm_version { #define QT2022_PHY_ID 0x0043A400 #define ATH_PHY_ID 0x03429050 #define AQ_FW_REV 0x20 +#define BCM54616S_E_PHY_ID 0x03625D10 /* Special PHY Init Routine */ #define IXGBE_PHY_INIT_OFFSET_NL 0x002B @@ -3383,10 +3384,6 @@ struct ixgbe_hw_stats { /* forward declaration */ struct ixgbe_hw; -/* iterator type for walking multicast address lists */ -typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr, - u32 *vmdq); - /* Function pointer table */ struct ixgbe_eeprom_operations { s32 (*init_params)(struct ixgbe_hw *); diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index d1e9e306653b..1d8209df4162 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -16,9 +16,6 @@ struct ixgbe_hw; -/* iterator type for walking multicast address lists */ -typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr, - u32 *vmdq); struct ixgbe_mac_operations { s32 (*init_hw)(struct ixgbe_hw *); s32 (*reset_hw)(struct ixgbe_hw *); diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c index 0f8ef8f1232c..41c2ad210bc9 100644 --- a/drivers/net/ethernet/lantiq_xrx200.c +++ b/drivers/net/ethernet/lantiq_xrx200.c @@ -435,7 +435,6 @@ static int xrx200_probe(struct platform_device *pdev) struct resource *res; struct xrx200_priv *priv; struct net_device *net_dev; - const u8 *mac; int err; /* alloc the network device */ @@ -477,10 +476,8 @@ static int xrx200_probe(struct platform_device *pdev) return PTR_ERR(priv->clk); } - mac = of_get_mac_address(np); - if (!IS_ERR(mac)) - ether_addr_copy(net_dev->dev_addr, mac); - else + err = of_get_mac_address(np, net_dev->dev_addr); + if (err) eth_hw_addr_random(net_dev); /* bring up the dma engine and IP core */ diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 
ca1681aa951a..d207bfcaf31d 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2702,7 +2702,6 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, struct platform_device *ppdev; struct mv643xx_eth_platform_data ppd; struct resource res; - const char *mac_addr; int ret; int dev_num = 0; @@ -2733,9 +2732,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, return -EINVAL; } - mac_addr = of_get_mac_address(pnp); - if (!IS_ERR(mac_addr)) - ether_addr_copy(ppd.mac_addr, mac_addr); + of_get_mac_address(pnp, ppd.mac_addr); mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size); mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr); diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index f20dfd1d7a6b..7d5cd9bc6c99 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -5141,7 +5141,6 @@ static int mvneta_probe(struct platform_device *pdev) struct net_device *dev; struct phylink *phylink; struct phy *comphy; - const char *dt_mac_addr; char hw_mac_addr[ETH_ALEN]; phy_interface_t phy_mode; const char *mac_from; @@ -5237,10 +5236,9 @@ static int mvneta_probe(struct platform_device *pdev) goto err_free_ports; } - dt_mac_addr = of_get_mac_address(dn); - if (!IS_ERR(dt_mac_addr)) { + err = of_get_mac_address(dn, dev->dev_addr); + if (!err) { mac_from = "device tree"; - ether_addr_copy(dev->dev_addr, dt_mac_addr); } else { mvneta_get_mac_addr(pp, hw_mac_addr); if (is_valid_ether_addr(hw_mac_addr)) { diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c index 4812cdb4609e..7cc7d72d761e 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c @@ -918,9 +918,8 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto, mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - /* Set L4 offset */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, - sizeof(struct iphdr) - 4, + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, -4, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK); @@ -1335,7 +1334,7 @@ static void mvpp2_prs_vid_init(struct mvpp2 *priv) static int mvpp2_prs_etype_init(struct mvpp2 *priv) { struct mvpp2_prs_entry pe; - int tid; + int tid, ihl; /* Ethertype: PPPoE */ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, @@ -1427,67 +1426,43 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv) MVPP2_PRS_RI_UDF3_MASK); mvpp2_prs_hw_write(priv, &pe); - /* Ethertype: IPv4 without options */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); - pe.index = tid; - - mvpp2_prs_match_etype(&pe, 0, ETH_P_IP); - mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL, - MVPP2_PRS_IPV4_HEAD_MASK | - MVPP2_PRS_IPV4_IHL_MASK); - - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, - MVPP2_PRS_RI_L3_PROTO_MASK); - /* goto ipv4 dest-address (skip eth_type + IP-header-size - 4) */ - mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + - 
sizeof(struct iphdr) - 4, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - /* Set L3 offset */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, - MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); - priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; - priv->prs_shadow[pe.index].finish = false; - mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4, - MVPP2_PRS_RI_L3_PROTO_MASK); - mvpp2_prs_hw_write(priv, &pe); - - /* Ethertype: IPv4 with options */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - pe.index = tid; + /* Ethertype: IPv4 with header length >= 5 */ + for (ihl = MVPP2_PRS_IPV4_IHL_MIN; ihl <= MVPP2_PRS_IPV4_IHL_MAX; ihl++) { + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; - mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_IPV4_HEAD, - MVPP2_PRS_IPV4_HEAD_MASK); + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); + pe.index = tid; - /* Clear ri before updating */ - pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0; - pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT, - MVPP2_PRS_RI_L3_PROTO_MASK); + mvpp2_prs_match_etype(&pe, 0, ETH_P_IP); + mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_IPV4_HEAD | ihl, + MVPP2_PRS_IPV4_HEAD_MASK | + MVPP2_PRS_IPV4_IHL_MASK); + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, + MVPP2_PRS_RI_L3_PROTO_MASK); + /* goto ipv4 dst-address (skip eth_type + IP-header-size - 4) */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + + sizeof(struct iphdr) - 4, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + /* Set L4 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, + MVPP2_ETH_TYPE_LEN + (ihl * 4), + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); - priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; - priv->prs_shadow[pe.index].finish = false; - mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT, - MVPP2_PRS_RI_L3_PROTO_MASK); - mvpp2_prs_hw_write(priv, &pe); + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = false; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4, + MVPP2_PRS_RI_L3_PROTO_MASK); + mvpp2_prs_hw_write(priv, &pe); + } /* Ethertype: IPv6 without options */ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, @@ -1674,7 +1649,8 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv) pe.index = tid; mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL, + MVPP2_PRS_IPV4_HEAD | + MVPP2_PRS_IPV4_IHL_MIN, MVPP2_PRS_IPV4_HEAD_MASK | MVPP2_PRS_IPV4_IHL_MASK); @@ -1788,9 +1764,8 @@ static int mvpp2_prs_ip4_init(struct mvpp2 *priv) mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - /* Set L4 offset */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, - sizeof(struct iphdr) - 4, + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, -4, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); mvpp2_prs_sram_ai_update(&pe, 0, 
MVPP2_PRS_IPV4_DIP_AI_BIT); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h index c16e5b9947bd..5ce5907be591 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h @@ -28,7 +28,8 @@ #define MVPP2_PRS_IPV4_MC 0xe0 #define MVPP2_PRS_IPV4_MC_MASK 0xf0 #define MVPP2_PRS_IPV4_BC_MASK 0xff -#define MVPP2_PRS_IPV4_IHL 0x5 +#define MVPP2_PRS_IPV4_IHL_MIN 0x5 +#define MVPP2_PRS_IPV4_IHL_MAX 0xf #define MVPP2_PRS_IPV4_IHL_MASK 0xf #define MVPP2_PRS_IPV6_MC 0xff #define MVPP2_PRS_IPV6_MC_MASK 0xff diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c index 25dd903a3e92..f08c420a5803 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_main.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c @@ -456,20 +456,17 @@ static int prestera_switch_set_base_mac_addr(struct prestera_switch *sw) { struct device_node *base_mac_np; struct device_node *np; - const char *base_mac; + int ret; np = of_find_compatible_node(NULL, NULL, "marvell,prestera"); base_mac_np = of_parse_phandle(np, "base-mac-provider", 0); - base_mac = of_get_mac_address(base_mac_np); - of_node_put(base_mac_np); - if (!IS_ERR(base_mac)) - ether_addr_copy(sw->base_mac, base_mac); - - if (!is_valid_ether_addr(sw->base_mac)) { + ret = of_get_mac_address(base_mac_np, sw->base_mac); + if (ret) { eth_random_addr(sw->base_mac); dev_info(prestera_dev(sw), "using random base mac address\n"); } + of_node_put(base_mac_np); return prestera_hw_switch_mac_set(sw, sw->base_mac); } diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c index 49e052273f30..cb564890a3dc 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c @@ -798,7 +798,7 @@ static void prestera_fdb_event_work(struct work_struct *work) switch (swdev_work->event) { case SWITCHDEV_FDB_ADD_TO_DEVICE: fdb_info = &swdev_work->fdb_info; - if (!fdb_info->added_by_user) + if (!fdb_info->added_by_user || fdb_info->is_local) break; err = prestera_port_fdb_set(port, fdb_info, true); diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 3712e1786091..e967867828d8 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1392,7 +1392,6 @@ static int pxa168_eth_probe(struct platform_device *pdev) struct resource *res; struct clk *clk; struct device_node *np; - const unsigned char *mac_addr = NULL; int err; printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n"); @@ -1435,12 +1434,8 @@ static int pxa168_eth_probe(struct platform_device *pdev) INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task); - if (pdev->dev.of_node) - mac_addr = of_get_mac_address(pdev->dev.of_node); - - if (!IS_ERR_OR_NULL(mac_addr)) { - ether_addr_copy(dev->dev_addr, mac_addr); - } else { + err = of_get_mac_address(pdev->dev.of_node, dev->dev_addr); + if (err) { /* try reading the mac address, if set by the bootloader */ pxa168_eth_get_mac_address(dev, dev->dev_addr); if (!is_valid_ether_addr(dev->dev_addr)) { diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 68c154d715d6..222c32367b2c 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ 
-4728,7 +4728,7 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port, { struct sky2_port *sky2; struct net_device *dev = alloc_etherdev(sizeof(*sky2)); - const void *iap; + int ret; if (!dev) return NULL; @@ -4798,10 +4798,8 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port, * 1) from device tree data * 2) from internal registers set by bootloader */ - iap = of_get_mac_address(hw->pdev->dev.of_node); - if (!IS_ERR(iap)) - ether_addr_copy(dev->dev_addr, iap); - else + ret = of_get_mac_address(hw->pdev->dev.of_node, dev->dev_addr); + if (ret) memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 810def064f11..6b00c12c6c43 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -2484,14 +2484,11 @@ static int __init mtk_init(struct net_device *dev) { struct mtk_mac *mac = netdev_priv(dev); struct mtk_eth *eth = mac->hw; - const char *mac_addr; - - mac_addr = of_get_mac_address(mac->of_node); - if (!IS_ERR(mac_addr)) - ether_addr_copy(dev->dev_addr, mac_addr); + int ret; - /* If the mac address is invalid, use random mac address */ - if (!is_valid_ether_addr(dev->dev_addr)) { + ret = of_get_mac_address(mac->of_node, dev->dev_addr); + if (ret) { + /* If the mac address is invalid, use random mac address */ eth_hw_addr_random(dev); dev_err(eth->dev, "generated random MAC address %pM\n", dev->dev_addr); diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c index 71e1ccea6e72..3ad10c793308 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c @@ -2,9 +2,8 @@ /* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */ #include <linux/kernel.h> -#include <linux/jiffies.h> -#include <linux/delay.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/etherdevice.h> #include <linux/platform_device.h> #include "mtk_ppe.h" @@ -44,18 +43,17 @@ static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val) static int mtk_ppe_wait_busy(struct mtk_ppe *ppe) { - unsigned long timeout = jiffies + HZ; - - while (time_is_before_jiffies(timeout)) { - if (!(ppe_r32(ppe, MTK_PPE_GLO_CFG) & MTK_PPE_GLO_CFG_BUSY)) - return 0; + int ret; + u32 val; - usleep_range(10, 20); - } + ret = readl_poll_timeout(ppe->base + MTK_PPE_GLO_CFG, val, + !(val & MTK_PPE_GLO_CFG_BUSY), + 20, MTK_PPE_WAIT_TIMEOUT_US); - dev_err(ppe->dev, "PPE table busy"); + if (ret) + dev_err(ppe->dev, "PPE table busy"); - return -ETIMEDOUT; + return ret; } static void mtk_ppe_cache_clear(struct mtk_ppe *ppe) diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h index 51bd5e75bbbd..242fb8f2ae65 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.h +++ b/drivers/net/ethernet/mediatek/mtk_ppe.h @@ -12,6 +12,7 @@ #define MTK_PPE_ENTRIES_SHIFT 3 #define MTK_PPE_ENTRIES (1024 << MTK_PPE_ENTRIES_SHIFT) #define MTK_PPE_HASH_MASK (MTK_PPE_ENTRIES - 1) +#define MTK_PPE_WAIT_TIMEOUT_US 1000000 #define MTK_FOE_IB1_UNBIND_TIMESTAMP GENMASK(7, 0) #define MTK_FOE_IB1_UNBIND_PACKETS GENMASK(23, 8) diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c index 4975106fbc42..f47f319f3ae0 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c @@ -43,7 +43,7 @@ struct mtk_flow_entry { static const 
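/* the duplicated .head_offset initializer fixed below looks like a copy-paste slip; cookie is the hash key, hence .key_offset */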
struct rhashtable_params mtk_flow_ht_params = { .head_offset = offsetof(struct mtk_flow_entry, node), - .head_offset = offsetof(struct mtk_flow_entry, cookie), + .key_offset = offsetof(struct mtk_flow_entry, cookie), .key_len = sizeof(unsigned long), .automatic_shrinking = true, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 8bde58379ac6..a1223e904190 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -50,7 +50,7 @@ mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o # Core extra # mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \ - ecpf.o rdma.o + ecpf.o rdma.o esw/legacy.o mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \ esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \ esw/acl/ingress_lgcy.o esw/acl/ingress_ofld.o \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index 38c7c44fe883..44c458443428 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -246,6 +246,11 @@ static int mlx5_devlink_trap_action_set(struct devlink *devlink, struct mlx5_devlink_trap *dl_trap; int err = 0; + if (is_mdev_switchdev_mode(dev)) { + NL_SET_ERR_MSG_MOD(extack, "Devlink traps can't be set in switchdev mode"); + return -EOPNOTSUPP; + } + dl_trap = mlx5_find_trap_by_id(dev, trap->id); if (!dl_trap) { mlx5_core_err(dev, "Devlink trap: Set action on invalid trap id 0x%x", trap->id); @@ -456,6 +461,50 @@ static int mlx5_devlink_large_group_num_validate(struct devlink *devlink, u32 id return 0; } + +static int mlx5_devlink_esw_port_metadata_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + + if (!MLX5_ESWITCH_MANAGER(dev)) + return -EOPNOTSUPP; + + return mlx5_esw_offloads_vport_metadata_set(dev->priv.eswitch, ctx->val.vbool); +} + +static int mlx5_devlink_esw_port_metadata_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + + if (!MLX5_ESWITCH_MANAGER(dev)) + return -EOPNOTSUPP; + + ctx->val.vbool = mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch); + return 0; +} + +static int mlx5_devlink_esw_port_metadata_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + u8 esw_mode; + + if (!MLX5_ESWITCH_MANAGER(dev)) { + NL_SET_ERR_MSG_MOD(extack, "E-Switch is unsupported"); + return -EOPNOTSUPP; + } + esw_mode = mlx5_eswitch_mode(dev); + if (esw_mode == MLX5_ESWITCH_OFFLOADS) { + NL_SET_ERR_MSG_MOD(extack, + "E-Switch must either disabled or non switchdev mode"); + return -EBUSY; + } + return 0; +} + #endif static int mlx5_devlink_enable_remote_dev_reset_set(struct devlink *devlink, u32 id, @@ -490,6 +539,12 @@ static const struct devlink_param mlx5_devlink_params[] = { BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL, mlx5_devlink_large_group_num_validate), + DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA, + "esw_port_metadata", DEVLINK_PARAM_TYPE_BOOL, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + mlx5_devlink_esw_port_metadata_get, + mlx5_devlink_esw_port_metadata_set, + mlx5_devlink_esw_port_metadata_validate), #endif DEVLINK_PARAM_GENERIC(ENABLE_REMOTE_DEV_RESET, BIT(DEVLINK_PARAM_CMODE_RUNTIME), 
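/* like the new esw_port_metadata knob above, this is runtime-only; esw_port_metadata additionally refuses switchdev mode in its validate callback */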
mlx5_devlink_enable_remote_dev_reset_get, @@ -519,6 +574,18 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink) devlink_param_driverinit_value_set(devlink, MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM, value); + + if (MLX5_ESWITCH_MANAGER(dev)) { + if (mlx5_esw_vport_match_metadata_supported(dev->priv.eswitch)) { + dev->priv.eswitch->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA; + value.vbool = true; + } else { + value.vbool = false; + } + devlink_param_driverinit_value_set(devlink, + MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA, + value); + } #endif } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h index eff107dad922..7318d44b774b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h @@ -10,6 +10,7 @@ enum mlx5_devlink_param_id { MLX5_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE, MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM, + MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA, }; struct mlx5_trap_ctx { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index e1c51eabe8fe..b636d63358d2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -325,9 +325,9 @@ enum { MLX5E_SQ_STATE_RECOVERING, MLX5E_SQ_STATE_IPSEC, MLX5E_SQ_STATE_AM, - MLX5E_SQ_STATE_TLS, MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, MLX5E_SQ_STATE_PENDING_XSK_TX, + MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, }; struct mlx5e_tx_mpwqe { @@ -500,6 +500,8 @@ struct mlx5e_xdpsq { struct mlx5e_channel *channel; } ____cacheline_aligned_in_smp; +struct mlx5e_ktls_resync_resp; + struct mlx5e_icosq { /* data path */ u16 cc; @@ -519,6 +521,7 @@ struct mlx5e_icosq { u32 sqn; u16 reserved_room; unsigned long state; + struct mlx5e_ktls_resync_resp *ktls_resync; /* control path */ struct mlx5_wq_ctrl wq_ctrl; @@ -1015,10 +1018,10 @@ int fn##_ctx(struct mlx5e_priv *priv, void *context) \ return fn(priv); \ } int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv); -int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, - struct mlx5e_channels *new_chs, - mlx5e_fp_preactivate preactivate, - void *context); +int mlx5e_safe_switch_params(struct mlx5e_priv *priv, + struct mlx5e_params *new_params, + mlx5e_fp_preactivate preactivate, + void *context, bool reset); int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv); int mlx5e_num_channels_changed(struct mlx5e_priv *priv); int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 7b2b52e75222..f6ba568e00be 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -621,6 +621,9 @@ static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev, mlx5e_build_sq_param_common(mdev, param); param->stop_room = mlx5e_stop_room_for_wqe(1); /* for XSK NOP */ + param->is_tls = mlx5_accel_is_ktls_rx(mdev); + if (param->is_tls) + param->stop_room += mlx5e_stop_room_for_wqe(1); /* for TLS RX resync NOP */ MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq)); MLX5_SET(wq, wq, log_wq_sz, log_wq_size); mlx5e_build_ico_cq_param(mdev, log_wq_size, ¶m->cqp); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h index 602e41a2bddd..fcc51ec6084e 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h @@ -30,6 +30,7 @@ struct mlx5e_sq_param { u32 sqc[MLX5_ST_SZ_DW(sqc)]; struct mlx5_wq_param wq; bool is_mpw; + bool is_tls; u16 stop_room; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c index 308fd279669e..89510cac46c2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c @@ -387,21 +387,6 @@ enum mlx5e_fec_supported_link_mode { *_policy = MLX5_GET(pplm_reg, _buf, fec_override_admin_##link); \ } while (0) -#define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link) \ - do { \ - unsigned long policy_long; \ - u16 *__policy = &(policy); \ - bool _write = (write); \ - \ - policy_long = *__policy; \ - if (_write && *__policy) \ - *__policy = find_first_bit(&policy_long, \ - sizeof(policy_long) * BITS_PER_BYTE);\ - MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link); \ - if (!_write && *__policy) \ - *__policy = 1 << *__policy; \ - } while (0) - /* get/set FEC admin field for a given speed */ static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write, enum mlx5e_fec_supported_link_mode link_mode) @@ -423,16 +408,16 @@ static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write, MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 100g); break; case MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X: - MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 50g_1x); + MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 50g_1x); break; case MLX5E_FEC_SUPPORTED_LINK_MODE_100G_2X: - MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 100g_2x); + MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 100g_2x); break; case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_4X: - MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 200g_4x); + MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 200g_4x); break; case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X: - MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 400g_8x); + MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 400g_8x); break; default: return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c index 72e7dd6d78c0..d907c1acd4d5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c @@ -792,6 +792,9 @@ int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set) if (!priv->profile->rx_ptp_support) return 0; + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + return 0; + if (set) { if (!c || !test_bit(MLX5E_PTP_STATE_RX, c->state)) { netdev_WARN_ONCE(priv->netdev, "Don't try to add PTP RX-FS rules"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c index f9fdf3606bbd..0eb125316fe2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c @@ -323,10 +323,12 @@ static int mlx5e_rx_reporter_diagnose_generic_rq(struct mlx5e_rq *rq, struct mlx5e_priv *priv = rq->priv; struct mlx5e_params *params; u32 rq_stride, rq_sz; + bool real_time; int err; params = &priv->channels.params; rq_sz = mlx5e_rqwq_get_size(rq); + real_time = mlx5_is_real_time_rq(priv->mdev); rq_stride = BIT(mlx5e_mpwqe_get_log_stride_size(priv->mdev, params, NULL)); err = 
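/* the real_time flag computed above feeds the new "ts_format" pair below: "RT" for real-time timestamping, "FRC" for the free-running counter */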
mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ"); @@ -345,6 +347,10 @@ static int mlx5e_rx_reporter_diagnose_generic_rq(struct mlx5e_rq *rq, if (err) return err; + err = devlink_fmsg_string_pair_put(fmsg, "ts_format", real_time ? "RT" : "FRC"); + if (err) + return err; + err = mlx5e_health_cq_common_diag_fmsg(&rq->cq, fmsg); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index 1a0505bd1e9a..9d361efd5ff7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -257,12 +257,14 @@ mlx5e_tx_reporter_diagnose_generic_txqsq(struct devlink_fmsg *fmsg, struct mlx5e_txqsq *txqsq) { u32 sq_stride, sq_sz; + bool real_time; int err; err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ"); if (err) return err; + real_time = mlx5_is_real_time_sq(txqsq->mdev); sq_sz = mlx5_wq_cyc_get_size(&txqsq->wq); sq_stride = MLX5_SEND_WQE_BB; @@ -274,6 +276,10 @@ mlx5e_tx_reporter_diagnose_generic_txqsq(struct devlink_fmsg *fmsg, if (err) return err; + err = devlink_fmsg_string_pair_put(fmsg, "ts_format", real_time ? "RT" : "FRC"); + if (err) + return err; + err = mlx5e_health_cq_common_diag_fmsg(&txqsq->cq, fmsg); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h index cc0efac7b812..00af0b831a28 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h @@ -123,11 +123,10 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev, mlx5e_udp_gso_handle_tx_skb(skb); #ifdef CONFIG_MLX5_EN_TLS - if (test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) { - /* May send SKBs and WQEs. */ + /* May send SKBs and WQEs. 
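The per-SQ TLS state bit is gone; mlx5e_tls_skb_offloaded() now checks the socket itself before the skb enters the TLS path.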
*/ + if (mlx5e_tls_skb_offloaded(skb)) if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, &state->tls))) return false; - } #endif #ifdef CONFIG_MLX5_EN_IPSEC @@ -186,7 +185,7 @@ static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq, struct mlx5_wqe_inline_seg *inlseg) { #ifdef CONFIG_MLX5_EN_TLS - mlx5e_tls_handle_tx_wqe(sq, &wqe->ctrl, &state->tls); + mlx5e_tls_handle_tx_wqe(&wqe->ctrl, &state->tls); #endif #ifdef CONFIG_MLX5_EN_IPSEC diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h index baa58b62e8df..aaa579bf9a39 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h @@ -12,6 +12,9 @@ void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv); int mlx5e_ktls_init_rx(struct mlx5e_priv *priv); void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv); int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable); +struct mlx5e_ktls_resync_resp * +mlx5e_ktls_rx_resync_create_resp_list(void); +void mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list); #else static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv) @@ -33,6 +36,14 @@ static inline int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enab return -EOPNOTSUPP; } +static inline struct mlx5e_ktls_resync_resp * +mlx5e_ktls_rx_resync_create_resp_list(void) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline void +mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list) {} #endif #endif /* __MLX5E_TLS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c index 8c0f78c09215..4e58fade7a60 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c @@ -56,6 +56,7 @@ struct mlx5e_ktls_offload_context_rx { /* resync */ struct mlx5e_ktls_rx_resync_ctx resync; + struct list_head list; }; static bool mlx5e_ktls_priv_rx_put(struct mlx5e_ktls_offload_context_rx *priv_rx) @@ -72,6 +73,32 @@ static void mlx5e_ktls_priv_rx_get(struct mlx5e_ktls_offload_context_rx *priv_rx refcount_inc(&priv_rx->resync.refcnt); } +struct mlx5e_ktls_resync_resp { + /* protects list changes */ + spinlock_t lock; + struct list_head list; +}; + +void mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list) +{ + kvfree(resp_list); +} + +struct mlx5e_ktls_resync_resp * +mlx5e_ktls_rx_resync_create_resp_list(void) +{ + struct mlx5e_ktls_resync_resp *resp_list; + + resp_list = kvzalloc(sizeof(*resp_list), GFP_KERNEL); + if (!resp_list) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&resp_list->list); + spin_lock_init(&resp_list->lock); + + return resp_list; +} + static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, u32 *tirn, u32 rqtn) { int err, inlen; @@ -119,8 +146,7 @@ out: complete(&priv_rx->add_ctx); } -static void accel_rule_init(struct accel_rule *rule, struct mlx5e_priv *priv, - struct sock *sk) +static void accel_rule_init(struct accel_rule *rule, struct mlx5e_priv *priv) { INIT_WORK(&rule->work, accel_rule_handle_work); rule->priv = priv; @@ -359,33 +385,32 @@ static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync, /* Function can be called with the refcount being either elevated or not. * It does not affect the refcount. 
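Instead of posting the static-params WQE inline under async_icosq_lock, the context is now queued on the ICOSQ's ktls_resync list and NAPI is triggered if needed; mlx5e_ktls_rx_handle_resync_list() drains the list later.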
*/ -static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx, - struct mlx5e_channel *c) +static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx, + struct mlx5e_channel *c) { struct tls12_crypto_info_aes_gcm_128 *info = &priv_rx->crypto_info; - struct mlx5_wqe_ctrl_seg *cseg; + struct mlx5e_ktls_resync_resp *ktls_resync; struct mlx5e_icosq *sq; - int err; + bool trigger_poll; memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be, sizeof(info->rec_seq)); - err = 0; sq = &c->async_icosq; - spin_lock_bh(&c->async_icosq_lock); + ktls_resync = sq->ktls_resync; - cseg = post_static_params(sq, priv_rx); - if (IS_ERR(cseg)) { - priv_rx->rq_stats->tls_resync_res_skip++; - err = PTR_ERR(cseg); - goto unlock; - } - /* Do not increment priv_rx refcnt, CQE handling is empty */ - mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg); - priv_rx->rq_stats->tls_resync_res_ok++; -unlock: - spin_unlock_bh(&c->async_icosq_lock); + spin_lock_bh(&ktls_resync->lock); + list_add_tail(&priv_rx->list, &ktls_resync->list); + trigger_poll = !test_and_set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state); + spin_unlock_bh(&ktls_resync->lock); - return err; + if (!trigger_poll) + return; + + if (!napi_if_scheduled_mark_missed(&c->napi)) { + spin_lock_bh(&c->async_icosq_lock); + mlx5e_trigger_irq(sq); + spin_unlock_bh(&c->async_icosq_lock); + } } /* Function can be called with the refcount being either elevated or not. @@ -618,7 +643,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, init_completion(&priv_rx->add_ctx); - accel_rule_init(&priv_rx->rule, priv, sk); + accel_rule_init(&priv_rx->rule, priv); resync = &priv_rx->resync; resync_init(resync, priv); tls_offload_ctx_rx(tls_ctx)->resync_async = &resync->core; @@ -676,3 +701,65 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx) */ mlx5e_ktls_priv_rx_put(priv_rx); } + +bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget) +{ + struct mlx5e_ktls_offload_context_rx *priv_rx, *tmp; + struct mlx5e_ktls_resync_resp *ktls_resync; + struct mlx5_wqe_ctrl_seg *db_cseg; + struct mlx5e_icosq *sq; + LIST_HEAD(local_list); + int i, j; + + sq = &c->async_icosq; + + if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) + return false; + + ktls_resync = sq->ktls_resync; + db_cseg = NULL; + i = 0; + + spin_lock(&ktls_resync->lock); + list_for_each_entry_safe(priv_rx, tmp, &ktls_resync->list, list) { + list_move(&priv_rx->list, &local_list); + if (++i == budget) + break; + } + if (list_empty(&ktls_resync->list)) + clear_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state); + spin_unlock(&ktls_resync->lock); + + spin_lock(&c->async_icosq_lock); + for (j = 0; j < i; j++) { + struct mlx5_wqe_ctrl_seg *cseg; + + priv_rx = list_first_entry(&local_list, + struct mlx5e_ktls_offload_context_rx, + list); + cseg = post_static_params(sq, priv_rx); + if (IS_ERR(cseg)) + break; + list_del(&priv_rx->list); + db_cseg = cseg; + } + if (db_cseg) + mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, db_cseg); + spin_unlock(&c->async_icosq_lock); + + priv_rx->rq_stats->tls_resync_res_ok += j; + + if (!list_empty(&local_list)) { + /* This happens only if ICOSQ is full. + * There is no need to mark busy or explicitly ask for a NAPI cycle, + * it will be triggered by the outstanding ICOSQ completions. 
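Leftover entries are spliced back onto the resync list and the pending bit is set again, so a later poll retries them.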
+ */ + spin_lock(&ktls_resync->lock); + list_splice(&local_list, &ktls_resync->list); + set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state); + spin_unlock(&ktls_resync->lock); + priv_rx->rq_stats->tls_resync_res_retry++; + } + + return i == budget; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h index ee04e916fa21..8f79335057dc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h @@ -40,6 +40,14 @@ mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq, } return false; } + +bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget); + +static inline bool +mlx5e_ktls_rx_pending_resync_list(struct mlx5e_channel *c, int budget) +{ + return budget && test_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &c->async_icosq.state); +} #else static inline bool mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq, @@ -49,6 +57,18 @@ mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq, return false; } +static inline bool +mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget) +{ + return false; +} + +static inline bool +mlx5e_ktls_rx_pending_resync_list(struct mlx5e_channel *c, int budget) +{ + return false; +} + #endif /* CONFIG_MLX5_EN_TLS */ #endif /* __MLX5E_TLS_TXRX_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c index 2b51d3222ca1..82dc09aaa7fc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c @@ -263,9 +263,6 @@ bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq, int datalen; u32 skb_seq; - if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk)) - return true; - datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb)); if (!datalen) return true; @@ -301,12 +298,6 @@ err_out: return false; } -void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg, - struct mlx5e_accel_tx_tls_state *state) -{ - cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8); -} - static int tls_update_resync_sn(struct net_device *netdev, struct sk_buff *skb, struct mlx5e_tls_metadata *mdata) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h index 9923132c9440..0ca0a023fb8d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h @@ -47,8 +47,18 @@ u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *par bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state); -void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg, - struct mlx5e_accel_tx_tls_state *state); + +static inline bool mlx5e_tls_skb_offloaded(struct sk_buff *skb) +{ + return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk); +} + +static inline void +mlx5e_tls_handle_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg, + struct mlx5e_accel_tx_tls_state *state) +{ + cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8); +} void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb, u32 *cqe_bcnt); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c 
b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index d5b1eb74d5e5..5cd466ec6492 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -392,11 +392,11 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv) { struct arfs_rule *arfs_rule; struct hlist_node *htmp; + HLIST_HEAD(del_list); int quota = 0; int i; int j; - HLIST_HEAD(del_list); spin_lock_bh(&priv->fs.arfs->arfs_lock); mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs->arfs_tables, i, j) { if (!work_pending(&arfs_rule->arfs_work) && @@ -422,10 +422,10 @@ static void arfs_del_rules(struct mlx5e_priv *priv) { struct hlist_node *htmp; struct arfs_rule *rule; + HLIST_HEAD(del_list); int i; int j; - HLIST_HEAD(del_list); spin_lock_bh(&priv->fs.arfs->arfs_lock); mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs->arfs_tables, i, j) { hlist_del_init(&rule->hlist); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index f23c67575073..a4c8d8d00d5a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -1149,35 +1149,23 @@ static int mlx5e_update_trust_state_hw(struct mlx5e_priv *priv, void *context) static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state) { - struct mlx5e_channels new_channels = {}; - bool reset_channels = true; - bool opened; - int err = 0; + struct mlx5e_params new_params; + bool reset = true; + int err; mutex_lock(&priv->state_lock); - new_channels.params = priv->channels.params; - mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &new_channels.params, + new_params = priv->channels.params; + mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &new_params, trust_state); - opened = test_bit(MLX5E_STATE_OPENED, &priv->state); - if (!opened) - reset_channels = false; - /* Skip if tx_min_inline is the same */ - if (new_channels.params.tx_min_inline_mode == - priv->channels.params.tx_min_inline_mode) - reset_channels = false; - - if (reset_channels) { - err = mlx5e_safe_switch_channels(priv, &new_channels, - mlx5e_update_trust_state_hw, - &trust_state); - } else { - err = mlx5e_update_trust_state_hw(priv, &trust_state); - if (!err && !opened) - priv->channels.params = new_channels.params; - } + if (new_params.tx_min_inline_mode == priv->channels.params.tx_min_inline_mode) + reset = false; + + err = mlx5e_safe_switch_params(priv, &new_params, + mlx5e_update_trust_state_hw, + &trust_state, reset); mutex_unlock(&priv->state_lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index c8057a44d5ab..8360289813f0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -326,7 +326,7 @@ static void mlx5e_get_ringparam(struct net_device *dev, int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv, struct ethtool_ringparam *param) { - struct mlx5e_channels new_channels = {}; + struct mlx5e_params new_params; u8 log_rq_size; u8 log_sq_size; int err = 0; @@ -365,20 +365,15 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv, mutex_lock(&priv->state_lock); - new_channels.params = priv->channels.params; - new_channels.params.log_rq_mtu_frames = log_rq_size; - new_channels.params.log_sq_size = log_sq_size; + new_params = priv->channels.params; + new_params.log_rq_mtu_frames = log_rq_size; + new_params.log_sq_size = log_sq_size; - err = 
mlx5e_validate_params(priv->mdev, &new_channels.params); + err = mlx5e_validate_params(priv->mdev, &new_params); if (err) goto unlock; - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { - priv->channels.params = new_channels.params; - goto unlock; - } - - err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); + err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true); unlock: mutex_unlock(&priv->state_lock); @@ -423,8 +418,9 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, { struct mlx5e_params *cur_params = &priv->channels.params; unsigned int count = ch->combined_count; - struct mlx5e_channels new_channels = {}; + struct mlx5e_params new_params; bool arfs_enabled; + bool opened; int err = 0; if (!count) { @@ -459,28 +455,18 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, goto out; } - new_channels.params = *cur_params; - new_channels.params.num_channels = count; - - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { - struct mlx5e_params old_params; - - old_params = *cur_params; - *cur_params = new_channels.params; - err = mlx5e_num_channels_changed(priv); - if (err) - *cur_params = old_params; + new_params = *cur_params; + new_params.num_channels = count; - goto out; - } + opened = test_bit(MLX5E_STATE_OPENED, &priv->state); - arfs_enabled = priv->netdev->features & NETIF_F_NTUPLE; + arfs_enabled = opened && (priv->netdev->features & NETIF_F_NTUPLE); if (arfs_enabled) mlx5e_arfs_disable(priv); /* Switch to new channels, set new parameters and close old ones */ - err = mlx5e_safe_switch_channels(priv, &new_channels, - mlx5e_num_channels_changed_ctx, NULL); + err = mlx5e_safe_switch_params(priv, &new_params, + mlx5e_num_channels_changed_ctx, NULL, true); if (arfs_enabled) { int err2 = mlx5e_arfs_enable(priv); @@ -575,8 +561,9 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, { struct dim_cq_moder *rx_moder, *tx_moder; struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5e_channels new_channels = {}; + struct mlx5e_params new_params; bool reset_rx, reset_tx; + bool reset = true; int err = 0; if (!MLX5_CAP_GEN(mdev, cq_moderation)) @@ -597,51 +584,47 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, } mutex_lock(&priv->state_lock); - new_channels.params = priv->channels.params; + new_params = priv->channels.params; - rx_moder = &new_channels.params.rx_cq_moderation; + rx_moder = &new_params.rx_cq_moderation; rx_moder->usec = coal->rx_coalesce_usecs; rx_moder->pkts = coal->rx_max_coalesced_frames; - new_channels.params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce; + new_params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce; - tx_moder = &new_channels.params.tx_cq_moderation; + tx_moder = &new_params.tx_cq_moderation; tx_moder->usec = coal->tx_coalesce_usecs; tx_moder->pkts = coal->tx_max_coalesced_frames; - new_channels.params.tx_dim_enabled = !!coal->use_adaptive_tx_coalesce; + new_params.tx_dim_enabled = !!coal->use_adaptive_tx_coalesce; reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled; reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled; if (reset_rx) { - u8 mode = MLX5E_GET_PFLAG(&new_channels.params, + u8 mode = MLX5E_GET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_BASED_MODER); - mlx5e_reset_rx_moderation(&new_channels.params, mode); + mlx5e_reset_rx_moderation(&new_params, mode); } if (reset_tx) { - u8 mode = MLX5E_GET_PFLAG(&new_channels.params, + u8 mode = MLX5E_GET_PFLAG(&new_params, MLX5E_PFLAG_TX_CQE_BASED_MODER); - 
mlx5e_reset_tx_moderation(&new_channels.params, mode); - } - - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { - priv->channels.params = new_channels.params; - goto out; + mlx5e_reset_tx_moderation(&new_params, mode); } - if (!reset_rx && !reset_tx) { + /* If DIM state hasn't changed, it's possible to modify interrupt + * moderation parameters on the fly, even if the channels are open. + */ + if (!reset_rx && !reset_tx && test_bit(MLX5E_STATE_OPENED, &priv->state)) { if (!coal->use_adaptive_rx_coalesce) mlx5e_set_priv_channels_rx_coalesce(priv, coal); if (!coal->use_adaptive_tx_coalesce) mlx5e_set_priv_channels_tx_coalesce(priv, coal); - priv->channels.params = new_channels.params; - goto out; + reset = false; } - err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); + err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset); -out: mutex_unlock(&priv->state_lock); return err; } @@ -1602,6 +1585,14 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) return mlx5_set_port_wol(mdev, mlx5_wol_mode); } +static void mlx5e_get_fec_stats(struct net_device *netdev, + struct ethtool_fec_stats *fec_stats) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + mlx5e_stats_fec_get(priv, fec_stats); +} + static int mlx5e_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam) { @@ -1852,7 +1843,7 @@ static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable, { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5e_channels new_channels = {}; + struct mlx5e_params new_params; bool mode_changed; u8 cq_period_mode, current_cq_period_mode; @@ -1871,18 +1862,13 @@ static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable, if (!mode_changed) return 0; - new_channels.params = priv->channels.params; + new_params = priv->channels.params; if (is_rx_cq) - mlx5e_set_rx_cq_mode_params(&new_channels.params, cq_period_mode); + mlx5e_set_rx_cq_mode_params(&new_params, cq_period_mode); else - mlx5e_set_tx_cq_mode_params(&new_channels.params, cq_period_mode); - - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { - priv->channels.params = new_channels.params; - return 0; - } + mlx5e_set_tx_cq_mode_params(&new_params, cq_period_mode); - return mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); + return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true); } static int set_pflag_tx_cqe_based_moder(struct net_device *netdev, bool enable) @@ -1898,7 +1884,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable) int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val) { bool curr_val = MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS); - struct mlx5e_channels new_channels = {}; + struct mlx5e_params new_params; int err = 0; if (!MLX5_CAP_GEN(priv->mdev, cqe_compression)) @@ -1907,21 +1893,16 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val if (curr_val == new_val) return 0; - new_channels.params = priv->channels.params; - MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val); + new_params = priv->channels.params; + MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val); if (priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) - new_channels.params.ptp_rx = new_val; - - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { - priv->channels.params = new_channels.params; - return 0; - } + new_params.ptp_rx = new_val; - 
if (new_channels.params.ptp_rx == priv->channels.params.ptp_rx) - err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); + if (new_params.ptp_rx == priv->channels.params.ptp_rx) + err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true); else - err = mlx5e_safe_switch_channels(priv, &new_channels, mlx5e_ptp_rx_manage_fs_ctx, - &new_channels.params.ptp_rx); + err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_ptp_rx_manage_fs_ctx, + &new_params.ptp_rx, true); if (err) return err; @@ -1955,7 +1936,7 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5e_channels new_channels = {}; + struct mlx5e_params new_params; if (enable) { if (!mlx5e_check_fragmented_striding_rq_cap(mdev)) @@ -1967,17 +1948,12 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable) return -EINVAL; } - new_channels.params = priv->channels.params; - - MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_STRIDING_RQ, enable); - mlx5e_set_rq_type(mdev, &new_channels.params); + new_params = priv->channels.params; - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { - priv->channels.params = new_channels.params; - return 0; - } + MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_STRIDING_RQ, enable); + mlx5e_set_rq_type(mdev, &new_params); - return mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); + return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true); } static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable) @@ -2006,23 +1982,16 @@ static int set_pflag_tx_mpwqe_common(struct net_device *netdev, u32 flag, bool e { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5e_channels new_channels = {}; - int err; + struct mlx5e_params new_params; if (enable && !MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe)) return -EOPNOTSUPP; - new_channels.params = priv->channels.params; - - MLX5E_SET_PFLAG(&new_channels.params, flag, enable); + new_params = priv->channels.params; - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { - priv->channels.params = new_channels.params; - return 0; - } + MLX5E_SET_PFLAG(&new_params, flag, enable); - err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); - return err; + return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true); } static int set_pflag_xdp_tx_mpwqe(struct net_device *netdev, bool enable) @@ -2039,7 +2008,7 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5e_channels new_channels = {}; + struct mlx5e_params new_params; int err; if (!MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn)) @@ -2055,27 +2024,15 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable) return -EINVAL; } - new_channels.params = priv->channels.params; - MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_TX_PORT_TS, enable); + new_params = priv->channels.params; + MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_TX_PORT_TS, enable); /* No need to verify SQ stop room as * ptpsq.txqsq.stop_room <= generic_sq->stop_room, and both * has the same log_sq_size. 
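The switch below can therefore always request a full channel reset.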
*/ - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { - struct mlx5e_params old_params; - - old_params = priv->channels.params; - priv->channels.params = new_channels.params; - err = mlx5e_num_channels_changed(priv); - if (err) - priv->channels.params = old_params; - goto out; - } - - err = mlx5e_safe_switch_channels(priv, &new_channels, - mlx5e_num_channels_changed_ctx, NULL); -out: + err = mlx5e_safe_switch_params(priv, &new_params, + mlx5e_num_channels_changed_ctx, NULL, true); if (!err) priv->tx_ptp_opened = true; @@ -2168,12 +2125,216 @@ int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) return mlx5e_ethtool_set_rxnfc(dev, cmd); } +static int query_port_status_opcode(struct mlx5_core_dev *mdev, u32 *status_opcode) +{ + struct mlx5_ifc_pddr_troubleshooting_page_bits *pddr_troubleshooting_page; + u32 in[MLX5_ST_SZ_DW(pddr_reg)] = {}; + u32 out[MLX5_ST_SZ_DW(pddr_reg)]; + int err; + + MLX5_SET(pddr_reg, in, local_port, 1); + MLX5_SET(pddr_reg, in, page_select, + MLX5_PDDR_REG_PAGE_SELECT_TROUBLESHOOTING_INFO_PAGE); + + pddr_troubleshooting_page = MLX5_ADDR_OF(pddr_reg, in, page_data); + MLX5_SET(pddr_troubleshooting_page, pddr_troubleshooting_page, + group_opcode, MLX5_PDDR_REG_TRBLSH_GROUP_OPCODE_MONITOR); + err = mlx5_core_access_reg(mdev, in, sizeof(in), out, + sizeof(out), MLX5_REG_PDDR, 0, 0); + if (err) + return err; + + pddr_troubleshooting_page = MLX5_ADDR_OF(pddr_reg, out, page_data); + *status_opcode = MLX5_GET(pddr_troubleshooting_page, pddr_troubleshooting_page, + status_opcode); + return 0; +} + +struct mlx5e_ethtool_link_ext_state_opcode_mapping { + u32 status_opcode; + enum ethtool_link_ext_state link_ext_state; + u8 link_ext_substate; +}; + +static const struct mlx5e_ethtool_link_ext_state_opcode_mapping +mlx5e_link_ext_state_opcode_map[] = { + /* States relating to the autonegotiation or issues therein */ + {2, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED}, + {3, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED}, + {4, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NEXT_PAGE_EXCHANGE_FAILED}, + {36, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED_FORCE_MODE}, + {38, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_FEC_MISMATCH_DURING_OVERRIDE}, + {39, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD}, + + /* Failure during link training */ + {5, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED}, + {6, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT}, + {7, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY}, + {8, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, 0}, + {14, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT}, + + /* Logical mismatch in physical coding sublayer or forward error correction sublayer */ + {9, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK}, + {10, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_AM_LOCK}, + {11, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_GET_ALIGN_STATUS}, + {12, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED}, + {13, 
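/* opcode 13: RS-FEC failed to lock */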
ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED}, + + /* Signal integrity issues */ + {15, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, 0}, + {17, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS}, + {42, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE}, + + /* No cable connected */ + {1024, ETHTOOL_LINK_EXT_STATE_NO_CABLE, 0}, + + /* Failure is related to cable, e.g., unsupported cable */ + {16, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, + ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE}, + {20, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, + ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE}, + {29, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, + ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE}, + {1025, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, + ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE}, + {1029, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, + ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE}, + {1031, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, 0}, + + /* Failure is related to EEPROM, e.g., failure during reading or parsing the data */ + {1027, ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE, 0}, + + /* Failure during calibration algorithm */ + {23, ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE, 0}, + + /* The hardware is not able to provide the power required from cable or module */ + {1032, ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED, 0}, + + /* The module is overheated */ + {1030, ETHTOOL_LINK_EXT_STATE_OVERHEAT, 0}, +}; + +static void +mlx5e_set_link_ext_state(struct mlx5e_ethtool_link_ext_state_opcode_mapping + link_ext_state_mapping, + struct ethtool_link_ext_state_info *link_ext_state_info) +{ + switch (link_ext_state_mapping.link_ext_state) { + case ETHTOOL_LINK_EXT_STATE_AUTONEG: + link_ext_state_info->autoneg = + link_ext_state_mapping.link_ext_substate; + break; + case ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE: + link_ext_state_info->link_training = + link_ext_state_mapping.link_ext_substate; + break; + case ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH: + link_ext_state_info->link_logical_mismatch = + link_ext_state_mapping.link_ext_substate; + break; + case ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY: + link_ext_state_info->bad_signal_integrity = + link_ext_state_mapping.link_ext_substate; + break; + case ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE: + link_ext_state_info->cable_issue = + link_ext_state_mapping.link_ext_substate; + break; + default: + break; + } + + link_ext_state_info->link_ext_state = link_ext_state_mapping.link_ext_state; +} + +static int +mlx5e_get_link_ext_state(struct net_device *dev, + struct ethtool_link_ext_state_info *link_ext_state_info) +{ + struct mlx5e_ethtool_link_ext_state_opcode_mapping link_ext_state_mapping; + struct mlx5e_priv *priv = netdev_priv(dev); + u32 status_opcode = 0; + int i; + + /* Exit without data if the interface state is OK, since no extended data is + * available in such case + */ + if (netif_carrier_ok(dev)) + return -ENODATA; + + if (query_port_status_opcode(priv->mdev, &status_opcode) || + !status_opcode) + return -ENODATA; + + for (i = 0; i < ARRAY_SIZE(mlx5e_link_ext_state_opcode_map); i++) { + link_ext_state_mapping = mlx5e_link_ext_state_opcode_map[i]; + if (link_ext_state_mapping.status_opcode == status_opcode) { + mlx5e_set_link_ext_state(link_ext_state_mapping, + link_ext_state_info); + return 0; + } + } + + return -ENODATA; +} + +static void mlx5e_get_eth_phy_stats(struct net_device *netdev, + struct ethtool_eth_phy_stats 
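/* this and the mac/ctrl/rmon getters below simply forward to the mlx5e stats layer */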
*phy_stats) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + mlx5e_stats_eth_phy_get(priv, phy_stats); +} + +static void mlx5e_get_eth_mac_stats(struct net_device *netdev, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + mlx5e_stats_eth_mac_get(priv, mac_stats); +} + +static void mlx5e_get_eth_ctrl_stats(struct net_device *netdev, + struct ethtool_eth_ctrl_stats *ctrl_stats) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + mlx5e_stats_eth_ctrl_get(priv, ctrl_stats); +} + +static void mlx5e_get_rmon_stats(struct net_device *netdev, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **ranges) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + mlx5e_stats_rmon_get(priv, rmon_stats, ranges); +} + const struct ethtool_ops mlx5e_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES | ETHTOOL_COALESCE_USE_ADAPTIVE, .get_drvinfo = mlx5e_get_drvinfo, .get_link = ethtool_op_get_link, + .get_link_ext_state = mlx5e_get_link_ext_state, .get_strings = mlx5e_get_strings, .get_sset_count = mlx5e_get_sset_count, .get_ethtool_stats = mlx5e_get_ethtool_stats, @@ -2209,6 +2370,11 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .self_test = mlx5e_self_test, .get_msglevel = mlx5e_get_msglevel, .set_msglevel = mlx5e_set_msglevel, + .get_fec_stats = mlx5e_get_fec_stats, .get_fecparam = mlx5e_get_fecparam, .set_fecparam = mlx5e_set_fecparam, + .get_eth_phy_stats = mlx5e_get_eth_phy_stats, + .get_eth_mac_stats = mlx5e_get_eth_mac_stats, + .get_eth_ctrl_stats = mlx5e_get_eth_ctrl_stats, + .get_rmon_stats = mlx5e_get_rmon_stats, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 2f47608bb9b9..feb347e81448 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -510,8 +510,9 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params, rq->page_pool = NULL; goto err_free_by_rq_type; } - err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, - MEM_TYPE_PAGE_POOL, rq->page_pool); + if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) + err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, + MEM_TYPE_PAGE_POOL, rq->page_pool); } if (err) goto err_free_by_rq_type; @@ -1133,8 +1134,6 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state); if (MLX5_IPSEC_DEV(c->priv->mdev)) set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state); - if (mlx5_accel_is_tls_device(c->priv->mdev)) - set_bit(MLX5E_SQ_STATE_TLS, &sq->state); if (param->is_mpw) set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state); sq->stop_room = param->stop_room; @@ -1410,8 +1409,17 @@ int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params, if (err) goto err_free_icosq; + if (param->is_tls) { + sq->ktls_resync = mlx5e_ktls_rx_resync_create_resp_list(); + if (IS_ERR(sq->ktls_resync)) { + err = PTR_ERR(sq->ktls_resync); + goto err_destroy_icosq; + } + } return 0; +err_destroy_icosq: + mlx5e_destroy_sq(c->mdev, sq->sqn); err_free_icosq: mlx5e_free_icosq(sq); @@ -1433,6 +1441,8 @@ void mlx5e_close_icosq(struct mlx5e_icosq *sq) { struct mlx5e_channel *c = sq->channel; + if (sq->ktls_resync) + mlx5e_ktls_rx_resync_destroy_resp_list(sq->ktls_resync); mlx5e_destroy_sq(c->mdev, sq->sqn); mlx5e_free_icosq_descs(sq); mlx5e_free_icosq(sq); @@ -2834,6 +2844,29 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv) mlx5e_deactivate_channels(&priv->channels); } +static int 
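/* closed-netdev path: swap priv->channels.params in place, run the optional preactivate hook, and restore the old params on error */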
mlx5e_switch_priv_params(struct mlx5e_priv *priv, + struct mlx5e_params *new_params, + mlx5e_fp_preactivate preactivate, + void *context) +{ + struct mlx5e_params old_params; + + old_params = priv->channels.params; + priv->channels.params = *new_params; + + if (preactivate) { + int err; + + err = preactivate(priv, context); + if (err) { + priv->channels.params = old_params; + return err; + } + } + + return 0; +} + static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv, struct mlx5e_channels *new_chs, mlx5e_fp_preactivate preactivate, @@ -2876,35 +2909,32 @@ out: return err; } -int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, - struct mlx5e_channels *new_chs, - mlx5e_fp_preactivate preactivate, - void *context) +int mlx5e_safe_switch_params(struct mlx5e_priv *priv, + struct mlx5e_params *params, + mlx5e_fp_preactivate preactivate, + void *context, bool reset) { + struct mlx5e_channels new_chs = {}; int err; - err = mlx5e_open_channels(priv, new_chs); + reset &= test_bit(MLX5E_STATE_OPENED, &priv->state); + if (!reset) + return mlx5e_switch_priv_params(priv, params, preactivate, context); + + new_chs.params = *params; + err = mlx5e_open_channels(priv, &new_chs); if (err) return err; - - err = mlx5e_switch_priv_channels(priv, new_chs, preactivate, context); + err = mlx5e_switch_priv_channels(priv, &new_chs, preactivate, context); if (err) - goto err_close; - - return 0; - -err_close: - mlx5e_close_channels(new_chs); + mlx5e_close_channels(&new_chs); return err; } int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv) { - struct mlx5e_channels new_channels = {}; - - new_channels.params = priv->channels.params; - return mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); + return mlx5e_safe_switch_params(priv, &priv->channels.params, NULL, NULL, true); } void mlx5e_timestamp_init(struct mlx5e_priv *priv) @@ -3361,7 +3391,7 @@ static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd) static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv, struct tc_mqprio_qopt *mqprio) { - struct mlx5e_channels new_channels = {}; + struct mlx5e_params new_params; u8 tc = mqprio->num_tc; int err = 0; @@ -3380,23 +3410,11 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv, goto out; } - new_channels.params = priv->channels.params; - new_channels.params.num_tc = tc ? tc : 1; + new_params = priv->channels.params; + new_params.num_tc = tc ? 
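/* zero TCs means fall back to a single TC */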
tc : 1; - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { - struct mlx5e_params old_params; - - old_params = priv->channels.params; - priv->channels.params = new_channels.params; - err = mlx5e_num_channels_changed(priv); - if (err) - priv->channels.params = old_params; - - goto out; - } - - err = mlx5e_safe_switch_channels(priv, &new_channels, - mlx5e_num_channels_changed_ctx, NULL); + err = mlx5e_safe_switch_params(priv, &new_params, + mlx5e_num_channels_changed_ctx, NULL, true); out: priv->max_opened_tc = max_t(u8, priv->max_opened_tc, @@ -3621,10 +3639,10 @@ static int set_feature_lro(struct net_device *netdev, bool enable) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5e_channels new_channels = {}; struct mlx5e_params *cur_params; + struct mlx5e_params new_params; + bool reset = true; int err = 0; - bool reset; mutex_lock(&priv->state_lock); @@ -3642,30 +3660,17 @@ static int set_feature_lro(struct net_device *netdev, bool enable) goto out; } - reset = test_bit(MLX5E_STATE_OPENED, &priv->state); - - new_channels.params = *cur_params; - new_channels.params.lro_en = enable; + new_params = *cur_params; + new_params.lro_en = enable; - if (cur_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) { + if (cur_params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) == - mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params, NULL)) + mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_params, NULL)) reset = false; } - if (!reset) { - struct mlx5e_params old_params; - - old_params = *cur_params; - *cur_params = new_channels.params; - err = mlx5e_modify_tirs_lro(priv); - if (err) - *cur_params = old_params; - goto out; - } - - err = mlx5e_safe_switch_channels(priv, &new_channels, - mlx5e_modify_tirs_lro_ctx, NULL); + err = mlx5e_safe_switch_params(priv, &new_params, + mlx5e_modify_tirs_lro_ctx, NULL, reset); out: mutex_unlock(&priv->state_lock); return err; @@ -3894,26 +3899,23 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, mlx5e_fp_preactivate preactivate) { struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5e_channels new_channels = {}; + struct mlx5e_params new_params; struct mlx5e_params *params; + bool reset = true; int err = 0; - bool reset; mutex_lock(&priv->state_lock); params = &priv->channels.params; - reset = !params->lro_en; - reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state); - - new_channels.params = *params; - new_channels.params.sw_mtu = new_mtu; - err = mlx5e_validate_params(priv->mdev, &new_channels.params); + new_params = *params; + new_params.sw_mtu = new_mtu; + err = mlx5e_validate_params(priv->mdev, &new_params); if (err) goto out; if (params->xdp_prog && - !mlx5e_rx_is_linear_skb(&new_channels.params, NULL)) { + !mlx5e_rx_is_linear_skb(&new_params, NULL)) { netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n", new_mtu, mlx5e_xdp_max_mtu(params, NULL)); err = -EINVAL; @@ -3922,47 +3924,34 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, if (priv->xsk.refcnt && !mlx5e_xsk_validate_mtu(netdev, &priv->channels, - &new_channels.params, priv->mdev)) { + &new_params, priv->mdev)) { err = -EINVAL; goto out; } + if (params->lro_en) + reset = false; + if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { - bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, - &new_channels.params, - NULL); + bool is_linear_old = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, params, NULL); + bool is_linear_new = 
mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, + &new_params, NULL); u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params, NULL); - u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params, NULL); - - /* If XSK is active, XSK RQs are linear. */ - is_linear |= priv->xsk.refcnt; + u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_params, NULL); - /* Always reset in linear mode - hw_mtu is used in data path. */ - reset = reset && (is_linear || (ppw_old != ppw_new)); - } - - if (!reset) { - unsigned int old_mtu = params->sw_mtu; - - params->sw_mtu = new_mtu; - if (preactivate) { - err = preactivate(priv, NULL); - if (err) { - params->sw_mtu = old_mtu; - goto out; - } - } - netdev->mtu = params->sw_mtu; - goto out; + /* Always reset in linear mode - hw_mtu is used in data path. + * Check that the mode was non-linear and didn't change. + * If XSK is active, XSK RQs are linear. + */ + if (!is_linear_old && !is_linear_new && !priv->xsk.refcnt && + ppw_old == ppw_new) + reset = false; } - err = mlx5e_safe_switch_channels(priv, &new_channels, preactivate, NULL); - if (err) - goto out; - - netdev->mtu = new_channels.params.sw_mtu; + err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, reset); out: + netdev->mtu = params->sw_mtu; mutex_unlock(&priv->state_lock); return err; } @@ -3981,7 +3970,7 @@ int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx) int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) { - struct mlx5e_channels new_channels = {}; + struct mlx5e_params new_params; struct hwtstamp_config config; bool rx_cqe_compress_def; int err; @@ -4003,13 +3992,13 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) } mutex_lock(&priv->state_lock); - new_channels.params = priv->channels.params; + new_params = priv->channels.params; rx_cqe_compress_def = priv->channels.params.rx_cqe_compress_def; /* RX HW timestamp */ switch (config.rx_filter) { case HWTSTAMP_FILTER_NONE: - new_channels.params.ptp_rx = false; + new_params.ptp_rx = false; break; case HWTSTAMP_FILTER_ALL: case HWTSTAMP_FILTER_SOME: @@ -4026,7 +4015,7 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_NTP_ALL: - new_channels.params.ptp_rx = rx_cqe_compress_def; + new_params.ptp_rx = rx_cqe_compress_def; config.rx_filter = HWTSTAMP_FILTER_ALL; break; default: @@ -4034,15 +4023,11 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) return -ERANGE; } - if (new_channels.params.ptp_rx == priv->channels.params.ptp_rx) + if (new_params.ptp_rx == priv->channels.params.ptp_rx) goto out; - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { - priv->channels.params = new_channels.params; - goto out; - } - err = mlx5e_safe_switch_channels(priv, &new_channels, mlx5e_ptp_rx_manage_fs_ctx, - &new_channels.params.ptp_rx); + err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_ptp_rx_manage_fs_ctx, + &new_params.ptp_rx, true); if (err) { mutex_unlock(&priv->state_lock); return err; @@ -4360,7 +4345,7 @@ static void mlx5e_tx_timeout(struct net_device *dev, unsigned int txqueue) static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog) { struct net_device *netdev = priv->netdev; - struct mlx5e_channels new_channels = {}; + struct mlx5e_params new_params; if (priv->channels.params.lro_en) { netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n"); @@ -4373,16 +4358,16 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog 
*prog) return -EINVAL; } - new_channels.params = priv->channels.params; - new_channels.params.xdp_prog = prog; + new_params = priv->channels.params; + new_params.xdp_prog = prog; /* No XSK params: AF_XDP can't be enabled yet at the point of setting * the XDP program. */ - if (!mlx5e_rx_is_linear_skb(&new_channels.params, NULL)) { + if (!mlx5e_rx_is_linear_skb(&new_params, NULL)) { netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n", - new_channels.params.sw_mtu, - mlx5e_xdp_max_mtu(&new_channels.params, NULL)); + new_params.sw_mtu, + mlx5e_xdp_max_mtu(&new_params, NULL)); return -EINVAL; } @@ -4402,9 +4387,10 @@ static void mlx5e_rq_replace_xdp_prog(struct mlx5e_rq *rq, struct bpf_prog *prog static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) { struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_params new_params; struct bpf_prog *old_prog; - bool reset, was_opened; int err = 0; + bool reset; int i; mutex_lock(&priv->state_lock); @@ -4415,46 +4401,29 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) goto unlock; } - was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); /* no need for full reset when exchanging programs */ reset = (!priv->channels.params.xdp_prog || !prog); - if (was_opened && !reset) - /* num_channels is invariant here, so we can take the - * batched reference right upfront. - */ - bpf_prog_add(prog, priv->channels.num); - - if (was_opened && reset) { - struct mlx5e_channels new_channels = {}; - - new_channels.params = priv->channels.params; - new_channels.params.xdp_prog = prog; - mlx5e_set_rq_type(priv->mdev, &new_channels.params); - old_prog = priv->channels.params.xdp_prog; + new_params = priv->channels.params; + new_params.xdp_prog = prog; + if (reset) + mlx5e_set_rq_type(priv->mdev, &new_params); + old_prog = priv->channels.params.xdp_prog; - err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); - if (err) - goto unlock; - } else { - /* exchange programs, extra prog reference we got from caller - * as long as we don't fail from this point onwards. - */ - old_prog = xchg(&priv->channels.params.xdp_prog, prog); - } + err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset); + if (err) + goto unlock; if (old_prog) bpf_prog_put(old_prog); - if (!was_opened && reset) /* change RQ type according to priv->xdp_prog */ - mlx5e_set_rq_type(priv->mdev, &priv->channels.params); - - if (!was_opened || reset) + if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset) goto unlock; /* exchanging programs w/o reset, we update ref counts on behalf * of the channels RQs here. 
*/ + bpf_prog_add(prog, priv->channels.num); for (i = 0; i < priv->channels.num; i++) { struct mlx5e_channel *c = priv->channels.c[i]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index e58ef8c713e4..34eb1118670f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -52,7 +52,7 @@ #include "diag/en_rep_tracepoint.h" #define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \ - max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE) + max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE) #define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1 static const char mlx5e_rep_driver_name[] = "mlx5e_rep"; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index ae0570ea08bf..e4f5b6395148 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -184,6 +184,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_retry) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) }, #endif @@ -344,6 +345,7 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s, s->rx_tls_resync_req_end += rq_stats->tls_resync_req_end; s->rx_tls_resync_req_skip += rq_stats->tls_resync_req_skip; s->rx_tls_resync_res_ok += rq_stats->tls_resync_res_ok; + s->rx_tls_resync_res_retry += rq_stats->tls_resync_res_retry; s->rx_tls_resync_res_skip += rq_stats->tls_resync_res_skip; s->rx_tls_err += rq_stats->tls_err; #endif @@ -768,35 +770,112 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3) mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); } -#define MLX5E_READ_CTR64_BE_F(ptr, c) \ +#define MLX5E_READ_CTR64_BE_F(ptr, set, c) \ be64_to_cpu(*(__be64 *)((char *)ptr + \ MLX5_BYTE_OFF(ppcnt_reg, \ - counter_set.eth_802_3_cntrs_grp_data_layout.c##_high))) + counter_set.set.c##_high))) -void mlx5e_stats_pause_get(struct mlx5e_priv *priv, - struct ethtool_pause_stats *pause_stats) +static int mlx5e_stats_get_ieee(struct mlx5_core_dev *mdev, + u32 *ppcnt_ieee_802_3) { - u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)]; - struct mlx5_core_dev *mdev = priv->mdev; u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {}; int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev)) - return; + return -EOPNOTSUPP; MLX5_SET(ppcnt_reg, in, local_port, 1); MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP); - mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3, - sz, MLX5_REG_PPCNT, 0, 0); + return mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3, + sz, MLX5_REG_PPCNT, 0, 0); +} + +void mlx5e_stats_pause_get(struct mlx5e_priv *priv, + struct ethtool_pause_stats *pause_stats) +{ + u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)]; + struct mlx5_core_dev *mdev = priv->mdev; + + if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3)) + return; pause_stats->tx_pause_frames = MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3, + eth_802_3_cntrs_grp_data_layout, a_pause_mac_ctrl_frames_transmitted); pause_stats->rx_pause_frames = MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3, + eth_802_3_cntrs_grp_data_layout, a_pause_mac_ctrl_frames_received); } +void 
mlx5e_stats_eth_phy_get(struct mlx5e_priv *priv, + struct ethtool_eth_phy_stats *phy_stats) +{ + u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)]; + struct mlx5_core_dev *mdev = priv->mdev; + + if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3)) + return; + + phy_stats->SymbolErrorDuringCarrier = + MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3, + eth_802_3_cntrs_grp_data_layout, + a_symbol_error_during_carrier); +} + +void mlx5e_stats_eth_mac_get(struct mlx5e_priv *priv, + struct ethtool_eth_mac_stats *mac_stats) +{ + u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)]; + struct mlx5_core_dev *mdev = priv->mdev; + + if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3)) + return; + +#define RD(name) \ + MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3, \ + eth_802_3_cntrs_grp_data_layout, \ + name) + + mac_stats->FramesTransmittedOK = RD(a_frames_transmitted_ok); + mac_stats->FramesReceivedOK = RD(a_frames_received_ok); + mac_stats->FrameCheckSequenceErrors = RD(a_frame_check_sequence_errors); + mac_stats->OctetsTransmittedOK = RD(a_octets_transmitted_ok); + mac_stats->OctetsReceivedOK = RD(a_octets_received_ok); + mac_stats->MulticastFramesXmittedOK = RD(a_multicast_frames_xmitted_ok); + mac_stats->BroadcastFramesXmittedOK = RD(a_broadcast_frames_xmitted_ok); + mac_stats->MulticastFramesReceivedOK = RD(a_multicast_frames_received_ok); + mac_stats->BroadcastFramesReceivedOK = RD(a_broadcast_frames_received_ok); + mac_stats->InRangeLengthErrors = RD(a_in_range_length_errors); + mac_stats->OutOfRangeLengthField = RD(a_out_of_range_length_field); + mac_stats->FrameTooLongErrors = RD(a_frame_too_long_errors); +#undef RD +} + +void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv, + struct ethtool_eth_ctrl_stats *ctrl_stats) +{ + u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)]; + struct mlx5_core_dev *mdev = priv->mdev; + + if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3)) + return; + + ctrl_stats->MACControlFramesTransmitted = + MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3, + eth_802_3_cntrs_grp_data_layout, + a_mac_control_frames_transmitted); + ctrl_stats->MACControlFramesReceived = + MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3, + eth_802_3_cntrs_grp_data_layout, + a_mac_control_frames_received); + ctrl_stats->UnsupportedOpcodesReceived = + MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3, + eth_802_3_cntrs_grp_data_layout, + a_unsupported_opcodes_received); +} + #define PPORT_2863_OFF(c) \ MLX5_BYTE_OFF(ppcnt_reg, \ counter_set.eth_2863_cntrs_grp_data_layout.c##_high) @@ -908,6 +987,59 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819) mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); } +static const struct ethtool_rmon_hist_range mlx5e_rmon_ranges[] = { + { 0, 64 }, + { 65, 127 }, + { 128, 255 }, + { 256, 511 }, + { 512, 1023 }, + { 1024, 1518 }, + { 1519, 2047 }, + { 2048, 4095 }, + { 4096, 8191 }, + { 8192, 10239 }, + {} +}; + +void mlx5e_stats_rmon_get(struct mlx5e_priv *priv, + struct ethtool_rmon_stats *rmon, + const struct ethtool_rmon_hist_range **ranges) +{ + u32 ppcnt_RFC_2819_counters[MLX5_ST_SZ_DW(ppcnt_reg)]; + struct mlx5_core_dev *mdev = priv->mdev; + u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0}; + int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); + + MLX5_SET(ppcnt_reg, in, local_port, 1); + MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP); + if (mlx5_core_access_reg(mdev, in, sz, ppcnt_RFC_2819_counters, + sz, MLX5_REG_PPCNT, 0, 0)) + return; + +#define RD(name) \ + MLX5E_READ_CTR64_BE_F(ppcnt_RFC_2819_counters, \ + eth_2819_cntrs_grp_data_layout, \ + name) + + rmon->undersize_pkts = 
RD(ether_stats_undersize_pkts); + rmon->fragments = RD(ether_stats_fragments); + rmon->jabbers = RD(ether_stats_jabbers); + + rmon->hist[0] = RD(ether_stats_pkts64octets); + rmon->hist[1] = RD(ether_stats_pkts65to127octets); + rmon->hist[2] = RD(ether_stats_pkts128to255octets); + rmon->hist[3] = RD(ether_stats_pkts256to511octets); + rmon->hist[4] = RD(ether_stats_pkts512to1023octets); + rmon->hist[5] = RD(ether_stats_pkts1024to1518octets); + rmon->hist[6] = RD(ether_stats_pkts1519to2047octets); + rmon->hist[7] = RD(ether_stats_pkts2048to4095octets); + rmon->hist[8] = RD(ether_stats_pkts4096to8191octets); + rmon->hist[9] = RD(ether_stats_pkts8192to10239octets); +#undef RD + + *ranges = mlx5e_rmon_ranges; +} + #define PPORT_PHY_STATISTICAL_OFF(c) \ MLX5_BYTE_OFF(ppcnt_reg, \ counter_set.phys_layer_statistical_cntrs.c##_high) @@ -1015,6 +1147,29 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy) mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); } +void mlx5e_stats_fec_get(struct mlx5e_priv *priv, + struct ethtool_fec_stats *fec_stats) +{ + u32 ppcnt_phy_statistical[MLX5_ST_SZ_DW(ppcnt_reg)]; + struct mlx5_core_dev *mdev = priv->mdev; + u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0}; + int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); + + if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) + return; + + MLX5_SET(ppcnt_reg, in, local_port, 1); + MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP); + if (mlx5_core_access_reg(mdev, in, sz, ppcnt_phy_statistical, + sz, MLX5_REG_PPCNT, 0, 0)) + return; + + fec_stats->corrected_bits.total = + MLX5E_READ_CTR64_BE_F(ppcnt_phy_statistical, + phys_layer_statistical_cntrs, + phy_corrected_bits); +} + #define PPORT_ETH_EXT_OFF(c) \ MLX5_BYTE_OFF(ppcnt_reg, \ counter_set.eth_extended_cntrs_grp_data_layout.c##_high) @@ -1629,6 +1784,7 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_retry) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) }, #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 21d3b8747f93..139e59f30db0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -114,6 +114,18 @@ void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv); void mlx5e_stats_pause_get(struct mlx5e_priv *priv, struct ethtool_pause_stats *pause_stats); +void mlx5e_stats_fec_get(struct mlx5e_priv *priv, + struct ethtool_fec_stats *fec_stats); + +void mlx5e_stats_eth_phy_get(struct mlx5e_priv *priv, + struct ethtool_eth_phy_stats *phy_stats); +void mlx5e_stats_eth_mac_get(struct mlx5e_priv *priv, + struct ethtool_eth_mac_stats *mac_stats); +void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv, + struct ethtool_eth_ctrl_stats *ctrl_stats); +void mlx5e_stats_rmon_get(struct mlx5e_priv *priv, + struct ethtool_rmon_stats *rmon, + const struct ethtool_rmon_hist_range **ranges); /* Concrete NIC Stats */ @@ -207,6 +219,7 @@ struct mlx5e_sw_stats { u64 rx_tls_resync_req_end; u64 rx_tls_resync_req_skip; u64 rx_tls_resync_res_ok; + u64 rx_tls_resync_res_retry; u64 rx_tls_resync_res_skip; u64 rx_tls_err; #endif @@ -337,6 +350,7 @@ struct mlx5e_rq_stats { u64 
tls_resync_req_end; u64 tls_resync_req_skip; u64 tls_resync_res_ok; + u64 tls_resync_res_retry; u64 tls_resync_res_skip; u64 tls_err; #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index d157d1b9cad6..47a9c49b25fd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1942,6 +1942,9 @@ static int mlx5e_flower_parse_meta(struct net_device *filter_dev, return 0; flow_rule_match_meta(rule, &match); + if (!match.mask->ingress_ifindex) + return 0; + if (match.mask->ingress_ifindex != 0xFFFFFFFF) { NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask"); return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index d54da3797c30..833be29170a1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -36,6 +36,7 @@ #include "en/xdp.h" #include "en/xsk/rx.h" #include "en/xsk/tx.h" +#include "en_accel/ktls_txrx.h" static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c) { @@ -171,6 +172,10 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) */ clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state); + /* Keep after async ICOSQ CQ poll */ + if (unlikely(mlx5e_ktls_rx_pending_resync_list(c, budget))) + busy |= mlx5e_ktls_rx_handle_resync_list(c, budget); + busy |= INDIRECT_CALL_2(rq->post_wqes, mlx5e_post_rx_mpwqes, mlx5e_post_rx_wqes, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c new file mode 100644 index 000000000000..8ab1224653a4 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c @@ -0,0 +1,509 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2021 Mellanox Technologies Ltd */ + +#include <linux/etherdevice.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/mlx5_ifc.h> +#include <linux/mlx5/vport.h> +#include <linux/mlx5/fs.h> +#include "esw/acl/lgcy.h" +#include "esw/legacy.h" +#include "mlx5_core.h" +#include "eswitch.h" +#include "fs_core.h" + +enum { + LEGACY_VEPA_PRIO = 0, + LEGACY_FDB_PRIO, +}; + +static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw) +{ + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_core_dev *dev = esw->dev; + struct mlx5_flow_namespace *root_ns; + struct mlx5_flow_table *fdb; + int err; + + root_ns = mlx5_get_fdb_sub_ns(dev, 0); + if (!root_ns) { + esw_warn(dev, "Failed to get FDB flow namespace\n"); + return -EOPNOTSUPP; + } + + /* num FTE 2, num FG 2 */ + ft_attr.prio = LEGACY_VEPA_PRIO; + ft_attr.max_fte = 2; + ft_attr.autogroup.max_num_groups = 2; + fdb = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr); + if (IS_ERR(fdb)) { + err = PTR_ERR(fdb); + esw_warn(dev, "Failed to create VEPA FDB err %d\n", err); + return err; + } + esw->fdb_table.legacy.vepa_fdb = fdb; + + return 0; +} + +static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw) +{ + esw_debug(esw->dev, "Destroy FDB Table\n"); + if (!esw->fdb_table.legacy.fdb) + return; + + if (esw->fdb_table.legacy.promisc_grp) + mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp); + if (esw->fdb_table.legacy.allmulti_grp) + mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp); + if (esw->fdb_table.legacy.addr_grp) + mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp); + mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb); + + 
esw->fdb_table.legacy.fdb = NULL; + esw->fdb_table.legacy.addr_grp = NULL; + esw->fdb_table.legacy.allmulti_grp = NULL; + esw->fdb_table.legacy.promisc_grp = NULL; + atomic64_set(&esw->user_count, 0); +} + +static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_core_dev *dev = esw->dev; + struct mlx5_flow_namespace *root_ns; + struct mlx5_flow_table *fdb; + struct mlx5_flow_group *g; + void *match_criteria; + int table_size; + u32 *flow_group_in; + u8 *dmac; + int err = 0; + + esw_debug(dev, "Create FDB log_max_size(%d)\n", + MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); + + root_ns = mlx5_get_fdb_sub_ns(dev, 0); + if (!root_ns) { + esw_warn(dev, "Failed to get FDB flow namespace\n"); + return -EOPNOTSUPP; + } + + flow_group_in = kvzalloc(inlen, GFP_KERNEL); + if (!flow_group_in) + return -ENOMEM; + + table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); + ft_attr.max_fte = table_size; + ft_attr.prio = LEGACY_FDB_PRIO; + fdb = mlx5_create_flow_table(root_ns, &ft_attr); + if (IS_ERR(fdb)) { + err = PTR_ERR(fdb); + esw_warn(dev, "Failed to create FDB Table err %d\n", err); + goto out; + } + esw->fdb_table.legacy.fdb = fdb; + + /* Addresses group : Full match unicast/multicast addresses */ + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, + MLX5_MATCH_OUTER_HEADERS); + match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); + dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); + /* Preserve 2 entries for allmulti and promisc rules*/ + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3); + eth_broadcast_addr(dmac); + g = mlx5_create_flow_group(fdb, flow_group_in); + if (IS_ERR(g)) { + err = PTR_ERR(g); + esw_warn(dev, "Failed to create flow group err(%d)\n", err); + goto out; + } + esw->fdb_table.legacy.addr_grp = g; + + /* Allmulti group : One rule that forwards any mcast traffic */ + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, + MLX5_MATCH_OUTER_HEADERS); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2); + eth_zero_addr(dmac); + dmac[0] = 0x01; + g = mlx5_create_flow_group(fdb, flow_group_in); + if (IS_ERR(g)) { + err = PTR_ERR(g); + esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err); + goto out; + } + esw->fdb_table.legacy.allmulti_grp = g; + + /* Promiscuous group : + * One rule that forward all unmatched traffic from previous groups + */ + eth_zero_addr(dmac); + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, + MLX5_MATCH_MISC_PARAMETERS); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1); + g = mlx5_create_flow_group(fdb, flow_group_in); + if (IS_ERR(g)) { + err = PTR_ERR(g); + esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err); + goto out; + } + esw->fdb_table.legacy.promisc_grp = g; + +out: + if (err) + esw_destroy_legacy_fdb_table(esw); + + kvfree(flow_group_in); + return err; +} + +static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw) +{ + esw_debug(esw->dev, "Destroy VEPA 
Table\n"); + if (!esw->fdb_table.legacy.vepa_fdb) + return; + + mlx5_destroy_flow_table(esw->fdb_table.legacy.vepa_fdb); + esw->fdb_table.legacy.vepa_fdb = NULL; +} + +static int esw_create_legacy_table(struct mlx5_eswitch *esw) +{ + int err; + + memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb)); + atomic64_set(&esw->user_count, 0); + + err = esw_create_legacy_vepa_table(esw); + if (err) + return err; + + err = esw_create_legacy_fdb_table(esw); + if (err) + esw_destroy_legacy_vepa_table(esw); + + return err; +} + +static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw) +{ + if (esw->fdb_table.legacy.vepa_uplink_rule) + mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_uplink_rule); + + if (esw->fdb_table.legacy.vepa_star_rule) + mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_star_rule); + + esw->fdb_table.legacy.vepa_uplink_rule = NULL; + esw->fdb_table.legacy.vepa_star_rule = NULL; +} + +static void esw_destroy_legacy_table(struct mlx5_eswitch *esw) +{ + esw_cleanup_vepa_rules(esw); + esw_destroy_legacy_fdb_table(esw); + esw_destroy_legacy_vepa_table(esw); +} + +#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \ + MLX5_VPORT_MC_ADDR_CHANGE | \ + MLX5_VPORT_PROMISC_CHANGE) + +int esw_legacy_enable(struct mlx5_eswitch *esw) +{ + struct mlx5_vport *vport; + int ret, i; + + ret = esw_create_legacy_table(esw); + if (ret) + return ret; + + mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) + vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; + + ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS); + if (ret) + esw_destroy_legacy_table(esw); + return ret; +} + +void esw_legacy_disable(struct mlx5_eswitch *esw) +{ + struct esw_mc_addr *mc_promisc; + + mlx5_eswitch_disable_pf_vf_vports(esw); + + mc_promisc = &esw->mc_promisc; + if (mc_promisc->uplink_rule) + mlx5_del_flow_rules(mc_promisc->uplink_rule); + + esw_destroy_legacy_table(esw); +} + +static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw, + u8 setting) +{ + struct mlx5_flow_destination dest = {}; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_handle *flow_rule; + struct mlx5_flow_spec *spec; + int err = 0; + void *misc; + + if (!setting) { + esw_cleanup_vepa_rules(esw); + return 0; + } + + if (esw->fdb_table.legacy.vepa_uplink_rule) + return 0; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + /* Uplink rule forward uplink traffic to FDB */ + misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); + MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK); + + misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); + MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); + + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest.ft = esw->fdb_table.legacy.fdb; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec, + &flow_act, &dest, 1); + if (IS_ERR(flow_rule)) { + err = PTR_ERR(flow_rule); + goto out; + } else { + esw->fdb_table.legacy.vepa_uplink_rule = flow_rule; + } + + /* Star rule to forward all traffic to uplink vport */ + memset(&dest, 0, sizeof(dest)); + dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; + dest.vport.num = MLX5_VPORT_UPLINK; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, NULL, + &flow_act, &dest, 1); + if (IS_ERR(flow_rule)) { + err = 
PTR_ERR(flow_rule); + goto out; + } else { + esw->fdb_table.legacy.vepa_star_rule = flow_rule; + } + +out: + kvfree(spec); + if (err) + esw_cleanup_vepa_rules(esw); + return err; +} + +int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting) +{ + int err = 0; + + if (!esw) + return -EOPNOTSUPP; + + if (!mlx5_esw_allowed(esw)) + return -EPERM; + + mutex_lock(&esw->state_lock); + if (esw->mode != MLX5_ESWITCH_LEGACY) { + err = -EOPNOTSUPP; + goto out; + } + + err = _mlx5_eswitch_set_vepa_locked(esw, setting); + +out: + mutex_unlock(&esw->state_lock); + return err; +} + +int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting) +{ + if (!esw) + return -EOPNOTSUPP; + + if (!mlx5_esw_allowed(esw)) + return -EPERM; + + if (esw->mode != MLX5_ESWITCH_LEGACY) + return -EOPNOTSUPP; + + *setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0; + return 0; +} + +int esw_legacy_vport_acl_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) +{ + int ret; + + /* Only non manager vports need ACL in legacy mode */ + if (mlx5_esw_is_manager_vport(esw, vport->vport)) + return 0; + + ret = esw_acl_ingress_lgcy_setup(esw, vport); + if (ret) + goto ingress_err; + + ret = esw_acl_egress_lgcy_setup(esw, vport); + if (ret) + goto egress_err; + + return 0; + +egress_err: + esw_acl_ingress_lgcy_cleanup(esw, vport); +ingress_err: + return ret; +} + +void esw_legacy_vport_acl_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) +{ + if (mlx5_esw_is_manager_vport(esw, vport->vport)) + return; + + esw_acl_egress_lgcy_cleanup(esw, vport); + esw_acl_ingress_lgcy_cleanup(esw, vport); +} + +int mlx5_esw_query_vport_drop_stats(struct mlx5_core_dev *dev, + struct mlx5_vport *vport, + struct mlx5_vport_drop_stats *stats) +{ + u64 rx_discard_vport_down, tx_discard_vport_down; + struct mlx5_eswitch *esw = dev->priv.eswitch; + u64 bytes = 0; + int err = 0; + + if (esw->mode != MLX5_ESWITCH_LEGACY) + return 0; + + mutex_lock(&esw->state_lock); + if (!vport->enabled) + goto unlock; + + if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter)) + mlx5_fc_query(dev, vport->egress.legacy.drop_counter, + &stats->rx_dropped, &bytes); + + if (vport->ingress.legacy.drop_counter) + mlx5_fc_query(dev, vport->ingress.legacy.drop_counter, + &stats->tx_dropped, &bytes); + + if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) && + !MLX5_CAP_GEN(dev, transmit_discard_vport_down)) + goto unlock; + + err = mlx5_query_vport_down_stats(dev, vport->vport, 1, + &rx_discard_vport_down, + &tx_discard_vport_down); + if (err) + goto unlock; + + if (MLX5_CAP_GEN(dev, receive_discard_vport_down)) + stats->rx_dropped += rx_discard_vport_down; + if (MLX5_CAP_GEN(dev, transmit_discard_vport_down)) + stats->tx_dropped += tx_discard_vport_down; + +unlock: + mutex_unlock(&esw->state_lock); + return err; +} + +int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, + u16 vport, u16 vlan, u8 qos) +{ + u8 set_flags = 0; + int err = 0; + + if (!mlx5_esw_allowed(esw)) + return -EPERM; + + if (vlan || qos) + set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT; + + mutex_lock(&esw->state_lock); + if (esw->mode != MLX5_ESWITCH_LEGACY) { + if (!vlan) + goto unlock; /* compatibility with libvirt */ + + err = -EOPNOTSUPP; + goto unlock; + } + + err = __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags); + +unlock: + mutex_unlock(&esw->state_lock); + return err; +} + +int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, + u16 vport, bool spoofchk) +{ + struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); + bool pschk; + int 
err = 0; + + if (!mlx5_esw_allowed(esw)) + return -EPERM; + if (IS_ERR(evport)) + return PTR_ERR(evport); + + mutex_lock(&esw->state_lock); + if (esw->mode != MLX5_ESWITCH_LEGACY) { + err = -EOPNOTSUPP; + goto unlock; + } + pschk = evport->info.spoofchk; + evport->info.spoofchk = spoofchk; + if (pschk && !is_valid_ether_addr(evport->info.mac)) + mlx5_core_warn(esw->dev, + "Spoofchk in set while MAC is invalid, vport(%d)\n", + evport->vport); + if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) + err = esw_acl_ingress_lgcy_setup(esw, evport); + if (err) + evport->info.spoofchk = pschk; + +unlock: + mutex_unlock(&esw->state_lock); + return err; +} + +int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, + u16 vport, bool setting) +{ + struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); + int err = 0; + + if (!mlx5_esw_allowed(esw)) + return -EPERM; + if (IS_ERR(evport)) + return PTR_ERR(evport); + + mutex_lock(&esw->state_lock); + if (esw->mode != MLX5_ESWITCH_LEGACY) { + err = -EOPNOTSUPP; + goto unlock; + } + evport->info.trusted = setting; + if (evport->enabled) + esw_vport_change_handle_locked(evport); + +unlock: + mutex_unlock(&esw->state_lock); + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.h new file mode 100644 index 000000000000..e0820bb72b57 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021 Mellanox Technologies Ltd */ + +#ifndef __MLX5_ESW_LEGACY_H__ +#define __MLX5_ESW_LEGACY_H__ + +#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \ + MLX5_VPORT_MC_ADDR_CHANGE | \ + MLX5_VPORT_PROMISC_CHANGE) + +struct mlx5_eswitch; + +int esw_legacy_enable(struct mlx5_eswitch *esw); +void esw_legacy_disable(struct mlx5_eswitch *esw); + +int esw_legacy_vport_acl_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport); +void esw_legacy_vport_acl_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport); + +int mlx5_esw_query_vport_drop_stats(struct mlx5_core_dev *dev, + struct mlx5_vport *vport, + struct mlx5_vport_drop_stats *stats); +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 6cf04a366f99..1bb229ecd43b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -36,6 +36,7 @@ #include <linux/mlx5/vport.h> #include <linux/mlx5/fs.h> #include "esw/acl/lgcy.h" +#include "esw/legacy.h" #include "mlx5_core.h" #include "lib/eq.h" #include "eswitch.h" @@ -61,9 +62,6 @@ struct vport_addr { bool mc_promisc; }; -static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw); -static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw); - static int mlx5_eswitch_check(const struct mlx5_core_dev *dev) { if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) @@ -278,226 +276,6 @@ esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u16 vport) return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v); } -enum { - LEGACY_VEPA_PRIO = 0, - LEGACY_FDB_PRIO, -}; - -static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw) -{ - struct mlx5_flow_table_attr ft_attr = {}; - struct mlx5_core_dev *dev = esw->dev; - struct mlx5_flow_namespace *root_ns; - struct mlx5_flow_table *fdb; - int err; - - root_ns = mlx5_get_fdb_sub_ns(dev, 0); - if (!root_ns) { - esw_warn(dev, "Failed to get FDB flow 
namespace\n"); - return -EOPNOTSUPP; - } - - /* num FTE 2, num FG 2 */ - ft_attr.prio = LEGACY_VEPA_PRIO; - ft_attr.max_fte = 2; - ft_attr.autogroup.max_num_groups = 2; - fdb = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr); - if (IS_ERR(fdb)) { - err = PTR_ERR(fdb); - esw_warn(dev, "Failed to create VEPA FDB err %d\n", err); - return err; - } - esw->fdb_table.legacy.vepa_fdb = fdb; - - return 0; -} - -static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw) -{ - int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); - struct mlx5_flow_table_attr ft_attr = {}; - struct mlx5_core_dev *dev = esw->dev; - struct mlx5_flow_namespace *root_ns; - struct mlx5_flow_table *fdb; - struct mlx5_flow_group *g; - void *match_criteria; - int table_size; - u32 *flow_group_in; - u8 *dmac; - int err = 0; - - esw_debug(dev, "Create FDB log_max_size(%d)\n", - MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); - - root_ns = mlx5_get_fdb_sub_ns(dev, 0); - if (!root_ns) { - esw_warn(dev, "Failed to get FDB flow namespace\n"); - return -EOPNOTSUPP; - } - - flow_group_in = kvzalloc(inlen, GFP_KERNEL); - if (!flow_group_in) - return -ENOMEM; - - table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); - ft_attr.max_fte = table_size; - ft_attr.prio = LEGACY_FDB_PRIO; - fdb = mlx5_create_flow_table(root_ns, &ft_attr); - if (IS_ERR(fdb)) { - err = PTR_ERR(fdb); - esw_warn(dev, "Failed to create FDB Table err %d\n", err); - goto out; - } - esw->fdb_table.legacy.fdb = fdb; - - /* Addresses group : Full match unicast/multicast addresses */ - MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, - MLX5_MATCH_OUTER_HEADERS); - match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); - dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16); - MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); - /* Preserve 2 entries for allmulti and promisc rules*/ - MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3); - eth_broadcast_addr(dmac); - g = mlx5_create_flow_group(fdb, flow_group_in); - if (IS_ERR(g)) { - err = PTR_ERR(g); - esw_warn(dev, "Failed to create flow group err(%d)\n", err); - goto out; - } - esw->fdb_table.legacy.addr_grp = g; - - /* Allmulti group : One rule that forwards any mcast traffic */ - MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, - MLX5_MATCH_OUTER_HEADERS); - MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2); - MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2); - eth_zero_addr(dmac); - dmac[0] = 0x01; - g = mlx5_create_flow_group(fdb, flow_group_in); - if (IS_ERR(g)) { - err = PTR_ERR(g); - esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err); - goto out; - } - esw->fdb_table.legacy.allmulti_grp = g; - - /* Promiscuous group : - * One rule that forward all unmatched traffic from previous groups - */ - eth_zero_addr(dmac); - MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, - MLX5_MATCH_MISC_PARAMETERS); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port); - MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1); - MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1); - g = mlx5_create_flow_group(fdb, flow_group_in); - if (IS_ERR(g)) { - err = PTR_ERR(g); - esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err); - goto out; - } - esw->fdb_table.legacy.promisc_grp = g; - 
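/*
 * Illustrative aside, not part of the patch: the legacy FDB above is one
 * table of BIT(log_max_ft_size) entries carved into three flow groups,
 * with the last two FTEs reserved for the allmulti and promisc rules.
 * A minimal self-contained C sketch of that index layout (the names here
 * are invented for illustration):
 */
struct legacy_fdb_layout {
	unsigned int addr_first, addr_last;	/* full-match dmac entries */
	unsigned int allmulti;			/* single mcast catch-all FTE */
	unsigned int promisc;			/* single source-port catch-all FTE */
};

static struct legacy_fdb_layout compute_legacy_fdb_layout(unsigned int log_max_ft_size)
{
	unsigned int table_size = 1u << log_max_ft_size;	/* BIT() equivalent */

	return (struct legacy_fdb_layout){
		.addr_first = 0,
		.addr_last  = table_size - 3,	/* keep 2 entries for the catch-alls */
		.allmulti   = table_size - 2,
		.promisc    = table_size - 1,
	};
}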
-out: - if (err) - esw_destroy_legacy_fdb_table(esw); - - kvfree(flow_group_in); - return err; -} - -static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw) -{ - esw_debug(esw->dev, "Destroy VEPA Table\n"); - if (!esw->fdb_table.legacy.vepa_fdb) - return; - - mlx5_destroy_flow_table(esw->fdb_table.legacy.vepa_fdb); - esw->fdb_table.legacy.vepa_fdb = NULL; -} - -static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw) -{ - esw_debug(esw->dev, "Destroy FDB Table\n"); - if (!esw->fdb_table.legacy.fdb) - return; - - if (esw->fdb_table.legacy.promisc_grp) - mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp); - if (esw->fdb_table.legacy.allmulti_grp) - mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp); - if (esw->fdb_table.legacy.addr_grp) - mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp); - mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb); - - esw->fdb_table.legacy.fdb = NULL; - esw->fdb_table.legacy.addr_grp = NULL; - esw->fdb_table.legacy.allmulti_grp = NULL; - esw->fdb_table.legacy.promisc_grp = NULL; - atomic64_set(&esw->user_count, 0); -} - -static int esw_create_legacy_table(struct mlx5_eswitch *esw) -{ - int err; - - memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb)); - atomic64_set(&esw->user_count, 0); - - err = esw_create_legacy_vepa_table(esw); - if (err) - return err; - - err = esw_create_legacy_fdb_table(esw); - if (err) - esw_destroy_legacy_vepa_table(esw); - - return err; -} - -static void esw_destroy_legacy_table(struct mlx5_eswitch *esw) -{ - esw_cleanup_vepa_rules(esw); - esw_destroy_legacy_fdb_table(esw); - esw_destroy_legacy_vepa_table(esw); -} - -#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \ - MLX5_VPORT_MC_ADDR_CHANGE | \ - MLX5_VPORT_PROMISC_CHANGE) - -static int esw_legacy_enable(struct mlx5_eswitch *esw) -{ - struct mlx5_vport *vport; - int ret, i; - - ret = esw_create_legacy_table(esw); - if (ret) - return ret; - - mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) - vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; - - ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS); - if (ret) - esw_destroy_legacy_table(esw); - return ret; -} - -static void esw_legacy_disable(struct mlx5_eswitch *esw) -{ - struct esw_mc_addr *mc_promisc; - - mlx5_eswitch_disable_pf_vf_vports(esw); - - mc_promisc = &esw->mc_promisc; - if (mc_promisc->uplink_rule) - mlx5_del_flow_rules(mc_promisc->uplink_rule); - - esw_destroy_legacy_table(esw); -} - /* E-Switch vport UC/MC lists management */ typedef int (*vport_addr_action)(struct mlx5_eswitch *esw, struct vport_addr *vaddr); @@ -919,7 +697,7 @@ static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, (promisc_all || promisc_mc)); } -static void esw_vport_change_handle_locked(struct mlx5_vport *vport) +void esw_vport_change_handle_locked(struct mlx5_vport *vport) { struct mlx5_core_dev *dev = vport->dev; struct mlx5_eswitch *esw = dev->priv.eswitch; @@ -1170,56 +948,20 @@ static void node_guid_gen_from_mac(u64 *node_guid, const u8 *mac) ((u8 *)node_guid)[0] = mac[5]; } -static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) -{ - int ret; - - /* Only non manager vports need ACL in legacy mode */ - if (mlx5_esw_is_manager_vport(esw, vport->vport)) - return 0; - - ret = esw_acl_ingress_lgcy_setup(esw, vport); - if (ret) - goto ingress_err; - - ret = esw_acl_egress_lgcy_setup(esw, vport); - if (ret) - goto egress_err; - - return 0; - -egress_err: - 
esw_acl_ingress_lgcy_cleanup(esw, vport); -ingress_err: - return ret; -} - static int esw_vport_setup_acl(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { if (esw->mode == MLX5_ESWITCH_LEGACY) - return esw_vport_create_legacy_acl_tables(esw, vport); + return esw_legacy_vport_acl_setup(esw, vport); else return esw_vport_create_offloads_acl_tables(esw, vport); } -static void esw_vport_destroy_legacy_acl_tables(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) - -{ - if (mlx5_esw_is_manager_vport(esw, vport->vport)) - return; - - esw_acl_egress_lgcy_cleanup(esw, vport); - esw_acl_ingress_lgcy_cleanup(esw, vport); -} - static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { if (esw->mode == MLX5_ESWITCH_LEGACY) - esw_vport_destroy_legacy_acl_tables(esw, vport); + esw_legacy_vport_acl_cleanup(esw, vport); else esw_vport_destroy_offloads_acl_tables(esw, vport); } @@ -1390,15 +1132,9 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev) { int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out); u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {}; - u16 max_sf_vports; u32 *out; int err; - max_sf_vports = mlx5_sf_max_functions(dev); - /* Device interface is array of 64-bits */ - if (max_sf_vports) - outlen += DIV_ROUND_UP(max_sf_vports, BITS_PER_TYPE(__be64)) * sizeof(__be64); - out = kvzalloc(outlen, GFP_KERNEL); if (!out) return ERR_PTR(-ENOMEM); @@ -1449,8 +1185,6 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw) } /* Public E-Switch API */ -#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev)) - int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num, enum mlx5_eswitch_vport_event enabled_events) { @@ -1633,6 +1367,47 @@ static void mlx5_esw_mode_change_notify(struct mlx5_eswitch *esw, u16 mode) blocking_notifier_call_chain(&esw->n_head, 0, &info); } +static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw) +{ + struct mlx5_core_dev *dev = esw->dev; + int total_vports; + int err; + + total_vports = mlx5_eswitch_get_total_vports(dev); + + if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) { + err = mlx5_fs_egress_acls_init(dev, total_vports); + if (err) + return err; + } else { + esw_warn(dev, "engress ACL is not supported by FW\n"); + } + + if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) { + err = mlx5_fs_ingress_acls_init(dev, total_vports); + if (err) + goto err; + } else { + esw_warn(dev, "ingress ACL is not supported by FW\n"); + } + return 0; + +err: + if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) + mlx5_fs_egress_acls_cleanup(dev); + return err; +} + +static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw) +{ + struct mlx5_core_dev *dev = esw->dev; + + if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) + mlx5_fs_ingress_acls_cleanup(dev); + if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) + mlx5_fs_egress_acls_cleanup(dev); +} + /** * mlx5_eswitch_enable_locked - Enable eswitch * @esw: Pointer to eswitch @@ -1661,14 +1436,12 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs) return -EOPNOTSUPP; } - if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support)) - esw_warn(esw->dev, "ingress ACL is not supported by FW\n"); - - if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support)) - esw_warn(esw->dev, "engress ACL is not supported by FW\n"); - mlx5_eswitch_get_devlink_param(esw); + err = mlx5_esw_acls_ns_init(esw); + if (err) + return err; + mlx5_eswitch_update_num_of_vfs(esw, num_vfs); esw_create_tsar(esw); @@ -1704,6 +1477,7 @@ abort: mlx5_rescan_drivers(esw->dev); 
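/*
 * Illustrative aside, not part of the patch: mlx5_esw_acls_ns_init() and
 * mlx5_esw_acls_ns_cleanup() are strictly paired, so both the abort path of
 * mlx5_eswitch_enable_locked() (continued just below) and
 * mlx5_eswitch_disable_locked() must unwind the ACL namespaces. A minimal
 * sketch of that pairing, where do_enable_mode() is a hypothetical stand-in
 * for the mode-specific setup:
 */
static int example_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
{
	int err;

	err = mlx5_esw_acls_ns_init(esw);	/* ingress/egress ACL namespaces */
	if (err)
		return err;

	err = do_enable_mode(esw, mode, num_vfs);	/* hypothetical helper */
	if (err)
		mlx5_esw_acls_ns_cleanup(esw);	/* unwind on failure */
	return err;
}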
esw_destroy_tsar(esw); + mlx5_esw_acls_ns_cleanup(esw); return err; } @@ -1719,7 +1493,7 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { int ret; - if (!ESW_ALLOWED(esw)) + if (!mlx5_esw_allowed(esw)) return 0; down_write(&esw->mode_lock); @@ -1772,6 +1546,7 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf) mlx5_rescan_drivers(esw->dev); esw_destroy_tsar(esw); + mlx5_esw_acls_ns_cleanup(esw); if (clear_vf) mlx5_eswitch_clear_vf_vports_info(esw); @@ -1779,7 +1554,7 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf) void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) { - if (!ESW_ALLOWED(esw)) + if (!mlx5_esw_allowed(esw)) return; down_write(&esw->mode_lock); @@ -1862,7 +1637,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) abort: if (esw->work_queue) destroy_workqueue(esw->work_queue); - esw_offloads_cleanup_reps(esw); kfree(esw->vports); kfree(esw); return err; @@ -1877,7 +1651,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) esw->dev->priv.eswitch = NULL; destroy_workqueue(esw->work_queue); - esw_offloads_cleanup_reps(esw); mutex_destroy(&esw->state_lock); WARN_ON(!xa_empty(&esw->offloads.vhca_map)); xa_destroy(&esw->offloads.vhca_map); @@ -1885,6 +1658,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) mlx5e_mod_hdr_tbl_destroy(&esw->offloads.mod_hdr); mutex_destroy(&esw->offloads.encap_tbl_lock); mutex_destroy(&esw->offloads.decap_tbl_lock); + esw_offloads_cleanup_reps(esw); kfree(esw->vports); kfree(esw); } @@ -2030,7 +1804,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, int other_vport = 1; int err = 0; - if (!ESW_ALLOWED(esw)) + if (!mlx5_esw_allowed(esw)) return -EPERM; if (IS_ERR(evport)) return PTR_ERR(evport); @@ -2112,205 +1886,6 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, return err; } -int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, - u16 vport, u16 vlan, u8 qos) -{ - u8 set_flags = 0; - int err = 0; - - if (!ESW_ALLOWED(esw)) - return -EPERM; - - if (vlan || qos) - set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT; - - mutex_lock(&esw->state_lock); - if (esw->mode != MLX5_ESWITCH_LEGACY) { - if (!vlan) - goto unlock; /* compatibility with libvirt */ - - err = -EOPNOTSUPP; - goto unlock; - } - - err = __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags); - -unlock: - mutex_unlock(&esw->state_lock); - return err; -} - -int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, - u16 vport, bool spoofchk) -{ - struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); - bool pschk; - int err = 0; - - if (!ESW_ALLOWED(esw)) - return -EPERM; - if (IS_ERR(evport)) - return PTR_ERR(evport); - - mutex_lock(&esw->state_lock); - if (esw->mode != MLX5_ESWITCH_LEGACY) { - err = -EOPNOTSUPP; - goto unlock; - } - pschk = evport->info.spoofchk; - evport->info.spoofchk = spoofchk; - if (pschk && !is_valid_ether_addr(evport->info.mac)) - mlx5_core_warn(esw->dev, - "Spoofchk in set while MAC is invalid, vport(%d)\n", - evport->vport); - if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) - err = esw_acl_ingress_lgcy_setup(esw, evport); - if (err) - evport->info.spoofchk = pschk; - -unlock: - mutex_unlock(&esw->state_lock); - return err; -} - -static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw) -{ - if (esw->fdb_table.legacy.vepa_uplink_rule) - mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_uplink_rule); - - if (esw->fdb_table.legacy.vepa_star_rule) - mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_star_rule); - - 
esw->fdb_table.legacy.vepa_uplink_rule = NULL; - esw->fdb_table.legacy.vepa_star_rule = NULL; -} - -static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw, - u8 setting) -{ - struct mlx5_flow_destination dest = {}; - struct mlx5_flow_act flow_act = {}; - struct mlx5_flow_handle *flow_rule; - struct mlx5_flow_spec *spec; - int err = 0; - void *misc; - - if (!setting) { - esw_cleanup_vepa_rules(esw); - return 0; - } - - if (esw->fdb_table.legacy.vepa_uplink_rule) - return 0; - - spec = kvzalloc(sizeof(*spec), GFP_KERNEL); - if (!spec) - return -ENOMEM; - - /* Uplink rule forward uplink traffic to FDB */ - misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); - MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK); - - misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); - MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); - - spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; - dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest.ft = esw->fdb_table.legacy.fdb; - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec, - &flow_act, &dest, 1); - if (IS_ERR(flow_rule)) { - err = PTR_ERR(flow_rule); - goto out; - } else { - esw->fdb_table.legacy.vepa_uplink_rule = flow_rule; - } - - /* Star rule to forward all traffic to uplink vport */ - memset(&dest, 0, sizeof(dest)); - dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; - dest.vport.num = MLX5_VPORT_UPLINK; - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, NULL, - &flow_act, &dest, 1); - if (IS_ERR(flow_rule)) { - err = PTR_ERR(flow_rule); - goto out; - } else { - esw->fdb_table.legacy.vepa_star_rule = flow_rule; - } - -out: - kvfree(spec); - if (err) - esw_cleanup_vepa_rules(esw); - return err; -} - -int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting) -{ - int err = 0; - - if (!esw) - return -EOPNOTSUPP; - - if (!ESW_ALLOWED(esw)) - return -EPERM; - - mutex_lock(&esw->state_lock); - if (esw->mode != MLX5_ESWITCH_LEGACY) { - err = -EOPNOTSUPP; - goto out; - } - - err = _mlx5_eswitch_set_vepa_locked(esw, setting); - -out: - mutex_unlock(&esw->state_lock); - return err; -} - -int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting) -{ - if (!esw) - return -EOPNOTSUPP; - - if (!ESW_ALLOWED(esw)) - return -EPERM; - - if (esw->mode != MLX5_ESWITCH_LEGACY) - return -EOPNOTSUPP; - - *setting = esw->fdb_table.legacy.vepa_uplink_rule ? 
1 : 0; - return 0; -} - -int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, - u16 vport, bool setting) -{ - struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); - int err = 0; - - if (!ESW_ALLOWED(esw)) - return -EPERM; - if (IS_ERR(evport)) - return PTR_ERR(evport); - - mutex_lock(&esw->state_lock); - if (esw->mode != MLX5_ESWITCH_LEGACY) { - err = -EOPNOTSUPP; - goto unlock; - } - evport->info.trusted = setting; - if (evport->enabled) - esw_vport_change_handle_locked(evport); - -unlock: - mutex_unlock(&esw->state_lock); - return err; -} - static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw) { u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); @@ -2376,7 +1951,7 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport, bool max_rate_supported; int err = 0; - if (!ESW_ALLOWED(esw)) + if (!mlx5_esw_allowed(esw)) return -EPERM; if (IS_ERR(evport)) return PTR_ERR(evport); @@ -2415,50 +1990,6 @@ unlock: return err; } -static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev, - struct mlx5_vport *vport, - struct mlx5_vport_drop_stats *stats) -{ - struct mlx5_eswitch *esw = dev->priv.eswitch; - u64 rx_discard_vport_down, tx_discard_vport_down; - u64 bytes = 0; - int err = 0; - - if (esw->mode != MLX5_ESWITCH_LEGACY) - return 0; - - mutex_lock(&esw->state_lock); - if (!vport->enabled) - goto unlock; - - if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter)) - mlx5_fc_query(dev, vport->egress.legacy.drop_counter, - &stats->rx_dropped, &bytes); - - if (vport->ingress.legacy.drop_counter) - mlx5_fc_query(dev, vport->ingress.legacy.drop_counter, - &stats->tx_dropped, &bytes); - - if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) && - !MLX5_CAP_GEN(dev, transmit_discard_vport_down)) - goto unlock; - - err = mlx5_query_vport_down_stats(dev, vport->vport, 1, - &rx_discard_vport_down, - &tx_discard_vport_down); - if (err) - goto unlock; - - if (MLX5_CAP_GEN(dev, receive_discard_vport_down)) - stats->rx_dropped += rx_discard_vport_down; - if (MLX5_CAP_GEN(dev, transmit_discard_vport_down)) - stats->tx_dropped += tx_discard_vport_down; - -unlock: - mutex_unlock(&esw->state_lock); - return err; -} - int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, u16 vport_num, struct ifla_vf_stats *vf_stats) @@ -2526,7 +2057,7 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, vf_stats->broadcast = MLX5_GET_CTR(out, received_eth_broadcast.packets); - err = mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats); + err = mlx5_esw_query_vport_drop_stats(esw->dev, vport, &stats); if (err) goto free_out; vf_stats->rx_dropped = stats.rx_dropped; @@ -2541,7 +2072,7 @@ u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev) { struct mlx5_eswitch *esw = dev->priv.eswitch; - return ESW_ALLOWED(esw) ? esw->mode : MLX5_ESWITCH_NONE; + return mlx5_esw_allowed(esw) ? esw->mode : MLX5_ESWITCH_NONE; } EXPORT_SYMBOL_GPL(mlx5_eswitch_mode); @@ -2551,7 +2082,7 @@ mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev) struct mlx5_eswitch *esw; esw = dev->priv.eswitch; - return ESW_ALLOWED(esw) ? esw->offloads.encap : + return mlx5_esw_allowed(esw) ? esw->offloads.encap : DEVLINK_ESWITCH_ENCAP_MODE_NONE; } EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode); @@ -2597,7 +2128,7 @@ bool mlx5_esw_hold(struct mlx5_core_dev *mdev) struct mlx5_eswitch *esw = mdev->priv.eswitch; /* e.g. 
VF doesn't have eswitch so nothing to do */ - if (!ESW_ALLOWED(esw)) + if (!mlx5_esw_allowed(esw)) return true; if (down_read_trylock(&esw->mode_lock) != 0) @@ -2614,7 +2145,7 @@ void mlx5_esw_release(struct mlx5_core_dev *mdev) { struct mlx5_eswitch *esw = mdev->priv.eswitch; - if (ESW_ALLOWED(esw)) + if (mlx5_esw_allowed(esw)) up_read(&esw->mode_lock); } @@ -2626,7 +2157,7 @@ void mlx5_esw_get(struct mlx5_core_dev *mdev) { struct mlx5_eswitch *esw = mdev->priv.eswitch; - if (ESW_ALLOWED(esw)) + if (mlx5_esw_allowed(esw)) atomic64_inc(&esw->user_count); } @@ -2638,7 +2169,7 @@ void mlx5_esw_put(struct mlx5_core_dev *mdev) { struct mlx5_eswitch *esw = mdev->priv.eswitch; - if (ESW_ALLOWED(esw)) + if (mlx5_esw_allowed(esw)) atomic64_dec_if_positive(&esw->user_count); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index deafb0e03787..b289d756a7e4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -152,7 +152,6 @@ enum mlx5_eswitch_vport_event { struct mlx5_vport { struct mlx5_core_dev *dev; - int vport; struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE]; struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE]; struct mlx5_flow_handle *promisc_rule; @@ -174,6 +173,7 @@ struct mlx5_vport { u32 max_rate; } qos; + u16 vport; bool enabled; enum mlx5_eswitch_vport_event enabled_events; struct devlink_port *dl_port; @@ -314,6 +314,8 @@ int esw_offloads_enable(struct mlx5_eswitch *esw); void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw); int esw_offloads_init_reps(struct mlx5_eswitch *esw); +bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw); +int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable); u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw); void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata); @@ -519,6 +521,11 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev); #define esw_debug(dev, format, ...) \ mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__) +static inline bool mlx5_esw_allowed(const struct mlx5_eswitch *esw) +{ + return esw && MLX5_ESWITCH_MANAGER(esw->dev); +} + /* The returned number is valid only when the dev is eswitch manager. 
*/ static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev) { @@ -807,6 +814,8 @@ void mlx5_esw_put(struct mlx5_core_dev *dev); int mlx5_esw_try_lock(struct mlx5_eswitch *esw); void mlx5_esw_unlock(struct mlx5_eswitch *esw); +void esw_vport_change_handle_locked(struct mlx5_vport *vport); + #else /* CONFIG_MLX5_ESWITCH */ /* eswitch API stubs */ static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index ab32f685cbb7..bbb707117296 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -986,12 +986,13 @@ static void mlx5_eswitch_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw) static int mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw) { - int num_vfs, vport_num, rule_idx = 0, err = 0; struct mlx5_flow_destination dest = {}; struct mlx5_flow_act flow_act = {0}; + int num_vfs, rule_idx = 0, err = 0; struct mlx5_flow_handle *flow_rule; struct mlx5_flow_handle **flows; struct mlx5_flow_spec *spec; + u16 vport_num; num_vfs = esw->esw_funcs.num_vfs; flows = kvzalloc(num_vfs * sizeof(*flows), GFP_KERNEL); @@ -2351,8 +2352,7 @@ static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS); } -static bool -esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw) +bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw) { if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl)) return false; @@ -2452,6 +2452,28 @@ metadata_err: return err; } +int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable) +{ + int err = 0; + + down_write(&esw->mode_lock); + if (esw->mode != MLX5_ESWITCH_NONE) { + err = -EBUSY; + goto done; + } + if (!mlx5_esw_vport_match_metadata_supported(esw)) { + err = -EOPNOTSUPP; + goto done; + } + if (enable) + esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA; + else + esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA; +done: + up_write(&esw->mode_lock); + return err; +} + int esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw, struct mlx5_vport *vport) @@ -2673,9 +2695,6 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) if (err) goto err_metadata; - if (esw_check_vport_match_metadata_supported(esw)) - esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA; - err = esw_offloads_metadata_init(esw); if (err) goto err_metadata; @@ -2725,7 +2744,6 @@ err_pool: err_vport_metadata: esw_offloads_metadata_uninit(esw); err_metadata: - esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA; mlx5_rdma_disable_roce(esw->dev); mutex_destroy(&esw->offloads.termtbl_mutex); return err; @@ -2761,7 +2779,6 @@ void esw_offloads_disable(struct mlx5_eswitch *esw) esw_offloads_steering_cleanup(esw); mapping_destroy(esw->offloads.reg_c0_obj_pool); esw_offloads_metadata_uninit(esw); - esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA; mlx5_rdma_disable_roce(esw->dev); mutex_destroy(&esw->offloads.termtbl_mutex); esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c index d43a05e77f67..0bba92cf5dc0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c @@ -850,7 +850,7 @@ mlx5_fpga_ipsec_release_sa_ctx(struct 
mlx5_fpga_ipsec_sa_ctx *sa_ctx) return; } - if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action & + if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT) ida_free(&fipsec->halloc, sa_ctx->sa_handle); @@ -1085,6 +1085,7 @@ static int fpga_ipsec_fs_create_fte(struct mlx5_flow_root_namespace *ns, rule->ctx = mlx5_fpga_ipsec_fs_create_sa_ctx(dev, fte, is_egress); if (IS_ERR(rule->ctx)) { int err = PTR_ERR(rule->ctx); + kfree(rule); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 0216bd63a42d..f74d2c834037 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -2229,17 +2229,21 @@ struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_d { struct mlx5_flow_steering *steering = dev->priv.steering; - if (!steering || vport >= mlx5_eswitch_get_total_vports(dev)) + if (!steering) return NULL; switch (type) { case MLX5_FLOW_NAMESPACE_ESW_EGRESS: + if (vport >= steering->esw_egress_acl_vports) + return NULL; if (steering->esw_egress_root_ns && steering->esw_egress_root_ns[vport]) return &steering->esw_egress_root_ns[vport]->ns; else return NULL; case MLX5_FLOW_NAMESPACE_ESW_INGRESS: + if (vport >= steering->esw_ingress_acl_vports) + return NULL; if (steering->esw_ingress_root_ns && steering->esw_ingress_root_ns[vport]) return &steering->esw_ingress_root_ns[vport]->ns; @@ -2571,43 +2575,11 @@ static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns) clean_tree(&root_ns->ns.node); } -static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev) -{ - struct mlx5_flow_steering *steering = dev->priv.steering; - int i; - - if (!steering->esw_egress_root_ns) - return; - - for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++) - cleanup_root_ns(steering->esw_egress_root_ns[i]); - - kfree(steering->esw_egress_root_ns); - steering->esw_egress_root_ns = NULL; -} - -static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev) -{ - struct mlx5_flow_steering *steering = dev->priv.steering; - int i; - - if (!steering->esw_ingress_root_ns) - return; - - for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++) - cleanup_root_ns(steering->esw_ingress_root_ns[i]); - - kfree(steering->esw_ingress_root_ns); - steering->esw_ingress_root_ns = NULL; -} - void mlx5_cleanup_fs(struct mlx5_core_dev *dev) { struct mlx5_flow_steering *steering = dev->priv.steering; cleanup_root_ns(steering->root_ns); - cleanup_egress_acls_root_ns(dev); - cleanup_ingress_acls_root_ns(dev); cleanup_root_ns(steering->fdb_root_ns); steering->fdb_root_ns = NULL; kfree(steering->fdb_sub_ns); @@ -2852,10 +2824,9 @@ static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vpo return PTR_ERR_OR_ZERO(prio); } -static int init_egress_acls_root_ns(struct mlx5_core_dev *dev) +int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports) { struct mlx5_flow_steering *steering = dev->priv.steering; - int total_vports = mlx5_eswitch_get_total_vports(dev); int err; int i; @@ -2871,7 +2842,7 @@ static int init_egress_acls_root_ns(struct mlx5_core_dev *dev) if (err) goto cleanup_root_ns; } - + steering->esw_egress_acl_vports = total_vports; return 0; cleanup_root_ns: @@ -2882,10 +2853,24 @@ cleanup_root_ns: return err; } -static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev) +void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev) +{ + struct mlx5_flow_steering *steering = dev->priv.steering; + int 
i; + + if (!steering->esw_egress_root_ns) + return; + + for (i = 0; i < steering->esw_egress_acl_vports; i++) + cleanup_root_ns(steering->esw_egress_root_ns[i]); + + kfree(steering->esw_egress_root_ns); + steering->esw_egress_root_ns = NULL; +} + +int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports) { struct mlx5_flow_steering *steering = dev->priv.steering; - int total_vports = mlx5_eswitch_get_total_vports(dev); int err; int i; @@ -2901,7 +2886,7 @@ static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev) if (err) goto cleanup_root_ns; } - + steering->esw_ingress_acl_vports = total_vports; return 0; cleanup_root_ns: @@ -2912,6 +2897,21 @@ cleanup_root_ns: return err; } +void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev) +{ + struct mlx5_flow_steering *steering = dev->priv.steering; + int i; + + if (!steering->esw_ingress_root_ns) + return; + + for (i = 0; i < steering->esw_ingress_acl_vports; i++) + cleanup_root_ns(steering->esw_ingress_root_ns[i]); + + kfree(steering->esw_ingress_root_ns); + steering->esw_ingress_root_ns = NULL; +} + static int init_egress_root_ns(struct mlx5_flow_steering *steering) { int err; @@ -2974,16 +2974,6 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) if (err) goto err; } - if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) { - err = init_egress_acls_root_ns(dev); - if (err) - goto err; - } - if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) { - err = init_ingress_acls_root_ns(dev); - if (err) - goto err; - } } if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index b24a9849c45e..e577a2c424af 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -129,6 +129,8 @@ struct mlx5_flow_steering { struct mlx5_flow_root_namespace *rdma_rx_root_ns; struct mlx5_flow_root_namespace *rdma_tx_root_ns; struct mlx5_flow_root_namespace *egress_root_ns; + int esw_egress_acl_vports; + int esw_ingress_acl_vports; }; struct fs_node { @@ -287,6 +289,11 @@ int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns, int mlx5_init_fs(struct mlx5_core_dev *dev); void mlx5_cleanup_fs(struct mlx5_core_dev *dev); +int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports); +void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev); +int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports); +void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev); + #define fs_get_obj(v, _node) {v = container_of((_node), typeof(*v), node); } #define fs_list_for_each_entry(pos, root) \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c index f43caefd07a1..18e5aec14641 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c @@ -497,13 +497,13 @@ static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev) alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc); bulk_len = alloc_bitmask > 0 ? 
MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1; - bulk = kzalloc(sizeof(*bulk) + bulk_len * sizeof(struct mlx5_fc), - GFP_KERNEL); + bulk = kvzalloc(sizeof(*bulk) + bulk_len * sizeof(struct mlx5_fc), + GFP_KERNEL); if (!bulk) goto err_alloc_bulk; - bulk->bitmask = kcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long), - GFP_KERNEL); + bulk->bitmask = kvcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long), + GFP_KERNEL); if (!bulk->bitmask) goto err_alloc_bitmask; @@ -521,9 +521,9 @@ static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev) return bulk; err_mlx5_cmd_bulk_alloc: - kfree(bulk->bitmask); + kvfree(bulk->bitmask); err_alloc_bitmask: - kfree(bulk); + kvfree(bulk); err_alloc_bulk: return ERR_PTR(err); } @@ -537,8 +537,8 @@ mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fc_bulk *bulk) } mlx5_cmd_fc_free(dev, bulk->base_id); - kfree(bulk->bitmask); - kfree(bulk); + kvfree(bulk->bitmask); + kvfree(bulk); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index b65b0cefc5b3..612a7f69366d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -481,28 +481,19 @@ static const struct mlx5e_profile mlx5i_nic_profile = { static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu) { struct mlx5e_priv *priv = mlx5i_epriv(netdev); - struct mlx5e_channels new_channels = {}; - struct mlx5e_params *params; + struct mlx5e_params new_params; int err = 0; mutex_lock(&priv->state_lock); - params = &priv->channels.params; + new_params = priv->channels.params; + new_params.sw_mtu = new_mtu; - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { - params->sw_mtu = new_mtu; - netdev->mtu = params->sw_mtu; - goto out; - } - - new_channels.params = *params; - new_channels.params.sw_mtu = new_mtu; - - err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); + err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true); if (err) goto out; - netdev->mtu = new_channels.params.sw_mtu; + netdev->mtu = new_params.sw_mtu; out: mutex_unlock(&priv->state_lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c index 127bb92da150..b8748390335f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c @@ -603,8 +603,6 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev) if (err) mlx5_core_err(dev, "Failed to init multipath lag err=%d\n", err); - - return; } /* Must be called with intf_mutex held */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index 1e7f26b240de..ce696d523493 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -645,16 +645,19 @@ static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin) return PTP_PF_NONE; } -static int mlx5_init_pin_config(struct mlx5_clock *clock) +static void mlx5_init_pin_config(struct mlx5_clock *clock) { int i; + if (!clock->ptp_info.n_pins) + return; + clock->ptp_info.pin_config = kcalloc(clock->ptp_info.n_pins, sizeof(*clock->ptp_info.pin_config), GFP_KERNEL); if (!clock->ptp_info.pin_config) - return -ENOMEM; + return; clock->ptp_info.enable = mlx5_ptp_enable; clock->ptp_info.verify = mlx5_ptp_verify; clock->ptp_info.pps = 1; @@ -667,8 +670,6 @@ static int mlx5_init_pin_config(struct 
mlx5_clock *clock) clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i); clock->ptp_info.pin_config[i].chan = 0; } - - return 0; } static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev) @@ -859,6 +860,17 @@ static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev) } } +static void mlx5_init_pps(struct mlx5_core_dev *mdev) +{ + struct mlx5_clock *clock = &mdev->clock; + + if (!MLX5_PPS_CAP(mdev)) + return; + + mlx5_get_pps_caps(mdev); + mlx5_init_pin_config(clock); +} + void mlx5_init_clock(struct mlx5_core_dev *mdev) { struct mlx5_clock *clock = &mdev->clock; @@ -876,10 +888,7 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev) clock->ptp_info = mlx5_ptp_clock_info; /* Initialize 1PPS data structures */ - if (MLX5_PPS_CAP(mdev)) - mlx5_get_pps_caps(mdev); - if (clock->ptp_info.n_pins) - mlx5_init_pin_config(clock); + mlx5_init_pps(mdev); clock->ptp = ptp_clock_register(&clock->ptp_info, &mdev->pdev->dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index 19e3e978267e..1f907df5b3a2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -167,7 +167,6 @@ static void irq_set_name(char *name, int vecidx) snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", vecidx - MLX5_IRQ_VEC_COMP_BASE); - return; } static int request_irqs(struct mlx5_core_dev *dev, int nvec) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c index 8e0dddc6383f..441b5453acae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c @@ -180,5 +180,4 @@ del_roce_addr: mlx5_rdma_del_roce_addr(dev); disable_roce: mlx5_nic_vport_disable_roce(dev); - return; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c index 60a6328a9ca0..52226d9b9a6d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c @@ -270,15 +270,14 @@ static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table, { struct mlx5_eswitch *esw = dev->priv.eswitch; struct mlx5_sf *sf; - u16 hw_fn_id; int err; sf = mlx5_sf_alloc(table, new_attr->sfnum, extack); if (IS_ERR(sf)) return PTR_ERR(sf); - hw_fn_id = mlx5_sf_sw_to_hw_id(dev, sf->id); - err = mlx5_esw_offloads_sf_vport_enable(esw, &sf->dl_port, hw_fn_id, new_attr->sfnum); + err = mlx5_esw_offloads_sf_vport_enable(esw, &sf->dl_port, sf->hw_fn_id, + new_attr->sfnum); if (err) goto esw_err; *new_port_index = sf->port_index; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c index c9bddde04047..ec53c11c8344 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c @@ -67,8 +67,8 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum) goto exist_err; } - hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, sw_id); - err = mlx5_cmd_alloc_sf(table->dev, hw_fn_id); + hw_fn_id = mlx5_sf_sw_to_hw_id(dev, sw_id); + err = mlx5_cmd_alloc_sf(dev, hw_fn_id); if (err) goto err; @@ -80,7 +80,7 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum) return sw_id; vhca_err: - mlx5_cmd_dealloc_sf(table->dev, hw_fn_id); + mlx5_cmd_dealloc_sf(dev, hw_fn_id); err: table->sfs[i].allocated = false; exist_err: @@ -93,8 +93,8 @@ static void 
_mlx5_sf_hw_id_free(struct mlx5_core_dev *dev, u16 id) struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table; u16 hw_fn_id; - hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, id); - mlx5_cmd_dealloc_sf(table->dev, hw_fn_id); + hw_fn_id = mlx5_sf_sw_to_hw_id(dev, id); + mlx5_cmd_dealloc_sf(dev, hw_fn_id); table->sfs[id].allocated = false; table->sfs[id].pending_delete = false; } @@ -123,7 +123,7 @@ void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id) goto err; state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state); if (state == MLX5_VHCA_STATE_ALLOCATED) { - mlx5_cmd_dealloc_sf(table->dev, hw_fn_id); + mlx5_cmd_dealloc_sf(dev, hw_fn_id); table->sfs[id].allocated = false; } else { table->sfs[id].pending_delete = true; @@ -216,7 +216,7 @@ int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev) return 0; table->vhca_nb.notifier_call = mlx5_sf_hw_vhca_event; - return mlx5_vhca_event_notifier_register(table->dev, &table->vhca_nb); + return mlx5_vhca_event_notifier_register(dev, &table->vhca_nb); } void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev) @@ -226,7 +226,7 @@ void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev) if (!table) return; - mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb); + mlx5_vhca_event_notifier_unregister(dev, &table->vhca_nb); /* Dealloc SFs whose firmware event has been missed. */ mlx5_sf_hw_dealloc_all(table); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c index 28a7971cac6a..949879cf2092 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c @@ -313,8 +313,8 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn, * table, since there is an *assumption* that in such case FW * will recalculate the CS. */ - if (dest_action->dest_tbl.is_fw_tbl) { - *final_icm_addr = dest_action->dest_tbl.fw_tbl.rx_icm_addr; + if (dest_action->dest_tbl->is_fw_tbl) { + *final_icm_addr = dest_action->dest_tbl->fw_tbl.rx_icm_addr; } else { mlx5dr_dbg(dmn, "Destination FT should be terminating when modify TTL is used\n"); @@ -326,8 +326,8 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn, /* If destination is vport we will get the FW flow table * that recalculates the CS and forwards to the vport. 
*/ - ret = mlx5dr_domain_cache_get_recalc_cs_ft_addr(dest_action->vport.dmn, - dest_action->vport.caps->num, + ret = mlx5dr_domain_cache_get_recalc_cs_ft_addr(dest_action->vport->dmn, + dest_action->vport->caps->num, final_icm_addr); if (ret) { mlx5dr_err(dmn, "Failed to get FW cs recalc flow table\n"); @@ -369,6 +369,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, action_domain = dr_action_get_action_domain(dmn->type, nic_dmn->ste_type); for (i = 0; i < num_actions; i++) { + struct mlx5dr_action_dest_tbl *dest_tbl; struct mlx5dr_action *action; int max_actions_type = 1; u32 action_type; @@ -382,37 +383,38 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, break; case DR_ACTION_TYP_FT: dest_action = action; - if (!action->dest_tbl.is_fw_tbl) { - if (action->dest_tbl.tbl->dmn != dmn) { + dest_tbl = action->dest_tbl; + if (!dest_tbl->is_fw_tbl) { + if (dest_tbl->tbl->dmn != dmn) { mlx5dr_err(dmn, "Destination table belongs to a different domain\n"); goto out_invalid_arg; } - if (action->dest_tbl.tbl->level <= matcher->tbl->level) { + if (dest_tbl->tbl->level <= matcher->tbl->level) { mlx5_core_warn_once(dmn->mdev, "Connecting table to a lower/same level destination table\n"); mlx5dr_dbg(dmn, "Connecting table at level %d to a destination table at level %d\n", matcher->tbl->level, - action->dest_tbl.tbl->level); + dest_tbl->tbl->level); } attr.final_icm_addr = rx_rule ? - action->dest_tbl.tbl->rx.s_anchor->chunk->icm_addr : - action->dest_tbl.tbl->tx.s_anchor->chunk->icm_addr; + dest_tbl->tbl->rx.s_anchor->chunk->icm_addr : + dest_tbl->tbl->tx.s_anchor->chunk->icm_addr; } else { struct mlx5dr_cmd_query_flow_table_details output; int ret; /* get the relevant addresses */ - if (!action->dest_tbl.fw_tbl.rx_icm_addr) { + if (!action->dest_tbl->fw_tbl.rx_icm_addr) { ret = mlx5dr_cmd_query_flow_table(dmn->mdev, - action->dest_tbl.fw_tbl.type, - action->dest_tbl.fw_tbl.id, + dest_tbl->fw_tbl.type, + dest_tbl->fw_tbl.id, &output); if (!ret) { - action->dest_tbl.fw_tbl.tx_icm_addr = + dest_tbl->fw_tbl.tx_icm_addr = output.sw_owner_icm_root_1; - action->dest_tbl.fw_tbl.rx_icm_addr = + dest_tbl->fw_tbl.rx_icm_addr = output.sw_owner_icm_root_0; } else { mlx5dr_err(dmn, @@ -422,50 +424,50 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, } } attr.final_icm_addr = rx_rule ? 
- action->dest_tbl.fw_tbl.rx_icm_addr : - action->dest_tbl.fw_tbl.tx_icm_addr; + dest_tbl->fw_tbl.rx_icm_addr : + dest_tbl->fw_tbl.tx_icm_addr; } break; case DR_ACTION_TYP_QP: mlx5dr_info(dmn, "Domain doesn't support QP\n"); goto out_invalid_arg; case DR_ACTION_TYP_CTR: - attr.ctr_id = action->ctr.ctr_id + - action->ctr.offeset; + attr.ctr_id = action->ctr->ctr_id + + action->ctr->offeset; break; case DR_ACTION_TYP_TAG: - attr.flow_tag = action->flow_tag; + attr.flow_tag = action->flow_tag->flow_tag; break; case DR_ACTION_TYP_TNL_L2_TO_L2: break; case DR_ACTION_TYP_TNL_L3_TO_L2: - attr.decap_index = action->rewrite.index; - attr.decap_actions = action->rewrite.num_of_actions; + attr.decap_index = action->rewrite->index; + attr.decap_actions = action->rewrite->num_of_actions; attr.decap_with_vlan = attr.decap_actions == WITH_VLAN_NUM_HW_ACTIONS; break; case DR_ACTION_TYP_MODIFY_HDR: - attr.modify_index = action->rewrite.index; - attr.modify_actions = action->rewrite.num_of_actions; - recalc_cs_required = action->rewrite.modify_ttl && + attr.modify_index = action->rewrite->index; + attr.modify_actions = action->rewrite->num_of_actions; + recalc_cs_required = action->rewrite->modify_ttl && !mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps); break; case DR_ACTION_TYP_L2_TO_TNL_L2: case DR_ACTION_TYP_L2_TO_TNL_L3: - attr.reformat_size = action->reformat.reformat_size; - attr.reformat_id = action->reformat.reformat_id; + attr.reformat_size = action->reformat->reformat_size; + attr.reformat_id = action->reformat->reformat_id; break; case DR_ACTION_TYP_VPORT: - attr.hit_gvmi = action->vport.caps->vhca_gvmi; + attr.hit_gvmi = action->vport->caps->vhca_gvmi; dest_action = action; if (rx_rule) { /* Loopback on WIRE vport is not supported */ - if (action->vport.caps->num == WIRE_PORT) + if (action->vport->caps->num == WIRE_PORT) goto out_invalid_arg; - attr.final_icm_addr = action->vport.caps->icm_address_rx; + attr.final_icm_addr = action->vport->caps->icm_address_rx; } else { - attr.final_icm_addr = action->vport.caps->icm_address_tx; + attr.final_icm_addr = action->vport->caps->icm_address_tx; } break; case DR_ACTION_TYP_POP_VLAN: @@ -477,7 +479,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, if (attr.vlans.count == MLX5DR_MAX_VLANS) return -EINVAL; - attr.vlans.headers[attr.vlans.count++] = action->push_vlan.vlan_hdr; + attr.vlans.headers[attr.vlans.count++] = action->push_vlan->vlan_hdr; break; default: goto out_invalid_arg; @@ -530,17 +532,37 @@ out_invalid_arg: return -EINVAL; } +static unsigned int action_size[DR_ACTION_TYP_MAX] = { + [DR_ACTION_TYP_TNL_L2_TO_L2] = sizeof(struct mlx5dr_action_reformat), + [DR_ACTION_TYP_L2_TO_TNL_L2] = sizeof(struct mlx5dr_action_reformat), + [DR_ACTION_TYP_TNL_L3_TO_L2] = sizeof(struct mlx5dr_action_rewrite), + [DR_ACTION_TYP_L2_TO_TNL_L3] = sizeof(struct mlx5dr_action_reformat), + [DR_ACTION_TYP_FT] = sizeof(struct mlx5dr_action_dest_tbl), + [DR_ACTION_TYP_CTR] = sizeof(struct mlx5dr_action_ctr), + [DR_ACTION_TYP_TAG] = sizeof(struct mlx5dr_action_flow_tag), + [DR_ACTION_TYP_MODIFY_HDR] = sizeof(struct mlx5dr_action_rewrite), + [DR_ACTION_TYP_VPORT] = sizeof(struct mlx5dr_action_vport), + [DR_ACTION_TYP_PUSH_VLAN] = sizeof(struct mlx5dr_action_push_vlan), +}; + static struct mlx5dr_action * dr_action_create_generic(enum mlx5dr_action_type action_type) { struct mlx5dr_action *action; + int extra_size; + + if (action_type < DR_ACTION_TYP_MAX) + extra_size = action_size[action_type]; + else + return NULL; - action = kzalloc(sizeof(*action), 
GFP_KERNEL); + action = kzalloc(sizeof(*action) + extra_size, GFP_KERNEL); if (!action) return NULL; action->action_type = action_type; refcount_set(&action->refcount, 1); + action->data = action + 1; return action; } @@ -559,10 +581,10 @@ mlx5dr_action_create_dest_table_num(struct mlx5dr_domain *dmn, u32 table_num) if (!action) return NULL; - action->dest_tbl.is_fw_tbl = true; - action->dest_tbl.fw_tbl.dmn = dmn; - action->dest_tbl.fw_tbl.id = table_num; - action->dest_tbl.fw_tbl.type = FS_FT_FDB; + action->dest_tbl->is_fw_tbl = true; + action->dest_tbl->fw_tbl.dmn = dmn; + action->dest_tbl->fw_tbl.id = table_num; + action->dest_tbl->fw_tbl.type = FS_FT_FDB; refcount_inc(&dmn->refcount); return action; @@ -579,7 +601,7 @@ mlx5dr_action_create_dest_table(struct mlx5dr_table *tbl) if (!action) goto dec_ref; - action->dest_tbl.tbl = tbl; + action->dest_tbl->tbl = tbl; return action; @@ -624,12 +646,12 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn, case DR_ACTION_TYP_VPORT: hw_dests[i].vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID; hw_dests[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; - hw_dests[i].vport.num = dest_action->vport.caps->num; - hw_dests[i].vport.vhca_id = dest_action->vport.caps->vhca_gvmi; + hw_dests[i].vport.num = dest_action->vport->caps->num; + hw_dests[i].vport.vhca_id = dest_action->vport->caps->vhca_gvmi; if (reformat_action) { reformat_req = true; hw_dests[i].vport.reformat_id = - reformat_action->reformat.reformat_id; + reformat_action->reformat->reformat_id; ref_actions[num_of_ref++] = reformat_action; hw_dests[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID; } @@ -637,10 +659,10 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn, case DR_ACTION_TYP_FT: hw_dests[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - if (dest_action->dest_tbl.is_fw_tbl) - hw_dests[i].ft_id = dest_action->dest_tbl.fw_tbl.id; + if (dest_action->dest_tbl->is_fw_tbl) + hw_dests[i].ft_id = dest_action->dest_tbl->fw_tbl.id; else - hw_dests[i].ft_id = dest_action->dest_tbl.tbl->table_id; + hw_dests[i].ft_id = dest_action->dest_tbl->tbl->table_id; break; default: @@ -657,8 +679,8 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn, hw_dests, num_of_dests, reformat_req, - &action->dest_tbl.fw_tbl.id, - &action->dest_tbl.fw_tbl.group_id); + &action->dest_tbl->fw_tbl.id, + &action->dest_tbl->fw_tbl.group_id); if (ret) goto free_action; @@ -667,11 +689,11 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn, for (i = 0; i < num_of_ref; i++) refcount_inc(&ref_actions[i]->refcount); - action->dest_tbl.is_fw_tbl = true; - action->dest_tbl.fw_tbl.dmn = dmn; - action->dest_tbl.fw_tbl.type = FS_FT_FDB; - action->dest_tbl.fw_tbl.ref_actions = ref_actions; - action->dest_tbl.fw_tbl.num_of_ref_actions = num_of_ref; + action->dest_tbl->is_fw_tbl = true; + action->dest_tbl->fw_tbl.dmn = dmn; + action->dest_tbl->fw_tbl.type = FS_FT_FDB; + action->dest_tbl->fw_tbl.ref_actions = ref_actions; + action->dest_tbl->fw_tbl.num_of_ref_actions = num_of_ref; kfree(hw_dests); @@ -696,10 +718,10 @@ mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *dmn, if (!action) return NULL; - action->dest_tbl.is_fw_tbl = 1; - action->dest_tbl.fw_tbl.type = ft->type; - action->dest_tbl.fw_tbl.id = ft->id; - action->dest_tbl.fw_tbl.dmn = dmn; + action->dest_tbl->is_fw_tbl = 1; + action->dest_tbl->fw_tbl.type = ft->type; + action->dest_tbl->fw_tbl.id = ft->id; + action->dest_tbl->fw_tbl.dmn = dmn; refcount_inc(&dmn->refcount); @@ -715,7 +737,7 @@ 
mlx5dr_action_create_flow_counter(u32 counter_id) if (!action) return NULL; - action->ctr.ctr_id = counter_id; + action->ctr->ctr_id = counter_id; return action; } @@ -728,7 +750,7 @@ struct mlx5dr_action *mlx5dr_action_create_tag(u32 tag_value) if (!action) return NULL; - action->flow_tag = tag_value & 0xffffff; + action->flow_tag->flow_tag = tag_value & 0xffffff; return action; } @@ -794,8 +816,8 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn, if (ret) return ret; - action->reformat.reformat_id = reformat_id; - action->reformat.reformat_size = data_sz; + action->reformat->reformat_id = reformat_id; + action->reformat->reformat_size = data_sz; return 0; } case DR_ACTION_TYP_TNL_L2_TO_L2: @@ -811,28 +833,28 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn, data, data_sz, hw_actions, ACTION_CACHE_LINE_SIZE, - &action->rewrite.num_of_actions); + &action->rewrite->num_of_actions); if (ret) { mlx5dr_dbg(dmn, "Failed creating decap l3 action list\n"); return ret; } - action->rewrite.chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool, - DR_CHUNK_SIZE_8); - if (!action->rewrite.chunk) { + action->rewrite->chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool, + DR_CHUNK_SIZE_8); + if (!action->rewrite->chunk) { mlx5dr_dbg(dmn, "Failed allocating modify header chunk\n"); return -ENOMEM; } - action->rewrite.data = (void *)hw_actions; - action->rewrite.index = (action->rewrite.chunk->icm_addr - + action->rewrite->data = (void *)hw_actions; + action->rewrite->index = (action->rewrite->chunk->icm_addr - dmn->info.caps.hdr_modify_icm_addr) / ACTION_CACHE_LINE_SIZE; ret = mlx5dr_send_postsend_action(dmn, action); if (ret) { mlx5dr_dbg(dmn, "Writing decap l3 actions to ICM failed\n"); - mlx5dr_icm_free_chunk(action->rewrite.chunk); + mlx5dr_icm_free_chunk(action->rewrite->chunk); return ret; } return 0; @@ -867,7 +889,7 @@ struct mlx5dr_action *mlx5dr_action_create_push_vlan(struct mlx5dr_domain *dmn, if (!action) return NULL; - action->push_vlan.vlan_hdr = vlan_hdr_h; + action->push_vlan->vlan_hdr = vlan_hdr_h; return action; } @@ -898,7 +920,7 @@ mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn, if (!action) goto dec_ref; - action->reformat.dmn = dmn; + action->reformat->dmn = dmn; ret = dr_action_create_reformat_action(dmn, data_sz, @@ -1104,17 +1126,17 @@ dr_action_modify_check_set_field_limitation(struct mlx5dr_action *action, const __be64 *sw_action) { u16 sw_field = MLX5_GET(set_action_in, sw_action, field); - struct mlx5dr_domain *dmn = action->rewrite.dmn; + struct mlx5dr_domain *dmn = action->rewrite->dmn; if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_A) { - action->rewrite.allow_rx = 0; + action->rewrite->allow_rx = 0; if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_TX) { mlx5dr_dbg(dmn, "Unsupported field %d for RX/FDB set action\n", sw_field); return -EINVAL; } } else if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_B) { - action->rewrite.allow_tx = 0; + action->rewrite->allow_tx = 0; if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_RX) { mlx5dr_dbg(dmn, "Unsupported field %d for TX/FDB set action\n", sw_field); @@ -1122,7 +1144,7 @@ dr_action_modify_check_set_field_limitation(struct mlx5dr_action *action, } } - if (!action->rewrite.allow_rx && !action->rewrite.allow_tx) { + if (!action->rewrite->allow_rx && !action->rewrite->allow_tx) { mlx5dr_dbg(dmn, "Modify SET actions not supported on both RX and TX\n"); return -EINVAL; } @@ -1135,7 +1157,7 @@ dr_action_modify_check_add_field_limitation(struct mlx5dr_action *action, const __be64 *sw_action) { u16 sw_field = 
MLX5_GET(set_action_in, sw_action, field); - struct mlx5dr_domain *dmn = action->rewrite.dmn; + struct mlx5dr_domain *dmn = action->rewrite->dmn; if (sw_field != MLX5_ACTION_IN_FIELD_OUT_IP_TTL && sw_field != MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT && @@ -1153,7 +1175,7 @@ static int dr_action_modify_check_copy_field_limitation(struct mlx5dr_action *action, const __be64 *sw_action) { - struct mlx5dr_domain *dmn = action->rewrite.dmn; + struct mlx5dr_domain *dmn = action->rewrite->dmn; u16 sw_fields[2]; int i; @@ -1162,14 +1184,14 @@ dr_action_modify_check_copy_field_limitation(struct mlx5dr_action *action, for (i = 0; i < 2; i++) { if (sw_fields[i] == MLX5_ACTION_IN_FIELD_METADATA_REG_A) { - action->rewrite.allow_rx = 0; + action->rewrite->allow_rx = 0; if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_TX) { mlx5dr_dbg(dmn, "Unsupported field %d for RX/FDB set action\n", sw_fields[i]); return -EINVAL; } } else if (sw_fields[i] == MLX5_ACTION_IN_FIELD_METADATA_REG_B) { - action->rewrite.allow_tx = 0; + action->rewrite->allow_tx = 0; if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_RX) { mlx5dr_dbg(dmn, "Unsupported field %d for TX/FDB set action\n", sw_fields[i]); @@ -1178,7 +1200,7 @@ dr_action_modify_check_copy_field_limitation(struct mlx5dr_action *action, } } - if (!action->rewrite.allow_rx && !action->rewrite.allow_tx) { + if (!action->rewrite->allow_rx && !action->rewrite->allow_tx) { mlx5dr_dbg(dmn, "Modify copy actions not supported on both RX and TX\n"); return -EINVAL; } @@ -1190,7 +1212,7 @@ static int dr_action_modify_check_field_limitation(struct mlx5dr_action *action, const __be64 *sw_action) { - struct mlx5dr_domain *dmn = action->rewrite.dmn; + struct mlx5dr_domain *dmn = action->rewrite->dmn; u8 action_type; int ret; @@ -1239,7 +1261,7 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action, { const struct mlx5dr_ste_action_modify_field *hw_dst_action_info; const struct mlx5dr_ste_action_modify_field *hw_src_action_info; - struct mlx5dr_domain *dmn = action->rewrite.dmn; + struct mlx5dr_domain *dmn = action->rewrite->dmn; int ret, i, hw_idx = 0; __be64 *sw_action; __be64 hw_action; @@ -1249,8 +1271,8 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action, *modify_ttl = false; - action->rewrite.allow_rx = 1; - action->rewrite.allow_tx = 1; + action->rewrite->allow_rx = 1; + action->rewrite->allow_tx = 1; for (i = 0; i < num_sw_actions; i++) { sw_action = &sw_actions[i]; @@ -1358,13 +1380,13 @@ static int dr_action_create_modify_action(struct mlx5dr_domain *dmn, if (ret) goto free_hw_actions; - action->rewrite.chunk = chunk; - action->rewrite.modify_ttl = modify_ttl; - action->rewrite.data = (u8 *)hw_actions; - action->rewrite.num_of_actions = num_hw_actions; - action->rewrite.index = (chunk->icm_addr - - dmn->info.caps.hdr_modify_icm_addr) / - ACTION_CACHE_LINE_SIZE; + action->rewrite->chunk = chunk; + action->rewrite->modify_ttl = modify_ttl; + action->rewrite->data = (u8 *)hw_actions; + action->rewrite->num_of_actions = num_hw_actions; + action->rewrite->index = (chunk->icm_addr - + dmn->info.caps.hdr_modify_icm_addr) / + ACTION_CACHE_LINE_SIZE; ret = mlx5dr_send_postsend_action(dmn, action); if (ret) @@ -1399,7 +1421,7 @@ mlx5dr_action_create_modify_header(struct mlx5dr_domain *dmn, if (!action) goto dec_ref; - action->rewrite.dmn = dmn; + action->rewrite->dmn = dmn; ret = dr_action_create_modify_action(dmn, actions_sz, @@ -1451,8 +1473,8 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn, if (!action) return NULL; - action->vport.dmn = 
vport_dmn; - action->vport.caps = vport_cap; + action->vport->dmn = vport_dmn; + action->vport->caps = vport_cap; return action; } @@ -1464,44 +1486,44 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action) switch (action->action_type) { case DR_ACTION_TYP_FT: - if (action->dest_tbl.is_fw_tbl) - refcount_dec(&action->dest_tbl.fw_tbl.dmn->refcount); + if (action->dest_tbl->is_fw_tbl) + refcount_dec(&action->dest_tbl->fw_tbl.dmn->refcount); else - refcount_dec(&action->dest_tbl.tbl->refcount); + refcount_dec(&action->dest_tbl->tbl->refcount); - if (action->dest_tbl.is_fw_tbl && - action->dest_tbl.fw_tbl.num_of_ref_actions) { + if (action->dest_tbl->is_fw_tbl && + action->dest_tbl->fw_tbl.num_of_ref_actions) { struct mlx5dr_action **ref_actions; int i; - ref_actions = action->dest_tbl.fw_tbl.ref_actions; - for (i = 0; i < action->dest_tbl.fw_tbl.num_of_ref_actions; i++) + ref_actions = action->dest_tbl->fw_tbl.ref_actions; + for (i = 0; i < action->dest_tbl->fw_tbl.num_of_ref_actions; i++) refcount_dec(&ref_actions[i]->refcount); kfree(ref_actions); - mlx5dr_fw_destroy_md_tbl(action->dest_tbl.fw_tbl.dmn, - action->dest_tbl.fw_tbl.id, - action->dest_tbl.fw_tbl.group_id); + mlx5dr_fw_destroy_md_tbl(action->dest_tbl->fw_tbl.dmn, + action->dest_tbl->fw_tbl.id, + action->dest_tbl->fw_tbl.group_id); } break; case DR_ACTION_TYP_TNL_L2_TO_L2: - refcount_dec(&action->reformat.dmn->refcount); + refcount_dec(&action->reformat->dmn->refcount); break; case DR_ACTION_TYP_TNL_L3_TO_L2: - mlx5dr_icm_free_chunk(action->rewrite.chunk); - refcount_dec(&action->reformat.dmn->refcount); + mlx5dr_icm_free_chunk(action->rewrite->chunk); + refcount_dec(&action->rewrite->dmn->refcount); break; case DR_ACTION_TYP_L2_TO_TNL_L2: case DR_ACTION_TYP_L2_TO_TNL_L3: - mlx5dr_cmd_destroy_reformat_ctx((action->reformat.dmn)->mdev, - action->reformat.reformat_id); - refcount_dec(&action->reformat.dmn->refcount); + mlx5dr_cmd_destroy_reformat_ctx((action->reformat->dmn)->mdev, + action->reformat->reformat_id); + refcount_dec(&action->reformat->dmn->refcount); break; case DR_ACTION_TYP_MODIFY_HDR: - mlx5dr_icm_free_chunk(action->rewrite.chunk); - kfree(action->rewrite.data); - refcount_dec(&action->rewrite.dmn->refcount); + mlx5dr_icm_free_chunk(action->rewrite->chunk); + kfree(action->rewrite->data); + refcount_dec(&action->rewrite->dmn->refcount); break; default: break; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c index 30b0136b5bc7..461473d31e2e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c @@ -287,7 +287,7 @@ int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev, u32 *in; int err; - in = kzalloc(inlen, GFP_KERNEL); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -302,7 +302,7 @@ int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev, *group_id = MLX5_GET(create_flow_group_out, out, group_id); out: - kfree(in); + kvfree(in); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c index 8a6a56f9dc4e..c1926d927008 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c @@ -406,7 +406,7 @@ static int dr_get_tbl_copy_details(struct mlx5dr_domain *dmn, alloc_size = *num_stes * DR_STE_SIZE; } - *data = kzalloc(alloc_size, GFP_KERNEL); + *data = 
kvzalloc(alloc_size, GFP_KERNEL); if (!*data) return -ENOMEM; @@ -505,7 +505,7 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn, } out_free: - kfree(data); + kvfree(data); return ret; } @@ -562,7 +562,7 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn, } out_free: - kfree(data); + kvfree(data); return ret; } @@ -572,12 +572,12 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn, struct postsend_info send_info = {}; int ret; - send_info.write.addr = (uintptr_t)action->rewrite.data; - send_info.write.length = action->rewrite.num_of_actions * + send_info.write.addr = (uintptr_t)action->rewrite->data; + send_info.write.length = action->rewrite->num_of_actions * DR_MODIFY_ACTION_SIZE; send_info.write.lkey = 0; - send_info.remote_addr = action->rewrite.chunk->mr_addr; - send_info.rkey = action->rewrite.chunk->rkey; + send_info.remote_addr = action->rewrite->chunk->mr_addr; + send_info.rkey = action->rewrite->chunk->rkey; ret = dr_postsend_icm_data(dmn, &send_info); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c index b599b6beb5b9..30ae3cda6d2e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c @@ -29,7 +29,7 @@ int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl, last_htbl = tbl->rx.s_anchor; tbl->rx.default_icm_addr = action ? - action->dest_tbl.tbl->rx.s_anchor->chunk->icm_addr : + action->dest_tbl->tbl->rx.s_anchor->chunk->icm_addr : tbl->rx.nic_dmn->default_icm_addr; info.type = CONNECT_MISS; @@ -53,7 +53,7 @@ int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl, last_htbl = tbl->tx.s_anchor; tbl->tx.default_icm_addr = action ? - action->dest_tbl.tbl->tx.s_anchor->chunk->icm_addr : + action->dest_tbl->tbl->tx.s_anchor->chunk->icm_addr : tbl->tx.nic_dmn->default_icm_addr; info.type = CONNECT_MISS; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 4af0e4e6a13c..462673947f3c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -806,53 +806,71 @@ struct mlx5dr_ste_action_modify_field { u8 l4_type; }; +struct mlx5dr_action_rewrite { + struct mlx5dr_domain *dmn; + struct mlx5dr_icm_chunk *chunk; + u8 *data; + u16 num_of_actions; + u32 index; + u8 allow_rx:1; + u8 allow_tx:1; + u8 modify_ttl:1; +}; + +struct mlx5dr_action_reformat { + struct mlx5dr_domain *dmn; + u32 reformat_id; + u32 reformat_size; +}; + +struct mlx5dr_action_dest_tbl { + u8 is_fw_tbl:1; + union { + struct mlx5dr_table *tbl; + struct { + struct mlx5dr_domain *dmn; + u32 id; + u32 group_id; + enum fs_flow_table_type type; + u64 rx_icm_addr; + u64 tx_icm_addr; + struct mlx5dr_action **ref_actions; + u32 num_of_ref_actions; + } fw_tbl; + }; +}; + +struct mlx5dr_action_ctr { + u32 ctr_id; + u32 offeset; +}; + +struct mlx5dr_action_vport { + struct mlx5dr_domain *dmn; + struct mlx5dr_cmd_vport_cap *caps; +}; + +struct mlx5dr_action_push_vlan { + u32 vlan_hdr; /* tpid_pcp_dei_vid */ +}; + +struct mlx5dr_action_flow_tag { + u32 flow_tag; +}; + struct mlx5dr_action { enum mlx5dr_action_type action_type; refcount_t refcount; + union { - struct { - struct mlx5dr_domain *dmn; - struct mlx5dr_icm_chunk *chunk; - u8 *data; - u16 num_of_actions; - u32 index; - u8 allow_rx:1; - u8 allow_tx:1; - u8 modify_ttl:1; - } rewrite; - struct { - struct 
mlx5dr_domain *dmn; - u32 reformat_id; - u32 reformat_size; - } reformat; - struct { - u8 is_fw_tbl:1; - union { - struct mlx5dr_table *tbl; - struct { - struct mlx5dr_domain *dmn; - u32 id; - u32 group_id; - enum fs_flow_table_type type; - u64 rx_icm_addr; - u64 tx_icm_addr; - struct mlx5dr_action **ref_actions; - u32 num_of_ref_actions; - } fw_tbl; - }; - } dest_tbl; - struct { - u32 ctr_id; - u32 offeset; - } ctr; - struct { - struct mlx5dr_domain *dmn; - struct mlx5dr_cmd_vport_cap *caps; - } vport; - struct { - u32 vlan_hdr; /* tpid_pcp_dei_vid */ - } push_vlan; - u32 flow_tag; + void *data; + struct mlx5dr_action_rewrite *rewrite; + struct mlx5dr_action_reformat *reformat; + struct mlx5dr_action_dest_tbl *dest_tbl; + struct mlx5dr_action_ctr *ctr; + struct mlx5dr_action_vport *vport; + struct mlx5dr_action_push_vlan *push_vlan; + struct mlx5dr_action_flow_tag *flow_tag; }; }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c index 078601d31cde..c8061beed6db 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c @@ -1059,6 +1059,131 @@ mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info) return mlxsw_sp->ptp_ops->get_ts_info(mlxsw_sp, info); } +static void +mlxsw_sp_get_eth_phy_stats(struct net_device *dev, + struct ethtool_eth_phy_stats *phy_stats) +{ + char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; + + if (mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, + 0, ppcnt_pl)) + return; + + phy_stats->SymbolErrorDuringCarrier = + mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get(ppcnt_pl); +} + +static void +mlxsw_sp_get_eth_mac_stats(struct net_device *dev, + struct ethtool_eth_mac_stats *mac_stats) +{ + char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; + + if (mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, + 0, ppcnt_pl)) + return; + + mac_stats->FramesTransmittedOK = + mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl); + mac_stats->FramesReceivedOK = + mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl); + mac_stats->FrameCheckSequenceErrors = + mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl); + mac_stats->AlignmentErrors = + mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl); + mac_stats->OctetsTransmittedOK = + mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl); + mac_stats->OctetsReceivedOK = + mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl); + mac_stats->MulticastFramesXmittedOK = + mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get(ppcnt_pl); + mac_stats->BroadcastFramesXmittedOK = + mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get(ppcnt_pl); + mac_stats->MulticastFramesReceivedOK = + mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl); + mac_stats->BroadcastFramesReceivedOK = + mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get(ppcnt_pl); + mac_stats->InRangeLengthErrors = + mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl); + mac_stats->OutOfRangeLengthField = + mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl); + mac_stats->FrameTooLongErrors = + mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl); +} + +static void +mlxsw_sp_get_eth_ctrl_stats(struct net_device *dev, + struct ethtool_eth_ctrl_stats *ctrl_stats) +{ + char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; + + if (mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, + 0, ppcnt_pl)) + return; + + ctrl_stats->MACControlFramesTransmitted = + mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get(ppcnt_pl); 
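+	/* The received and unsupported-opcode counters below come from the
+	 * same PPCNT snapshot as the transmitted counter above; one register
+	 * read feeds all three IEEE 802.3 control statistics.
+	 */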
+ ctrl_stats->MACControlFramesReceived = + mlxsw_reg_ppcnt_a_mac_control_frames_received_get(ppcnt_pl); + ctrl_stats->UnsupportedOpcodesReceived = + mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get(ppcnt_pl); +} + +static const struct ethtool_rmon_hist_range mlxsw_rmon_ranges[] = { + { 0, 64 }, + { 65, 127 }, + { 128, 255 }, + { 256, 511 }, + { 512, 1023 }, + { 1024, 1518 }, + { 1519, 2047 }, + { 2048, 4095 }, + { 4096, 8191 }, + { 8192, 10239 }, + {} +}; + +static void +mlxsw_sp_get_rmon_stats(struct net_device *dev, + struct ethtool_rmon_stats *rmon, + const struct ethtool_rmon_hist_range **ranges) +{ + char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; + + if (mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, + 0, ppcnt_pl)) + return; + + rmon->undersize_pkts = + mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get(ppcnt_pl); + rmon->oversize_pkts = + mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get(ppcnt_pl); + rmon->fragments = + mlxsw_reg_ppcnt_ether_stats_fragments_get(ppcnt_pl); + + rmon->hist[0] = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get(ppcnt_pl); + rmon->hist[1] = + mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get(ppcnt_pl); + rmon->hist[2] = + mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get(ppcnt_pl); + rmon->hist[3] = + mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get(ppcnt_pl); + rmon->hist[4] = + mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get(ppcnt_pl); + rmon->hist[5] = + mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get(ppcnt_pl); + rmon->hist[6] = + mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get(ppcnt_pl); + rmon->hist[7] = + mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get(ppcnt_pl); + rmon->hist[8] = + mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get(ppcnt_pl); + rmon->hist[9] = + mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get(ppcnt_pl); + + *ranges = mlxsw_rmon_ranges; +} + const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { .cap_link_lanes_supported = true, .get_drvinfo = mlxsw_sp_port_get_drvinfo, @@ -1075,6 +1200,10 @@ const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { .get_module_info = mlxsw_sp_get_module_info, .get_module_eeprom = mlxsw_sp_get_module_eeprom, .get_ts_info = mlxsw_sp_get_ts_info, + .get_eth_phy_stats = mlxsw_sp_get_eth_phy_stats, + .get_eth_mac_stats = mlxsw_sp_get_eth_mac_stats, + .get_eth_ctrl_stats = mlxsw_sp_get_eth_ctrl_stats, + .get_rmon_stats = mlxsw_sp_get_rmon_stats, }; struct mlxsw_sp1_port_link_mode { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index c1f05c17557d..eeccd586e781 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -2916,7 +2916,8 @@ mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work * return; if (switchdev_work->event == SWITCHDEV_FDB_ADD_TO_DEVICE && - !switchdev_work->fdb_info.added_by_user) + (!switchdev_work->fdb_info.added_by_user || + switchdev_work->fdb_info.is_local)) return; if (!netif_running(dev)) @@ -2971,7 +2972,7 @@ static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work) switch (switchdev_work->event) { case SWITCHDEV_FDB_ADD_TO_DEVICE: fdb_info = &switchdev_work->fdb_info; - if (!fdb_info->added_by_user) + if (!fdb_info->added_by_user || fdb_info->is_local) break; err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true); if (err) diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c index 
2feed6ce19d3..13eef6e9bd2d 100644 --- a/drivers/net/ethernet/micrel/ks8851_common.c +++ b/drivers/net/ethernet/micrel/ks8851_common.c @@ -193,11 +193,10 @@ static void ks8851_read_mac_addr(struct net_device *dev) static void ks8851_init_mac(struct ks8851_net *ks, struct device_node *np) { struct net_device *dev = ks->netdev; - const u8 *mac_addr; + int ret; - mac_addr = of_get_mac_address(np); - if (!IS_ERR(mac_addr)) { - ether_addr_copy(dev->dev_addr, mac_addr); + ret = of_get_mac_address(np, dev->dev_addr); + if (!ret) { ks8851_write_mac_addr(dev); return; } diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 11a1dc4c436d..dae10328c6cf 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -2771,7 +2771,6 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev, { struct lan743x_adapter *adapter = NULL; struct net_device *netdev = NULL; - const void *mac_addr; int ret = -ENODEV; netdev = devm_alloc_etherdev(&pdev->dev, @@ -2788,9 +2787,7 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev, NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED; netdev->max_mtu = LAN743X_MAX_FRAME_SIZE; - mac_addr = of_get_mac_address(pdev->dev.of_node); - if (!IS_ERR(mac_addr)) - ether_addr_copy(adapter->mac_address, mac_addr); + of_get_mac_address(pdev->dev.of_node, adapter->mac_address); ret = lan743x_pci_init(adapter, pdev); if (ret) diff --git a/drivers/net/ethernet/microsoft/Kconfig b/drivers/net/ethernet/microsoft/Kconfig new file mode 100644 index 000000000000..e1ac0a5d808d --- /dev/null +++ b/drivers/net/ethernet/microsoft/Kconfig @@ -0,0 +1,29 @@ +# +# Microsoft Azure network device configuration +# + +config NET_VENDOR_MICROSOFT + bool "Microsoft Network Devices" + default y + help + If you have a network (Ethernet) device belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip the + question about Microsoft network devices. If you say Y, you will be + asked for your specific device in the following question. + +if NET_VENDOR_MICROSOFT + +config MICROSOFT_MANA + tristate "Microsoft Azure Network Adapter (MANA) support" + depends on PCI_MSI && X86_64 + select PCI_HYPERV + help + This driver supports Microsoft Azure Network Adapter (MANA). + So far, the driver is only supported on X86_64. + + To compile this driver as a module, choose M here. + The module will be called mana. + +endif #NET_VENDOR_MICROSOFT diff --git a/drivers/net/ethernet/microsoft/Makefile b/drivers/net/ethernet/microsoft/Makefile new file mode 100644 index 000000000000..d2ddc218135f --- /dev/null +++ b/drivers/net/ethernet/microsoft/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Microsoft Azure network device driver. 
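+# Kbuild descends into mana/ only when CONFIG_MICROSOFT_MANA is y or m.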
+# + +obj-$(CONFIG_MICROSOFT_MANA) += mana/ diff --git a/drivers/net/ethernet/microsoft/mana/Makefile b/drivers/net/ethernet/microsoft/mana/Makefile new file mode 100644 index 000000000000..0edd5bb685f3 --- /dev/null +++ b/drivers/net/ethernet/microsoft/mana/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +# +# Makefile for the Microsoft Azure Network Adapter driver + +obj-$(CONFIG_MICROSOFT_MANA) += mana.o +mana-objs := gdma_main.o shm_channel.o hw_channel.o mana_en.o mana_ethtool.o diff --git a/drivers/net/ethernet/microsoft/mana/gdma.h b/drivers/net/ethernet/microsoft/mana/gdma.h new file mode 100644 index 000000000000..33e53d32e891 --- /dev/null +++ b/drivers/net/ethernet/microsoft/mana/gdma.h @@ -0,0 +1,673 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright (c) 2021, Microsoft Corporation. */ + +#ifndef _GDMA_H +#define _GDMA_H + +#include <linux/dma-mapping.h> +#include <linux/netdevice.h> + +#include "shm_channel.h" + +/* Structures labeled with "HW DATA" are exchanged with the hardware. All of + * them are naturally aligned and hence don't need __packed. + */ + +enum gdma_request_type { + GDMA_VERIFY_VF_DRIVER_VERSION = 1, + GDMA_QUERY_MAX_RESOURCES = 2, + GDMA_LIST_DEVICES = 3, + GDMA_REGISTER_DEVICE = 4, + GDMA_DEREGISTER_DEVICE = 5, + GDMA_GENERATE_TEST_EQE = 10, + GDMA_CREATE_QUEUE = 12, + GDMA_DISABLE_QUEUE = 13, + GDMA_CREATE_DMA_REGION = 25, + GDMA_DMA_REGION_ADD_PAGES = 26, + GDMA_DESTROY_DMA_REGION = 27, +}; + +enum gdma_queue_type { + GDMA_INVALID_QUEUE, + GDMA_SQ, + GDMA_RQ, + GDMA_CQ, + GDMA_EQ, +}; + +enum gdma_work_request_flags { + GDMA_WR_NONE = 0, + GDMA_WR_OOB_IN_SGL = BIT(0), + GDMA_WR_PAD_BY_SGE0 = BIT(1), +}; + +enum gdma_eqe_type { + GDMA_EQE_COMPLETION = 3, + GDMA_EQE_TEST_EVENT = 64, + GDMA_EQE_HWC_INIT_EQ_ID_DB = 129, + GDMA_EQE_HWC_INIT_DATA = 130, + GDMA_EQE_HWC_INIT_DONE = 131, +}; + +enum { + GDMA_DEVICE_NONE = 0, + GDMA_DEVICE_HWC = 1, + GDMA_DEVICE_MANA = 2, +}; + +struct gdma_resource { + /* Protect the bitmap */ + spinlock_t lock; + + /* The bitmap size in bits. */ + u32 size; + + /* The bitmap tracks the resources. 
*/ + unsigned long *map; +}; + +union gdma_doorbell_entry { + u64 as_uint64; + + struct { + u64 id : 24; + u64 reserved : 8; + u64 tail_ptr : 31; + u64 arm : 1; + } cq; + + struct { + u64 id : 24; + u64 wqe_cnt : 8; + u64 tail_ptr : 32; + } rq; + + struct { + u64 id : 24; + u64 reserved : 8; + u64 tail_ptr : 32; + } sq; + + struct { + u64 id : 16; + u64 reserved : 16; + u64 tail_ptr : 31; + u64 arm : 1; + } eq; +}; /* HW DATA */ + +struct gdma_msg_hdr { + u32 hdr_type; + u32 msg_type; + u16 msg_version; + u16 hwc_msg_id; + u32 msg_size; +}; /* HW DATA */ + +struct gdma_dev_id { + union { + struct { + u16 type; + u16 instance; + }; + + u32 as_uint32; + }; +}; /* HW DATA */ + +struct gdma_req_hdr { + struct gdma_msg_hdr req; + struct gdma_msg_hdr resp; /* The expected response */ + struct gdma_dev_id dev_id; + u32 activity_id; +}; /* HW DATA */ + +struct gdma_resp_hdr { + struct gdma_msg_hdr response; + struct gdma_dev_id dev_id; + u32 activity_id; + u32 status; + u32 reserved; +}; /* HW DATA */ + +struct gdma_general_req { + struct gdma_req_hdr hdr; +}; /* HW DATA */ + +#define GDMA_MESSAGE_V1 1 + +struct gdma_general_resp { + struct gdma_resp_hdr hdr; +}; /* HW DATA */ + +#define GDMA_STANDARD_HEADER_TYPE 0 + +static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code, + u32 req_size, u32 resp_size) +{ + hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE; + hdr->req.msg_type = code; + hdr->req.msg_version = GDMA_MESSAGE_V1; + hdr->req.msg_size = req_size; + + hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE; + hdr->resp.msg_type = code; + hdr->resp.msg_version = GDMA_MESSAGE_V1; + hdr->resp.msg_size = resp_size; +} + +/* The 16-byte struct is part of the GDMA work queue entry (WQE). */ +struct gdma_sge { + u64 address; + u32 mem_key; + u32 size; +}; /* HW DATA */ + +struct gdma_wqe_request { + struct gdma_sge *sgl; + u32 num_sge; + + u32 inline_oob_size; + const void *inline_oob_data; + + u32 flags; + u32 client_data_unit; +}; + +enum gdma_page_type { + GDMA_PAGE_TYPE_4K, +}; + +#define GDMA_INVALID_DMA_REGION 0 + +struct gdma_mem_info { + struct device *dev; + + dma_addr_t dma_handle; + void *virt_addr; + u64 length; + + /* Allocated by the PF driver */ + u64 gdma_region; +}; + +#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8 + +struct gdma_dev { + struct gdma_context *gdma_context; + + struct gdma_dev_id dev_id; + + u32 pdid; + u32 doorbell; + u32 gpa_mkey; + + /* GDMA driver specific pointer */ + void *driver_data; +}; + +#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE + +#define GDMA_CQE_SIZE 64 +#define GDMA_EQE_SIZE 16 +#define GDMA_MAX_SQE_SIZE 512 +#define GDMA_MAX_RQE_SIZE 256 + +#define GDMA_COMP_DATA_SIZE 0x3C + +#define GDMA_EVENT_DATA_SIZE 0xC + +/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */ +#define GDMA_WQE_BU_SIZE 32 + +#define INVALID_PDID UINT_MAX +#define INVALID_DOORBELL UINT_MAX +#define INVALID_MEM_KEY UINT_MAX +#define INVALID_QUEUE_ID UINT_MAX +#define INVALID_PCI_MSIX_INDEX UINT_MAX + +struct gdma_comp { + u32 cqe_data[GDMA_COMP_DATA_SIZE / 4]; + u32 wq_num; + bool is_sq; +}; + +struct gdma_event { + u32 details[GDMA_EVENT_DATA_SIZE / 4]; + u8 type; +}; + +struct gdma_queue; + +#define CQE_POLLING_BUFFER 512 +struct mana_eq { + struct gdma_queue *eq; + struct gdma_comp cqe_poll[CQE_POLLING_BUFFER]; +}; + +typedef void gdma_eq_callback(void *context, struct gdma_queue *q, + struct gdma_event *e); + +typedef void gdma_cq_callback(void *context, struct gdma_queue *q); + +/* The 'head' is the producer index. 
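
The "HW DATA" annotations above mark layouts that are consumed by the device and therefore have fixed sizes. As a sketch (not part of the patch), the sizes promised by the comments can be pinned down at compile time with the kernel's static_assert(): a doorbell entry is one 64-bit register write, and a scatter-gather entry is the 16 bytes noted above.

static_assert(sizeof(union gdma_doorbell_entry) == 8);
static_assert(sizeof(struct gdma_sge) == 16);
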
For SQ/RQ, when the driver posts a WQE + * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the + * driver increases the 'head' in BUs rather than in bytes, and notifies + * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track + * the HW head, and increases the 'head' by 1 for every processed EQE/CQE. + * + * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is + * processed, the driver increases the 'tail' to indicate that WQEs have + * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ. + * + * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures + * that the EQ/CQ is big enough so they can't overflow, and the driver uses + * the owner bits mechanism to detect if the queue has become empty. + */ +struct gdma_queue { + struct gdma_dev *gdma_dev; + + enum gdma_queue_type type; + u32 id; + + struct gdma_mem_info mem_info; + + void *queue_mem_ptr; + u32 queue_size; + + bool monitor_avl_buf; + + u32 head; + u32 tail; + + /* Extra fields specific to EQ/CQ. */ + union { + struct { + bool disable_needed; + + gdma_eq_callback *callback; + void *context; + + unsigned int msix_index; + + u32 log2_throttle_limit; + + /* NAPI data */ + struct napi_struct napi; + int work_done; + int budget; + } eq; + + struct { + gdma_cq_callback *callback; + void *context; + + struct gdma_queue *parent; /* For CQ/EQ relationship */ + } cq; + }; +}; + +struct gdma_queue_spec { + enum gdma_queue_type type; + bool monitor_avl_buf; + unsigned int queue_size; + + /* Extra fields specific to EQ/CQ. */ + union { + struct { + gdma_eq_callback *callback; + void *context; + + unsigned long log2_throttle_limit; + + /* Only used by the MANA device. */ + struct net_device *ndev; + } eq; + + struct { + gdma_cq_callback *callback; + void *context; + + struct gdma_queue *parent_eq; + + } cq; + }; +}; + +struct gdma_irq_context { + void (*handler)(void *arg); + void *arg; +}; + +struct gdma_context { + struct device *dev; + + /* Per-vPort max number of queues */ + unsigned int max_num_queues; + unsigned int max_num_msix; + unsigned int num_msix_usable; + struct gdma_resource msix_resource; + struct gdma_irq_context *irq_contexts; + + /* This maps a CQ index to the queue structure. 
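
A sketch (not in the patch) of the owner-bits test described in the comment above, for a queue whose entry count is a power of two: the producer stamps each entry with the low GDMA_CQE_OWNER_BITS bits of its lap number, so a consumer at 'head' can tell "not produced yet" from "producer lapped me". This is the same arithmetic mana_gd_read_cqe() and mana_gd_process_eq_events() use later in gdma_main.c.

static int example_entry_state(u32 head, u32 num_entries, u32 owner_bits)
{
	u32 old_bits = (head / num_entries - 1) & GDMA_CQE_OWNER_MASK;
	u32 new_bits = (head / num_entries) & GDMA_CQE_OWNER_MASK;

	if (owner_bits == old_bits)
		return 0;	/* empty: entry still carries the previous lap */
	if (owner_bits != new_bits)
		return -1;	/* overflow: producer is more than a lap ahead */
	return 1;		/* entry is valid on the current lap */
}
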
*/ + unsigned int max_num_cqs; + struct gdma_queue **cq_table; + + /* Protect eq_test_event and test_event_eq_id */ + struct mutex eq_test_event_mutex; + struct completion eq_test_event; + u32 test_event_eq_id; + + void __iomem *bar0_va; + void __iomem *shm_base; + void __iomem *db_page_base; + u32 db_page_size; + + /* Shared memory channel (used to bootstrap HWC) */ + struct shm_channel shm_channel; + + /* Hardware communication channel (HWC) */ + struct gdma_dev hwc; + + /* Azure network adapter */ + struct gdma_dev mana; +}; + +#define MAX_NUM_GDMA_DEVICES 4 + +static inline bool mana_gd_is_mana(struct gdma_dev *gd) +{ + return gd->dev_id.type == GDMA_DEVICE_MANA; +} + +static inline bool mana_gd_is_hwc(struct gdma_dev *gd) +{ + return gd->dev_id.type == GDMA_DEVICE_HWC; +} + +u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset); +u32 mana_gd_wq_avail_space(struct gdma_queue *wq); + +int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq); + +int mana_gd_create_hwc_queue(struct gdma_dev *gd, + const struct gdma_queue_spec *spec, + struct gdma_queue **queue_ptr); + +int mana_gd_create_mana_eq(struct gdma_dev *gd, + const struct gdma_queue_spec *spec, + struct gdma_queue **queue_ptr); + +int mana_gd_create_mana_wq_cq(struct gdma_dev *gd, + const struct gdma_queue_spec *spec, + struct gdma_queue **queue_ptr); + +void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue); + +int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe); + +void mana_gd_arm_cq(struct gdma_queue *cq); + +struct gdma_wqe { + u32 reserved :24; + u32 last_vbytes :8; + + union { + u32 flags; + + struct { + u32 num_sge :8; + u32 inline_oob_size_div4:3; + u32 client_oob_in_sgl :1; + u32 reserved1 :4; + u32 client_data_unit :14; + u32 reserved2 :2; + }; + }; +}; /* HW DATA */ + +#define INLINE_OOB_SMALL_SIZE 8 +#define INLINE_OOB_LARGE_SIZE 24 + +#define MAX_TX_WQE_SIZE 512 +#define MAX_RX_WQE_SIZE 256 + +struct gdma_cqe { + u32 cqe_data[GDMA_COMP_DATA_SIZE / 4]; + + union { + u32 as_uint32; + + struct { + u32 wq_num : 24; + u32 is_sq : 1; + u32 reserved : 4; + u32 owner_bits : 3; + }; + } cqe_info; +}; /* HW DATA */ + +#define GDMA_CQE_OWNER_BITS 3 + +#define GDMA_CQE_OWNER_MASK ((1 << GDMA_CQE_OWNER_BITS) - 1) + +#define SET_ARM_BIT 1 + +#define GDMA_EQE_OWNER_BITS 3 + +union gdma_eqe_info { + u32 as_uint32; + + struct { + u32 type : 8; + u32 reserved1 : 8; + u32 client_id : 2; + u32 reserved2 : 11; + u32 owner_bits : 3; + }; +}; /* HW DATA */ + +#define GDMA_EQE_OWNER_MASK ((1 << GDMA_EQE_OWNER_BITS) - 1) +#define INITIALIZED_OWNER_BIT(log2_num_entries) (1UL << (log2_num_entries)) + +struct gdma_eqe { + u32 details[GDMA_EVENT_DATA_SIZE / 4]; + u32 eqe_info; +}; /* HW DATA */ + +#define GDMA_REG_DB_PAGE_OFFSET 8 +#define GDMA_REG_DB_PAGE_SIZE 0x10 +#define GDMA_REG_SHM_OFFSET 0x18 + +struct gdma_posted_wqe_info { + u32 wqe_size_in_bu; +}; + +/* GDMA_GENERATE_TEST_EQE */ +struct gdma_generate_test_event_req { + struct gdma_req_hdr hdr; + u32 queue_index; +}; /* HW DATA */ + +/* GDMA_VERIFY_VF_DRIVER_VERSION */ +enum { + GDMA_PROTOCOL_V1 = 1, + GDMA_PROTOCOL_FIRST = GDMA_PROTOCOL_V1, + GDMA_PROTOCOL_LAST = GDMA_PROTOCOL_V1, +}; + +struct gdma_verify_ver_req { + struct gdma_req_hdr hdr; + + /* Mandatory fields required for protocol establishment */ + u64 protocol_ver_min; + u64 protocol_ver_max; + u64 drv_cap_flags1; + u64 drv_cap_flags2; + u64 drv_cap_flags3; + u64 drv_cap_flags4; + + /* Advisory fields */ + u64 drv_ver; + u32 os_type; /* Linux = 0x10; 
Windows = 0x20; Other = 0x30 */ + u32 reserved; + u32 os_ver_major; + u32 os_ver_minor; + u32 os_ver_build; + u32 os_ver_platform; + u64 reserved_2; + u8 os_ver_str1[128]; + u8 os_ver_str2[128]; + u8 os_ver_str3[128]; + u8 os_ver_str4[128]; +}; /* HW DATA */ + +struct gdma_verify_ver_resp { + struct gdma_resp_hdr hdr; + u64 gdma_protocol_ver; + u64 pf_cap_flags1; + u64 pf_cap_flags2; + u64 pf_cap_flags3; + u64 pf_cap_flags4; +}; /* HW DATA */ + +/* GDMA_QUERY_MAX_RESOURCES */ +struct gdma_query_max_resources_resp { + struct gdma_resp_hdr hdr; + u32 status; + u32 max_sq; + u32 max_rq; + u32 max_cq; + u32 max_eq; + u32 max_db; + u32 max_mst; + u32 max_cq_mod_ctx; + u32 max_mod_cq; + u32 max_msix; +}; /* HW DATA */ + +/* GDMA_LIST_DEVICES */ +struct gdma_list_devices_resp { + struct gdma_resp_hdr hdr; + u32 num_of_devs; + u32 reserved; + struct gdma_dev_id devs[64]; +}; /* HW DATA */ + +/* GDMA_REGISTER_DEVICE */ +struct gdma_register_device_resp { + struct gdma_resp_hdr hdr; + u32 pdid; + u32 gpa_mkey; + u32 db_id; +}; /* HW DATA */ + +/* GDMA_CREATE_QUEUE */ +struct gdma_create_queue_req { + struct gdma_req_hdr hdr; + u32 type; + u32 reserved1; + u32 pdid; + u32 doorbell_id; + u64 gdma_region; + u32 reserved2; + u32 queue_size; + u32 log2_throttle_limit; + u32 eq_pci_msix_index; + u32 cq_mod_ctx_id; + u32 cq_parent_eq_id; + u8 rq_drop_on_overrun; + u8 rq_err_on_wqe_overflow; + u8 rq_chain_rec_wqes; + u8 sq_hw_db; + u32 reserved3; +}; /* HW DATA */ + +struct gdma_create_queue_resp { + struct gdma_resp_hdr hdr; + u32 queue_index; +}; /* HW DATA */ + +/* GDMA_DISABLE_QUEUE */ +struct gdma_disable_queue_req { + struct gdma_req_hdr hdr; + u32 type; + u32 queue_index; + u32 alloc_res_id_on_creation; +}; /* HW DATA */ + +/* GDMA_CREATE_DMA_REGION */ +struct gdma_create_dma_region_req { + struct gdma_req_hdr hdr; + + /* The total size of the DMA region */ + u64 length; + + /* The offset in the first page */ + u32 offset_in_page; + + /* enum gdma_page_type */ + u32 gdma_page_type; + + /* The total number of pages */ + u32 page_count; + + /* If page_addr_list_len is smaller than page_count, + * the remaining page addresses will be added via the + * message GDMA_DMA_REGION_ADD_PAGES. 
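
A sketch of the sizing rule implied by this comment (not code from the patch): the flexible page_addr_list[] must fit in a single HWC request, so one GDMA_CREATE_DMA_REGION message can carry at most the following number of pages, and larger regions would have to be continued with GDMA_DMA_REGION_ADD_PAGES. mana_gd_create_dma_region() later in this patch simply rejects regions whose page list does not fit in one request.

static u32 example_pages_per_create_req(u32 max_req_msg_size)
{
	return (max_req_msg_size -
		sizeof(struct gdma_create_dma_region_req)) / sizeof(u64);
}
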
+ */ + u32 page_addr_list_len; + u64 page_addr_list[]; +}; /* HW DATA */ + +struct gdma_create_dma_region_resp { + struct gdma_resp_hdr hdr; + u64 gdma_region; +}; /* HW DATA */ + +/* GDMA_DMA_REGION_ADD_PAGES */ +struct gdma_dma_region_add_pages_req { + struct gdma_req_hdr hdr; + + u64 gdma_region; + + u32 page_addr_list_len; + u32 reserved3; + + u64 page_addr_list[]; +}; /* HW DATA */ + +/* GDMA_DESTROY_DMA_REGION */ +struct gdma_destroy_dma_region_req { + struct gdma_req_hdr hdr; + + u64 gdma_region; +}; /* HW DATA */ + +int mana_gd_verify_vf_version(struct pci_dev *pdev); + +int mana_gd_register_device(struct gdma_dev *gd); +int mana_gd_deregister_device(struct gdma_dev *gd); + +int mana_gd_post_work_request(struct gdma_queue *wq, + const struct gdma_wqe_request *wqe_req, + struct gdma_posted_wqe_info *wqe_info); + +int mana_gd_post_and_ring(struct gdma_queue *queue, + const struct gdma_wqe_request *wqe, + struct gdma_posted_wqe_info *wqe_info); + +int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r); +void mana_gd_free_res_map(struct gdma_resource *r); + +void mana_gd_wq_ring_doorbell(struct gdma_context *gc, + struct gdma_queue *queue); + +int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length, + struct gdma_mem_info *gmi); + +void mana_gd_free_memory(struct gdma_mem_info *gmi); + +int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req, + u32 resp_len, void *resp); +#endif /* _GDMA_H */ diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c new file mode 100644 index 000000000000..2f87bf90f8ec --- /dev/null +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -0,0 +1,1415 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright (c) 2021, Microsoft Corporation. */ + +#include <linux/module.h> +#include <linux/pci.h> + +#include "mana.h" + +static u32 mana_gd_r32(struct gdma_context *g, u64 offset) +{ + return readl(g->bar0_va + offset); +} + +static u64 mana_gd_r64(struct gdma_context *g, u64 offset) +{ + return readq(g->bar0_va + offset); +} + +static void mana_gd_init_registers(struct pci_dev *pdev) +{ + struct gdma_context *gc = pci_get_drvdata(pdev); + + gc->db_page_size = mana_gd_r32(gc, GDMA_REG_DB_PAGE_SIZE) & 0xFFFF; + + gc->db_page_base = gc->bar0_va + + mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET); + + gc->shm_base = gc->bar0_va + mana_gd_r64(gc, GDMA_REG_SHM_OFFSET); +} + +static int mana_gd_query_max_resources(struct pci_dev *pdev) +{ + struct gdma_context *gc = pci_get_drvdata(pdev); + struct gdma_query_max_resources_resp resp = {}; + struct gdma_general_req req = {}; + int err; + + mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES, + sizeof(req), sizeof(resp)); + + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); + if (err || resp.hdr.status) { + dev_err(gc->dev, "Failed to query resource info: %d, 0x%x\n", + err, resp.hdr.status); + return err ? 
err : -EPROTO; + } + + if (gc->num_msix_usable > resp.max_msix) + gc->num_msix_usable = resp.max_msix; + + if (gc->num_msix_usable <= 1) + return -ENOSPC; + + gc->max_num_queues = num_online_cpus(); + if (gc->max_num_queues > MANA_MAX_NUM_QUEUES) + gc->max_num_queues = MANA_MAX_NUM_QUEUES; + + if (gc->max_num_queues > resp.max_eq) + gc->max_num_queues = resp.max_eq; + + if (gc->max_num_queues > resp.max_cq) + gc->max_num_queues = resp.max_cq; + + if (gc->max_num_queues > resp.max_sq) + gc->max_num_queues = resp.max_sq; + + if (gc->max_num_queues > resp.max_rq) + gc->max_num_queues = resp.max_rq; + + return 0; +} + +static int mana_gd_detect_devices(struct pci_dev *pdev) +{ + struct gdma_context *gc = pci_get_drvdata(pdev); + struct gdma_list_devices_resp resp = {}; + struct gdma_general_req req = {}; + struct gdma_dev_id dev; + u32 i, max_num_devs; + u16 dev_type; + int err; + + mana_gd_init_req_hdr(&req.hdr, GDMA_LIST_DEVICES, sizeof(req), + sizeof(resp)); + + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); + if (err || resp.hdr.status) { + dev_err(gc->dev, "Failed to detect devices: %d, 0x%x\n", err, + resp.hdr.status); + return err ? err : -EPROTO; + } + + max_num_devs = min_t(u32, MAX_NUM_GDMA_DEVICES, resp.num_of_devs); + + for (i = 0; i < max_num_devs; i++) { + dev = resp.devs[i]; + dev_type = dev.type; + + /* HWC is already detected in mana_hwc_create_channel(). */ + if (dev_type == GDMA_DEVICE_HWC) + continue; + + if (dev_type == GDMA_DEVICE_MANA) { + gc->mana.gdma_context = gc; + gc->mana.dev_id = dev; + } + } + + return gc->mana.dev_id.type == 0 ? -ENODEV : 0; +} + +int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req, + u32 resp_len, void *resp) +{ + struct hw_channel_context *hwc = gc->hwc.driver_data; + + return mana_hwc_send_request(hwc, req_len, req, resp_len, resp); +} + +int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length, + struct gdma_mem_info *gmi) +{ + dma_addr_t dma_handle; + void *buf; + + if (length < PAGE_SIZE || !is_power_of_2(length)) + return -EINVAL; + + gmi->dev = gc->dev; + buf = dma_alloc_coherent(gmi->dev, length, &dma_handle, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + gmi->dma_handle = dma_handle; + gmi->virt_addr = buf; + gmi->length = length; + + return 0; +} + +void mana_gd_free_memory(struct gdma_mem_info *gmi) +{ + dma_free_coherent(gmi->dev, gmi->length, gmi->virt_addr, + gmi->dma_handle); +} + +static int mana_gd_create_hw_eq(struct gdma_context *gc, + struct gdma_queue *queue) +{ + struct gdma_create_queue_resp resp = {}; + struct gdma_create_queue_req req = {}; + int err; + + if (queue->type != GDMA_EQ) + return -EINVAL; + + mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_QUEUE, + sizeof(req), sizeof(resp)); + + req.hdr.dev_id = queue->gdma_dev->dev_id; + req.type = queue->type; + req.pdid = queue->gdma_dev->pdid; + req.doorbell_id = queue->gdma_dev->doorbell; + req.gdma_region = queue->mem_info.gdma_region; + req.queue_size = queue->queue_size; + req.log2_throttle_limit = queue->eq.log2_throttle_limit; + req.eq_pci_msix_index = queue->eq.msix_index; + + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); + if (err || resp.hdr.status) { + dev_err(gc->dev, "Failed to create queue: %d, 0x%x\n", err, + resp.hdr.status); + return err ? 
err : -EPROTO; + } + + queue->id = resp.queue_index; + queue->eq.disable_needed = true; + queue->mem_info.gdma_region = GDMA_INVALID_DMA_REGION; + return 0; +} + +static int mana_gd_disable_queue(struct gdma_queue *queue) +{ + struct gdma_context *gc = queue->gdma_dev->gdma_context; + struct gdma_disable_queue_req req = {}; + struct gdma_general_resp resp = {}; + int err; + + WARN_ON(queue->type != GDMA_EQ); + + mana_gd_init_req_hdr(&req.hdr, GDMA_DISABLE_QUEUE, + sizeof(req), sizeof(resp)); + + req.hdr.dev_id = queue->gdma_dev->dev_id; + req.type = queue->type; + req.queue_index = queue->id; + req.alloc_res_id_on_creation = 1; + + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); + if (err || resp.hdr.status) { + dev_err(gc->dev, "Failed to disable queue: %d, 0x%x\n", err, + resp.hdr.status); + return err ? err : -EPROTO; + } + + return 0; +} + +#define DOORBELL_OFFSET_SQ 0x0 +#define DOORBELL_OFFSET_RQ 0x400 +#define DOORBELL_OFFSET_CQ 0x800 +#define DOORBELL_OFFSET_EQ 0xFF8 + +static void mana_gd_ring_doorbell(struct gdma_context *gc, u32 db_index, + enum gdma_queue_type q_type, u32 qid, + u32 tail_ptr, u8 num_req) +{ + void __iomem *addr = gc->db_page_base + gc->db_page_size * db_index; + union gdma_doorbell_entry e = {}; + + switch (q_type) { + case GDMA_EQ: + e.eq.id = qid; + e.eq.tail_ptr = tail_ptr; + e.eq.arm = num_req; + + addr += DOORBELL_OFFSET_EQ; + break; + + case GDMA_CQ: + e.cq.id = qid; + e.cq.tail_ptr = tail_ptr; + e.cq.arm = num_req; + + addr += DOORBELL_OFFSET_CQ; + break; + + case GDMA_RQ: + e.rq.id = qid; + e.rq.tail_ptr = tail_ptr; + e.rq.wqe_cnt = num_req; + + addr += DOORBELL_OFFSET_RQ; + break; + + case GDMA_SQ: + e.sq.id = qid; + e.sq.tail_ptr = tail_ptr; + + addr += DOORBELL_OFFSET_SQ; + break; + + default: + WARN_ON(1); + return; + } + + /* Ensure all writes are done before ring doorbell */ + wmb(); + + writeq(e.as_uint64, addr); +} + +void mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue) +{ + mana_gd_ring_doorbell(gc, queue->gdma_dev->doorbell, queue->type, + queue->id, queue->head * GDMA_WQE_BU_SIZE, 1); +} + +void mana_gd_arm_cq(struct gdma_queue *cq) +{ + struct gdma_context *gc = cq->gdma_dev->gdma_context; + + u32 num_cqe = cq->queue_size / GDMA_CQE_SIZE; + + u32 head = cq->head % (num_cqe << GDMA_CQE_OWNER_BITS); + + mana_gd_ring_doorbell(gc, cq->gdma_dev->doorbell, cq->type, cq->id, + head, SET_ARM_BIT); +} + +static void mana_gd_process_eqe(struct gdma_queue *eq) +{ + u32 head = eq->head % (eq->queue_size / GDMA_EQE_SIZE); + struct gdma_context *gc = eq->gdma_dev->gdma_context; + struct gdma_eqe *eq_eqe_ptr = eq->queue_mem_ptr; + union gdma_eqe_info eqe_info; + enum gdma_eqe_type type; + struct gdma_event event; + struct gdma_queue *cq; + struct gdma_eqe *eqe; + u32 cq_id; + + eqe = &eq_eqe_ptr[head]; + eqe_info.as_uint32 = eqe->eqe_info; + type = eqe_info.type; + + switch (type) { + case GDMA_EQE_COMPLETION: + cq_id = eqe->details[0] & 0xFFFFFF; + if (WARN_ON_ONCE(cq_id >= gc->max_num_cqs)) + break; + + cq = gc->cq_table[cq_id]; + if (WARN_ON_ONCE(!cq || cq->type != GDMA_CQ || cq->id != cq_id)) + break; + + if (cq->cq.callback) + cq->cq.callback(cq->cq.context, cq); + + break; + + case GDMA_EQE_TEST_EVENT: + gc->test_event_eq_id = eq->id; + complete(&gc->eq_test_event); + break; + + case GDMA_EQE_HWC_INIT_EQ_ID_DB: + case GDMA_EQE_HWC_INIT_DATA: + case GDMA_EQE_HWC_INIT_DONE: + if (!eq->eq.callback) + break; + + event.type = type; + memcpy(&event.details, &eqe->details, GDMA_EVENT_DATA_SIZE); + 
eq->eq.callback(eq->eq.context, eq, &event); + break; + + default: + break; + } +} + +static void mana_gd_process_eq_events(void *arg) +{ + u32 owner_bits, new_bits, old_bits; + union gdma_eqe_info eqe_info; + struct gdma_eqe *eq_eqe_ptr; + struct gdma_queue *eq = arg; + struct gdma_context *gc; + struct gdma_eqe *eqe; + unsigned int arm_bit; + u32 head, num_eqe; + int i; + + gc = eq->gdma_dev->gdma_context; + + num_eqe = eq->queue_size / GDMA_EQE_SIZE; + eq_eqe_ptr = eq->queue_mem_ptr; + + /* Process up to 5 EQEs at a time, and update the HW head. */ + for (i = 0; i < 5; i++) { + eqe = &eq_eqe_ptr[eq->head % num_eqe]; + eqe_info.as_uint32 = eqe->eqe_info; + owner_bits = eqe_info.owner_bits; + + old_bits = (eq->head / num_eqe - 1) & GDMA_EQE_OWNER_MASK; + /* No more entries */ + if (owner_bits == old_bits) + break; + + new_bits = (eq->head / num_eqe) & GDMA_EQE_OWNER_MASK; + if (owner_bits != new_bits) { + dev_err(gc->dev, "EQ %d: overflow detected\n", eq->id); + break; + } + + mana_gd_process_eqe(eq); + + eq->head++; + } + + /* Always rearm the EQ for HWC. For MANA, rearm it when NAPI is done. */ + if (mana_gd_is_hwc(eq->gdma_dev)) { + arm_bit = SET_ARM_BIT; + } else if (eq->eq.work_done < eq->eq.budget && + napi_complete_done(&eq->eq.napi, eq->eq.work_done)) { + arm_bit = SET_ARM_BIT; + } else { + arm_bit = 0; + } + + head = eq->head % (num_eqe << GDMA_EQE_OWNER_BITS); + + mana_gd_ring_doorbell(gc, eq->gdma_dev->doorbell, eq->type, eq->id, + head, arm_bit); +} + +static int mana_poll(struct napi_struct *napi, int budget) +{ + struct gdma_queue *eq = container_of(napi, struct gdma_queue, eq.napi); + + eq->eq.work_done = 0; + eq->eq.budget = budget; + + mana_gd_process_eq_events(eq); + + return min(eq->eq.work_done, budget); +} + +static void mana_gd_schedule_napi(void *arg) +{ + struct gdma_queue *eq = arg; + struct napi_struct *napi; + + napi = &eq->eq.napi; + napi_schedule_irqoff(napi); +} + +static int mana_gd_register_irq(struct gdma_queue *queue, + const struct gdma_queue_spec *spec) +{ + struct gdma_dev *gd = queue->gdma_dev; + bool is_mana = mana_gd_is_mana(gd); + struct gdma_irq_context *gic; + struct gdma_context *gc; + struct gdma_resource *r; + unsigned int msi_index; + unsigned long flags; + int err; + + gc = gd->gdma_context; + r = &gc->msix_resource; + + spin_lock_irqsave(&r->lock, flags); + + msi_index = find_first_zero_bit(r->map, r->size); + if (msi_index >= r->size) { + err = -ENOSPC; + } else { + bitmap_set(r->map, msi_index, 1); + queue->eq.msix_index = msi_index; + err = 0; + } + + spin_unlock_irqrestore(&r->lock, flags); + + if (err) + return err; + + WARN_ON(msi_index >= gc->num_msix_usable); + + gic = &gc->irq_contexts[msi_index]; + + if (is_mana) { + netif_napi_add(spec->eq.ndev, &queue->eq.napi, mana_poll, + NAPI_POLL_WEIGHT); + napi_enable(&queue->eq.napi); + } + + WARN_ON(gic->handler || gic->arg); + + gic->arg = queue; + + if (is_mana) + gic->handler = mana_gd_schedule_napi; + else + gic->handler = mana_gd_process_eq_events; + + return 0; +} + +static void mana_gd_deregister_irq(struct gdma_queue *queue) +{ + struct gdma_dev *gd = queue->gdma_dev; + struct gdma_irq_context *gic; + struct gdma_context *gc; + struct gdma_resource *r; + unsigned int msix_index; + unsigned long flags; + + gc = gd->gdma_context; + r = &gc->msix_resource; + + /* At most num_online_cpus() + 1 interrupts are used. 
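
For orientation, the interrupt dispatch implemented by the functions above works out to the following flow (a summary, not new code; mana_gd_intr() is defined further down in this file):

/*
 * mana_gd_intr()                          hard IRQ, one per MSI-X vector
 *   -> gic->handler(gic->arg)
 *        == mana_gd_process_eq_events()   HWC EQ: handled in the IRQ
 *        == mana_gd_schedule_napi()       MANA EQ: defer to NAPI
 *             -> napi_schedule_irqoff()
 *                ... softirq runs mana_poll() ...
 *                  -> mana_gd_process_eq_events()
 */
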
*/ + msix_index = queue->eq.msix_index; + if (WARN_ON(msix_index >= gc->num_msix_usable)) + return; + + gic = &gc->irq_contexts[msix_index]; + gic->handler = NULL; + gic->arg = NULL; + + spin_lock_irqsave(&r->lock, flags); + bitmap_clear(r->map, msix_index, 1); + spin_unlock_irqrestore(&r->lock, flags); + + queue->eq.msix_index = INVALID_PCI_MSIX_INDEX; +} + +int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq) +{ + struct gdma_generate_test_event_req req = {}; + struct gdma_general_resp resp = {}; + struct device *dev = gc->dev; + int err; + + mutex_lock(&gc->eq_test_event_mutex); + + init_completion(&gc->eq_test_event); + gc->test_event_eq_id = INVALID_QUEUE_ID; + + mana_gd_init_req_hdr(&req.hdr, GDMA_GENERATE_TEST_EQE, + sizeof(req), sizeof(resp)); + + req.hdr.dev_id = eq->gdma_dev->dev_id; + req.queue_index = eq->id; + + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); + if (err) { + dev_err(dev, "test_eq failed: %d\n", err); + goto out; + } + + err = -EPROTO; + + if (resp.hdr.status) { + dev_err(dev, "test_eq failed: 0x%x\n", resp.hdr.status); + goto out; + } + + if (!wait_for_completion_timeout(&gc->eq_test_event, 30 * HZ)) { + dev_err(dev, "test_eq timed out on queue %d\n", eq->id); + goto out; + } + + if (eq->id != gc->test_event_eq_id) { + dev_err(dev, "test_eq got an event on wrong queue %d (%d)\n", + gc->test_event_eq_id, eq->id); + goto out; + } + + err = 0; +out: + mutex_unlock(&gc->eq_test_event_mutex); + return err; +} + +static void mana_gd_destroy_eq(struct gdma_context *gc, bool flush_events, + struct gdma_queue *queue) +{ + int err; + + if (flush_events) { + err = mana_gd_test_eq(gc, queue); + if (err) + dev_warn(gc->dev, "Failed to flush EQ: %d\n", err); + } + + mana_gd_deregister_irq(queue); + + if (mana_gd_is_mana(queue->gdma_dev)) { + napi_disable(&queue->eq.napi); + netif_napi_del(&queue->eq.napi); + } + + if (queue->eq.disable_needed) + mana_gd_disable_queue(queue); +} + +static int mana_gd_create_eq(struct gdma_dev *gd, + const struct gdma_queue_spec *spec, + bool create_hwq, struct gdma_queue *queue) +{ + struct gdma_context *gc = gd->gdma_context; + struct device *dev = gc->dev; + u32 log2_num_entries; + int err; + + queue->eq.msix_index = INVALID_PCI_MSIX_INDEX; + + log2_num_entries = ilog2(queue->queue_size / GDMA_EQE_SIZE); + + if (spec->eq.log2_throttle_limit > log2_num_entries) { + dev_err(dev, "EQ throttling limit (%lu) > maximum EQE (%u)\n", + spec->eq.log2_throttle_limit, log2_num_entries); + return -EINVAL; + } + + err = mana_gd_register_irq(queue, spec); + if (err) { + dev_err(dev, "Failed to register irq: %d\n", err); + return err; + } + + queue->eq.callback = spec->eq.callback; + queue->eq.context = spec->eq.context; + queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries); + queue->eq.log2_throttle_limit = spec->eq.log2_throttle_limit ?: 1; + + if (create_hwq) { + err = mana_gd_create_hw_eq(gc, queue); + if (err) + goto out; + + err = mana_gd_test_eq(gc, queue); + if (err) + goto out; + } + + return 0; +out: + dev_err(dev, "Failed to create EQ: %d\n", err); + mana_gd_destroy_eq(gc, false, queue); + return err; +} + +static void mana_gd_create_cq(const struct gdma_queue_spec *spec, + struct gdma_queue *queue) +{ + u32 log2_num_entries = ilog2(spec->queue_size / GDMA_CQE_SIZE); + + queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries); + queue->cq.parent = spec->cq.parent_eq; + queue->cq.context = spec->cq.context; + queue->cq.callback = spec->cq.callback; +} + +static void mana_gd_destroy_cq(struct gdma_context 
*gc, + struct gdma_queue *queue) +{ + u32 id = queue->id; + + if (id >= gc->max_num_cqs) + return; + + if (!gc->cq_table[id]) + return; + + gc->cq_table[id] = NULL; +} + +int mana_gd_create_hwc_queue(struct gdma_dev *gd, + const struct gdma_queue_spec *spec, + struct gdma_queue **queue_ptr) +{ + struct gdma_context *gc = gd->gdma_context; + struct gdma_mem_info *gmi; + struct gdma_queue *queue; + int err; + + queue = kzalloc(sizeof(*queue), GFP_KERNEL); + if (!queue) + return -ENOMEM; + + gmi = &queue->mem_info; + err = mana_gd_alloc_memory(gc, spec->queue_size, gmi); + if (err) + goto free_q; + + queue->head = 0; + queue->tail = 0; + queue->queue_mem_ptr = gmi->virt_addr; + queue->queue_size = spec->queue_size; + queue->monitor_avl_buf = spec->monitor_avl_buf; + queue->type = spec->type; + queue->gdma_dev = gd; + + if (spec->type == GDMA_EQ) + err = mana_gd_create_eq(gd, spec, false, queue); + else if (spec->type == GDMA_CQ) + mana_gd_create_cq(spec, queue); + + if (err) + goto out; + + *queue_ptr = queue; + return 0; +out: + mana_gd_free_memory(gmi); +free_q: + kfree(queue); + return err; +} + +static void mana_gd_destroy_dma_region(struct gdma_context *gc, u64 gdma_region) +{ + struct gdma_destroy_dma_region_req req = {}; + struct gdma_general_resp resp = {}; + int err; + + if (gdma_region == GDMA_INVALID_DMA_REGION) + return; + + mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DMA_REGION, sizeof(req), + sizeof(resp)); + req.gdma_region = gdma_region; + + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); + if (err || resp.hdr.status) + dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n", + err, resp.hdr.status); +} + +static int mana_gd_create_dma_region(struct gdma_dev *gd, + struct gdma_mem_info *gmi) +{ + unsigned int num_page = gmi->length / PAGE_SIZE; + struct gdma_create_dma_region_req *req = NULL; + struct gdma_create_dma_region_resp resp = {}; + struct gdma_context *gc = gd->gdma_context; + struct hw_channel_context *hwc; + u32 length = gmi->length; + u32 req_msg_size; + int err; + int i; + + if (length < PAGE_SIZE || !is_power_of_2(length)) + return -EINVAL; + + if (offset_in_page(gmi->virt_addr) != 0) + return -EINVAL; + + hwc = gc->hwc.driver_data; + req_msg_size = sizeof(*req) + num_page * sizeof(u64); + if (req_msg_size > hwc->max_req_msg_size) + return -EINVAL; + + req = kzalloc(req_msg_size, GFP_KERNEL); + if (!req) + return -ENOMEM; + + mana_gd_init_req_hdr(&req->hdr, GDMA_CREATE_DMA_REGION, + req_msg_size, sizeof(resp)); + req->length = length; + req->offset_in_page = 0; + req->gdma_page_type = GDMA_PAGE_TYPE_4K; + req->page_count = num_page; + req->page_addr_list_len = num_page; + + for (i = 0; i < num_page; i++) + req->page_addr_list[i] = gmi->dma_handle + i * PAGE_SIZE; + + err = mana_gd_send_request(gc, req_msg_size, req, sizeof(resp), &resp); + if (err) + goto out; + + if (resp.hdr.status || resp.gdma_region == GDMA_INVALID_DMA_REGION) { + dev_err(gc->dev, "Failed to create DMA region: 0x%x\n", + resp.hdr.status); + err = -EPROTO; + goto out; + } + + gmi->gdma_region = resp.gdma_region; +out: + kfree(req); + return err; +} + +int mana_gd_create_mana_eq(struct gdma_dev *gd, + const struct gdma_queue_spec *spec, + struct gdma_queue **queue_ptr) +{ + struct gdma_context *gc = gd->gdma_context; + struct gdma_mem_info *gmi; + struct gdma_queue *queue; + int err; + + if (spec->type != GDMA_EQ) + return -EINVAL; + + queue = kzalloc(sizeof(*queue), GFP_KERNEL); + if (!queue) + return -ENOMEM; + + gmi = &queue->mem_info; + err = 
mana_gd_alloc_memory(gc, spec->queue_size, gmi); + if (err) + goto free_q; + + err = mana_gd_create_dma_region(gd, gmi); + if (err) + goto out; + + queue->head = 0; + queue->tail = 0; + queue->queue_mem_ptr = gmi->virt_addr; + queue->queue_size = spec->queue_size; + queue->monitor_avl_buf = spec->monitor_avl_buf; + queue->type = spec->type; + queue->gdma_dev = gd; + + err = mana_gd_create_eq(gd, spec, true, queue); + if (err) + goto out; + + *queue_ptr = queue; + return 0; +out: + mana_gd_free_memory(gmi); +free_q: + kfree(queue); + return err; +} + +int mana_gd_create_mana_wq_cq(struct gdma_dev *gd, + const struct gdma_queue_spec *spec, + struct gdma_queue **queue_ptr) +{ + struct gdma_context *gc = gd->gdma_context; + struct gdma_mem_info *gmi; + struct gdma_queue *queue; + int err; + + if (spec->type != GDMA_CQ && spec->type != GDMA_SQ && + spec->type != GDMA_RQ) + return -EINVAL; + + queue = kzalloc(sizeof(*queue), GFP_KERNEL); + if (!queue) + return -ENOMEM; + + gmi = &queue->mem_info; + err = mana_gd_alloc_memory(gc, spec->queue_size, gmi); + if (err) + goto free_q; + + err = mana_gd_create_dma_region(gd, gmi); + if (err) + goto out; + + queue->head = 0; + queue->tail = 0; + queue->queue_mem_ptr = gmi->virt_addr; + queue->queue_size = spec->queue_size; + queue->monitor_avl_buf = spec->monitor_avl_buf; + queue->type = spec->type; + queue->gdma_dev = gd; + + if (spec->type == GDMA_CQ) + mana_gd_create_cq(spec, queue); + + *queue_ptr = queue; + return 0; +out: + mana_gd_free_memory(gmi); +free_q: + kfree(queue); + return err; +} + +void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue) +{ + struct gdma_mem_info *gmi = &queue->mem_info; + + switch (queue->type) { + case GDMA_EQ: + mana_gd_destroy_eq(gc, queue->eq.disable_needed, queue); + break; + + case GDMA_CQ: + mana_gd_destroy_cq(gc, queue); + break; + + case GDMA_RQ: + break; + + case GDMA_SQ: + break; + + default: + dev_err(gc->dev, "Can't destroy unknown queue: type=%d\n", + queue->type); + return; + } + + mana_gd_destroy_dma_region(gc, gmi->gdma_region); + mana_gd_free_memory(gmi); + kfree(queue); +} + +int mana_gd_verify_vf_version(struct pci_dev *pdev) +{ + struct gdma_context *gc = pci_get_drvdata(pdev); + struct gdma_verify_ver_resp resp = {}; + struct gdma_verify_ver_req req = {}; + int err; + + mana_gd_init_req_hdr(&req.hdr, GDMA_VERIFY_VF_DRIVER_VERSION, + sizeof(req), sizeof(resp)); + + req.protocol_ver_min = GDMA_PROTOCOL_FIRST; + req.protocol_ver_max = GDMA_PROTOCOL_LAST; + + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); + if (err || resp.hdr.status) { + dev_err(gc->dev, "VfVerifyVersionOutput: %d, status=0x%x\n", + err, resp.hdr.status); + return err ? err : -EPROTO; + } + + return 0; +} + +int mana_gd_register_device(struct gdma_dev *gd) +{ + struct gdma_context *gc = gd->gdma_context; + struct gdma_register_device_resp resp = {}; + struct gdma_general_req req = {}; + int err; + + gd->pdid = INVALID_PDID; + gd->doorbell = INVALID_DOORBELL; + gd->gpa_mkey = INVALID_MEM_KEY; + + mana_gd_init_req_hdr(&req.hdr, GDMA_REGISTER_DEVICE, sizeof(req), + sizeof(resp)); + + req.hdr.dev_id = gd->dev_id; + + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); + if (err || resp.hdr.status) { + dev_err(gc->dev, "gdma_register_device_resp failed: %d, 0x%x\n", + err, resp.hdr.status); + return err ? 
err : -EPROTO; + } + + gd->pdid = resp.pdid; + gd->gpa_mkey = resp.gpa_mkey; + gd->doorbell = resp.db_id; + + return 0; +} + +int mana_gd_deregister_device(struct gdma_dev *gd) +{ + struct gdma_context *gc = gd->gdma_context; + struct gdma_general_resp resp = {}; + struct gdma_general_req req = {}; + int err; + + if (gd->pdid == INVALID_PDID) + return -EINVAL; + + mana_gd_init_req_hdr(&req.hdr, GDMA_DEREGISTER_DEVICE, sizeof(req), + sizeof(resp)); + + req.hdr.dev_id = gd->dev_id; + + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); + if (err || resp.hdr.status) { + dev_err(gc->dev, "Failed to deregister device: %d, 0x%x\n", + err, resp.hdr.status); + if (!err) + err = -EPROTO; + } + + gd->pdid = INVALID_PDID; + gd->doorbell = INVALID_DOORBELL; + gd->gpa_mkey = INVALID_MEM_KEY; + + return err; +} + +u32 mana_gd_wq_avail_space(struct gdma_queue *wq) +{ + u32 used_space = (wq->head - wq->tail) * GDMA_WQE_BU_SIZE; + u32 wq_size = wq->queue_size; + + WARN_ON_ONCE(used_space > wq_size); + + return wq_size - used_space; +} + +u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset) +{ + u32 offset = (wqe_offset * GDMA_WQE_BU_SIZE) & (wq->queue_size - 1); + + WARN_ON_ONCE((offset + GDMA_WQE_BU_SIZE) > wq->queue_size); + + return wq->queue_mem_ptr + offset; +} + +static u32 mana_gd_write_client_oob(const struct gdma_wqe_request *wqe_req, + enum gdma_queue_type q_type, + u32 client_oob_size, u32 sgl_data_size, + u8 *wqe_ptr) +{ + bool oob_in_sgl = !!(wqe_req->flags & GDMA_WR_OOB_IN_SGL); + bool pad_data = !!(wqe_req->flags & GDMA_WR_PAD_BY_SGE0); + struct gdma_wqe *header = (struct gdma_wqe *)wqe_ptr; + u8 *ptr; + + memset(header, 0, sizeof(struct gdma_wqe)); + header->num_sge = wqe_req->num_sge; + header->inline_oob_size_div4 = client_oob_size / sizeof(u32); + + if (oob_in_sgl) { + WARN_ON_ONCE(!pad_data || wqe_req->num_sge < 2); + + header->client_oob_in_sgl = 1; + + if (pad_data) + header->last_vbytes = wqe_req->sgl[0].size; + } + + if (q_type == GDMA_SQ) + header->client_data_unit = wqe_req->client_data_unit; + + /* The size of gdma_wqe + client_oob_size must be less than or equal + * to one Basic Unit (i.e. 32 bytes), so the pointer can't go beyond + * the queue memory buffer boundary. 
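
The invariant stated in this comment can be checked at compile time; a sketch, not part of the patch, using the constants from gdma.h (the 8-byte WQE header plus the largest inline OOB is exactly one 32-byte Basic Unit). Note that the sizeof(header) expressions just below measure the pointer, which on the only supported architecture (X86_64) happens to equal sizeof(struct gdma_wqe).

static_assert(sizeof(struct gdma_wqe) + INLINE_OOB_LARGE_SIZE ==
	      GDMA_WQE_BU_SIZE);
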
+ */ + ptr = wqe_ptr + sizeof(header); + + if (wqe_req->inline_oob_data && wqe_req->inline_oob_size > 0) { + memcpy(ptr, wqe_req->inline_oob_data, wqe_req->inline_oob_size); + + if (client_oob_size > wqe_req->inline_oob_size) + memset(ptr + wqe_req->inline_oob_size, 0, + client_oob_size - wqe_req->inline_oob_size); + } + + return sizeof(header) + client_oob_size; +} + +static void mana_gd_write_sgl(struct gdma_queue *wq, u8 *wqe_ptr, + const struct gdma_wqe_request *wqe_req) +{ + u32 sgl_size = sizeof(struct gdma_sge) * wqe_req->num_sge; + const u8 *address = (u8 *)wqe_req->sgl; + u8 *base_ptr, *end_ptr; + u32 size_to_end; + + base_ptr = wq->queue_mem_ptr; + end_ptr = base_ptr + wq->queue_size; + size_to_end = (u32)(end_ptr - wqe_ptr); + + if (size_to_end < sgl_size) { + memcpy(wqe_ptr, address, size_to_end); + + wqe_ptr = base_ptr; + address += size_to_end; + sgl_size -= size_to_end; + } + + memcpy(wqe_ptr, address, sgl_size); +} + +int mana_gd_post_work_request(struct gdma_queue *wq, + const struct gdma_wqe_request *wqe_req, + struct gdma_posted_wqe_info *wqe_info) +{ + u32 client_oob_size = wqe_req->inline_oob_size; + struct gdma_context *gc; + u32 sgl_data_size; + u32 max_wqe_size; + u32 wqe_size; + u8 *wqe_ptr; + + if (wqe_req->num_sge == 0) + return -EINVAL; + + if (wq->type == GDMA_RQ) { + if (client_oob_size != 0) + return -EINVAL; + + client_oob_size = INLINE_OOB_SMALL_SIZE; + + max_wqe_size = GDMA_MAX_RQE_SIZE; + } else { + if (client_oob_size != INLINE_OOB_SMALL_SIZE && + client_oob_size != INLINE_OOB_LARGE_SIZE) + return -EINVAL; + + max_wqe_size = GDMA_MAX_SQE_SIZE; + } + + sgl_data_size = sizeof(struct gdma_sge) * wqe_req->num_sge; + wqe_size = ALIGN(sizeof(struct gdma_wqe) + client_oob_size + + sgl_data_size, GDMA_WQE_BU_SIZE); + if (wqe_size > max_wqe_size) + return -EINVAL; + + if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq)) { + gc = wq->gdma_dev->gdma_context; + dev_err(gc->dev, "unsuccessful flow control!\n"); + return -ENOSPC; + } + + if (wqe_info) + wqe_info->wqe_size_in_bu = wqe_size / GDMA_WQE_BU_SIZE; + + wqe_ptr = mana_gd_get_wqe_ptr(wq, wq->head); + wqe_ptr += mana_gd_write_client_oob(wqe_req, wq->type, client_oob_size, + sgl_data_size, wqe_ptr); + if (wqe_ptr >= (u8 *)wq->queue_mem_ptr + wq->queue_size) + wqe_ptr -= wq->queue_size; + + mana_gd_write_sgl(wq, wqe_ptr, wqe_req); + + wq->head += wqe_size / GDMA_WQE_BU_SIZE; + + return 0; +} + +int mana_gd_post_and_ring(struct gdma_queue *queue, + const struct gdma_wqe_request *wqe_req, + struct gdma_posted_wqe_info *wqe_info) +{ + struct gdma_context *gc = queue->gdma_dev->gdma_context; + int err; + + err = mana_gd_post_work_request(queue, wqe_req, wqe_info); + if (err) + return err; + + mana_gd_wq_ring_doorbell(gc, queue); + + return 0; +} + +static int mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp) +{ + unsigned int num_cqe = cq->queue_size / sizeof(struct gdma_cqe); + struct gdma_cqe *cq_cqe = cq->queue_mem_ptr; + u32 owner_bits, new_bits, old_bits; + struct gdma_cqe *cqe; + + cqe = &cq_cqe[cq->head % num_cqe]; + owner_bits = cqe->cqe_info.owner_bits; + + old_bits = (cq->head / num_cqe - 1) & GDMA_CQE_OWNER_MASK; + /* Return 0 if no more entries. */ + if (owner_bits == old_bits) + return 0; + + new_bits = (cq->head / num_cqe) & GDMA_CQE_OWNER_MASK; + /* Return -1 if overflow detected. 
*/ + if (owner_bits != new_bits) + return -1; + + comp->wq_num = cqe->cqe_info.wq_num; + comp->is_sq = cqe->cqe_info.is_sq; + memcpy(comp->cqe_data, cqe->cqe_data, GDMA_COMP_DATA_SIZE); + + return 1; +} + +int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe) +{ + int cqe_idx; + int ret; + + for (cqe_idx = 0; cqe_idx < num_cqe; cqe_idx++) { + ret = mana_gd_read_cqe(cq, &comp[cqe_idx]); + + if (ret < 0) { + cq->head -= cqe_idx; + return ret; + } + + if (ret == 0) + break; + + cq->head++; + } + + return cqe_idx; +} + +static irqreturn_t mana_gd_intr(int irq, void *arg) +{ + struct gdma_irq_context *gic = arg; + + if (gic->handler) + gic->handler(gic->arg); + + return IRQ_HANDLED; +} + +int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r) +{ + r->map = bitmap_zalloc(res_avail, GFP_KERNEL); + if (!r->map) + return -ENOMEM; + + r->size = res_avail; + spin_lock_init(&r->lock); + + return 0; +} + +void mana_gd_free_res_map(struct gdma_resource *r) +{ + bitmap_free(r->map); + r->map = NULL; + r->size = 0; +} + +static int mana_gd_setup_irqs(struct pci_dev *pdev) +{ + unsigned int max_queues_per_port = num_online_cpus(); + struct gdma_context *gc = pci_get_drvdata(pdev); + struct gdma_irq_context *gic; + unsigned int max_irqs; + int nvec, irq; + int err, i, j; + + if (max_queues_per_port > MANA_MAX_NUM_QUEUES) + max_queues_per_port = MANA_MAX_NUM_QUEUES; + + max_irqs = max_queues_per_port * MAX_PORTS_IN_MANA_DEV; + + /* Need 1 interrupt for the Hardware communication Channel (HWC) */ + max_irqs++; + + nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX); + if (nvec < 0) + return nvec; + + gc->irq_contexts = kcalloc(nvec, sizeof(struct gdma_irq_context), + GFP_KERNEL); + if (!gc->irq_contexts) { + err = -ENOMEM; + goto free_irq_vector; + } + + for (i = 0; i < nvec; i++) { + gic = &gc->irq_contexts[i]; + gic->handler = NULL; + gic->arg = NULL; + + irq = pci_irq_vector(pdev, i); + if (irq < 0) { + err = irq; + goto free_irq; + } + + err = request_irq(irq, mana_gd_intr, 0, "mana_intr", gic); + if (err) + goto free_irq; + } + + err = mana_gd_alloc_res_map(nvec, &gc->msix_resource); + if (err) + goto free_irq; + + gc->max_num_msix = nvec; + gc->num_msix_usable = nvec; + + return 0; + +free_irq: + for (j = i - 1; j >= 0; j--) { + irq = pci_irq_vector(pdev, j); + gic = &gc->irq_contexts[j]; + free_irq(irq, gic); + } + + kfree(gc->irq_contexts); + gc->irq_contexts = NULL; +free_irq_vector: + pci_free_irq_vectors(pdev); + return err; +} + +static void mana_gd_remove_irqs(struct pci_dev *pdev) +{ + struct gdma_context *gc = pci_get_drvdata(pdev); + struct gdma_irq_context *gic; + int irq, i; + + if (gc->max_num_msix < 1) + return; + + mana_gd_free_res_map(&gc->msix_resource); + + for (i = 0; i < gc->max_num_msix; i++) { + irq = pci_irq_vector(pdev, i); + if (irq < 0) + continue; + + gic = &gc->irq_contexts[i]; + free_irq(irq, gic); + } + + pci_free_irq_vectors(pdev); + + gc->max_num_msix = 0; + gc->num_msix_usable = 0; + kfree(gc->irq_contexts); + gc->irq_contexts = NULL; +} + +static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct gdma_context *gc; + void __iomem *bar0_va; + int bar = 0; + int err; + + err = pci_enable_device(pdev); + if (err) + return -ENXIO; + + pci_set_master(pdev); + + err = pci_request_regions(pdev, "mana"); + if (err) + goto disable_dev; + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) + goto release_region; + + err = -ENOMEM; + gc = vzalloc(sizeof(*gc)); + if (!gc) + goto 
release_region; + + bar0_va = pci_iomap(pdev, bar, 0); + if (!bar0_va) + goto free_gc; + + gc->bar0_va = bar0_va; + gc->dev = &pdev->dev; + + pci_set_drvdata(pdev, gc); + + mana_gd_init_registers(pdev); + + mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base); + + err = mana_gd_setup_irqs(pdev); + if (err) + goto unmap_bar; + + mutex_init(&gc->eq_test_event_mutex); + + err = mana_hwc_create_channel(gc); + if (err) + goto remove_irq; + + err = mana_gd_verify_vf_version(pdev); + if (err) + goto remove_irq; + + err = mana_gd_query_max_resources(pdev); + if (err) + goto remove_irq; + + err = mana_gd_detect_devices(pdev); + if (err) + goto remove_irq; + + err = mana_probe(&gc->mana); + if (err) + goto clean_up_gdma; + + return 0; + +clean_up_gdma: + mana_hwc_destroy_channel(gc); + vfree(gc->cq_table); + gc->cq_table = NULL; +remove_irq: + mana_gd_remove_irqs(pdev); +unmap_bar: + pci_iounmap(pdev, bar0_va); +free_gc: + vfree(gc); +release_region: + pci_release_regions(pdev); +disable_dev: + pci_clear_master(pdev); + pci_disable_device(pdev); + dev_err(&pdev->dev, "gdma probe failed: err = %d\n", err); + return err; +} + +static void mana_gd_remove(struct pci_dev *pdev) +{ + struct gdma_context *gc = pci_get_drvdata(pdev); + + mana_remove(&gc->mana); + + mana_hwc_destroy_channel(gc); + vfree(gc->cq_table); + gc->cq_table = NULL; + + mana_gd_remove_irqs(pdev); + + pci_iounmap(pdev, gc->bar0_va); + + vfree(gc); + + pci_release_regions(pdev); + pci_clear_master(pdev); + pci_disable_device(pdev); +} + +#ifndef PCI_VENDOR_ID_MICROSOFT +#define PCI_VENDOR_ID_MICROSOFT 0x1414 +#endif + +static const struct pci_device_id mana_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, 0x00BA) }, + { } +}; + +static struct pci_driver mana_driver = { + .name = "mana", + .id_table = mana_id_table, + .probe = mana_gd_probe, + .remove = mana_gd_remove, +}; + +module_pci_driver(mana_driver); + +MODULE_DEVICE_TABLE(pci, mana_id_table); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("Microsoft Azure Network Adapter driver"); diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c new file mode 100644 index 000000000000..462bc577692a --- /dev/null +++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c @@ -0,0 +1,843 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright (c) 2021, Microsoft Corporation. 
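
hw_channel.c below implements the request/response transport that mana_gd_send_request() relies on. A summary of the flow suggested by the helpers in this file (a comment, not new code):

/*
 * Request path:  mana_hwc_get_msg_index() reserves a message slot
 *                (semaphore plus bitmap), the request is posted to the
 *                HWC SQ with mana_hwc_post_tx_wqe(), and the caller
 *                sleeps on the slot's hwc_caller_ctx completion.
 * Response path: mana_hwc_comp_event() drains the shared CQ and calls
 *                mana_hwc_rx_event_handler(), which verifies and copies
 *                the response via mana_hwc_handle_resp(), completes the
 *                waiter, and reposts the RX buffer with
 *                mana_hwc_post_rx_wqe().
 */
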
*/ + +#include "gdma.h" +#include "hw_channel.h" + +static int mana_hwc_get_msg_index(struct hw_channel_context *hwc, u16 *msg_id) +{ + struct gdma_resource *r = &hwc->inflight_msg_res; + unsigned long flags; + u32 index; + + down(&hwc->sema); + + spin_lock_irqsave(&r->lock, flags); + + index = find_first_zero_bit(hwc->inflight_msg_res.map, + hwc->inflight_msg_res.size); + + bitmap_set(hwc->inflight_msg_res.map, index, 1); + + spin_unlock_irqrestore(&r->lock, flags); + + *msg_id = index; + + return 0; +} + +static void mana_hwc_put_msg_index(struct hw_channel_context *hwc, u16 msg_id) +{ + struct gdma_resource *r = &hwc->inflight_msg_res; + unsigned long flags; + + spin_lock_irqsave(&r->lock, flags); + bitmap_clear(hwc->inflight_msg_res.map, msg_id, 1); + spin_unlock_irqrestore(&r->lock, flags); + + up(&hwc->sema); +} + +static int mana_hwc_verify_resp_msg(const struct hwc_caller_ctx *caller_ctx, + const struct gdma_resp_hdr *resp_msg, + u32 resp_len) +{ + if (resp_len < sizeof(*resp_msg)) + return -EPROTO; + + if (resp_len > caller_ctx->output_buflen) + return -EPROTO; + + return 0; +} + +static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len, + const struct gdma_resp_hdr *resp_msg) +{ + struct hwc_caller_ctx *ctx; + int err = -EPROTO; + + if (!test_bit(resp_msg->response.hwc_msg_id, + hwc->inflight_msg_res.map)) { + dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n", + resp_msg->response.hwc_msg_id); + return; + } + + ctx = hwc->caller_ctx + resp_msg->response.hwc_msg_id; + err = mana_hwc_verify_resp_msg(ctx, resp_msg, resp_len); + if (err) + goto out; + + ctx->status_code = resp_msg->status; + + memcpy(ctx->output_buf, resp_msg, resp_len); +out: + ctx->error = err; + complete(&ctx->comp_event); +} + +static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq, + struct hwc_work_request *req) +{ + struct device *dev = hwc_rxq->hwc->dev; + struct gdma_sge *sge; + int err; + + sge = &req->sge; + sge->address = (u64)req->buf_sge_addr; + sge->mem_key = hwc_rxq->msg_buf->gpa_mkey; + sge->size = req->buf_len; + + memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request)); + req->wqe_req.sgl = sge; + req->wqe_req.num_sge = 1; + req->wqe_req.client_data_unit = 0; + + err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL); + if (err) + dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err); + return err; +} + +static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self, + struct gdma_event *event) +{ + struct hw_channel_context *hwc = ctx; + struct gdma_dev *gd = hwc->gdma_dev; + union hwc_init_type_data type_data; + union hwc_init_eq_id_db eq_db; + u32 type, val; + + switch (event->type) { + case GDMA_EQE_HWC_INIT_EQ_ID_DB: + eq_db.as_uint32 = event->details[0]; + hwc->cq->gdma_eq->id = eq_db.eq_id; + gd->doorbell = eq_db.doorbell; + break; + + case GDMA_EQE_HWC_INIT_DATA: + type_data.as_uint32 = event->details[0]; + type = type_data.type; + val = type_data.value; + + switch (type) { + case HWC_INIT_DATA_CQID: + hwc->cq->gdma_cq->id = val; + break; + + case HWC_INIT_DATA_RQID: + hwc->rxq->gdma_wq->id = val; + break; + + case HWC_INIT_DATA_SQID: + hwc->txq->gdma_wq->id = val; + break; + + case HWC_INIT_DATA_QUEUE_DEPTH: + hwc->hwc_init_q_depth_max = (u16)val; + break; + + case HWC_INIT_DATA_MAX_REQUEST: + hwc->hwc_init_max_req_msg_size = val; + break; + + case HWC_INIT_DATA_MAX_RESPONSE: + hwc->hwc_init_max_resp_msg_size = val; + break; + + case HWC_INIT_DATA_MAX_NUM_CQS: + gd->gdma_context->max_num_cqs = val; + break; + + case 
HWC_INIT_DATA_PDID: + hwc->gdma_dev->pdid = val; + break; + + case HWC_INIT_DATA_GPA_MKEY: + hwc->rxq->msg_buf->gpa_mkey = val; + hwc->txq->msg_buf->gpa_mkey = val; + break; + } + + break; + + case GDMA_EQE_HWC_INIT_DONE: + complete(&hwc->hwc_init_eqe_comp); + break; + + default: + /* Ignore unknown events, which should never happen. */ + break; + } +} + +static void mana_hwc_rx_event_handler(void *ctx, u32 gdma_rxq_id, + const struct hwc_rx_oob *rx_oob) +{ + struct hw_channel_context *hwc = ctx; + struct hwc_wq *hwc_rxq = hwc->rxq; + struct hwc_work_request *rx_req; + struct gdma_resp_hdr *resp; + struct gdma_wqe *dma_oob; + struct gdma_queue *rq; + struct gdma_sge *sge; + u64 rq_base_addr; + u64 rx_req_idx; + u8 *wqe; + + if (WARN_ON_ONCE(hwc_rxq->gdma_wq->id != gdma_rxq_id)) + return; + + rq = hwc_rxq->gdma_wq; + wqe = mana_gd_get_wqe_ptr(rq, rx_oob->wqe_offset / GDMA_WQE_BU_SIZE); + dma_oob = (struct gdma_wqe *)wqe; + + sge = (struct gdma_sge *)(wqe + 8 + dma_oob->inline_oob_size_div4 * 4); + + /* Select the RX work request for virtual address and for reposting. */ + rq_base_addr = hwc_rxq->msg_buf->mem_info.dma_handle; + rx_req_idx = (sge->address - rq_base_addr) / hwc->max_req_msg_size; + + rx_req = &hwc_rxq->msg_buf->reqs[rx_req_idx]; + resp = (struct gdma_resp_hdr *)rx_req->buf_va; + + if (resp->response.hwc_msg_id >= hwc->num_inflight_msg) { + dev_err(hwc->dev, "HWC RX: wrong msg_id=%u\n", + resp->response.hwc_msg_id); + return; + } + + mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, resp); + + /* Don't use 'resp' after this point, because the buffer is posted + * back to the HW by mana_hwc_post_rx_wqe() below. + */ + resp = NULL; + + mana_hwc_post_rx_wqe(hwc_rxq, rx_req); +} + +static void mana_hwc_tx_event_handler(void *ctx, u32 gdma_txq_id, + const struct hwc_rx_oob *rx_oob) +{ + struct hw_channel_context *hwc = ctx; + struct hwc_wq *hwc_txq = hwc->txq; + + WARN_ON_ONCE(!hwc_txq || hwc_txq->gdma_wq->id != gdma_txq_id); +} + +static int mana_hwc_create_gdma_wq(struct hw_channel_context *hwc, + enum gdma_queue_type type, u64 queue_size, + struct gdma_queue **queue) +{ + struct gdma_queue_spec spec = {}; + + if (type != GDMA_SQ && type != GDMA_RQ) + return -EINVAL; + + spec.type = type; + spec.monitor_avl_buf = false; + spec.queue_size = queue_size; + + return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue); +} + +static int mana_hwc_create_gdma_cq(struct hw_channel_context *hwc, + u64 queue_size, + void *ctx, gdma_cq_callback *cb, + struct gdma_queue *parent_eq, + struct gdma_queue **queue) +{ + struct gdma_queue_spec spec = {}; + + spec.type = GDMA_CQ; + spec.monitor_avl_buf = false; + spec.queue_size = queue_size; + spec.cq.context = ctx; + spec.cq.callback = cb; + spec.cq.parent_eq = parent_eq; + + return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue); +} + +static int mana_hwc_create_gdma_eq(struct hw_channel_context *hwc, + u64 queue_size, + void *ctx, gdma_eq_callback *cb, + struct gdma_queue **queue) +{ + struct gdma_queue_spec spec = {}; + + spec.type = GDMA_EQ; + spec.monitor_avl_buf = false; + spec.queue_size = queue_size; + spec.eq.context = ctx; + spec.eq.callback = cb; + spec.eq.log2_throttle_limit = DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ; + + return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue); +} + +static void mana_hwc_comp_event(void *ctx, struct gdma_queue *q_self) +{ + struct hwc_rx_oob comp_data = {}; + struct gdma_comp *completions; + struct hwc_cq *hwc_cq = ctx; + u32 comp_read, i; + + WARN_ON_ONCE(hwc_cq->gdma_cq != q_self); + + completions = 
hwc_cq->comp_buf; + comp_read = mana_gd_poll_cq(q_self, completions, hwc_cq->queue_depth); + WARN_ON_ONCE(comp_read <= 0 || comp_read > hwc_cq->queue_depth); + + for (i = 0; i < comp_read; ++i) { + comp_data = *(struct hwc_rx_oob *)completions[i].cqe_data; + + if (completions[i].is_sq) + hwc_cq->tx_event_handler(hwc_cq->tx_event_ctx, + completions[i].wq_num, + &comp_data); + else + hwc_cq->rx_event_handler(hwc_cq->rx_event_ctx, + completions[i].wq_num, + &comp_data); + } + + mana_gd_arm_cq(q_self); +} + +static void mana_hwc_destroy_cq(struct gdma_context *gc, struct hwc_cq *hwc_cq) +{ + if (!hwc_cq) + return; + + kfree(hwc_cq->comp_buf); + + if (hwc_cq->gdma_cq) + mana_gd_destroy_queue(gc, hwc_cq->gdma_cq); + + if (hwc_cq->gdma_eq) + mana_gd_destroy_queue(gc, hwc_cq->gdma_eq); + + kfree(hwc_cq); +} + +static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth, + gdma_eq_callback *callback, void *ctx, + hwc_rx_event_handler_t *rx_ev_hdlr, + void *rx_ev_ctx, + hwc_tx_event_handler_t *tx_ev_hdlr, + void *tx_ev_ctx, struct hwc_cq **hwc_cq_ptr) +{ + struct gdma_queue *eq, *cq; + struct gdma_comp *comp_buf; + struct hwc_cq *hwc_cq; + u32 eq_size, cq_size; + int err; + + eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth); + if (eq_size < MINIMUM_SUPPORTED_PAGE_SIZE) + eq_size = MINIMUM_SUPPORTED_PAGE_SIZE; + + cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth); + if (cq_size < MINIMUM_SUPPORTED_PAGE_SIZE) + cq_size = MINIMUM_SUPPORTED_PAGE_SIZE; + + hwc_cq = kzalloc(sizeof(*hwc_cq), GFP_KERNEL); + if (!hwc_cq) + return -ENOMEM; + + err = mana_hwc_create_gdma_eq(hwc, eq_size, ctx, callback, &eq); + if (err) { + dev_err(hwc->dev, "Failed to create HWC EQ for RQ: %d\n", err); + goto out; + } + hwc_cq->gdma_eq = eq; + + err = mana_hwc_create_gdma_cq(hwc, cq_size, hwc_cq, mana_hwc_comp_event, + eq, &cq); + if (err) { + dev_err(hwc->dev, "Failed to create HWC CQ for RQ: %d\n", err); + goto out; + } + hwc_cq->gdma_cq = cq; + + comp_buf = kcalloc(q_depth, sizeof(struct gdma_comp), GFP_KERNEL); + if (!comp_buf) { + err = -ENOMEM; + goto out; + } + + hwc_cq->hwc = hwc; + hwc_cq->comp_buf = comp_buf; + hwc_cq->queue_depth = q_depth; + hwc_cq->rx_event_handler = rx_ev_hdlr; + hwc_cq->rx_event_ctx = rx_ev_ctx; + hwc_cq->tx_event_handler = tx_ev_hdlr; + hwc_cq->tx_event_ctx = tx_ev_ctx; + + *hwc_cq_ptr = hwc_cq; + return 0; +out: + mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq); + return err; +} + +static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth, + u32 max_msg_size, + struct hwc_dma_buf **dma_buf_ptr) +{ + struct gdma_context *gc = hwc->gdma_dev->gdma_context; + struct hwc_work_request *hwc_wr; + struct hwc_dma_buf *dma_buf; + struct gdma_mem_info *gmi; + void *virt_addr; + u32 buf_size; + u8 *base_pa; + int err; + u16 i; + + dma_buf = kzalloc(sizeof(*dma_buf) + + q_depth * sizeof(struct hwc_work_request), + GFP_KERNEL); + if (!dma_buf) + return -ENOMEM; + + dma_buf->num_reqs = q_depth; + + buf_size = PAGE_ALIGN(q_depth * max_msg_size); + + gmi = &dma_buf->mem_info; + err = mana_gd_alloc_memory(gc, buf_size, gmi); + if (err) { + dev_err(hwc->dev, "Failed to allocate DMA buffer: %d\n", err); + goto out; + } + + virt_addr = dma_buf->mem_info.virt_addr; + base_pa = (u8 *)dma_buf->mem_info.dma_handle; + + for (i = 0; i < q_depth; i++) { + hwc_wr = &dma_buf->reqs[i]; + + hwc_wr->buf_va = virt_addr + i * max_msg_size; + hwc_wr->buf_sge_addr = base_pa + i * max_msg_size; + + hwc_wr->buf_len = max_msg_size; + } + + *dma_buf_ptr = dma_buf; + return 0; +out: 
+ kfree(dma_buf); + return err; +} + +static void mana_hwc_dealloc_dma_buf(struct hw_channel_context *hwc, + struct hwc_dma_buf *dma_buf) +{ + if (!dma_buf) + return; + + mana_gd_free_memory(&dma_buf->mem_info); + + kfree(dma_buf); +} + +static void mana_hwc_destroy_wq(struct hw_channel_context *hwc, + struct hwc_wq *hwc_wq) +{ + if (!hwc_wq) + return; + + mana_hwc_dealloc_dma_buf(hwc, hwc_wq->msg_buf); + + if (hwc_wq->gdma_wq) + mana_gd_destroy_queue(hwc->gdma_dev->gdma_context, + hwc_wq->gdma_wq); + + kfree(hwc_wq); +} + +static int mana_hwc_create_wq(struct hw_channel_context *hwc, + enum gdma_queue_type q_type, u16 q_depth, + u32 max_msg_size, struct hwc_cq *hwc_cq, + struct hwc_wq **hwc_wq_ptr) +{ + struct gdma_queue *queue; + struct hwc_wq *hwc_wq; + u32 queue_size; + int err; + + WARN_ON(q_type != GDMA_SQ && q_type != GDMA_RQ); + + if (q_type == GDMA_RQ) + queue_size = roundup_pow_of_two(GDMA_MAX_RQE_SIZE * q_depth); + else + queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth); + + if (queue_size < MINIMUM_SUPPORTED_PAGE_SIZE) + queue_size = MINIMUM_SUPPORTED_PAGE_SIZE; + + hwc_wq = kzalloc(sizeof(*hwc_wq), GFP_KERNEL); + if (!hwc_wq) + return -ENOMEM; + + err = mana_hwc_create_gdma_wq(hwc, q_type, queue_size, &queue); + if (err) + goto out; + + err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size, + &hwc_wq->msg_buf); + if (err) + goto out; + + hwc_wq->hwc = hwc; + hwc_wq->gdma_wq = queue; + hwc_wq->queue_depth = q_depth; + hwc_wq->hwc_cq = hwc_cq; + + *hwc_wq_ptr = hwc_wq; + return 0; +out: + if (err) + mana_hwc_destroy_wq(hwc, hwc_wq); + return err; +} + +static int mana_hwc_post_tx_wqe(const struct hwc_wq *hwc_txq, + struct hwc_work_request *req, + u32 dest_virt_rq_id, u32 dest_virt_rcq_id, + bool dest_pf) +{ + struct device *dev = hwc_txq->hwc->dev; + struct hwc_tx_oob *tx_oob; + struct gdma_sge *sge; + int err; + + if (req->msg_size == 0 || req->msg_size > req->buf_len) { + dev_err(dev, "wrong msg_size: %u, buf_len: %u\n", + req->msg_size, req->buf_len); + return -EINVAL; + } + + tx_oob = &req->tx_oob; + + tx_oob->vrq_id = dest_virt_rq_id; + tx_oob->dest_vfid = 0; + tx_oob->vrcq_id = dest_virt_rcq_id; + tx_oob->vscq_id = hwc_txq->hwc_cq->gdma_cq->id; + tx_oob->loopback = false; + tx_oob->lso_override = false; + tx_oob->dest_pf = dest_pf; + tx_oob->vsq_id = hwc_txq->gdma_wq->id; + + sge = &req->sge; + sge->address = (u64)req->buf_sge_addr; + sge->mem_key = hwc_txq->msg_buf->gpa_mkey; + sge->size = req->msg_size; + + memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request)); + req->wqe_req.sgl = sge; + req->wqe_req.num_sge = 1; + req->wqe_req.inline_oob_size = sizeof(struct hwc_tx_oob); + req->wqe_req.inline_oob_data = tx_oob; + req->wqe_req.client_data_unit = 0; + + err = mana_gd_post_and_ring(hwc_txq->gdma_wq, &req->wqe_req, NULL); + if (err) + dev_err(dev, "Failed to post WQE on HWC SQ: %d\n", err); + return err; +} + +static int mana_hwc_init_inflight_msg(struct hw_channel_context *hwc, + u16 num_msg) +{ + int err; + + sema_init(&hwc->sema, num_msg); + + err = mana_gd_alloc_res_map(num_msg, &hwc->inflight_msg_res); + if (err) + dev_err(hwc->dev, "Failed to init inflight_msg_res: %d\n", err); + return err; +} + +static int mana_hwc_test_channel(struct hw_channel_context *hwc, u16 q_depth, + u32 max_req_msg_size, u32 max_resp_msg_size) +{ + struct gdma_context *gc = hwc->gdma_dev->gdma_context; + struct hwc_wq *hwc_rxq = hwc->rxq; + struct hwc_work_request *req; + struct hwc_caller_ctx *ctx; + int err; + int i; + + /* Post all WQEs on the RQ */ + for (i = 0; i < 
q_depth; i++) { + req = &hwc_rxq->msg_buf->reqs[i]; + err = mana_hwc_post_rx_wqe(hwc_rxq, req); + if (err) + return err; + } + + ctx = kzalloc(q_depth * sizeof(struct hwc_caller_ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + for (i = 0; i < q_depth; ++i) + init_completion(&ctx[i].comp_event); + + hwc->caller_ctx = ctx; + + return mana_gd_test_eq(gc, hwc->cq->gdma_eq); +} + +static int mana_hwc_establish_channel(struct gdma_context *gc, u16 *q_depth, + u32 *max_req_msg_size, + u32 *max_resp_msg_size) +{ + struct hw_channel_context *hwc = gc->hwc.driver_data; + struct gdma_queue *rq = hwc->rxq->gdma_wq; + struct gdma_queue *sq = hwc->txq->gdma_wq; + struct gdma_queue *eq = hwc->cq->gdma_eq; + struct gdma_queue *cq = hwc->cq->gdma_cq; + int err; + + init_completion(&hwc->hwc_init_eqe_comp); + + err = mana_smc_setup_hwc(&gc->shm_channel, false, + eq->mem_info.dma_handle, + cq->mem_info.dma_handle, + rq->mem_info.dma_handle, + sq->mem_info.dma_handle, + eq->eq.msix_index); + if (err) + return err; + + if (!wait_for_completion_timeout(&hwc->hwc_init_eqe_comp, 60 * HZ)) + return -ETIMEDOUT; + + *q_depth = hwc->hwc_init_q_depth_max; + *max_req_msg_size = hwc->hwc_init_max_req_msg_size; + *max_resp_msg_size = hwc->hwc_init_max_resp_msg_size; + + if (WARN_ON(cq->id >= gc->max_num_cqs)) + return -EPROTO; + + gc->cq_table = vzalloc(gc->max_num_cqs * sizeof(struct gdma_queue *)); + if (!gc->cq_table) + return -ENOMEM; + + gc->cq_table[cq->id] = cq; + + return 0; +} + +static int mana_hwc_init_queues(struct hw_channel_context *hwc, u16 q_depth, + u32 max_req_msg_size, u32 max_resp_msg_size) +{ + struct hwc_wq *hwc_rxq = NULL; + struct hwc_wq *hwc_txq = NULL; + struct hwc_cq *hwc_cq = NULL; + int err; + + err = mana_hwc_init_inflight_msg(hwc, q_depth); + if (err) + return err; + + /* CQ is shared by SQ and RQ, so CQ's queue depth is the sum of SQ + * queue depth and RQ queue depth. + */ + err = mana_hwc_create_cq(hwc, q_depth * 2, + mana_hwc_init_event_handler, hwc, + mana_hwc_rx_event_handler, hwc, + mana_hwc_tx_event_handler, hwc, &hwc_cq); + if (err) { + dev_err(hwc->dev, "Failed to create HWC CQ: %d\n", err); + goto out; + } + hwc->cq = hwc_cq; + + err = mana_hwc_create_wq(hwc, GDMA_RQ, q_depth, max_req_msg_size, + hwc_cq, &hwc_rxq); + if (err) { + dev_err(hwc->dev, "Failed to create HWC RQ: %d\n", err); + goto out; + } + hwc->rxq = hwc_rxq; + + err = mana_hwc_create_wq(hwc, GDMA_SQ, q_depth, max_resp_msg_size, + hwc_cq, &hwc_txq); + if (err) { + dev_err(hwc->dev, "Failed to create HWC SQ: %d\n", err); + goto out; + } + hwc->txq = hwc_txq; + + hwc->num_inflight_msg = q_depth; + hwc->max_req_msg_size = max_req_msg_size; + + return 0; +out: + if (hwc_txq) + mana_hwc_destroy_wq(hwc, hwc_txq); + + if (hwc_rxq) + mana_hwc_destroy_wq(hwc, hwc_rxq); + + if (hwc_cq) + mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq); + + mana_gd_free_res_map(&hwc->inflight_msg_res); + return err; +} + +int mana_hwc_create_channel(struct gdma_context *gc) +{ + u32 max_req_msg_size, max_resp_msg_size; + struct gdma_dev *gd = &gc->hwc; + struct hw_channel_context *hwc; + u16 q_depth_max; + int err; + + hwc = kzalloc(sizeof(*hwc), GFP_KERNEL); + if (!hwc) + return -ENOMEM; + + gd->gdma_context = gc; + gd->driver_data = hwc; + hwc->gdma_dev = gd; + hwc->dev = gc->dev; + + /* HWC's instance number is always 0. 
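+ * Zeroing as_uint32 clears the instance field, so only the type needs setting below.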
*/ + gd->dev_id.as_uint32 = 0; + gd->dev_id.type = GDMA_DEVICE_HWC; + + gd->pdid = INVALID_PDID; + gd->doorbell = INVALID_DOORBELL; + + err = mana_hwc_init_queues(hwc, HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH, + HW_CHANNEL_MAX_REQUEST_SIZE, + HW_CHANNEL_MAX_RESPONSE_SIZE); + if (err) { + dev_err(hwc->dev, "Failed to initialize HWC: %d\n", err); + goto out; + } + + err = mana_hwc_establish_channel(gc, &q_depth_max, &max_req_msg_size, + &max_resp_msg_size); + if (err) { + dev_err(hwc->dev, "Failed to establish HWC: %d\n", err); + goto out; + } + + err = mana_hwc_test_channel(gc->hwc.driver_data, + HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH, + max_req_msg_size, max_resp_msg_size); + if (err) { + dev_err(hwc->dev, "Failed to test HWC: %d\n", err); + goto out; + } + + return 0; +out: + kfree(hwc); + return err; +} + +void mana_hwc_destroy_channel(struct gdma_context *gc) +{ + struct hw_channel_context *hwc = gc->hwc.driver_data; + struct hwc_caller_ctx *ctx; + + mana_smc_teardown_hwc(&gc->shm_channel, false); + + ctx = hwc->caller_ctx; + kfree(ctx); + hwc->caller_ctx = NULL; + + mana_hwc_destroy_wq(hwc, hwc->txq); + hwc->txq = NULL; + + mana_hwc_destroy_wq(hwc, hwc->rxq); + hwc->rxq = NULL; + + mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq); + hwc->cq = NULL; + + mana_gd_free_res_map(&hwc->inflight_msg_res); + + hwc->num_inflight_msg = 0; + + if (hwc->gdma_dev->pdid != INVALID_PDID) { + hwc->gdma_dev->doorbell = INVALID_DOORBELL; + hwc->gdma_dev->pdid = INVALID_PDID; + } + + kfree(hwc); + gc->hwc.driver_data = NULL; + gc->hwc.gdma_context = NULL; +} + +int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len, + const void *req, u32 resp_len, void *resp) +{ + struct hwc_work_request *tx_wr; + struct hwc_wq *txq = hwc->txq; + struct gdma_req_hdr *req_msg; + struct hwc_caller_ctx *ctx; + u16 msg_id; + int err; + + mana_hwc_get_msg_index(hwc, &msg_id); + + tx_wr = &txq->msg_buf->reqs[msg_id]; + + if (req_len > tx_wr->buf_len) { + dev_err(hwc->dev, "HWC: req msg size: %d > %d\n", req_len, + tx_wr->buf_len); + err = -EINVAL; + goto out; + } + + ctx = hwc->caller_ctx + msg_id; + ctx->output_buf = resp; + ctx->output_buflen = resp_len; + + req_msg = (struct gdma_req_hdr *)tx_wr->buf_va; + if (req) + memcpy(req_msg, req, req_len); + + req_msg->req.hwc_msg_id = msg_id; + + tx_wr->msg_size = req_len; + + err = mana_hwc_post_tx_wqe(txq, tx_wr, 0, 0, false); + if (err) { + dev_err(hwc->dev, "HWC: Failed to post send WQE: %d\n", err); + goto out; + } + + if (!wait_for_completion_timeout(&ctx->comp_event, 30 * HZ)) { + dev_err(hwc->dev, "HWC: Request timed out!\n"); + err = -ETIMEDOUT; + goto out; + } + + if (ctx->error) { + err = ctx->error; + goto out; + } + + if (ctx->status_code) { + dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n", + ctx->status_code); + err = -EPROTO; + goto out; + } +out: + mana_hwc_put_msg_index(hwc, msg_id); + return err; +} diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.h b/drivers/net/ethernet/microsoft/mana/hw_channel.h new file mode 100644 index 000000000000..31c6e83c454a --- /dev/null +++ b/drivers/net/ethernet/microsoft/mana/hw_channel.h @@ -0,0 +1,190 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright (c) 2021, Microsoft Corporation. 
*/ + +#ifndef _HW_CHANNEL_H +#define _HW_CHANNEL_H + +#define DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ 4 + +#define HW_CHANNEL_MAX_REQUEST_SIZE 0x1000 +#define HW_CHANNEL_MAX_RESPONSE_SIZE 0x1000 + +#define HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH 1 + +#define HWC_INIT_DATA_CQID 1 +#define HWC_INIT_DATA_RQID 2 +#define HWC_INIT_DATA_SQID 3 +#define HWC_INIT_DATA_QUEUE_DEPTH 4 +#define HWC_INIT_DATA_MAX_REQUEST 5 +#define HWC_INIT_DATA_MAX_RESPONSE 6 +#define HWC_INIT_DATA_MAX_NUM_CQS 7 +#define HWC_INIT_DATA_PDID 8 +#define HWC_INIT_DATA_GPA_MKEY 9 + +/* Structures labeled with "HW DATA" are exchanged with the hardware. All of + * them are naturally aligned and hence don't need __packed. + */ + +union hwc_init_eq_id_db { + u32 as_uint32; + + struct { + u32 eq_id : 16; + u32 doorbell : 16; + }; +}; /* HW DATA */ + +union hwc_init_type_data { + u32 as_uint32; + + struct { + u32 value : 24; + u32 type : 8; + }; +}; /* HW DATA */ + +struct hwc_rx_oob { + u32 type : 6; + u32 eom : 1; + u32 som : 1; + u32 vendor_err : 8; + u32 reserved1 : 16; + + u32 src_virt_wq : 24; + u32 src_vfid : 8; + + u32 reserved2; + + union { + u32 wqe_addr_low; + u32 wqe_offset; + }; + + u32 wqe_addr_high; + + u32 client_data_unit : 14; + u32 reserved3 : 18; + + u32 tx_oob_data_size; + + u32 chunk_offset : 21; + u32 reserved4 : 11; +}; /* HW DATA */ + +struct hwc_tx_oob { + u32 reserved1; + + u32 reserved2; + + u32 vrq_id : 24; + u32 dest_vfid : 8; + + u32 vrcq_id : 24; + u32 reserved3 : 8; + + u32 vscq_id : 24; + u32 loopback : 1; + u32 lso_override: 1; + u32 dest_pf : 1; + u32 reserved4 : 5; + + u32 vsq_id : 24; + u32 reserved5 : 8; +}; /* HW DATA */ + +struct hwc_work_request { + void *buf_va; + void *buf_sge_addr; + u32 buf_len; + u32 msg_size; + + struct gdma_wqe_request wqe_req; + struct hwc_tx_oob tx_oob; + + struct gdma_sge sge; +}; + +/* hwc_dma_buf represents the array of in-flight WQEs. + * mem_info, also known as the GDMA mapped memory, is partitioned and used + * by the in-flight WQEs. + * The number of WQEs is determined by the number of in-flight messages. 
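+ * Each hwc_work_request in reqs[] owns one max_msg_size slice of that buffer.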
+ */ +struct hwc_dma_buf { + struct gdma_mem_info mem_info; + + u32 gpa_mkey; + + u32 num_reqs; + struct hwc_work_request reqs[]; +}; + +typedef void hwc_rx_event_handler_t(void *ctx, u32 gdma_rxq_id, + const struct hwc_rx_oob *rx_oob); + +typedef void hwc_tx_event_handler_t(void *ctx, u32 gdma_txq_id, + const struct hwc_rx_oob *rx_oob); + +struct hwc_cq { + struct hw_channel_context *hwc; + + struct gdma_queue *gdma_cq; + struct gdma_queue *gdma_eq; + struct gdma_comp *comp_buf; + u16 queue_depth; + + hwc_rx_event_handler_t *rx_event_handler; + void *rx_event_ctx; + + hwc_tx_event_handler_t *tx_event_handler; + void *tx_event_ctx; +}; + +struct hwc_wq { + struct hw_channel_context *hwc; + + struct gdma_queue *gdma_wq; + struct hwc_dma_buf *msg_buf; + u16 queue_depth; + + struct hwc_cq *hwc_cq; +}; + +struct hwc_caller_ctx { + struct completion comp_event; + void *output_buf; + u32 output_buflen; + + u32 error; /* Linux error code */ + u32 status_code; +}; + +struct hw_channel_context { + struct gdma_dev *gdma_dev; + struct device *dev; + + u16 num_inflight_msg; + u32 max_req_msg_size; + + u16 hwc_init_q_depth_max; + u32 hwc_init_max_req_msg_size; + u32 hwc_init_max_resp_msg_size; + + struct completion hwc_init_eqe_comp; + + struct hwc_wq *rxq; + struct hwc_wq *txq; + struct hwc_cq *cq; + + struct semaphore sema; + struct gdma_resource inflight_msg_res; + + struct hwc_caller_ctx *caller_ctx; +}; + +int mana_hwc_create_channel(struct gdma_context *gc); +void mana_hwc_destroy_channel(struct gdma_context *gc); + +int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len, + const void *req, u32 resp_len, void *resp); + +#endif /* _HW_CHANNEL_H */ diff --git a/drivers/net/ethernet/microsoft/mana/mana.h b/drivers/net/ethernet/microsoft/mana/mana.h new file mode 100644 index 000000000000..a2c3f826f022 --- /dev/null +++ b/drivers/net/ethernet/microsoft/mana/mana.h @@ -0,0 +1,533 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright (c) 2021, Microsoft Corporation. */ + +#ifndef _MANA_H +#define _MANA_H + +#include "gdma.h" +#include "hw_channel.h" + +/* Microsoft Azure Network Adapter (MANA)'s definitions + * + * Structures labeled with "HW DATA" are exchanged with the hardware. All of + * them are naturally aligned and hence don't need __packed. + */ + +/* MANA protocol version */ +#define MANA_MAJOR_VERSION 0 +#define MANA_MINOR_VERSION 1 +#define MANA_MICRO_VERSION 1 + +typedef u64 mana_handle_t; +#define INVALID_MANA_HANDLE ((mana_handle_t)-1) + +enum TRI_STATE { + TRI_STATE_UNKNOWN = -1, + TRI_STATE_FALSE = 0, + TRI_STATE_TRUE = 1 +}; + +/* Number of entries for hardware indirection table must be a power of 2 */ +#define MANA_INDIRECT_TABLE_SIZE 64 +#define MANA_INDIRECT_TABLE_MASK (MANA_INDIRECT_TABLE_SIZE - 1) + +/* The Toeplitz hash key's length in bytes: should be a multiple of 8 */ +#define MANA_HASH_KEY_SIZE 40 + +#define COMP_ENTRY_SIZE 64 + +#define ADAPTER_MTU_SIZE 1500 +#define MAX_FRAME_SIZE (ADAPTER_MTU_SIZE + 14) + +#define RX_BUFFERS_PER_QUEUE 512 + +#define MAX_SEND_BUFFERS_PER_QUEUE 256 + +#define EQ_SIZE (8 * PAGE_SIZE) +#define LOG2_EQ_THROTTLE 3 + +#define MAX_PORTS_IN_MANA_DEV 16 + +struct mana_stats { + u64 packets; + u64 bytes; + struct u64_stats_sync syncp; +}; + +struct mana_txq { + struct gdma_queue *gdma_sq; + + union { + u32 gdma_txq_id; + struct { + u32 reserved1 : 10; + u32 vsq_frame : 14; + u32 reserved2 : 8; + }; + }; + + u16 vp_offset; + + struct net_device *ndev; + + /* The SKBs are sent to the HW and we are waiting for the CQEs. 
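+ * They are dequeued and freed by mana_poll_tx_cq() when the TX CQEs arrive.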
*/ + struct sk_buff_head pending_skbs; + struct netdev_queue *net_txq; + + atomic_t pending_sends; + + struct mana_stats stats; +}; + +/* skb data and frags dma mappings */ +struct mana_skb_head { + dma_addr_t dma_handle[MAX_SKB_FRAGS + 1]; + + u32 size[MAX_SKB_FRAGS + 1]; +}; + +#define MANA_HEADROOM sizeof(struct mana_skb_head) + +enum mana_tx_pkt_format { + MANA_SHORT_PKT_FMT = 0, + MANA_LONG_PKT_FMT = 1, +}; + +struct mana_tx_short_oob { + u32 pkt_fmt : 2; + u32 is_outer_ipv4 : 1; + u32 is_outer_ipv6 : 1; + u32 comp_iphdr_csum : 1; + u32 comp_tcp_csum : 1; + u32 comp_udp_csum : 1; + u32 supress_txcqe_gen : 1; + u32 vcq_num : 24; + + u32 trans_off : 10; /* Transport header offset */ + u32 vsq_frame : 14; + u32 short_vp_offset : 8; +}; /* HW DATA */ + +struct mana_tx_long_oob { + u32 is_encap : 1; + u32 inner_is_ipv6 : 1; + u32 inner_tcp_opt : 1; + u32 inject_vlan_pri_tag : 1; + u32 reserved1 : 12; + u32 pcp : 3; /* 802.1Q */ + u32 dei : 1; /* 802.1Q */ + u32 vlan_id : 12; /* 802.1Q */ + + u32 inner_frame_offset : 10; + u32 inner_ip_rel_offset : 6; + u32 long_vp_offset : 12; + u32 reserved2 : 4; + + u32 reserved3; + u32 reserved4; +}; /* HW DATA */ + +struct mana_tx_oob { + struct mana_tx_short_oob s_oob; + struct mana_tx_long_oob l_oob; +}; /* HW DATA */ + +enum mana_cq_type { + MANA_CQ_TYPE_RX, + MANA_CQ_TYPE_TX, +}; + +enum mana_cqe_type { + CQE_INVALID = 0, + CQE_RX_OKAY = 1, + CQE_RX_COALESCED_4 = 2, + CQE_RX_OBJECT_FENCE = 3, + CQE_RX_TRUNCATED = 4, + + CQE_TX_OKAY = 32, + CQE_TX_SA_DROP = 33, + CQE_TX_MTU_DROP = 34, + CQE_TX_INVALID_OOB = 35, + CQE_TX_INVALID_ETH_TYPE = 36, + CQE_TX_HDR_PROCESSING_ERROR = 37, + CQE_TX_VF_DISABLED = 38, + CQE_TX_VPORT_IDX_OUT_OF_RANGE = 39, + CQE_TX_VPORT_DISABLED = 40, + CQE_TX_VLAN_TAGGING_VIOLATION = 41, +}; + +#define MANA_CQE_COMPLETION 1 + +struct mana_cqe_header { + u32 cqe_type : 6; + u32 client_type : 2; + u32 vendor_err : 24; +}; /* HW DATA */ + +/* NDIS HASH Types */ +#define NDIS_HASH_IPV4 BIT(0) +#define NDIS_HASH_TCP_IPV4 BIT(1) +#define NDIS_HASH_UDP_IPV4 BIT(2) +#define NDIS_HASH_IPV6 BIT(3) +#define NDIS_HASH_TCP_IPV6 BIT(4) +#define NDIS_HASH_UDP_IPV6 BIT(5) +#define NDIS_HASH_IPV6_EX BIT(6) +#define NDIS_HASH_TCP_IPV6_EX BIT(7) +#define NDIS_HASH_UDP_IPV6_EX BIT(8) + +#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX) +#define MANA_HASH_L4 \ + (NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 | \ + NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX) + +struct mana_rxcomp_perpkt_info { + u32 pkt_len : 16; + u32 reserved1 : 16; + u32 reserved2; + u32 pkt_hash; +}; /* HW DATA */ + +#define MANA_RXCOMP_OOB_NUM_PPI 4 + +/* Receive completion OOB */ +struct mana_rxcomp_oob { + struct mana_cqe_header cqe_hdr; + + u32 rx_vlan_id : 12; + u32 rx_vlantag_present : 1; + u32 rx_outer_iphdr_csum_succeed : 1; + u32 rx_outer_iphdr_csum_fail : 1; + u32 reserved1 : 1; + u32 rx_hashtype : 9; + u32 rx_iphdr_csum_succeed : 1; + u32 rx_iphdr_csum_fail : 1; + u32 rx_tcp_csum_succeed : 1; + u32 rx_tcp_csum_fail : 1; + u32 rx_udp_csum_succeed : 1; + u32 rx_udp_csum_fail : 1; + u32 reserved2 : 1; + + struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI]; + + u32 rx_wqe_offset; +}; /* HW DATA */ + +struct mana_tx_comp_oob { + struct mana_cqe_header cqe_hdr; + + u32 tx_data_offset; + + u32 tx_sgl_offset : 5; + u32 tx_wqe_offset : 27; + + u32 reserved[12]; +}; /* HW DATA */ + +struct mana_rxq; + +struct mana_cq { + struct gdma_queue *gdma_cq; + + /* Cache the CQ id (used to verify if each CQE comes to 
the right CQ). */ + u32 gdma_id; + + /* Type of the CQ: TX or RX */ + enum mana_cq_type type; + + /* Pointer to the mana_rxq that is pushing RX CQEs to the queue. + * Must be non-NULL if and only if type is MANA_CQ_TYPE_RX. + */ + struct mana_rxq *rxq; + + /* Pointer to the mana_txq that is pushing TX CQEs to the queue. + * Must be non-NULL if and only if type is MANA_CQ_TYPE_TX. + */ + struct mana_txq *txq; + + /* Pointer to a buffer which the CQ handler can copy the CQEs into. */ + struct gdma_comp *gdma_comp_buf; +}; + +#define GDMA_MAX_RQE_SGES 15 + +struct mana_recv_buf_oob { + /* A valid GDMA work request representing the data buffer. */ + struct gdma_wqe_request wqe_req; + + void *buf_va; + dma_addr_t buf_dma_addr; + + /* SGL of the buffer going to be sent as part of the work request. */ + u32 num_sge; + struct gdma_sge sgl[GDMA_MAX_RQE_SGES]; + + /* Required to store the result of mana_gd_post_work_request. + * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the + * work queue when the WQE is consumed. + */ + struct gdma_posted_wqe_info wqe_inf; +}; + +struct mana_rxq { + struct gdma_queue *gdma_rq; + /* Cache the gdma receive queue id */ + u32 gdma_id; + + /* Index of RQ in the vPort, not gdma receive queue id */ + u32 rxq_idx; + + u32 datasize; + + mana_handle_t rxobj; + + struct mana_cq rx_cq; + + struct net_device *ndev; + + /* Total number of receive buffers to be allocated */ + u32 num_rx_buf; + + u32 buf_index; + + struct mana_stats stats; + + /* MUST BE THE LAST MEMBER: + * Each receive buffer has an associated mana_recv_buf_oob. + */ + struct mana_recv_buf_oob rx_oobs[]; +}; + +struct mana_tx_qp { + struct mana_txq txq; + + struct mana_cq tx_cq; + + mana_handle_t tx_object; +}; + +struct mana_ethtool_stats { + u64 stop_queue; + u64 wake_queue; +}; + +struct mana_context { + struct gdma_dev *gdma_dev; + + u16 num_ports; + + struct net_device *ports[MAX_PORTS_IN_MANA_DEV]; +}; + +struct mana_port_context { + struct mana_context *ac; + struct net_device *ndev; + + u8 mac_addr[ETH_ALEN]; + + struct mana_eq *eqs; + + enum TRI_STATE rss_state; + + mana_handle_t default_rxobj; + bool tx_shortform_allowed; + u16 tx_vp_offset; + + struct mana_tx_qp *tx_qp; + + /* Indirection Table for RX & TX. The values are queue indexes */ + u32 indir_table[MANA_INDIRECT_TABLE_SIZE]; + + /* Indirection table containing RxObject Handles */ + mana_handle_t rxobj_table[MANA_INDIRECT_TABLE_SIZE]; + + /* Hash key used by the NIC */ + u8 hashkey[MANA_HASH_KEY_SIZE]; + + /* This points to an array of num_queues RQ pointers. */ + struct mana_rxq **rxqs; + + /* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. 
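+ * num_queues is capped by max_queues, which mana_init_port() queries from the device.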
*/ + unsigned int max_queues; + unsigned int num_queues; + + mana_handle_t port_handle; + + u16 port_idx; + + bool port_is_up; + bool port_st_save; /* Saved port state */ + + struct mana_ethtool_stats eth_stats; +}; + +int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx, + bool update_hash, bool update_tab); + +int mana_alloc_queues(struct net_device *ndev); +int mana_attach(struct net_device *ndev); +int mana_detach(struct net_device *ndev, bool from_close); + +int mana_probe(struct gdma_dev *gd); +void mana_remove(struct gdma_dev *gd); + +extern const struct ethtool_ops mana_ethtool_ops; + +struct mana_obj_spec { + u32 queue_index; + u64 gdma_region; + u32 queue_size; + u32 attached_eq; + u32 modr_ctx_id; +}; + +enum mana_command_code { + MANA_QUERY_DEV_CONFIG = 0x20001, + MANA_QUERY_GF_STAT = 0x20002, + MANA_CONFIG_VPORT_TX = 0x20003, + MANA_CREATE_WQ_OBJ = 0x20004, + MANA_DESTROY_WQ_OBJ = 0x20005, + MANA_FENCE_RQ = 0x20006, + MANA_CONFIG_VPORT_RX = 0x20007, + MANA_QUERY_VPORT_CONFIG = 0x20008, +}; + +/* Query Device Configuration */ +struct mana_query_device_cfg_req { + struct gdma_req_hdr hdr; + + /* Driver Capability flags */ + u64 drv_cap_flags1; + u64 drv_cap_flags2; + u64 drv_cap_flags3; + u64 drv_cap_flags4; + + u32 proto_major_ver; + u32 proto_minor_ver; + u32 proto_micro_ver; + + u32 reserved; +}; /* HW DATA */ + +struct mana_query_device_cfg_resp { + struct gdma_resp_hdr hdr; + + u64 pf_cap_flags1; + u64 pf_cap_flags2; + u64 pf_cap_flags3; + u64 pf_cap_flags4; + + u16 max_num_vports; + u16 reserved; + u32 max_num_eqs; +}; /* HW DATA */ + +/* Query vPort Configuration */ +struct mana_query_vport_cfg_req { + struct gdma_req_hdr hdr; + u32 vport_index; +}; /* HW DATA */ + +struct mana_query_vport_cfg_resp { + struct gdma_resp_hdr hdr; + u32 max_num_sq; + u32 max_num_rq; + u32 num_indirection_ent; + u32 reserved1; + u8 mac_addr[6]; + u8 reserved2[2]; + mana_handle_t vport; +}; /* HW DATA */ + +/* Configure vPort */ +struct mana_config_vport_req { + struct gdma_req_hdr hdr; + mana_handle_t vport; + u32 pdid; + u32 doorbell_pageid; +}; /* HW DATA */ + +struct mana_config_vport_resp { + struct gdma_resp_hdr hdr; + u16 tx_vport_offset; + u8 short_form_allowed; + u8 reserved; +}; /* HW DATA */ + +/* Create WQ Object */ +struct mana_create_wqobj_req { + struct gdma_req_hdr hdr; + mana_handle_t vport; + u32 wq_type; + u32 reserved; + u64 wq_gdma_region; + u64 cq_gdma_region; + u32 wq_size; + u32 cq_size; + u32 cq_moderation_ctx_id; + u32 cq_parent_qid; +}; /* HW DATA */ + +struct mana_create_wqobj_resp { + struct gdma_resp_hdr hdr; + u32 wq_id; + u32 cq_id; + mana_handle_t wq_obj; +}; /* HW DATA */ + +/* Destroy WQ Object */ +struct mana_destroy_wqobj_req { + struct gdma_req_hdr hdr; + u32 wq_type; + u32 reserved; + mana_handle_t wq_obj_handle; +}; /* HW DATA */ + +struct mana_destroy_wqobj_resp { + struct gdma_resp_hdr hdr; +}; /* HW DATA */ + +/* Fence RQ */ +struct mana_fence_rq_req { + struct gdma_req_hdr hdr; + mana_handle_t wq_obj_handle; +}; /* HW DATA */ + +struct mana_fence_rq_resp { + struct gdma_resp_hdr hdr; +}; /* HW DATA */ + +/* Configure vPort Rx Steering */ +struct mana_cfg_rx_steer_req { + struct gdma_req_hdr hdr; + mana_handle_t vport; + u16 num_indir_entries; + u16 indir_tab_offset; + u32 rx_enable; + u32 rss_enable; + u8 update_default_rxobj; + u8 update_hashkey; + u8 update_indir_tab; + u8 reserved; + mana_handle_t default_rxobj; + u8 hashkey[MANA_HASH_KEY_SIZE]; +}; /* HW DATA */ + +struct mana_cfg_rx_steer_resp { + struct gdma_resp_hdr hdr; +}; /* 
HW DATA */ + +#define MANA_MAX_NUM_QUEUES 16 + +#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1) + +struct mana_tx_package { + struct gdma_wqe_request wqe_req; + struct gdma_sge sgl_array[5]; + struct gdma_sge *sgl_ptr; + + struct mana_tx_oob tx_oob; + + struct gdma_posted_wqe_info wqe_info; +}; + +#endif /* _MANA_H */ diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c new file mode 100644 index 000000000000..a744ca0b6c19 --- /dev/null +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -0,0 +1,1895 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright (c) 2021, Microsoft Corporation. */ + +#include <linux/inetdevice.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/mm.h> + +#include <net/checksum.h> +#include <net/ip6_checksum.h> + +#include "mana.h" + +/* Microsoft Azure Network Adapter (MANA) functions */ + +static int mana_open(struct net_device *ndev) +{ + struct mana_port_context *apc = netdev_priv(ndev); + int err; + + err = mana_alloc_queues(ndev); + if (err) + return err; + + apc->port_is_up = true; + + /* Ensure port state updated before txq state */ + smp_wmb(); + + netif_carrier_on(ndev); + netif_tx_wake_all_queues(ndev); + + return 0; +} + +static int mana_close(struct net_device *ndev) +{ + struct mana_port_context *apc = netdev_priv(ndev); + + if (!apc->port_is_up) + return 0; + + return mana_detach(ndev, true); +} + +static bool mana_can_tx(struct gdma_queue *wq) +{ + return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE; +} + +static unsigned int mana_checksum_info(struct sk_buff *skb) +{ + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *ip = ip_hdr(skb); + + if (ip->protocol == IPPROTO_TCP) + return IPPROTO_TCP; + + if (ip->protocol == IPPROTO_UDP) + return IPPROTO_UDP; + } else if (skb->protocol == htons(ETH_P_IPV6)) { + struct ipv6hdr *ip6 = ipv6_hdr(skb); + + if (ip6->nexthdr == IPPROTO_TCP) + return IPPROTO_TCP; + + if (ip6->nexthdr == IPPROTO_UDP) + return IPPROTO_UDP; + } + + /* No csum offloading */ + return 0; +} + +static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc, + struct mana_tx_package *tp) +{ + struct mana_skb_head *ash = (struct mana_skb_head *)skb->head; + struct gdma_dev *gd = apc->ac->gdma_dev; + struct gdma_context *gc; + struct device *dev; + skb_frag_t *frag; + dma_addr_t da; + int i; + + gc = gd->gdma_context; + dev = gc->dev; + da = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); + + if (dma_mapping_error(dev, da)) + return -ENOMEM; + + ash->dma_handle[0] = da; + ash->size[0] = skb_headlen(skb); + + tp->wqe_req.sgl[0].address = ash->dma_handle[0]; + tp->wqe_req.sgl[0].mem_key = gd->gpa_mkey; + tp->wqe_req.sgl[0].size = ash->size[0]; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + frag = &skb_shinfo(skb)->frags[i]; + da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag), + DMA_TO_DEVICE); + + if (dma_mapping_error(dev, da)) + goto frag_err; + + ash->dma_handle[i + 1] = da; + ash->size[i + 1] = skb_frag_size(frag); + + tp->wqe_req.sgl[i + 1].address = ash->dma_handle[i + 1]; + tp->wqe_req.sgl[i + 1].mem_key = gd->gpa_mkey; + tp->wqe_req.sgl[i + 1].size = ash->size[i + 1]; + } + + return 0; + +frag_err: + for (i = i - 1; i >= 0; i--) + dma_unmap_page(dev, ash->dma_handle[i + 1], ash->size[i + 1], + DMA_TO_DEVICE); + + dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE); + + return -ENOMEM; +} + +static int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ 
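+ /* Per-packet TX entry point: build the TX OOB (short or long format), apply GSO/checksum offload settings, DMA-map the skb, then post the WQE and ring the SQ doorbell. */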
+ enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT; + struct mana_port_context *apc = netdev_priv(ndev); + u16 txq_idx = skb_get_queue_mapping(skb); + struct gdma_dev *gd = apc->ac->gdma_dev; + bool ipv4 = false, ipv6 = false; + struct mana_tx_package pkg = {}; + struct netdev_queue *net_txq; + struct mana_stats *tx_stats; + struct gdma_queue *gdma_sq; + unsigned int csum_type; + struct mana_txq *txq; + struct mana_cq *cq; + int err, len; + + if (unlikely(!apc->port_is_up)) + goto tx_drop; + + if (skb_cow_head(skb, MANA_HEADROOM)) + goto tx_drop_count; + + txq = &apc->tx_qp[txq_idx].txq; + gdma_sq = txq->gdma_sq; + cq = &apc->tx_qp[txq_idx].tx_cq; + + pkg.tx_oob.s_oob.vcq_num = cq->gdma_id; + pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame; + + if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) { + pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset; + pkt_fmt = MANA_LONG_PKT_FMT; + } else { + pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset; + } + + pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt; + + if (pkt_fmt == MANA_SHORT_PKT_FMT) + pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob); + else + pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob); + + pkg.wqe_req.inline_oob_data = &pkg.tx_oob; + pkg.wqe_req.flags = 0; + pkg.wqe_req.client_data_unit = 0; + + pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags; + WARN_ON_ONCE(pkg.wqe_req.num_sge > 30); + + if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) { + pkg.wqe_req.sgl = pkg.sgl_array; + } else { + pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge, + sizeof(struct gdma_sge), + GFP_ATOMIC); + if (!pkg.sgl_ptr) + goto tx_drop_count; + + pkg.wqe_req.sgl = pkg.sgl_ptr; + } + + if (skb->protocol == htons(ETH_P_IP)) + ipv4 = true; + else if (skb->protocol == htons(ETH_P_IPV6)) + ipv6 = true; + + if (skb_is_gso(skb)) { + pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4; + pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6; + + pkg.tx_oob.s_oob.comp_iphdr_csum = 1; + pkg.tx_oob.s_oob.comp_tcp_csum = 1; + pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb); + + pkg.wqe_req.client_data_unit = skb_shinfo(skb)->gso_size; + pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0; + if (ipv4) { + ip_hdr(skb)->tot_len = 0; + ip_hdr(skb)->check = 0; + tcp_hdr(skb)->check = + ~csum_tcpudp_magic(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, 0, + IPPROTO_TCP, 0); + } else { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, 0, + IPPROTO_TCP, 0); + } + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + csum_type = mana_checksum_info(skb); + + if (csum_type == IPPROTO_TCP) { + pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4; + pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6; + + pkg.tx_oob.s_oob.comp_tcp_csum = 1; + pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb); + + } else if (csum_type == IPPROTO_UDP) { + pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4; + pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6; + + pkg.tx_oob.s_oob.comp_udp_csum = 1; + } else { + /* Can't do offload of this type of checksum */ + if (skb_checksum_help(skb)) + goto free_sgl_ptr; + } + } + + if (mana_map_skb(skb, apc, &pkg)) + goto free_sgl_ptr; + + skb_queue_tail(&txq->pending_skbs, skb); + + len = skb->len; + net_txq = netdev_get_tx_queue(ndev, txq_idx); + + err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req, + (struct gdma_posted_wqe_info *)skb->cb); + if (!mana_can_tx(gdma_sq)) { + netif_tx_stop_queue(net_txq); + apc->eth_stats.stop_queue++; + } + + if (err) { + (void)skb_dequeue_tail(&txq->pending_skbs); + netdev_warn(ndev, "Failed to post TX 
OOB: %d\n", err); + err = NETDEV_TX_BUSY; + goto tx_busy; + } + + err = NETDEV_TX_OK; + atomic_inc(&txq->pending_sends); + + mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq); + + /* skb may be freed after mana_gd_post_work_request. Do not use it. */ + skb = NULL; + + tx_stats = &txq->stats; + u64_stats_update_begin(&tx_stats->syncp); + tx_stats->packets++; + tx_stats->bytes += len; + u64_stats_update_end(&tx_stats->syncp); + +tx_busy: + if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) { + netif_tx_wake_queue(net_txq); + apc->eth_stats.wake_queue++; + } + + kfree(pkg.sgl_ptr); + return err; + +free_sgl_ptr: + kfree(pkg.sgl_ptr); +tx_drop_count: + ndev->stats.tx_dropped++; +tx_drop: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +static void mana_get_stats64(struct net_device *ndev, + struct rtnl_link_stats64 *st) +{ + struct mana_port_context *apc = netdev_priv(ndev); + unsigned int num_queues = apc->num_queues; + struct mana_stats *stats; + unsigned int start; + u64 packets, bytes; + int q; + + if (!apc->port_is_up) + return; + + netdev_stats_to_stats64(st, &ndev->stats); + + for (q = 0; q < num_queues; q++) { + stats = &apc->rxqs[q]->stats; + + do { + start = u64_stats_fetch_begin_irq(&stats->syncp); + packets = stats->packets; + bytes = stats->bytes; + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + + st->rx_packets += packets; + st->rx_bytes += bytes; + } + + for (q = 0; q < num_queues; q++) { + stats = &apc->tx_qp[q].txq.stats; + + do { + start = u64_stats_fetch_begin_irq(&stats->syncp); + packets = stats->packets; + bytes = stats->bytes; + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + + st->tx_packets += packets; + st->tx_bytes += bytes; + } +} + +static int mana_get_tx_queue(struct net_device *ndev, struct sk_buff *skb, + int old_q) +{ + struct mana_port_context *apc = netdev_priv(ndev); + u32 hash = skb_get_hash(skb); + struct sock *sk = skb->sk; + int txq; + + txq = apc->indir_table[hash & MANA_INDIRECT_TABLE_MASK]; + + if (txq != old_q && sk && sk_fullsock(sk) && + rcu_access_pointer(sk->sk_dst_cache)) + sk_tx_queue_set(sk, txq); + + return txq; +} + +static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb, + struct net_device *sb_dev) +{ + int txq; + + if (ndev->real_num_tx_queues == 1) + return 0; + + txq = sk_tx_queue_get(skb->sk); + + if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) { + if (skb_rx_queue_recorded(skb)) + txq = skb_get_rx_queue(skb); + else + txq = mana_get_tx_queue(ndev, skb, txq); + } + + return txq; +} + +static const struct net_device_ops mana_devops = { + .ndo_open = mana_open, + .ndo_stop = mana_close, + .ndo_select_queue = mana_select_queue, + .ndo_start_xmit = mana_start_xmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_get_stats64 = mana_get_stats64, +}; + +static void mana_cleanup_port_context(struct mana_port_context *apc) +{ + kfree(apc->rxqs); + apc->rxqs = NULL; +} + +static int mana_init_port_context(struct mana_port_context *apc) +{ + apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *), + GFP_KERNEL); + + return !apc->rxqs ? 
-ENOMEM : 0; +} + +static int mana_send_request(struct mana_context *ac, void *in_buf, + u32 in_len, void *out_buf, u32 out_len) +{ + struct gdma_context *gc = ac->gdma_dev->gdma_context; + struct gdma_resp_hdr *resp = out_buf; + struct gdma_req_hdr *req = in_buf; + struct device *dev = gc->dev; + static atomic_t activity_id; + int err; + + req->dev_id = gc->mana.dev_id; + req->activity_id = atomic_inc_return(&activity_id); + + err = mana_gd_send_request(gc, in_len, in_buf, out_len, + out_buf); + if (err || resp->status) { + dev_err(dev, "Failed to send mana message: %d, 0x%x\n", + err, resp->status); + return err ? err : -EPROTO; + } + + if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 || + req->activity_id != resp->activity_id) { + dev_err(dev, "Unexpected mana message response: %x,%x,%x,%x\n", + req->dev_id.as_uint32, resp->dev_id.as_uint32, + req->activity_id, resp->activity_id); + return -EPROTO; + } + + return 0; +} + +static int mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr, + const enum mana_command_code expected_code, + const u32 min_size) +{ + if (resp_hdr->response.msg_type != expected_code) + return -EPROTO; + + if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1) + return -EPROTO; + + if (resp_hdr->response.msg_size < min_size) + return -EPROTO; + + return 0; +} + +static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver, + u32 proto_minor_ver, u32 proto_micro_ver, + u16 *max_num_vports) +{ + struct gdma_context *gc = ac->gdma_dev->gdma_context; + struct mana_query_device_cfg_resp resp = {}; + struct mana_query_device_cfg_req req = {}; + struct device *dev = gc->dev; + int err = 0; + + mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG, + sizeof(req), sizeof(resp)); + req.proto_major_ver = proto_major_ver; + req.proto_minor_ver = proto_minor_ver; + req.proto_micro_ver = proto_micro_ver; + + err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp)); + if (err) { + dev_err(dev, "Failed to query config: %d", err); + return err; + } + + err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG, + sizeof(resp)); + if (err || resp.hdr.status) { + dev_err(dev, "Invalid query result: %d, 0x%x\n", err, + resp.hdr.status); + if (!err) + err = -EPROTO; + return err; + } + + *max_num_vports = resp.max_num_vports; + + return 0; +} + +static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index, + u32 *max_sq, u32 *max_rq, u32 *num_indir_entry) +{ + struct mana_query_vport_cfg_resp resp = {}; + struct mana_query_vport_cfg_req req = {}; + int err; + + mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG, + sizeof(req), sizeof(resp)); + + req.vport_index = vport_index; + + err = mana_send_request(apc->ac, &req, sizeof(req), &resp, + sizeof(resp)); + if (err) + return err; + + err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG, + sizeof(resp)); + if (err) + return err; + + if (resp.hdr.status) + return -EPROTO; + + *max_sq = resp.max_num_sq; + *max_rq = resp.max_num_rq; + *num_indir_entry = resp.num_indirection_ent; + + apc->port_handle = resp.vport; + ether_addr_copy(apc->mac_addr, resp.mac_addr); + + return 0; +} + +static int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id, + u32 doorbell_pg_id) +{ + struct mana_config_vport_resp resp = {}; + struct mana_config_vport_req req = {}; + int err; + + mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX, + sizeof(req), sizeof(resp)); + req.vport = apc->port_handle; + req.pdid = protection_dom_id; + req.doorbell_pageid = doorbell_pg_id; + + err = 
mana_send_request(apc->ac, &req, sizeof(req), &resp, + sizeof(resp)); + if (err) { + netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err); + goto out; + } + + err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX, + sizeof(resp)); + if (err || resp.hdr.status) { + netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n", + err, resp.hdr.status); + if (!err) + err = -EPROTO; + + goto out; + } + + apc->tx_shortform_allowed = resp.short_form_allowed; + apc->tx_vp_offset = resp.tx_vport_offset; +out: + return err; +} + +static int mana_cfg_vport_steering(struct mana_port_context *apc, + enum TRI_STATE rx, + bool update_default_rxobj, bool update_key, + bool update_tab) +{ + u16 num_entries = MANA_INDIRECT_TABLE_SIZE; + struct mana_cfg_rx_steer_req *req = NULL; + struct mana_cfg_rx_steer_resp resp = {}; + struct net_device *ndev = apc->ndev; + mana_handle_t *req_indir_tab; + u32 req_buf_size; + int err; + + req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries; + req = kzalloc(req_buf_size, GFP_KERNEL); + if (!req) + return -ENOMEM; + + mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size, + sizeof(resp)); + + req->vport = apc->port_handle; + req->num_indir_entries = num_entries; + req->indir_tab_offset = sizeof(*req); + req->rx_enable = rx; + req->rss_enable = apc->rss_state; + req->update_default_rxobj = update_default_rxobj; + req->update_hashkey = update_key; + req->update_indir_tab = update_tab; + req->default_rxobj = apc->default_rxobj; + + if (update_key) + memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE); + + if (update_tab) { + req_indir_tab = (mana_handle_t *)(req + 1); + memcpy(req_indir_tab, apc->rxobj_table, + req->num_indir_entries * sizeof(mana_handle_t)); + } + + err = mana_send_request(apc->ac, req, req_buf_size, &resp, + sizeof(resp)); + if (err) { + netdev_err(ndev, "Failed to configure vPort RX: %d\n", err); + goto out; + } + + err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX, + sizeof(resp)); + if (err) { + netdev_err(ndev, "vPort RX configuration failed: %d\n", err); + goto out; + } + + if (resp.hdr.status) { + netdev_err(ndev, "vPort RX configuration failed: 0x%x\n", + resp.hdr.status); + err = -EPROTO; + } +out: + kfree(req); + return err; +} + +static int mana_create_wq_obj(struct mana_port_context *apc, + mana_handle_t vport, + u32 wq_type, struct mana_obj_spec *wq_spec, + struct mana_obj_spec *cq_spec, + mana_handle_t *wq_obj) +{ + struct mana_create_wqobj_resp resp = {}; + struct mana_create_wqobj_req req = {}; + struct net_device *ndev = apc->ndev; + int err; + + mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ, + sizeof(req), sizeof(resp)); + req.vport = vport; + req.wq_type = wq_type; + req.wq_gdma_region = wq_spec->gdma_region; + req.cq_gdma_region = cq_spec->gdma_region; + req.wq_size = wq_spec->queue_size; + req.cq_size = cq_spec->queue_size; + req.cq_moderation_ctx_id = cq_spec->modr_ctx_id; + req.cq_parent_qid = cq_spec->attached_eq; + + err = mana_send_request(apc->ac, &req, sizeof(req), &resp, + sizeof(resp)); + if (err) { + netdev_err(ndev, "Failed to create WQ object: %d\n", err); + goto out; + } + + err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ, + sizeof(resp)); + if (err || resp.hdr.status) { + netdev_err(ndev, "Failed to create WQ object: %d, 0x%x\n", err, + resp.hdr.status); + if (!err) + err = -EPROTO; + goto out; + } + + if (resp.wq_obj == INVALID_MANA_HANDLE) { + netdev_err(ndev, "Got an invalid WQ object handle\n"); + err = -EPROTO; + goto out; + } + + *wq_obj = resp.wq_obj; 
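+ /* Hand the hardware-assigned WQ/CQ ids back through the caller's specs. */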
+ wq_spec->queue_index = resp.wq_id; + cq_spec->queue_index = resp.cq_id; + + return 0; +out: + return err; +} + +static void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type, + mana_handle_t wq_obj) +{ + struct mana_destroy_wqobj_resp resp = {}; + struct mana_destroy_wqobj_req req = {}; + struct net_device *ndev = apc->ndev; + int err; + + mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ, + sizeof(req), sizeof(resp)); + req.wq_type = wq_type; + req.wq_obj_handle = wq_obj; + + err = mana_send_request(apc->ac, &req, sizeof(req), &resp, + sizeof(resp)); + if (err) { + netdev_err(ndev, "Failed to destroy WQ object: %d\n", err); + return; + } + + err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ, + sizeof(resp)); + if (err || resp.hdr.status) + netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err, + resp.hdr.status); +} + +static void mana_init_cqe_poll_buf(struct gdma_comp *cqe_poll_buf) +{ + int i; + + for (i = 0; i < CQE_POLLING_BUFFER; i++) + memset(&cqe_poll_buf[i], 0, sizeof(struct gdma_comp)); +} + +static void mana_destroy_eq(struct gdma_context *gc, + struct mana_port_context *apc) +{ + struct gdma_queue *eq; + int i; + + if (!apc->eqs) + return; + + for (i = 0; i < apc->num_queues; i++) { + eq = apc->eqs[i].eq; + if (!eq) + continue; + + mana_gd_destroy_queue(gc, eq); + } + + kfree(apc->eqs); + apc->eqs = NULL; +} + +static int mana_create_eq(struct mana_port_context *apc) +{ + struct gdma_dev *gd = apc->ac->gdma_dev; + struct gdma_queue_spec spec = {}; + int err; + int i; + + apc->eqs = kcalloc(apc->num_queues, sizeof(struct mana_eq), + GFP_KERNEL); + if (!apc->eqs) + return -ENOMEM; + + spec.type = GDMA_EQ; + spec.monitor_avl_buf = false; + spec.queue_size = EQ_SIZE; + spec.eq.callback = NULL; + spec.eq.context = apc->eqs; + spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE; + spec.eq.ndev = apc->ndev; + + for (i = 0; i < apc->num_queues; i++) { + mana_init_cqe_poll_buf(apc->eqs[i].cqe_poll); + + err = mana_gd_create_mana_eq(gd, &spec, &apc->eqs[i].eq); + if (err) + goto out; + } + + return 0; +out: + mana_destroy_eq(gd->gdma_context, apc); + return err; +} + +static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units) +{ + u32 used_space_old; + u32 used_space_new; + + used_space_old = wq->head - wq->tail; + used_space_new = wq->head - (wq->tail + num_units); + + if (WARN_ON_ONCE(used_space_new > used_space_old)) + return -ERANGE; + + wq->tail += num_units; + return 0; +} + +static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc) +{ + struct mana_skb_head *ash = (struct mana_skb_head *)skb->head; + struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; + struct device *dev = gc->dev; + int i; + + dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE); + + for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++) + dma_unmap_page(dev, ash->dma_handle[i], ash->size[i], + DMA_TO_DEVICE); +} + +static void mana_poll_tx_cq(struct mana_cq *cq) +{ + struct gdma_queue *gdma_eq = cq->gdma_cq->cq.parent; + struct gdma_comp *completions = cq->gdma_comp_buf; + struct gdma_posted_wqe_info *wqe_info; + unsigned int pkt_transmitted = 0; + unsigned int wqe_unit_cnt = 0; + struct mana_txq *txq = cq->txq; + struct mana_port_context *apc; + struct netdev_queue *net_txq; + struct gdma_queue *gdma_wq; + unsigned int avail_space; + struct net_device *ndev; + struct sk_buff *skb; + bool txq_stopped; + int comp_read; + int i; + + ndev = txq->ndev; + apc = netdev_priv(ndev); + + comp_read = mana_gd_poll_cq(cq->gdma_cq, completions, 
+ CQE_POLLING_BUFFER); + + for (i = 0; i < comp_read; i++) { + struct mana_tx_comp_oob *cqe_oob; + + if (WARN_ON_ONCE(!completions[i].is_sq)) + return; + + cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data; + if (WARN_ON_ONCE(cqe_oob->cqe_hdr.client_type != + MANA_CQE_COMPLETION)) + return; + + switch (cqe_oob->cqe_hdr.cqe_type) { + case CQE_TX_OKAY: + break; + + case CQE_TX_SA_DROP: + case CQE_TX_MTU_DROP: + case CQE_TX_INVALID_OOB: + case CQE_TX_INVALID_ETH_TYPE: + case CQE_TX_HDR_PROCESSING_ERROR: + case CQE_TX_VF_DISABLED: + case CQE_TX_VPORT_IDX_OUT_OF_RANGE: + case CQE_TX_VPORT_DISABLED: + case CQE_TX_VLAN_TAGGING_VIOLATION: + WARN_ONCE(1, "TX: CQE error %d: ignored.\n", + cqe_oob->cqe_hdr.cqe_type); + break; + + default: + /* If the CQE type is unexpected, log an error, assert, + * and go through the error path. + */ + WARN_ONCE(1, "TX: Unexpected CQE type %d: HW BUG?\n", + cqe_oob->cqe_hdr.cqe_type); + return; + } + + if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num)) + return; + + skb = skb_dequeue(&txq->pending_skbs); + if (WARN_ON_ONCE(!skb)) + return; + + wqe_info = (struct gdma_posted_wqe_info *)skb->cb; + wqe_unit_cnt += wqe_info->wqe_size_in_bu; + + mana_unmap_skb(skb, apc); + + napi_consume_skb(skb, gdma_eq->eq.budget); + + pkt_transmitted++; + } + + if (WARN_ON_ONCE(wqe_unit_cnt == 0)) + return; + + mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt); + + gdma_wq = txq->gdma_sq; + avail_space = mana_gd_wq_avail_space(gdma_wq); + + /* Ensure tail updated before checking q stop */ + smp_mb(); + + net_txq = txq->net_txq; + txq_stopped = netif_tx_queue_stopped(net_txq); + + /* Ensure checking txq_stopped before apc->port_is_up. */ + smp_rmb(); + + if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) { + netif_tx_wake_queue(net_txq); + apc->eth_stats.wake_queue++; + } + + if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0) + WARN_ON_ONCE(1); +} + +static void mana_post_pkt_rxq(struct mana_rxq *rxq) +{ + struct mana_recv_buf_oob *recv_buf_oob; + u32 curr_index; + int err; + + curr_index = rxq->buf_index++; + if (rxq->buf_index == rxq->num_rx_buf) + rxq->buf_index = 0; + + recv_buf_oob = &rxq->rx_oobs[curr_index]; + + err = mana_gd_post_and_ring(rxq->gdma_rq, &recv_buf_oob->wqe_req, + &recv_buf_oob->wqe_inf); + if (WARN_ON_ONCE(err)) + return; + + WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1); +} + +static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe, + struct mana_rxq *rxq) +{ + struct mana_stats *rx_stats = &rxq->stats; + struct net_device *ndev = rxq->ndev; + uint pkt_len = cqe->ppi[0].pkt_len; + struct mana_port_context *apc; + u16 rxq_idx = rxq->rxq_idx; + struct napi_struct *napi; + struct gdma_queue *eq; + struct sk_buff *skb; + u32 hash_value; + + apc = netdev_priv(ndev); + eq = apc->eqs[rxq_idx].eq; + eq->eq.work_done++; + napi = &eq->eq.napi; + + if (!buf_va) { + ++ndev->stats.rx_dropped; + return; + } + + skb = build_skb(buf_va, PAGE_SIZE); + + if (!skb) { + free_page((unsigned long)buf_va); + ++ndev->stats.rx_dropped; + return; + } + + skb_put(skb, pkt_len); + skb->dev = napi->dev; + + skb->protocol = eth_type_trans(skb, ndev); + skb_checksum_none_assert(skb); + skb_record_rx_queue(skb, rxq_idx); + + if ((ndev->features & NETIF_F_RXCSUM) && cqe->rx_iphdr_csum_succeed) { + if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed) + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + + if (cqe->rx_hashtype != 0 && (ndev->features & NETIF_F_RXHASH)) { + hash_value = cqe->ppi[0].pkt_hash; + + if 
(cqe->rx_hashtype & MANA_HASH_L4) + skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L4); + else + skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3); + } + + napi_gro_receive(napi, skb); + + u64_stats_update_begin(&rx_stats->syncp); + rx_stats->packets++; + rx_stats->bytes += pkt_len; + u64_stats_update_end(&rx_stats->syncp); +} + +static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq, + struct gdma_comp *cqe) +{ + struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data; + struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context; + struct net_device *ndev = rxq->ndev; + struct mana_recv_buf_oob *rxbuf_oob; + struct device *dev = gc->dev; + void *new_buf, *old_buf; + struct page *new_page; + u32 curr, pktlen; + dma_addr_t da; + + switch (oob->cqe_hdr.cqe_type) { + case CQE_RX_OKAY: + break; + + case CQE_RX_TRUNCATED: + netdev_err(ndev, "Dropped a truncated packet\n"); + return; + + case CQE_RX_COALESCED_4: + netdev_err(ndev, "RX coalescing is unsupported\n"); + return; + + case CQE_RX_OBJECT_FENCE: + netdev_err(ndev, "RX Fencing is unsupported\n"); + return; + + default: + netdev_err(ndev, "Unknown RX CQE type = %d\n", + oob->cqe_hdr.cqe_type); + return; + } + + if (oob->cqe_hdr.cqe_type != CQE_RX_OKAY) + return; + + pktlen = oob->ppi[0].pkt_len; + + if (pktlen == 0) { + /* data packets should never have packetlength of zero */ + netdev_err(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n", + rxq->gdma_id, cq->gdma_id, rxq->rxobj); + return; + } + + curr = rxq->buf_index; + rxbuf_oob = &rxq->rx_oobs[curr]; + WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1); + + new_page = alloc_page(GFP_ATOMIC); + + if (new_page) { + da = dma_map_page(dev, new_page, 0, rxq->datasize, + DMA_FROM_DEVICE); + + if (dma_mapping_error(dev, da)) { + __free_page(new_page); + new_page = NULL; + } + } + + new_buf = new_page ? 
page_to_virt(new_page) : NULL; + + if (new_buf) { + dma_unmap_page(dev, rxbuf_oob->buf_dma_addr, rxq->datasize, + DMA_FROM_DEVICE); + + old_buf = rxbuf_oob->buf_va; + + /* refresh the rxbuf_oob with the new page */ + rxbuf_oob->buf_va = new_buf; + rxbuf_oob->buf_dma_addr = da; + rxbuf_oob->sgl[0].address = rxbuf_oob->buf_dma_addr; + } else { + old_buf = NULL; /* drop the packet if no memory */ + } + + mana_rx_skb(old_buf, oob, rxq); + + mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu); + + mana_post_pkt_rxq(rxq); +} + +static void mana_poll_rx_cq(struct mana_cq *cq) +{ + struct gdma_comp *comp = cq->gdma_comp_buf; + u32 comp_read, i; + + comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER); + WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER); + + for (i = 0; i < comp_read; i++) { + if (WARN_ON_ONCE(comp[i].is_sq)) + return; + + /* verify recv cqe references the right rxq */ + if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id)) + return; + + mana_process_rx_cqe(cq->rxq, cq, &comp[i]); + } +} + +static void mana_cq_handler(void *context, struct gdma_queue *gdma_queue) +{ + struct mana_cq *cq = context; + + WARN_ON_ONCE(cq->gdma_cq != gdma_queue); + + if (cq->type == MANA_CQ_TYPE_RX) + mana_poll_rx_cq(cq); + else + mana_poll_tx_cq(cq); + + mana_gd_arm_cq(gdma_queue); +} + +static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq) +{ + struct gdma_dev *gd = apc->ac->gdma_dev; + + if (!cq->gdma_cq) + return; + + mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq); +} + +static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq) +{ + struct gdma_dev *gd = apc->ac->gdma_dev; + + if (!txq->gdma_sq) + return; + + mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq); +} + +static void mana_destroy_txq(struct mana_port_context *apc) +{ + int i; + + if (!apc->tx_qp) + return; + + for (i = 0; i < apc->num_queues; i++) { + mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object); + + mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq); + + mana_deinit_txq(apc, &apc->tx_qp[i].txq); + } + + kfree(apc->tx_qp); + apc->tx_qp = NULL; +} + +static int mana_create_txq(struct mana_port_context *apc, + struct net_device *net) +{ + struct gdma_dev *gd = apc->ac->gdma_dev; + struct mana_obj_spec wq_spec; + struct mana_obj_spec cq_spec; + struct gdma_queue_spec spec; + struct gdma_context *gc; + struct mana_txq *txq; + struct mana_cq *cq; + u32 txq_size; + u32 cq_size; + int err; + int i; + + apc->tx_qp = kcalloc(apc->num_queues, sizeof(struct mana_tx_qp), + GFP_KERNEL); + if (!apc->tx_qp) + return -ENOMEM; + + /* The minimum size of the WQE is 32 bytes, hence + * MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs + * the SQ can store. This value is then used to size other queues + * to prevent overflow. 
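+ * (256 WQEs * 32 bytes = 8192 bytes; the BUILD_BUG_ON below checks this stays page aligned.)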
+ */ + txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32; + BUILD_BUG_ON(!PAGE_ALIGNED(txq_size)); + + cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE; + cq_size = PAGE_ALIGN(cq_size); + + gc = gd->gdma_context; + + for (i = 0; i < apc->num_queues; i++) { + apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE; + + /* Create SQ */ + txq = &apc->tx_qp[i].txq; + + u64_stats_init(&txq->stats.syncp); + txq->ndev = net; + txq->net_txq = netdev_get_tx_queue(net, i); + txq->vp_offset = apc->tx_vp_offset; + skb_queue_head_init(&txq->pending_skbs); + + memset(&spec, 0, sizeof(spec)); + spec.type = GDMA_SQ; + spec.monitor_avl_buf = true; + spec.queue_size = txq_size; + err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq); + if (err) + goto out; + + /* Create SQ's CQ */ + cq = &apc->tx_qp[i].tx_cq; + cq->gdma_comp_buf = apc->eqs[i].cqe_poll; + cq->type = MANA_CQ_TYPE_TX; + + cq->txq = txq; + + memset(&spec, 0, sizeof(spec)); + spec.type = GDMA_CQ; + spec.monitor_avl_buf = false; + spec.queue_size = cq_size; + spec.cq.callback = mana_cq_handler; + spec.cq.parent_eq = apc->eqs[i].eq; + spec.cq.context = cq; + err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq); + if (err) + goto out; + + memset(&wq_spec, 0, sizeof(wq_spec)); + memset(&cq_spec, 0, sizeof(cq_spec)); + + wq_spec.gdma_region = txq->gdma_sq->mem_info.gdma_region; + wq_spec.queue_size = txq->gdma_sq->queue_size; + + cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region; + cq_spec.queue_size = cq->gdma_cq->queue_size; + cq_spec.modr_ctx_id = 0; + cq_spec.attached_eq = cq->gdma_cq->cq.parent->id; + + err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ, + &wq_spec, &cq_spec, + &apc->tx_qp[i].tx_object); + + if (err) + goto out; + + txq->gdma_sq->id = wq_spec.queue_index; + cq->gdma_cq->id = cq_spec.queue_index; + + txq->gdma_sq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION; + cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION; + + txq->gdma_txq_id = txq->gdma_sq->id; + + cq->gdma_id = cq->gdma_cq->id; + + if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) + return -EINVAL; + + gc->cq_table[cq->gdma_id] = cq->gdma_cq; + + mana_gd_arm_cq(cq->gdma_cq); + } + + return 0; +out: + mana_destroy_txq(apc); + return err; +} + +static void mana_napi_sync_for_rx(struct mana_rxq *rxq) +{ + struct net_device *ndev = rxq->ndev; + struct mana_port_context *apc; + u16 rxq_idx = rxq->rxq_idx; + struct napi_struct *napi; + struct gdma_queue *eq; + + apc = netdev_priv(ndev); + eq = apc->eqs[rxq_idx].eq; + napi = &eq->eq.napi; + + napi_synchronize(napi); +} + +static void mana_destroy_rxq(struct mana_port_context *apc, + struct mana_rxq *rxq, bool validate_state) + +{ + struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; + struct mana_recv_buf_oob *rx_oob; + struct device *dev = gc->dev; + int i; + + if (!rxq) + return; + + if (validate_state) + mana_napi_sync_for_rx(rxq); + + mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj); + + mana_deinit_cq(apc, &rxq->rx_cq); + + for (i = 0; i < rxq->num_rx_buf; i++) { + rx_oob = &rxq->rx_oobs[i]; + + if (!rx_oob->buf_va) + continue; + + dma_unmap_page(dev, rx_oob->buf_dma_addr, rxq->datasize, + DMA_FROM_DEVICE); + + free_page((unsigned long)rx_oob->buf_va); + rx_oob->buf_va = NULL; + } + + if (rxq->gdma_rq) + mana_gd_destroy_queue(gc, rxq->gdma_rq); + + kfree(rxq); +} + +#define MANA_WQE_HEADER_SIZE 16 +#define MANA_WQE_SGE_SIZE 16 + +static int mana_alloc_rx_wqe(struct mana_port_context *apc, + struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size) +{ + struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; + 
struct mana_recv_buf_oob *rx_oob; + struct device *dev = gc->dev; + struct page *page; + dma_addr_t da; + u32 buf_idx; + + WARN_ON(rxq->datasize == 0 || rxq->datasize > PAGE_SIZE); + + *rxq_size = 0; + *cq_size = 0; + + for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) { + rx_oob = &rxq->rx_oobs[buf_idx]; + memset(rx_oob, 0, sizeof(*rx_oob)); + + page = alloc_page(GFP_KERNEL); + if (!page) + return -ENOMEM; + + da = dma_map_page(dev, page, 0, rxq->datasize, DMA_FROM_DEVICE); + + if (dma_mapping_error(dev, da)) { + __free_page(page); + return -ENOMEM; + } + + rx_oob->buf_va = page_to_virt(page); + rx_oob->buf_dma_addr = da; + + rx_oob->num_sge = 1; + rx_oob->sgl[0].address = rx_oob->buf_dma_addr; + rx_oob->sgl[0].size = rxq->datasize; + rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey; + + rx_oob->wqe_req.sgl = rx_oob->sgl; + rx_oob->wqe_req.num_sge = rx_oob->num_sge; + rx_oob->wqe_req.inline_oob_size = 0; + rx_oob->wqe_req.inline_oob_data = NULL; + rx_oob->wqe_req.flags = 0; + rx_oob->wqe_req.client_data_unit = 0; + + *rxq_size += ALIGN(MANA_WQE_HEADER_SIZE + + MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32); + *cq_size += COMP_ENTRY_SIZE; + } + + return 0; +} + +static int mana_push_wqe(struct mana_rxq *rxq) +{ + struct mana_recv_buf_oob *rx_oob; + u32 buf_idx; + int err; + + for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) { + rx_oob = &rxq->rx_oobs[buf_idx]; + + err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req, + &rx_oob->wqe_inf); + if (err) + return -ENOSPC; + } + + return 0; +} + +static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc, + u32 rxq_idx, struct mana_eq *eq, + struct net_device *ndev) +{ + struct gdma_dev *gd = apc->ac->gdma_dev; + struct mana_obj_spec wq_spec; + struct mana_obj_spec cq_spec; + struct gdma_queue_spec spec; + struct mana_cq *cq = NULL; + struct gdma_context *gc; + u32 cq_size, rq_size; + struct mana_rxq *rxq; + int err; + + gc = gd->gdma_context; + + rxq = kzalloc(sizeof(*rxq) + + RX_BUFFERS_PER_QUEUE * sizeof(struct mana_recv_buf_oob), + GFP_KERNEL); + if (!rxq) + return NULL; + + rxq->ndev = ndev; + rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE; + rxq->rxq_idx = rxq_idx; + rxq->datasize = ALIGN(MAX_FRAME_SIZE, 64); + rxq->rxobj = INVALID_MANA_HANDLE; + + err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size); + if (err) + goto out; + + rq_size = PAGE_ALIGN(rq_size); + cq_size = PAGE_ALIGN(cq_size); + + /* Create RQ */ + memset(&spec, 0, sizeof(spec)); + spec.type = GDMA_RQ; + spec.monitor_avl_buf = true; + spec.queue_size = rq_size; + err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq); + if (err) + goto out; + + /* Create RQ's CQ */ + cq = &rxq->rx_cq; + cq->gdma_comp_buf = eq->cqe_poll; + cq->type = MANA_CQ_TYPE_RX; + cq->rxq = rxq; + + memset(&spec, 0, sizeof(spec)); + spec.type = GDMA_CQ; + spec.monitor_avl_buf = false; + spec.queue_size = cq_size; + spec.cq.callback = mana_cq_handler; + spec.cq.parent_eq = eq->eq; + spec.cq.context = cq; + err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq); + if (err) + goto out; + + memset(&wq_spec, 0, sizeof(wq_spec)); + memset(&cq_spec, 0, sizeof(cq_spec)); + wq_spec.gdma_region = rxq->gdma_rq->mem_info.gdma_region; + wq_spec.queue_size = rxq->gdma_rq->queue_size; + + cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region; + cq_spec.queue_size = cq->gdma_cq->queue_size; + cq_spec.modr_ctx_id = 0; + cq_spec.attached_eq = cq->gdma_cq->cq.parent->id; + + err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ, + &wq_spec, &cq_spec, &rxq->rxobj); + if (err) + goto out; + + 
rxq->gdma_rq->id = wq_spec.queue_index; + cq->gdma_cq->id = cq_spec.queue_index; + + rxq->gdma_rq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION; + cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION; + + rxq->gdma_id = rxq->gdma_rq->id; + cq->gdma_id = cq->gdma_cq->id; + + err = mana_push_wqe(rxq); + if (err) + goto out; + + if (cq->gdma_id >= gc->max_num_cqs) + goto out; + + gc->cq_table[cq->gdma_id] = cq->gdma_cq; + + mana_gd_arm_cq(cq->gdma_cq); +out: + if (!err) + return rxq; + + netdev_err(ndev, "Failed to create RXQ: err = %d\n", err); + + mana_destroy_rxq(apc, rxq, false); + + if (cq) + mana_deinit_cq(apc, cq); + + return NULL; +} + +static int mana_add_rx_queues(struct mana_port_context *apc, + struct net_device *ndev) +{ + struct mana_rxq *rxq; + int err = 0; + int i; + + for (i = 0; i < apc->num_queues; i++) { + rxq = mana_create_rxq(apc, i, &apc->eqs[i], ndev); + if (!rxq) { + err = -ENOMEM; + goto out; + } + + u64_stats_init(&rxq->stats.syncp); + + apc->rxqs[i] = rxq; + } + + apc->default_rxobj = apc->rxqs[0]->rxobj; +out: + return err; +} + +static void mana_destroy_vport(struct mana_port_context *apc) +{ + struct mana_rxq *rxq; + u32 rxq_idx; + + for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) { + rxq = apc->rxqs[rxq_idx]; + if (!rxq) + continue; + + mana_destroy_rxq(apc, rxq, true); + apc->rxqs[rxq_idx] = NULL; + } + + mana_destroy_txq(apc); +} + +static int mana_create_vport(struct mana_port_context *apc, + struct net_device *net) +{ + struct gdma_dev *gd = apc->ac->gdma_dev; + int err; + + apc->default_rxobj = INVALID_MANA_HANDLE; + + err = mana_cfg_vport(apc, gd->pdid, gd->doorbell); + if (err) + return err; + + return mana_create_txq(apc, net); +} + +static void mana_rss_table_init(struct mana_port_context *apc) +{ + int i; + + for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) + apc->indir_table[i] = + ethtool_rxfh_indir_default(i, apc->num_queues); +} + +int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx, + bool update_hash, bool update_tab) +{ + u32 queue_idx; + int i; + + if (update_tab) { + for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) { + queue_idx = apc->indir_table[i]; + apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj; + } + } + + return mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab); +} + +static int mana_init_port(struct net_device *ndev) +{ + struct mana_port_context *apc = netdev_priv(ndev); + u32 max_txq, max_rxq, max_queues; + int port_idx = apc->port_idx; + u32 num_indirect_entries; + int err; + + err = mana_init_port_context(apc); + if (err) + return err; + + err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq, + &num_indirect_entries); + if (err) { + netdev_err(ndev, "Failed to query info for vPort 0\n"); + goto reset_apc; + } + + max_queues = min_t(u32, max_txq, max_rxq); + if (apc->max_queues > max_queues) + apc->max_queues = max_queues; + + if (apc->num_queues > apc->max_queues) + apc->num_queues = apc->max_queues; + + ether_addr_copy(ndev->dev_addr, apc->mac_addr); + + return 0; + +reset_apc: + kfree(apc->rxqs); + apc->rxqs = NULL; + return err; +} + +int mana_alloc_queues(struct net_device *ndev) +{ + struct mana_port_context *apc = netdev_priv(ndev); + struct gdma_dev *gd = apc->ac->gdma_dev; + int err; + + err = mana_create_eq(apc); + if (err) + return err; + + err = mana_create_vport(apc, ndev); + if (err) + goto destroy_eq; + + err = netif_set_real_num_tx_queues(ndev, apc->num_queues); + if (err) + goto destroy_vport; + + err = mana_add_rx_queues(apc, ndev); + if (err) + goto 
destroy_vport; + + apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE; + + err = netif_set_real_num_rx_queues(ndev, apc->num_queues); + if (err) + goto destroy_vport; + + mana_rss_table_init(apc); + + err = mana_config_rss(apc, TRI_STATE_TRUE, true, true); + if (err) + goto destroy_vport; + + return 0; + +destroy_vport: + mana_destroy_vport(apc); +destroy_eq: + mana_destroy_eq(gd->gdma_context, apc); + return err; +} + +int mana_attach(struct net_device *ndev) +{ + struct mana_port_context *apc = netdev_priv(ndev); + int err; + + ASSERT_RTNL(); + + err = mana_init_port(ndev); + if (err) + return err; + + err = mana_alloc_queues(ndev); + if (err) { + kfree(apc->rxqs); + apc->rxqs = NULL; + return err; + } + + netif_device_attach(ndev); + + apc->port_is_up = apc->port_st_save; + + /* Ensure port state updated before txq state */ + smp_wmb(); + + if (apc->port_is_up) { + netif_carrier_on(ndev); + netif_tx_wake_all_queues(ndev); + } + + return 0; +} + +static int mana_dealloc_queues(struct net_device *ndev) +{ + struct mana_port_context *apc = netdev_priv(ndev); + struct mana_txq *txq; + int i, err; + + if (apc->port_is_up) + return -EINVAL; + + /* No packet can be transmitted now since apc->port_is_up is false. + * There is still a tiny chance that mana_poll_tx_cq() can re-enable + * a txq because it may not timely see apc->port_is_up being cleared + * to false, but it doesn't matter since mana_start_xmit() drops any + * new packets due to apc->port_is_up being false. + * + * Drain all the in-flight TX packets + */ + for (i = 0; i < apc->num_queues; i++) { + txq = &apc->tx_qp[i].txq; + + while (atomic_read(&txq->pending_sends) > 0) + usleep_range(1000, 2000); + } + + /* We're 100% sure the queues can no longer be woken up, because + * we're sure now mana_poll_tx_cq() can't be running. 
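+	 * (With pending_sends drained to zero there are no TX completions
+	 * left to process, so no completion path remains that could wake
+	 * a queue.)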
+ */ + + apc->rss_state = TRI_STATE_FALSE; + err = mana_config_rss(apc, TRI_STATE_FALSE, false, false); + if (err) { + netdev_err(ndev, "Failed to disable vPort: %d\n", err); + return err; + } + + /* TODO: Implement RX fencing */ + ssleep(1); + + mana_destroy_vport(apc); + + mana_destroy_eq(apc->ac->gdma_dev->gdma_context, apc); + + return 0; +} + +int mana_detach(struct net_device *ndev, bool from_close) +{ + struct mana_port_context *apc = netdev_priv(ndev); + int err; + + ASSERT_RTNL(); + + apc->port_st_save = apc->port_is_up; + apc->port_is_up = false; + + /* Ensure port state updated before txq state */ + smp_wmb(); + + netif_tx_disable(ndev); + netif_carrier_off(ndev); + + if (apc->port_st_save) { + err = mana_dealloc_queues(ndev); + if (err) + return err; + } + + if (!from_close) { + netif_device_detach(ndev); + mana_cleanup_port_context(apc); + } + + return 0; +} + +static int mana_probe_port(struct mana_context *ac, int port_idx, + struct net_device **ndev_storage) +{ + struct gdma_context *gc = ac->gdma_dev->gdma_context; + struct mana_port_context *apc; + struct net_device *ndev; + int err; + + ndev = alloc_etherdev_mq(sizeof(struct mana_port_context), + gc->max_num_queues); + if (!ndev) + return -ENOMEM; + + *ndev_storage = ndev; + + apc = netdev_priv(ndev); + apc->ac = ac; + apc->ndev = ndev; + apc->max_queues = gc->max_num_queues; + apc->num_queues = min_t(uint, gc->max_num_queues, MANA_MAX_NUM_QUEUES); + apc->port_handle = INVALID_MANA_HANDLE; + apc->port_idx = port_idx; + + ndev->netdev_ops = &mana_devops; + ndev->ethtool_ops = &mana_ethtool_ops; + ndev->mtu = ETH_DATA_LEN; + ndev->max_mtu = ndev->mtu; + ndev->min_mtu = ndev->mtu; + ndev->needed_headroom = MANA_HEADROOM; + SET_NETDEV_DEV(ndev, gc->dev); + + netif_carrier_off(ndev); + + netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE); + + err = mana_init_port(ndev); + if (err) + goto free_net; + + netdev_lockdep_set_classes(ndev); + + ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + ndev->hw_features |= NETIF_F_RXCSUM; + ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; + ndev->hw_features |= NETIF_F_RXHASH; + ndev->features = ndev->hw_features; + ndev->vlan_features = 0; + + err = register_netdev(ndev); + if (err) { + netdev_err(ndev, "Unable to register netdev.\n"); + goto reset_apc; + } + + return 0; + +reset_apc: + kfree(apc->rxqs); + apc->rxqs = NULL; +free_net: + *ndev_storage = NULL; + netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err); + free_netdev(ndev); + return err; +} + +int mana_probe(struct gdma_dev *gd) +{ + struct gdma_context *gc = gd->gdma_context; + struct device *dev = gc->dev; + struct mana_context *ac; + int err; + int i; + + dev_info(dev, + "Microsoft Azure Network Adapter protocol version: %d.%d.%d\n", + MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION); + + err = mana_gd_register_device(gd); + if (err) + return err; + + ac = kzalloc(sizeof(*ac), GFP_KERNEL); + if (!ac) + return -ENOMEM; + + ac->gdma_dev = gd; + ac->num_ports = 1; + gd->driver_data = ac; + + err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION, + MANA_MICRO_VERSION, &ac->num_ports); + if (err) + goto out; + + if (ac->num_ports > MAX_PORTS_IN_MANA_DEV) + ac->num_ports = MAX_PORTS_IN_MANA_DEV; + + for (i = 0; i < ac->num_ports; i++) { + err = mana_probe_port(ac, i, &ac->ports[i]); + if (err) + break; + } +out: + if (err) + mana_remove(gd); + + return err; +} + +void mana_remove(struct gdma_dev *gd) +{ + struct gdma_context *gc = gd->gdma_context; + struct 
mana_context *ac = gd->driver_data; + struct device *dev = gc->dev; + struct net_device *ndev; + int i; + + for (i = 0; i < ac->num_ports; i++) { + ndev = ac->ports[i]; + if (!ndev) { + if (i == 0) + dev_err(dev, "No net device to remove\n"); + goto out; + } + + /* All cleanup actions should stay after rtnl_lock(), otherwise + * other functions may access partially cleaned up data. + */ + rtnl_lock(); + + mana_detach(ndev, false); + + unregister_netdevice(ndev); + + rtnl_unlock(); + + free_netdev(ndev); + } +out: + mana_gd_deregister_device(gd); + gd->driver_data = NULL; + gd->gdma_context = NULL; + kfree(ac); +} diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c new file mode 100644 index 000000000000..7e74339f39ae --- /dev/null +++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c @@ -0,0 +1,250 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright (c) 2021, Microsoft Corporation. */ + +#include <linux/inetdevice.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> + +#include "mana.h" + +static const struct { + char name[ETH_GSTRING_LEN]; + u16 offset; +} mana_eth_stats[] = { + {"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)}, + {"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)}, +}; + +static int mana_get_sset_count(struct net_device *ndev, int stringset) +{ + struct mana_port_context *apc = netdev_priv(ndev); + unsigned int num_queues = apc->num_queues; + + if (stringset != ETH_SS_STATS) + return -EINVAL; + + return ARRAY_SIZE(mana_eth_stats) + num_queues * 4; +} + +static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data) +{ + struct mana_port_context *apc = netdev_priv(ndev); + unsigned int num_queues = apc->num_queues; + u8 *p = data; + int i; + + if (stringset != ETH_SS_STATS) + return; + + for (i = 0; i < ARRAY_SIZE(mana_eth_stats); i++) { + memcpy(p, mana_eth_stats[i].name, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + + for (i = 0; i < num_queues; i++) { + sprintf(p, "rx_%d_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_%d_bytes", i); + p += ETH_GSTRING_LEN; + } + + for (i = 0; i < num_queues; i++) { + sprintf(p, "tx_%d_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_%d_bytes", i); + p += ETH_GSTRING_LEN; + } +} + +static void mana_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *e_stats, u64 *data) +{ + struct mana_port_context *apc = netdev_priv(ndev); + unsigned int num_queues = apc->num_queues; + void *eth_stats = &apc->eth_stats; + struct mana_stats *stats; + unsigned int start; + u64 packets, bytes; + int q, i = 0; + + if (!apc->port_is_up) + return; + + for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++) + data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset); + + for (q = 0; q < num_queues; q++) { + stats = &apc->rxqs[q]->stats; + + do { + start = u64_stats_fetch_begin_irq(&stats->syncp); + packets = stats->packets; + bytes = stats->bytes; + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + + data[i++] = packets; + data[i++] = bytes; + } + + for (q = 0; q < num_queues; q++) { + stats = &apc->tx_qp[q].txq.stats; + + do { + start = u64_stats_fetch_begin_irq(&stats->syncp); + packets = stats->packets; + bytes = stats->bytes; + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + + data[i++] = packets; + data[i++] = bytes; + } +} + +static int mana_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *cmd, + u32 *rules) +{ + struct mana_port_context *apc = netdev_priv(ndev); 
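+	/* Only ETHTOOL_GRXRINGS is supported; report the RX ring count. */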
+ + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = apc->num_queues; + return 0; + } + + return -EOPNOTSUPP; +} + +static u32 mana_get_rxfh_key_size(struct net_device *ndev) +{ + return MANA_HASH_KEY_SIZE; +} + +static u32 mana_rss_indir_size(struct net_device *ndev) +{ + return MANA_INDIRECT_TABLE_SIZE; +} + +static int mana_get_rxfh(struct net_device *ndev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct mana_port_context *apc = netdev_priv(ndev); + int i; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */ + + if (indir) { + for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) + indir[i] = apc->indir_table[i]; + } + + if (key) + memcpy(key, apc->hashkey, MANA_HASH_KEY_SIZE); + + return 0; +} + +static int mana_set_rxfh(struct net_device *ndev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct mana_port_context *apc = netdev_priv(ndev); + bool update_hash = false, update_table = false; + u32 save_table[MANA_INDIRECT_TABLE_SIZE]; + u8 save_key[MANA_HASH_KEY_SIZE]; + int i, err; + + if (!apc->port_is_up) + return -EOPNOTSUPP; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (indir) { + for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) + if (indir[i] >= apc->num_queues) + return -EINVAL; + + update_table = true; + for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) { + save_table[i] = apc->indir_table[i]; + apc->indir_table[i] = indir[i]; + } + } + + if (key) { + update_hash = true; + memcpy(save_key, apc->hashkey, MANA_HASH_KEY_SIZE); + memcpy(apc->hashkey, key, MANA_HASH_KEY_SIZE); + } + + err = mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table); + + if (err) { /* recover to original values */ + if (update_table) { + for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) + apc->indir_table[i] = save_table[i]; + } + + if (update_hash) + memcpy(apc->hashkey, save_key, MANA_HASH_KEY_SIZE); + + mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table); + } + + return err; +} + +static void mana_get_channels(struct net_device *ndev, + struct ethtool_channels *channel) +{ + struct mana_port_context *apc = netdev_priv(ndev); + + channel->max_combined = apc->max_queues; + channel->combined_count = apc->num_queues; +} + +static int mana_set_channels(struct net_device *ndev, + struct ethtool_channels *channels) +{ + struct mana_port_context *apc = netdev_priv(ndev); + unsigned int new_count = channels->combined_count; + unsigned int old_count = apc->num_queues; + int err, err2; + + if (!apc->port_is_up) + return -EOPNOTSUPP; + + err = mana_detach(ndev, false); + if (err) { + netdev_err(ndev, "mana_detach failed: %d\n", err); + return err; + } + + apc->num_queues = new_count; + err = mana_attach(ndev); + if (!err) + return 0; + + netdev_err(ndev, "mana_attach failed: %d\n", err); + + /* Try to roll it back to the old configuration. 
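+	 * Restore the previous channel count and attach again.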
*/ + apc->num_queues = old_count; + err2 = mana_attach(ndev); + if (err2) + netdev_err(ndev, "mana re-attach failed: %d\n", err2); + + return err; +} + +const struct ethtool_ops mana_ethtool_ops = { + .get_ethtool_stats = mana_get_ethtool_stats, + .get_sset_count = mana_get_sset_count, + .get_strings = mana_get_strings, + .get_rxnfc = mana_get_rxnfc, + .get_rxfh_key_size = mana_get_rxfh_key_size, + .get_rxfh_indir_size = mana_rss_indir_size, + .get_rxfh = mana_get_rxfh, + .set_rxfh = mana_set_rxfh, + .get_channels = mana_get_channels, + .set_channels = mana_set_channels, +}; diff --git a/drivers/net/ethernet/microsoft/mana/shm_channel.c b/drivers/net/ethernet/microsoft/mana/shm_channel.c new file mode 100644 index 000000000000..da255da62176 --- /dev/null +++ b/drivers/net/ethernet/microsoft/mana/shm_channel.c @@ -0,0 +1,291 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright (c) 2021, Microsoft Corporation. */ + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/mm.h> + +#include "shm_channel.h" + +#define PAGE_FRAME_L48_WIDTH_BYTES 6 +#define PAGE_FRAME_L48_WIDTH_BITS (PAGE_FRAME_L48_WIDTH_BYTES * 8) +#define PAGE_FRAME_L48_MASK 0x0000FFFFFFFFFFFF +#define PAGE_FRAME_H4_WIDTH_BITS 4 +#define VECTOR_MASK 0xFFFF +#define SHMEM_VF_RESET_STATE ((u32)-1) + +#define SMC_MSG_TYPE_ESTABLISH_HWC 1 +#define SMC_MSG_TYPE_ESTABLISH_HWC_VERSION 0 + +#define SMC_MSG_TYPE_DESTROY_HWC 2 +#define SMC_MSG_TYPE_DESTROY_HWC_VERSION 0 + +#define SMC_MSG_DIRECTION_REQUEST 0 +#define SMC_MSG_DIRECTION_RESPONSE 1 + +/* Structures labeled with "HW DATA" are exchanged with the hardware. All of + * them are naturally aligned and hence don't need __packed. + */ + +/* Shared memory channel protocol header + * + * msg_type: set on request and response; response matches request. + * msg_version: newer PF writes back older response (matching request) + * older PF acts on latest version known and sets that version in result + * (less than request). + * direction: 0 for request, VF->PF; 1 for response, PF->VF. + * status: 0 on request, + * operation result on response (success = 0, failure = 1 or greater). + * reset_vf: If set on either establish or destroy request, indicates perform + * FLR before/after the operation. + * owner_is_pf: 1 indicates PF owned, 0 indicates VF owned. + */ +union smc_proto_hdr { + u32 as_uint32; + + struct { + u8 msg_type : 3; + u8 msg_version : 3; + u8 reserved_1 : 1; + u8 direction : 1; + + u8 status; + + u8 reserved_2; + + u8 reset_vf : 1; + u8 reserved_3 : 6; + u8 owner_is_pf : 1; + }; +}; /* HW DATA */ + +#define SMC_APERTURE_BITS 256 +#define SMC_BASIC_UNIT (sizeof(u32)) +#define SMC_APERTURE_DWORDS (SMC_APERTURE_BITS / (SMC_BASIC_UNIT * 8)) +#define SMC_LAST_DWORD (SMC_APERTURE_DWORDS - 1) + +static int mana_smc_poll_register(void __iomem *base, bool reset) +{ + void __iomem *ptr = base + SMC_LAST_DWORD * SMC_BASIC_UNIT; + u32 last_dword; + int i; + + /* Poll the hardware for the ownership bit. This should be pretty fast, + * but let's do it in a loop just in case the hardware or the PF + * driver are temporarily busy. + */ + for (i = 0; i < 20 * 1000; i++) { + last_dword = readl(ptr); + + /* shmem reads as 0xFFFFFFFF in the reset case */ + if (reset && last_dword == SHMEM_VF_RESET_STATE) + return 0; + + /* If bit_31 is set, the PF currently owns the SMC. 
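+		 * Keep polling until the bit clears and ownership
+		 * returns to the VF.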
*/ + if (!(last_dword & BIT(31))) + return 0; + + usleep_range(1000, 2000); + } + + return -ETIMEDOUT; +} + +static int mana_smc_read_response(struct shm_channel *sc, u32 msg_type, + u32 msg_version, bool reset_vf) +{ + void __iomem *base = sc->base; + union smc_proto_hdr hdr; + int err; + + /* Wait for PF to respond. */ + err = mana_smc_poll_register(base, reset_vf); + if (err) + return err; + + hdr.as_uint32 = readl(base + SMC_LAST_DWORD * SMC_BASIC_UNIT); + + if (reset_vf && hdr.as_uint32 == SHMEM_VF_RESET_STATE) + return 0; + + /* Validate protocol fields from the PF driver */ + if (hdr.msg_type != msg_type || hdr.msg_version > msg_version || + hdr.direction != SMC_MSG_DIRECTION_RESPONSE) { + dev_err(sc->dev, "Wrong SMC response 0x%x, type=%d, ver=%d\n", + hdr.as_uint32, msg_type, msg_version); + return -EPROTO; + } + + /* Validate the operation result */ + if (hdr.status != 0) { + dev_err(sc->dev, "SMC operation failed: 0x%x\n", hdr.status); + return -EPROTO; + } + + return 0; +} + +void mana_smc_init(struct shm_channel *sc, struct device *dev, + void __iomem *base) +{ + sc->dev = dev; + sc->base = base; +} + +int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr, + u64 cq_addr, u64 rq_addr, u64 sq_addr, + u32 eq_msix_index) +{ + union smc_proto_hdr *hdr; + u16 all_addr_h4bits = 0; + u16 frame_addr_seq = 0; + u64 frame_addr = 0; + u8 shm_buf[32]; + u64 *shmem; + u32 *dword; + u8 *ptr; + int err; + int i; + + /* Ensure VF already has possession of shared memory */ + err = mana_smc_poll_register(sc->base, false); + if (err) { + dev_err(sc->dev, "Timeout when setting up HWC: %d\n", err); + return err; + } + + if (!PAGE_ALIGNED(eq_addr) || !PAGE_ALIGNED(cq_addr) || + !PAGE_ALIGNED(rq_addr) || !PAGE_ALIGNED(sq_addr)) + return -EINVAL; + + if ((eq_msix_index & VECTOR_MASK) != eq_msix_index) + return -EINVAL; + + /* Scheme for packing four addresses and extra info into 256 bits. + * + * Addresses must be page frame aligned, so only frame address bits + * are transferred. + * + * 52-bit frame addresses are split into the lower 48 bits and upper + * 4 bits. Lower 48 bits of 4 address are written sequentially from + * the start of the 256-bit shared memory region followed by 16 bits + * containing the upper 4 bits of the 4 addresses in sequence. + * + * A 16 bit EQ vector number fills out the next-to-last 32-bit dword. + * + * The final 32-bit dword is used for protocol control information as + * defined in smc_proto_hdr. 
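+	 *
+	 * For the i-th address written (i = frame_addr_seq, 0 to 3), with
+	 * pfn the 52-bit page frame number:
+	 *
+	 *   low 48 bits:  pfn & PAGE_FRAME_L48_MASK, stored at ptr
+	 *   high 4 bits:  all_addr_h4bits |= (pfn >> 48) << (i * 4)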
+ */ + + memset(shm_buf, 0, sizeof(shm_buf)); + ptr = shm_buf; + + /* EQ addr: low 48 bits of frame address */ + shmem = (u64 *)ptr; + frame_addr = PHYS_PFN(eq_addr); + *shmem = frame_addr & PAGE_FRAME_L48_MASK; + all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) << + (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS); + ptr += PAGE_FRAME_L48_WIDTH_BYTES; + + /* CQ addr: low 48 bits of frame address */ + shmem = (u64 *)ptr; + frame_addr = PHYS_PFN(cq_addr); + *shmem = frame_addr & PAGE_FRAME_L48_MASK; + all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) << + (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS); + ptr += PAGE_FRAME_L48_WIDTH_BYTES; + + /* RQ addr: low 48 bits of frame address */ + shmem = (u64 *)ptr; + frame_addr = PHYS_PFN(rq_addr); + *shmem = frame_addr & PAGE_FRAME_L48_MASK; + all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) << + (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS); + ptr += PAGE_FRAME_L48_WIDTH_BYTES; + + /* SQ addr: low 48 bits of frame address */ + shmem = (u64 *)ptr; + frame_addr = PHYS_PFN(sq_addr); + *shmem = frame_addr & PAGE_FRAME_L48_MASK; + all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) << + (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS); + ptr += PAGE_FRAME_L48_WIDTH_BYTES; + + /* High 4 bits of the four frame addresses */ + *((u16 *)ptr) = all_addr_h4bits; + ptr += sizeof(u16); + + /* EQ MSIX vector number */ + *((u16 *)ptr) = (u16)eq_msix_index; + ptr += sizeof(u16); + + /* 32-bit protocol header in final dword */ + *((u32 *)ptr) = 0; + + hdr = (union smc_proto_hdr *)ptr; + hdr->msg_type = SMC_MSG_TYPE_ESTABLISH_HWC; + hdr->msg_version = SMC_MSG_TYPE_ESTABLISH_HWC_VERSION; + hdr->direction = SMC_MSG_DIRECTION_REQUEST; + hdr->reset_vf = reset_vf; + + /* Write 256-message buffer to shared memory (final 32-bit write + * triggers HW to set possession bit to PF). + */ + dword = (u32 *)shm_buf; + for (i = 0; i < SMC_APERTURE_DWORDS; i++) + writel(*dword++, sc->base + i * SMC_BASIC_UNIT); + + /* Read shmem response (polling for VF possession) and validate. + * For setup, waiting for response on shared memory is not strictly + * necessary, since wait occurs later for results to appear in EQE's. + */ + err = mana_smc_read_response(sc, SMC_MSG_TYPE_ESTABLISH_HWC, + SMC_MSG_TYPE_ESTABLISH_HWC_VERSION, + reset_vf); + if (err) { + dev_err(sc->dev, "Error when setting up HWC: %d\n", err); + return err; + } + + return 0; +} + +int mana_smc_teardown_hwc(struct shm_channel *sc, bool reset_vf) +{ + union smc_proto_hdr hdr = {}; + int err; + + /* Ensure already has possession of shared memory */ + err = mana_smc_poll_register(sc->base, false); + if (err) { + dev_err(sc->dev, "Timeout when tearing down HWC\n"); + return err; + } + + /* Set up protocol header for HWC destroy message */ + hdr.msg_type = SMC_MSG_TYPE_DESTROY_HWC; + hdr.msg_version = SMC_MSG_TYPE_DESTROY_HWC_VERSION; + hdr.direction = SMC_MSG_DIRECTION_REQUEST; + hdr.reset_vf = reset_vf; + + /* Write message in high 32 bits of 256-bit shared memory, causing HW + * to set possession bit to PF. + */ + writel(hdr.as_uint32, sc->base + SMC_LAST_DWORD * SMC_BASIC_UNIT); + + /* Read shmem response (polling for VF possession) and validate. + * For teardown, waiting for response is required to ensure hardware + * invalidates MST entries before software frees memory. 
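+	 * Freeing the queue pages before that response could leave the
+	 * hardware with stale mappings into memory the VF has already
+	 * released.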
+ */ + err = mana_smc_read_response(sc, SMC_MSG_TYPE_DESTROY_HWC, + SMC_MSG_TYPE_DESTROY_HWC_VERSION, + reset_vf); + if (err) { + dev_err(sc->dev, "Error when tearing down HWC: %d\n", err); + return err; + } + + return 0; +} diff --git a/drivers/net/ethernet/microsoft/mana/shm_channel.h b/drivers/net/ethernet/microsoft/mana/shm_channel.h new file mode 100644 index 000000000000..5199b41497ff --- /dev/null +++ b/drivers/net/ethernet/microsoft/mana/shm_channel.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright (c) 2021, Microsoft Corporation. */ + +#ifndef _SHM_CHANNEL_H +#define _SHM_CHANNEL_H + +struct shm_channel { + struct device *dev; + void __iomem *base; +}; + +void mana_smc_init(struct shm_channel *sc, struct device *dev, + void __iomem *base); + +int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr, + u64 cq_addr, u64 rq_addr, u64 sq_addr, + u32 eq_msix_index); + +int mana_smc_teardown_hwc(struct shm_channel *sc, bool reset_vf); + +#endif /* _SHM_CHANNEL_H */ diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index e72fd33a214c..64c6842bd452 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -1350,9 +1350,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) __lpc_get_mac(pldat, ndev->dev_addr); if (!is_valid_ether_addr(ndev->dev_addr)) { - const char *macaddr = of_get_mac_address(np); - if (!IS_ERR(macaddr)) - ether_addr_copy(ndev->dev_addr, macaddr); + of_get_mac_address(np, ndev->dev_addr); } if (!is_valid_ether_addr(ndev->dev_addr)) eth_hw_addr_random(ndev); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c index 71db1e2c7d8a..6583be570e45 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c @@ -888,55 +888,55 @@ static int ionic_get_ts_info(struct net_device *netdev, mask = cpu_to_le64(IONIC_PKT_CLS_NTP_ALL); if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) - info->rx_filters |= HWTSTAMP_FILTER_NTP_ALL; + info->rx_filters |= BIT(HWTSTAMP_FILTER_NTP_ALL); mask = cpu_to_le64(IONIC_PKT_CLS_PTP1_SYNC); if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) - info->rx_filters |= HWTSTAMP_FILTER_PTP_V1_L4_SYNC; + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC); mask = cpu_to_le64(IONIC_PKT_CLS_PTP1_DREQ); if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) - info->rx_filters |= HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ); mask = cpu_to_le64(IONIC_PKT_CLS_PTP1_ALL); if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) - info->rx_filters |= HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT); mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_L4_SYNC); if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) - info->rx_filters |= HWTSTAMP_FILTER_PTP_V2_L4_SYNC; + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC); mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_L4_DREQ); if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) - info->rx_filters |= HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ); mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_L4_ALL); if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) - info->rx_filters |= HWTSTAMP_FILTER_PTP_V2_L4_EVENT; + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT); mask = 
cpu_to_le64(IONIC_PKT_CLS_PTP2_L2_SYNC); if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) - info->rx_filters |= HWTSTAMP_FILTER_PTP_V2_L2_SYNC; + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC); mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_L2_DREQ); if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) - info->rx_filters |= HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ; + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ); mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_L2_ALL); if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) - info->rx_filters |= HWTSTAMP_FILTER_PTP_V2_L2_EVENT; + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT); mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_SYNC); if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) - info->rx_filters |= HWTSTAMP_FILTER_PTP_V2_SYNC; + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_SYNC); mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_DREQ); if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) - info->rx_filters |= HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ); mask = cpu_to_le64(IONIC_PKT_CLS_PTP2_ALL); if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) == mask) - info->rx_filters |= HWTSTAMP_FILTER_PTP_V2_EVENT; + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); return 0; } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_phc.c b/drivers/net/ethernet/pensando/ionic/ionic_phc.c index 177dbf89affd..a87c87e86aef 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_phc.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_phc.c @@ -225,7 +225,9 @@ int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr) memcpy(&config, &lif->phc->ts_config, sizeof(config)); mutex_unlock(&lif->phc->config_lock); - return copy_to_user(ifr->ifr_data, &config, sizeof(config)); + if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) + return -EFAULT; + return 0; } static u64 ionic_hwstamp_read(struct ionic *ionic, diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 5a3b65a6eb4f..ab9b02574a15 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -885,7 +885,7 @@ qca_spi_probe(struct spi_device *spi) struct net_device *qcaspi_devs = NULL; u8 legacy_mode = 0; u16 signature; - const char *mac; + int ret; if (!spi->dev.of_node) { dev_err(&spi->dev, "Missing device tree\n"); @@ -962,12 +962,8 @@ qca_spi_probe(struct spi_device *spi) spi_set_drvdata(spi, qcaspi_devs); - mac = of_get_mac_address(spi->dev.of_node); - - if (!IS_ERR(mac)) - ether_addr_copy(qca->net_dev->dev_addr, mac); - - if (!is_valid_ether_addr(qca->net_dev->dev_addr)) { + ret = of_get_mac_address(spi->dev.of_node, qca->net_dev->dev_addr); + if (ret) { eth_hw_addr_random(qca->net_dev); dev_info(&spi->dev, "Using random MAC address: %pM\n", qca->net_dev->dev_addr); diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c index 362b4f5c162c..bcdeca7b3366 100644 --- a/drivers/net/ethernet/qualcomm/qca_uart.c +++ b/drivers/net/ethernet/qualcomm/qca_uart.c @@ -323,7 +323,6 @@ static int qca_uart_probe(struct serdev_device *serdev) { struct net_device *qcauart_dev = alloc_etherdev(sizeof(struct qcauart)); struct qcauart *qca; - const char *mac; u32 speed = 115200; int ret; @@ -348,12 +347,8 @@ static int qca_uart_probe(struct serdev_device *serdev) of_property_read_u32(serdev->dev.of_node, "current-speed", &speed); - mac = of_get_mac_address(serdev->dev.of_node); - - 
if (!IS_ERR(mac)) - ether_addr_copy(qca->net_dev->dev_addr, mac); - - if (!is_valid_ether_addr(qca->net_dev->dev_addr)) { + ret = of_get_mac_address(serdev->dev.of_node, qca->net_dev->dev_addr); + if (ret) { eth_hw_addr_random(qca->net_dev); dev_info(&serdev->dev, "Using random MAC address: %pM\n", qca->net_dev->dev_addr); diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 85031b4721fa..3e86fbe21431 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -1910,6 +1910,32 @@ static void rtl8169_get_ringparam(struct net_device *dev, data->tx_pending = NUM_TX_DESC; } +static void rtl8169_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + bool tx_pause, rx_pause; + + phy_get_pause(tp->phydev, &tx_pause, &rx_pause); + + data->autoneg = tp->phydev->autoneg; + data->tx_pause = tx_pause ? 1 : 0; + data->rx_pause = rx_pause ? 1 : 0; +} + +static int rtl8169_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (dev->mtu > ETH_DATA_LEN) + return -EOPNOTSUPP; + + phy_set_asym_pause(tp->phydev, data->rx_pause, data->tx_pause); + + return 0; +} + static const struct ethtool_ops rtl8169_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES, @@ -1931,6 +1957,8 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, .get_ringparam = rtl8169_get_ringparam, + .get_pauseparam = rtl8169_get_pauseparam, + .set_pauseparam = rtl8169_set_pauseparam, }; static void rtl_enable_eee(struct rtl8169_private *tp) @@ -2358,6 +2386,15 @@ static void rtl_jumbo_config(struct rtl8169_private *tp) if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii) pcie_set_readrq(tp->pci_dev, readrq); + + /* Chip doesn't support pause in jumbo mode */ + if (jumbo) { + linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, + tp->phydev->advertising); + linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + tp->phydev->advertising); + phy_start_aneg(tp->phydev); + } } DECLARE_RTL_COND(rtl_chipcmd_cond) @@ -4633,8 +4670,6 @@ static int r8169_phy_connect(struct rtl8169_private *tp) if (!tp->supports_gmii) phy_set_max_speed(phydev, SPEED_100); - phy_support_asym_pause(phydev); - phy_attached_info(phydev); return 0; @@ -5088,6 +5123,8 @@ static int r8169_mdio_register(struct rtl8169_private *tp) tp->phydev->mac_managed_pm = 1; + phy_support_asym_pause(tp->phydev); + /* PHY will be woken up in rtl_open() */ phy_suspend(tp->phydev); diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 1409ae986aa2..8c84c40ab9a0 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -109,11 +109,13 @@ static void ravb_set_buffer_align(struct sk_buff *skb) * Ethernet AVB device doesn't have ROM for MAC address. * This function gets the MAC address that was used by a bootloader. 
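 * If the device tree provides no usable address, the MAHR/MALR
 * registers are read back instead.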
*/ -static void ravb_read_mac_address(struct net_device *ndev, const u8 *mac) +static void ravb_read_mac_address(struct device_node *np, + struct net_device *ndev) { - if (!IS_ERR(mac)) { - ether_addr_copy(ndev->dev_addr, mac); - } else { + int ret; + + ret = of_get_mac_address(np, ndev->dev_addr); + if (ret) { u32 mahr = ravb_read(ndev, MAHR); u32 malr = ravb_read(ndev, MALR); @@ -2207,7 +2209,7 @@ static int ravb_probe(struct platform_device *pdev) priv->msg_enable = RAVB_DEF_MSG_ENABLE; /* Read and set MAC address */ - ravb_read_mac_address(ndev, of_get_mac_address(np)); + ravb_read_mac_address(np, ndev); if (!is_valid_ether_addr(ndev->dev_addr)) { dev_warn(&pdev->dev, "no valid MAC address supplied, using a random one\n"); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index ebedb1a11132..c5b154868c1f 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -3170,7 +3170,6 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev) struct device_node *np = dev->of_node; struct sh_eth_plat_data *pdata; phy_interface_t interface; - const char *mac_addr; int ret; pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); @@ -3182,9 +3181,7 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev) return NULL; pdata->phy_interface = interface; - mac_addr = of_get_mac_address(np); - if (!IS_ERR(mac_addr)) - ether_addr_copy(pdata->mac_addr, mac_addr); + of_get_mac_address(np, pdata->mac_addr); pdata->no_ether_link = of_property_read_bool(np, "renesas,no-ether-link"); diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 3473d296b2e2..a46633606cae 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -2736,7 +2736,7 @@ static void rocker_switchdev_event_work(struct work_struct *work) switch (switchdev_work->event) { case SWITCHDEV_FDB_ADD_TO_DEVICE: fdb_info = &switchdev_work->fdb_info; - if (!fdb_info->added_by_user) + if (!fdb_info->added_by_user || fdb_info->is_local) break; err = rocker_world_port_fdb_add(rocker_port, fdb_info); if (err) { @@ -2747,7 +2747,7 @@ static void rocker_switchdev_event_work(struct work_struct *work) break; case SWITCHDEV_FDB_DEL_TO_DEVICE: fdb_info = &switchdev_work->fdb_info; - if (!fdb_info->added_by_user) + if (!fdb_info->added_by_user || fdb_info->is_local) break; err = rocker_world_port_fdb_del(rocker_port, fdb_info); if (err) diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c index 33f79402850d..4639ed9438a3 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c @@ -25,8 +25,7 @@ #ifdef CONFIG_OF static int sxgbe_probe_config_dt(struct platform_device *pdev, - struct sxgbe_plat_data *plat, - const char **mac) + struct sxgbe_plat_data *plat) { struct device_node *np = pdev->dev.of_node; struct sxgbe_dma_cfg *dma_cfg; @@ -35,7 +34,6 @@ static int sxgbe_probe_config_dt(struct platform_device *pdev, if (!np) return -ENODEV; - *mac = of_get_mac_address(np); err = of_get_phy_mode(np, &plat->interface); if (err && err != -ENODEV) return err; @@ -63,8 +61,7 @@ static int sxgbe_probe_config_dt(struct platform_device *pdev, } #else static int sxgbe_probe_config_dt(struct platform_device *pdev, - struct sxgbe_plat_data *plat, - const char **mac) + struct sxgbe_plat_data *plat) { return -ENOSYS; } @@ -85,7 +82,6 @@ static int 
sxgbe_platform_probe(struct platform_device *pdev) void __iomem *addr; struct sxgbe_priv_data *priv = NULL; struct sxgbe_plat_data *plat_dat = NULL; - const char *mac = NULL; struct net_device *ndev = platform_get_drvdata(pdev); struct device_node *node = dev->of_node; @@ -101,7 +97,7 @@ static int sxgbe_platform_probe(struct platform_device *pdev) if (!plat_dat) return -ENOMEM; - ret = sxgbe_probe_config_dt(pdev, plat_dat, &mac); + ret = sxgbe_probe_config_dt(pdev, plat_dat); if (ret) { pr_err("%s: main dt probe failed\n", __func__); return ret; @@ -122,8 +118,7 @@ static int sxgbe_platform_probe(struct platform_device *pdev) } /* Get MAC address if available (DT) */ - if (!IS_ERR_OR_NULL(mac)) - ether_addr_copy(priv->dev->dev_addr, mac); + of_get_mac_address(node, priv->dev->dev_addr); /* Get the TX/RX IRQ numbers */ for (i = 0, chan = 1; i < SXGBE_TX_QUEUES; i++) { diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index da6886dcac37..c873f961d5a5 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -1747,6 +1747,22 @@ static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names) mask, names); } +static void efx_ef10_get_fec_stats(struct efx_nic *efx, + struct ethtool_fec_stats *fec_stats) +{ + DECLARE_BITMAP(mask, EF10_STAT_COUNT); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + u64 *stats = nic_data->stats; + + efx_ef10_get_stat_mask(efx, mask); + if (test_bit(EF10_STAT_fec_corrected_errors, mask)) + fec_stats->corrected_blocks.total = + stats[EF10_STAT_fec_corrected_errors]; + if (test_bit(EF10_STAT_fec_uncorrected_errors, mask)) + fec_stats->uncorrectable_blocks.total = + stats[EF10_STAT_fec_uncorrected_errors]; +} + static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats, struct rtnl_link_stats64 *core_stats) { @@ -4122,6 +4138,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .get_wol = efx_ef10_get_wol, .set_wol = efx_ef10_set_wol, .resume_wol = efx_port_dummy_op_void, + .get_fec_stats = efx_ef10_get_fec_stats, .test_chip = efx_ef10_test_chip, .test_nvram = efx_mcdi_nvram_test_all, .mcdi_request = efx_ef10_mcdi_request, diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h index 3332cdf2918a..cd590e0685e5 100644 --- a/drivers/net/ethernet/sfc/enum.h +++ b/drivers/net/ethernet/sfc/enum.h @@ -78,7 +78,6 @@ enum efx_loopback_mode { (1 << LOOPBACK_XAUI) | \ (1 << LOOPBACK_GMII) | \ (1 << LOOPBACK_SGMII) | \ - (1 << LOOPBACK_SGMII) | \ (1 << LOOPBACK_XGBR) | \ (1 << LOOPBACK_XFI) | \ (1 << LOOPBACK_XAUI_FAR) | \ diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 12a91c559aa2..058d9fe41d99 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -206,6 +206,15 @@ static int efx_ethtool_set_wol(struct net_device *net_dev, return efx->type->set_wol(efx, wol->wolopts); } +static void efx_ethtool_get_fec_stats(struct net_device *net_dev, + struct ethtool_fec_stats *fec_stats) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->get_fec_stats) + efx->type->get_fec_stats(efx, fec_stats); +} + static int efx_ethtool_get_ts_info(struct net_device *net_dev, struct ethtool_ts_info *ts_info) { @@ -257,6 +266,7 @@ const struct ethtool_ops efx_ethtool_ops = { .get_module_eeprom = efx_ethtool_get_module_eeprom, .get_link_ksettings = efx_ethtool_get_link_ksettings, .set_link_ksettings = efx_ethtool_set_link_ksettings, + .get_fec_stats = efx_ethtool_get_fec_stats, .get_fecparam = 
efx_ethtool_get_fecparam, .set_fecparam = efx_ethtool_set_fecparam, }; diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 9f7dfdf708cf..9b4b25704271 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -1187,6 +1187,7 @@ struct efx_udp_tunnel { * @get_wol: Get WoL configuration from driver state * @set_wol: Push WoL configuration to the NIC * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume) + * @get_fec_stats: Get standard FEC statistics. * @test_chip: Test registers. May use efx_farch_test_registers(), and is * expected to reset the NIC. * @test_nvram: Test validity of NVRAM contents @@ -1332,6 +1333,8 @@ struct efx_nic_type { void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol); int (*set_wol)(struct efx_nic *efx, u32 type); void (*resume_wol)(struct efx_nic *efx); + void (*get_fec_stats)(struct efx_nic *efx, + struct ethtool_fec_stats *fec_stats); unsigned int (*check_caps)(const struct efx_nic *efx, u8 flag, u32 offset); diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index 501b9c7aba56..fcbb4bb31408 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -1559,7 +1559,6 @@ static int ave_probe(struct platform_device *pdev) struct ave_private *priv; struct net_device *ndev; struct device_node *np; - const void *mac_addr; void __iomem *base; const char *name; int i, irq, ret; @@ -1600,12 +1599,9 @@ static int ave_probe(struct platform_device *pdev) ndev->max_mtu = AVE_MAX_ETHFRAME - (ETH_HLEN + ETH_FCS_LEN); - mac_addr = of_get_mac_address(np); - if (!IS_ERR(mac_addr)) - ether_addr_copy(ndev->dev_addr, mac_addr); - - /* if the mac address is invalid, use random mac address */ - if (!is_valid_ether_addr(ndev->dev_addr)) { + ret = of_get_mac_address(np, ndev->dev_addr); + if (ret) { + /* if the mac address is invalid, use random mac address */ eth_hw_addr_random(ndev); dev_warn(dev, "Using random MAC address: %pM\n", ndev->dev_addr); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c index 08c76636c164..dfbaea06d108 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c @@ -115,7 +115,7 @@ static int anarion_dwmac_probe(struct platform_device *pdev) if (IS_ERR(gmac)) return PTR_ERR(gmac); - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c index 27254b27d7ed..bc91fd867dcd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c @@ -438,7 +438,7 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev) if (IS_ERR(stmmac_res.addr)) return PTR_ERR(stmmac_res.addr); - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c index fad503820e04..fbfda55b4c52 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c @@ -27,7 +27,7 @@ 
static int dwmac_generic_probe(struct platform_device *pdev) return ret; if (pdev->dev.of_node) { - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) { dev_err(&pdev->dev, "dt configuration failed\n"); return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c index c1a361305a5a..84651207a1de 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c @@ -231,7 +231,7 @@ static int imx_dwmac_probe(struct platform_device *pdev) if (!dwmac) return -ENOMEM; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c index 6c19fcc76c6f..06d287f104be 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c @@ -85,7 +85,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) { dev_err(&pdev->dev, "dt configuration failed\n"); return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c index 60566598d644..ec140fc4a0f5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c @@ -296,6 +296,13 @@ static int intel_crosststamp(ktime_t *device, intel_priv = priv->plat->bsp_priv; + /* Both internal crosstimestamping and external triggered event + * timestamping cannot be run concurrently. 
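+	 * Bail out with -EBUSY when an external snapshot is already
+	 * enabled.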
+ */ + if (priv->plat->ext_snapshot_en) + return -EBUSY; + + mutex_lock(&priv->aux_ts_lock); /* Enable Internal snapshot trigger */ acr_value = readl(ptpaddr + PTP_ACR); acr_value &= ~PTP_ACR_MASK; @@ -321,6 +328,8 @@ static int intel_crosststamp(ktime_t *device, acr_value = readl(ptpaddr + PTP_ACR); acr_value |= PTP_ACR_ATSFC; writel(acr_value, ptpaddr + PTP_ACR); + /* Release the mutex */ + mutex_unlock(&priv->aux_ts_lock); /* Trigger Internal snapshot signal * Create a rising edge by just toggle the GPO1 to low @@ -520,6 +529,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, plat->mdio_bus_data->phy_mask |= 1 << INTEL_MGBE_XPCS_ADDR; plat->int_snapshot_num = AUX_SNAPSHOT1; + plat->ext_snapshot_num = AUX_SNAPSHOT0; plat->has_crossts = true; plat->crosststamp = intel_crosststamp; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c index 749585fe6fc9..28dd0ed85a82 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c @@ -255,7 +255,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) if (val) return val; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c index 3d3f43d91b98..9d77c647badd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c @@ -37,7 +37,7 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c index 9e4b83832938..58c0feaa8131 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c @@ -407,7 +407,7 @@ static int mediatek_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c index bbc16b5a410a..16fb66a0ca72 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c @@ -52,7 +52,7 @@ static int meson6_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index 848e5c37746b..c7a6588d9398 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -398,7 +398,7 @@ static int meson8b_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git 
a/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c index 8551ea878ba5..adfeb8d3293d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c @@ -118,7 +118,7 @@ static int oxnas_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index a674b7d6b49a..84382fc5cc4d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -461,7 +461,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) { dev_err(&pdev->dev, "dt configuration failed\n"); return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 6ef30252bfe0..8d28a536e1bb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -1396,7 +1396,7 @@ static int rk_gmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 70d41783329d..85208128f135 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -398,7 +398,7 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c index e1b63df6f96f..710d7435733e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c @@ -325,7 +325,7 @@ static int sti_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c index 5d4df4c5254e..2b38a499a404 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c @@ -371,7 +371,7 @@ static int stm32_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 19e7ec30af4c..4422baeed3d8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -1221,7 +1221,7 @@ static int 
sun8i_dwmac_probe(struct platform_device *pdev) if (ret) return -EINVAL; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c index 0e1ca2cba3c7..527077c98ebc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c @@ -108,7 +108,7 @@ static int sun7i_gmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c index d23be45a64e5..d046e33b8a29 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c @@ -208,7 +208,7 @@ static int visconti_eth_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 2b5022ef1e52..2cc91759b91f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -504,6 +504,8 @@ struct stmmac_ops { #define stmmac_fpe_irq_status(__priv, __args...) \ stmmac_do_callback(__priv, mac, fpe_irq_status, __args) +struct stmmac_priv; + /* PTP and HW Timer helpers */ struct stmmac_hwtimestamp { void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data); @@ -515,6 +517,7 @@ struct stmmac_hwtimestamp { int add_sub, int gmac4); void (*get_systime) (void __iomem *ioaddr, u64 *systime); void (*get_ptptime)(void __iomem *ioaddr, u64 *ptp_time); + void (*timestamp_interrupt)(struct stmmac_priv *priv); }; #define stmmac_config_hw_tstamping(__priv, __args...) \ @@ -531,6 +534,8 @@ struct stmmac_hwtimestamp { stmmac_do_void_callback(__priv, ptp, get_systime, __args) #define stmmac_get_ptptime(__priv, __args...) \ stmmac_do_void_callback(__priv, ptp, get_ptptime, __args) +#define stmmac_timestamp_interrupt(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, ptp, timestamp_interrupt, __args) /* Helpers to manage the descriptors for chain and ring modes */ struct stmmac_mode_ops { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index c49debb62b05..b6cd43eda7ac 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -26,7 +26,7 @@ struct stmmac_resources { void __iomem *addr; - const char *mac; + u8 mac[ETH_ALEN]; int wol_irq; int lpi_irq; int irq; @@ -40,6 +40,7 @@ enum stmmac_txbuf_type { STMMAC_TXBUF_T_SKB, STMMAC_TXBUF_T_XDP_TX, STMMAC_TXBUF_T_XDP_NDO, + STMMAC_TXBUF_T_XSK_TX, }; struct stmmac_tx_info { @@ -69,6 +70,8 @@ struct stmmac_tx_queue { struct xdp_frame **xdpf; }; struct stmmac_tx_info *tx_skbuff_dma; + struct xsk_buff_pool *xsk_pool; + u32 xsk_frames_done; unsigned int cur_tx; unsigned int dirty_tx; dma_addr_t dma_tx_phy; @@ -77,9 +80,14 @@ struct stmmac_tx_queue { }; struct stmmac_rx_buffer { - struct page *page; - dma_addr_t addr; - __u32 page_offset; + union { + struct { + struct page *page; + dma_addr_t addr; + __u32 page_offset; + }; + struct xdp_buff *xdp; + }; struct page *sec_page; dma_addr_t sec_addr; }; @@ -88,6 +96,7 @@ struct stmmac_rx_queue { u32 rx_count_frames; u32 queue_index; struct xdp_rxq_info xdp_rxq; + struct xsk_buff_pool *xsk_pool; struct page_pool *page_pool; struct stmmac_rx_buffer *buf_pool; struct stmmac_priv *priv_data; @@ -95,6 +104,7 @@ struct stmmac_rx_queue { struct dma_desc *dma_rx ____cacheline_aligned_in_smp; unsigned int cur_rx; unsigned int dirty_rx; + unsigned int buf_alloc_num; u32 rx_zeroc_thresh; dma_addr_t dma_rx_phy; u32 rx_tail_addr; @@ -109,6 +119,7 @@ struct stmmac_rx_queue { struct stmmac_channel { struct napi_struct rx_napi ____cacheline_aligned_in_smp; struct napi_struct tx_napi ____cacheline_aligned_in_smp; + struct napi_struct rxtx_napi ____cacheline_aligned_in_smp; struct stmmac_priv *priv_data; spinlock_t lock; u32 index; @@ -239,6 +250,9 @@ struct stmmac_priv { int use_riwt; int irq_wake; spinlock_t ptp_lock; + /* Protects auxiliary snapshot registers from concurrent access. 
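+ * Writers hold it across the PTP_ACR read-modify-write in stmmac_enable() + * when a PTP_CLK_REQ_EXTTS request toggles the auxiliary snapshot.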
*/ + struct mutex aux_ts_lock; + void __iomem *mmcaddr; void __iomem *ptpaddr; unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; @@ -283,6 +297,7 @@ struct stmmac_priv { struct stmmac_rss rss; /* XDP BPF Program */ + unsigned long *af_xdp_zc_qps; struct bpf_prog *xdp_prog; }; @@ -328,6 +343,12 @@ static inline unsigned int stmmac_rx_offset(struct stmmac_priv *priv) return 0; } +void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue); +void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue); +void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue); +void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue); +int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags); + #if IS_ENABLED(CONFIG_STMMAC_SELFTESTS) void stmmac_selftest_run(struct net_device *dev, struct ethtool_test *etest, u64 *buf); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index 113c51bcc0b5..074e2cdfb0fa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -12,8 +12,11 @@ #include <linux/io.h> #include <linux/iopoll.h> #include <linux/delay.h> +#include <linux/ptp_clock_kernel.h> #include "common.h" #include "stmmac_ptp.h" +#include "dwmac4.h" +#include "stmmac.h" static void config_hw_tstamping(void __iomem *ioaddr, u32 data) { @@ -163,6 +166,41 @@ static void get_ptptime(void __iomem *ptpaddr, u64 *ptp_time) *ptp_time = ns; } +static void timestamp_interrupt(struct stmmac_priv *priv) +{ + u32 num_snapshot, ts_status, tsync_int; + struct ptp_clock_event event; + unsigned long flags; + u64 ptp_time; + int i; + + tsync_int = readl(priv->ioaddr + GMAC_INT_STATUS) & GMAC_INT_TSIE; + + if (!tsync_int) + return; + + /* Read timestamp status to clear interrupt from either external + * timestamp or start/end of PPS. 
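+ * This read must happen even when the external snapshot is disabled: + * the status register is read-to-clear, so skipping it would leave the + * interrupt asserted.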
+ */ + ts_status = readl(priv->ioaddr + GMAC_TIMESTAMP_STATUS); + + if (!priv->plat->ext_snapshot_en) + return; + + num_snapshot = (ts_status & GMAC_TIMESTAMP_ATSNS_MASK) >> + GMAC_TIMESTAMP_ATSNS_SHIFT; + + for (i = 0; i < num_snapshot; i++) { + spin_lock_irqsave(&priv->ptp_lock, flags); + get_ptptime(priv->ptpaddr, &ptp_time); + spin_unlock_irqrestore(&priv->ptp_lock, flags); + event.type = PTP_CLOCK_EXTTS; + event.index = 0; + event.timestamp = ptp_time; + ptp_clock_event(priv->ptp_clock, &event); + } +} + const struct stmmac_hwtimestamp stmmac_ptp = { .config_hw_tstamping = config_hw_tstamping, .init_systime = init_systime, @@ -171,4 +209,5 @@ const struct stmmac_hwtimestamp stmmac_ptp = { .adjust_systime = adjust_systime, .get_systime = get_systime, .get_ptptime = get_ptptime, + .timestamp_interrupt = timestamp_interrupt, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 77285646c5fc..9f396648d76f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -40,6 +40,7 @@ #include <linux/udp.h> #include <linux/bpf_trace.h> #include <net/pkt_cls.h> +#include <net/xdp_sock_drv.h> #include "stmmac_ptp.h" #include "stmmac.h" #include "stmmac_xdp.h" @@ -69,6 +70,11 @@ MODULE_PARM_DESC(phyaddr, "Physical device address"); #define STMMAC_TX_THRESH(x) ((x)->dma_tx_size / 4) #define STMMAC_RX_THRESH(x) ((x)->dma_rx_size / 4) +/* Limit to make sure XDP TX and slow path can coexist */ +#define STMMAC_XSK_TX_BUDGET_MAX 256 +#define STMMAC_TX_XSK_AVAIL 16 +#define STMMAC_RX_FILL_BATCH 16 + #define STMMAC_XDP_PASS 0 #define STMMAC_XDP_CONSUMED BIT(0) #define STMMAC_XDP_TX BIT(1) @@ -117,6 +123,8 @@ static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id); static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id); static irqreturn_t stmmac_msi_intr_tx(int irq, void *data); static irqreturn_t stmmac_msi_intr_rx(int irq, void *data); +static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue); +static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue); #ifdef CONFIG_DEBUG_FS static const struct net_device_ops stmmac_netdev_ops; @@ -179,11 +187,7 @@ static void stmmac_verify_args(void) eee_timer = STMMAC_DEFAULT_LPI_TIMER; } -/** - * stmmac_disable_all_queues - Disable all queues - * @priv: driver private structure - */ -static void stmmac_disable_all_queues(struct stmmac_priv *priv) +static void __stmmac_disable_all_queues(struct stmmac_priv *priv) { u32 rx_queues_cnt = priv->plat->rx_queues_to_use; u32 tx_queues_cnt = priv->plat->tx_queues_to_use; @@ -193,6 +197,12 @@ static void stmmac_disable_all_queues(struct stmmac_priv *priv) for (queue = 0; queue < maxq; queue++) { struct stmmac_channel *ch = &priv->channel[queue]; + if (stmmac_xdp_is_enabled(priv) && + test_bit(queue, priv->af_xdp_zc_qps)) { + napi_disable(&ch->rxtx_napi); + continue; + } + if (queue < rx_queues_cnt) napi_disable(&ch->rx_napi); if (queue < tx_queues_cnt) @@ -201,6 +211,28 @@ static void stmmac_disable_all_queues(struct stmmac_priv *priv) } /** + * stmmac_disable_all_queues - Disable all queues + * @priv: driver private structure + */ +static void stmmac_disable_all_queues(struct stmmac_priv *priv) +{ + u32 rx_queues_cnt = priv->plat->rx_queues_to_use; + struct stmmac_rx_queue *rx_q; + u32 queue; + + /* synchronize_rcu() needed for pending XDP buffers to drain */ + for (queue = 0; queue < rx_queues_cnt; queue++) { + rx_q = &priv->rx_queue[queue]; + if 
(rx_q->xsk_pool) { + synchronize_rcu(); + break; + } + } + + __stmmac_disable_all_queues(priv); +} + +/** * stmmac_enable_all_queues - Enable all queues * @priv: driver private structure */ @@ -214,6 +246,12 @@ static void stmmac_enable_all_queues(struct stmmac_priv *priv) for (queue = 0; queue < maxq; queue++) { struct stmmac_channel *ch = &priv->channel[queue]; + if (stmmac_xdp_is_enabled(priv) && + test_bit(queue, priv->af_xdp_zc_qps)) { + napi_enable(&ch->rxtx_napi); + continue; + } + if (queue < rx_queues_cnt) napi_enable(&ch->rx_napi); if (queue < tx_queues_cnt) @@ -1388,12 +1426,14 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; - buf->page = page_pool_dev_alloc_pages(rx_q->page_pool); - if (!buf->page) - return -ENOMEM; - buf->page_offset = stmmac_rx_offset(priv); + if (!buf->page) { + buf->page = page_pool_dev_alloc_pages(rx_q->page_pool); + if (!buf->page) + return -ENOMEM; + buf->page_offset = stmmac_rx_offset(priv); + } - if (priv->sph) { + if (priv->sph && !buf->sec_page) { buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool); if (!buf->sec_page) return -ENOMEM; @@ -1465,6 +1505,9 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i) tx_q->xdpf[i] = NULL; } + if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX) + tx_q->xsk_frames_done++; + if (tx_q->tx_skbuff[i] && tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) { dev_kfree_skb_any(tx_q->tx_skbuff[i]); @@ -1476,167 +1519,206 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i) } /** - * stmmac_reinit_rx_buffers - reinit the RX descriptor buffer. - * @priv: driver private structure - * Description: this function is called to re-allocate a receive buffer, perform - * the DMA mapping and init the descriptor. 
+ * dma_free_rx_skbufs - free RX dma buffers + * @priv: private structure + * @queue: RX queue index */ -static void stmmac_reinit_rx_buffers(struct stmmac_priv *priv) +static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue) { - u32 rx_count = priv->plat->rx_queues_to_use; - u32 queue; int i; - for (queue = 0; queue < rx_count; queue++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + for (i = 0; i < priv->dma_rx_size; i++) + stmmac_free_rx_buffer(priv, queue, i); +} - for (i = 0; i < priv->dma_rx_size; i++) { - struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; +static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue, + gfp_t flags) +{ + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + int i; - if (buf->page) { - page_pool_recycle_direct(rx_q->page_pool, buf->page); - buf->page = NULL; - } + for (i = 0; i < priv->dma_rx_size; i++) { + struct dma_desc *p; + int ret; - if (priv->sph && buf->sec_page) { - page_pool_recycle_direct(rx_q->page_pool, buf->sec_page); - buf->sec_page = NULL; - } - } + if (priv->extend_desc) + p = &((rx_q->dma_erx + i)->basic); + else + p = rx_q->dma_rx + i; + + ret = stmmac_init_rx_buffers(priv, p, i, flags, + queue); + if (ret) + return ret; + + rx_q->buf_alloc_num++; } - for (queue = 0; queue < rx_count; queue++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + return 0; +} - for (i = 0; i < priv->dma_rx_size; i++) { - struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; - struct dma_desc *p; +/** + * dma_free_rx_xskbufs - free RX dma buffers from XSK pool + * @priv: private structure + * @queue: RX queue index + */ +static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + int i; - if (priv->extend_desc) - p = &((rx_q->dma_erx + i)->basic); - else - p = rx_q->dma_rx + i; + for (i = 0; i < priv->dma_rx_size; i++) { + struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; - if (!buf->page) { - buf->page = page_pool_dev_alloc_pages(rx_q->page_pool); - if (!buf->page) - goto err_reinit_rx_buffers; + if (!buf->xdp) + continue; - buf->addr = page_pool_get_dma_addr(buf->page) + - buf->page_offset; - } + xsk_buff_free(buf->xdp); + buf->xdp = NULL; + } +} - if (priv->sph && !buf->sec_page) { - buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool); - if (!buf->sec_page) - goto err_reinit_rx_buffers; +static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + int i; - buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); - } + for (i = 0; i < priv->dma_rx_size; i++) { + struct stmmac_rx_buffer *buf; + dma_addr_t dma_addr; + struct dma_desc *p; - stmmac_set_desc_addr(priv, p, buf->addr); - if (priv->sph) - stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); - else - stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); - if (priv->dma_buf_sz == BUF_SIZE_16KiB) - stmmac_init_desc3(priv, p); - } - } + if (priv->extend_desc) + p = (struct dma_desc *)(rx_q->dma_erx + i); + else + p = rx_q->dma_rx + i; - return; + buf = &rx_q->buf_pool[i]; -err_reinit_rx_buffers: - do { - while (--i >= 0) - stmmac_free_rx_buffer(priv, queue, i); + buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); + if (!buf->xdp) + return -ENOMEM; - if (queue == 0) - break; + dma_addr = xsk_buff_xdp_get_dma(buf->xdp); + stmmac_set_desc_addr(priv, p, dma_addr); + rx_q->buf_alloc_num++; + } - i = priv->dma_rx_size; - } while (queue-- > 0); + return 0; +} + +static struct xsk_buff_pool 
*stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue) +{ + if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps)) + return NULL; + + return xsk_get_pool_from_qid(priv->dev, queue); } /** - * init_dma_rx_desc_rings - init the RX descriptor rings - * @dev: net device structure + * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue) + * @priv: driver private structure + * @queue: RX queue index * @flags: gfp flag. * Description: this function initializes the DMA RX descriptors * and allocates the socket buffers. It supports the chained and ring * modes. */ -static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) +static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags) { - struct stmmac_priv *priv = netdev_priv(dev); - u32 rx_count = priv->plat->rx_queues_to_use; - int ret = -ENOMEM; - int queue; - int i; + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + int ret; - /* RX INITIALIZATION */ netif_dbg(priv, probe, priv->dev, - "SKB addresses:\nskb\t\tskb data\tdma data\n"); + "(%s) dma_rx_phy=0x%08x\n", __func__, + (u32)rx_q->dma_rx_phy); - for (queue = 0; queue < rx_count; queue++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; - int ret; + stmmac_clear_rx_descriptors(priv, queue); - netif_dbg(priv, probe, priv->dev, - "(%s) dma_rx_phy=0x%08x\n", __func__, - (u32)rx_q->dma_rx_phy); + xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq); - stmmac_clear_rx_descriptors(priv, queue); + rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); + if (rx_q->xsk_pool) { + WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq, + MEM_TYPE_XSK_BUFF_POOL, + NULL)); + netdev_info(priv->dev, + "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n", + rx_q->queue_index); + xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq); + } else { WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq, MEM_TYPE_PAGE_POOL, rx_q->page_pool)); - netdev_info(priv->dev, "Register MEM_TYPE_PAGE_POOL RxQ-%d\n", rx_q->queue_index); + } - for (i = 0; i < priv->dma_rx_size; i++) { - struct dma_desc *p; + if (rx_q->xsk_pool) { + /* RX XDP ZC buffer pool may not be populated, e.g. + * xdpsock TX-only. 
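+ * The return value is intentionally ignored; stmmac_rx_refill_zc() will + * retry the allocation from the RX path once the pool has buffers.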
+ */ + stmmac_alloc_rx_buffers_zc(priv, queue); + } else { + ret = stmmac_alloc_rx_buffers(priv, queue, flags); + if (ret < 0) + return -ENOMEM; + } - if (priv->extend_desc) - p = &((rx_q->dma_erx + i)->basic); - else - p = rx_q->dma_rx + i; + rx_q->cur_rx = 0; + rx_q->dirty_rx = 0; - ret = stmmac_init_rx_buffers(priv, p, i, flags, - queue); - if (ret) - goto err_init_rx_buffers; - } + /* Setup the chained descriptor addresses */ + if (priv->mode == STMMAC_CHAIN_MODE) { + if (priv->extend_desc) + stmmac_mode_init(priv, rx_q->dma_erx, + rx_q->dma_rx_phy, + priv->dma_rx_size, 1); + else + stmmac_mode_init(priv, rx_q->dma_rx, + rx_q->dma_rx_phy, + priv->dma_rx_size, 0); + } - rx_q->cur_rx = 0; - rx_q->dirty_rx = (unsigned int)(i - priv->dma_rx_size); - - /* Setup the chained descriptor addresses */ - if (priv->mode == STMMAC_CHAIN_MODE) { - if (priv->extend_desc) - stmmac_mode_init(priv, rx_q->dma_erx, - rx_q->dma_rx_phy, - priv->dma_rx_size, 1); - else - stmmac_mode_init(priv, rx_q->dma_rx, - rx_q->dma_rx_phy, - priv->dma_rx_size, 0); - } + return 0; +} + +static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 rx_count = priv->plat->rx_queues_to_use; + u32 queue; + int ret; + + /* RX INITIALIZATION */ + netif_dbg(priv, probe, priv->dev, + "SKB addresses:\nskb\t\tskb data\tdma data\n"); + + for (queue = 0; queue < rx_count; queue++) { + ret = __init_dma_rx_desc_rings(priv, queue, flags); + if (ret) + goto err_init_rx_buffers; } return 0; err_init_rx_buffers: while (queue >= 0) { - while (--i >= 0) - stmmac_free_rx_buffer(priv, queue, i); + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + + if (rx_q->xsk_pool) + dma_free_rx_xskbufs(priv, queue); + else + dma_free_rx_skbufs(priv, queue); + + rx_q->buf_alloc_num = 0; + rx_q->xsk_pool = NULL; if (queue == 0) break; - i = priv->dma_rx_size; queue--; } @@ -1644,63 +1726,75 @@ err_init_rx_buffers: } /** - * init_dma_tx_desc_rings - init the TX descriptor rings - * @dev: net device structure. + * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue) + * @priv: driver private structure + * @queue : TX queue index * Description: this function initializes the DMA TX descriptors * and allocates the socket buffers. It supports the chained and ring * modes. 
*/ -static int init_dma_tx_desc_rings(struct net_device *dev) +static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue) { - struct stmmac_priv *priv = netdev_priv(dev); - u32 tx_queue_cnt = priv->plat->tx_queues_to_use; - u32 queue; + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; int i; - for (queue = 0; queue < tx_queue_cnt; queue++) { - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + netif_dbg(priv, probe, priv->dev, + "(%s) dma_tx_phy=0x%08x\n", __func__, + (u32)tx_q->dma_tx_phy); - netif_dbg(priv, probe, priv->dev, - "(%s) dma_tx_phy=0x%08x\n", __func__, - (u32)tx_q->dma_tx_phy); - - /* Setup the chained descriptor addresses */ - if (priv->mode == STMMAC_CHAIN_MODE) { - if (priv->extend_desc) - stmmac_mode_init(priv, tx_q->dma_etx, - tx_q->dma_tx_phy, - priv->dma_tx_size, 1); - else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) - stmmac_mode_init(priv, tx_q->dma_tx, - tx_q->dma_tx_phy, - priv->dma_tx_size, 0); - } + /* Setup the chained descriptor addresses */ + if (priv->mode == STMMAC_CHAIN_MODE) { + if (priv->extend_desc) + stmmac_mode_init(priv, tx_q->dma_etx, + tx_q->dma_tx_phy, + priv->dma_tx_size, 1); + else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) + stmmac_mode_init(priv, tx_q->dma_tx, + tx_q->dma_tx_phy, + priv->dma_tx_size, 0); + } - for (i = 0; i < priv->dma_tx_size; i++) { - struct dma_desc *p; - if (priv->extend_desc) - p = &((tx_q->dma_etx + i)->basic); - else if (tx_q->tbs & STMMAC_TBS_AVAIL) - p = &((tx_q->dma_entx + i)->basic); - else - p = tx_q->dma_tx + i; + tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); - stmmac_clear_desc(priv, p); + for (i = 0; i < priv->dma_tx_size; i++) { + struct dma_desc *p; - tx_q->tx_skbuff_dma[i].buf = 0; - tx_q->tx_skbuff_dma[i].map_as_page = false; - tx_q->tx_skbuff_dma[i].len = 0; - tx_q->tx_skbuff_dma[i].last_segment = false; - tx_q->tx_skbuff[i] = NULL; - } + if (priv->extend_desc) + p = &((tx_q->dma_etx + i)->basic); + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + p = &((tx_q->dma_entx + i)->basic); + else + p = tx_q->dma_tx + i; - tx_q->dirty_tx = 0; - tx_q->cur_tx = 0; - tx_q->mss = 0; + stmmac_clear_desc(priv, p); - netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); + tx_q->tx_skbuff_dma[i].buf = 0; + tx_q->tx_skbuff_dma[i].map_as_page = false; + tx_q->tx_skbuff_dma[i].len = 0; + tx_q->tx_skbuff_dma[i].last_segment = false; + tx_q->tx_skbuff[i] = NULL; } + tx_q->dirty_tx = 0; + tx_q->cur_tx = 0; + tx_q->mss = 0; + + netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); + + return 0; +} + +static int init_dma_tx_desc_rings(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 tx_queue_cnt; + u32 queue; + + tx_queue_cnt = priv->plat->tx_queues_to_use; + + for (queue = 0; queue < tx_queue_cnt; queue++) + __init_dma_tx_desc_rings(priv, queue); + return 0; } @@ -1732,29 +1826,25 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) } /** - * dma_free_rx_skbufs - free RX dma buffers - * @priv: private structure - * @queue: RX queue index - */ -static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue) -{ - int i; - - for (i = 0; i < priv->dma_rx_size; i++) - stmmac_free_rx_buffer(priv, queue, i); -} - -/** * dma_free_tx_skbufs - free TX dma buffers * @priv: private structure * @queue: TX queue index */ static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue) { + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; int i; + tx_q->xsk_frames_done = 0; + for (i = 0; i < priv->dma_tx_size; i++) stmmac_free_tx_buffer(priv, queue, i); + + if 
(tx_q->xsk_pool && tx_q->xsk_frames_done) { + xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); + tx_q->xsk_frames_done = 0; + tx_q->xsk_pool = NULL; + } } /** @@ -1771,153 +1861,186 @@ static void stmmac_free_tx_skbufs(struct stmmac_priv *priv) } /** - * free_dma_rx_desc_resources - free RX dma desc resources + * __free_dma_rx_desc_resources - free RX dma desc resources (per queue) * @priv: private structure + * @queue: RX queue index */ -static void free_dma_rx_desc_resources(struct stmmac_priv *priv) +static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) { - u32 rx_count = priv->plat->rx_queues_to_use; - u32 queue; - - /* Free RX queue resources */ - for (queue = 0; queue < rx_count; queue++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; - /* Release the DMA RX socket buffers */ + /* Release the DMA RX socket buffers */ + if (rx_q->xsk_pool) + dma_free_rx_xskbufs(priv, queue); + else dma_free_rx_skbufs(priv, queue); - /* Free DMA regions of consistent memory previously allocated */ - if (!priv->extend_desc) - dma_free_coherent(priv->device, priv->dma_rx_size * - sizeof(struct dma_desc), - rx_q->dma_rx, rx_q->dma_rx_phy); - else - dma_free_coherent(priv->device, priv->dma_rx_size * - sizeof(struct dma_extended_desc), - rx_q->dma_erx, rx_q->dma_rx_phy); + rx_q->buf_alloc_num = 0; + rx_q->xsk_pool = NULL; - if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq)) - xdp_rxq_info_unreg(&rx_q->xdp_rxq); + /* Free DMA regions of consistent memory previously allocated */ + if (!priv->extend_desc) + dma_free_coherent(priv->device, priv->dma_rx_size * + sizeof(struct dma_desc), + rx_q->dma_rx, rx_q->dma_rx_phy); + else + dma_free_coherent(priv->device, priv->dma_rx_size * + sizeof(struct dma_extended_desc), + rx_q->dma_erx, rx_q->dma_rx_phy); - kfree(rx_q->buf_pool); - if (rx_q->page_pool) - page_pool_destroy(rx_q->page_pool); - } + if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq)) + xdp_rxq_info_unreg(&rx_q->xdp_rxq); + + kfree(rx_q->buf_pool); + if (rx_q->page_pool) + page_pool_destroy(rx_q->page_pool); +} + +static void free_dma_rx_desc_resources(struct stmmac_priv *priv) +{ + u32 rx_count = priv->plat->rx_queues_to_use; + u32 queue; + + /* Free RX queue resources */ + for (queue = 0; queue < rx_count; queue++) + __free_dma_rx_desc_resources(priv, queue); } /** - * free_dma_tx_desc_resources - free TX dma desc resources + * __free_dma_tx_desc_resources - free TX dma desc resources (per queue) * @priv: private structure + * @queue: TX queue index */ -static void free_dma_tx_desc_resources(struct stmmac_priv *priv) +static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue) { - u32 tx_count = priv->plat->tx_queues_to_use; - u32 queue; + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + size_t size; + void *addr; - /* Free TX queue resources */ - for (queue = 0; queue < tx_count; queue++) { - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; - size_t size; - void *addr; + /* Release the DMA TX socket buffers */ + dma_free_tx_skbufs(priv, queue); + + if (priv->extend_desc) { + size = sizeof(struct dma_extended_desc); + addr = tx_q->dma_etx; + } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { + size = sizeof(struct dma_edesc); + addr = tx_q->dma_entx; + } else { + size = sizeof(struct dma_desc); + addr = tx_q->dma_tx; + } - /* Release the DMA TX socket buffers */ - dma_free_tx_skbufs(priv, queue); + size *= priv->dma_tx_size; - if (priv->extend_desc) { - size = sizeof(struct dma_extended_desc); - 
addr = tx_q->dma_etx; - } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { - size = sizeof(struct dma_edesc); - addr = tx_q->dma_entx; - } else { - size = sizeof(struct dma_desc); - addr = tx_q->dma_tx; - } + dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); - size *= priv->dma_tx_size; + kfree(tx_q->tx_skbuff_dma); + kfree(tx_q->tx_skbuff); +} - dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); +static void free_dma_tx_desc_resources(struct stmmac_priv *priv) +{ + u32 tx_count = priv->plat->tx_queues_to_use; + u32 queue; - kfree(tx_q->tx_skbuff_dma); - kfree(tx_q->tx_skbuff); - } + /* Free TX queue resources */ + for (queue = 0; queue < tx_count; queue++) + __free_dma_tx_desc_resources(priv, queue); } /** - * alloc_dma_rx_desc_resources - alloc RX resources. + * __alloc_dma_rx_desc_resources - alloc RX resources (per queue). * @priv: private structure + * @queue: RX queue index * Description: according to which descriptor can be used (extend or basic) * this function allocates the resources for TX and RX paths. In case of * reception, for example, it pre-allocated the RX socket buffer in order to * allow zero-copy mechanism. */ -static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) +static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) { + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + struct stmmac_channel *ch = &priv->channel[queue]; bool xdp_prog = stmmac_xdp_is_enabled(priv); - u32 rx_count = priv->plat->rx_queues_to_use; - int ret = -ENOMEM; - u32 queue; + struct page_pool_params pp_params = { 0 }; + unsigned int num_pages; + unsigned int napi_id; + int ret; - /* RX queues buffers and DMA */ - for (queue = 0; queue < rx_count; queue++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; - struct stmmac_channel *ch = &priv->channel[queue]; - struct page_pool_params pp_params = { 0 }; - unsigned int num_pages; - int ret; + rx_q->queue_index = queue; + rx_q->priv_data = priv; + + pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; + pp_params.pool_size = priv->dma_rx_size; + num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE); + pp_params.order = ilog2(num_pages); + pp_params.nid = dev_to_node(priv->device); + pp_params.dev = priv->device; + pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; + pp_params.offset = stmmac_rx_offset(priv); + pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages); + + rx_q->page_pool = page_pool_create(&pp_params); + if (IS_ERR(rx_q->page_pool)) { + ret = PTR_ERR(rx_q->page_pool); + rx_q->page_pool = NULL; + return ret; + } - rx_q->queue_index = queue; - rx_q->priv_data = priv; - - pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; - pp_params.pool_size = priv->dma_rx_size; - num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE); - pp_params.order = ilog2(num_pages); - pp_params.nid = dev_to_node(priv->device); - pp_params.dev = priv->device; - pp_params.dma_dir = xdp_prog ? 
DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; - pp_params.offset = stmmac_rx_offset(priv); - pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages); - - rx_q->page_pool = page_pool_create(&pp_params); - if (IS_ERR(rx_q->page_pool)) { - ret = PTR_ERR(rx_q->page_pool); - rx_q->page_pool = NULL; - goto err_dma; - } + rx_q->buf_pool = kcalloc(priv->dma_rx_size, + sizeof(*rx_q->buf_pool), + GFP_KERNEL); + if (!rx_q->buf_pool) + return -ENOMEM; - rx_q->buf_pool = kcalloc(priv->dma_rx_size, - sizeof(*rx_q->buf_pool), - GFP_KERNEL); - if (!rx_q->buf_pool) - goto err_dma; + if (priv->extend_desc) { + rx_q->dma_erx = dma_alloc_coherent(priv->device, + priv->dma_rx_size * + sizeof(struct dma_extended_desc), + &rx_q->dma_rx_phy, + GFP_KERNEL); + if (!rx_q->dma_erx) + return -ENOMEM; - if (priv->extend_desc) { - rx_q->dma_erx = dma_alloc_coherent(priv->device, - priv->dma_rx_size * - sizeof(struct dma_extended_desc), - &rx_q->dma_rx_phy, - GFP_KERNEL); - if (!rx_q->dma_erx) - goto err_dma; + } else { + rx_q->dma_rx = dma_alloc_coherent(priv->device, + priv->dma_rx_size * + sizeof(struct dma_desc), + &rx_q->dma_rx_phy, + GFP_KERNEL); + if (!rx_q->dma_rx) + return -ENOMEM; + } - } else { - rx_q->dma_rx = dma_alloc_coherent(priv->device, - priv->dma_rx_size * - sizeof(struct dma_desc), - &rx_q->dma_rx_phy, - GFP_KERNEL); - if (!rx_q->dma_rx) - goto err_dma; - } + if (stmmac_xdp_is_enabled(priv) && + test_bit(queue, priv->af_xdp_zc_qps)) + napi_id = ch->rxtx_napi.napi_id; + else + napi_id = ch->rx_napi.napi_id; - ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev, - rx_q->queue_index, - ch->rx_napi.napi_id); - if (ret) { - netdev_err(priv->dev, "Failed to register xdp rxq info\n"); + ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev, + rx_q->queue_index, + napi_id); + if (ret) { + netdev_err(priv->dev, "Failed to register xdp rxq info\n"); + return -EINVAL; + } + + return 0; +} + +static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) +{ + u32 rx_count = priv->plat->rx_queues_to_use; + u32 queue; + int ret; + + /* RX queues buffers and DMA */ + for (queue = 0; queue < rx_count; queue++) { + ret = __alloc_dma_rx_desc_resources(priv, queue); + if (ret) goto err_dma; - } } return 0; @@ -1929,60 +2052,70 @@ err_dma: } /** - * alloc_dma_tx_desc_resources - alloc TX resources. + * __alloc_dma_tx_desc_resources - alloc TX resources (per queue). * @priv: private structure + * @queue: TX queue index * Description: according to which descriptor can be used (extend or basic) * this function allocates the resources for TX and RX paths. In case of * reception, for example, it pre-allocated the RX socket buffer in order to * allow zero-copy mechanism. 
*/ -static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) +static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue) { - u32 tx_count = priv->plat->tx_queues_to_use; - int ret = -ENOMEM; - u32 queue; + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + size_t size; + void *addr; - /* TX queues buffers and DMA */ - for (queue = 0; queue < tx_count; queue++) { - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; - size_t size; - void *addr; + tx_q->queue_index = queue; + tx_q->priv_data = priv; - tx_q->queue_index = queue; - tx_q->priv_data = priv; + tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size, + sizeof(*tx_q->tx_skbuff_dma), + GFP_KERNEL); + if (!tx_q->tx_skbuff_dma) + return -ENOMEM; - tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size, - sizeof(*tx_q->tx_skbuff_dma), - GFP_KERNEL); - if (!tx_q->tx_skbuff_dma) - goto err_dma; + tx_q->tx_skbuff = kcalloc(priv->dma_tx_size, + sizeof(struct sk_buff *), + GFP_KERNEL); + if (!tx_q->tx_skbuff) + return -ENOMEM; - tx_q->tx_skbuff = kcalloc(priv->dma_tx_size, - sizeof(struct sk_buff *), - GFP_KERNEL); - if (!tx_q->tx_skbuff) - goto err_dma; + if (priv->extend_desc) + size = sizeof(struct dma_extended_desc); + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + size = sizeof(struct dma_edesc); + else + size = sizeof(struct dma_desc); - if (priv->extend_desc) - size = sizeof(struct dma_extended_desc); - else if (tx_q->tbs & STMMAC_TBS_AVAIL) - size = sizeof(struct dma_edesc); - else - size = sizeof(struct dma_desc); + size *= priv->dma_tx_size; - size *= priv->dma_tx_size; + addr = dma_alloc_coherent(priv->device, size, + &tx_q->dma_tx_phy, GFP_KERNEL); + if (!addr) + return -ENOMEM; - addr = dma_alloc_coherent(priv->device, size, - &tx_q->dma_tx_phy, GFP_KERNEL); - if (!addr) - goto err_dma; + if (priv->extend_desc) + tx_q->dma_etx = addr; + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + tx_q->dma_entx = addr; + else + tx_q->dma_tx = addr; - if (priv->extend_desc) - tx_q->dma_etx = addr; - else if (tx_q->tbs & STMMAC_TBS_AVAIL) - tx_q->dma_entx = addr; - else - tx_q->dma_tx = addr; + return 0; +} + +static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) +{ + u32 tx_count = priv->plat->tx_queues_to_use; + u32 queue; + int ret; + + /* TX queues buffers and DMA */ + for (queue = 0; queue < tx_count; queue++) { + ret = __alloc_dma_tx_desc_resources(priv, queue); + if (ret) + goto err_dma; } return 0; @@ -2182,12 +2315,24 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) /* configure all channels */ for (chan = 0; chan < rx_channels_count; chan++) { + struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan]; + u32 buf_size; + qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, qmode); - stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz, - chan); + + if (rx_q->xsk_pool) { + buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); + stmmac_set_dma_bfsize(priv, priv->ioaddr, + buf_size, + chan); + } else { + stmmac_set_dma_bfsize(priv, priv->ioaddr, + priv->dma_buf_sz, + chan); + } } for (chan = 0; chan < tx_channels_count; chan++) { @@ -2198,6 +2343,101 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) } } +static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) +{ + struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue); + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + struct xsk_buff_pool *pool = tx_q->xsk_pool; + unsigned int entry = tx_q->cur_tx; + struct dma_desc *tx_desc = 
NULL; + struct xdp_desc xdp_desc; + bool work_done = true; + + /* Avoids TX time-out as we are sharing with slow path */ + nq->trans_start = jiffies; + + budget = min(budget, stmmac_tx_avail(priv, queue)); + + while (budget-- > 0) { + dma_addr_t dma_addr; + bool set_ic; + + /* We are sharing with the slow path and stop XSK TX desc submission when + * the available TX ring space is less than the threshold. + */ + if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) || + !netif_carrier_ok(priv->dev)) { + work_done = false; + break; + } + + if (!xsk_tx_peek_desc(pool, &xdp_desc)) + break; + + if (likely(priv->extend_desc)) + tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + tx_desc = &tx_q->dma_entx[entry].basic; + else + tx_desc = tx_q->dma_tx + entry; + + dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr); + xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len); + + tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX; + + /* To return the XDP buffer to the XSK pool, we simply call + * xsk_tx_completed(), so we don't need to fill up + * 'buf' and 'xdpf'. + */ + tx_q->tx_skbuff_dma[entry].buf = 0; + tx_q->xdpf[entry] = NULL; + + tx_q->tx_skbuff_dma[entry].map_as_page = false; + tx_q->tx_skbuff_dma[entry].len = xdp_desc.len; + tx_q->tx_skbuff_dma[entry].last_segment = true; + tx_q->tx_skbuff_dma[entry].is_jumbo = false; + + stmmac_set_desc_addr(priv, tx_desc, dma_addr); + + tx_q->tx_count_frames++; + + if (!priv->tx_coal_frames[queue]) + set_ic = false; + else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) + set_ic = true; + else + set_ic = false; + + if (set_ic) { + tx_q->tx_count_frames = 0; + stmmac_set_tx_ic(priv, tx_desc); + priv->xstats.tx_set_ic_bit++; + } + + stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len, + true, priv->mode, true, true, + xdp_desc.len); + + stmmac_enable_dma_transmission(priv, priv->ioaddr); + + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size); + entry = tx_q->cur_tx; + } + + if (tx_desc) { + stmmac_flush_tx_descriptors(priv, queue); + xsk_tx_release(pool); + } + + /* Return true only if both conditions are met: + * a) TX budget is still available, and + * b) work_done == true, i.e. the XSK TX desc peek came up empty (no + * more pending XSK TX frames for transmission). + */ + return !!budget && work_done; +} + /** * stmmac_tx_clean - to manage the transmission completion * @priv: driver private structure @@ -2209,14 +2449,18 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) { struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; unsigned int bytes_compl = 0, pkts_compl = 0; - unsigned int entry, count = 0; + unsigned int entry, xmits = 0, count = 0; __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue)); priv->xstats.tx_clean++; + tx_q->xsk_frames_done = 0; + entry = tx_q->dirty_tx; - while ((entry != tx_q->cur_tx) && (count < budget)) { + + /* Try to clean all completed TX frames in one shot */ + while ((entry != tx_q->cur_tx) && count < priv->dma_tx_size) { struct xdp_frame *xdpf; struct sk_buff *skb; struct dma_desc *p; @@ -2301,6 +2545,9 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) tx_q->xdpf[entry] = NULL; } + if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX) + tx_q->xsk_frames_done++; + if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { if (likely(skb)) { pkts_compl++; @@ -2328,6 +2575,28 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) 
netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue)); } + if (tx_q->xsk_pool) { + bool work_done; + + if (tx_q->xsk_frames_done) + xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); + + if (xsk_uses_need_wakeup(tx_q->xsk_pool)) + xsk_set_tx_need_wakeup(tx_q->xsk_pool); + + /* For XSK TX, we try to send as many as possible. + * If XSK work done (XSK TX desc empty and budget still + * available), return "budget - 1" to reenable TX IRQ. + * Else, return "budget" to make NAPI continue polling. + */ + work_done = stmmac_xdp_xmit_zc(priv, queue, + STMMAC_XSK_TX_BUDGET_MAX); + if (work_done) + xmits = budget - 1; + else + xmits = budget; + } + if (priv->eee_enabled && !priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en) { stmmac_enable_eee_mode(priv); @@ -2342,7 +2611,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); - return count; + /* Combine decisions from TX clean and XSK TX */ + return max(count, xmits); } /** @@ -2424,24 +2694,31 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir) { int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, &priv->xstats, chan, dir); + struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan]; + struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; struct stmmac_channel *ch = &priv->channel[chan]; + struct napi_struct *rx_napi; + struct napi_struct *tx_napi; unsigned long flags; + rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi; + tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; + if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) { - if (napi_schedule_prep(&ch->rx_napi)) { + if (napi_schedule_prep(rx_napi)) { spin_lock_irqsave(&ch->lock, flags); stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0); spin_unlock_irqrestore(&ch->lock, flags); - __napi_schedule(&ch->rx_napi); + __napi_schedule(rx_napi); } } if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) { - if (napi_schedule_prep(&ch->tx_napi)) { + if (napi_schedule_prep(tx_napi)) { spin_lock_irqsave(&ch->lock, flags); stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1); spin_unlock_irqrestore(&ch->lock, flags); - __napi_schedule(&ch->tx_napi); + __napi_schedule(tx_napi); } } @@ -2598,7 +2875,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) rx_q->dma_rx_phy, chan); rx_q->rx_tail_addr = rx_q->dma_rx_phy + - (priv->dma_rx_size * + (rx_q->buf_alloc_num * sizeof(struct dma_desc)); stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, chan); @@ -2639,16 +2916,18 @@ static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t) struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer); struct stmmac_priv *priv = tx_q->priv_data; struct stmmac_channel *ch; + struct napi_struct *napi; ch = &priv->channel[tx_q->queue_index]; + napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; - if (likely(napi_schedule_prep(&ch->tx_napi))) { + if (likely(napi_schedule_prep(napi))) { unsigned long flags; spin_lock_irqsave(&ch->lock, flags); stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1); spin_unlock_irqrestore(&ch->lock, flags); - __napi_schedule(&ch->tx_napi); + __napi_schedule(napi); } return HRTIMER_NORESTART; @@ -4368,20 +4647,13 @@ static int stmmac_xdp_xmit_back(struct stmmac_priv *priv, return res; } -static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv, - struct xdp_buff *xdp) +/* This function assumes rcu_read_lock() is held by the caller. 
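+ * Both stmmac_xdp_run_prog() and the XSK ZC RX path fetch priv->xdp_prog + * with READ_ONCE() under rcu_read_lock() before calling in.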
*/ +static int __stmmac_xdp_run_prog(struct stmmac_priv *priv, + struct bpf_prog *prog, + struct xdp_buff *xdp) { - struct bpf_prog *prog; - int res; u32 act; - - rcu_read_lock(); - - prog = READ_ONCE(priv->xdp_prog); - if (!prog) { - res = STMMAC_XDP_PASS; - goto unlock; - } + int res; act = bpf_prog_run_xdp(prog, xdp); switch (act) { @@ -4408,6 +4680,24 @@ static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv, break; } + return res; +} + +static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv, + struct xdp_buff *xdp) +{ + struct bpf_prog *prog; + int res; + + rcu_read_lock(); + + prog = READ_ONCE(priv->xdp_prog); + if (!prog) { + res = STMMAC_XDP_PASS; + goto unlock; + } + + res = __stmmac_xdp_run_prog(priv, prog, xdp); unlock: rcu_read_unlock(); return ERR_PTR(-res); @@ -4428,6 +4718,302 @@ static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv, xdp_do_flush(); } +static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch, + struct xdp_buff *xdp) +{ + unsigned int metasize = xdp->data - xdp->data_meta; + unsigned int datasize = xdp->data_end - xdp->data; + struct sk_buff *skb; + + skb = __napi_alloc_skb(&ch->rxtx_napi, + xdp->data_end - xdp->data_hard_start, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; + + skb_reserve(skb, xdp->data - xdp->data_hard_start); + memcpy(__skb_put(skb, datasize), xdp->data, datasize); + if (metasize) + skb_metadata_set(skb, metasize); + + return skb; +} + +static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue, + struct dma_desc *p, struct dma_desc *np, + struct xdp_buff *xdp) +{ + struct stmmac_channel *ch = &priv->channel[queue]; + unsigned int len = xdp->data_end - xdp->data; + enum pkt_hash_types hash_type; + int coe = priv->hw->rx_csum; + struct sk_buff *skb; + u32 hash; + + skb = stmmac_construct_skb_zc(ch, xdp); + if (!skb) { + priv->dev->stats.rx_dropped++; + return; + } + + stmmac_get_rx_hwtstamp(priv, p, np, skb); + stmmac_rx_vlan(priv->dev, skb); + skb->protocol = eth_type_trans(skb, priv->dev); + + if (unlikely(!coe)) + skb_checksum_none_assert(skb); + else + skb->ip_summed = CHECKSUM_UNNECESSARY; + + if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type)) + skb_set_hash(skb, hash, hash_type); + + skb_record_rx_queue(skb, queue); + napi_gro_receive(&ch->rxtx_napi, skb); + + priv->dev->stats.rx_packets++; + priv->dev->stats.rx_bytes += len; +} + +static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget) +{ + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + unsigned int entry = rx_q->dirty_rx; + struct dma_desc *rx_desc = NULL; + bool ret = true; + + budget = min(budget, stmmac_rx_dirty(priv, queue)); + + while (budget-- > 0 && entry != rx_q->cur_rx) { + struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; + dma_addr_t dma_addr; + bool use_rx_wd; + + if (!buf->xdp) { + buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); + if (!buf->xdp) { + ret = false; + break; + } + } + + if (priv->extend_desc) + rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry); + else + rx_desc = rx_q->dma_rx + entry; + + dma_addr = xsk_buff_xdp_get_dma(buf->xdp); + stmmac_set_desc_addr(priv, rx_desc, dma_addr); + stmmac_set_desc_sec_addr(priv, rx_desc, 0, false); + stmmac_refill_desc3(priv, rx_q, rx_desc); + + rx_q->rx_count_frames++; + rx_q->rx_count_frames += priv->rx_coal_frames[queue]; + if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) + rx_q->rx_count_frames = 0; + + use_rx_wd = !priv->rx_coal_frames[queue]; + use_rx_wd |= rx_q->rx_count_frames > 0; + if 
(!priv->use_riwt) + use_rx_wd = false; + + dma_wmb(); + stmmac_set_rx_owner(priv, rx_desc, use_rx_wd); + + entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size); + } + + if (rx_desc) { + rx_q->dirty_rx = entry; + rx_q->rx_tail_addr = rx_q->dma_rx_phy + + (rx_q->dirty_rx * sizeof(struct dma_desc)); + stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); + } + + return ret; +} + +static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue) +{ + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + unsigned int count = 0, error = 0, len = 0; + int dirty = stmmac_rx_dirty(priv, queue); + unsigned int next_entry = rx_q->cur_rx; + unsigned int desc_size; + struct bpf_prog *prog; + bool failure = false; + int xdp_status = 0; + int status = 0; + + if (netif_msg_rx_status(priv)) { + void *rx_head; + + netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); + if (priv->extend_desc) { + rx_head = (void *)rx_q->dma_erx; + desc_size = sizeof(struct dma_extended_desc); + } else { + rx_head = (void *)rx_q->dma_rx; + desc_size = sizeof(struct dma_desc); + } + + stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true, + rx_q->dma_rx_phy, desc_size); + } + while (count < limit) { + struct stmmac_rx_buffer *buf; + unsigned int buf1_len = 0; + struct dma_desc *np, *p; + int entry; + int res; + + if (!count && rx_q->state_saved) { + error = rx_q->state.error; + len = rx_q->state.len; + } else { + rx_q->state_saved = false; + error = 0; + len = 0; + } + + if (count >= limit) + break; + +read_again: + buf1_len = 0; + entry = next_entry; + buf = &rx_q->buf_pool[entry]; + + if (dirty >= STMMAC_RX_FILL_BATCH) { + failure = failure || + !stmmac_rx_refill_zc(priv, queue, dirty); + dirty = 0; + } + + if (priv->extend_desc) + p = (struct dma_desc *)(rx_q->dma_erx + entry); + else + p = rx_q->dma_rx + entry; + + /* read the status of the incoming frame */ + status = stmmac_rx_status(priv, &priv->dev->stats, + &priv->xstats, p); + /* check if managed by the DMA otherwise go ahead */ + if (unlikely(status & dma_own)) + break; + + /* Prefetch the next RX descriptor */ + rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, + priv->dma_rx_size); + next_entry = rx_q->cur_rx; + + if (priv->extend_desc) + np = (struct dma_desc *)(rx_q->dma_erx + next_entry); + else + np = rx_q->dma_rx + next_entry; + + prefetch(np); + + if (priv->extend_desc) + stmmac_rx_extended_status(priv, &priv->dev->stats, + &priv->xstats, + rx_q->dma_erx + entry); + if (unlikely(status == discard_frame)) { + xsk_buff_free(buf->xdp); + buf->xdp = NULL; + dirty++; + error = 1; + if (!priv->hwts_rx_en) + priv->dev->stats.rx_errors++; + } + + if (unlikely(error && (status & rx_not_ls))) + goto read_again; + if (unlikely(error)) { + count++; + continue; + } + + /* Ensure a valid XSK buffer before proceeding */ + if (!buf->xdp) + break; + + /* The XSK pool expects RX frames to be mapped 1:1 to XSK buffers */ + if (likely(status & rx_not_ls)) { + xsk_buff_free(buf->xdp); + buf->xdp = NULL; + dirty++; + count++; + goto read_again; + } + + /* XDP ZC frames only support primary buffers for now */ + buf1_len = stmmac_rx_buf1_len(priv, p, status, len); + len += buf1_len; + + /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 + * Type frames (LLC/LLC-SNAP) + * + * llc_snap is never checked in GMAC >= 4, so this ACS + * feature is always disabled and packets need to be + * stripped manually. 
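+ * In the zero-copy path the whole frame sits in a single XSK buffer, so + * trimming ETH_FCS_LEN from buf1_len (and len) is sufficient.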
+ */ + if (likely(!(status & rx_not_ls)) && + (likely(priv->synopsys_id >= DWMAC_CORE_4_00) || + unlikely(status != llc_snap))) { + buf1_len -= ETH_FCS_LEN; + len -= ETH_FCS_LEN; + } + + /* RX buffer is good and fits into an XSK pool buffer */ + buf->xdp->data_end = buf->xdp->data + buf1_len; + xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool); + + rcu_read_lock(); + prog = READ_ONCE(priv->xdp_prog); + res = __stmmac_xdp_run_prog(priv, prog, buf->xdp); + rcu_read_unlock(); + + switch (res) { + case STMMAC_XDP_PASS: + stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp); + xsk_buff_free(buf->xdp); + break; + case STMMAC_XDP_CONSUMED: + xsk_buff_free(buf->xdp); + priv->dev->stats.rx_dropped++; + break; + case STMMAC_XDP_TX: + case STMMAC_XDP_REDIRECT: + xdp_status |= res; + break; + } + + buf->xdp = NULL; + dirty++; + count++; + } + + if (status & rx_not_ls) { + rx_q->state_saved = true; + rx_q->state.error = error; + rx_q->state.len = len; + } + + stmmac_finalize_xdp_rx(priv, xdp_status); + + if (xsk_uses_need_wakeup(rx_q->xsk_pool)) { + if (failure || stmmac_rx_dirty(priv, queue) > 0) + xsk_set_rx_need_wakeup(rx_q->xsk_pool); + else + xsk_clear_rx_need_wakeup(rx_q->xsk_pool); + + return (int)count; + } + + return failure ? limit : (int)count; +} + /** * stmmac_rx - manage the receive process * @priv: driver private structure @@ -4742,7 +5328,7 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) priv->xstats.napi_poll++; - work_done = stmmac_tx_clean(priv, priv->dma_tx_size, chan); + work_done = stmmac_tx_clean(priv, budget, chan); work_done = min(work_done, budget); if (work_done < budget && napi_complete_done(napi, work_done)) { @@ -4756,6 +5342,42 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) return work_done; } +static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget) +{ + struct stmmac_channel *ch = + container_of(napi, struct stmmac_channel, rxtx_napi); + struct stmmac_priv *priv = ch->priv_data; + int rx_done, tx_done; + u32 chan = ch->index; + + priv->xstats.napi_poll++; + + tx_done = stmmac_tx_clean(priv, budget, chan); + tx_done = min(tx_done, budget); + + rx_done = stmmac_rx_zc(priv, budget, chan); + + /* If either TX or RX work is not complete, return budget + * and keep polling + */ + if (tx_done >= budget || rx_done >= budget) + return budget; + + /* all work done, exit the polling mode */ + if (napi_complete_done(napi, rx_done)) { + unsigned long flags; + + spin_lock_irqsave(&ch->lock, flags); + /* Both RX and TX work are complete, + * so enable both RX & TX IRQs. 
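+ * stmmac_napi_check() masked the IRQ direction(s) that fired before + * scheduling this NAPI; re-arm both directions for the next round.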
+ */ + stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); + spin_unlock_irqrestore(&ch->lock, flags); + } + + return min(rx_done, budget - 1); +} + /** * stmmac_tx_timeout * @dev : Pointer to net device structure @@ -4989,6 +5611,8 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv) else netif_carrier_off(priv->dev); } + + stmmac_timestamp_interrupt(priv, priv); } } @@ -5203,7 +5827,7 @@ static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data, if (!tc_cls_can_offload_and_chain0(priv->dev, type_data)) return ret; - stmmac_disable_all_queues(priv); + __stmmac_disable_all_queues(priv); switch (type) { case TC_SETUP_CLSU32: @@ -5624,6 +6248,9 @@ static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf) switch (bpf->command) { case XDP_SETUP_PROG: return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack); + case XDP_SETUP_XSK_POOL: + return stmmac_xdp_setup_pool(priv, bpf->xsk.pool, + bpf->xsk.queue_id); default: return -EOPNOTSUPP; } @@ -5671,6 +6298,156 @@ static int stmmac_xdp_xmit(struct net_device *dev, int num_frames, return nxmit; } +void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_channel *ch = &priv->channel[queue]; + unsigned long flags; + + spin_lock_irqsave(&ch->lock, flags); + stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0); + spin_unlock_irqrestore(&ch->lock, flags); + + stmmac_stop_rx_dma(priv, queue); + __free_dma_rx_desc_resources(priv, queue); +} + +void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + struct stmmac_channel *ch = &priv->channel[queue]; + unsigned long flags; + u32 buf_size; + int ret; + + ret = __alloc_dma_rx_desc_resources(priv, queue); + if (ret) { + netdev_err(priv->dev, "Failed to alloc RX desc.\n"); + return; + } + + ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL); + if (ret) { + __free_dma_rx_desc_resources(priv, queue); + netdev_err(priv->dev, "Failed to init RX desc.\n"); + return; + } + + stmmac_clear_rx_descriptors(priv, queue); + + stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, + rx_q->dma_rx_phy, rx_q->queue_index); + + rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num * + sizeof(struct dma_desc)); + stmmac_set_rx_tail_ptr(priv, priv->ioaddr, + rx_q->rx_tail_addr, rx_q->queue_index); + + if (rx_q->xsk_pool && rx_q->buf_alloc_num) { + buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); + stmmac_set_dma_bfsize(priv, priv->ioaddr, + buf_size, + rx_q->queue_index); + } else { + stmmac_set_dma_bfsize(priv, priv->ioaddr, + priv->dma_buf_sz, + rx_q->queue_index); + } + + stmmac_start_rx_dma(priv, queue); + + spin_lock_irqsave(&ch->lock, flags); + stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0); + spin_unlock_irqrestore(&ch->lock, flags); +} + +void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_channel *ch = &priv->channel[queue]; + unsigned long flags; + + spin_lock_irqsave(&ch->lock, flags); + stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1); + spin_unlock_irqrestore(&ch->lock, flags); + + stmmac_stop_tx_dma(priv, queue); + __free_dma_tx_desc_resources(priv, queue); +} + +void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + struct stmmac_channel *ch = &priv->channel[queue]; + unsigned long flags; + int ret; + + ret = __alloc_dma_tx_desc_resources(priv, queue); + if (ret) { + netdev_err(priv->dev, "Failed to alloc TX desc.\n"); + return; 
+ } + + ret = __init_dma_tx_desc_rings(priv, queue); + if (ret) { + __free_dma_tx_desc_resources(priv, queue); + netdev_err(priv->dev, "Failed to init TX desc.\n"); + return; + } + + stmmac_clear_tx_descriptors(priv, queue); + + stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, + tx_q->dma_tx_phy, tx_q->queue_index); + + if (tx_q->tbs & STMMAC_TBS_AVAIL) + stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index); + + tx_q->tx_tail_addr = tx_q->dma_tx_phy; + stmmac_set_tx_tail_ptr(priv, priv->ioaddr, + tx_q->tx_tail_addr, tx_q->queue_index); + + stmmac_start_tx_dma(priv, queue); + + spin_lock_irqsave(&ch->lock, flags); + stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1); + spin_unlock_irqrestore(&ch->lock, flags); +} + +int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags) +{ + struct stmmac_priv *priv = netdev_priv(dev); + struct stmmac_rx_queue *rx_q; + struct stmmac_tx_queue *tx_q; + struct stmmac_channel *ch; + + if (test_bit(STMMAC_DOWN, &priv->state) || + !netif_carrier_ok(priv->dev)) + return -ENETDOWN; + + if (!stmmac_xdp_is_enabled(priv)) + return -ENXIO; + + if (queue >= priv->plat->rx_queues_to_use || + queue >= priv->plat->tx_queues_to_use) + return -EINVAL; + + rx_q = &priv->rx_queue[queue]; + tx_q = &priv->tx_queue[queue]; + ch = &priv->channel[queue]; + + if (!rx_q->xsk_pool && !tx_q->xsk_pool) + return -ENXIO; + + if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) { + /* EQoS does not have per-DMA channel SW interrupt, + * so we schedule RX Napi straight-away. + */ + if (likely(napi_schedule_prep(&ch->rxtx_napi))) + __napi_schedule(&ch->rxtx_napi); + } + + return 0; +} + static const struct net_device_ops stmmac_netdev_ops = { .ndo_open = stmmac_open, .ndo_start_xmit = stmmac_xmit, @@ -5691,6 +6468,7 @@ static const struct net_device_ops stmmac_netdev_ops = { .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid, .ndo_bpf = stmmac_bpf, .ndo_xdp_xmit = stmmac_xdp_xmit, + .ndo_xsk_wakeup = stmmac_xsk_wakeup, }; static void stmmac_reset_subtask(struct stmmac_priv *priv) @@ -5849,6 +6627,12 @@ static void stmmac_napi_add(struct net_device *dev) stmmac_napi_poll_tx, NAPI_POLL_WEIGHT); } + if (queue < priv->plat->rx_queues_to_use && + queue < priv->plat->tx_queues_to_use) { + netif_napi_add(dev, &ch->rxtx_napi, + stmmac_napi_poll_rxtx, + NAPI_POLL_WEIGHT); + } } } @@ -5866,6 +6650,10 @@ static void stmmac_napi_del(struct net_device *dev) netif_napi_del(&ch->rx_napi); if (queue < priv->plat->tx_queues_to_use) netif_napi_del(&ch->tx_napi); + if (queue < priv->plat->rx_queues_to_use && + queue < priv->plat->tx_queues_to_use) { + netif_napi_del(&ch->rxtx_napi); + } } } @@ -6016,7 +6804,7 @@ int stmmac_dvr_probe(struct device *device, for (i = 0; i < MTL_MAX_TX_QUEUES; i++) priv->tx_irq[i] = res->tx_irq[i]; - if (!IS_ERR_OR_NULL(res->mac)) + if (!is_zero_ether_addr(res->mac)) memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN); dev_set_drvdata(device, priv->dev); @@ -6024,6 +6812,10 @@ int stmmac_dvr_probe(struct device *device, /* Verify driver arguments */ stmmac_verify_args(); + priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL); + if (!priv->af_xdp_zc_qps) + return -ENOMEM; + /* Allocate workqueue */ priv->wq = create_singlethread_workqueue("stmmac_wq"); if (!priv->wq) { @@ -6456,7 +7248,7 @@ int stmmac_resume(struct device *dev) mutex_lock(&priv->lock); stmmac_reset_queues_param(priv); - stmmac_reinit_rx_buffers(priv); + stmmac_free_tx_skbufs(priv); stmmac_clear_descriptors(priv); diff --git 
a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 5a1e018884e6..1e17a23d9118 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -394,7 +394,7 @@ static int stmmac_of_get_mac_mode(struct device_node *np) * set some private fields that will be used by the main at runtime. */ struct plat_stmmacenet_data * -stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) +stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) { struct device_node *np = pdev->dev.of_node; struct plat_stmmacenet_data *plat; @@ -406,12 +406,12 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) if (!plat) return ERR_PTR(-ENOMEM); - *mac = of_get_mac_address(np); - if (IS_ERR(*mac)) { - if (PTR_ERR(*mac) == -EPROBE_DEFER) - return ERR_CAST(*mac); + rc = of_get_mac_address(np, mac); + if (rc) { + if (rc == -EPROBE_DEFER) + return ERR_PTR(rc); - *mac = NULL; + eth_zero_addr(mac); } plat->phy_interface = device_get_phy_mode(&pdev->dev); @@ -627,7 +627,7 @@ void stmmac_remove_config_dt(struct platform_device *pdev, } #else struct plat_stmmacenet_data * -stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) +stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) { return ERR_PTR(-EINVAL); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h index 3a4663b7b460..3fff3f59d73d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h @@ -12,7 +12,7 @@ #include "stmmac.h" struct plat_stmmacenet_data * -stmmac_probe_config_dt(struct platform_device *pdev, const char **mac); +stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac); void stmmac_remove_config_dt(struct platform_device *pdev, struct plat_stmmacenet_data *plat); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index b164ae22e35f..4e86cdf2bc9f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -135,7 +135,10 @@ static int stmmac_enable(struct ptp_clock_info *ptp, { struct stmmac_priv *priv = container_of(ptp, struct stmmac_priv, ptp_clock_ops); + void __iomem *ptpaddr = priv->ptpaddr; + void __iomem *ioaddr = priv->hw->pcsr; struct stmmac_pps_cfg *cfg; + u32 intr_value, acr_value; int ret = -EOPNOTSUPP; unsigned long flags; @@ -159,6 +162,37 @@ static int stmmac_enable(struct ptp_clock_info *ptp, priv->systime_flags); spin_unlock_irqrestore(&priv->ptp_lock, flags); break; + case PTP_CLK_REQ_EXTTS: + priv->plat->ext_snapshot_en = on; + mutex_lock(&priv->aux_ts_lock); + acr_value = readl(ptpaddr + PTP_ACR); + acr_value &= ~PTP_ACR_MASK; + if (on) { + /* Enable External snapshot trigger */ + acr_value |= priv->plat->ext_snapshot_num; + acr_value |= PTP_ACR_ATSFC; + netdev_dbg(priv->dev, "Auxiliary Snapshot %d enabled.\n", + priv->plat->ext_snapshot_num >> + PTP_ACR_ATSEN_SHIFT); + /* Enable Timestamp Interrupt */ + intr_value = readl(ioaddr + GMAC_INT_EN); + intr_value |= GMAC_INT_TSIE; + writel(intr_value, ioaddr + GMAC_INT_EN); + + } else { + netdev_dbg(priv->dev, "Auxiliary Snapshot %d disabled.\n", + priv->plat->ext_snapshot_num >> + PTP_ACR_ATSEN_SHIFT); + /* Disable Timestamp Interrupt */ + intr_value = readl(ioaddr + GMAC_INT_EN); + intr_value &= ~GMAC_INT_TSIE; + writel(intr_value, 
ioaddr + GMAC_INT_EN); + } + writel(acr_value, ptpaddr + PTP_ACR); + mutex_unlock(&priv->aux_ts_lock); + ret = 0; + break; + default: break; } @@ -202,7 +236,7 @@ static struct ptp_clock_info stmmac_ptp_clock_ops = { .name = "stmmac ptp", .max_adj = 62500000, .n_alarm = 0, - .n_ext_ts = 0, + .n_ext_ts = 0, /* will be overwritten in stmmac_ptp_register */ .n_per_out = 0, /* will be overwritten in stmmac_ptp_register */ .n_pins = 0, .pps = 0, @@ -237,8 +271,10 @@ void stmmac_ptp_register(struct stmmac_priv *priv) stmmac_ptp_clock_ops.max_adj = priv->plat->ptp_max_adj; stmmac_ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num; + stmmac_ptp_clock_ops.n_ext_ts = priv->dma_cap.aux_snapshot_n; spin_lock_init(&priv->ptp_lock); + mutex_init(&priv->aux_ts_lock); priv->ptp_clock_ops = stmmac_ptp_clock_ops; priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops, @@ -264,4 +300,6 @@ void stmmac_ptp_unregister(struct stmmac_priv *priv) pr_debug("Removed PTP HW clock successfully on %s\n", priv->dev->name); } + + mutex_destroy(&priv->aux_ts_lock); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h index f88727ce4d30..53172a439810 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h @@ -73,6 +73,7 @@ #define PTP_ACR_ATSEN1 BIT(5) /* Auxiliary Snapshot 1 Enable */ #define PTP_ACR_ATSEN2 BIT(6) /* Auxiliary Snapshot 2 Enable */ #define PTP_ACR_ATSEN3 BIT(7) /* Auxiliary Snapshot 3 Enable */ +#define PTP_ACR_ATSEN_SHIFT 5 /* Auxiliary Snapshot shift */ #define PTP_ACR_MASK GENMASK(7, 4) /* Aux Snapshot Mask */ #define PMC_ART_VALUE0 0x01 /* PMC_ART[15:0] timer value */ #define PMC_ART_VALUE1 0x02 /* PMC_ART[31:16] timer value */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c index bf38d231860b..105821b53020 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c @@ -1,9 +1,104 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2021, Intel Corporation. */ +#include <net/xdp_sock_drv.h> + #include "stmmac.h" #include "stmmac_xdp.h" +static int stmmac_xdp_enable_pool(struct stmmac_priv *priv, + struct xsk_buff_pool *pool, u16 queue) +{ + struct stmmac_channel *ch = &priv->channel[queue]; + bool need_update; + u32 frame_size; + int err; + + if (queue >= priv->plat->rx_queues_to_use || + queue >= priv->plat->tx_queues_to_use) + return -EINVAL; + + frame_size = xsk_pool_get_rx_frame_size(pool); + /* XDP ZC does not span multiple frame, make sure XSK pool buffer + * size can at least store Q-in-Q frame. 
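
/*
 * The PTP_CLK_REQ_EXTTS handling added above is driven from userspace
 * through the posix clock character device. A hedged sketch of that
 * consumer: arm auxiliary snapshot 0 with the PTP_EXTTS_REQUEST ioctl,
 * then read() timestamped events. "/dev/ptp0" and channel 0 are
 * placeholders; error handling is trimmed to the essentials.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_extts_request req;
	struct ptp_extts_event ev;
	int fd;

	fd = open("/dev/ptp0", O_RDWR);
	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.index = 0;	/* auxiliary snapshot channel */
	req.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE;

	/* For stmmac this ioctl ends up in the new PTP_CLK_REQ_EXTTS case. */
	if (ioctl(fd, PTP_EXTTS_REQUEST, &req))
		return 1;

	/* Each external trigger produces one timestamped event. */
	while (read(fd, &ev, sizeof(ev)) == (ssize_t)sizeof(ev))
		printf("extts[%u] %lld.%09u\n", ev.index,
		       (long long)ev.t.sec, ev.t.nsec);

	close(fd);
	return 0;
}
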
+ */ + if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2) + return -EOPNOTSUPP; + + err = xsk_pool_dma_map(pool, priv->device, STMMAC_RX_DMA_ATTR); + if (err) { + netdev_err(priv->dev, "Failed to map xsk pool\n"); + return err; + } + + need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv); + + if (need_update) { + stmmac_disable_rx_queue(priv, queue); + stmmac_disable_tx_queue(priv, queue); + napi_disable(&ch->rx_napi); + napi_disable(&ch->tx_napi); + } + + set_bit(queue, priv->af_xdp_zc_qps); + + if (need_update) { + napi_enable(&ch->rxtx_napi); + stmmac_enable_rx_queue(priv, queue); + stmmac_enable_tx_queue(priv, queue); + + err = stmmac_xsk_wakeup(priv->dev, queue, XDP_WAKEUP_RX); + if (err) + return err; + } + + return 0; +} + +static int stmmac_xdp_disable_pool(struct stmmac_priv *priv, u16 queue) +{ + struct stmmac_channel *ch = &priv->channel[queue]; + struct xsk_buff_pool *pool; + bool need_update; + + if (queue >= priv->plat->rx_queues_to_use || + queue >= priv->plat->tx_queues_to_use) + return -EINVAL; + + pool = xsk_get_pool_from_qid(priv->dev, queue); + if (!pool) + return -EINVAL; + + need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv); + + if (need_update) { + stmmac_disable_rx_queue(priv, queue); + stmmac_disable_tx_queue(priv, queue); + synchronize_rcu(); + napi_disable(&ch->rxtx_napi); + } + + xsk_pool_dma_unmap(pool, STMMAC_RX_DMA_ATTR); + + clear_bit(queue, priv->af_xdp_zc_qps); + + if (need_update) { + napi_enable(&ch->rx_napi); + napi_enable(&ch->tx_napi); + stmmac_enable_rx_queue(priv, queue); + stmmac_enable_tx_queue(priv, queue); + } + + return 0; +} + +int stmmac_xdp_setup_pool(struct stmmac_priv *priv, struct xsk_buff_pool *pool, + u16 queue) +{ + return pool ? stmmac_xdp_enable_pool(priv, pool, queue) : + stmmac_xdp_disable_pool(priv, queue); +} + int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog, struct netlink_ext_ack *extack) { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h index 93948569d92a..896dc987d4ef 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h @@ -5,7 +5,10 @@ #define _STMMAC_XDP_H_ #define STMMAC_MAX_RX_BUF_SIZE(num) (((num) * PAGE_SIZE) - XDP_PACKET_HEADROOM) +#define STMMAC_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) +int stmmac_xdp_setup_pool(struct stmmac_priv *priv, struct xsk_buff_pool *pool, + u16 queue); int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog, struct netlink_ext_ack *extack); diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 638d7b03be4b..6a67b026df0b 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -1824,7 +1824,6 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) for_each_child_of_node(node, port_np) { struct am65_cpsw_port *port; - const void *mac_addr; u32 port_id; /* it is not a slave port node, continue */ @@ -1903,15 +1902,15 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) return ret; } - mac_addr = of_get_mac_address(port_np); - if (!IS_ERR(mac_addr)) { - ether_addr_copy(port->slave.mac_addr, mac_addr); - } else if (am65_cpsw_am654_get_efuse_macid(port_np, - port->port_id, - port->slave.mac_addr) || - !is_valid_ether_addr(port->slave.mac_addr)) { - random_ether_addr(port->slave.mac_addr); - dev_err(dev, "Use random MAC address\n"); + 
ret = of_get_mac_address(port_np, port->slave.mac_addr); + if (ret) { + am65_cpsw_am654_get_efuse_macid(port_np, + port->port_id, + port->slave.mac_addr); + if (!is_valid_ether_addr(port->slave.mac_addr)) { + random_ether_addr(port->slave.mac_addr); + dev_err(dev, "Use random MAC address\n"); + } } } of_node_put(node); diff --git a/drivers/net/ethernet/ti/am65-cpsw-switchdev.c b/drivers/net/ethernet/ti/am65-cpsw-switchdev.c index d93ffd8a08b0..23cfb91e9c4d 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-switchdev.c +++ b/drivers/net/ethernet/ti/am65-cpsw-switchdev.c @@ -385,7 +385,7 @@ static void am65_cpsw_switchdev_event_work(struct work_struct *work) fdb->addr, fdb->vid, fdb->added_by_user, fdb->offloaded, port_id); - if (!fdb->added_by_user) + if (!fdb->added_by_user || fdb->is_local) break; if (memcmp(port->slave.mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0) port_id = HOST_PORT_NUM; @@ -401,7 +401,7 @@ static void am65_cpsw_switchdev_event_work(struct work_struct *work) fdb->addr, fdb->vid, fdb->added_by_user, fdb->offloaded, port_id); - if (!fdb->added_by_user) + if (!fdb->added_by_user || fdb->is_local) break; if (memcmp(port->slave.mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0) port_id = HOST_PORT_NUM; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 074702af3dc6..c0cd7de88316 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1296,7 +1296,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, for_each_available_child_of_node(node, slave_node) { struct cpsw_slave_data *slave_data = data->slave_data + i; - const void *mac_addr = NULL; int lenp; const __be32 *parp; @@ -1368,10 +1367,8 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, } no_phy_slave: - mac_addr = of_get_mac_address(slave_node); - if (!IS_ERR(mac_addr)) { - ether_addr_copy(slave_data->mac_addr, mac_addr); - } else { + ret = of_get_mac_address(slave_node, slave_data->mac_addr); + if (ret) { ret = ti_cm_get_macid(&pdev->dev, i, slave_data->mac_addr); if (ret) diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index 0751f77de2c7..69b7a4e0220a 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -1257,7 +1257,6 @@ static int cpsw_probe_dt(struct cpsw_common *cpsw) for_each_child_of_node(tmp_node, port_np) { struct cpsw_slave_data *slave_data; - const void *mac_addr; u32 port_id; ret = of_property_read_u32(port_np, "reg", &port_id); @@ -1316,10 +1315,8 @@ static int cpsw_probe_dt(struct cpsw_common *cpsw) goto err_node_put; } - mac_addr = of_get_mac_address(port_np); - if (!IS_ERR(mac_addr)) { - ether_addr_copy(slave_data->mac_addr, mac_addr); - } else { + ret = of_get_mac_address(port_np, slave_data->mac_addr); + if (ret) { ret = ti_cm_get_macid(dev, port_id - 1, slave_data->mac_addr); if (ret) diff --git a/drivers/net/ethernet/ti/cpsw_switchdev.c b/drivers/net/ethernet/ti/cpsw_switchdev.c index a72bb570756f..05a64fb7a04f 100644 --- a/drivers/net/ethernet/ti/cpsw_switchdev.c +++ b/drivers/net/ethernet/ti/cpsw_switchdev.c @@ -395,7 +395,7 @@ static void cpsw_switchdev_event_work(struct work_struct *work) fdb->addr, fdb->vid, fdb->added_by_user, fdb->offloaded, port); - if (!fdb->added_by_user) + if (!fdb->added_by_user || fdb->is_local) break; if (memcmp(priv->mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0) port = HOST_PORT_NUM; @@ -411,7 +411,7 @@ static void cpsw_switchdev_event_work(struct work_struct *work) fdb->addr, fdb->vid, fdb->added_by_user, fdb->offloaded, port); - if 
(!fdb->added_by_user) + if (!fdb->added_by_user || fdb->is_local) break; if (memcmp(priv->mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0) port = HOST_PORT_NUM; diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index c7031e1960d4..14e7da7d302f 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1687,7 +1687,6 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv) const struct of_device_id *match; const struct emac_platform_data *auxdata; struct emac_platform_data *pdata = NULL; - const u8 *mac_addr; if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node) return dev_get_platdata(&pdev->dev); @@ -1699,11 +1698,8 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv) np = pdev->dev.of_node; pdata->version = EMAC_VERSION_2; - if (!is_valid_ether_addr(pdata->mac_addr)) { - mac_addr = of_get_mac_address(np); - if (!IS_ERR(mac_addr)) - ether_addr_copy(pdata->mac_addr, mac_addr); - } + if (!is_valid_ether_addr(pdata->mac_addr)) + of_get_mac_address(np, pdata->mac_addr); of_property_read_u32(np, "ti,davinci-ctrl-reg-offset", &pdata->ctrl_reg_offset); diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index d7a144b4a09f..9030e619e543 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1966,7 +1966,6 @@ static int netcp_create_interface(struct netcp_device *netcp_device, struct resource res; void __iomem *efuse = NULL; u32 efuse_mac = 0; - const void *mac_addr; u8 efuse_mac_addr[6]; u32 temp[2]; int ret = 0; @@ -2036,10 +2035,8 @@ static int netcp_create_interface(struct netcp_device *netcp_device, devm_iounmap(dev, efuse); devm_release_mem_region(dev, res.start, size); } else { - mac_addr = of_get_mac_address(node_interface); - if (!IS_ERR(mac_addr)) - ether_addr_copy(ndev->dev_addr, mac_addr); - else + ret = of_get_mac_address(node_interface, ndev->dev_addr); + if (ret) eth_random_addr(ndev->dev_addr); } diff --git a/drivers/net/ethernet/wiznet/w5100-spi.c b/drivers/net/ethernet/wiznet/w5100-spi.c index 2b4126d2427d..2b84848dc26a 100644 --- a/drivers/net/ethernet/wiznet/w5100-spi.c +++ b/drivers/net/ethernet/wiznet/w5100-spi.c @@ -423,8 +423,14 @@ static int w5100_spi_probe(struct spi_device *spi) const struct of_device_id *of_id; const struct w5100_ops *ops; kernel_ulong_t driver_data; + const void *mac = NULL; + u8 tmpmac[ETH_ALEN]; int priv_size; - const void *mac = of_get_mac_address(spi->dev.of_node); + int ret; + + ret = of_get_mac_address(spi->dev.of_node, tmpmac); + if (!ret) + mac = tmpmac; if (spi->dev.of_node) { of_id = of_match_device(w5100_of_match, &spi->dev); diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index c0d181a7f83a..ec5db481c9cd 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -1157,7 +1157,7 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops, INIT_WORK(&priv->setrx_work, w5100_setrx_work); INIT_WORK(&priv->restart_work, w5100_restart_work); - if (!IS_ERR_OR_NULL(mac_addr)) + if (mac_addr) memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); else eth_hw_addr_random(ndev); diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig index c6eb7f2368aa..911b5ef9e680 100644 --- a/drivers/net/ethernet/xilinx/Kconfig +++ b/drivers/net/ethernet/xilinx/Kconfig @@ -18,12 +18,14 @@ if NET_VENDOR_XILINX config XILINX_EMACLITE tristate "Xilinx 10/100 
Ethernet Lite support" + depends on HAS_IOMEM select PHYLIB help This driver supports the 10/100 Ethernet Lite from Xilinx. config XILINX_AXI_EMAC tristate "Xilinx 10/100/1000 AXI Ethernet support" + depends on HAS_IOMEM select PHYLINK help This driver supports the 10/100/1000 Ethernet from Xilinx for the @@ -31,6 +33,7 @@ config XILINX_AXI_EMAC config XILINX_LL_TEMAC tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver" + depends on HAS_IOMEM select PHYLIB help This driver supports the Xilinx 10/100/1000 LocalLink TEMAC diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 030185301014..a1f5f07f4ca9 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -438,7 +438,7 @@ static void temac_do_set_mac_address(struct net_device *ndev) static int temac_init_mac_address(struct net_device *ndev, const void *address) { - ether_addr_copy(ndev->dev_addr, address); + memcpy(ndev->dev_addr, address, ETH_ALEN); if (!is_valid_ether_addr(ndev->dev_addr)) eth_hw_addr_random(ndev); temac_do_set_mac_address(ndev); @@ -1351,7 +1351,7 @@ static int temac_probe(struct platform_device *pdev) struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np; struct temac_local *lp; struct net_device *ndev; - const void *addr; + u8 addr[ETH_ALEN]; __be32 *p; bool little_endian; int rc = 0; @@ -1542,8 +1542,8 @@ static int temac_probe(struct platform_device *pdev) if (temac_np) { /* Retrieve the MAC address */ - addr = of_get_mac_address(temac_np); - if (IS_ERR(addr)) { + rc = of_get_mac_address(temac_np, addr); + if (rc) { dev_err(&pdev->dev, "could not find MAC address\n"); return -ENODEV; } diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index feb1aa4ec927..b508c9453f40 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1835,8 +1835,8 @@ static int axienet_probe(struct platform_device *pdev) struct device_node *np; struct axienet_local *lp; struct net_device *ndev; - const void *mac_addr; struct resource *ethres; + u8 mac_addr[ETH_ALEN]; int addr_width = 32; u32 value; @@ -2062,13 +2062,14 @@ static int axienet_probe(struct platform_device *pdev) dev_info(&pdev->dev, "Ethernet core IRQ not defined\n"); /* Retrieve the MAC address */ - mac_addr = of_get_mac_address(pdev->dev.of_node); - if (IS_ERR(mac_addr)) { - dev_warn(&pdev->dev, "could not find MAC address property: %ld\n", - PTR_ERR(mac_addr)); - mac_addr = NULL; + ret = of_get_mac_address(pdev->dev.of_node, mac_addr); + if (!ret) { + axienet_set_mac_address(ndev, mac_addr); + } else { + dev_warn(&pdev->dev, "could not find MAC address property: %d\n", + ret); + axienet_set_mac_address(ndev, NULL); } - axienet_set_mac_address(ndev, mac_addr); lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD; lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD; diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 007840d4a807..d9d58a7dabee 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -1115,7 +1115,6 @@ static int xemaclite_of_probe(struct platform_device *ofdev) struct net_device *ndev = NULL; struct net_local *lp = NULL; struct device *dev = &ofdev->dev; - const void *mac_address; int rc = 0; @@ -1157,12 +1156,9 @@ static int xemaclite_of_probe(struct platform_device *ofdev) 
lp->next_rx_buf_to_use = 0x0; lp->tx_ping_pong = get_bool(ofdev, "xlnx,tx-ping-pong"); lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong"); - mac_address = of_get_mac_address(ofdev->dev.of_node); - if (!IS_ERR(mac_address)) { - /* Set the MAC address. */ - ether_addr_copy(ndev->dev_addr, mac_address); - } else { + rc = of_get_mac_address(ofdev->dev.of_node, ndev->dev_addr); + if (rc) { dev_warn(dev, "No MAC address found, using random\n"); eth_hw_addr_random(ndev); } diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index e3b2375ac5eb..072de880b99f 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -892,6 +892,9 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 sport; int err; + if (!pskb_network_may_pull(skb, sizeof(struct iphdr))) + return -EINVAL; + sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info, geneve->cfg.info.key.tp_dst, sport); @@ -986,6 +989,9 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 sport; int err; + if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr))) + return -EINVAL; + sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info, geneve->cfg.info.key.tp_dst, sport); diff --git a/drivers/net/ipa/Makefile b/drivers/net/ipa/Makefile index 8c0ac8790354..1efe1a88104b 100644 --- a/drivers/net/ipa/Makefile +++ b/drivers/net/ipa/Makefile @@ -10,4 +10,5 @@ ipa-y := ipa_main.o ipa_clock.o ipa_reg.o ipa_mem.o \ ipa_resource.o ipa_qmi.o ipa_qmi_msg.o ipa-y += ipa_data-v3.5.1.o ipa_data-v4.2.o \ - ipa_data-v4.5.o ipa_data-v4.11.o + ipa_data-v4.5.o ipa_data-v4.9.o \ + ipa_data-v4.11.o diff --git a/drivers/net/ipa/ipa_data-v4.9.c b/drivers/net/ipa/ipa_data-v4.9.c new file mode 100644 index 000000000000..e41be790f45e --- /dev/null +++ b/drivers/net/ipa/ipa_data-v4.9.c @@ -0,0 +1,430 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Copyright (C) 2021 Linaro Ltd. 
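
/*
 * Why the geneve change above pulls the network header before looking
 * at it: with a nonlinear skb (for instance one injected by an
 * untrusted AF_PACKET sender) the IP header may not be in the linear
 * data area yet, so dereferencing ip_hdr() first would read stale or
 * out-of-bounds bytes. The guard pattern, on a hypothetical transmit
 * helper:
 */
#include <linux/ip.h>
#include <linux/skbuff.h>

static int foo_xmit_check(struct sk_buff *skb)
{
	const struct iphdr *iph;

	/* Ensure at least a full IPv4 header is linear and readable. */
	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
		return -EINVAL;

	iph = ip_hdr(skb);	/* safe to dereference now */
	return iph->version == 4 ? 0 : -EINVAL;
}
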
*/ + +#include <linux/log2.h> + +#include "gsi.h" +#include "ipa_data.h" +#include "ipa_endpoint.h" +#include "ipa_mem.h" + +/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.9 */ +enum ipa_resource_type { + /* Source resource types; first must have value 0 */ + IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0, + IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS, + IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF, + IPA_RESOURCE_TYPE_SRC_HPS_DMARS, + IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES, + + /* Destination resource types; first must have value 0 */ + IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0, + IPA_RESOURCE_TYPE_DST_DPS_DMARS, +}; + +/* Resource groups used for an SoC having IPA v4.9 */ +enum ipa_rsrc_group_id { + /* Source resource group identifiers */ + IPA_RSRC_GROUP_SRC_UL_DL = 0, + IPA_RSRC_GROUP_SRC_DMA, + IPA_RSRC_GROUP_SRC_UC_RX_Q, + IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */ + + /* Destination resource group identifiers */ + IPA_RSRC_GROUP_DST_UL_DL_DPL = 0, + IPA_RSRC_GROUP_DST_DMA, + IPA_RSRC_GROUP_DST_UC, + IPA_RSRC_GROUP_DST_DRB_IP, + IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */ +}; + +/* QSB configuration data for an SoC having IPA v4.9 */ +static const struct ipa_qsb_data ipa_qsb_data[] = { + [IPA_QSB_MASTER_DDR] = { + .max_writes = 8, + .max_reads = 0, /* no limit (hardware max) */ + .max_reads_beats = 120, + }, +}; + +/* Endpoint configuration data for an SoC having IPA v4.9 */ +static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { + [IPA_ENDPOINT_AP_COMMAND_TX] = { + .ee_id = GSI_EE_AP, + .channel_id = 6, + .endpoint_id = 7, + .toward_ipa = true, + .channel = { + .tre_count = 256, + .event_count = 256, + .tlv_count = 20, + }, + .endpoint = { + .config = { + .resource_group = IPA_RSRC_GROUP_SRC_UL_DL, + .dma_mode = true, + .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX, + .tx = { + .seq_type = IPA_SEQ_DMA, + }, + }, + }, + }, + [IPA_ENDPOINT_AP_LAN_RX] = { + .ee_id = GSI_EE_AP, + .channel_id = 7, + .endpoint_id = 11, + .toward_ipa = false, + .channel = { + .tre_count = 256, + .event_count = 256, + .tlv_count = 9, + }, + .endpoint = { + .config = { + .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, + .aggregation = true, + .status_enable = true, + .rx = { + .pad_align = ilog2(sizeof(u32)), + }, + }, + }, + }, + [IPA_ENDPOINT_AP_MODEM_TX] = { + .ee_id = GSI_EE_AP, + .channel_id = 2, + .endpoint_id = 2, + .toward_ipa = true, + .channel = { + .tre_count = 512, + .event_count = 512, + .tlv_count = 16, + }, + .endpoint = { + .filter_support = true, + .config = { + .resource_group = IPA_RSRC_GROUP_SRC_UL_DL, + .qmap = true, + .status_enable = true, + .tx = { + .seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC, + .status_endpoint = + IPA_ENDPOINT_MODEM_AP_RX, + }, + }, + }, + }, + [IPA_ENDPOINT_AP_MODEM_RX] = { + .ee_id = GSI_EE_AP, + .channel_id = 12, + .endpoint_id = 20, + .toward_ipa = false, + .channel = { + .tre_count = 256, + .event_count = 256, + .tlv_count = 9, + }, + .endpoint = { + .config = { + .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, + .qmap = true, + .aggregation = true, + .rx = { + .aggr_close_eof = true, + }, + }, + }, + }, + [IPA_ENDPOINT_MODEM_AP_TX] = { + .ee_id = GSI_EE_MODEM, + .channel_id = 0, + .endpoint_id = 5, + .toward_ipa = true, + .endpoint = { + .filter_support = true, + }, + }, + [IPA_ENDPOINT_MODEM_AP_RX] = { + .ee_id = GSI_EE_MODEM, + .channel_id = 7, + .endpoint_id = 16, + .toward_ipa = false, + }, + [IPA_ENDPOINT_MODEM_DL_NLO_TX] = { + .ee_id = GSI_EE_MODEM, + .channel_id = 2, + .endpoint_id = 8, + .toward_ipa = true, + 
.endpoint = { + .filter_support = true, + }, + }, +}; + +/* Source resource configuration data for an SoC having IPA v4.9 */ +static const struct ipa_resource ipa_resource_src[] = { + [IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 1, .max = 12, + }, + .limits[IPA_RSRC_GROUP_SRC_DMA] = { + .min = 1, .max = 1, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 1, .max = 12, + }, + }, + [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 20, .max = 20, + }, + .limits[IPA_RSRC_GROUP_SRC_DMA] = { + .min = 2, .max = 2, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 3, .max = 3, + }, + }, + [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 38, .max = 38, + }, + .limits[IPA_RSRC_GROUP_SRC_DMA] = { + .min = 4, .max = 4, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 8, .max = 8, + }, + }, + [IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 0, .max = 4, + }, + .limits[IPA_RSRC_GROUP_SRC_DMA] = { + .min = 0, .max = 4, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 0, .max = 4, + }, + }, + [IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = { + .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { + .min = 30, .max = 30, + }, + .limits[IPA_RSRC_GROUP_SRC_DMA] = { + .min = 8, .max = 8, + }, + .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { + .min = 8, .max = 8, + }, + }, +}; + +/* Destination resource configuration data for an SoC having IPA v4.9 */ +static const struct ipa_resource ipa_resource_dst[] = { + [IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = { + .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = { + .min = 9, .max = 9, + }, + .limits[IPA_RSRC_GROUP_DST_DMA] = { + .min = 1, .max = 1, + }, + .limits[IPA_RSRC_GROUP_DST_UC] = { + .min = 1, .max = 1, + }, + .limits[IPA_RSRC_GROUP_DST_DRB_IP] = { + .min = 39, .max = 39, + }, + }, + [IPA_RESOURCE_TYPE_DST_DPS_DMARS] = { + .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = { + .min = 2, .max = 3, + }, + .limits[IPA_RSRC_GROUP_DST_DMA] = { + .min = 1, .max = 2, + }, + .limits[IPA_RSRC_GROUP_DST_UC] = { + .min = 0, .max = 2, + }, + }, +}; + +/* Resource configuration data for an SoC having IPA v4.9 */ +static const struct ipa_resource_data ipa_resource_data = { + .rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT, + .rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT, + .resource_src_count = ARRAY_SIZE(ipa_resource_src), + .resource_src = ipa_resource_src, + .resource_dst_count = ARRAY_SIZE(ipa_resource_dst), + .resource_dst = ipa_resource_dst, +}; + +/* IPA-resident memory region data for an SoC having IPA v4.9 */ +static const struct ipa_mem ipa_mem_local_data[] = { + [IPA_MEM_UC_SHARED] = { + .offset = 0x0000, + .size = 0x0080, + .canary_count = 0, + }, + [IPA_MEM_UC_INFO] = { + .offset = 0x0080, + .size = 0x0200, + .canary_count = 0, + }, + [IPA_MEM_V4_FILTER_HASHED] = { .offset = 0x0288, + .size = 0x0078, + .canary_count = 2, + }, + [IPA_MEM_V4_FILTER] = { + .offset = 0x0308, + .size = 0x0078, + .canary_count = 2, + }, + [IPA_MEM_V6_FILTER_HASHED] = { + .offset = 0x0388, + .size = 0x0078, + .canary_count = 2, + }, + [IPA_MEM_V6_FILTER] = { + .offset = 0x0408, + .size = 0x0078, + .canary_count = 2, + }, + [IPA_MEM_V4_ROUTE_HASHED] = { + .offset = 0x0488, + .size = 0x0078, + .canary_count = 2, + }, + [IPA_MEM_V4_ROUTE] = { + .offset = 0x0508, + .size = 0x0078, + .canary_count = 2, + }, + [IPA_MEM_V6_ROUTE_HASHED] = { + .offset = 0x0588, + .size = 0x0078, + .canary_count = 2, + }, + [IPA_MEM_V6_ROUTE] = { + .offset = 0x0608, + 
.size = 0x0078, + .canary_count = 2, + }, + [IPA_MEM_MODEM_HEADER] = { + .offset = 0x0688, + .size = 0x0240, + .canary_count = 2, + }, + [IPA_MEM_AP_HEADER] = { + .offset = 0x08c8, + .size = 0x0200, + .canary_count = 0, + }, + [IPA_MEM_MODEM_PROC_CTX] = { + .offset = 0x0ad0, + .size = 0x0b20, + .canary_count = 2, + }, + [IPA_MEM_AP_PROC_CTX] = { + .offset = 0x15f0, + .size = 0x0200, + .canary_count = 0, + }, + [IPA_MEM_NAT_TABLE] = { + .offset = 0x1800, + .size = 0x0d00, + .canary_count = 4, + }, + [IPA_MEM_STATS_QUOTA_MODEM] = { + .offset = 0x2510, + .size = 0x0030, + .canary_count = 4, + }, + [IPA_MEM_STATS_QUOTA_AP] = { + .offset = 0x2540, + .size = 0x0048, + .canary_count = 0, + }, + [IPA_MEM_STATS_TETHERING] = { + .offset = 0x2588, + .size = 0x0238, + .canary_count = 0, + }, + [IPA_MEM_STATS_FILTER_ROUTE] = { + .offset = 0x27c0, + .size = 0x0800, + .canary_count = 0, + }, + [IPA_MEM_STATS_DROP] = { + .offset = 0x2fc0, + .size = 0x0020, + .canary_count = 0, + }, + [IPA_MEM_MODEM] = { + .offset = 0x2fe8, + .size = 0x0800, + .canary_count = 2, + }, + [IPA_MEM_UC_EVENT_RING] = { + .offset = 0x3800, + .size = 0x1000, + .canary_count = 1, + }, + [IPA_MEM_PDN_CONFIG] = { + .offset = 0x4800, + .size = 0x0050, + .canary_count = 0, + }, +}; + +/* Memory configuration data for an SoC having IPA v4.9 */ +static const struct ipa_mem_data ipa_mem_data = { + .local_count = ARRAY_SIZE(ipa_mem_local_data), + .local = ipa_mem_local_data, + .imem_addr = 0x146bd000, + .imem_size = 0x00002000, + .smem_id = 497, + .smem_size = 0x00009000, +}; + +/* Interconnect rates are in 1000 byte/second units */ +static const struct ipa_interconnect_data ipa_interconnect_data[] = { + { + .name = "ipa_to_llcc", + .peak_bandwidth = 600000, /* 600 MBps */ + .average_bandwidth = 150000, /* 150 MBps */ + }, + { + .name = "llcc_to_ebi1", + .peak_bandwidth = 1804000, /* 1.804 GBps */ + .average_bandwidth = 150000, /* 150 MBps */ + }, + /* Average rate is unused for the next interconnect */ + { + .name = "appss_to_ipa", + .peak_bandwidth = 74000, /* 74 MBps */ + .average_bandwidth = 0, /* unused */ + }, + +}; + +/* Clock and interconnect configuration data for an SoC having IPA v4.9 */ +static const struct ipa_clock_data ipa_clock_data = { + .core_clock_rate = 60 * 1000 * 1000, /* Hz */ + .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), + .interconnect_data = ipa_interconnect_data, +}; + +/* Configuration data for an SoC having IPA v4.9. 
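
/*
 * The ipa_mem_local_data[] table above is order-sensitive: each region
 * must end at or before the next region's offset, with the gaps holding
 * the declared canary words (canary_count 32-bit values immediately
 * before a region's offset). A small host-side check of that invariant
 * over the first few entries; the offsets and sizes are copied from the
 * table, the region struct is trimmed down for illustration.
 */
#include <stdio.h>

struct region {
	unsigned int offset;
	unsigned int size;
};

static const struct region map[] = {
	{ 0x0000, 0x0080 },	/* IPA_MEM_UC_SHARED */
	{ 0x0080, 0x0200 },	/* IPA_MEM_UC_INFO */
	{ 0x0288, 0x0078 },	/* IPA_MEM_V4_FILTER_HASHED, 2 canaries */
	{ 0x0308, 0x0078 },	/* IPA_MEM_V4_FILTER, 2 canaries */
};

int main(void)
{
	unsigned int i;

	for (i = 1; i < sizeof(map) / sizeof(map[0]); i++) {
		if (map[i - 1].offset + map[i - 1].size > map[i].offset) {
			printf("overlap at entry %u\n", i);
			return 1;
		}
	}
	printf("regions are ordered and non-overlapping\n");
	return 0;
}
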
*/ +const struct ipa_data ipa_data_v4_9 = { + .version = IPA_VERSION_4_9, + .qsb_count = ARRAY_SIZE(ipa_qsb_data), + .qsb_data = ipa_qsb_data, + .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data), + .endpoint_data = ipa_gsi_endpoint_data, + .resource_data = &ipa_resource_data, + .mem_data = &ipa_mem_data, + .clock_data = &ipa_clock_data, +}; diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h index e3212ea9e3bc..5c4c8d72d7d8 100644 --- a/drivers/net/ipa/ipa_data.h +++ b/drivers/net/ipa/ipa_data.h @@ -303,6 +303,7 @@ struct ipa_data { extern const struct ipa_data ipa_data_v3_5_1; extern const struct ipa_data ipa_data_v4_2; extern const struct ipa_data ipa_data_v4_5; +extern const struct ipa_data ipa_data_v4_9; extern const struct ipa_data ipa_data_v4_11; #endif /* _IPA_DATA_H_ */ diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c index 0d168afcdf04..9915603ed10b 100644 --- a/drivers/net/ipa/ipa_main.c +++ b/drivers/net/ipa/ipa_main.c @@ -67,7 +67,7 @@ */ /* The name of the GSI firmware file relative to /lib/firmware */ -#define IPA_FWS_PATH "ipa_fws.mdt" +#define IPA_FW_PATH_DEFAULT "ipa_fws.mdt" #define IPA_PAS_ID 15 /* Shift of 19.2 MHz timestamp to achieve lower resolution timestamps */ @@ -517,6 +517,7 @@ static int ipa_firmware_load(struct device *dev) struct device_node *node; struct resource res; phys_addr_t phys; + const char *path; ssize_t size; void *virt; int ret; @@ -534,9 +535,17 @@ static int ipa_firmware_load(struct device *dev) return ret; } - ret = request_firmware(&fw, IPA_FWS_PATH, dev); + /* Use name from DTB if specified; use default for *any* error */ + ret = of_property_read_string(dev->of_node, "firmware-name", &path); if (ret) { - dev_err(dev, "error %d requesting \"%s\"\n", ret, IPA_FWS_PATH); + dev_dbg(dev, "error %d getting \"firmware-name\" resource\n", + ret); + path = IPA_FW_PATH_DEFAULT; + } + + ret = request_firmware(&fw, path, dev); + if (ret) { + dev_err(dev, "error %d requesting \"%s\"\n", ret, path); return ret; } @@ -549,13 +558,11 @@ static int ipa_firmware_load(struct device *dev) goto out_release_firmware; } - ret = qcom_mdt_load(dev, fw, IPA_FWS_PATH, IPA_PAS_ID, - virt, phys, size, NULL); + ret = qcom_mdt_load(dev, fw, path, IPA_PAS_ID, virt, phys, size, NULL); if (ret) - dev_err(dev, "error %d loading \"%s\"\n", ret, IPA_FWS_PATH); + dev_err(dev, "error %d loading \"%s\"\n", ret, path); else if ((ret = qcom_scm_pas_auth_and_reset(IPA_PAS_ID))) - dev_err(dev, "error %d authenticating \"%s\"\n", ret, - IPA_FWS_PATH); + dev_err(dev, "error %d authenticating \"%s\"\n", ret, path); memunmap(virt); out_release_firmware: @@ -578,6 +585,10 @@ static const struct of_device_id ipa_match[] = { .data = &ipa_data_v4_5, }, { + .compatible = "qcom,sm8350-ipa", + .data = &ipa_data_v4_9, + }, + { .compatible = "qcom,sc7280-ipa", .data = &ipa_data_v4_11, }, diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index d7799beb811c..e0f56850edc5 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -144,6 +144,9 @@ #define ATH8035_PHY_ID 0x004dd072 #define AT8030_PHY_ID_MASK 0xffffffef +#define AT803X_PAGE_FIBER 0 +#define AT803X_PAGE_COPPER 1 + MODULE_DESCRIPTION("Qualcomm Atheros AR803x PHY driver"); MODULE_AUTHOR("Matus Ujhelyi"); MODULE_LICENSE("GPL"); @@ -198,6 +201,35 @@ static int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg, return phy_write(phydev, AT803X_DEBUG_DATA, val); } +static int at803x_write_page(struct phy_device *phydev, int page) +{ + int mask; + int set; + + if (page == 
AT803X_PAGE_COPPER) { + set = AT803X_BT_BX_REG_SEL; + mask = 0; + } else { + set = 0; + mask = AT803X_BT_BX_REG_SEL; + } + + return __phy_modify(phydev, AT803X_REG_CHIP_CONFIG, mask, set); +} + +static int at803x_read_page(struct phy_device *phydev) +{ + int ccr = __phy_read(phydev, AT803X_REG_CHIP_CONFIG); + + if (ccr < 0) + return ccr; + + if (ccr & AT803X_BT_BX_REG_SEL) + return AT803X_PAGE_COPPER; + + return AT803X_PAGE_FIBER; +} + static int at803x_enable_rx_delay(struct phy_device *phydev) { return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, 0, @@ -535,6 +567,7 @@ static int at803x_probe(struct phy_device *phydev) { struct device *dev = &phydev->mdio.dev; struct at803x_priv *priv; + int ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) @@ -542,7 +575,20 @@ static int at803x_probe(struct phy_device *phydev) phydev->priv = priv; - return at803x_parse_dt(phydev); + ret = at803x_parse_dt(phydev); + if (ret) + return ret; + + /* Some bootloaders leave the fiber page selected. + * Switch to the copper page, as otherwise we read + * the PHY capabilities from the fiber side. + */ + if (at803x_match_phy_id(phydev, ATH8031_PHY_ID)) { + ret = phy_select_page(phydev, AT803X_PAGE_COPPER); + ret = phy_restore_page(phydev, AT803X_PAGE_COPPER, ret); + } + + return ret; } static void at803x_remove(struct phy_device *phydev) @@ -1166,6 +1212,8 @@ static struct phy_driver at803x_driver[] = { .get_wol = at803x_get_wol, .suspend = at803x_suspend, .resume = at803x_resume, + .read_page = at803x_read_page, + .write_page = at803x_write_page, /* PHY_GBIT_FEATURES */ .read_status = at803x_read_status, .config_intr = &at803x_config_intr, diff --git a/drivers/net/phy/marvell-88x2222.c b/drivers/net/phy/marvell-88x2222.c index eca8c2f20684..9b9ac3ef735d 100644 --- a/drivers/net/phy/marvell-88x2222.c +++ b/drivers/net/phy/marvell-88x2222.c @@ -32,6 +32,10 @@ #define MV_HOST_RST_SW BIT(7) #define MV_PORT_RST_SW (MV_LINE_RST_SW | MV_HOST_RST_SW) +/* PMD Receive Signal Detect */ +#define MV_RX_SIGNAL_DETECT 0x000A +#define MV_RX_SIGNAL_DETECT_GLOBAL BIT(0) + /* 1000Base-X/SGMII Control Register */ #define MV_1GBX_CTRL (0x2000 + MII_BMCR) @@ -48,9 +52,12 @@ #define MV_1GBX_PHY_STAT_SPEED100 BIT(14) #define MV_1GBX_PHY_STAT_SPEED1000 BIT(15) +#define AUTONEG_TIMEOUT 3 + struct mv2222_data { phy_interface_t line_interface; __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + bool sfp_link; }; /* SFI PMA transmit enable */ @@ -81,86 +88,6 @@ static int mv2222_soft_reset(struct phy_device *phydev) 5000, 1000000, true); } -/* Returns negative on error, 0 if link is down, 1 if link is up */ -static int mv2222_read_status_10g(struct phy_device *phydev) -{ - int val, link = 0; - - val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1); - if (val < 0) - return val; - - if (val & MDIO_STAT1_LSTATUS) { - link = 1; - - /* 10GBASE-R do not support auto-negotiation */ - phydev->autoneg = AUTONEG_DISABLE; - phydev->speed = SPEED_10000; - phydev->duplex = DUPLEX_FULL; - } - - return link; -} - -/* Returns negative on error, 0 if link is down, 1 if link is up */ -static int mv2222_read_status_1g(struct phy_device *phydev) -{ - int val, link = 0; - - val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_STAT); - if (val < 0) - return val; - - if (!(val & BMSR_LSTATUS) || - (phydev->autoneg == AUTONEG_ENABLE && - !(val & BMSR_ANEGCOMPLETE))) - return 0; - - link = 1; - - val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_PHY_STAT); - if (val < 0) - return val; - - if (val & MV_1GBX_PHY_STAT_AN_RESOLVED) { - if (val & 
MV_1GBX_PHY_STAT_DUPLEX) - phydev->duplex = DUPLEX_FULL; - else - phydev->duplex = DUPLEX_HALF; - - if (val & MV_1GBX_PHY_STAT_SPEED1000) - phydev->speed = SPEED_1000; - else if (val & MV_1GBX_PHY_STAT_SPEED100) - phydev->speed = SPEED_100; - else - phydev->speed = SPEED_10; - } - - return link; -} - -static int mv2222_read_status(struct phy_device *phydev) -{ - struct mv2222_data *priv = phydev->priv; - int link; - - phydev->link = 0; - phydev->speed = SPEED_UNKNOWN; - phydev->duplex = DUPLEX_UNKNOWN; - - if (priv->line_interface == PHY_INTERFACE_MODE_10GBASER) - link = mv2222_read_status_10g(phydev); - else - link = mv2222_read_status_1g(phydev); - - if (link < 0) - return link; - - phydev->link = link; - - return 0; -} - static int mv2222_disable_aneg(struct phy_device *phydev) { int ret = phy_clear_bits_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_CTRL, @@ -248,6 +175,24 @@ static bool mv2222_is_1gbx_capable(struct phy_device *phydev) priv->supported); } +static bool mv2222_is_sgmii_capable(struct phy_device *phydev) +{ + struct mv2222_data *priv = phydev->priv; + + return (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + priv->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + priv->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + priv->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + priv->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, + priv->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, + priv->supported)); +} + static int mv2222_config_line(struct phy_device *phydev) { struct mv2222_data *priv = phydev->priv; @@ -267,7 +212,8 @@ static int mv2222_config_line(struct phy_device *phydev) } } -static int mv2222_setup_forced(struct phy_device *phydev) +/* Switch between 1G (1000Base-X/SGMII) and 10G (10GBase-R) modes */ +static int mv2222_swap_line_type(struct phy_device *phydev) { struct mv2222_data *priv = phydev->priv; bool changed = false; @@ -275,25 +221,23 @@ static int mv2222_setup_forced(struct phy_device *phydev) switch (priv->line_interface) { case PHY_INTERFACE_MODE_10GBASER: - if (phydev->speed == SPEED_1000 && - mv2222_is_1gbx_capable(phydev)) { + if (mv2222_is_1gbx_capable(phydev)) { priv->line_interface = PHY_INTERFACE_MODE_1000BASEX; changed = true; } - break; - case PHY_INTERFACE_MODE_1000BASEX: - if (phydev->speed == SPEED_10000 && - mv2222_is_10g_capable(phydev)) { - priv->line_interface = PHY_INTERFACE_MODE_10GBASER; + if (mv2222_is_sgmii_capable(phydev)) { + priv->line_interface = PHY_INTERFACE_MODE_SGMII; changed = true; } break; + case PHY_INTERFACE_MODE_1000BASEX: case PHY_INTERFACE_MODE_SGMII: - ret = mv2222_set_sgmii_speed(phydev); - if (ret < 0) - return ret; + if (mv2222_is_10g_capable(phydev)) { + priv->line_interface = PHY_INTERFACE_MODE_10GBASER; + changed = true; + } break; default: @@ -306,6 +250,29 @@ static int mv2222_setup_forced(struct phy_device *phydev) return ret; } + return 0; +} + +static int mv2222_setup_forced(struct phy_device *phydev) +{ + struct mv2222_data *priv = phydev->priv; + int ret; + + if (priv->line_interface == PHY_INTERFACE_MODE_10GBASER) { + if (phydev->speed < SPEED_10000 && + phydev->speed != SPEED_UNKNOWN) { + ret = mv2222_swap_line_type(phydev); + if (ret < 0) + return ret; + } + } + + if (priv->line_interface == PHY_INTERFACE_MODE_SGMII) { + ret = mv2222_set_sgmii_speed(phydev); + if (ret < 0) + return ret; + } + return mv2222_disable_aneg(phydev); } @@ -319,17 +286,9 @@ static int mv2222_config_aneg(struct 
phy_device *phydev) return 0; if (phydev->autoneg == AUTONEG_DISABLE || - phydev->speed == SPEED_10000) + priv->line_interface == PHY_INTERFACE_MODE_10GBASER) return mv2222_setup_forced(phydev); - if (priv->line_interface == PHY_INTERFACE_MODE_10GBASER && - mv2222_is_1gbx_capable(phydev)) { - priv->line_interface = PHY_INTERFACE_MODE_1000BASEX; - ret = mv2222_config_line(phydev); - if (ret < 0) - return ret; - } - adv = linkmode_adv_to_mii_adv_x(priv->supported, ETHTOOL_LINK_MODE_1000baseX_Full_BIT); @@ -363,6 +322,135 @@ static int mv2222_aneg_done(struct phy_device *phydev) return (ret & BMSR_ANEGCOMPLETE); } +/* Returns negative on error, 0 if link is down, 1 if link is up */ +static int mv2222_read_status_10g(struct phy_device *phydev) +{ + static int timeout; + int val, link = 0; + + val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1); + if (val < 0) + return val; + + if (val & MDIO_STAT1_LSTATUS) { + link = 1; + + /* 10GBASE-R do not support auto-negotiation */ + phydev->autoneg = AUTONEG_DISABLE; + phydev->speed = SPEED_10000; + phydev->duplex = DUPLEX_FULL; + } else { + if (phydev->autoneg == AUTONEG_ENABLE) { + timeout++; + + if (timeout > AUTONEG_TIMEOUT) { + timeout = 0; + + val = mv2222_swap_line_type(phydev); + if (val < 0) + return val; + + return mv2222_config_aneg(phydev); + } + } + } + + return link; +} + +/* Returns negative on error, 0 if link is down, 1 if link is up */ +static int mv2222_read_status_1g(struct phy_device *phydev) +{ + static int timeout; + int val, link = 0; + + val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_STAT); + if (val < 0) + return val; + + if (phydev->autoneg == AUTONEG_ENABLE && + !(val & BMSR_ANEGCOMPLETE)) { + timeout++; + + if (timeout > AUTONEG_TIMEOUT) { + timeout = 0; + + val = mv2222_swap_line_type(phydev); + if (val < 0) + return val; + + return mv2222_config_aneg(phydev); + } + + return 0; + } + + if (!(val & BMSR_LSTATUS)) + return 0; + + link = 1; + + val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_PHY_STAT); + if (val < 0) + return val; + + if (val & MV_1GBX_PHY_STAT_AN_RESOLVED) { + if (val & MV_1GBX_PHY_STAT_DUPLEX) + phydev->duplex = DUPLEX_FULL; + else + phydev->duplex = DUPLEX_HALF; + + if (val & MV_1GBX_PHY_STAT_SPEED1000) + phydev->speed = SPEED_1000; + else if (val & MV_1GBX_PHY_STAT_SPEED100) + phydev->speed = SPEED_100; + else + phydev->speed = SPEED_10; + } + + return link; +} + +static bool mv2222_link_is_operational(struct phy_device *phydev) +{ + struct mv2222_data *priv = phydev->priv; + int val; + + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MV_RX_SIGNAL_DETECT); + if (val < 0 || !(val & MV_RX_SIGNAL_DETECT_GLOBAL)) + return false; + + if (phydev->sfp_bus && !priv->sfp_link) + return false; + + return true; +} + +static int mv2222_read_status(struct phy_device *phydev) +{ + struct mv2222_data *priv = phydev->priv; + int link; + + phydev->link = 0; + phydev->speed = SPEED_UNKNOWN; + phydev->duplex = DUPLEX_UNKNOWN; + + if (!mv2222_link_is_operational(phydev)) + return 0; + + if (priv->line_interface == PHY_INTERFACE_MODE_10GBASER) + link = mv2222_read_status_10g(phydev); + else + link = mv2222_read_status_1g(phydev); + + if (link < 0) + return link; + + phydev->link = link; + + return 0; +} + static int mv2222_resume(struct phy_device *phydev) { return mv2222_tx_enable(phydev); @@ -424,11 +512,7 @@ static int mv2222_sfp_insert(void *upstream, const struct sfp_eeprom_id *id) return ret; if (mutex_trylock(&phydev->lock)) { - if (priv->line_interface == PHY_INTERFACE_MODE_10GBASER) - ret = 
mv2222_setup_forced(phydev); - else - ret = mv2222_config_aneg(phydev); - + ret = mv2222_config_aneg(phydev); mutex_unlock(&phydev->lock); } @@ -446,9 +530,29 @@ static void mv2222_sfp_remove(void *upstream) linkmode_zero(priv->supported); } +static void mv2222_sfp_link_up(void *upstream) +{ + struct phy_device *phydev = upstream; + struct mv2222_data *priv; + + priv = phydev->priv; + priv->sfp_link = true; +} + +static void mv2222_sfp_link_down(void *upstream) +{ + struct phy_device *phydev = upstream; + struct mv2222_data *priv; + + priv = phydev->priv; + priv->sfp_link = false; +} + static const struct sfp_upstream_ops sfp_phy_ops = { .module_insert = mv2222_sfp_insert, .module_remove = mv2222_sfp_remove, + .link_up = mv2222_sfp_link_up, + .link_down = mv2222_sfp_link_down, .attach = phy_sfp_attach, .detach = phy_sfp_detach, }; diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index e26a5d663f8a..8018ddf7f316 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -3021,9 +3021,34 @@ static struct phy_driver marvell_drivers[] = { .get_stats = marvell_get_stats, }, { - .phy_id = MARVELL_PHY_ID_88E6390, + .phy_id = MARVELL_PHY_ID_88E6341_FAMILY, .phy_id_mask = MARVELL_PHY_ID_MASK, - .name = "Marvell 88E6390", + .name = "Marvell 88E6341 Family", + /* PHY_GBIT_FEATURES */ + .flags = PHY_POLL_CABLE_TEST, + .probe = m88e1510_probe, + .config_init = marvell_config_init, + .config_aneg = m88e6390_config_aneg, + .read_status = marvell_read_status, + .config_intr = marvell_config_intr, + .handle_interrupt = marvell_handle_interrupt, + .resume = genphy_resume, + .suspend = genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, + .get_sset_count = marvell_get_sset_count, + .get_strings = marvell_get_strings, + .get_stats = marvell_get_stats, + .get_tunable = m88e1540_get_tunable, + .set_tunable = m88e1540_set_tunable, + .cable_test_start = marvell_vct7_cable_test_start, + .cable_test_tdr_start = marvell_vct5_cable_test_tdr_start, + .cable_test_get_status = marvell_vct7_cable_test_get_status, + }, + { + .phy_id = MARVELL_PHY_ID_88E6390_FAMILY, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88E6390 Family", /* PHY_GBIT_FEATURES */ .flags = PHY_POLL_CABLE_TEST, .probe = m88e6390_probe, @@ -3107,7 +3132,8 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = { { MARVELL_PHY_ID_88E1540, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1545, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK }, - { MARVELL_PHY_ID_88E6390, MARVELL_PHY_ID_MASK }, + { MARVELL_PHY_ID_88E6341_FAMILY, MARVELL_PHY_ID_MASK }, + { MARVELL_PHY_ID_88E6390_FAMILY, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1340S, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1548P, MARVELL_PHY_ID_MASK }, { } diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 20fb5638ac65..9119a860e9bd 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -29,7 +29,7 @@ #include <linux/usb/r8152.h> /* Information for net-next */ -#define NETNEXT_VERSION "11" +#define NETNEXT_VERSION "12" /* Information for net */ #define NET_VERSION "11" @@ -43,10 +43,14 @@ #define PLA_IDR 0xc000 #define PLA_RCR 0xc010 +#define PLA_RCR1 0xc012 #define PLA_RMS 0xc016 #define PLA_RXFIFO_CTRL0 0xc0a0 +#define PLA_RXFIFO_FULL 0xc0a2 #define PLA_RXFIFO_CTRL1 0xc0a4 +#define PLA_RX_FIFO_FULL 0xc0a6 #define PLA_RXFIFO_CTRL2 0xc0a8 +#define PLA_RX_FIFO_EMPTY 0xc0aa #define PLA_DMY_REG0 0xc0b0 #define PLA_FMC 0xc0b4 #define PLA_CFG_WOL 0xc0b6 @@ -63,6 +67,8 @@ 
#define PLA_MACDBG_PRE 0xd38c /* RTL_VER_04 only */ #define PLA_MACDBG_POST 0xd38e /* RTL_VER_04 only */ #define PLA_EXTRA_STATUS 0xd398 +#define PLA_GPHY_CTRL 0xd3ae +#define PLA_POL_GPIO_CTRL 0xdc6a #define PLA_EFUSE_DATA 0xdd00 #define PLA_EFUSE_CMD 0xdd02 #define PLA_LEDSEL 0xdd90 @@ -72,6 +78,8 @@ #define PLA_LWAKE_CTRL_REG 0xe007 #define PLA_GPHY_INTR_IMR 0xe022 #define PLA_EEE_CR 0xe040 +#define PLA_EEE_TXTWSYS 0xe04c +#define PLA_EEE_TXTWSYS_2P5G 0xe058 #define PLA_EEEP_CR 0xe080 #define PLA_MAC_PWR_CTRL 0xe0c0 #define PLA_MAC_PWR_CTRL2 0xe0ca @@ -82,6 +90,7 @@ #define PLA_TCR1 0xe612 #define PLA_MTPS 0xe615 #define PLA_TXFIFO_CTRL 0xe618 +#define PLA_TXFIFO_FULL 0xe61a #define PLA_RSTTALLY 0xe800 #define PLA_CR 0xe813 #define PLA_CRWECR 0xe81c @@ -98,6 +107,7 @@ #define PLA_SFF_STS_7 0xe8de #define PLA_PHYSTATUS 0xe908 #define PLA_CONFIG6 0xe90a /* CONFIG6 */ +#define PLA_USB_CFG 0xe952 #define PLA_BP_BA 0xfc26 #define PLA_BP_0 0xfc28 #define PLA_BP_1 0xfc2a @@ -112,6 +122,7 @@ #define USB_USB2PHY 0xb41e #define USB_SSPHYLINK1 0xb426 #define USB_SSPHYLINK2 0xb428 +#define USB_L1_CTRL 0xb45e #define USB_U2P3_CTRL 0xb460 #define USB_CSR_DUMMY1 0xb464 #define USB_CSR_DUMMY2 0xb466 @@ -122,7 +133,12 @@ #define USB_FW_FIX_EN0 0xcfca #define USB_FW_FIX_EN1 0xcfcc #define USB_LPM_CONFIG 0xcfd8 +#define USB_ECM_OPTION 0xcfee #define USB_CSTMR 0xcfef /* RTL8153A */ +#define USB_MISC_2 0xcfff +#define USB_ECM_OP 0xd26b +#define USB_GPHY_CTRL 0xd284 +#define USB_SPEED_OPTION 0xd32a #define USB_FW_CTRL 0xd334 /* RTL8153B */ #define USB_FC_TIMER 0xd340 #define USB_USB_CTRL 0xd406 @@ -136,16 +152,20 @@ #define USB_RX_EXTRA_AGGR_TMR 0xd432 /* RTL8153B */ #define USB_TX_DMA 0xd434 #define USB_UPT_RXDMA_OWN 0xd437 +#define USB_UPHY3_MDCMDIO 0xd480 #define USB_TOLERANCE 0xd490 #define USB_LPM_CTRL 0xd41a #define USB_BMU_RESET 0xd4b0 +#define USB_BMU_CONFIG 0xd4b4 #define USB_U1U2_TIMER 0xd4da #define USB_FW_TASK 0xd4e8 /* RTL8153B */ +#define USB_RX_AGGR_NUM 0xd4ee #define USB_UPS_CTRL 0xd800 #define USB_POWER_CUT 0xd80a #define USB_MISC_0 0xd81a #define USB_MISC_1 0xd81f #define USB_AFE_CTRL2 0xd824 +#define USB_UPHY_XTAL 0xd826 #define USB_UPS_CFG 0xd842 #define USB_UPS_FLAGS 0xd848 #define USB_WDT1_CTRL 0xe404 @@ -188,6 +208,9 @@ #define OCP_EEE_ABLE 0xa5c4 #define OCP_EEE_ADV 0xa5d0 #define OCP_EEE_LPABLE 0xa5d2 +#define OCP_10GBT_CTRL 0xa5d4 +#define OCP_10GBT_STAT 0xa5d6 +#define OCP_EEE_ADV2 0xa6d4 #define OCP_PHY_STATE 0xa708 /* nway state for 8153 */ #define OCP_PHY_PATCH_STAT 0xb800 #define OCP_PHY_PATCH_CMD 0xb820 @@ -199,6 +222,7 @@ /* SRAM Register */ #define SRAM_GREEN_CFG 0x8011 #define SRAM_LPF_CFG 0x8012 +#define SRAM_GPHY_FW_VER 0x801e #define SRAM_10M_AMP1 0x8080 #define SRAM_10M_AMP2 0x8082 #define SRAM_IMPEDANCE 0x8084 @@ -210,11 +234,19 @@ #define RCR_AM 0x00000004 #define RCR_AB 0x00000008 #define RCR_ACPT_ALL (RCR_AAP | RCR_APM | RCR_AM | RCR_AB) +#define SLOT_EN BIT(11) + +/* PLA_RCR1 */ +#define OUTER_VLAN BIT(7) +#define INNER_VLAN BIT(6) /* PLA_RXFIFO_CTRL0 */ #define RXFIFO_THR1_NORMAL 0x00080002 #define RXFIFO_THR1_OOB 0x01800003 +/* PLA_RXFIFO_FULL */ +#define RXFIFO_FULL_MASK 0xfff + /* PLA_RXFIFO_CTRL1 */ #define RXFIFO_THR2_FULL 0x00000060 #define RXFIFO_THR2_HIGH 0x00000038 @@ -249,6 +281,9 @@ /* PLA_TCR1 */ #define VERSION_MASK 0x7cf0 +#define IFG_MASK (BIT(3) | BIT(9) | BIT(8)) +#define IFG_144NS BIT(9) +#define IFG_96NS (BIT(9) | BIT(8)) /* PLA_MTPS */ #define MTPS_JUMBO (12 * 1024 / 64) @@ -282,6 +317,7 @@ #define MCU_BORW_EN 0x4000 /* PLA_CPCR */ +#define 
FLOW_CTRL_EN BIT(0) #define CPCR_RX_VLAN 0x0040 /* PLA_CFG_WOL */ @@ -307,6 +343,10 @@ /* PLA_CONFIG6 */ #define LANWAKE_CLR_EN BIT(0) +/* PLA_USB_CFG */ +#define EN_XG_LIP BIT(1) +#define EN_G_LIP BIT(2) + /* PLA_CONFIG5 */ #define BWF_EN 0x0040 #define MWF_EN 0x0020 @@ -330,6 +370,7 @@ /* PLA_MAC_PWR_CTRL2 */ #define EEE_SPDWN_RATIO 0x8007 #define MAC_CLK_SPDWN_EN BIT(15) +#define EEE_SPDWN_RATIO_MASK 0xff /* PLA_MAC_PWR_CTRL3 */ #define PLA_MCU_SPDWN_EN BIT(14) @@ -342,6 +383,7 @@ #define PWRSAVE_SPDWN_EN 0x1000 #define RXDV_SPDWN_EN 0x0800 #define TX10MIDLE_EN 0x0100 +#define IDLE_SPDWN_EN BIT(6) #define TP100_SPDWN_EN 0x0020 #define TP500_SPDWN_EN 0x0010 #define TP1000_SPDWN_EN 0x0008 @@ -382,6 +424,13 @@ #define LINK_CHANGE_FLAG BIT(8) #define POLL_LINK_CHG BIT(0) +/* PLA_GPHY_CTRL */ +#define GPHY_FLASH BIT(1) + +/* PLA_POL_GPIO_CTRL */ +#define DACK_DET_EN BIT(15) +#define POL_GPHY_PATCH BIT(4) + /* USB_USB2PHY */ #define USB2PHY_SUSPEND 0x0001 #define USB2PHY_L1 0x0002 @@ -430,6 +479,9 @@ #define BMU_RESET_EP_IN 0x01 #define BMU_RESET_EP_OUT 0x02 +/* USB_BMU_CONFIG */ +#define ACT_ODMA BIT(1) + /* USB_UPT_RXDMA_OWN */ #define OWN_UPDATE BIT(0) #define OWN_CLEAR BIT(1) @@ -437,27 +489,52 @@ /* USB_FW_TASK */ #define FC_PATCH_TASK BIT(1) +/* USB_RX_AGGR_NUM */ +#define RX_AGGR_NUM_MASK 0x1ff + /* USB_UPS_CTRL */ #define POWER_CUT 0x0100 /* USB_PM_CTRL_STATUS */ #define RESUME_INDICATE 0x0001 +/* USB_ECM_OPTION */ +#define BYPASS_MAC_RESET BIT(5) + /* USB_CSTMR */ #define FORCE_SUPER BIT(0) +/* USB_MISC_2 */ +#define UPS_FORCE_PWR_DOWN BIT(0) + +/* USB_ECM_OP */ +#define EN_ALL_SPEED BIT(0) + +/* USB_GPHY_CTRL */ +#define GPHY_PATCH_DONE BIT(2) +#define BYPASS_FLASH BIT(5) +#define BACKUP_RESTRORE BIT(6) + +/* USB_SPEED_OPTION */ +#define RG_PWRDN_EN BIT(8) +#define ALL_SPEED_OFF BIT(9) + /* USB_FW_CTRL */ #define FLOW_CTRL_PATCH_OPT BIT(1) +#define AUTO_SPEEDUP BIT(3) +#define FLOW_CTRL_PATCH_2 BIT(8) /* USB_FC_TIMER */ #define CTRL_TIMER_EN BIT(15) /* USB_USB_CTRL */ +#define CDC_ECM_EN BIT(3) #define RX_AGG_DISABLE 0x0010 #define RX_ZERO_EN 0x0080 /* USB_U2P3_CTRL */ #define U2P3_ENABLE 0x0001 +#define RX_DETECT8 BIT(3) /* USB_POWER_CUT */ #define PWR_EN 0x0001 @@ -493,8 +570,12 @@ #define SEN_VAL_NORMAL 0xa000 #define SEL_RXIDLE 0x0100 +/* USB_UPHY_XTAL */ +#define OOBS_POLLING BIT(8) + /* USB_UPS_CFG */ #define SAW_CNT_1MS_MASK 0x0fff +#define MID_REVERSE BIT(5) /* RTL8156A */ /* USB_UPS_FLAGS */ #define UPS_FLAGS_R_TUNE BIT(0) @@ -502,6 +583,7 @@ #define UPS_FLAGS_250M_CKDIV BIT(2) #define UPS_FLAGS_EN_ALDPS BIT(3) #define UPS_FLAGS_CTAP_SHORT_DIS BIT(4) +#define UPS_FLAGS_SPEED_MASK (0xf << 16) #define ups_flags_speed(x) ((x) << 16) #define UPS_FLAGS_EN_EEE BIT(20) #define UPS_FLAGS_EN_500M_EEE BIT(21) @@ -522,6 +604,8 @@ enum spd_duplex { FORCE_10M_FULL, FORCE_100M_HALF, FORCE_100M_FULL, + FORCE_1000M_FULL, + NWAY_2500M_FULL, }; /* OCP_ALDPS_CONFIG */ @@ -586,6 +670,9 @@ enum spd_duplex { #define EN_10M_CLKDIV BIT(11) #define EN_10M_BGOFF 0x0080 +/* OCP_10GBT_CTRL */ +#define RTL_ADV2_5G_F_R BIT(5) /* Advertise 2.5GBASE-T fast-retrain */ + /* OCP_PHY_STATE */ #define TXDIS_STATE 0x01 #define ABD_STATE 0x02 @@ -605,7 +692,8 @@ enum spd_duplex { #define EN_EMI_L 0x0040 /* OCP_SYSCLK_CFG */ -#define clk_div_expo(x) (min(x, 5) << 8) +#define sysclk_div_expo(x) (min(x, 5) << 8) +#define clk_div_expo(x) (min(x, 5) << 4) /* SRAM_GREEN_CFG */ #define GREEN_ETH_EN BIT(15) @@ -636,6 +724,11 @@ enum spd_duplex { #define BP4_SUPER_ONLY 0x1578 /* RTL_VER_04 only */ enum 
rtl_register_content { + _2500bps = BIT(10), + _1250bps = BIT(9), + _500bps = BIT(8), + _tx_flow = BIT(6), + _rx_flow = BIT(5), _1000bps = 0x10, _100bps = 0x08, _10bps = 0x04, @@ -643,6 +736,9 @@ enum rtl_register_content { FULL_DUP = 0x01, }; +#define is_speed_2500(_speed) (((_speed) & (_2500bps | LINK_STATUS)) == (_2500bps | LINK_STATUS)) +#define is_flow_control(_speed) (((_speed) & (_tx_flow | _rx_flow)) == (_tx_flow | _rx_flow)) + #define RTL8152_MAX_TX 4 #define RTL8152_MAX_RX 10 #define INTBUFSIZE 2 @@ -654,15 +750,12 @@ enum rtl_register_content { #define INTR_LINK 0x0004 -#define RTL8153_MAX_PACKET 9216 /* 9K */ -#define RTL8153_MAX_MTU (RTL8153_MAX_PACKET - VLAN_ETH_HLEN - \ - ETH_FCS_LEN) #define RTL8152_RMS (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) #define RTL8153_RMS RTL8153_MAX_PACKET #define RTL8152_TX_TIMEOUT (5 * HZ) -#define RTL8152_NAPI_WEIGHT 64 -#define rx_reserved_size(x) ((x) + VLAN_ETH_HLEN + ETH_FCS_LEN + \ - sizeof(struct rx_desc) + RX_ALIGN) +#define mtu_to_size(m) ((m) + VLAN_ETH_HLEN + ETH_FCS_LEN) +#define size_to_mtu(s) ((s) - VLAN_ETH_HLEN - ETH_FCS_LEN) +#define rx_reserved_size(x) (mtu_to_size(x) + sizeof(struct rx_desc) + RX_ALIGN) /* rtl8152 flags */ enum rtl8152_flags { @@ -792,9 +885,11 @@ struct r8152 { bool (*in_nway)(struct r8152 *tp); void (*hw_phy_cfg)(struct r8152 *tp); void (*autosuspend_en)(struct r8152 *tp, bool enable); + void (*change_mtu)(struct r8152 *tp); } rtl_ops; struct ups_info { + u32 r_tune:1; u32 _10m_ckdiv:1; u32 _250m_ckdiv:1; u32 aldps:1; @@ -836,7 +931,9 @@ struct r8152 { u32 rx_buf_sz; u32 rx_copybreak; u32 rx_pending; + u32 fc_pause_on, fc_pause_off; + u32 support_2500full:1; u16 ocp_base; u16 speed; u16 eee_adv; @@ -871,6 +968,66 @@ struct fw_header { struct fw_block blocks[]; } __packed; +enum rtl8152_fw_flags { + FW_FLAGS_USB = 0, + FW_FLAGS_PLA, + FW_FLAGS_START, + FW_FLAGS_STOP, + FW_FLAGS_NC, + FW_FLAGS_NC1, + FW_FLAGS_NC2, + FW_FLAGS_UC2, + FW_FLAGS_UC, + FW_FLAGS_SPEED_UP, + FW_FLAGS_VER, +}; + +enum rtl8152_fw_fixup_cmd { + FW_FIXUP_AND = 0, + FW_FIXUP_OR, + FW_FIXUP_NOT, + FW_FIXUP_XOR, +}; + +struct fw_phy_set { + __le16 addr; + __le16 data; +} __packed; + +struct fw_phy_speed_up { + struct fw_block blk_hdr; + __le16 fw_offset; + __le16 version; + __le16 fw_reg; + __le16 reserved; + char info[]; +} __packed; + +struct fw_phy_ver { + struct fw_block blk_hdr; + struct fw_phy_set ver; + __le32 reserved; +} __packed; + +struct fw_phy_fixup { + struct fw_block blk_hdr; + struct fw_phy_set setting; + __le16 bit_cmd; + __le16 reserved; +} __packed; + +struct fw_phy_union { + struct fw_block blk_hdr; + __le16 fw_offset; + __le16 fw_reg; + struct fw_phy_set pre_set[2]; + struct fw_phy_set bp[8]; + struct fw_phy_set bp_en; + u8 pre_num; + u8 bp_num; + char info[]; +} __packed; + /** * struct fw_mac - a firmware block used by RTL_FW_PLA and RTL_FW_USB. 
* The layout of the firmware block is: @@ -975,6 +1132,15 @@ enum rtl_fw_type { RTL_FW_PHY_START, RTL_FW_PHY_STOP, RTL_FW_PHY_NC, + RTL_FW_PHY_FIXUP, + RTL_FW_PHY_UNION_NC, + RTL_FW_PHY_UNION_NC1, + RTL_FW_PHY_UNION_NC2, + RTL_FW_PHY_UNION_UC2, + RTL_FW_PHY_UNION_UC, + RTL_FW_PHY_UNION_MISC, + RTL_FW_PHY_SPEED_UP, + RTL_FW_PHY_VER, }; enum rtl_version { @@ -988,6 +1154,15 @@ enum rtl_version { RTL_VER_07, RTL_VER_08, RTL_VER_09, + + RTL_TEST_01, + RTL_VER_10, + RTL_VER_11, + RTL_VER_12, + RTL_VER_13, + RTL_VER_14, + RTL_VER_15, + RTL_VER_MAX }; @@ -1003,6 +1178,7 @@ enum tx_csum_stat { #define RTL_ADVERTISED_100_FULL BIT(3) #define RTL_ADVERTISED_1000_HALF BIT(4) #define RTL_ADVERTISED_1000_FULL BIT(5) +#define RTL_ADVERTISED_2500_FULL BIT(6) /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). * The RTL chips use a 64 element hash table based on the Ethernet CRC. @@ -1010,8 +1186,7 @@ enum tx_csum_stat { static const int multicast_filter_limit = 32; static unsigned int agg_buf_sz = 16384; -#define RTL_LIMITED_TSO_SIZE (agg_buf_sz - sizeof(struct tx_desc) - \ - VLAN_ETH_HLEN - ETH_FCS_LEN) +#define RTL_LIMITED_TSO_SIZE (size_to_mtu(agg_buf_sz) - sizeof(struct tx_desc)) static int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) @@ -2597,7 +2772,7 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, static void r8152b_reset_packet_filter(struct r8152 *tp) { - u32 ocp_data; + u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_FMC); ocp_data &= ~FMC_FCR_MCU_EN; @@ -2608,28 +2783,58 @@ static void r8152b_reset_packet_filter(struct r8152 *tp) static void rtl8152_nic_reset(struct r8152 *tp) { - int i; + u32 ocp_data; + int i; - ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, CR_RST); + switch (tp->version) { + case RTL_TEST_01: + case RTL_VER_10: + case RTL_VER_11: + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR); + ocp_data &= ~CR_TE; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_BMU_RESET); + ocp_data &= ~BMU_RESET_EP_IN; + ocp_write_word(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); + ocp_data |= CDC_ECM_EN; + ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR); + ocp_data &= ~CR_RE; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_BMU_RESET); + ocp_data |= BMU_RESET_EP_IN; + ocp_write_word(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); + ocp_data &= ~CDC_ECM_EN; + ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); + break; - for (i = 0; i < 1000; i++) { - if (!(ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR) & CR_RST)) - break; - usleep_range(100, 400); + default: + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, CR_RST); + + for (i = 0; i < 1000; i++) { + if (!(ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR) & CR_RST)) + break; + usleep_range(100, 400); + } + break; } } static void set_tx_qlen(struct r8152 *tp) { - struct net_device *netdev = tp->netdev; - - tp->tx_qlen = agg_buf_sz / (netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN + - sizeof(struct tx_desc)); + tp->tx_qlen = agg_buf_sz / (mtu_to_size(tp->netdev->mtu) + sizeof(struct tx_desc)); } -static inline u8 rtl8152_get_speed(struct r8152 *tp) +static inline u16 rtl8152_get_speed(struct r8152 *tp) { - return ocp_read_byte(tp, MCU_TYPE_PLA, PLA_PHYSTATUS); + return ocp_read_word(tp, MCU_TYPE_PLA, 
PLA_PHYSTATUS); } static void rtl_eee_plus_en(struct r8152 *tp, bool enable) @@ -2747,6 +2952,29 @@ static int rtl_stop_rx(struct r8152 *tp) return 0; } +static void rtl_set_ifg(struct r8152 *tp, u16 speed) +{ + u32 ocp_data; + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR1); + ocp_data &= ~IFG_MASK; + if ((speed & (_10bps | _100bps)) && !(speed & FULL_DUP)) { + ocp_data |= IFG_144NS; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_TCR1, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); + ocp_data &= ~TX10MIDLE_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); + } else { + ocp_data |= IFG_96NS; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_TCR1, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); + ocp_data |= TX10MIDLE_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); + } +} + static inline void r8153b_rx_agg_chg_indicate(struct r8152 *tp) { ocp_write_byte(tp, MCU_TYPE_USB, USB_UPT_RXDMA_OWN, @@ -2766,6 +2994,7 @@ static int rtl_enable(struct r8152 *tp) switch (tp->version) { case RTL_VER_08: case RTL_VER_09: + case RTL_VER_14: r8153b_rx_agg_chg_indicate(tp); break; default: @@ -2803,6 +3032,7 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp) case RTL_VER_08: case RTL_VER_09: + case RTL_VER_14: /* The RTL8153B uses USB_RX_EXTRA_AGGR_TMR for rx timeout * primarily. For USB_RX_EARLY_TIMEOUT, we fix it to 128ns. */ @@ -2812,6 +3042,18 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp) ocp_data); break; + case RTL_VER_10: + case RTL_VER_11: + case RTL_VER_12: + case RTL_VER_13: + case RTL_VER_15: + ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_TIMEOUT, + 640 / 8); + ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EXTRA_AGGR_TMR, + ocp_data); + r8153b_rx_agg_chg_indicate(tp); + break; + default: break; } @@ -2831,9 +3073,20 @@ static void r8153_set_rx_early_size(struct r8152 *tp) break; case RTL_VER_08: case RTL_VER_09: + case RTL_VER_14: ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data / 8); break; + case RTL_TEST_01: + case RTL_VER_10: + case RTL_VER_11: + case RTL_VER_12: + case RTL_VER_13: + case RTL_VER_15: + ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, + ocp_data / 8); + r8153b_rx_agg_chg_indicate(tp); + break; default: WARN_ON_ONCE(1); break; @@ -2842,6 +3095,8 @@ static void r8153_set_rx_early_size(struct r8152 *tp) static int rtl8153_enable(struct r8152 *tp) { + u32 ocp_data; + if (test_bit(RTL8152_UNPLUG, &tp->flags)) return -ENODEV; @@ -2850,15 +3105,20 @@ static int rtl8153_enable(struct r8152 *tp) r8153_set_rx_early_timeout(tp); r8153_set_rx_early_size(tp); - if (tp->version == RTL_VER_09) { - u32 ocp_data; + rtl_set_ifg(tp, rtl8152_get_speed(tp)); + switch (tp->version) { + case RTL_VER_09: + case RTL_VER_14: ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK); ocp_data &= ~FC_PATCH_TASK; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); usleep_range(1000, 2000); ocp_data |= FC_PATCH_TASK; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); + break; + default: + break; } return rtl_enable(tp); @@ -2923,12 +3183,40 @@ static void rtl_rx_vlan_en(struct r8152 *tp, bool enable) { u32 ocp_data; - ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR); - if (enable) - ocp_data |= CPCR_RX_VLAN; - else - ocp_data &= ~CPCR_RX_VLAN; - ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data); + switch (tp->version) { + case RTL_VER_01: + case RTL_VER_02: + case RTL_VER_03: + case RTL_VER_04: + case RTL_VER_05: + case RTL_VER_06: + case RTL_VER_07: + case RTL_VER_08: + 
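	/* Chips through RTL_VER_09 (plus the RTL8153C, RTL_VER_14) strip VLAN
	 * tags with the legacy CPCR_RX_VLAN bit; the RTL8156 family handled in
	 * the default branch below switches to PLA_RCR1 and drives both the
	 * OUTER_VLAN and INNER_VLAN bits instead.
	 */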
case RTL_VER_09: + case RTL_VER_14: + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR); + if (enable) + ocp_data |= CPCR_RX_VLAN; + else + ocp_data &= ~CPCR_RX_VLAN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data); + break; + + case RTL_TEST_01: + case RTL_VER_10: + case RTL_VER_11: + case RTL_VER_12: + case RTL_VER_13: + case RTL_VER_15: + default: + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_RCR1); + if (enable) + ocp_data |= OUTER_VLAN | INNER_VLAN; + else + ocp_data &= ~(OUTER_VLAN | INNER_VLAN); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RCR1, ocp_data); + break; + } } static int rtl8152_set_features(struct net_device *dev, @@ -3021,6 +3309,40 @@ static void __rtl_set_wol(struct r8152 *tp, u32 wolopts) device_set_wakeup_enable(&tp->udev->dev, false); } +static void r8153_mac_clk_speed_down(struct r8152 *tp, bool enable) +{ + u32 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2); + + /* MAC clock speed down */ + if (enable) + ocp_data |= MAC_CLK_SPDWN_EN; + else + ocp_data &= ~MAC_CLK_SPDWN_EN; + + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data); +} + +static void r8156_mac_clk_spd(struct r8152 *tp, bool enable) +{ + u32 ocp_data; + + /* MAC clock speed down */ + if (enable) { + /* aldps_spdwn_ratio, tp10_spdwn_ratio */ + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, + 0x0403); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2); + ocp_data &= ~EEE_SPDWN_RATIO_MASK; + ocp_data |= MAC_CLK_SPDWN_EN | 0x03; /* eee_spdwn_ratio */ + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data); + } else { + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2); + ocp_data &= ~MAC_CLK_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data); + } +} + static void r8153_u1u2en(struct r8152 *tp, bool enable) { u8 u1u2[8]; @@ -3080,6 +3402,9 @@ static void r8153b_ups_flags(struct r8152 *tp) if (tp->ups_info.eee_cmod_lv) ups_flags |= UPS_FLAGS_EEE_CMOD_LV_EN; + if (tp->ups_info.r_tune) + ups_flags |= UPS_FLAGS_R_TUNE; + if (tp->ups_info._10m_ckdiv) ups_flags |= UPS_FLAGS_EN_10M_CKDIV; @@ -3130,6 +3455,88 @@ static void r8153b_ups_flags(struct r8152 *tp) ocp_write_dword(tp, MCU_TYPE_USB, USB_UPS_FLAGS, ups_flags); } +static void r8156_ups_flags(struct r8152 *tp) +{ + u32 ups_flags = 0; + + if (tp->ups_info.green) + ups_flags |= UPS_FLAGS_EN_GREEN; + + if (tp->ups_info.aldps) + ups_flags |= UPS_FLAGS_EN_ALDPS; + + if (tp->ups_info.eee) + ups_flags |= UPS_FLAGS_EN_EEE; + + if (tp->ups_info.flow_control) + ups_flags |= UPS_FLAGS_EN_FLOW_CTR; + + if (tp->ups_info.eee_ckdiv) + ups_flags |= UPS_FLAGS_EN_EEE_CKDIV; + + if (tp->ups_info._10m_ckdiv) + ups_flags |= UPS_FLAGS_EN_10M_CKDIV; + + if (tp->ups_info.eee_plloff_100) + ups_flags |= UPS_FLAGS_EEE_PLLOFF_100; + + if (tp->ups_info.eee_plloff_giga) + ups_flags |= UPS_FLAGS_EEE_PLLOFF_GIGA; + + if (tp->ups_info._250m_ckdiv) + ups_flags |= UPS_FLAGS_250M_CKDIV; + + switch (tp->ups_info.speed_duplex) { + case FORCE_10M_HALF: + ups_flags |= ups_flags_speed(0); + break; + case FORCE_10M_FULL: + ups_flags |= ups_flags_speed(1); + break; + case FORCE_100M_HALF: + ups_flags |= ups_flags_speed(2); + break; + case FORCE_100M_FULL: + ups_flags |= ups_flags_speed(3); + break; + case NWAY_10M_HALF: + ups_flags |= ups_flags_speed(4); + break; + case NWAY_10M_FULL: + ups_flags |= ups_flags_speed(5); + break; + case NWAY_100M_HALF: + ups_flags |= ups_flags_speed(6); + break; + case NWAY_100M_FULL: + ups_flags |= ups_flags_speed(7); + break; + case NWAY_1000M_FULL: + ups_flags |= 
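		/* ups_flags_speed() shifts the selector into the 4-bit field at
		 * bits 19:16 (UPS_FLAGS_SPEED_MASK): 0-3 are the forced 10/100
		 * modes, 4-8 the NWAY modes up to 1000M full, and 9 is NWAY
		 * 2.5G full.
		 */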
ups_flags_speed(8); + break; + case NWAY_2500M_FULL: + ups_flags |= ups_flags_speed(9); + break; + default: + break; + } + + switch (tp->ups_info.lite_mode) { + case 1: + ups_flags |= 0 << 5; + break; + case 2: + ups_flags |= 2 << 5; + break; + case 0: + default: + ups_flags |= 1 << 5; + break; + } + + ocp_write_dword(tp, MCU_TYPE_USB, USB_UPS_FLAGS, ups_flags); +} + static void rtl_green_en(struct r8152 *tp, bool enable) { u16 data; @@ -3193,16 +3600,56 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable) ocp_data |= UPS_EN | USP_PREWAKE | PHASE2_EN; ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); - ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, 0xcfff); - ocp_data |= BIT(0); - ocp_write_byte(tp, MCU_TYPE_USB, 0xcfff, ocp_data); + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); + ocp_data |= UPS_FORCE_PWR_DOWN; + ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); + } else { + ocp_data &= ~(UPS_EN | USP_PREWAKE); + ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); + ocp_data &= ~UPS_FORCE_PWR_DOWN; + ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); + + if (ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0) & PCUT_STATUS) { + int i; + + for (i = 0; i < 500; i++) { + if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & + AUTOLOAD_DONE) + break; + msleep(20); + } + + tp->rtl_ops.hw_phy_cfg(tp); + + rtl8152_set_speed(tp, tp->autoneg, tp->speed, + tp->duplex, tp->advertising); + } + } +} + +static void r8153c_ups_en(struct r8152 *tp, bool enable) +{ + u32 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_POWER_CUT); + + if (enable) { + r8153b_ups_flags(tp); + + ocp_data |= UPS_EN | USP_PREWAKE | PHASE2_EN; + ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); + ocp_data |= UPS_FORCE_PWR_DOWN; + ocp_data &= ~BIT(7); + ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); } else { ocp_data &= ~(UPS_EN | USP_PREWAKE); ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); - ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, 0xcfff); - ocp_data &= ~BIT(0); - ocp_write_byte(tp, MCU_TYPE_USB, 0xcfff, ocp_data); + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); + ocp_data &= ~UPS_FORCE_PWR_DOWN; + ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); if (ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0) & PCUT_STATUS) { int i; @@ -3219,6 +3666,55 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable) rtl8152_set_speed(tp, tp->autoneg, tp->speed, tp->duplex, tp->advertising); } + + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34); + ocp_data |= BIT(8); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data); + + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); + } +} + +static void r8156_ups_en(struct r8152 *tp, bool enable) +{ + u32 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_POWER_CUT); + + if (enable) { + r8156_ups_flags(tp); + + ocp_data |= UPS_EN | USP_PREWAKE | PHASE2_EN; + ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); + ocp_data |= UPS_FORCE_PWR_DOWN; + ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); + + switch (tp->version) { + case RTL_VER_13: + case RTL_VER_15: + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPHY_XTAL); + ocp_data &= ~OOBS_POLLING; + ocp_write_byte(tp, MCU_TYPE_USB, USB_UPHY_XTAL, ocp_data); + break; + default: + break; + } + } else { + 
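		/* Leaving UPS: clear UPS_EN/USP_PREWAKE and the forced
		 * power-down bit, and if a power cut really happened
		 * (PCUT_STATUS set), reprogram the PHY and restore the
		 * configured speed.
		 */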
ocp_data &= ~(UPS_EN | USP_PREWAKE); + ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); + ocp_data &= ~UPS_FORCE_PWR_DOWN; + ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); + + if (ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0) & PCUT_STATUS) { + tp->rtl_ops.hw_phy_cfg(tp); + + rtl8152_set_speed(tp, tp->autoneg, tp->speed, + tp->duplex, tp->advertising); + } } } @@ -3351,6 +3847,38 @@ static void rtl8153b_runtime_enable(struct r8152 *tp, bool enable) } } +static void rtl8153c_runtime_enable(struct r8152 *tp, bool enable) +{ + if (enable) { + r8153_queue_wake(tp, true); + r8153b_u1u2en(tp, false); + r8153_u2p3en(tp, false); + rtl_runtime_suspend_enable(tp, true); + r8153c_ups_en(tp, true); + } else { + r8153c_ups_en(tp, false); + r8153_queue_wake(tp, false); + rtl_runtime_suspend_enable(tp, false); + r8153b_u1u2en(tp, true); + } +} + +static void rtl8156_runtime_enable(struct r8152 *tp, bool enable) +{ + if (enable) { + r8153_queue_wake(tp, true); + r8153b_u1u2en(tp, false); + r8153_u2p3en(tp, false); + rtl_runtime_suspend_enable(tp, true); + } else { + r8153_queue_wake(tp, false); + rtl_runtime_suspend_enable(tp, false); + r8153_u2p3en(tp, true); + if (tp->udev->speed >= USB_SPEED_SUPER) + r8153b_u1u2en(tp, true); + } +} + static void r8153_teredo_off(struct r8152 *tp) { u32 ocp_data; @@ -3371,14 +3899,19 @@ static void r8153_teredo_off(struct r8152 *tp) case RTL_VER_08: case RTL_VER_09: + case RTL_TEST_01: + case RTL_VER_10: + case RTL_VER_11: + case RTL_VER_12: + case RTL_VER_13: + case RTL_VER_14: + case RTL_VER_15: + default: /* The bit 0 ~ 7 are relative with teredo settings. They are * W1C (write 1 to clear), so set all 1 to disable it. */ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, 0xff); break; - - default: - break; } ocp_write_word(tp, MCU_TYPE_PLA, PLA_WDT6_CTRL, WDT6_SET_MODE); @@ -3413,6 +3946,12 @@ static void rtl_clear_bp(struct r8152 *tp, u16 type) break; case RTL_VER_08: case RTL_VER_09: + case RTL_VER_10: + case RTL_VER_11: + case RTL_VER_12: + case RTL_VER_13: + case RTL_VER_14: + case RTL_VER_15: default: if (type == MCU_TYPE_USB) { ocp_write_byte(tp, MCU_TYPE_USB, USB_BP2_EN, 0); @@ -3521,6 +4060,162 @@ static int rtl_post_ram_code(struct r8152 *tp, u16 key_addr, bool wait) return 0; } +static bool rtl8152_is_fw_phy_speed_up_ok(struct r8152 *tp, struct fw_phy_speed_up *phy) +{ + u16 fw_offset; + u32 length; + bool rc = false; + + switch (tp->version) { + case RTL_VER_01: + case RTL_VER_02: + case RTL_VER_03: + case RTL_VER_04: + case RTL_VER_05: + case RTL_VER_06: + case RTL_VER_07: + case RTL_VER_08: + case RTL_VER_09: + case RTL_VER_10: + case RTL_VER_11: + case RTL_VER_12: + case RTL_VER_14: + goto out; + case RTL_VER_13: + case RTL_VER_15: + default: + break; + } + + fw_offset = __le16_to_cpu(phy->fw_offset); + length = __le32_to_cpu(phy->blk_hdr.length); + if (fw_offset < sizeof(*phy) || length <= fw_offset) { + dev_err(&tp->intf->dev, "invalid fw_offset\n"); + goto out; + } + + length -= fw_offset; + if (length & 3) { + dev_err(&tp->intf->dev, "invalid block length\n"); + goto out; + } + + if (__le16_to_cpu(phy->fw_reg) != 0x9A00) { + dev_err(&tp->intf->dev, "invalid register to load firmware\n"); + goto out; + } + + rc = true; +out: + return rc; +} + +static bool rtl8152_is_fw_phy_ver_ok(struct r8152 *tp, struct fw_phy_ver *ver) +{ + bool rc = false; + + switch (tp->version) { + case RTL_VER_10: + case RTL_VER_11: + case RTL_VER_12: + case RTL_VER_13: + case RTL_VER_15: + break; + 
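	/* PHY version blocks are only meaningful on the RTL8156 family
	 * (RTL_VER_10..RTL_VER_13 and RTL_VER_15); every other chip
	 * version rejects them.
	 */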
default: + goto out; + } + + if (__le32_to_cpu(ver->blk_hdr.length) != sizeof(*ver)) { + dev_err(&tp->intf->dev, "invalid block length\n"); + goto out; + } + + if (__le16_to_cpu(ver->ver.addr) != SRAM_GPHY_FW_VER) { + dev_err(&tp->intf->dev, "invalid phy ver addr\n"); + goto out; + } + + rc = true; +out: + return rc; +} + +static bool rtl8152_is_fw_phy_fixup_ok(struct r8152 *tp, struct fw_phy_fixup *fix) +{ + bool rc = false; + + switch (tp->version) { + case RTL_VER_10: + case RTL_VER_11: + case RTL_VER_12: + case RTL_VER_13: + case RTL_VER_15: + break; + default: + goto out; + } + + if (__le32_to_cpu(fix->blk_hdr.length) != sizeof(*fix)) { + dev_err(&tp->intf->dev, "invalid block length\n"); + goto out; + } + + if (__le16_to_cpu(fix->setting.addr) != OCP_PHY_PATCH_CMD || + __le16_to_cpu(fix->setting.data) != BIT(7)) { + dev_err(&tp->intf->dev, "invalid phy fixup\n"); + goto out; + } + + rc = true; +out: + return rc; +} + +static bool rtl8152_is_fw_phy_union_ok(struct r8152 *tp, struct fw_phy_union *phy) +{ + u16 fw_offset; + u32 length; + bool rc = false; + + switch (tp->version) { + case RTL_VER_10: + case RTL_VER_11: + case RTL_VER_12: + case RTL_VER_13: + case RTL_VER_15: + break; + default: + goto out; + } + + fw_offset = __le16_to_cpu(phy->fw_offset); + length = __le32_to_cpu(phy->blk_hdr.length); + if (fw_offset < sizeof(*phy) || length <= fw_offset) { + dev_err(&tp->intf->dev, "invalid fw_offset\n"); + goto out; + } + + length -= fw_offset; + if (length & 1) { + dev_err(&tp->intf->dev, "invalid block length\n"); + goto out; + } + + if (phy->pre_num > 2) { + dev_err(&tp->intf->dev, "invalid pre_num %d\n", phy->pre_num); + goto out; + } + + if (phy->bp_num > 8) { + dev_err(&tp->intf->dev, "invalid bp_num %d\n", phy->bp_num); + goto out; + } + + rc = true; +out: + return rc; +} + static bool rtl8152_is_fw_phy_nc_ok(struct r8152 *tp, struct fw_phy_nc *phy) { u32 length; @@ -3622,6 +4317,11 @@ static bool rtl8152_is_fw_mac_ok(struct r8152 *tp, struct fw_mac *mac) case RTL_VER_06: case RTL_VER_08: case RTL_VER_09: + case RTL_VER_11: + case RTL_VER_12: + case RTL_VER_13: + case RTL_VER_14: + case RTL_VER_15: fw_reg = 0xf800; bp_ba_addr = PLA_BP_BA; bp_en_addr = PLA_BP_EN; @@ -3645,6 +4345,11 @@ static bool rtl8152_is_fw_mac_ok(struct r8152 *tp, struct fw_mac *mac) break; case RTL_VER_08: case RTL_VER_09: + case RTL_VER_11: + case RTL_VER_12: + case RTL_VER_13: + case RTL_VER_14: + case RTL_VER_15: fw_reg = 0xe600; bp_ba_addr = USB_BP_BA; bp_en_addr = USB_BP2_EN; @@ -3772,10 +4477,7 @@ static long rtl8152_check_firmware(struct r8152 *tp, struct rtl_fw *rtl_fw) { const struct firmware *fw = rtl_fw->fw; struct fw_header *fw_hdr = (struct fw_header *)fw->data; - struct fw_mac *pla = NULL, *usb = NULL; - struct fw_phy_patch_key *start = NULL; - struct fw_phy_nc *phy_nc = NULL; - struct fw_block *stop = NULL; + unsigned long fw_flags = 0; long ret = -EFAULT; int i; @@ -3804,50 +4506,56 @@ static long rtl8152_check_firmware(struct r8152 *tp, struct rtl_fw *rtl_fw) goto fail; goto fw_end; case RTL_FW_PLA: - if (pla) { + if (test_bit(FW_FLAGS_PLA, &fw_flags)) { dev_err(&tp->intf->dev, "multiple PLA firmware encountered"); goto fail; } - pla = (struct fw_mac *)block; - if (!rtl8152_is_fw_mac_ok(tp, pla)) { + if (!rtl8152_is_fw_mac_ok(tp, (struct fw_mac *)block)) { dev_err(&tp->intf->dev, "check PLA firmware failed\n"); goto fail; } + __set_bit(FW_FLAGS_PLA, &fw_flags); break; case RTL_FW_USB: - if (usb) { + if (test_bit(FW_FLAGS_USB, &fw_flags)) { dev_err(&tp->intf->dev, "multiple USB firmware 
encountered"); goto fail; } - usb = (struct fw_mac *)block; - if (!rtl8152_is_fw_mac_ok(tp, usb)) { + if (!rtl8152_is_fw_mac_ok(tp, (struct fw_mac *)block)) { dev_err(&tp->intf->dev, "check USB firmware failed\n"); goto fail; } + __set_bit(FW_FLAGS_USB, &fw_flags); break; case RTL_FW_PHY_START: - if (start || phy_nc || stop) { + if (test_bit(FW_FLAGS_START, &fw_flags) || + test_bit(FW_FLAGS_NC, &fw_flags) || + test_bit(FW_FLAGS_NC1, &fw_flags) || + test_bit(FW_FLAGS_NC2, &fw_flags) || + test_bit(FW_FLAGS_UC2, &fw_flags) || + test_bit(FW_FLAGS_UC, &fw_flags) || + test_bit(FW_FLAGS_STOP, &fw_flags)) { dev_err(&tp->intf->dev, "check PHY_START fail\n"); goto fail; } - if (__le32_to_cpu(block->length) != sizeof(*start)) { + if (__le32_to_cpu(block->length) != sizeof(struct fw_phy_patch_key)) { dev_err(&tp->intf->dev, "Invalid length for PHY_START\n"); goto fail; } - - start = (struct fw_phy_patch_key *)block; + __set_bit(FW_FLAGS_START, &fw_flags); break; case RTL_FW_PHY_STOP: - if (stop || !start) { + if (test_bit(FW_FLAGS_STOP, &fw_flags) || + !test_bit(FW_FLAGS_START, &fw_flags)) { dev_err(&tp->intf->dev, "Check PHY_STOP fail\n"); goto fail; @@ -3858,29 +4566,175 @@ static long rtl8152_check_firmware(struct r8152 *tp, struct rtl_fw *rtl_fw) "Invalid length for PHY_STOP\n"); goto fail; } - - stop = block; + __set_bit(FW_FLAGS_STOP, &fw_flags); break; case RTL_FW_PHY_NC: - if (!start || stop) { + if (!test_bit(FW_FLAGS_START, &fw_flags) || + test_bit(FW_FLAGS_STOP, &fw_flags)) { dev_err(&tp->intf->dev, "check PHY_NC fail\n"); goto fail; } - if (phy_nc) { + if (test_bit(FW_FLAGS_NC, &fw_flags)) { dev_err(&tp->intf->dev, "multiple PHY NC encountered\n"); goto fail; } - phy_nc = (struct fw_phy_nc *)block; - if (!rtl8152_is_fw_phy_nc_ok(tp, phy_nc)) { + if (!rtl8152_is_fw_phy_nc_ok(tp, (struct fw_phy_nc *)block)) { dev_err(&tp->intf->dev, "check PHY NC firmware failed\n"); goto fail; } + __set_bit(FW_FLAGS_NC, &fw_flags); + break; + case RTL_FW_PHY_UNION_NC: + if (!test_bit(FW_FLAGS_START, &fw_flags) || + test_bit(FW_FLAGS_NC1, &fw_flags) || + test_bit(FW_FLAGS_NC2, &fw_flags) || + test_bit(FW_FLAGS_UC2, &fw_flags) || + test_bit(FW_FLAGS_UC, &fw_flags) || + test_bit(FW_FLAGS_STOP, &fw_flags)) { + dev_err(&tp->intf->dev, "PHY_UNION_NC out of order\n"); + goto fail; + } + if (test_bit(FW_FLAGS_NC, &fw_flags)) { + dev_err(&tp->intf->dev, "multiple PHY_UNION_NC encountered\n"); + goto fail; + } + + if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { + dev_err(&tp->intf->dev, "check PHY_UNION_NC failed\n"); + goto fail; + } + __set_bit(FW_FLAGS_NC, &fw_flags); + break; + case RTL_FW_PHY_UNION_NC1: + if (!test_bit(FW_FLAGS_START, &fw_flags) || + test_bit(FW_FLAGS_NC2, &fw_flags) || + test_bit(FW_FLAGS_UC2, &fw_flags) || + test_bit(FW_FLAGS_UC, &fw_flags) || + test_bit(FW_FLAGS_STOP, &fw_flags)) { + dev_err(&tp->intf->dev, "PHY_UNION_NC1 out of order\n"); + goto fail; + } + + if (test_bit(FW_FLAGS_NC1, &fw_flags)) { + dev_err(&tp->intf->dev, "multiple PHY NC1 encountered\n"); + goto fail; + } + + if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { + dev_err(&tp->intf->dev, "check PHY_UNION_NC1 failed\n"); + goto fail; + } + __set_bit(FW_FLAGS_NC1, &fw_flags); + break; + case RTL_FW_PHY_UNION_NC2: + if (!test_bit(FW_FLAGS_START, &fw_flags) || + test_bit(FW_FLAGS_UC2, &fw_flags) || + test_bit(FW_FLAGS_UC, &fw_flags) || + test_bit(FW_FLAGS_STOP, &fw_flags)) { + dev_err(&tp->intf->dev, "PHY_UNION_NC2 out of order\n"); + goto fail; + } + + if (test_bit(FW_FLAGS_NC2, 
&fw_flags)) { + dev_err(&tp->intf->dev, "multiple PHY NC2 encountered\n"); + goto fail; + } + + if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { + dev_err(&tp->intf->dev, "check PHY_UNION_NC2 failed\n"); + goto fail; + } + __set_bit(FW_FLAGS_NC2, &fw_flags); + break; + case RTL_FW_PHY_UNION_UC2: + if (!test_bit(FW_FLAGS_START, &fw_flags) || + test_bit(FW_FLAGS_UC, &fw_flags) || + test_bit(FW_FLAGS_STOP, &fw_flags)) { + dev_err(&tp->intf->dev, "PHY_UNION_UC2 out of order\n"); + goto fail; + } + + if (test_bit(FW_FLAGS_UC2, &fw_flags)) { + dev_err(&tp->intf->dev, "multiple PHY UC2 encountered\n"); + goto fail; + } + + if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { + dev_err(&tp->intf->dev, "check PHY_UNION_UC2 failed\n"); + goto fail; + } + __set_bit(FW_FLAGS_UC2, &fw_flags); + break; + case RTL_FW_PHY_UNION_UC: + if (!test_bit(FW_FLAGS_START, &fw_flags) || + test_bit(FW_FLAGS_STOP, &fw_flags)) { + dev_err(&tp->intf->dev, "PHY_UNION_UC out of order\n"); + goto fail; + } + + if (test_bit(FW_FLAGS_UC, &fw_flags)) { + dev_err(&tp->intf->dev, "multiple PHY UC encountered\n"); + goto fail; + } + + if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { + dev_err(&tp->intf->dev, "check PHY_UNION_UC failed\n"); + goto fail; + } + __set_bit(FW_FLAGS_UC, &fw_flags); + break; + case RTL_FW_PHY_UNION_MISC: + if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { + dev_err(&tp->intf->dev, "check RTL_FW_PHY_UNION_MISC failed\n"); + goto fail; + } + break; + case RTL_FW_PHY_FIXUP: + if (!rtl8152_is_fw_phy_fixup_ok(tp, (struct fw_phy_fixup *)block)) { + dev_err(&tp->intf->dev, "check PHY fixup failed\n"); + goto fail; + } + break; + case RTL_FW_PHY_SPEED_UP: + if (test_bit(FW_FLAGS_SPEED_UP, &fw_flags)) { + dev_err(&tp->intf->dev, "multiple PHY firmware encountered"); + goto fail; + } + + if (!rtl8152_is_fw_phy_speed_up_ok(tp, (struct fw_phy_speed_up *)block)) { + dev_err(&tp->intf->dev, "check PHY speed up failed\n"); + goto fail; + } + __set_bit(FW_FLAGS_SPEED_UP, &fw_flags); + break; + case RTL_FW_PHY_VER: + if (test_bit(FW_FLAGS_START, &fw_flags) || + test_bit(FW_FLAGS_NC, &fw_flags) || + test_bit(FW_FLAGS_NC1, &fw_flags) || + test_bit(FW_FLAGS_NC2, &fw_flags) || + test_bit(FW_FLAGS_UC2, &fw_flags) || + test_bit(FW_FLAGS_UC, &fw_flags) || + test_bit(FW_FLAGS_STOP, &fw_flags)) { + dev_err(&tp->intf->dev, "Invalid order to set PHY version\n"); + goto fail; + } + + if (test_bit(FW_FLAGS_VER, &fw_flags)) { + dev_err(&tp->intf->dev, "multiple PHY version encountered"); + goto fail; + } + + if (!rtl8152_is_fw_phy_ver_ok(tp, (struct fw_phy_ver *)block)) { + dev_err(&tp->intf->dev, "check PHY version failed\n"); + goto fail; + } + __set_bit(FW_FLAGS_VER, &fw_flags); break; default: dev_warn(&tp->intf->dev, "Unknown type %u is found\n", @@ -3893,7 +4747,7 @@ static long rtl8152_check_firmware(struct r8152 *tp, struct rtl_fw *rtl_fw) } fw_end: - if ((phy_nc || start) && !stop) { + if (test_bit(FW_FLAGS_START, &fw_flags) && !test_bit(FW_FLAGS_STOP, &fw_flags)) { dev_err(&tp->intf->dev, "without PHY_STOP\n"); goto fail; } @@ -3903,6 +4757,143 @@ fail: return ret; } +static void rtl_ram_code_speed_up(struct r8152 *tp, struct fw_phy_speed_up *phy, bool wait) +{ + u32 len; + u8 *data; + + if (sram_read(tp, SRAM_GPHY_FW_VER) >= __le16_to_cpu(phy->version)) { + dev_dbg(&tp->intf->dev, "PHY firmware has been the newest\n"); + return; + } + + len = __le32_to_cpu(phy->blk_hdr.length); + len -= __le16_to_cpu(phy->fw_offset); + data = (u8 *)phy + 
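		/* fw_offset points past the fw_phy_speed_up header to the raw
		 * PHY firmware payload; len was already reduced to exclude the
		 * header.
		 */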
__le16_to_cpu(phy->fw_offset); + + if (rtl_phy_patch_request(tp, true, wait)) + return; + + while (len) { + u32 ocp_data, size; + int i; + + if (len < 2048) + size = len; + else + size = 2048; + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL); + ocp_data |= GPHY_PATCH_DONE | BACKUP_RESTRORE; + ocp_write_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL, ocp_data); + + generic_ocp_write(tp, __le16_to_cpu(phy->fw_reg), 0xff, size, data, MCU_TYPE_USB); + + data += size; + len -= size; + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_POL_GPIO_CTRL); + ocp_data |= POL_GPHY_PATCH; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_POL_GPIO_CTRL, ocp_data); + + for (i = 0; i < 1000; i++) { + if (!(ocp_read_word(tp, MCU_TYPE_PLA, PLA_POL_GPIO_CTRL) & POL_GPHY_PATCH)) + break; + } + + if (i == 1000) { + dev_err(&tp->intf->dev, "ram code speedup mode timeout\n"); + return; + } + } + + ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, tp->ocp_base); + rtl_phy_patch_request(tp, false, wait); + + if (sram_read(tp, SRAM_GPHY_FW_VER) == __le16_to_cpu(phy->version)) + dev_dbg(&tp->intf->dev, "successfully applied %s\n", phy->info); + else + dev_err(&tp->intf->dev, "ram code speedup mode fail\n"); +} + +static int rtl8152_fw_phy_ver(struct r8152 *tp, struct fw_phy_ver *phy_ver) +{ + u16 ver_addr, ver; + + ver_addr = __le16_to_cpu(phy_ver->ver.addr); + ver = __le16_to_cpu(phy_ver->ver.data); + + if (sram_read(tp, ver_addr) >= ver) { + dev_dbg(&tp->intf->dev, "PHY firmware has been the newest\n"); + return 0; + } + + sram_write(tp, ver_addr, ver); + + dev_dbg(&tp->intf->dev, "PHY firmware version %x\n", ver); + + return ver; +} + +static void rtl8152_fw_phy_fixup(struct r8152 *tp, struct fw_phy_fixup *fix) +{ + u16 addr, data; + + addr = __le16_to_cpu(fix->setting.addr); + data = ocp_reg_read(tp, addr); + + switch (__le16_to_cpu(fix->bit_cmd)) { + case FW_FIXUP_AND: + data &= __le16_to_cpu(fix->setting.data); + break; + case FW_FIXUP_OR: + data |= __le16_to_cpu(fix->setting.data); + break; + case FW_FIXUP_NOT: + data &= ~__le16_to_cpu(fix->setting.data); + break; + case FW_FIXUP_XOR: + data ^= __le16_to_cpu(fix->setting.data); + break; + default: + return; + } + + ocp_reg_write(tp, addr, data); + + dev_dbg(&tp->intf->dev, "applied ocp %x %x\n", addr, data); +} + +static void rtl8152_fw_phy_union_apply(struct r8152 *tp, struct fw_phy_union *phy) +{ + __le16 *data; + u32 length; + int i, num; + + num = phy->pre_num; + for (i = 0; i < num; i++) + sram_write(tp, __le16_to_cpu(phy->pre_set[i].addr), + __le16_to_cpu(phy->pre_set[i].data)); + + length = __le32_to_cpu(phy->blk_hdr.length); + length -= __le16_to_cpu(phy->fw_offset); + num = length / 2; + data = (__le16 *)((u8 *)phy + __le16_to_cpu(phy->fw_offset)); + + ocp_reg_write(tp, OCP_SRAM_ADDR, __le16_to_cpu(phy->fw_reg)); + for (i = 0; i < num; i++) + ocp_reg_write(tp, OCP_SRAM_DATA, __le16_to_cpu(data[i])); + + num = phy->bp_num; + for (i = 0; i < num; i++) + sram_write(tp, __le16_to_cpu(phy->bp[i].addr), __le16_to_cpu(phy->bp[i].data)); + + if (phy->bp_num && phy->bp_en.addr) + sram_write(tp, __le16_to_cpu(phy->bp_en.addr), __le16_to_cpu(phy->bp_en.data)); + + dev_dbg(&tp->intf->dev, "successfully applied %s\n", phy->info); +} + static void rtl8152_fw_phy_nc_apply(struct r8152 *tp, struct fw_phy_nc *phy) { u16 mode_reg, bp_index; @@ -3956,6 +4947,12 @@ static void rtl8152_fw_mac_apply(struct r8152 *tp, struct fw_mac *mac) return; } + fw_ver_reg = __le16_to_cpu(mac->fw_ver_reg); + if (fw_ver_reg && ocp_read_byte(tp, MCU_TYPE_USB, fw_ver_reg) >= mac->fw_ver_data) { + 
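		/* The device already runs MAC firmware at least this new, so
		 * skip the reload entirely.
		 */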
dev_dbg(&tp->intf->dev, "%s firmware has been the newest\n", type ? "PLA" : "USB"); + return; + } + rtl_clear_bp(tp, type); /* Enable backup/restore of MACDBG. This is required after clearing PLA @@ -3991,7 +4988,6 @@ static void rtl8152_fw_mac_apply(struct r8152 *tp, struct fw_mac *mac) ocp_write_word(tp, type, bp_en_addr, __le16_to_cpu(mac->bp_en_value)); - fw_ver_reg = __le16_to_cpu(mac->fw_ver_reg); if (fw_ver_reg) ocp_write_byte(tp, MCU_TYPE_USB, fw_ver_reg, mac->fw_ver_data); @@ -4006,7 +5002,7 @@ static void rtl8152_apply_firmware(struct r8152 *tp, bool power_cut) struct fw_header *fw_hdr; struct fw_phy_patch_key *key; u16 key_addr = 0; - int i; + int i, patch_phy = 1; if (IS_ERR_OR_NULL(rtl_fw->fw)) return; @@ -4028,17 +5024,40 @@ static void rtl8152_apply_firmware(struct r8152 *tp, bool power_cut) rtl8152_fw_mac_apply(tp, (struct fw_mac *)block); break; case RTL_FW_PHY_START: + if (!patch_phy) + break; key = (struct fw_phy_patch_key *)block; key_addr = __le16_to_cpu(key->key_reg); rtl_pre_ram_code(tp, key_addr, __le16_to_cpu(key->key_data), !power_cut); break; case RTL_FW_PHY_STOP: + if (!patch_phy) + break; WARN_ON(!key_addr); rtl_post_ram_code(tp, key_addr, !power_cut); break; case RTL_FW_PHY_NC: rtl8152_fw_phy_nc_apply(tp, (struct fw_phy_nc *)block); break; + case RTL_FW_PHY_VER: + patch_phy = rtl8152_fw_phy_ver(tp, (struct fw_phy_ver *)block); + break; + case RTL_FW_PHY_UNION_NC: + case RTL_FW_PHY_UNION_NC1: + case RTL_FW_PHY_UNION_NC2: + case RTL_FW_PHY_UNION_UC2: + case RTL_FW_PHY_UNION_UC: + case RTL_FW_PHY_UNION_MISC: + if (patch_phy) + rtl8152_fw_phy_union_apply(tp, (struct fw_phy_union *)block); + break; + case RTL_FW_PHY_FIXUP: + if (patch_phy) + rtl8152_fw_phy_fixup(tp, (struct fw_phy_fixup *)block); + break; + case RTL_FW_PHY_SPEED_UP: + rtl_ram_code_speed_up(tp, (struct fw_phy_speed_up *)block, !power_cut); + break; default: break; } @@ -4185,6 +5204,22 @@ static void r8153_eee_en(struct r8152 *tp, bool enable) tp->ups_info.eee = enable; } +static void r8156_eee_en(struct r8152 *tp, bool enable) +{ + u16 config; + + r8153_eee_en(tp, enable); + + config = ocp_reg_read(tp, OCP_EEE_ADV2); + + if (enable) + config |= MDIO_EEE_2_5GT; + else + config &= ~MDIO_EEE_2_5GT; + + ocp_reg_write(tp, OCP_EEE_ADV2, config); +} + static void rtl_eee_enable(struct r8152 *tp, bool enable) { switch (tp->version) { @@ -4206,6 +5241,7 @@ static void rtl_eee_enable(struct r8152 *tp, bool enable) case RTL_VER_06: case RTL_VER_08: case RTL_VER_09: + case RTL_VER_14: if (enable) { r8153_eee_en(tp, true); ocp_reg_write(tp, OCP_EEE_ADV, tp->eee_adv); @@ -4214,6 +5250,19 @@ static void rtl_eee_enable(struct r8152 *tp, bool enable) ocp_reg_write(tp, OCP_EEE_ADV, 0); } break; + case RTL_VER_10: + case RTL_VER_11: + case RTL_VER_12: + case RTL_VER_13: + case RTL_VER_15: + if (enable) { + r8156_eee_en(tp, true); + ocp_reg_write(tp, OCP_EEE_ADV, tp->eee_adv); + } else { + r8156_eee_en(tp, false); + ocp_reg_write(tp, OCP_EEE_ADV, 0); + } + break; default: break; } @@ -4260,6 +5309,20 @@ static void wait_oob_link_list_ready(struct r8152 *tp) } } +static void r8156b_wait_loading_flash(struct r8152 *tp) +{ + if ((ocp_read_word(tp, MCU_TYPE_PLA, PLA_GPHY_CTRL) & GPHY_FLASH) && + !(ocp_read_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL) & BYPASS_FLASH)) { + int i; + + for (i = 0; i < 100; i++) { + if (ocp_read_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL) & GPHY_PATCH_DONE) + break; + usleep_range(1000, 2000); + } + } +} + static void r8152b_exit_oob(struct r8152 *tp) { u32 ocp_data; @@ -4310,7 +5373,7 @@ static void 
r8152b_exit_oob(struct r8152 *tp) } /* TX share fifo free credit full threshold */ - ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, TXFIFO_THR_NORMAL); + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, TXFIFO_THR_NORMAL2); ocp_write_byte(tp, MCU_TYPE_USB, USB_TX_AGG, TX_AGG_MAX_THRESHOLD); ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_HIGH); @@ -4487,6 +5550,36 @@ static int r8153b_post_firmware_1(struct r8152 *tp) return 0; } +static int r8153c_post_firmware_1(struct r8152 *tp) +{ + u32 ocp_data; + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_CTRL); + ocp_data |= FLOW_CTRL_PATCH_2; + ocp_write_word(tp, MCU_TYPE_USB, USB_FW_CTRL, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK); + ocp_data |= FC_PATCH_TASK; + ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); + + return 0; +} + +static int r8156a_post_firmware_1(struct r8152 *tp) +{ + u32 ocp_data; + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1); + ocp_data |= FW_IP_RESET_EN; + ocp_write_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1, ocp_data); + + /* Modify U3PHY parameter for compatibility issue */ + ocp_write_dword(tp, MCU_TYPE_USB, USB_UPHY3_MDCMDIO, 0x4026840e); + ocp_write_dword(tp, MCU_TYPE_USB, USB_UPHY3_MDCMDIO, 0x4001acc9); + + return 0; +} + static void r8153_aldps_en(struct r8152 *tp, bool enable) { u16 data; @@ -4689,6 +5782,19 @@ static void r8153b_hw_phy_cfg(struct r8152 *tp) set_bit(PHY_RESET, &tp->flags); } +static void r8153c_hw_phy_cfg(struct r8152 *tp) +{ + r8153b_hw_phy_cfg(tp); + + tp->ups_info.r_tune = true; +} + +static void rtl8153_change_mtu(struct r8152 *tp) +{ + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, mtu_to_size(tp->netdev->mtu)); + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO); +} + static void r8153_first_init(struct r8152 *tp) { u32 ocp_data; @@ -4721,9 +5827,7 @@ static void r8153_first_init(struct r8152 *tp) rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX); - ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; - ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data); - ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO); + rtl8153_change_mtu(tp); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0); ocp_data |= TCR0_AUTO_FIFO; @@ -4758,8 +5862,7 @@ static void r8153_enter_oob(struct r8152 *tp) wait_oob_link_list_ready(tp); - ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; - ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, mtu_to_size(tp->netdev->mtu)); switch (tp->version) { case RTL_VER_03: @@ -4773,6 +5876,7 @@ static void r8153_enter_oob(struct r8152 *tp) case RTL_VER_08: case RTL_VER_09: + case RTL_VER_14: /* Clear teredo wake event. bit[15:8] is the teredo wakeup * type. Set it to zero. bits[7:0] are the W1C bits about * the events. Set them to all 1 to clear them. 
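The W1C convention called out in the comment above recurs throughout these hunks. As a minimal illustrative sketch (not part of the patch itself; it only reuses ocp_write_word() and PLA_TEREDO_WAKE_BASE as already used by this driver, and the helper name is made up), acknowledging such a status register looks like this:

	/* W1C (write-1-to-clear): each set bit acks one event, so writing
	 * 0x00ff clears all event bits [7:0] while zeroing the wakeup type
	 * held in bits [15:8].
	 */
	static void rtl_clear_teredo_wake(struct r8152 *tp)
	{
		ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_WAKE_BASE, 0x00ff);
	}
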
@@ -4809,6 +5913,96 @@ static void rtl8153_disable(struct r8152 *tp) r8153_aldps_en(tp, true); } +static int rtl8156_enable(struct r8152 *tp) +{ + u32 ocp_data; + u16 speed; + + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return -ENODEV; + + set_tx_qlen(tp); + rtl_set_eee_plus(tp); + r8153_set_rx_early_timeout(tp); + r8153_set_rx_early_size(tp); + + speed = rtl8152_get_speed(tp); + rtl_set_ifg(tp, speed); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); + if (speed & _2500bps) + ocp_data &= ~IDLE_SPDWN_EN; + else + ocp_data |= IDLE_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); + + if (speed & _1000bps) + ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_TXTWSYS, 0x11); + else if (speed & _500bps) + ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_TXTWSYS, 0x3d); + + if (tp->udev->speed == USB_SPEED_HIGH) { + /* USB 0xb45e[3:0] l1_nyet_hird */ + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_L1_CTRL); + ocp_data &= ~0xf; + if (is_flow_control(speed)) + ocp_data |= 0xf; + else + ocp_data |= 0x1; + ocp_write_word(tp, MCU_TYPE_USB, USB_L1_CTRL, ocp_data); + } + + return rtl_enable(tp); +} + +static int rtl8156b_enable(struct r8152 *tp) +{ + u32 ocp_data; + u16 speed; + + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return -ENODEV; + + set_tx_qlen(tp); + rtl_set_eee_plus(tp); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_RX_AGGR_NUM); + ocp_data &= ~RX_AGGR_NUM_MASK; + ocp_write_word(tp, MCU_TYPE_USB, USB_RX_AGGR_NUM, ocp_data); + + r8153_set_rx_early_timeout(tp); + r8153_set_rx_early_size(tp); + + speed = rtl8152_get_speed(tp); + rtl_set_ifg(tp, speed); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); + if (speed & _2500bps) + ocp_data &= ~IDLE_SPDWN_EN; + else + ocp_data |= IDLE_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); + + if (tp->udev->speed == USB_SPEED_HIGH) { + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_L1_CTRL); + ocp_data &= ~0xf; + if (is_flow_control(speed)) + ocp_data |= 0xf; + else + ocp_data |= 0x1; + ocp_write_word(tp, MCU_TYPE_USB, USB_L1_CTRL, ocp_data); + } + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK); + ocp_data &= ~FC_PATCH_TASK; + ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); + usleep_range(1000, 2000); + ocp_data |= FC_PATCH_TASK; + ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); + + return rtl_enable(tp); +} + static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u32 speed, u8 duplex, u32 advertising) { @@ -4857,58 +6051,73 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u32 speed, u8 duplex, tp->mii.force_media = 1; } else { - u16 anar, tmp1; + u16 orig, new1; u32 support; support = RTL_ADVERTISED_10_HALF | RTL_ADVERTISED_10_FULL | RTL_ADVERTISED_100_HALF | RTL_ADVERTISED_100_FULL; - if (tp->mii.supports_gmii) + if (tp->mii.supports_gmii) { support |= RTL_ADVERTISED_1000_FULL; + if (tp->support_2500full) + support |= RTL_ADVERTISED_2500_FULL; + } + if (!(advertising & support)) return -EINVAL; - anar = r8152_mdio_read(tp, MII_ADVERTISE); - tmp1 = anar & ~(ADVERTISE_10HALF | ADVERTISE_10FULL | + orig = r8152_mdio_read(tp, MII_ADVERTISE); + new1 = orig & ~(ADVERTISE_10HALF | ADVERTISE_10FULL | ADVERTISE_100HALF | ADVERTISE_100FULL); if (advertising & RTL_ADVERTISED_10_HALF) { - tmp1 |= ADVERTISE_10HALF; + new1 |= ADVERTISE_10HALF; tp->ups_info.speed_duplex = NWAY_10M_HALF; } if (advertising & RTL_ADVERTISED_10_FULL) { - tmp1 |= ADVERTISE_10FULL; + new1 |= ADVERTISE_10FULL; tp->ups_info.speed_duplex = NWAY_10M_FULL; } if (advertising 
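		/* Each RTL_ADVERTISED_* bit below is folded into the MII
		 * advertisement word; ups_info.speed_duplex keeps whichever
		 * mode was matched last as the UPS hint.
		 */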
& RTL_ADVERTISED_100_HALF) { - tmp1 |= ADVERTISE_100HALF; + new1 |= ADVERTISE_100HALF; tp->ups_info.speed_duplex = NWAY_100M_HALF; } if (advertising & RTL_ADVERTISED_100_FULL) { - tmp1 |= ADVERTISE_100FULL; + new1 |= ADVERTISE_100FULL; tp->ups_info.speed_duplex = NWAY_100M_FULL; } - if (anar != tmp1) { - r8152_mdio_write(tp, MII_ADVERTISE, tmp1); - tp->mii.advertising = tmp1; + if (orig != new1) { + r8152_mdio_write(tp, MII_ADVERTISE, new1); + tp->mii.advertising = new1; } if (tp->mii.supports_gmii) { - u16 gbcr; - - gbcr = r8152_mdio_read(tp, MII_CTRL1000); - tmp1 = gbcr & ~(ADVERTISE_1000FULL | + orig = r8152_mdio_read(tp, MII_CTRL1000); + new1 = orig & ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); if (advertising & RTL_ADVERTISED_1000_FULL) { - tmp1 |= ADVERTISE_1000FULL; + new1 |= ADVERTISE_1000FULL; tp->ups_info.speed_duplex = NWAY_1000M_FULL; } - if (gbcr != tmp1) - r8152_mdio_write(tp, MII_CTRL1000, tmp1); + if (orig != new1) + r8152_mdio_write(tp, MII_CTRL1000, new1); + } + + if (tp->support_2500full) { + orig = ocp_reg_read(tp, OCP_10GBT_CTRL); + new1 = orig & ~MDIO_AN_10GBT_CTRL_ADV2_5G; + + if (advertising & RTL_ADVERTISED_2500_FULL) { + new1 |= MDIO_AN_10GBT_CTRL_ADV2_5G; + tp->ups_info.speed_duplex = NWAY_2500M_FULL; + } + + if (orig != new1) + ocp_reg_write(tp, OCP_10GBT_CTRL, new1); } bmcr = BMCR_ANENABLE | BMCR_ANRESTART; @@ -5064,6 +6273,253 @@ static void rtl8153b_down(struct r8152 *tp) r8153_aldps_en(tp, true); } +static void rtl8153c_change_mtu(struct r8152 *tp) +{ + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, mtu_to_size(tp->netdev->mtu)); + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, 10 * 1024 / 64); + + ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, 512 / 64); + + /* Adjust the tx fifo free credit full threshold, otherwise + * the fifo would be too small to send a jumbo frame packet. 
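	 * (The full threshold drops from 2048 to 900 bytes once the MTU
	 * reaches 8000.)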
+ */ + if (tp->netdev->mtu < 8000) + ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_FULL, 2048 / 8); + else + ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_FULL, 900 / 8); +} + +static void rtl8153c_up(struct r8152 *tp) +{ + u32 ocp_data; + + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return; + + r8153b_u1u2en(tp, false); + r8153_u2p3en(tp, false); + r8153_aldps_en(tp, false); + + rxdy_gated_en(tp, true); + r8153_teredo_off(tp); + + ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); + ocp_data &= ~RCR_ACPT_ALL; + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); + + rtl8152_nic_reset(tp); + rtl_reset_bmu(tp); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); + ocp_data &= ~NOW_IS_OOB; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); + ocp_data &= ~MCU_BORW_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); + + wait_oob_link_list_ready(tp); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); + ocp_data |= RE_INIT_LL; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); + + wait_oob_link_list_ready(tp); + + rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX); + + rtl8153c_change_mtu(tp); + + rtl8152_nic_reset(tp); + + /* rx share fifo credit full threshold */ + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, 0x02); + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_RXFIFO_FULL, 0x08); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_NORMAL); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_NORMAL); + + ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_B); + + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34); + ocp_data |= BIT(8); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data); + + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); + ocp_data &= ~PLA_MCU_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); + + r8153_aldps_en(tp, true); + r8153b_u1u2en(tp, true); +} + +static inline u32 fc_pause_on_auto(struct r8152 *tp) +{ + return (ALIGN(mtu_to_size(tp->netdev->mtu), 1024) + 6 * 1024); +} + +static inline u32 fc_pause_off_auto(struct r8152 *tp) +{ + return (ALIGN(mtu_to_size(tp->netdev->mtu), 1024) + 14 * 1024); +} + +static void r8156_fc_parameter(struct r8152 *tp) +{ + u32 pause_on = tp->fc_pause_on ? tp->fc_pause_on : fc_pause_on_auto(tp); + u32 pause_off = tp->fc_pause_off ? 
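	/* User-supplied watermarks win; otherwise both scale with the MTU:
	 * pause-on sits 6 KB and pause-off 14 KB above one 1 KB-aligned
	 * frame (see fc_pause_on_auto/fc_pause_off_auto above).
	 */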
tp->fc_pause_off : fc_pause_off_auto(tp); + + switch (tp->version) { + case RTL_VER_10: + case RTL_VER_11: + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 8); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 8); + break; + case RTL_VER_12: + case RTL_VER_13: + case RTL_VER_15: + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 16); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 16); + break; + default: + break; + } +} + +static void rtl8156_change_mtu(struct r8152 *tp) +{ + u32 rx_max_size = mtu_to_size(tp->netdev->mtu); + + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, rx_max_size); + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO); + r8156_fc_parameter(tp); + + /* TX share fifo free credit full threshold */ + ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, 512 / 64); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_FULL, + ALIGN(rx_max_size + sizeof(struct tx_desc), 1024) / 16); +} + +static void rtl8156_up(struct r8152 *tp) +{ + u32 ocp_data; + + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return; + + r8153b_u1u2en(tp, false); + r8153_u2p3en(tp, false); + r8153_aldps_en(tp, false); + + rxdy_gated_en(tp, true); + r8153_teredo_off(tp); + + ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); + ocp_data &= ~RCR_ACPT_ALL; + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); + + rtl8152_nic_reset(tp); + rtl_reset_bmu(tp); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); + ocp_data &= ~NOW_IS_OOB; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); + ocp_data &= ~MCU_BORW_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); + + rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX); + + rtl8156_change_mtu(tp); + + switch (tp->version) { + case RTL_TEST_01: + case RTL_VER_10: + case RTL_VER_11: + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_BMU_CONFIG); + ocp_data |= ACT_ODMA; + ocp_write_word(tp, MCU_TYPE_USB, USB_BMU_CONFIG, ocp_data); + break; + default: + break; + } + + /* share FIFO settings */ + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_FULL); + ocp_data &= ~RXFIFO_FULL_MASK; + ocp_data |= 0x08; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_FULL, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); + ocp_data &= ~PLA_MCU_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_SPEED_OPTION); + ocp_data &= ~(RG_PWRDN_EN | ALL_SPEED_OFF); + ocp_write_word(tp, MCU_TYPE_USB, USB_SPEED_OPTION, ocp_data); + + ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, 0x00600400); + + if (tp->saved_wolopts != __rtl_get_wol(tp)) { + netif_warn(tp, ifup, tp->netdev, "wol setting is changed\n"); + __rtl_set_wol(tp, tp->saved_wolopts); + } + + r8153_aldps_en(tp, true); + r8153_u2p3en(tp, true); + + if (tp->udev->speed >= USB_SPEED_SUPER) + r8153b_u1u2en(tp, true); +} + +static void rtl8156_down(struct r8152 *tp) +{ + u32 ocp_data; + + if (test_bit(RTL8152_UNPLUG, &tp->flags)) { + rtl_drop_queued_tx(tp); + return; + } + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); + ocp_data |= PLA_MCU_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); + + r8153b_u1u2en(tp, false); + r8153_u2p3en(tp, false); + r8153b_power_cut_en(tp, false); + r8153_aldps_en(tp, false); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); + ocp_data &= ~NOW_IS_OOB; + ocp_write_byte(tp, MCU_TYPE_PLA, 
PLA_OOB_CTRL, ocp_data); + + rtl_disable(tp); + rtl_reset_bmu(tp); + + /* Clear teredo wake event. bit[15:8] is the teredo wakeup + * type. Set it to zero. bits[7:0] are the W1C bits about + * the events. Set them to all 1 to clear them. + */ + ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_WAKE_BASE, 0x00ff); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); + ocp_data |= NOW_IS_OOB; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); + + rtl_rx_vlan_en(tp, true); + rxdy_gated_en(tp, false); + + ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); + ocp_data |= RCR_APM | RCR_AM | RCR_AB; + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); + + r8153_aldps_en(tp, true); +} + static bool rtl8152_in_nway(struct r8152 *tp) { u16 nway_state; @@ -5094,7 +6550,7 @@ static void set_carrier(struct r8152 *tp) { struct net_device *netdev = tp->netdev; struct napi_struct *napi = &tp->napi; - u8 speed; + u16 speed; speed = rtl8152_get_speed(tp); @@ -5107,7 +6563,7 @@ static void set_carrier(struct r8152 *tp) rtl_start_rx(tp); clear_bit(RTL8152_SET_RX_MODE, &tp->flags); _rtl8152_set_rx_mode(netdev); - napi_enable(&tp->napi); + napi_enable(napi); netif_wake_queue(netdev); netif_info(tp, link, netdev, "carrier on\n"); } else if (netif_queue_stopped(netdev) && @@ -5468,14 +6924,9 @@ static void r8153_init(struct r8152 *tp) ocp_write_word(tp, MCU_TYPE_USB, USB_CONNECT_TIMER, 0x0001); - /* MAC clock speed down */ - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0); - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0); - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0); - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0); - r8153_power_cut_en(tp, false); rtl_runtime_suspend_enable(tp, false); + r8153_mac_clk_speed_down(tp, false); r8153_u1u2en(tp, true); usb_enable_lpm(tp->udev); @@ -5566,9 +7017,7 @@ static void r8153b_init(struct r8152 *tp) usb_enable_lpm(tp->udev); /* MAC clock speed down */ - ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2); - ocp_data |= MAC_CLK_SPDWN_EN; - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data); + r8153_mac_clk_speed_down(tp, true); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); ocp_data &= ~PLA_MCU_SPDWN_EN; @@ -5595,6 +7044,1102 @@ static void r8153b_init(struct r8152 *tp) tp->coalesce = 15000; /* 15 us */ } +static void r8153c_init(struct r8152 *tp) +{ + u32 ocp_data; + u16 data; + int i; + + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return; + + r8153b_u1u2en(tp, false); + + /* Disable spi_en */ + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5); + ocp_data &= ~BIT(3); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG5, ocp_data); + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, 0xcbf0); + ocp_data |= BIT(1); + ocp_write_word(tp, MCU_TYPE_USB, 0xcbf0, ocp_data); + + for (i = 0; i < 500; i++) { + if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & + AUTOLOAD_DONE) + break; + + msleep(20); + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return; + } + + data = r8153_phy_status(tp, 0); + + data = r8152_mdio_read(tp, MII_BMCR); + if (data & BMCR_PDOWN) { + data &= ~BMCR_PDOWN; + r8152_mdio_write(tp, MII_BMCR, data); + } + + data = r8153_phy_status(tp, PHY_STAT_LAN_ON); + + r8153_u2p3en(tp, false); + + /* MSC timer = 0xfff * 8ms = 32760 ms */ + ocp_write_word(tp, MCU_TYPE_USB, USB_MSC_TIMER, 0x0fff); + + r8153b_power_cut_en(tp, false); + r8153c_ups_en(tp, false); + r8153_queue_wake(tp, false); + rtl_runtime_suspend_enable(tp, false); + 
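	/* Seed PLA_EXTRA_STATUS with the real link state so the MCU's
	 * link-change polling (POLL_LINK_CHG) starts from a correct
	 * CUR_LINK_OK value.
	 */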
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS); + if (rtl8152_get_speed(tp) & LINK_STATUS) + ocp_data |= CUR_LINK_OK; + else + ocp_data &= ~CUR_LINK_OK; + + ocp_data |= POLL_LINK_CHG; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data); + + r8153b_u1u2en(tp, true); + + usb_enable_lpm(tp->udev); + + /* MAC clock speed down */ + r8153_mac_clk_speed_down(tp, true); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); + ocp_data &= ~BIT(7); + ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); + + set_bit(GREEN_ETHERNET, &tp->flags); + + /* rx aggregation */ + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); + ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); + ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); + + rtl_tally_reset(tp); + + tp->coalesce = 15000; /* 15 us */ +} + +static void r8156_hw_phy_cfg(struct r8152 *tp) +{ + u32 ocp_data; + u16 data; + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0); + if (ocp_data & PCUT_STATUS) { + ocp_data &= ~PCUT_STATUS; + ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data); + } + + data = r8153_phy_status(tp, 0); + switch (data) { + case PHY_STAT_EXT_INIT: + rtl8152_apply_firmware(tp, true); + + data = ocp_reg_read(tp, 0xa468); + data &= ~(BIT(3) | BIT(1)); + ocp_reg_write(tp, 0xa468, data); + break; + case PHY_STAT_LAN_ON: + case PHY_STAT_PWRDN: + default: + rtl8152_apply_firmware(tp, false); + break; + } + + /* disable ALDPS before updating the PHY parameters */ + r8153_aldps_en(tp, false); + + /* disable EEE before updating the PHY parameters */ + rtl_eee_enable(tp, false); + + data = r8153_phy_status(tp, PHY_STAT_LAN_ON); + WARN_ON_ONCE(data != PHY_STAT_LAN_ON); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR); + ocp_data |= PFM_PWM_SWITCH; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data); + + switch (tp->version) { + case RTL_VER_10: + data = ocp_reg_read(tp, 0xad40); + data &= ~0x3ff; + data |= BIT(7) | BIT(2); + ocp_reg_write(tp, 0xad40, data); + + data = ocp_reg_read(tp, 0xad4e); + data |= BIT(4); + ocp_reg_write(tp, 0xad4e, data); + data = ocp_reg_read(tp, 0xad16); + data &= ~0x3ff; + data |= 0x6; + ocp_reg_write(tp, 0xad16, data); + data = ocp_reg_read(tp, 0xad32); + data &= ~0x3f; + data |= 6; + ocp_reg_write(tp, 0xad32, data); + data = ocp_reg_read(tp, 0xac08); + data &= ~(BIT(12) | BIT(8)); + ocp_reg_write(tp, 0xac08, data); + data = ocp_reg_read(tp, 0xac8a); + data |= BIT(12) | BIT(13) | BIT(14); + data &= ~BIT(15); + ocp_reg_write(tp, 0xac8a, data); + data = ocp_reg_read(tp, 0xad18); + data |= BIT(10); + ocp_reg_write(tp, 0xad18, data); + data = ocp_reg_read(tp, 0xad1a); + data |= 0x3ff; + ocp_reg_write(tp, 0xad1a, data); + data = ocp_reg_read(tp, 0xad1c); + data |= 0x3ff; + ocp_reg_write(tp, 0xad1c, data); + + data = sram_read(tp, 0x80ea); + data &= ~0xff00; + data |= 0xc400; + sram_write(tp, 0x80ea, data); + data = sram_read(tp, 0x80eb); + data &= ~0x0700; + data |= 0x0300; + sram_write(tp, 0x80eb, data); + data = sram_read(tp, 0x80f8); + data &= ~0xff00; + data |= 0x1c00; + sram_write(tp, 0x80f8, data); + data = sram_read(tp, 0x80f1); + data &= ~0xff00; + data |= 0x3000; + sram_write(tp, 0x80f1, data); + + data = sram_read(tp, 0x80fe); + data &= ~0xff00; + data |= 0xa500; + sram_write(tp, 0x80fe, data); + data = sram_read(tp, 0x8102); + data &= ~0xff00; + data |= 0x5000; + sram_write(tp, 0x8102, data); + data = sram_read(tp, 0x8015); + data &= ~0xff00; + data |= 0x3300; + sram_write(tp, 0x8015, data); + data = sram_read(tp, 0x8100); + data &= ~0xff00; + 
data |= 0x7000; + sram_write(tp, 0x8100, data); + data = sram_read(tp, 0x8014); + data &= ~0xff00; + data |= 0xf000; + sram_write(tp, 0x8014, data); + data = sram_read(tp, 0x8016); + data &= ~0xff00; + data |= 0x6500; + sram_write(tp, 0x8016, data); + data = sram_read(tp, 0x80dc); + data &= ~0xff00; + data |= 0xed00; + sram_write(tp, 0x80dc, data); + data = sram_read(tp, 0x80df); + data |= BIT(8); + sram_write(tp, 0x80df, data); + data = sram_read(tp, 0x80e1); + data &= ~BIT(8); + sram_write(tp, 0x80e1, data); + + data = ocp_reg_read(tp, 0xbf06); + data &= ~0x003f; + data |= 0x0038; + ocp_reg_write(tp, 0xbf06, data); + + sram_write(tp, 0x819f, 0xddb6); + + ocp_reg_write(tp, 0xbc34, 0x5555); + data = ocp_reg_read(tp, 0xbf0a); + data &= ~0x0e00; + data |= 0x0a00; + ocp_reg_write(tp, 0xbf0a, data); + + data = ocp_reg_read(tp, 0xbd2c); + data &= ~BIT(13); + ocp_reg_write(tp, 0xbd2c, data); + break; + case RTL_VER_11: + data = ocp_reg_read(tp, 0xad16); + data |= 0x3ff; + ocp_reg_write(tp, 0xad16, data); + data = ocp_reg_read(tp, 0xad32); + data &= ~0x3f; + data |= 6; + ocp_reg_write(tp, 0xad32, data); + data = ocp_reg_read(tp, 0xac08); + data &= ~(BIT(12) | BIT(8)); + ocp_reg_write(tp, 0xac08, data); + data = ocp_reg_read(tp, 0xacc0); + data &= ~0x3; + data |= BIT(1); + ocp_reg_write(tp, 0xacc0, data); + data = ocp_reg_read(tp, 0xad40); + data &= ~0xe7; + data |= BIT(6) | BIT(2); + ocp_reg_write(tp, 0xad40, data); + data = ocp_reg_read(tp, 0xac14); + data &= ~BIT(7); + ocp_reg_write(tp, 0xac14, data); + data = ocp_reg_read(tp, 0xac80); + data &= ~(BIT(8) | BIT(9)); + ocp_reg_write(tp, 0xac80, data); + data = ocp_reg_read(tp, 0xac5e); + data &= ~0x7; + data |= BIT(1); + ocp_reg_write(tp, 0xac5e, data); + ocp_reg_write(tp, 0xad4c, 0x00a8); + ocp_reg_write(tp, 0xac5c, 0x01ff); + data = ocp_reg_read(tp, 0xac8a); + data &= ~0xf0; + data |= BIT(4) | BIT(5); + ocp_reg_write(tp, 0xac8a, data); + ocp_reg_write(tp, 0xb87c, 0x8157); + data = ocp_reg_read(tp, 0xb87e); + data &= ~0xff00; + data |= 0x0500; + ocp_reg_write(tp, 0xb87e, data); + ocp_reg_write(tp, 0xb87c, 0x8159); + data = ocp_reg_read(tp, 0xb87e); + data &= ~0xff00; + data |= 0x0700; + ocp_reg_write(tp, 0xb87e, data); + + /* AAGC */ + ocp_reg_write(tp, 0xb87c, 0x80a2); + ocp_reg_write(tp, 0xb87e, 0x0153); + ocp_reg_write(tp, 0xb87c, 0x809c); + ocp_reg_write(tp, 0xb87e, 0x0153); + + /* EEE parameter */ + ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_TXTWSYS_2P5G, 0x0056); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_USB_CFG); + ocp_data |= EN_XG_LIP | EN_G_LIP; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_USB_CFG, ocp_data); + + sram_write(tp, 0x8257, 0x020f); /* XG PLL */ + sram_write(tp, 0x80ea, 0x7843); /* GIGA Master */ + + if (rtl_phy_patch_request(tp, true, true)) + return; + + /* Advance EEE */ + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); + ocp_data |= EEE_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); + + data = ocp_reg_read(tp, OCP_DOWN_SPEED); + data &= ~(EN_EEE_100 | EN_EEE_1000); + data |= EN_10M_CLKDIV; + ocp_reg_write(tp, OCP_DOWN_SPEED, data); + tp->ups_info._10m_ckdiv = true; + tp->ups_info.eee_plloff_100 = false; + tp->ups_info.eee_plloff_giga = false; + + data = ocp_reg_read(tp, OCP_POWER_CFG); + data &= ~EEE_CLKDIV_EN; + ocp_reg_write(tp, OCP_POWER_CFG, data); + tp->ups_info.eee_ckdiv = false; + + ocp_reg_write(tp, OCP_SYSCLK_CFG, 0); + ocp_reg_write(tp, OCP_SYSCLK_CFG, sysclk_div_expo(5)); + tp->ups_info._250m_ckdiv = false; + + rtl_phy_patch_request(tp, false, true); + + /* enable 
ADC Ibias Cal */ + data = ocp_reg_read(tp, 0xd068); + data |= BIT(13); + ocp_reg_write(tp, 0xd068, data); + + /* enable Thermal Sensor */ + data = sram_read(tp, 0x81a2); + data &= ~BIT(8); + sram_write(tp, 0x81a2, data); + data = ocp_reg_read(tp, 0xb54c); + data &= ~0xff00; + data |= 0xdb00; + ocp_reg_write(tp, 0xb54c, data); + + /* Nway 2.5G Lite */ + data = ocp_reg_read(tp, 0xa454); + data &= ~BIT(0); + ocp_reg_write(tp, 0xa454, data); + + /* CS DSP solution */ + data = ocp_reg_read(tp, OCP_10GBT_CTRL); + data |= RTL_ADV2_5G_F_R; + ocp_reg_write(tp, OCP_10GBT_CTRL, data); + data = ocp_reg_read(tp, 0xad4e); + data &= ~BIT(4); + ocp_reg_write(tp, 0xad4e, data); + data = ocp_reg_read(tp, 0xa86a); + data &= ~BIT(0); + ocp_reg_write(tp, 0xa86a, data); + + /* MDI SWAP */ + if ((ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CFG) & MID_REVERSE) && + (ocp_reg_read(tp, 0xd068) & BIT(1))) { + u16 swap_a, swap_b; + + data = ocp_reg_read(tp, 0xd068); + data &= ~0x1f; + data |= 0x1; /* p0 */ + ocp_reg_write(tp, 0xd068, data); + swap_a = ocp_reg_read(tp, 0xd06a); + data &= ~0x18; + data |= 0x18; /* p3 */ + ocp_reg_write(tp, 0xd068, data); + swap_b = ocp_reg_read(tp, 0xd06a); + data &= ~0x18; /* p0 */ + ocp_reg_write(tp, 0xd068, data); + ocp_reg_write(tp, 0xd06a, + (swap_a & ~0x7ff) | (swap_b & 0x7ff)); + data |= 0x18; /* p3 */ + ocp_reg_write(tp, 0xd068, data); + ocp_reg_write(tp, 0xd06a, + (swap_b & ~0x7ff) | (swap_a & 0x7ff)); + data &= ~0x18; + data |= 0x08; /* p1 */ + ocp_reg_write(tp, 0xd068, data); + swap_a = ocp_reg_read(tp, 0xd06a); + data &= ~0x18; + data |= 0x10; /* p2 */ + ocp_reg_write(tp, 0xd068, data); + swap_b = ocp_reg_read(tp, 0xd06a); + data &= ~0x18; + data |= 0x08; /* p1 */ + ocp_reg_write(tp, 0xd068, data); + ocp_reg_write(tp, 0xd06a, + (swap_a & ~0x7ff) | (swap_b & 0x7ff)); + data &= ~0x18; + data |= 0x10; /* p2 */ + ocp_reg_write(tp, 0xd068, data); + ocp_reg_write(tp, 0xd06a, + (swap_b & ~0x7ff) | (swap_a & 0x7ff)); + swap_a = ocp_reg_read(tp, 0xbd5a); + swap_b = ocp_reg_read(tp, 0xbd5c); + ocp_reg_write(tp, 0xbd5a, (swap_a & ~0x1f1f) | + ((swap_b & 0x1f) << 8) | + ((swap_b >> 8) & 0x1f)); + ocp_reg_write(tp, 0xbd5c, (swap_b & ~0x1f1f) | + ((swap_a & 0x1f) << 8) | + ((swap_a >> 8) & 0x1f)); + swap_a = ocp_reg_read(tp, 0xbc18); + swap_b = ocp_reg_read(tp, 0xbc1a); + ocp_reg_write(tp, 0xbc18, (swap_a & ~0x1f1f) | + ((swap_b & 0x1f) << 8) | + ((swap_b >> 8) & 0x1f)); + ocp_reg_write(tp, 0xbc1a, (swap_b & ~0x1f1f) | + ((swap_a & 0x1f) << 8) | + ((swap_a >> 8) & 0x1f)); + } + break; + default: + break; + } + + rtl_green_en(tp, test_bit(GREEN_ETHERNET, &tp->flags)); + + data = ocp_reg_read(tp, 0xa428); + data &= ~BIT(9); + ocp_reg_write(tp, 0xa428, data); + data = ocp_reg_read(tp, 0xa5ea); + data &= ~BIT(0); + ocp_reg_write(tp, 0xa5ea, data); + tp->ups_info.lite_mode = 0; + + if (tp->eee_en) + rtl_eee_enable(tp, true); + + r8153_aldps_en(tp, true); + r8152b_enable_fc(tp); + r8153_u2p3en(tp, true); + + set_bit(PHY_RESET, &tp->flags); +} + +static void r8156b_hw_phy_cfg(struct r8152 *tp) +{ + u32 ocp_data; + u16 data; + + switch (tp->version) { + case RTL_VER_12: + ocp_reg_write(tp, 0xbf86, 0x9000); + data = ocp_reg_read(tp, 0xc402); + data |= BIT(10); + ocp_reg_write(tp, 0xc402, data); + data &= ~BIT(10); + ocp_reg_write(tp, 0xc402, data); + ocp_reg_write(tp, 0xbd86, 0x1010); + ocp_reg_write(tp, 0xbd88, 0x1010); + data = ocp_reg_read(tp, 0xbd4e); + data &= ~(BIT(10) | BIT(11)); + data |= BIT(11); + ocp_reg_write(tp, 0xbd4e, data); + data = ocp_reg_read(tp, 0xbf46); + data &= ~0xf00; + data |= 
0x700; + ocp_reg_write(tp, 0xbf46, data); + break; + case RTL_VER_13: + case RTL_VER_15: + r8156b_wait_loading_flash(tp); + break; + default: + break; + } + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0); + if (ocp_data & PCUT_STATUS) { + ocp_data &= ~PCUT_STATUS; + ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data); + } + + data = r8153_phy_status(tp, 0); + switch (data) { + case PHY_STAT_EXT_INIT: + rtl8152_apply_firmware(tp, true); + + data = ocp_reg_read(tp, 0xa466); + data &= ~BIT(0); + ocp_reg_write(tp, 0xa466, data); + + data = ocp_reg_read(tp, 0xa468); + data &= ~(BIT(3) | BIT(1)); + ocp_reg_write(tp, 0xa468, data); + break; + case PHY_STAT_LAN_ON: + case PHY_STAT_PWRDN: + default: + rtl8152_apply_firmware(tp, false); + break; + } + + data = r8152_mdio_read(tp, MII_BMCR); + if (data & BMCR_PDOWN) { + data &= ~BMCR_PDOWN; + r8152_mdio_write(tp, MII_BMCR, data); + } + + /* disable ALDPS before updating the PHY parameters */ + r8153_aldps_en(tp, false); + + /* disable EEE before updating the PHY parameters */ + rtl_eee_enable(tp, false); + + data = r8153_phy_status(tp, PHY_STAT_LAN_ON); + WARN_ON_ONCE(data != PHY_STAT_LAN_ON); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR); + ocp_data |= PFM_PWM_SWITCH; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data); + + switch (tp->version) { + case RTL_VER_12: + data = ocp_reg_read(tp, 0xbc08); + data |= BIT(3) | BIT(2); + ocp_reg_write(tp, 0xbc08, data); + + data = sram_read(tp, 0x8fff); + data &= ~0xff00; + data |= 0x0400; + sram_write(tp, 0x8fff, data); + + data = ocp_reg_read(tp, 0xacda); + data |= 0xff00; + ocp_reg_write(tp, 0xacda, data); + data = ocp_reg_read(tp, 0xacde); + data |= 0xf000; + ocp_reg_write(tp, 0xacde, data); + ocp_reg_write(tp, 0xac8c, 0x0ffc); + ocp_reg_write(tp, 0xac46, 0xb7b4); + ocp_reg_write(tp, 0xac50, 0x0fbc); + ocp_reg_write(tp, 0xac3c, 0x9240); + ocp_reg_write(tp, 0xac4e, 0x0db4); + ocp_reg_write(tp, 0xacc6, 0x0707); + ocp_reg_write(tp, 0xacc8, 0xa0d3); + ocp_reg_write(tp, 0xad08, 0x0007); + + ocp_reg_write(tp, 0xb87c, 0x8560); + ocp_reg_write(tp, 0xb87e, 0x19cc); + ocp_reg_write(tp, 0xb87c, 0x8562); + ocp_reg_write(tp, 0xb87e, 0x19cc); + ocp_reg_write(tp, 0xb87c, 0x8564); + ocp_reg_write(tp, 0xb87e, 0x19cc); + ocp_reg_write(tp, 0xb87c, 0x8566); + ocp_reg_write(tp, 0xb87e, 0x147d); + ocp_reg_write(tp, 0xb87c, 0x8568); + ocp_reg_write(tp, 0xb87e, 0x147d); + ocp_reg_write(tp, 0xb87c, 0x856a); + ocp_reg_write(tp, 0xb87e, 0x147d); + ocp_reg_write(tp, 0xb87c, 0x8ffe); + ocp_reg_write(tp, 0xb87e, 0x0907); + ocp_reg_write(tp, 0xb87c, 0x80d6); + ocp_reg_write(tp, 0xb87e, 0x2801); + ocp_reg_write(tp, 0xb87c, 0x80f2); + ocp_reg_write(tp, 0xb87e, 0x2801); + ocp_reg_write(tp, 0xb87c, 0x80f4); + ocp_reg_write(tp, 0xb87e, 0x6077); + ocp_reg_write(tp, 0xb506, 0x01e7); + + ocp_reg_write(tp, 0xb87c, 0x8013); + ocp_reg_write(tp, 0xb87e, 0x0700); + ocp_reg_write(tp, 0xb87c, 0x8fb9); + ocp_reg_write(tp, 0xb87e, 0x2801); + ocp_reg_write(tp, 0xb87c, 0x8fba); + ocp_reg_write(tp, 0xb87e, 0x0100); + ocp_reg_write(tp, 0xb87c, 0x8fbc); + ocp_reg_write(tp, 0xb87e, 0x1900); + ocp_reg_write(tp, 0xb87c, 0x8fbe); + ocp_reg_write(tp, 0xb87e, 0xe100); + ocp_reg_write(tp, 0xb87c, 0x8fc0); + ocp_reg_write(tp, 0xb87e, 0x0800); + ocp_reg_write(tp, 0xb87c, 0x8fc2); + ocp_reg_write(tp, 0xb87e, 0xe500); + ocp_reg_write(tp, 0xb87c, 0x8fc4); + ocp_reg_write(tp, 0xb87e, 0x0f00); + ocp_reg_write(tp, 0xb87c, 0x8fc6); + ocp_reg_write(tp, 0xb87e, 0xf100); + ocp_reg_write(tp, 0xb87c, 0x8fc8); + ocp_reg_write(tp, 0xb87e, 0x0400); 
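[Editor's note: the long runs of ocp_reg_write() pairs above and below all follow one idiom — 0xb87c behaves as an address latch selecting an indirect PHY parameter, and 0xb87e as the data window for whatever 0xb87c last selected. A minimal sketch of a wrapper for that idiom; it is not part of this patch, the helper name and the latch/window interpretation are assumptions read off the access pattern, and it leans on the driver's existing ocp_reg_write():

	static void r8156_phy_param_write(struct r8152 *tp, u16 addr, u16 data)
	{
		/* select the indirect PHY parameter via the address latch */
		ocp_reg_write(tp, 0xb87c, addr);
		/* then write its value through the data window */
		ocp_reg_write(tp, 0xb87e, data);
	}

With such a helper the pair just above would collapse to r8156_phy_param_write(tp, 0x8fc8, 0x0400); the patch keeps the writes inline, presumably so the source mirrors the vendor parameter tables entry for entry.]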
+ ocp_reg_write(tp, 0xb87c, 0x8fca); + ocp_reg_write(tp, 0xb87e, 0xf300); + ocp_reg_write(tp, 0xb87c, 0x8fcc); + ocp_reg_write(tp, 0xb87e, 0xfd00); + ocp_reg_write(tp, 0xb87c, 0x8fce); + ocp_reg_write(tp, 0xb87e, 0xff00); + ocp_reg_write(tp, 0xb87c, 0x8fd0); + ocp_reg_write(tp, 0xb87e, 0xfb00); + ocp_reg_write(tp, 0xb87c, 0x8fd2); + ocp_reg_write(tp, 0xb87e, 0x0100); + ocp_reg_write(tp, 0xb87c, 0x8fd4); + ocp_reg_write(tp, 0xb87e, 0xf400); + ocp_reg_write(tp, 0xb87c, 0x8fd6); + ocp_reg_write(tp, 0xb87e, 0xff00); + ocp_reg_write(tp, 0xb87c, 0x8fd8); + ocp_reg_write(tp, 0xb87e, 0xf600); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_USB_CFG); + ocp_data |= EN_XG_LIP | EN_G_LIP; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_USB_CFG, ocp_data); + ocp_reg_write(tp, 0xb87c, 0x813d); + ocp_reg_write(tp, 0xb87e, 0x390e); + ocp_reg_write(tp, 0xb87c, 0x814f); + ocp_reg_write(tp, 0xb87e, 0x790e); + ocp_reg_write(tp, 0xb87c, 0x80b0); + ocp_reg_write(tp, 0xb87e, 0x0f31); + data = ocp_reg_read(tp, 0xbf4c); + data |= BIT(1); + ocp_reg_write(tp, 0xbf4c, data); + data = ocp_reg_read(tp, 0xbcca); + data |= BIT(9) | BIT(8); + ocp_reg_write(tp, 0xbcca, data); + ocp_reg_write(tp, 0xb87c, 0x8141); + ocp_reg_write(tp, 0xb87e, 0x320e); + ocp_reg_write(tp, 0xb87c, 0x8153); + ocp_reg_write(tp, 0xb87e, 0x720e); + ocp_reg_write(tp, 0xb87c, 0x8529); + ocp_reg_write(tp, 0xb87e, 0x050e); + data = ocp_reg_read(tp, OCP_EEE_CFG); + data &= ~CTAP_SHORT_EN; + ocp_reg_write(tp, OCP_EEE_CFG, data); + + sram_write(tp, 0x816c, 0xc4a0); + sram_write(tp, 0x8170, 0xc4a0); + sram_write(tp, 0x8174, 0x04a0); + sram_write(tp, 0x8178, 0x04a0); + sram_write(tp, 0x817c, 0x0719); + sram_write(tp, 0x8ff4, 0x0400); + sram_write(tp, 0x8ff1, 0x0404); + + ocp_reg_write(tp, 0xbf4a, 0x001b); + ocp_reg_write(tp, 0xb87c, 0x8033); + ocp_reg_write(tp, 0xb87e, 0x7c13); + ocp_reg_write(tp, 0xb87c, 0x8037); + ocp_reg_write(tp, 0xb87e, 0x7c13); + ocp_reg_write(tp, 0xb87c, 0x803b); + ocp_reg_write(tp, 0xb87e, 0xfc32); + ocp_reg_write(tp, 0xb87c, 0x803f); + ocp_reg_write(tp, 0xb87e, 0x7c13); + ocp_reg_write(tp, 0xb87c, 0x8043); + ocp_reg_write(tp, 0xb87e, 0x7c13); + ocp_reg_write(tp, 0xb87c, 0x8047); + ocp_reg_write(tp, 0xb87e, 0x7c13); + + ocp_reg_write(tp, 0xb87c, 0x8145); + ocp_reg_write(tp, 0xb87e, 0x370e); + ocp_reg_write(tp, 0xb87c, 0x8157); + ocp_reg_write(tp, 0xb87e, 0x770e); + ocp_reg_write(tp, 0xb87c, 0x8169); + ocp_reg_write(tp, 0xb87e, 0x0d0a); + ocp_reg_write(tp, 0xb87c, 0x817b); + ocp_reg_write(tp, 0xb87e, 0x1d0a); + + data = sram_read(tp, 0x8217); + data &= ~0xff00; + data |= 0x5000; + sram_write(tp, 0x8217, data); + data = sram_read(tp, 0x821a); + data &= ~0xff00; + data |= 0x5000; + sram_write(tp, 0x821a, data); + sram_write(tp, 0x80da, 0x0403); + data = sram_read(tp, 0x80dc); + data &= ~0xff00; + data |= 0x1000; + sram_write(tp, 0x80dc, data); + sram_write(tp, 0x80b3, 0x0384); + sram_write(tp, 0x80b7, 0x2007); + data = sram_read(tp, 0x80ba); + data &= ~0xff00; + data |= 0x6c00; + sram_write(tp, 0x80ba, data); + sram_write(tp, 0x80b5, 0xf009); + data = sram_read(tp, 0x80bd); + data &= ~0xff00; + data |= 0x9f00; + sram_write(tp, 0x80bd, data); + sram_write(tp, 0x80c7, 0xf083); + sram_write(tp, 0x80dd, 0x03f0); + data = sram_read(tp, 0x80df); + data &= ~0xff00; + data |= 0x1000; + sram_write(tp, 0x80df, data); + sram_write(tp, 0x80cb, 0x2007); + data = sram_read(tp, 0x80ce); + data &= ~0xff00; + data |= 0x6c00; + sram_write(tp, 0x80ce, data); + sram_write(tp, 0x80c9, 0x8009); + data = sram_read(tp, 0x80d1); + data &= ~0xff00; + data |= 0x8000; 
+ sram_write(tp, 0x80d1, data); + sram_write(tp, 0x80a3, 0x200a); + sram_write(tp, 0x80a5, 0xf0ad); + sram_write(tp, 0x809f, 0x6073); + sram_write(tp, 0x80a1, 0x000b); + data = sram_read(tp, 0x80a9); + data &= ~0xff00; + data |= 0xc000; + sram_write(tp, 0x80a9, data); + + if (rtl_phy_patch_request(tp, true, true)) + return; + + data = ocp_reg_read(tp, 0xb896); + data &= ~BIT(0); + ocp_reg_write(tp, 0xb896, data); + data = ocp_reg_read(tp, 0xb892); + data &= ~0xff00; + ocp_reg_write(tp, 0xb892, data); + ocp_reg_write(tp, 0xb88e, 0xc23e); + ocp_reg_write(tp, 0xb890, 0x0000); + ocp_reg_write(tp, 0xb88e, 0xc240); + ocp_reg_write(tp, 0xb890, 0x0103); + ocp_reg_write(tp, 0xb88e, 0xc242); + ocp_reg_write(tp, 0xb890, 0x0507); + ocp_reg_write(tp, 0xb88e, 0xc244); + ocp_reg_write(tp, 0xb890, 0x090b); + ocp_reg_write(tp, 0xb88e, 0xc246); + ocp_reg_write(tp, 0xb890, 0x0c0e); + ocp_reg_write(tp, 0xb88e, 0xc248); + ocp_reg_write(tp, 0xb890, 0x1012); + ocp_reg_write(tp, 0xb88e, 0xc24a); + ocp_reg_write(tp, 0xb890, 0x1416); + data = ocp_reg_read(tp, 0xb896); + data |= BIT(0); + ocp_reg_write(tp, 0xb896, data); + + rtl_phy_patch_request(tp, false, true); + + data = ocp_reg_read(tp, 0xa86a); + data |= BIT(0); + ocp_reg_write(tp, 0xa86a, data); + data = ocp_reg_read(tp, 0xa6f0); + data |= BIT(0); + ocp_reg_write(tp, 0xa6f0, data); + + ocp_reg_write(tp, 0xbfa0, 0xd70d); + ocp_reg_write(tp, 0xbfa2, 0x4100); + ocp_reg_write(tp, 0xbfa4, 0xe868); + ocp_reg_write(tp, 0xbfa6, 0xdc59); + ocp_reg_write(tp, 0xb54c, 0x3c18); + data = ocp_reg_read(tp, 0xbfa4); + data &= ~BIT(5); + ocp_reg_write(tp, 0xbfa4, data); + data = sram_read(tp, 0x817d); + data |= BIT(12); + sram_write(tp, 0x817d, data); + break; + case RTL_VER_13: + /* 2.5G INRX */ + data = ocp_reg_read(tp, 0xac46); + data &= ~0x00f0; + data |= 0x0090; + ocp_reg_write(tp, 0xac46, data); + data = ocp_reg_read(tp, 0xad30); + data &= ~0x0003; + data |= 0x0001; + ocp_reg_write(tp, 0xad30, data); + fallthrough; + case RTL_VER_15: + /* EEE parameter */ + ocp_reg_write(tp, 0xb87c, 0x80f5); + ocp_reg_write(tp, 0xb87e, 0x760e); + ocp_reg_write(tp, 0xb87c, 0x8107); + ocp_reg_write(tp, 0xb87e, 0x360e); + ocp_reg_write(tp, 0xb87c, 0x8551); + data = ocp_reg_read(tp, 0xb87e); + data &= ~0xff00; + data |= 0x0800; + ocp_reg_write(tp, 0xb87e, data); + + /* ADC_PGA parameter */ + data = ocp_reg_read(tp, 0xbf00); + data &= ~0xe000; + data |= 0xa000; + ocp_reg_write(tp, 0xbf00, data); + data = ocp_reg_read(tp, 0xbf46); + data &= ~0x0f00; + data |= 0x0300; + ocp_reg_write(tp, 0xbf46, data); + + /* Green Table-PGA, 1G full viterbi */ + sram_write(tp, 0x8044, 0x2417); + sram_write(tp, 0x804a, 0x2417); + sram_write(tp, 0x8050, 0x2417); + sram_write(tp, 0x8056, 0x2417); + sram_write(tp, 0x805c, 0x2417); + sram_write(tp, 0x8062, 0x2417); + sram_write(tp, 0x8068, 0x2417); + sram_write(tp, 0x806e, 0x2417); + sram_write(tp, 0x8074, 0x2417); + sram_write(tp, 0x807a, 0x2417); + + /* XG PLL */ + data = ocp_reg_read(tp, 0xbf84); + data &= ~0xe000; + data |= 0xa000; + ocp_reg_write(tp, 0xbf84, data); + break; + default: + break; + } + + if (rtl_phy_patch_request(tp, true, true)) + return; + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); + ocp_data |= EEE_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); + + data = ocp_reg_read(tp, OCP_DOWN_SPEED); + data &= ~(EN_EEE_100 | EN_EEE_1000); + data |= EN_10M_CLKDIV; + ocp_reg_write(tp, OCP_DOWN_SPEED, data); + tp->ups_info._10m_ckdiv = true; + tp->ups_info.eee_plloff_100 = false; + 
tp->ups_info.eee_plloff_giga = false; + + data = ocp_reg_read(tp, OCP_POWER_CFG); + data &= ~EEE_CLKDIV_EN; + ocp_reg_write(tp, OCP_POWER_CFG, data); + tp->ups_info.eee_ckdiv = false; + + rtl_phy_patch_request(tp, false, true); + + rtl_green_en(tp, test_bit(GREEN_ETHERNET, &tp->flags)); + + data = ocp_reg_read(tp, 0xa428); + data &= ~BIT(9); + ocp_reg_write(tp, 0xa428, data); + data = ocp_reg_read(tp, 0xa5ea); + data &= ~BIT(0); + ocp_reg_write(tp, 0xa5ea, data); + tp->ups_info.lite_mode = 0; + + if (tp->eee_en) + rtl_eee_enable(tp, true); + + r8153_aldps_en(tp, true); + r8152b_enable_fc(tp); + r8153_u2p3en(tp, true); + + set_bit(PHY_RESET, &tp->flags); +} + +static void r8156_init(struct r8152 *tp) +{ + u32 ocp_data; + u16 data; + int i; + + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return; + + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_ECM_OP); + ocp_data &= ~EN_ALL_SPEED; + ocp_write_byte(tp, MCU_TYPE_USB, USB_ECM_OP, ocp_data); + + ocp_write_word(tp, MCU_TYPE_USB, USB_SPEED_OPTION, 0); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_ECM_OPTION); + ocp_data |= BYPASS_MAC_RESET; + ocp_write_word(tp, MCU_TYPE_USB, USB_ECM_OPTION, ocp_data); + + r8153b_u1u2en(tp, false); + + for (i = 0; i < 500; i++) { + if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & + AUTOLOAD_DONE) + break; + + msleep(20); + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return; + } + + data = r8153_phy_status(tp, 0); + if (data == PHY_STAT_EXT_INIT) { + data = ocp_reg_read(tp, 0xa468); + data &= ~(BIT(3) | BIT(1)); + ocp_reg_write(tp, 0xa468, data); + } + + data = r8152_mdio_read(tp, MII_BMCR); + if (data & BMCR_PDOWN) { + data &= ~BMCR_PDOWN; + r8152_mdio_write(tp, MII_BMCR, data); + } + + data = r8153_phy_status(tp, PHY_STAT_LAN_ON); + WARN_ON_ONCE(data != PHY_STAT_LAN_ON); + + r8153_u2p3en(tp, false); + + /* MSC timer = 0xfff * 8ms = 32760 ms */ + ocp_write_word(tp, MCU_TYPE_USB, USB_MSC_TIMER, 0x0fff); + + /* U1/U2/L1 idle timer. 
500 us */ + ocp_write_word(tp, MCU_TYPE_USB, USB_U1U2_TIMER, 500); + + r8153b_power_cut_en(tp, false); + r8156_ups_en(tp, false); + r8153_queue_wake(tp, false); + rtl_runtime_suspend_enable(tp, false); + + if (tp->udev->speed >= USB_SPEED_SUPER) + r8153b_u1u2en(tp, true); + + usb_enable_lpm(tp->udev); + + r8156_mac_clk_spd(tp, true); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); + ocp_data &= ~PLA_MCU_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS); + if (rtl8152_get_speed(tp) & LINK_STATUS) + ocp_data |= CUR_LINK_OK; + else + ocp_data &= ~CUR_LINK_OK; + ocp_data |= POLL_LINK_CHG; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data); + + set_bit(GREEN_ETHERNET, &tp->flags); + + /* rx aggregation */ + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); + ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); + ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_BMU_CONFIG); + ocp_data |= ACT_ODMA; + ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_CONFIG, ocp_data); + + rtl_tally_reset(tp); + + tp->coalesce = 15000; /* 15 us */ +} + +static void r8156b_init(struct r8152 *tp) +{ + u32 ocp_data; + u16 data; + int i; + + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return; + + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_ECM_OP); + ocp_data &= ~EN_ALL_SPEED; + ocp_write_byte(tp, MCU_TYPE_USB, USB_ECM_OP, ocp_data); + + ocp_write_word(tp, MCU_TYPE_USB, USB_SPEED_OPTION, 0); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_ECM_OPTION); + ocp_data |= BYPASS_MAC_RESET; + ocp_write_word(tp, MCU_TYPE_USB, USB_ECM_OPTION, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL); + ocp_data |= RX_DETECT8; + ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data); + + r8153b_u1u2en(tp, false); + + switch (tp->version) { + case RTL_VER_13: + case RTL_VER_15: + r8156b_wait_loading_flash(tp); + break; + default: + break; + } + + for (i = 0; i < 500; i++) { + if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & + AUTOLOAD_DONE) + break; + + msleep(20); + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return; + } + + data = r8153_phy_status(tp, 0); + if (data == PHY_STAT_EXT_INIT) { + data = ocp_reg_read(tp, 0xa468); + data &= ~(BIT(3) | BIT(1)); + ocp_reg_write(tp, 0xa468, data); + + data = ocp_reg_read(tp, 0xa466); + data &= ~BIT(0); + ocp_reg_write(tp, 0xa466, data); + } + + data = r8152_mdio_read(tp, MII_BMCR); + if (data & BMCR_PDOWN) { + data &= ~BMCR_PDOWN; + r8152_mdio_write(tp, MII_BMCR, data); + } + + data = r8153_phy_status(tp, PHY_STAT_LAN_ON); + + r8153_u2p3en(tp, false); + + /* MSC timer = 0xfff * 8ms = 32760 ms */ + ocp_write_word(tp, MCU_TYPE_USB, USB_MSC_TIMER, 0x0fff); + + /* U1/U2/L1 idle timer. 500 us */ + ocp_write_word(tp, MCU_TYPE_USB, USB_U1U2_TIMER, 500); + + r8153b_power_cut_en(tp, false); + r8156_ups_en(tp, false); + r8153_queue_wake(tp, false); + rtl_runtime_suspend_enable(tp, false); + + if (tp->udev->speed >= USB_SPEED_SUPER) + r8153b_u1u2en(tp, true); + + usb_enable_lpm(tp->udev); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_RCR); + ocp_data &= ~SLOT_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR); + ocp_data |= FLOW_CTRL_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data); + + /* enable fc timer and set timer to 600 ms. 
*/ + ocp_write_word(tp, MCU_TYPE_USB, USB_FC_TIMER, + CTRL_TIMER_EN | (600 / 8)); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_CTRL); + if (!(ocp_read_word(tp, MCU_TYPE_PLA, PLA_POL_GPIO_CTRL) & DACK_DET_EN)) + ocp_data |= FLOW_CTRL_PATCH_2; + ocp_data &= ~AUTO_SPEEDUP; + ocp_write_word(tp, MCU_TYPE_USB, USB_FW_CTRL, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK); + ocp_data |= FC_PATCH_TASK; + ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); + + r8156_mac_clk_spd(tp, true); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); + ocp_data &= ~PLA_MCU_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS); + if (rtl8152_get_speed(tp) & LINK_STATUS) + ocp_data |= CUR_LINK_OK; + else + ocp_data &= ~CUR_LINK_OK; + ocp_data |= POLL_LINK_CHG; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data); + + set_bit(GREEN_ETHERNET, &tp->flags); + + /* rx aggregation */ + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); + ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); + ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); + + rtl_tally_reset(tp); + + tp->coalesce = 15000; /* 15 us */ +} + +static bool rtl_vendor_mode(struct usb_interface *intf) +{ + struct usb_host_interface *alt = intf->cur_altsetting; + struct usb_device *udev; + struct usb_host_config *c; + int i, num_configs; + + if (alt->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC) + return true; + + /* The vendor mode is not always config #1, so to find it out. */ + udev = interface_to_usbdev(intf); + c = udev->config; + num_configs = udev->descriptor.bNumConfigurations; + for (i = 0; i < num_configs; (i++, c++)) { + struct usb_interface_descriptor *desc = NULL; + + if (c->desc.bNumInterfaces > 0) + desc = &c->intf_cache[0]->altsetting->desc; + else + continue; + + if (desc->bInterfaceClass == USB_CLASS_VENDOR_SPEC) { + usb_driver_set_configuration(udev, c->desc.bConfigurationValue); + break; + } + } + + WARN_ON_ONCE(i == num_configs); + + return false; +} + static int rtl8152_pre_reset(struct usb_interface *intf) { struct r8152 *tp = usb_get_intfdata(intf); @@ -5958,6 +8503,22 @@ int rtl8152_get_link_ksettings(struct net_device *netdev, mii_ethtool_get_link_ksettings(&tp->mii, cmd); + linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, + cmd->link_modes.supported, tp->support_2500full); + + if (tp->support_2500full) { + linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, + cmd->link_modes.advertising, + ocp_reg_read(tp, OCP_10GBT_CTRL) & MDIO_AN_10GBT_CTRL_ADV2_5G); + + linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, + cmd->link_modes.lp_advertising, + ocp_reg_read(tp, OCP_10GBT_STAT) & MDIO_AN_10GBT_STAT_LP2_5G); + + if (is_speed_2500(rtl8152_get_speed(tp))) + cmd->base.speed = SPEED_2500; + } + mutex_unlock(&tp->control); usb_autopm_put_interface(tp->intf); @@ -6001,6 +8562,10 @@ static int rtl8152_set_link_ksettings(struct net_device *dev, cmd->link_modes.advertising)) advertising |= RTL_ADVERTISED_1000_FULL; + if (test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, + cmd->link_modes.advertising)) + advertising |= RTL_ADVERTISED_2500_FULL; + mutex_lock(&tp->control); ret = rtl8152_set_speed(tp, cmd->base.autoneg, cmd->base.speed, @@ -6459,12 +9024,21 @@ static int rtl8152_change_mtu(struct net_device *dev, int new_mtu) dev->mtu = new_mtu; if (netif_running(dev)) { - u32 rms = new_mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; - - ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, rms); + if 
(tp->rtl_ops.change_mtu) + tp->rtl_ops.change_mtu(tp); - if (netif_carrier_ok(dev)) - r8153_set_rx_early_size(tp); + if (netif_carrier_ok(dev)) { + netif_stop_queue(dev); + napi_disable(&tp->napi); + tasklet_disable(&tp->tx_tl); + tp->rtl_ops.disable(tp); + tp->rtl_ops.enable(tp); + rtl_start_rx(tp); + tasklet_enable(&tp->tx_tl); + napi_enable(&tp->napi); + rtl8152_set_rx_mode(dev); + netif_wake_queue(dev); + } } mutex_unlock(&tp->control); @@ -6553,6 +9127,7 @@ static int rtl_ops_init(struct r8152 *tp) ops->in_nway = rtl8153_in_nway; ops->hw_phy_cfg = r8153_hw_phy_cfg; ops->autosuspend_en = rtl8153_runtime_enable; + ops->change_mtu = rtl8153_change_mtu; if (tp->udev->speed < USB_SPEED_SUPER) tp->rx_buf_sz = 16 * 1024; else @@ -6574,6 +9149,68 @@ static int rtl_ops_init(struct r8152 *tp) ops->in_nway = rtl8153_in_nway; ops->hw_phy_cfg = r8153b_hw_phy_cfg; ops->autosuspend_en = rtl8153b_runtime_enable; + ops->change_mtu = rtl8153_change_mtu; + tp->rx_buf_sz = 32 * 1024; + tp->eee_en = true; + tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; + break; + + case RTL_VER_11: + tp->eee_en = true; + tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; + fallthrough; + case RTL_VER_10: + ops->init = r8156_init; + ops->enable = rtl8156_enable; + ops->disable = rtl8153_disable; + ops->up = rtl8156_up; + ops->down = rtl8156_down; + ops->unload = rtl8153_unload; + ops->eee_get = r8153_get_eee; + ops->eee_set = r8152_set_eee; + ops->in_nway = rtl8153_in_nway; + ops->hw_phy_cfg = r8156_hw_phy_cfg; + ops->autosuspend_en = rtl8156_runtime_enable; + ops->change_mtu = rtl8156_change_mtu; + tp->rx_buf_sz = 48 * 1024; + tp->support_2500full = 1; + break; + + case RTL_VER_12: + case RTL_VER_13: + tp->support_2500full = 1; + fallthrough; + case RTL_VER_15: + tp->eee_en = true; + tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; + ops->init = r8156b_init; + ops->enable = rtl8156b_enable; + ops->disable = rtl8153_disable; + ops->up = rtl8156_up; + ops->down = rtl8156_down; + ops->unload = rtl8153_unload; + ops->eee_get = r8153_get_eee; + ops->eee_set = r8152_set_eee; + ops->in_nway = rtl8153_in_nway; + ops->hw_phy_cfg = r8156b_hw_phy_cfg; + ops->autosuspend_en = rtl8156_runtime_enable; + ops->change_mtu = rtl8156_change_mtu; + tp->rx_buf_sz = 48 * 1024; + break; + + case RTL_VER_14: + ops->init = r8153c_init; + ops->enable = rtl8153_enable; + ops->disable = rtl8153_disable; + ops->up = rtl8153c_up; + ops->down = rtl8153b_down; + ops->unload = rtl8153_unload; + ops->eee_get = r8153_get_eee; + ops->eee_set = r8152_set_eee; + ops->in_nway = rtl8153_in_nway; + ops->hw_phy_cfg = r8153c_hw_phy_cfg; + ops->autosuspend_en = rtl8153c_runtime_enable; + ops->change_mtu = rtl8153c_change_mtu; tp->rx_buf_sz = 32 * 1024; tp->eee_en = true; tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; @@ -6592,11 +9229,17 @@ static int rtl_ops_init(struct r8152 *tp) #define FIRMWARE_8153A_3 "rtl_nic/rtl8153a-3.fw" #define FIRMWARE_8153A_4 "rtl_nic/rtl8153a-4.fw" #define FIRMWARE_8153B_2 "rtl_nic/rtl8153b-2.fw" +#define FIRMWARE_8153C_1 "rtl_nic/rtl8153c-1.fw" +#define FIRMWARE_8156A_2 "rtl_nic/rtl8156a-2.fw" +#define FIRMWARE_8156B_2 "rtl_nic/rtl8156b-2.fw" MODULE_FIRMWARE(FIRMWARE_8153A_2); MODULE_FIRMWARE(FIRMWARE_8153A_3); MODULE_FIRMWARE(FIRMWARE_8153A_4); MODULE_FIRMWARE(FIRMWARE_8153B_2); +MODULE_FIRMWARE(FIRMWARE_8153C_1); +MODULE_FIRMWARE(FIRMWARE_8156A_2); +MODULE_FIRMWARE(FIRMWARE_8156B_2); static int rtl_fw_init(struct r8152 *tp) { @@ -6622,6 +9265,19 @@ static int rtl_fw_init(struct r8152 *tp) rtl_fw->pre_fw = r8153b_pre_firmware_1; 
rtl_fw->post_fw = r8153b_post_firmware_1; break; + case RTL_VER_11: + rtl_fw->fw_name = FIRMWARE_8156A_2; + rtl_fw->post_fw = r8156a_post_firmware_1; + break; + case RTL_VER_13: + case RTL_VER_15: + rtl_fw->fw_name = FIRMWARE_8156B_2; + break; + case RTL_VER_14: + rtl_fw->fw_name = FIRMWARE_8153C_1; + rtl_fw->pre_fw = r8153b_pre_firmware_1; + rtl_fw->post_fw = r8153c_post_firmware_1; + break; default: break; } @@ -6677,6 +9333,27 @@ u8 rtl8152_get_version(struct usb_interface *intf) case 0x6010: version = RTL_VER_09; break; + case 0x7010: + version = RTL_TEST_01; + break; + case 0x7020: + version = RTL_VER_10; + break; + case 0x7030: + version = RTL_VER_11; + break; + case 0x7400: + version = RTL_VER_12; + break; + case 0x7410: + version = RTL_VER_13; + break; + case 0x6400: + version = RTL_VER_14; + break; + case 0x7420: + version = RTL_VER_15; + break; default: version = RTL_VER_UNKNOWN; dev_info(&intf->dev, "Unknown version 0x%04x\n", ocp_data); @@ -6701,10 +9378,8 @@ static int rtl8152_probe(struct usb_interface *intf, if (version == RTL_VER_UNKNOWN) return -ENODEV; - if (udev->actconfig->desc.bConfigurationValue != 1) { - usb_driver_set_configuration(udev, 1); + if (!rtl_vendor_mode(intf)) return -ENODEV; - } if (intf->cur_altsetting->desc.bNumEndpoints < 3) return -ENODEV; @@ -6789,12 +9464,29 @@ static int rtl8152_probe(struct usb_interface *intf, /* MTU range: 68 - 1500 or 9194 */ netdev->min_mtu = ETH_MIN_MTU; switch (tp->version) { + case RTL_VER_03: + case RTL_VER_04: + case RTL_VER_05: + case RTL_VER_06: + case RTL_VER_08: + case RTL_VER_09: + case RTL_VER_14: + netdev->max_mtu = size_to_mtu(9 * 1024); + break; + case RTL_VER_10: + case RTL_VER_11: + netdev->max_mtu = size_to_mtu(15 * 1024); + break; + case RTL_VER_12: + case RTL_VER_13: + case RTL_VER_15: + netdev->max_mtu = size_to_mtu(16 * 1024); + break; case RTL_VER_01: case RTL_VER_02: - netdev->max_mtu = ETH_DATA_LEN; - break; + case RTL_VER_07: default: - netdev->max_mtu = RTL8153_MAX_MTU; + netdev->max_mtu = ETH_DATA_LEN; break; } @@ -6810,7 +9502,13 @@ static int rtl8152_probe(struct usb_interface *intf, tp->advertising = RTL_ADVERTISED_10_HALF | RTL_ADVERTISED_10_FULL | RTL_ADVERTISED_100_HALF | RTL_ADVERTISED_100_FULL; if (tp->mii.supports_gmii) { - tp->speed = SPEED_1000; + if (tp->support_2500full && + tp->udev->speed >= USB_SPEED_SUPER) { + tp->speed = SPEED_2500; + tp->advertising |= RTL_ADVERTISED_2500_FULL; + } else { + tp->speed = SPEED_1000; + } tp->advertising |= RTL_ADVERTISED_1000_FULL; } tp->duplex = DUPLEX_FULL; @@ -6834,7 +9532,11 @@ static int rtl8152_probe(struct usb_interface *intf, set_ethernet_addr(tp); usb_set_intfdata(intf, tp); - netif_napi_add(netdev, &tp->napi, r8152_poll, RTL8152_NAPI_WEIGHT); + + if (tp->support_2500full) + netif_napi_add(netdev, &tp->napi, r8152_poll, 256); + else + netif_napi_add(netdev, &tp->napi, r8152_poll, 64); ret = register_netdev(netdev); if (ret != 0) { @@ -6870,7 +9572,8 @@ static void rtl8152_disconnect(struct usb_interface *intf) unregister_netdev(tp->netdev); tasklet_kill(&tp->tx_tl); cancel_delayed_work_sync(&tp->hw_phy_work); - tp->rtl_ops.unload(tp); + if (tp->rtl_ops.unload) + tp->rtl_ops.unload(tp); rtl8152_release_firmware(tp); free_netdev(tp->netdev); } @@ -6890,13 +9593,28 @@ static void rtl8152_disconnect(struct usb_interface *intf) .idProduct = (prod), \ .bInterfaceClass = USB_CLASS_COMM, \ .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \ + .bInterfaceProtocol = USB_CDC_PROTO_NONE \ +}, \ +{ \ + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | \ + 
USB_DEVICE_ID_MATCH_DEVICE, \ + .idVendor = (vend), \ + .idProduct = (prod), \ + .bInterfaceClass = USB_CLASS_COMM, \ + .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM, \ .bInterfaceProtocol = USB_CDC_PROTO_NONE /* table of devices that work with this driver */ static const struct usb_device_id rtl8152_table[] = { + /* Realtek */ {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8050)}, + {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8053)}, {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)}, {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)}, + {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8155)}, + {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8156)}, + + /* Microsoft */ {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab)}, {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6)}, {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927)}, diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 15b2e3923c47..bdb7ce3cb054 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -486,11 +486,10 @@ static int veth_xdp_xmit(struct net_device *dev, int n, rcv_priv = netdev_priv(rcv); rq = &rcv_priv->rq[veth_select_rxq(rcv)]; - /* Non-NULL xdp_prog ensures that xdp_ring is initialized on receive - * side. This means an XDP program is loaded on the peer and the peer - * device is up. + /* The napi pointer is set if NAPI is enabled, which ensures that + * xdp_ring is initialized on receive side and the peer device is up. */ - if (!rcu_access_pointer(rq->xdp_prog)) + if (!rcu_access_pointer(rq->napi)) goto out; max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 101659cd4b87..8cd76037c724 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -379,21 +379,17 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, struct receive_queue *rq, struct page *page, unsigned int offset, unsigned int len, unsigned int truesize, - bool hdr_valid, unsigned int metasize) + bool hdr_valid, unsigned int metasize, + unsigned int headroom) { struct sk_buff *skb; struct virtio_net_hdr_mrg_rxbuf *hdr; unsigned int copy, hdr_len, hdr_padded_len; - char *p; + int tailroom, shinfo_size; + char *p, *hdr_p; p = page_address(page) + offset; - - /* copy small packet so we can reuse these pages for small data */ - skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN); - if (unlikely(!skb)) - return NULL; - - hdr = skb_vnet_hdr(skb); + hdr_p = p; hdr_len = vi->hdr_len; if (vi->mergeable_rx_bufs) @@ -401,14 +397,38 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, else hdr_padded_len = sizeof(struct padded_vnet_hdr); - /* hdr_valid means no XDP, so we can copy the vnet header */ - if (hdr_valid) - memcpy(hdr, p, hdr_len); + /* If headroom is not 0, there is an offset between the beginning of the + * data and the allocated space, otherwise the data and the allocated + * space are aligned. + */ + if (headroom) { + /* The actual allocated space size is PAGE_SIZE. 
*/ + truesize = PAGE_SIZE; + tailroom = truesize - len - offset; + } else { + tailroom = truesize - len; + } len -= hdr_len; offset += hdr_padded_len; p += hdr_padded_len; + shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + + if (len > GOOD_COPY_LEN && tailroom >= shinfo_size) { + skb = build_skb(p, truesize); + if (unlikely(!skb)) + return NULL; + + skb_put(skb, len); + goto ok; + } + + /* copy small packet so we can reuse these pages for small data */ + skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN); + if (unlikely(!skb)) + return NULL; + /* Copy all frame if it fits skb->head, otherwise * we let virtio_net_hdr_to_skb() and GRO pull headers as needed. */ @@ -418,11 +438,6 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, copy = ETH_HLEN + metasize; skb_put_data(skb, p, copy); - if (metasize) { - __skb_pull(skb, metasize); - skb_metadata_set(skb, metasize); - } - len -= copy; offset += copy; @@ -431,7 +446,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, skb_add_rx_frag(skb, 0, page, offset, len, truesize); else put_page(page); - return skb; + goto ok; } /* @@ -458,6 +473,18 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, if (page) give_pages(rq, page); +ok: + /* hdr_valid means no XDP, so we can copy the vnet header */ + if (hdr_valid) { + hdr = skb_vnet_hdr(skb); + memcpy(hdr, hdr_p, hdr_len); + } + + if (metasize) { + __skb_pull(skb, metasize); + skb_metadata_set(skb, metasize); + } + return skb; } @@ -808,7 +835,7 @@ static struct sk_buff *receive_big(struct net_device *dev, { struct page *page = buf; struct sk_buff *skb = - page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, true, 0); + page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, true, 0, 0); stats->bytes += len - vi->hdr_len; if (unlikely(!skb)) @@ -922,7 +949,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, put_page(page); head_skb = page_to_skb(vi, rq, xdp_page, offset, len, PAGE_SIZE, false, - metasize); + metasize, headroom); return head_skb; } break; @@ -980,7 +1007,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, } head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog, - metasize); + metasize, headroom); curr_skb = head_skb; if (unlikely(!curr_skb)) diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 6d9130859c55..503e2fd7ce51 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -471,9 +471,8 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb, skb_dst_drop(skb); - /* if dst.dev is loopback or the VRF device again this is locally - * originated traffic destined to a local address. Short circuit - * to Rx path + /* if dst.dev is the VRF device again this is locally originated traffic + * destined to a local address. Short circuit to Rx path. */ if (dst->dev == dev) return vrf_local_xmit(skb, dev, dst); @@ -547,9 +546,8 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb, skb_dst_drop(skb); - /* if dst.dev is loopback or the VRF device again this is locally - * originated traffic destined to a local address. Short circuit - * to Rx path + /* if dst.dev is the VRF device again this is locally originated traffic + * destined to a local address. Short circuit to Rx path. 
*/ if (rt->dst.dev == vrf_dev) return vrf_local_xmit(skb, vrf_dev, &rt->dst); diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index d66593f0950f..ea00fbb15601 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -1759,17 +1759,11 @@ err_core_destroy: return ret; } -static int ath10k_snoc_remove(struct platform_device *pdev) +static int ath10k_snoc_free_resources(struct ath10k *ar) { - struct ath10k *ar = platform_get_drvdata(pdev); struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); - ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n"); - - reinit_completion(&ar->driver_recovery); - - if (test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags)) - wait_for_completion_timeout(&ar->driver_recovery, 3 * HZ); + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc free resources\n"); set_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags); @@ -1783,12 +1777,29 @@ static int ath10k_snoc_remove(struct platform_device *pdev) return 0; } +static int ath10k_snoc_remove(struct platform_device *pdev) +{ + struct ath10k *ar = platform_get_drvdata(pdev); + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n"); + + reinit_completion(&ar->driver_recovery); + + if (test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags)) + wait_for_completion_timeout(&ar->driver_recovery, 3 * HZ); + + ath10k_snoc_free_resources(ar); + + return 0; +} + static void ath10k_snoc_shutdown(struct platform_device *pdev) { struct ath10k *ar = platform_get_drvdata(pdev); ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc shutdown\n"); - ath10k_snoc_remove(pdev); + ath10k_snoc_free_resources(ar); } static struct platform_driver ath10k_snoc_driver = { diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c index d4ef45cd0685..8c9c781afc3e 100644 --- a/drivers/net/wireless/ath/ath11k/ahb.c +++ b/drivers/net/wireless/ath/ath11k/ahb.c @@ -373,7 +373,7 @@ static void ath11k_ahb_init_qmi_ce_config(struct ath11k_base *ab) cfg->tgt_ce = ab->hw_params.target_ce_config; cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len; cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map; - ab->qmi.service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074; + ab->qmi.service_ins_id = ab->hw_params.qmi_service_ins_id; } static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab) diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c index 987c65010272..de8b632b058c 100644 --- a/drivers/net/wireless/ath/ath11k/ce.c +++ b/drivers/net/wireless/ath/ath11k/ce.c @@ -187,6 +187,59 @@ const struct ce_attr ath11k_host_ce_config_qca6390[] = { }; +const struct ce_attr ath11k_host_ce_config_qcn9074[] = { + /* CE0: host->target HTC control and raw streams */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 16, + .src_sz_max = 2048, + .dest_nentries = 0, + }, + + /* CE1: target->host HTT + HTC control */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 512, + .recv_cb = ath11k_htc_rx_completion_handler, + }, + + /* CE2: target->host WMI */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 32, + .recv_cb = ath11k_htc_rx_completion_handler, + }, + + /* CE3: host->target WMI (mac0) */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 32, + .src_sz_max = 2048, + .dest_nentries = 0, + }, + + /* CE4: host->target HTT */ + { + .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, + .src_nentries = 2048, + .src_sz_max = 256, + 
.dest_nentries = 0, + }, + + /* CE5: target->host pktlog */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 512, + .recv_cb = ath11k_dp_htt_htc_t2h_msg_handler, + }, +}; + static bool ath11k_ce_need_shadow_fix(int ce_id) { /* only ce4 needs shadow workaroud*/ @@ -455,7 +508,7 @@ static void ath11k_ce_srng_msi_ring_params_setup(struct ath11k_base *ab, u32 ce_ struct hal_srng_params *ring_params) { u32 msi_data_start; - u32 msi_data_count; + u32 msi_data_count, msi_data_idx; u32 msi_irq_start; u32 addr_lo; u32 addr_hi; @@ -469,10 +522,11 @@ static void ath11k_ce_srng_msi_ring_params_setup(struct ath11k_base *ab, u32 ce_ return; ath11k_get_msi_address(ab, &addr_lo, &addr_hi); + ath11k_get_ce_msi_idx(ab, ce_id, &msi_data_idx); ring_params->msi_addr = addr_lo; ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32); - ring_params->msi_data = (ce_id % msi_data_count) + msi_data_start; + ring_params->msi_data = (msi_data_idx % msi_data_count) + msi_data_start; ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR; } diff --git a/drivers/net/wireless/ath/ath11k/ce.h b/drivers/net/wireless/ath/ath11k/ce.h index d6eeef919349..713f766cac22 100644 --- a/drivers/net/wireless/ath/ath11k/ce.h +++ b/drivers/net/wireless/ath/ath11k/ce.h @@ -173,6 +173,7 @@ struct ath11k_ce { extern const struct ce_attr ath11k_host_ce_config_ipq8074[]; extern const struct ce_attr ath11k_host_ce_config_qca6390[]; +extern const struct ce_attr ath11k_host_ce_config_qcn9074[]; void ath11k_ce_cleanup_pipes(struct ath11k_base *ab); void ath11k_ce_rx_replenish_retry(struct timer_list *t); diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 350b7913622c..77ce3347ab86 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -45,6 +45,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .ring_mask = &ath11k_hw_ring_mask_ipq8074, .internal_sleep_clock = false, .regs = &ipq8074_regs, + .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074, .host_ce_config = ath11k_host_ce_config_ipq8074, .ce_count = 12, .target_ce_config = ath11k_target_ce_config_wlan_ipq8074, @@ -68,6 +69,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .idle_ps = false, .cold_boot_calib = true, .supports_suspend = false, + .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074), }, { .hw_rev = ATH11K_HW_IPQ6018_HW10, @@ -83,6 +85,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .ring_mask = &ath11k_hw_ring_mask_ipq8074, .internal_sleep_clock = false, .regs = &ipq8074_regs, + .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074, .host_ce_config = ath11k_host_ce_config_ipq8074, .ce_count = 12, .target_ce_config = ath11k_target_ce_config_wlan_ipq8074, @@ -106,6 +109,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .idle_ps = false, .cold_boot_calib = true, .supports_suspend = false, + .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074), }, { .name = "qca6390 hw2.0", @@ -121,6 +125,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .ring_mask = &ath11k_hw_ring_mask_qca6390, .internal_sleep_clock = true, .regs = &qca6390_regs, + .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390, .host_ce_config = ath11k_host_ce_config_qca6390, .ce_count = 9, .target_ce_config = ath11k_target_ce_config_wlan_qca6390, @@ -143,6 +148,44 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .idle_ps = true, .cold_boot_calib = false, .supports_suspend 
= true, + .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074), + }, + { + .name = "qcn9074 hw1.0", + .hw_rev = ATH11K_HW_QCN9074_HW10, + .fw = { + .dir = "QCN9074/hw1.0", + .board_size = 256 * 1024, + .cal_size = 256 * 1024, + }, + .max_radios = 1, + .single_pdev_only = false, + .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9074, + .hw_ops = &qcn9074_ops, + .ring_mask = &ath11k_hw_ring_mask_qcn9074, + .internal_sleep_clock = false, + .regs = &qcn9074_regs, + .host_ce_config = ath11k_host_ce_config_qcn9074, + .ce_count = 6, + .target_ce_config = ath11k_target_ce_config_wlan_qcn9074, + .target_ce_count = 9, + .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qcn9074, + .svc_to_ce_map_len = 18, + .rxdma1_enable = true, + .num_rxmda_per_pdev = 1, + .rx_mac_buf_ring = false, + .vdev_start_delay = false, + .htt_peer_map_v2 = true, + .tcl_0_only = false, + .interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_MESH_POINT), + .supports_monitor = true, + .supports_shadow_regs = false, + .idle_ps = false, + .cold_boot_calib = false, + .supports_suspend = false, + .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074), }, }; @@ -974,7 +1017,7 @@ static int ath11k_init_hw_params(struct ath11k_base *ab) ab->hw_params = *hw_params; - ath11k_dbg(ab, ATH11K_DBG_BOOT, "Hardware name %s\n", ab->hw_params.name); + ath11k_info(ab, "%s\n", ab->hw_params.name); return 0; } diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index 8d29845774df..55af982deca7 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -34,6 +34,7 @@ #define ATH11K_PRB_RSP_DROP_THRESHOLD ((ATH11K_TX_MGMT_TARGET_MAX_SUPPORT_WMI * 3) / 4) #define ATH11K_INVALID_HW_MAC_ID 0xFF +#define ATH11K_CONNECTION_LOSS_HZ (3 * HZ) extern unsigned int ath11k_frame_mode; @@ -105,6 +106,7 @@ enum ath11k_hw_rev { ATH11K_HW_IPQ8074, ATH11K_HW_QCA6390_HW20, ATH11K_HW_IPQ6018_HW10, + ATH11K_HW_QCN9074_HW10, }; enum ath11k_firmware_mode { @@ -234,6 +236,7 @@ struct ath11k_vif { u32 aid; u8 bssid[ETH_ALEN]; struct cfg80211_bitrate_mask bitrate_mask; + struct delayed_work connection_loss_work; int num_legacy_stations; int rtscts_prot_mode; int txpower; @@ -607,6 +610,7 @@ struct ath11k_bus_params { bool m3_fw_support; bool fixed_bdf_addr; bool fixed_mem_region; + bool static_window_map; }; /* IPQ8074 HW channel counters frequency value in hertz */ @@ -876,6 +880,8 @@ extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq6018 extern const struct ce_pipe_config ath11k_target_ce_config_wlan_qca6390[]; extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qca6390[]; +extern const struct ce_pipe_config ath11k_target_ce_config_wlan_qcn9074[]; +extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qcn9074[]; int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab); int ath11k_core_pre_init(struct ath11k_base *ab); int ath11k_core_init(struct ath11k_base *ath11k); diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c index e13684343ec3..ec93f14e6d2a 100644 --- a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c +++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c @@ -3851,7 +3851,7 @@ htt_print_pdev_obss_pd_stats_tlv_v(const void *tag_buf, htt_stats_buf->num_non_srg_ppdu_tried); len += HTT_DBG_OUT(buf + len, buf_len - len, "Non-SRG success PPDU = %u\n", htt_stats_buf->num_non_srg_ppdu_success); - len 
+= HTT_DBG_OUT(buf + len, buf_len - len, "SRG Opportunies = %u\n", + len += HTT_DBG_OUT(buf + len, buf_len - len, "SRG Opportunities = %u\n", htt_stats_buf->num_srg_opportunities); len += HTT_DBG_OUT(buf + len, buf_len - len, "SRG tried PPDU = %u\n", htt_stats_buf->num_srg_ppdu_tried); diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c index 850ad38b888f..1d9aa1bb6b6e 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.c +++ b/drivers/net/wireless/ath/ath11k/dp_rx.c @@ -20,95 +20,102 @@ #define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ) -static u8 *ath11k_dp_rx_h_80211_hdr(struct hal_rx_desc *desc) +static u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc) { - return desc->hdr_status; + return ab->hw_params.hw_ops->rx_desc_get_hdr_status(desc); } -static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct hal_rx_desc *desc) +static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - if (!(__le32_to_cpu(desc->mpdu_start.info1) & - RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID)) + if (!ab->hw_params.hw_ops->rx_desc_encrypt_valid(desc)) return HAL_ENCRYPT_TYPE_OPEN; - return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE, - __le32_to_cpu(desc->mpdu_start.info2)); + return ab->hw_params.hw_ops->rx_desc_get_encrypt_type(desc); } -static u8 ath11k_dp_rx_h_msdu_start_decap_type(struct hal_rx_desc *desc) +static u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT, - __le32_to_cpu(desc->msdu_start.info2)); + return ab->hw_params.hw_ops->rx_desc_get_decap_type(desc); } -static u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct hal_rx_desc *desc) +static u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT, - __le32_to_cpu(desc->msdu_start.info2)); + return ab->hw_params.hw_ops->rx_desc_get_mesh_ctl(desc); } -static bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct hal_rx_desc *desc) +static bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID, - __le32_to_cpu(desc->mpdu_start.info1)); + return ab->hw_params.hw_ops->rx_desc_get_mpdu_seq_ctl_vld(desc); } -static bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct hal_rx_desc *desc) +static bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_FCTRL_VALID, - __le32_to_cpu(desc->mpdu_start.info1)); + return ab->hw_params.hw_ops->rx_desc_get_mpdu_fc_valid(desc); } -static bool ath11k_dp_rx_h_mpdu_start_more_frags(struct sk_buff *skb) +static bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab, + struct sk_buff *skb) { struct ieee80211_hdr *hdr; - hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE); + hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz); return ieee80211_has_morefrags(hdr->frame_control); } -static u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct sk_buff *skb) +static u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab, + struct sk_buff *skb) { struct ieee80211_hdr *hdr; - hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE); + hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz); return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; } -static u16 
ath11k_dp_rx_h_mpdu_start_seq_no(struct hal_rx_desc *desc) +static u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - return FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM, - __le32_to_cpu(desc->mpdu_start.info1)); + return ab->hw_params.hw_ops->rx_desc_get_mpdu_start_seq_no(desc); } -static bool ath11k_dp_rx_h_attn_msdu_done(struct hal_rx_desc *desc) +static void *ath11k_dp_rx_get_attention(struct ath11k_base *ab, + struct hal_rx_desc *desc) +{ + return ab->hw_params.hw_ops->rx_desc_get_attention(desc); +} + +static bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn) { return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE, - __le32_to_cpu(desc->attention.info2)); + __le32_to_cpu(attn->info2)); } -static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct hal_rx_desc *desc) +static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn) { return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL, - __le32_to_cpu(desc->attention.info1)); + __le32_to_cpu(attn->info1)); } -static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct hal_rx_desc *desc) +static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn) { return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL, - __le32_to_cpu(desc->attention.info1)); + __le32_to_cpu(attn->info1)); } -static bool ath11k_dp_rx_h_attn_is_decrypted(struct hal_rx_desc *desc) +static bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn) { return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE, - __le32_to_cpu(desc->attention.info2)) == + __le32_to_cpu(attn->info2)) == RX_DESC_DECRYPT_STATUS_CODE_OK); } -static u32 ath11k_dp_rx_h_attn_mpdu_err(struct hal_rx_desc *desc) +static u32 ath11k_dp_rx_h_attn_mpdu_err(struct rx_attention *attn) { - u32 info = __le32_to_cpu(desc->attention.info1); + u32 info = __le32_to_cpu(attn->info1); u32 errmap = 0; if (info & RX_ATTENTION_INFO1_FCS_ERR) @@ -135,131 +142,122 @@ static u32 ath11k_dp_rx_h_attn_mpdu_err(struct hal_rx_desc *desc) return errmap; } -static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct hal_rx_desc *desc) +static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH, - __le32_to_cpu(desc->msdu_start.info1)); + return ab->hw_params.hw_ops->rx_desc_get_msdu_len(desc); } -static u8 ath11k_dp_rx_h_msdu_start_sgi(struct hal_rx_desc *desc) +static u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - return FIELD_GET(RX_MSDU_START_INFO3_SGI, - __le32_to_cpu(desc->msdu_start.info3)); + return ab->hw_params.hw_ops->rx_desc_get_msdu_sgi(desc); } -static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct hal_rx_desc *desc) +static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS, - __le32_to_cpu(desc->msdu_start.info3)); + return ab->hw_params.hw_ops->rx_desc_get_msdu_rate_mcs(desc); } -static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct hal_rx_desc *desc) +static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW, - __le32_to_cpu(desc->msdu_start.info3)); + return ab->hw_params.hw_ops->rx_desc_get_msdu_rx_bw(desc); } -static u32 ath11k_dp_rx_h_msdu_start_freq(struct hal_rx_desc *desc) +static u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - return __le32_to_cpu(desc->msdu_start.phy_meta_data); + return 
ab->hw_params.hw_ops->rx_desc_get_msdu_freq(desc); } -static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct hal_rx_desc *desc) +static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE, - __le32_to_cpu(desc->msdu_start.info3)); + return ab->hw_params.hw_ops->rx_desc_get_msdu_pkt_type(desc); } -static u8 ath11k_dp_rx_h_msdu_start_nss(struct hal_rx_desc *desc) +static u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - u8 mimo_ss_bitmap = FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP, - __le32_to_cpu(desc->msdu_start.info3)); - - return hweight8(mimo_ss_bitmap); + return hweight8(ab->hw_params.hw_ops->rx_desc_get_msdu_nss(desc)); } -static u8 ath11k_dp_rx_h_mpdu_start_tid(struct hal_rx_desc *desc) +static u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - return FIELD_GET(RX_MPDU_START_INFO2_TID, - __le32_to_cpu(desc->mpdu_start.info2)); + return ab->hw_params.hw_ops->rx_desc_get_mpdu_tid(desc); } -static u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct hal_rx_desc *desc) +static u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - return __le16_to_cpu(desc->mpdu_start.sw_peer_id); + return ab->hw_params.hw_ops->rx_desc_get_mpdu_peer_id(desc); } -static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct hal_rx_desc *desc) +static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING, - __le32_to_cpu(desc->msdu_end.info2)); + return ab->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc); } -static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct hal_rx_desc *desc) +static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU, - __le32_to_cpu(desc->msdu_end.info2)); + return ab->hw_params.hw_ops->rx_desc_get_first_msdu(desc); } -static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct hal_rx_desc *desc) +static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct ath11k_base *ab, + struct hal_rx_desc *desc) { - return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU, - __le32_to_cpu(desc->msdu_end.info2)); + return ab->hw_params.hw_ops->rx_desc_get_last_msdu(desc); } -static void ath11k_dp_rx_desc_end_tlv_copy(struct hal_rx_desc *fdesc, +static void ath11k_dp_rx_desc_end_tlv_copy(struct ath11k_base *ab, + struct hal_rx_desc *fdesc, struct hal_rx_desc *ldesc) { - memcpy((u8 *)&fdesc->msdu_end, (u8 *)&ldesc->msdu_end, - sizeof(struct rx_msdu_end)); - memcpy((u8 *)&fdesc->attention, (u8 *)&ldesc->attention, - sizeof(struct rx_attention)); - memcpy((u8 *)&fdesc->mpdu_end, (u8 *)&ldesc->mpdu_end, - sizeof(struct rx_mpdu_end)); + ab->hw_params.hw_ops->rx_desc_copy_attn_end_tlv(fdesc, ldesc); } -static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct hal_rx_desc *rx_desc) +static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn) { - struct rx_attention *rx_attn; - - rx_attn = &rx_desc->attention; - return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR, - __le32_to_cpu(rx_attn->info1)); + __le32_to_cpu(attn->info1)); } -static u32 ath11k_dp_rxdesc_get_decap_format(struct hal_rx_desc *rx_desc) -{ - struct rx_msdu_start *rx_msdu_start; - - rx_msdu_start = &rx_desc->msdu_start; - - return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT, - __le32_to_cpu(rx_msdu_start->info2)); -} - -static u8 *ath11k_dp_rxdesc_get_80211hdr(struct hal_rx_desc *rx_desc) +static u8 
*ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab, + struct hal_rx_desc *rx_desc) { u8 *rx_pkt_hdr; - rx_pkt_hdr = &rx_desc->msdu_payload[0]; + rx_pkt_hdr = ab->hw_params.hw_ops->rx_desc_get_msdu_payload(rx_desc); return rx_pkt_hdr; } -static bool ath11k_dp_rxdesc_mpdu_valid(struct hal_rx_desc *rx_desc) +static bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab, + struct hal_rx_desc *rx_desc) { u32 tlv_tag; - tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG, - __le32_to_cpu(rx_desc->mpdu_start_tag)); + tlv_tag = ab->hw_params.hw_ops->rx_desc_get_mpdu_start_tag(rx_desc); return tlv_tag == HAL_RX_MPDU_START; } -static u32 ath11k_dp_rxdesc_get_ppduid(struct hal_rx_desc *rx_desc) +static u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab, + struct hal_rx_desc *rx_desc) +{ + return ab->hw_params.hw_ops->rx_desc_get_mpdu_ppdu_id(rx_desc); +} + +static void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab, + struct hal_rx_desc *desc, + u16 len) { - return __le16_to_cpu(rx_desc->mpdu_start.phy_ppdu_id); + ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len); } static void ath11k_dp_service_mon_ring(struct timer_list *t) @@ -1722,19 +1720,19 @@ static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar, struct sk_buff *first, struct sk_buff *last, u8 l3pad_bytes, int msdu_len) { + struct ath11k_base *ab = ar->ab; struct sk_buff *skb; struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first); int buf_first_hdr_len, buf_first_len; struct hal_rx_desc *ldesc; - int space_extra; - int rem_len; - int buf_len; + int space_extra, rem_len, buf_len; + u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; /* As the msdu is spread across multiple rx buffers, * find the offset to the start of msdu for computing * the length of the msdu in the first buffer. */ - buf_first_hdr_len = HAL_RX_DESC_SIZE + l3pad_bytes; + buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes; buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len; if (WARN_ON_ONCE(msdu_len <= buf_first_len)) { @@ -1744,8 +1742,8 @@ static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar, } ldesc = (struct hal_rx_desc *)last->data; - rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ldesc); - rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ldesc); + rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ab, ldesc); + rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ab, ldesc); /* MSDU spans over multiple buffers because the length of the MSDU * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data @@ -1757,7 +1755,7 @@ static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar, /* When an MSDU spread over multiple buffers attention, MSDU_END and * MPDU_END tlvs are valid only in the last buffer. Copy those tlvs. 
*/ - ath11k_dp_rx_desc_end_tlv_copy(rxcb->rx_desc, ldesc); + ath11k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc); space_extra = msdu_len - (buf_first_len + skb_tailroom(first)); if (space_extra > 0 && @@ -1778,18 +1776,18 @@ static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar, while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) { rxcb = ATH11K_SKB_RXCB(skb); if (rxcb->is_continuation) - buf_len = DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE; + buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz; else buf_len = rem_len; - if (buf_len > (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE)) { + if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) { WARN_ON_ONCE(1); dev_kfree_skb_any(skb); return -EINVAL; } - skb_put(skb, buf_len + HAL_RX_DESC_SIZE); - skb_pull(skb, HAL_RX_DESC_SIZE); + skb_put(skb, buf_len + hal_rx_desc_sz); + skb_pull(skb, hal_rx_desc_sz); skb_copy_from_linear_data(skb, skb_put(first, buf_len), buf_len); dev_kfree_skb_any(skb); @@ -1820,13 +1818,15 @@ static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_ return NULL; } -static void ath11k_dp_rx_h_csum_offload(struct sk_buff *msdu) +static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu) { struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); + struct rx_attention *rx_attention; bool ip_csum_fail, l4_csum_fail; - ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rxcb->rx_desc); - l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rxcb->rx_desc); + rx_attention = ath11k_dp_rx_get_attention(ar->ab, rxcb->rx_desc); + ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rx_attention); + l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rx_attention); msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ? CHECKSUM_NONE : CHECKSUM_UNNECESSARY; @@ -1957,7 +1957,7 @@ static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar, qos_ctl = rxcb->tid; - if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(rxcb->rx_desc)) + if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(ar->ab, rxcb->rx_desc)) qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT; /* TODO Add other QoS ctl fields when required */ @@ -2061,7 +2061,7 @@ static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar, bool is_amsdu; is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu); - hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rxcb->rx_desc); + hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(ar->ab, rxcb->rx_desc); rfc1042 = hdr; if (rxcb->is_first_msdu) { @@ -2134,8 +2134,8 @@ static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu, u8 *first_hdr; u8 decap; - first_hdr = ath11k_dp_rx_h_80211_hdr(rx_desc); - decap = ath11k_dp_rx_h_msdu_start_decap_type(rx_desc); + first_hdr = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc); + decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc); switch (decap) { case DP_RX_DECAP_TYPE_NATIVE_WIFI: @@ -2167,6 +2167,7 @@ static void ath11k_dp_rx_h_mpdu(struct ath11k *ar, bool is_decrypted = false; struct ieee80211_hdr *hdr; struct ath11k_peer *peer; + struct rx_attention *rx_attention; u32 err_bitmap; hdr = (struct ieee80211_hdr *)msdu->data; @@ -2188,9 +2189,10 @@ static void ath11k_dp_rx_h_mpdu(struct ath11k *ar, } spin_unlock_bh(&ar->ab->base_lock); - err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_desc); + rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc); + err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention); if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap) - is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc); + is_decrypted = 
ath11k_dp_rx_h_attn_is_decrypted(rx_attention); /* Clear per-MPDU flags while leaving per-PPDU flags intact */ rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | @@ -2215,7 +2217,7 @@ static void ath11k_dp_rx_h_mpdu(struct ath11k *ar, RX_FLAG_PN_VALIDATED; } - ath11k_dp_rx_h_csum_offload(msdu); + ath11k_dp_rx_h_csum_offload(ar, msdu); ath11k_dp_rx_h_undecap(ar, msdu, rx_desc, enctype, rx_status, is_decrypted); @@ -2236,11 +2238,11 @@ static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc, u8 sgi; bool is_cck; - pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(rx_desc); - bw = ath11k_dp_rx_h_msdu_start_rx_bw(rx_desc); - rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(rx_desc); - nss = ath11k_dp_rx_h_msdu_start_nss(rx_desc); - sgi = ath11k_dp_rx_h_msdu_start_sgi(rx_desc); + pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(ar->ab, rx_desc); + bw = ath11k_dp_rx_h_msdu_start_rx_bw(ar->ab, rx_desc); + rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(ar->ab, rx_desc); + nss = ath11k_dp_rx_h_msdu_start_nss(ar->ab, rx_desc); + sgi = ath11k_dp_rx_h_msdu_start_sgi(ar->ab, rx_desc); switch (pkt_type) { case RX_MSDU_START_PKT_TYPE_11A: @@ -2297,7 +2299,7 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc, struct ieee80211_rx_status *rx_status) { u8 channel_num; - u32 center_freq; + u32 center_freq, meta_data; struct ieee80211_channel *channel; rx_status->freq = 0; @@ -2308,8 +2310,9 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc, rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; - channel_num = ath11k_dp_rx_h_msdu_start_freq(rx_desc); - center_freq = ath11k_dp_rx_h_msdu_start_freq(rx_desc) >> 16; + meta_data = ath11k_dp_rx_h_msdu_start_freq(ar->ab, rx_desc); + channel_num = meta_data; + center_freq = meta_data >> 16; if (center_freq >= 5935 && center_freq <= 7105) { rx_status->band = NL80211_BAND_6GHZ; @@ -2409,7 +2412,9 @@ static int ath11k_dp_rx_process_msdu(struct ath11k *ar, struct sk_buff *msdu, struct sk_buff_head *msdu_list) { + struct ath11k_base *ab = ar->ab; struct hal_rx_desc *rx_desc, *lrx_desc; + struct rx_attention *rx_attention; struct ieee80211_rx_status rx_status = {0}; struct ieee80211_rx_status *status; struct ath11k_skb_rxcb *rxcb; @@ -2419,10 +2424,11 @@ static int ath11k_dp_rx_process_msdu(struct ath11k *ar, u8 *hdr_status; u16 msdu_len; int ret; + u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu); if (!last_buf) { - ath11k_warn(ar->ab, + ath11k_warn(ab, "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n"); ret = -EIO; goto free_out; @@ -2430,38 +2436,39 @@ static int ath11k_dp_rx_process_msdu(struct ath11k *ar, rx_desc = (struct hal_rx_desc *)msdu->data; lrx_desc = (struct hal_rx_desc *)last_buf->data; - if (!ath11k_dp_rx_h_attn_msdu_done(lrx_desc)) { - ath11k_warn(ar->ab, "msdu_done bit in attention is not set\n"); + rx_attention = ath11k_dp_rx_get_attention(ab, lrx_desc); + if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) { + ath11k_warn(ab, "msdu_done bit in attention is not set\n"); ret = -EIO; goto free_out; } rxcb = ATH11K_SKB_RXCB(msdu); rxcb->rx_desc = rx_desc; - msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc); - l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(lrx_desc); + msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ab, rx_desc); + l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ab, lrx_desc); if (rxcb->is_frag) { - skb_pull(msdu, HAL_RX_DESC_SIZE); + skb_pull(msdu, hal_rx_desc_sz); } else if (!rxcb->is_continuation) { - if 
((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) { - hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc); + if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) { + hdr_status = ath11k_dp_rx_h_80211_hdr(ab, rx_desc); ret = -EINVAL; - ath11k_warn(ar->ab, "invalid msdu len %u\n", msdu_len); - ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status, + ath11k_warn(ab, "invalid msdu len %u\n", msdu_len); + ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", hdr_status, sizeof(struct ieee80211_hdr)); - ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc, + ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", rx_desc, sizeof(struct hal_rx_desc)); goto free_out; } - skb_put(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes + msdu_len); - skb_pull(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes); + skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len); + skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes); } else { ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list, msdu, last_buf, l3_pad_bytes, msdu_len); if (ret) { - ath11k_warn(ar->ab, + ath11k_warn(ab, "failed to coalesce msdu rx buffer%d\n", ret); goto free_out; } @@ -3090,16 +3097,17 @@ static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer u8 mic[IEEE80211_CCMP_MIC_LEN]; int head_len, tail_len, ret; size_t data_len; - u32 hdr_len; + u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; u8 *key, *data; u8 key_idx; - if (ath11k_dp_rx_h_mpdu_start_enctype(rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC) + if (ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc) != + HAL_ENCRYPT_TYPE_TKIP_MIC) return 0; - hdr = (struct ieee80211_hdr *)(msdu->data + HAL_RX_DESC_SIZE); + hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz); hdr_len = ieee80211_hdrlen(hdr->frame_control); - head_len = hdr_len + HAL_RX_DESC_SIZE + IEEE80211_TKIP_IV_LEN; + head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN; tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN; if (!is_multicast_ether_addr(hdr->addr1)) @@ -3125,7 +3133,7 @@ mic_fail: rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED | RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED; - skb_pull(msdu, HAL_RX_DESC_SIZE); + skb_pull(msdu, hal_rx_desc_sz); ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs); ath11k_dp_rx_h_undecap(ar, msdu, rx_desc, @@ -3140,11 +3148,12 @@ static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu, struct ieee80211_hdr *hdr; size_t hdr_len; size_t crypto_len; + u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; if (!flags) return; - hdr = (struct ieee80211_hdr *)(msdu->data + HAL_RX_DESC_SIZE); + hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz); if (flags & RX_FLAG_MIC_STRIPPED) skb_trim(msdu, msdu->len - @@ -3158,8 +3167,8 @@ static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu, hdr_len = ieee80211_hdrlen(hdr->frame_control); crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); - memmove((void *)msdu->data + HAL_RX_DESC_SIZE + crypto_len, - (void *)msdu->data + HAL_RX_DESC_SIZE, hdr_len); + memmove((void *)msdu->data + hal_rx_desc_sz + crypto_len, + (void *)msdu->data + hal_rx_desc_sz, hdr_len); skb_pull(msdu, crypto_len); } } @@ -3172,11 +3181,12 @@ static int ath11k_dp_rx_h_defrag(struct ath11k *ar, struct hal_rx_desc *rx_desc; struct sk_buff *skb, *first_frag, *last_frag; struct ieee80211_hdr *hdr; + struct rx_attention *rx_attention; enum hal_encrypt_type enctype; bool is_decrypted = false; int msdu_len = 0; int extra_space; - u32 flags; + u32 flags, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; first_frag = 
skb_peek(&rx_tid->rx_frags); last_frag = skb_peek_tail(&rx_tid->rx_frags); @@ -3184,11 +3194,13 @@ static int ath11k_dp_rx_h_defrag(struct ath11k *ar, skb_queue_walk(&rx_tid->rx_frags, skb) { flags = 0; rx_desc = (struct hal_rx_desc *)skb->data; - hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE); + hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz); - enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc); - if (enctype != HAL_ENCRYPT_TYPE_OPEN) - is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc); + enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc); + if (enctype != HAL_ENCRYPT_TYPE_OPEN) { + rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc); + is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention); + } if (is_decrypted) { if (skb != first_frag) @@ -3204,7 +3216,7 @@ static int ath11k_dp_rx_h_defrag(struct ath11k *ar, ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags); if (skb != first_frag) - skb_pull(skb, HAL_RX_DESC_SIZE + + skb_pull(skb, hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control)); msdu_len += skb->len; } @@ -3220,7 +3232,7 @@ static int ath11k_dp_rx_h_defrag(struct ath11k *ar, dev_kfree_skb_any(skb); } - hdr = (struct ieee80211_hdr *)(first_frag->data + HAL_RX_DESC_SIZE); + hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz); hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); ATH11K_SKB_RXCB(first_frag)->is_frag = 1; @@ -3246,10 +3258,10 @@ static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_ti struct hal_srng *srng; dma_addr_t paddr; u32 desc_bank, msdu_info, mpdu_info; - u32 dst_idx, cookie; - u32 *msdu_len_offset; + u32 dst_idx, cookie, hal_rx_desc_sz; int ret, buf_id; + hal_rx_desc_sz = ab->hw_params.hal_desc_sz; link_desc_banks = ab->dp.link_desc_banks; reo_dest_ring = rx_tid->dst_ring_desc; @@ -3264,16 +3276,14 @@ static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_ti FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) | FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) | FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH, - defrag_skb->len - HAL_RX_DESC_SIZE) | + defrag_skb->len - hal_rx_desc_sz) | FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) | FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) | FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1); msdu0->rx_msdu_info.info0 = msdu_info; /* change msdu len in hal rx desc */ - msdu_len_offset = (u32 *)&rx_desc->msdu_start; - *msdu_len_offset &= ~(RX_MSDU_START_INFO1_MSDU_LENGTH); - *msdu_len_offset |= defrag_skb->len - HAL_RX_DESC_SIZE; + ath11k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz); paddr = dma_map_single(ab->dev, defrag_skb->data, defrag_skb->len + skb_tailroom(defrag_skb), @@ -3346,24 +3356,26 @@ err_unmap_dma: return ret; } -static int ath11k_dp_rx_h_cmp_frags(struct sk_buff *a, struct sk_buff *b) +static int ath11k_dp_rx_h_cmp_frags(struct ath11k *ar, + struct sk_buff *a, struct sk_buff *b) { int frag1, frag2; - frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(a); - frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(b); + frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, a); + frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, b); return frag1 - frag2; } -static void ath11k_dp_rx_h_sort_frags(struct sk_buff_head *frag_list, +static void ath11k_dp_rx_h_sort_frags(struct ath11k *ar, + struct sk_buff_head *frag_list, struct sk_buff *cur_frag) { struct sk_buff *skb; int cmp; skb_queue_walk(frag_list, skb) { - cmp = ath11k_dp_rx_h_cmp_frags(skb, cur_frag); + cmp = 
ath11k_dp_rx_h_cmp_frags(ar, skb, cur_frag); if (cmp < 0) continue; __skb_queue_before(frag_list, skb, cur_frag); @@ -3372,14 +3384,15 @@ static void ath11k_dp_rx_h_sort_frags(struct sk_buff_head *frag_list, __skb_queue_tail(frag_list, cur_frag); } -static u64 ath11k_dp_rx_h_get_pn(struct sk_buff *skb) +static u64 ath11k_dp_rx_h_get_pn(struct ath11k *ar, struct sk_buff *skb) { struct ieee80211_hdr *hdr; u64 pn = 0; u8 *ehdr; + u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; - hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE); - ehdr = skb->data + HAL_RX_DESC_SIZE + ieee80211_hdrlen(hdr->frame_control); + hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz); + ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control); pn = ehdr[0]; pn |= (u64)ehdr[1] << 8; @@ -3403,19 +3416,19 @@ ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_t first_frag = skb_peek(&rx_tid->rx_frags); desc = (struct hal_rx_desc *)first_frag->data; - encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(desc); + encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, desc); if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 && encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 && encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 && encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256) return true; - last_pn = ath11k_dp_rx_h_get_pn(first_frag); + last_pn = ath11k_dp_rx_h_get_pn(ar, first_frag); skb_queue_walk(&rx_tid->rx_frags, skb) { if (skb == first_frag) continue; - cur_pn = ath11k_dp_rx_h_get_pn(skb); + cur_pn = ath11k_dp_rx_h_get_pn(ar, skb); if (cur_pn != last_pn + 1) return false; last_pn = cur_pn; @@ -3439,14 +3452,14 @@ static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar, bool more_frags; rx_desc = (struct hal_rx_desc *)msdu->data; - peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(rx_desc); - tid = ath11k_dp_rx_h_mpdu_start_tid(rx_desc); - seqno = ath11k_dp_rx_h_mpdu_start_seq_no(rx_desc); - frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(msdu); - more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(msdu); - - if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(rx_desc) || - !ath11k_dp_rx_h_mpdu_start_fc_valid(rx_desc) || + peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc); + tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, rx_desc); + seqno = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc); + frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, msdu); + more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(ar->ab, msdu); + + if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(ar->ab, rx_desc) || + !ath11k_dp_rx_h_mpdu_start_fc_valid(ar->ab, rx_desc) || tid > IEEE80211_NUM_TIDS) return -EINVAL; @@ -3484,7 +3497,7 @@ static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar, if (frag_no > __fls(rx_tid->rx_frag_bitmap)) __skb_queue_tail(&rx_tid->rx_frags, msdu); else - ath11k_dp_rx_h_sort_frags(&rx_tid->rx_frags, msdu); + ath11k_dp_rx_h_sort_frags(ar, &rx_tid->rx_frags, msdu); rx_tid->rx_frag_bitmap |= BIT(frag_no); if (!more_frags) @@ -3551,6 +3564,7 @@ ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool struct hal_rx_desc *rx_desc; u8 *hdr_status; u16 msdu_len; + u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; spin_lock_bh(&rx_ring->idr_lock); msdu = idr_find(&rx_ring->bufs_idr, buf_id); @@ -3586,9 +3600,9 @@ ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool } rx_desc = (struct hal_rx_desc *)msdu->data; - msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc); - if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) { - hdr_status = 
ath11k_dp_rx_h_80211_hdr(rx_desc); + msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, rx_desc); + if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) { + hdr_status = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc); ath11k_warn(ar->ab, "invalid msdu leng %u", msdu_len); ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status, sizeof(struct ieee80211_hdr)); @@ -3598,7 +3612,7 @@ ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool goto exit; } - skb_put(msdu, HAL_RX_DESC_SIZE + msdu_len); + skb_put(msdu, hal_rx_desc_sz + msdu_len); if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) { dev_kfree_skb_any(msdu); @@ -3732,7 +3746,7 @@ static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar, int n_buffs; n_buffs = DIV_ROUND_UP(msdu_len, - (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE)); + (DP_RX_BUFFER_SIZE - ar->ab->hw_params.hal_desc_sz)); skb_queue_walk_safe(msdu_list, skb, tmp) { rxcb = ATH11K_SKB_RXCB(skb); @@ -3753,19 +3767,22 @@ static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu, { u16 msdu_len; struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; + struct rx_attention *rx_attention; u8 l3pad_bytes; struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); + u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; - msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc); + msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc); - if (!rxcb->is_frag && ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE)) { + if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) { /* First buffer will be freed by the caller, so deduct it's length */ - msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE); + msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz); ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list); return -EINVAL; } - if (!ath11k_dp_rx_h_attn_msdu_done(desc)) { + rx_attention = ath11k_dp_rx_get_attention(ar->ab, desc); + if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) { ath11k_warn(ar->ab, "msdu_done bit not set in null_q_des processing\n"); __skb_queue_purge(msdu_list); @@ -3781,25 +3798,25 @@ static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu, * This error can show up both in a REO destination or WBM release ring. */ - rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc); - rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc); + rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc); + rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc); if (rxcb->is_frag) { - skb_pull(msdu, HAL_RX_DESC_SIZE); + skb_pull(msdu, hal_rx_desc_sz); } else { - l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc); + l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc); - if ((HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) + if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) return -EINVAL; - skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len); - skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes); + skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len); + skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes); } ath11k_dp_rx_h_ppdu(ar, desc, status); ath11k_dp_rx_h_mpdu(ar, msdu, desc, status); - rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(desc); + rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, desc); /* Please note that caller will having the access to msdu and completing * rx with mac80211. Need not worry about cleaning up amsdu_list. 
@@ -3846,14 +3863,15 @@ static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu, struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; u8 l3pad_bytes; struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); + u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; - rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc); - rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc); + rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc); + rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc); - l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc); - msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc); - skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len); - skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes); + l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc); + msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc); + skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len); + skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes); ath11k_dp_rx_h_ppdu(ar, desc, status); @@ -4595,10 +4613,10 @@ ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id, rx_desc = (struct hal_rx_desc *)msdu->data; rx_pkt_offset = sizeof(struct hal_rx_desc); - l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(rx_desc); + l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc); if (is_first_msdu) { - if (!ath11k_dp_rxdesc_mpdu_valid(rx_desc)) { + if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) { drop_mpdu = true; dev_kfree_skb_any(msdu); msdu = NULL; @@ -4607,7 +4625,7 @@ ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id, } msdu_ppdu_id = - ath11k_dp_rxdesc_get_ppduid(rx_desc); + ath11k_dp_rxdesc_get_ppduid(ar->ab, rx_desc); if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id, ppdu_id, @@ -4676,12 +4694,13 @@ next_msdu: return rx_bufs_used; } -static void ath11k_dp_rx_msdus_set_payload(struct sk_buff *msdu) +static void ath11k_dp_rx_msdus_set_payload(struct ath11k *ar, struct sk_buff *msdu) { u32 rx_pkt_offset, l2_hdr_offset; - rx_pkt_offset = sizeof(struct hal_rx_desc); - l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad((struct hal_rx_desc *)msdu->data); + rx_pkt_offset = ar->ab->hw_params.hal_desc_sz; + l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, + (struct hal_rx_desc *)msdu->data); skb_pull(msdu, rx_pkt_offset + l2_hdr_offset); } @@ -4691,12 +4710,14 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, struct sk_buff *last_msdu, struct ieee80211_rx_status *rxs) { + struct ath11k_base *ab = ar->ab; struct sk_buff *msdu, *mpdu_buf, *prev_buf; - u32 decap_format, wifi_hdr_len; + u32 wifi_hdr_len; struct hal_rx_desc *rx_desc; char *hdr_desc; - u8 *dest; + u8 *dest, decap_format; struct ieee80211_hdr_3addr *wh; + struct rx_attention *rx_attention; mpdu_buf = NULL; @@ -4704,22 +4725,23 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, goto err_merge_fail; rx_desc = (struct hal_rx_desc *)head_msdu->data; + rx_attention = ath11k_dp_rx_get_attention(ab, rx_desc); - if (ath11k_dp_rxdesc_get_mpdulen_err(rx_desc)) + if (ath11k_dp_rxdesc_get_mpdulen_err(rx_attention)) return NULL; - decap_format = ath11k_dp_rxdesc_get_decap_format(rx_desc); + decap_format = ath11k_dp_rx_h_msdu_start_decap_type(ab, rx_desc); ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs); if (decap_format == DP_RX_DECAP_TYPE_RAW) { - ath11k_dp_rx_msdus_set_payload(head_msdu); + ath11k_dp_rx_msdus_set_payload(ar, head_msdu); prev_buf = head_msdu; msdu = head_msdu->next; while (msdu) { - ath11k_dp_rx_msdus_set_payload(msdu); + ath11k_dp_rx_msdus_set_payload(ar, msdu); prev_buf = msdu; msdu = 
msdu->next; @@ -4733,7 +4755,7 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, u8 qos_pkt = 0; rx_desc = (struct hal_rx_desc *)head_msdu->data; - hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc); + hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc); /* Base size */ wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr); @@ -4750,7 +4772,7 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, while (msdu) { rx_desc = (struct hal_rx_desc *)msdu->data; - hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc); + hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc); if (qos_pkt) { dest = skb_push(msdu, sizeof(__le16)); @@ -4760,7 +4782,7 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, memcpy(dest + wifi_hdr_len, (u8 *)&qos_field, sizeof(__le16)); } - ath11k_dp_rx_msdus_set_payload(msdu); + ath11k_dp_rx_msdus_set_payload(ar, msdu); prev_buf = msdu; msdu = msdu->next; } @@ -4768,11 +4790,11 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, if (!dest) goto err_merge_fail; - ath11k_dbg(ar->ab, ATH11K_DBG_DATA, + ath11k_dbg(ab, ATH11K_DBG_DATA, "mpdu_buf %pK mpdu_buf->len %u", prev_buf, prev_buf->len); } else { - ath11k_dbg(ar->ab, ATH11K_DBG_DATA, + ath11k_dbg(ab, ATH11K_DBG_DATA, "decap format %d is not supported!\n", decap_format); goto err_merge_fail; @@ -4782,7 +4804,7 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, err_merge_fail: if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) { - ath11k_dbg(ar->ab, ATH11K_DBG_DATA, + ath11k_dbg(ab, ATH11K_DBG_DATA, "err_merge_fail mpdu_buf %pK", mpdu_buf); /* Free the head buffer */ dev_kfree_skb_any(mpdu_buf); diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c index 1a0b9be9ce6a..8bba5234f81f 100644 --- a/drivers/net/wireless/ath/ath11k/dp_tx.c +++ b/drivers/net/wireless/ath/ath11k/dp_tx.c @@ -178,7 +178,7 @@ tcl_ring_sel: } if (ieee80211_vif_is_mesh(arvif->vif)) - ti.flags1 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_MESH_ENABLE, 1); + ti.enable_mesh = true; ti.flags1 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE, 1); @@ -792,8 +792,8 @@ int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id, cmd->ring_tail_off32_remote_addr_hi = (u64)tp_addr >> HAL_ADDR_MSB_REG_SHIFT; - cmd->ring_msi_addr_lo = params.msi_addr & 0xffffffff; - cmd->ring_msi_addr_hi = ((uint64_t)(params.msi_addr) >> 32) & 0xffffffff; + cmd->ring_msi_addr_lo = lower_32_bits(params.msi_addr); + cmd->ring_msi_addr_hi = upper_32_bits(params.msi_addr); cmd->msi_data = params.msi_data; cmd->intr_info = FIELD_PREP( diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c index 9904c0eb7587..08e3c72d9237 100644 --- a/drivers/net/wireless/ath/ath11k/hal.c +++ b/drivers/net/wireless/ath/ath11k/hal.c @@ -89,17 +89,6 @@ static const struct hal_srng_config hw_srng_config_template[] = { .entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_SRC, - .reg_start = { - (HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + - HAL_CE_DST_RING_BASE_LSB), - HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_HP, - }, - .reg_size = { - (HAL_SEQ_WCSS_UMAC_CE1_SRC_REG - - HAL_SEQ_WCSS_UMAC_CE0_SRC_REG), - (HAL_SEQ_WCSS_UMAC_CE1_SRC_REG - - HAL_SEQ_WCSS_UMAC_CE0_SRC_REG), - }, .max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE, }, { /* CE_DST */ @@ -108,17 +97,6 @@ static const struct hal_srng_config hw_srng_config_template[] = { .entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_SRC, - .reg_start = { - (HAL_SEQ_WCSS_UMAC_CE0_DST_REG + - 
HAL_CE_DST_RING_BASE_LSB), - HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_HP, - }, - .reg_size = { - (HAL_SEQ_WCSS_UMAC_CE1_DST_REG - - HAL_SEQ_WCSS_UMAC_CE0_DST_REG), - (HAL_SEQ_WCSS_UMAC_CE1_DST_REG - - HAL_SEQ_WCSS_UMAC_CE0_DST_REG), - }, .max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE, }, { /* CE_DST_STATUS */ @@ -127,18 +105,6 @@ static const struct hal_srng_config hw_srng_config_template[] = { .entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_DST, - .reg_start = { - (HAL_SEQ_WCSS_UMAC_CE0_DST_REG + - HAL_CE_DST_STATUS_RING_BASE_LSB), - (HAL_SEQ_WCSS_UMAC_CE0_DST_REG + - HAL_CE_DST_STATUS_RING_HP), - }, - .reg_size = { - (HAL_SEQ_WCSS_UMAC_CE1_DST_REG - - HAL_SEQ_WCSS_UMAC_CE0_DST_REG), - (HAL_SEQ_WCSS_UMAC_CE1_DST_REG - - HAL_SEQ_WCSS_UMAC_CE0_DST_REG), - }, .max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE, }, { /* WBM_IDLE_LINK */ @@ -147,11 +113,6 @@ static const struct hal_srng_config hw_srng_config_template[] = { .entry_size = sizeof(struct hal_wbm_link_desc) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_SRC, - .reg_start = { - (HAL_SEQ_WCSS_UMAC_WBM_REG + - HAL_WBM_IDLE_LINK_RING_BASE_LSB), - (HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP), - }, .max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE, }, { /* SW2WBM_RELEASE */ @@ -160,11 +121,6 @@ static const struct hal_srng_config hw_srng_config_template[] = { .entry_size = sizeof(struct hal_wbm_release_ring) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_SRC, - .reg_start = { - (HAL_SEQ_WCSS_UMAC_WBM_REG + - HAL_WBM_RELEASE_RING_BASE_LSB), - (HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_HP), - }, .max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE, }, { /* WBM2SW_RELEASE */ @@ -173,16 +129,6 @@ static const struct hal_srng_config hw_srng_config_template[] = { .entry_size = sizeof(struct hal_wbm_release_ring) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_DST, - .reg_start = { - (HAL_SEQ_WCSS_UMAC_WBM_REG + - HAL_WBM0_RELEASE_RING_BASE_LSB), - (HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP), - }, - .reg_size = { - (HAL_WBM1_RELEASE_RING_BASE_LSB - - HAL_WBM0_RELEASE_RING_BASE_LSB), - (HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP), - }, .max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE, }, { /* RXDMA_BUF */ @@ -955,7 +901,7 @@ void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab, /* Enable the SRNG */ ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + - HAL_WBM_IDLE_LINK_RING_MISC_ADDR, 0x40); + HAL_WBM_IDLE_LINK_RING_MISC_ADDR(ab), 0x40); } int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type, @@ -1234,6 +1180,46 @@ static int ath11k_hal_srng_create_config(struct ath11k_base *ab) s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(ab); s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP; + s = &hal->srng_config[HAL_CE_SRC]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB; + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP; + s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) - + HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab); + s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) - + HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab); + + s = &hal->srng_config[HAL_CE_DST]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB; + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_HP; + s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - + HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); + 
s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - + HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); + + s = &hal->srng_config[HAL_CE_DST_STATUS]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + + HAL_CE_DST_STATUS_RING_BASE_LSB; + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP; + s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - + HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); + s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - + HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); + + s = &hal->srng_config[HAL_WBM_IDLE_LINK]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP; + + s = &hal->srng_config[HAL_SW2WBM_RELEASE]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_BASE_LSB(ab); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_HP; + + s = &hal->srng_config[HAL_WBM2SW_RELEASE]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(ab); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP; + s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(ab) - + HAL_WBM0_RELEASE_RING_BASE_LSB(ab); + s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP; + return 0; } diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h index 1f1b29cd0aa3..91d1428b8b94 100644 --- a/drivers/net/wireless/ath/ath11k/hal.h +++ b/drivers/net/wireless/ath/ath11k/hal.h @@ -39,14 +39,22 @@ struct ath11k_base; #define HAL_SHADOW_REG(x) (HAL_SHADOW_BASE_ADDR + (4 * (x))) /* WCSS Relative address */ +#define HAL_SEQ_WCSS_UMAC_OFFSET 0x00a00000 #define HAL_SEQ_WCSS_UMAC_REO_REG 0x00a38000 #define HAL_SEQ_WCSS_UMAC_TCL_REG 0x00a44000 -#define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG 0x00a00000 -#define HAL_SEQ_WCSS_UMAC_CE0_DST_REG 0x00a01000 -#define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG 0x00a02000 -#define HAL_SEQ_WCSS_UMAC_CE1_DST_REG 0x00a03000 +#define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(x) \ + (ab->hw_params.regs->hal_seq_wcss_umac_ce0_src_reg) +#define HAL_SEQ_WCSS_UMAC_CE0_DST_REG(x) \ + (ab->hw_params.regs->hal_seq_wcss_umac_ce0_dst_reg) +#define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(x) \ + (ab->hw_params.regs->hal_seq_wcss_umac_ce1_src_reg) +#define HAL_SEQ_WCSS_UMAC_CE1_DST_REG(x) \ + (ab->hw_params.regs->hal_seq_wcss_umac_ce1_dst_reg) #define HAL_SEQ_WCSS_UMAC_WBM_REG 0x00a34000 +#define HAL_CE_WFSS_CE_REG_BASE 0x01b80000 +#define HAL_WLAON_REG_BASE 0x01f80000 + /* SW2TCL(x) R0 ring configuration address */ #define HAL_TCL1_RING_CMN_CTRL_REG 0x00000014 #define HAL_TCL1_RING_DSCP_TID_MAP 0x0000002c @@ -197,8 +205,10 @@ struct ath11k_base; #define HAL_REO_STATUS_HP(ab) ab->hw_params.regs->hal_reo_status_hp /* WBM Idle R0 address */ -#define HAL_WBM_IDLE_LINK_RING_BASE_LSB 0x00000860 -#define HAL_WBM_IDLE_LINK_RING_MISC_ADDR 0x00000870 +#define HAL_WBM_IDLE_LINK_RING_BASE_LSB(x) \ + (ab->hw_params.regs->hal_wbm_idle_link_ring_base_lsb) +#define HAL_WBM_IDLE_LINK_RING_MISC_ADDR(x) \ + (ab->hw_params.regs->hal_wbm_idle_link_ring_misc) #define HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR 0x00000048 #define HAL_WBM_R0_IDLE_LIST_SIZE_ADDR 0x0000004c #define HAL_WBM_SCATTERED_RING_BASE_LSB 0x00000058 @@ -213,14 +223,17 @@ struct ath11k_base; #define HAL_WBM_IDLE_LINK_RING_HP 0x000030b0 /* SW2WBM R0 release address */ -#define HAL_WBM_RELEASE_RING_BASE_LSB 0x000001d8 +#define HAL_WBM_RELEASE_RING_BASE_LSB(x) \ + (ab->hw_params.regs->hal_wbm_release_ring_base_lsb) /* SW2WBM R2 release address */ #define 
HAL_WBM_RELEASE_RING_HP 0x00003018 /* WBM2SW R0 release address */ -#define HAL_WBM0_RELEASE_RING_BASE_LSB 0x00000910 -#define HAL_WBM1_RELEASE_RING_BASE_LSB 0x00000968 +#define HAL_WBM0_RELEASE_RING_BASE_LSB(x) \ + (ab->hw_params.regs->hal_wbm0_release_ring_base_lsb) +#define HAL_WBM1_RELEASE_RING_BASE_LSB(x) \ + (ab->hw_params.regs->hal_wbm1_release_ring_base_lsb) /* WBM2SW R2 release address */ #define HAL_WBM0_RELEASE_RING_HP 0x000030c0 @@ -303,8 +316,6 @@ struct ath11k_base; #define HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE 0x000fffff #define HAL_RXDMA_RING_MAX_SIZE 0x0000ffff -#define HAL_RX_DESC_SIZE (sizeof(struct hal_rx_desc)) - /* Add any other errors here and return them in * ath11k_hal_rx_desc_get_err(). */ diff --git a/drivers/net/wireless/ath/ath11k/hal_desc.h b/drivers/net/wireless/ath/ath11k/hal_desc.h index 1b713cb13b90..d54ec6aa6281 100644 --- a/drivers/net/wireless/ath/ath11k/hal_desc.h +++ b/drivers/net/wireless/ath/ath11k/hal_desc.h @@ -949,16 +949,17 @@ struct hal_reo_flush_cache { #define HAL_TCL_DATA_CMD_INFO1_TO_FW BIT(21) #define HAL_TCL_DATA_CMD_INFO1_PKT_OFFSET GENMASK(31, 23) -#define HAL_TCL_DATA_CMD_INFO2_BUF_TIMESTAMP GENMASK(18, 0) -#define HAL_TCL_DATA_CMD_INFO2_BUF_T_VALID BIT(19) -#define HAL_TCL_DATA_CMD_INFO2_MESH_ENABLE BIT(20) -#define HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE BIT(21) -#define HAL_TCL_DATA_CMD_INFO2_TID GENMASK(25, 22) -#define HAL_TCL_DATA_CMD_INFO2_LMAC_ID GENMASK(27, 26) +#define HAL_TCL_DATA_CMD_INFO2_BUF_TIMESTAMP GENMASK(18, 0) +#define HAL_TCL_DATA_CMD_INFO2_BUF_T_VALID BIT(19) +#define HAL_IPQ8074_TCL_DATA_CMD_INFO2_MESH_ENABLE BIT(20) +#define HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE BIT(21) +#define HAL_TCL_DATA_CMD_INFO2_TID GENMASK(25, 22) +#define HAL_TCL_DATA_CMD_INFO2_LMAC_ID GENMASK(27, 26) #define HAL_TCL_DATA_CMD_INFO3_DSCP_TID_TABLE_IDX GENMASK(5, 0) #define HAL_TCL_DATA_CMD_INFO3_SEARCH_INDEX GENMASK(25, 6) #define HAL_TCL_DATA_CMD_INFO3_CACHE_SET_NUM GENMASK(29, 26) +#define HAL_QCN9074_TCL_DATA_CMD_INFO3_MESH_ENABLE GENMASK(31, 30) #define HAL_TCL_DATA_CMD_INFO4_RING_ID GENMASK(27, 20) #define HAL_TCL_DATA_CMD_INFO4_LOOPING_COUNT GENMASK(31, 28) diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.c b/drivers/net/wireless/ath/ath11k/hal_tx.c index 569e790d83a1..c8929de8ce6c 100644 --- a/drivers/net/wireless/ath/ath11k/hal_tx.c +++ b/drivers/net/wireless/ath/ath11k/hal_tx.c @@ -75,6 +75,9 @@ void ath11k_hal_tx_cmd_desc_setup(struct ath11k_base *ab, void *cmd, FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_CACHE_SET_NUM, ti->bss_ast_hash); tcl_cmd->info4 = 0; + + if (ti->enable_mesh) + ab->hw_params.hw_ops->tx_mesh_enable(ab, tcl_cmd); } void ath11k_hal_tx_set_dscp_tid_map(struct ath11k_base *ab, int id) diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.h b/drivers/net/wireless/ath/ath11k/hal_tx.h index c291e59c3ca6..36f4f6f6cbc2 100644 --- a/drivers/net/wireless/ath/ath11k/hal_tx.h +++ b/drivers/net/wireless/ath/ath11k/hal_tx.h @@ -34,6 +34,7 @@ struct hal_tx_info { u8 search_type; /* %HAL_TX_ADDR_SEARCH_ */ u8 lmac_id; u8 dscp_tid_tbl_idx; + bool enable_mesh; }; /* TODO: Check if the actual desc macros can be used instead */ diff --git a/drivers/net/wireless/ath/ath11k/hif.h b/drivers/net/wireless/ath/ath11k/hif.h index 6285c52afc44..e9366f786fbb 100644 --- a/drivers/net/wireless/ath/ath11k/hif.h +++ b/drivers/net/wireless/ath/ath11k/hif.h @@ -28,6 +28,7 @@ struct ath11k_hif_ops { u32 *msi_addr_hi); void (*ce_irq_enable)(struct ath11k_base *ab); void (*ce_irq_disable)(struct ath11k_base *ab); + void (*get_ce_msi_idx)(struct 
ath11k_base *ab, u32 ce_id, u32 *msi_idx); }; static inline void ath11k_hif_ce_irq_enable(struct ath11k_base *ab) @@ -124,4 +125,13 @@ static inline void ath11k_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_ ab->hif.ops->get_msi_address(ab, msi_addr_lo, msi_addr_hi); } + +static inline void ath11k_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, + u32 *msi_data_idx) +{ + if (ab->hif.ops->get_ce_msi_idx) + ab->hif.ops->get_ce_msi_idx(ab, ce_id, msi_data_idx); + else + *msi_data_idx = ce_id; +} #endif /* _HIF_H_ */ diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c index 66331da35012..377ae8d5b58f 100644 --- a/drivers/net/wireless/ath/ath11k/hw.c +++ b/drivers/net/wireless/ath/ath11k/hw.c @@ -31,6 +31,20 @@ static u8 ath11k_hw_ipq6018_mac_from_pdev_id(int pdev_idx) return pdev_idx; } +static void ath11k_hw_ipq8074_tx_mesh_enable(struct ath11k_base *ab, + struct hal_tcl_data_cmd *tcl_cmd) +{ + tcl_cmd->info2 |= FIELD_PREP(HAL_IPQ8074_TCL_DATA_CMD_INFO2_MESH_ENABLE, + true); +} + +static void ath11k_hw_qcn9074_tx_mesh_enable(struct ath11k_base *ab, + struct hal_tcl_data_cmd *tcl_cmd) +{ + tcl_cmd->info3 |= FIELD_PREP(HAL_QCN9074_TCL_DATA_CMD_INFO3_MESH_ENABLE, + true); +} + static void ath11k_init_wmi_config_qca6390(struct ath11k_base *ab, struct target_resource_config *config) { @@ -155,11 +169,358 @@ static int ath11k_hw_mac_id_to_srng_id_qca6390(struct ath11k_hw_params *hw, return mac_id; } +static bool ath11k_hw_ipq8074_rx_desc_get_first_msdu(struct hal_rx_desc *desc) +{ + return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU, + __le32_to_cpu(desc->u.ipq8074.msdu_end.info2)); +} + +static bool ath11k_hw_ipq8074_rx_desc_get_last_msdu(struct hal_rx_desc *desc) +{ + return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU, + __le32_to_cpu(desc->u.ipq8074.msdu_end.info2)); +} + +static u8 ath11k_hw_ipq8074_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING, + __le32_to_cpu(desc->u.ipq8074.msdu_end.info2)); +} + +static u8 *ath11k_hw_ipq8074_rx_desc_get_hdr_status(struct hal_rx_desc *desc) +{ + return desc->u.ipq8074.hdr_status; +} + +static bool ath11k_hw_ipq8074_rx_desc_encrypt_valid(struct hal_rx_desc *desc) +{ + return __le32_to_cpu(desc->u.ipq8074.mpdu_start.info1) & + RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID; +} + +static u32 ath11k_hw_ipq8074_rx_desc_get_encrypt_type(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE, + __le32_to_cpu(desc->u.ipq8074.mpdu_start.info2)); +} + +static u8 ath11k_hw_ipq8074_rx_desc_get_decap_type(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT, + __le32_to_cpu(desc->u.ipq8074.msdu_start.info2)); +} + +static u8 ath11k_hw_ipq8074_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT, + __le32_to_cpu(desc->u.ipq8074.msdu_start.info2)); +} + +static bool ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc) +{ + return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID, + __le32_to_cpu(desc->u.ipq8074.mpdu_start.info1)); +} + +static bool ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc) +{ + return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_FCTRL_VALID, + __le32_to_cpu(desc->u.ipq8074.mpdu_start.info1)); +} + +static u16 ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM, + __le32_to_cpu(desc->u.ipq8074.mpdu_start.info1)); +} + +static u16 
ath11k_hw_ipq8074_rx_desc_get_msdu_len(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH, + __le32_to_cpu(desc->u.ipq8074.msdu_start.info1)); +} + +static u8 ath11k_hw_ipq8074_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_START_INFO3_SGI, + __le32_to_cpu(desc->u.ipq8074.msdu_start.info3)); +} + +static u8 ath11k_hw_ipq8074_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS, + __le32_to_cpu(desc->u.ipq8074.msdu_start.info3)); +} + +static u8 ath11k_hw_ipq8074_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW, + __le32_to_cpu(desc->u.ipq8074.msdu_start.info3)); +} + +static u32 ath11k_hw_ipq8074_rx_desc_get_msdu_freq(struct hal_rx_desc *desc) +{ + return __le32_to_cpu(desc->u.ipq8074.msdu_start.phy_meta_data); +} + +static u8 ath11k_hw_ipq8074_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE, + __le32_to_cpu(desc->u.ipq8074.msdu_start.info3)); +} + +static u8 ath11k_hw_ipq8074_rx_desc_get_msdu_nss(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP, + __le32_to_cpu(desc->u.ipq8074.msdu_start.info3)); +} + +static u8 ath11k_hw_ipq8074_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MPDU_START_INFO2_TID, + __le32_to_cpu(desc->u.ipq8074.mpdu_start.info2)); +} + +static u16 ath11k_hw_ipq8074_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc) +{ + return __le16_to_cpu(desc->u.ipq8074.mpdu_start.sw_peer_id); +} + +static void ath11k_hw_ipq8074_rx_desc_copy_attn_end(struct hal_rx_desc *fdesc, + struct hal_rx_desc *ldesc) +{ + memcpy((u8 *)&fdesc->u.ipq8074.msdu_end, (u8 *)&ldesc->u.ipq8074.msdu_end, + sizeof(struct rx_msdu_end_ipq8074)); + memcpy((u8 *)&fdesc->u.ipq8074.attention, (u8 *)&ldesc->u.ipq8074.attention, + sizeof(struct rx_attention)); + memcpy((u8 *)&fdesc->u.ipq8074.mpdu_end, (u8 *)&ldesc->u.ipq8074.mpdu_end, + sizeof(struct rx_mpdu_end)); +} + +static u32 ath11k_hw_ipq8074_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc) +{ + return FIELD_GET(HAL_TLV_HDR_TAG, + __le32_to_cpu(desc->u.ipq8074.mpdu_start_tag)); +} + +static u32 ath11k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc) +{ + return __le16_to_cpu(desc->u.ipq8074.mpdu_start.phy_ppdu_id); +} + +static void ath11k_hw_ipq8074_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len) +{ + u32 info = __le32_to_cpu(desc->u.ipq8074.msdu_start.info1); + + info &= ~RX_MSDU_START_INFO1_MSDU_LENGTH; + info |= FIELD_PREP(RX_MSDU_START_INFO1_MSDU_LENGTH, len); + + desc->u.ipq8074.msdu_start.info1 = __cpu_to_le32(info); +} + +static +struct rx_attention *ath11k_hw_ipq8074_rx_desc_get_attention(struct hal_rx_desc *desc) +{ + return &desc->u.ipq8074.attention; +} + +static u8 *ath11k_hw_ipq8074_rx_desc_get_msdu_payload(struct hal_rx_desc *desc) +{ + return &desc->u.ipq8074.msdu_payload[0]; +} + +static bool ath11k_hw_qcn9074_rx_desc_get_first_msdu(struct hal_rx_desc *desc) +{ + return !!FIELD_GET(RX_MSDU_END_INFO4_FIRST_MSDU, + __le16_to_cpu(desc->u.qcn9074.msdu_end.info4)); +} + +static bool ath11k_hw_qcn9074_rx_desc_get_last_msdu(struct hal_rx_desc *desc) +{ + return !!FIELD_GET(RX_MSDU_END_INFO4_LAST_MSDU, + __le16_to_cpu(desc->u.qcn9074.msdu_end.info4)); +} + +static u8 ath11k_hw_qcn9074_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_END_INFO4_L3_HDR_PADDING, + __le16_to_cpu(desc->u.qcn9074.msdu_end.info4)); +} + +static u8 
*ath11k_hw_qcn9074_rx_desc_get_hdr_status(struct hal_rx_desc *desc) +{ + return desc->u.qcn9074.hdr_status; +} + +static bool ath11k_hw_qcn9074_rx_desc_encrypt_valid(struct hal_rx_desc *desc) +{ + return __le32_to_cpu(desc->u.qcn9074.mpdu_start.info11) & + RX_MPDU_START_INFO11_ENCRYPT_INFO_VALID; +} + +static u32 ath11k_hw_qcn9074_rx_desc_get_encrypt_type(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MPDU_START_INFO9_ENC_TYPE, + __le32_to_cpu(desc->u.qcn9074.mpdu_start.info9)); +} + +static u8 ath11k_hw_qcn9074_rx_desc_get_decap_type(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT, + __le32_to_cpu(desc->u.qcn9074.msdu_start.info2)); +} + +static u8 ath11k_hw_qcn9074_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT, + __le32_to_cpu(desc->u.qcn9074.msdu_start.info2)); +} + +static bool ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc) +{ + return !!FIELD_GET(RX_MPDU_START_INFO11_MPDU_SEQ_CTRL_VALID, + __le32_to_cpu(desc->u.qcn9074.mpdu_start.info11)); +} + +static bool ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc) +{ + return !!FIELD_GET(RX_MPDU_START_INFO11_MPDU_FCTRL_VALID, + __le32_to_cpu(desc->u.qcn9074.mpdu_start.info11)); +} + +static u16 ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MPDU_START_INFO11_MPDU_SEQ_NUM, + __le32_to_cpu(desc->u.qcn9074.mpdu_start.info11)); +} + +static u16 ath11k_hw_qcn9074_rx_desc_get_msdu_len(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH, + __le32_to_cpu(desc->u.qcn9074.msdu_start.info1)); +} + +static u8 ath11k_hw_qcn9074_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_START_INFO3_SGI, + __le32_to_cpu(desc->u.qcn9074.msdu_start.info3)); +} + +static u8 ath11k_hw_qcn9074_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS, + __le32_to_cpu(desc->u.qcn9074.msdu_start.info3)); +} + +static u8 ath11k_hw_qcn9074_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW, + __le32_to_cpu(desc->u.qcn9074.msdu_start.info3)); +} + +static u32 ath11k_hw_qcn9074_rx_desc_get_msdu_freq(struct hal_rx_desc *desc) +{ + return __le32_to_cpu(desc->u.qcn9074.msdu_start.phy_meta_data); +} + +static u8 ath11k_hw_qcn9074_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE, + __le32_to_cpu(desc->u.qcn9074.msdu_start.info3)); +} + +static u8 ath11k_hw_qcn9074_rx_desc_get_msdu_nss(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP, + __le32_to_cpu(desc->u.qcn9074.msdu_start.info3)); +} + +static u8 ath11k_hw_qcn9074_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc) +{ + return FIELD_GET(RX_MPDU_START_INFO9_TID, + __le32_to_cpu(desc->u.qcn9074.mpdu_start.info9)); +} + +static u16 ath11k_hw_qcn9074_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc) +{ + return __le16_to_cpu(desc->u.qcn9074.mpdu_start.sw_peer_id); +} + +static void ath11k_hw_qcn9074_rx_desc_copy_attn_end(struct hal_rx_desc *fdesc, + struct hal_rx_desc *ldesc) +{ + memcpy((u8 *)&fdesc->u.qcn9074.msdu_end, (u8 *)&ldesc->u.qcn9074.msdu_end, + sizeof(struct rx_msdu_end_qcn9074)); + memcpy((u8 *)&fdesc->u.qcn9074.attention, (u8 *)&ldesc->u.qcn9074.attention, + sizeof(struct rx_attention)); + memcpy((u8 *)&fdesc->u.qcn9074.mpdu_end, (u8 *)&ldesc->u.qcn9074.mpdu_end, + sizeof(struct rx_mpdu_end)); +} + 
+static u32 ath11k_hw_qcn9074_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc) +{ + return FIELD_GET(HAL_TLV_HDR_TAG, + __le32_to_cpu(desc->u.qcn9074.mpdu_start_tag)); +} + +static u32 ath11k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc) +{ + return __le16_to_cpu(desc->u.qcn9074.mpdu_start.phy_ppdu_id); +} + +static void ath11k_hw_qcn9074_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len) +{ + u32 info = __le32_to_cpu(desc->u.qcn9074.msdu_start.info1); + + info &= ~RX_MSDU_START_INFO1_MSDU_LENGTH; + info |= FIELD_PREP(RX_MSDU_START_INFO1_MSDU_LENGTH, len); + + desc->u.qcn9074.msdu_start.info1 = __cpu_to_le32(info); +} + +static +struct rx_attention *ath11k_hw_qcn9074_rx_desc_get_attention(struct hal_rx_desc *desc) +{ + return &desc->u.qcn9074.attention; +} + +static u8 *ath11k_hw_qcn9074_rx_desc_get_msdu_payload(struct hal_rx_desc *desc) +{ + return &desc->u.qcn9074.msdu_payload[0]; +} + const struct ath11k_hw_ops ipq8074_ops = { .get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id, .wmi_init_config = ath11k_init_wmi_config_ipq8074, .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074, .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074, + .tx_mesh_enable = ath11k_hw_ipq8074_tx_mesh_enable, + .rx_desc_get_first_msdu = ath11k_hw_ipq8074_rx_desc_get_first_msdu, + .rx_desc_get_last_msdu = ath11k_hw_ipq8074_rx_desc_get_last_msdu, + .rx_desc_get_l3_pad_bytes = ath11k_hw_ipq8074_rx_desc_get_l3_pad_bytes, + .rx_desc_get_hdr_status = ath11k_hw_ipq8074_rx_desc_get_hdr_status, + .rx_desc_encrypt_valid = ath11k_hw_ipq8074_rx_desc_encrypt_valid, + .rx_desc_get_encrypt_type = ath11k_hw_ipq8074_rx_desc_get_encrypt_type, + .rx_desc_get_decap_type = ath11k_hw_ipq8074_rx_desc_get_decap_type, + .rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl, + .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld, + .rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid, + .rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no, + .rx_desc_get_msdu_len = ath11k_hw_ipq8074_rx_desc_get_msdu_len, + .rx_desc_get_msdu_sgi = ath11k_hw_ipq8074_rx_desc_get_msdu_sgi, + .rx_desc_get_msdu_rate_mcs = ath11k_hw_ipq8074_rx_desc_get_msdu_rate_mcs, + .rx_desc_get_msdu_rx_bw = ath11k_hw_ipq8074_rx_desc_get_msdu_rx_bw, + .rx_desc_get_msdu_freq = ath11k_hw_ipq8074_rx_desc_get_msdu_freq, + .rx_desc_get_msdu_pkt_type = ath11k_hw_ipq8074_rx_desc_get_msdu_pkt_type, + .rx_desc_get_msdu_nss = ath11k_hw_ipq8074_rx_desc_get_msdu_nss, + .rx_desc_get_mpdu_tid = ath11k_hw_ipq8074_rx_desc_get_mpdu_tid, + .rx_desc_get_mpdu_peer_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_peer_id, + .rx_desc_copy_attn_end_tlv = ath11k_hw_ipq8074_rx_desc_copy_attn_end, + .rx_desc_get_mpdu_start_tag = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_tag, + .rx_desc_get_mpdu_ppdu_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id, + .rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len, + .rx_desc_get_attention = ath11k_hw_ipq8074_rx_desc_get_attention, + .rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload, }; const struct ath11k_hw_ops ipq6018_ops = { @@ -167,6 +528,33 @@ const struct ath11k_hw_ops ipq6018_ops = { .wmi_init_config = ath11k_init_wmi_config_ipq8074, .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074, .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074, + .tx_mesh_enable = ath11k_hw_ipq8074_tx_mesh_enable, + .rx_desc_get_first_msdu = ath11k_hw_ipq8074_rx_desc_get_first_msdu, + .rx_desc_get_last_msdu = 
ath11k_hw_ipq8074_rx_desc_get_last_msdu, + .rx_desc_get_l3_pad_bytes = ath11k_hw_ipq8074_rx_desc_get_l3_pad_bytes, + .rx_desc_get_hdr_status = ath11k_hw_ipq8074_rx_desc_get_hdr_status, + .rx_desc_encrypt_valid = ath11k_hw_ipq8074_rx_desc_encrypt_valid, + .rx_desc_get_encrypt_type = ath11k_hw_ipq8074_rx_desc_get_encrypt_type, + .rx_desc_get_decap_type = ath11k_hw_ipq8074_rx_desc_get_decap_type, + .rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl, + .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld, + .rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid, + .rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no, + .rx_desc_get_msdu_len = ath11k_hw_ipq8074_rx_desc_get_msdu_len, + .rx_desc_get_msdu_sgi = ath11k_hw_ipq8074_rx_desc_get_msdu_sgi, + .rx_desc_get_msdu_rate_mcs = ath11k_hw_ipq8074_rx_desc_get_msdu_rate_mcs, + .rx_desc_get_msdu_rx_bw = ath11k_hw_ipq8074_rx_desc_get_msdu_rx_bw, + .rx_desc_get_msdu_freq = ath11k_hw_ipq8074_rx_desc_get_msdu_freq, + .rx_desc_get_msdu_pkt_type = ath11k_hw_ipq8074_rx_desc_get_msdu_pkt_type, + .rx_desc_get_msdu_nss = ath11k_hw_ipq8074_rx_desc_get_msdu_nss, + .rx_desc_get_mpdu_tid = ath11k_hw_ipq8074_rx_desc_get_mpdu_tid, + .rx_desc_get_mpdu_peer_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_peer_id, + .rx_desc_copy_attn_end_tlv = ath11k_hw_ipq8074_rx_desc_copy_attn_end, + .rx_desc_get_mpdu_start_tag = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_tag, + .rx_desc_get_mpdu_ppdu_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id, + .rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len, + .rx_desc_get_attention = ath11k_hw_ipq8074_rx_desc_get_attention, + .rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload, }; const struct ath11k_hw_ops qca6390_ops = { @@ -174,6 +562,67 @@ const struct ath11k_hw_ops qca6390_ops = { .wmi_init_config = ath11k_init_wmi_config_qca6390, .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_qca6390, .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_qca6390, + .tx_mesh_enable = ath11k_hw_ipq8074_tx_mesh_enable, + .rx_desc_get_first_msdu = ath11k_hw_ipq8074_rx_desc_get_first_msdu, + .rx_desc_get_last_msdu = ath11k_hw_ipq8074_rx_desc_get_last_msdu, + .rx_desc_get_l3_pad_bytes = ath11k_hw_ipq8074_rx_desc_get_l3_pad_bytes, + .rx_desc_get_hdr_status = ath11k_hw_ipq8074_rx_desc_get_hdr_status, + .rx_desc_encrypt_valid = ath11k_hw_ipq8074_rx_desc_encrypt_valid, + .rx_desc_get_encrypt_type = ath11k_hw_ipq8074_rx_desc_get_encrypt_type, + .rx_desc_get_decap_type = ath11k_hw_ipq8074_rx_desc_get_decap_type, + .rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl, + .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld, + .rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid, + .rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no, + .rx_desc_get_msdu_len = ath11k_hw_ipq8074_rx_desc_get_msdu_len, + .rx_desc_get_msdu_sgi = ath11k_hw_ipq8074_rx_desc_get_msdu_sgi, + .rx_desc_get_msdu_rate_mcs = ath11k_hw_ipq8074_rx_desc_get_msdu_rate_mcs, + .rx_desc_get_msdu_rx_bw = ath11k_hw_ipq8074_rx_desc_get_msdu_rx_bw, + .rx_desc_get_msdu_freq = ath11k_hw_ipq8074_rx_desc_get_msdu_freq, + .rx_desc_get_msdu_pkt_type = ath11k_hw_ipq8074_rx_desc_get_msdu_pkt_type, + .rx_desc_get_msdu_nss = ath11k_hw_ipq8074_rx_desc_get_msdu_nss, + .rx_desc_get_mpdu_tid = ath11k_hw_ipq8074_rx_desc_get_mpdu_tid, + .rx_desc_get_mpdu_peer_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_peer_id, + .rx_desc_copy_attn_end_tlv 
= ath11k_hw_ipq8074_rx_desc_copy_attn_end, + .rx_desc_get_mpdu_start_tag = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_tag, + .rx_desc_get_mpdu_ppdu_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id, + .rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len, + .rx_desc_get_attention = ath11k_hw_ipq8074_rx_desc_get_attention, + .rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload, +}; + +const struct ath11k_hw_ops qcn9074_ops = { + .get_hw_mac_from_pdev_id = ath11k_hw_ipq6018_mac_from_pdev_id, + .wmi_init_config = ath11k_init_wmi_config_ipq8074, + .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074, + .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074, + .tx_mesh_enable = ath11k_hw_qcn9074_tx_mesh_enable, + .rx_desc_get_first_msdu = ath11k_hw_qcn9074_rx_desc_get_first_msdu, + .rx_desc_get_last_msdu = ath11k_hw_qcn9074_rx_desc_get_last_msdu, + .rx_desc_get_l3_pad_bytes = ath11k_hw_qcn9074_rx_desc_get_l3_pad_bytes, + .rx_desc_get_hdr_status = ath11k_hw_qcn9074_rx_desc_get_hdr_status, + .rx_desc_encrypt_valid = ath11k_hw_qcn9074_rx_desc_encrypt_valid, + .rx_desc_get_encrypt_type = ath11k_hw_qcn9074_rx_desc_get_encrypt_type, + .rx_desc_get_decap_type = ath11k_hw_qcn9074_rx_desc_get_decap_type, + .rx_desc_get_mesh_ctl = ath11k_hw_qcn9074_rx_desc_get_mesh_ctl, + .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld, + .rx_desc_get_mpdu_fc_valid = ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid, + .rx_desc_get_mpdu_start_seq_no = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no, + .rx_desc_get_msdu_len = ath11k_hw_qcn9074_rx_desc_get_msdu_len, + .rx_desc_get_msdu_sgi = ath11k_hw_qcn9074_rx_desc_get_msdu_sgi, + .rx_desc_get_msdu_rate_mcs = ath11k_hw_qcn9074_rx_desc_get_msdu_rate_mcs, + .rx_desc_get_msdu_rx_bw = ath11k_hw_qcn9074_rx_desc_get_msdu_rx_bw, + .rx_desc_get_msdu_freq = ath11k_hw_qcn9074_rx_desc_get_msdu_freq, + .rx_desc_get_msdu_pkt_type = ath11k_hw_qcn9074_rx_desc_get_msdu_pkt_type, + .rx_desc_get_msdu_nss = ath11k_hw_qcn9074_rx_desc_get_msdu_nss, + .rx_desc_get_mpdu_tid = ath11k_hw_qcn9074_rx_desc_get_mpdu_tid, + .rx_desc_get_mpdu_peer_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_peer_id, + .rx_desc_copy_attn_end_tlv = ath11k_hw_qcn9074_rx_desc_copy_attn_end, + .rx_desc_get_mpdu_start_tag = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_tag, + .rx_desc_get_mpdu_ppdu_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id, + .rx_desc_set_msdu_len = ath11k_hw_qcn9074_rx_desc_set_msdu_len, + .rx_desc_get_attention = ath11k_hw_qcn9074_rx_desc_get_attention, + .rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload, }; #define ATH11K_TX_RING_MASK_0 0x1 @@ -792,6 +1241,241 @@ const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qca6390[] = { }, }; +/* Target firmware's Copy Engine configuration. 
*/ +const struct ce_pipe_config ath11k_target_ce_config_wlan_qcn9074[] = { + /* CE0: host->target HTC control and raw streams */ + { + .pipenum = __cpu_to_le32(0), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE1: target->host HTT + HTC control */ + { + .pipenum = __cpu_to_le32(1), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE2: target->host WMI */ + { + .pipenum = __cpu_to_le32(2), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE3: host->target WMI */ + { + .pipenum = __cpu_to_le32(3), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE4: host->target HTT */ + { + .pipenum = __cpu_to_le32(4), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(256), + .nbytes_max = __cpu_to_le32(256), + .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR), + .reserved = __cpu_to_le32(0), + }, + + /* CE5: target->host Pktlog */ + { + .pipenum = __cpu_to_le32(5), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE6: Reserved for target autonomous hif_memcpy */ + { + .pipenum = __cpu_to_le32(6), + .pipedir = __cpu_to_le32(PIPEDIR_INOUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(16384), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE7 used only by Host */ + { + .pipenum = __cpu_to_le32(7), + .pipedir = __cpu_to_le32(PIPEDIR_INOUT_H2H), + .nentries = __cpu_to_le32(0), + .nbytes_max = __cpu_to_le32(0), + .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR), + .reserved = __cpu_to_le32(0), + }, + + /* CE8 target->host used only by IPA */ + { + .pipenum = __cpu_to_le32(8), + .pipedir = __cpu_to_le32(PIPEDIR_INOUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(16384), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + /* CE 9, 10, 11 are used by MHI driver */ +}; + +/* Map from service/endpoint to Copy Engine. + * This table is derived from the CE_PCI TABLE, above. + * It is passed to the Target at startup for use by firmware. 
+ */ +const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qcn9074[] = { + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(0), + }, + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(1), + }, + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(0), + }, + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(1), + }, + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(4), + }, + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(1), + }, + { + __cpu_to_le32(ATH11K_HTC_SVC_ID_PKT_LOG), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(5), + }, + + /* (Additions here) */ + + { /* must be last */ + __cpu_to_le32(0), + __cpu_to_le32(0), + __cpu_to_le32(0), + }, +}; + +const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qcn9074 = { + .tx = { + ATH11K_TX_RING_MASK_0, + ATH11K_TX_RING_MASK_1, + ATH11K_TX_RING_MASK_2, + }, + .rx_mon_status = { + 0, 0, 0, + ATH11K_RX_MON_STATUS_RING_MASK_0, + ATH11K_RX_MON_STATUS_RING_MASK_1, + ATH11K_RX_MON_STATUS_RING_MASK_2, + }, + .rx = { + 0, 0, 0, 0, + ATH11K_RX_RING_MASK_0, + ATH11K_RX_RING_MASK_1, + ATH11K_RX_RING_MASK_2, + ATH11K_RX_RING_MASK_3, + }, + .rx_err = { + 0, 0, 0, + ATH11K_RX_ERR_RING_MASK_0, + }, + .rx_wbm_rel = { + 0, 0, 0, + ATH11K_RX_WBM_REL_RING_MASK_0, + }, + .reo_status = { + 0, 0, 0, + ATH11K_REO_STATUS_RING_MASK_0, + }, + .rxdma2host = { + 0, 0, 0, + ATH11K_RXDMA2HOST_RING_MASK_0, + }, + .host2rxdma = { + 0, 0, 0, + ATH11K_HOST2RXDMA_RING_MASK_0, + }, +}; + const struct ath11k_hw_regs ipq8074_regs = { /* SW2TCL(x) R0 ring configuration address */ .hal_tcl1_ring_base_lsb = 0x00000510, @@ -841,6 +1525,26 @@ const struct ath11k_hw_regs ipq8074_regs = { .hal_reo_status_ring_base_lsb = 
0x00000504, .hal_reo_status_hp = 0x00003070, + /* WCSS relative address */ + .hal_seq_wcss_umac_ce0_src_reg = 0x00a00000, + .hal_seq_wcss_umac_ce0_dst_reg = 0x00a01000, + .hal_seq_wcss_umac_ce1_src_reg = 0x00a02000, + .hal_seq_wcss_umac_ce1_dst_reg = 0x00a03000, + + /* WBM Idle address */ + .hal_wbm_idle_link_ring_base_lsb = 0x00000860, + .hal_wbm_idle_link_ring_misc = 0x00000870, + + /* SW2WBM release address */ + .hal_wbm_release_ring_base_lsb = 0x000001d8, + + /* WBM2SW release address */ + .hal_wbm0_release_ring_base_lsb = 0x00000910, + .hal_wbm1_release_ring_base_lsb = 0x00000968, + + /* PCIe base address */ + .pcie_qserdes_sysclk_en_sel = 0x0, + .pcie_pcs_osc_dtct_config_base = 0x0, }; const struct ath11k_hw_regs qca6390_regs = { @@ -891,4 +1595,96 @@ const struct ath11k_hw_regs qca6390_regs = { /* REO status address */ .hal_reo_status_ring_base_lsb = 0x000004ac, .hal_reo_status_hp = 0x00003068, + + /* WCSS relative address */ + .hal_seq_wcss_umac_ce0_src_reg = 0x00a00000, + .hal_seq_wcss_umac_ce0_dst_reg = 0x00a01000, + .hal_seq_wcss_umac_ce1_src_reg = 0x00a02000, + .hal_seq_wcss_umac_ce1_dst_reg = 0x00a03000, + + /* WBM Idle address */ + .hal_wbm_idle_link_ring_base_lsb = 0x00000860, + .hal_wbm_idle_link_ring_misc = 0x00000870, + + /* SW2WBM release address */ + .hal_wbm_release_ring_base_lsb = 0x000001d8, + + /* WBM2SW release address */ + .hal_wbm0_release_ring_base_lsb = 0x00000910, + .hal_wbm1_release_ring_base_lsb = 0x00000968, + + /* PCIe base address */ + .pcie_qserdes_sysclk_en_sel = 0x01e0c0ac, + .pcie_pcs_osc_dtct_config_base = 0x01e0c628, +}; + +const struct ath11k_hw_regs qcn9074_regs = { + /* SW2TCL(x) R0 ring configuration address */ + .hal_tcl1_ring_base_lsb = 0x000004f0, + .hal_tcl1_ring_base_msb = 0x000004f4, + .hal_tcl1_ring_id = 0x000004f8, + .hal_tcl1_ring_misc = 0x00000500, + .hal_tcl1_ring_tp_addr_lsb = 0x0000050c, + .hal_tcl1_ring_tp_addr_msb = 0x00000510, + .hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000520, + .hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000524, + .hal_tcl1_ring_msi1_base_lsb = 0x00000538, + .hal_tcl1_ring_msi1_base_msb = 0x0000053c, + .hal_tcl1_ring_msi1_data = 0x00000540, + .hal_tcl2_ring_base_lsb = 0x00000548, + .hal_tcl_ring_base_lsb = 0x000005f8, + + /* TCL STATUS ring address */ + .hal_tcl_status_ring_base_lsb = 0x00000700, + + /* REO2SW(x) R0 ring configuration address */ + .hal_reo1_ring_base_lsb = 0x0000029c, + .hal_reo1_ring_base_msb = 0x000002a0, + .hal_reo1_ring_id = 0x000002a4, + .hal_reo1_ring_misc = 0x000002ac, + .hal_reo1_ring_hp_addr_lsb = 0x000002b0, + .hal_reo1_ring_hp_addr_msb = 0x000002b4, + .hal_reo1_ring_producer_int_setup = 0x000002c0, + .hal_reo1_ring_msi1_base_lsb = 0x000002e4, + .hal_reo1_ring_msi1_base_msb = 0x000002e8, + .hal_reo1_ring_msi1_data = 0x000002ec, + .hal_reo2_ring_base_lsb = 0x000002f4, + .hal_reo1_aging_thresh_ix_0 = 0x00000564, + .hal_reo1_aging_thresh_ix_1 = 0x00000568, + .hal_reo1_aging_thresh_ix_2 = 0x0000056c, + .hal_reo1_aging_thresh_ix_3 = 0x00000570, + + /* REO2SW(x) R2 ring pointers (head/tail) address */ + .hal_reo1_ring_hp = 0x00003038, + .hal_reo1_ring_tp = 0x0000303c, + .hal_reo2_ring_hp = 0x00003040, + + /* REO2TCL R0 ring configuration address */ + .hal_reo_tcl_ring_base_lsb = 0x000003fc, + .hal_reo_tcl_ring_hp = 0x00003058, + + /* REO status address */ + .hal_reo_status_ring_base_lsb = 0x00000504, + .hal_reo_status_hp = 0x00003070, + + /* WCSS relative address */ + .hal_seq_wcss_umac_ce0_src_reg = 0x01b80000, + .hal_seq_wcss_umac_ce0_dst_reg = 0x01b81000, + 
.hal_seq_wcss_umac_ce1_src_reg = 0x01b82000, + .hal_seq_wcss_umac_ce1_dst_reg = 0x01b83000, + + /* WBM Idle address */ + .hal_wbm_idle_link_ring_base_lsb = 0x00000874, + .hal_wbm_idle_link_ring_misc = 0x00000884, + + /* SW2WBM release address */ + .hal_wbm_release_ring_base_lsb = 0x000001ec, + + /* WBM2SW release address */ + .hal_wbm0_release_ring_base_lsb = 0x00000924, + .hal_wbm1_release_ring_base_lsb = 0x0000097c, + + /* PCIe base address */ + .pcie_qserdes_sysclk_en_sel = 0x01e0e0a8, + .pcie_pcs_osc_dtct_config_base = 0x01e0f45c, }; diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h index 8af0034fdb05..c81a6328361d 100644 --- a/drivers/net/wireless/ath/ath11k/hw.h +++ b/drivers/net/wireless/ath/ath11k/hw.h @@ -105,6 +105,9 @@ enum ath11k_bus { #define ATH11K_EXT_IRQ_GRP_NUM_MAX 11 +struct hal_rx_desc; +struct hal_tcl_data_cmd; + struct ath11k_hw_ring_mask { u8 tx[ATH11K_EXT_IRQ_GRP_NUM_MAX]; u8 rx_mon_status[ATH11K_EXT_IRQ_GRP_NUM_MAX]; @@ -134,6 +137,7 @@ struct ath11k_hw_params { bool internal_sleep_clock; const struct ath11k_hw_regs *regs; + u32 qmi_service_ins_id; const struct ce_attr *host_ce_config; u32 ce_count; const struct ce_pipe_config *target_ce_config; @@ -157,6 +161,7 @@ struct ath11k_hw_params { bool idle_ps; bool cold_boot_calib; bool supports_suspend; + u32 hal_desc_sz; }; struct ath11k_hw_ops { @@ -165,14 +170,45 @@ struct ath11k_hw_ops { struct target_resource_config *config); int (*mac_id_to_pdev_id)(struct ath11k_hw_params *hw, int mac_id); int (*mac_id_to_srng_id)(struct ath11k_hw_params *hw, int mac_id); + void (*tx_mesh_enable)(struct ath11k_base *ab, + struct hal_tcl_data_cmd *tcl_cmd); + bool (*rx_desc_get_first_msdu)(struct hal_rx_desc *desc); + bool (*rx_desc_get_last_msdu)(struct hal_rx_desc *desc); + u8 (*rx_desc_get_l3_pad_bytes)(struct hal_rx_desc *desc); + u8 *(*rx_desc_get_hdr_status)(struct hal_rx_desc *desc); + bool (*rx_desc_encrypt_valid)(struct hal_rx_desc *desc); + u32 (*rx_desc_get_encrypt_type)(struct hal_rx_desc *desc); + u8 (*rx_desc_get_decap_type)(struct hal_rx_desc *desc); + u8 (*rx_desc_get_mesh_ctl)(struct hal_rx_desc *desc); + bool (*rx_desc_get_mpdu_seq_ctl_vld)(struct hal_rx_desc *desc); + bool (*rx_desc_get_mpdu_fc_valid)(struct hal_rx_desc *desc); + u16 (*rx_desc_get_mpdu_start_seq_no)(struct hal_rx_desc *desc); + u16 (*rx_desc_get_msdu_len)(struct hal_rx_desc *desc); + u8 (*rx_desc_get_msdu_sgi)(struct hal_rx_desc *desc); + u8 (*rx_desc_get_msdu_rate_mcs)(struct hal_rx_desc *desc); + u8 (*rx_desc_get_msdu_rx_bw)(struct hal_rx_desc *desc); + u32 (*rx_desc_get_msdu_freq)(struct hal_rx_desc *desc); + u8 (*rx_desc_get_msdu_pkt_type)(struct hal_rx_desc *desc); + u8 (*rx_desc_get_msdu_nss)(struct hal_rx_desc *desc); + u8 (*rx_desc_get_mpdu_tid)(struct hal_rx_desc *desc); + u16 (*rx_desc_get_mpdu_peer_id)(struct hal_rx_desc *desc); + void (*rx_desc_copy_attn_end_tlv)(struct hal_rx_desc *fdesc, + struct hal_rx_desc *ldesc); + u32 (*rx_desc_get_mpdu_start_tag)(struct hal_rx_desc *desc); + u32 (*rx_desc_get_mpdu_ppdu_id)(struct hal_rx_desc *desc); + void (*rx_desc_set_msdu_len)(struct hal_rx_desc *desc, u16 len); + struct rx_attention *(*rx_desc_get_attention)(struct hal_rx_desc *desc); + u8 *(*rx_desc_get_msdu_payload)(struct hal_rx_desc *desc); }; extern const struct ath11k_hw_ops ipq8074_ops; extern const struct ath11k_hw_ops ipq6018_ops; extern const struct ath11k_hw_ops qca6390_ops; +extern const struct ath11k_hw_ops qcn9074_ops; extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074; 
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390; +extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qcn9074; static inline int ath11k_hw_get_mac_from_pdev_id(struct ath11k_hw_params *hw, @@ -261,9 +297,26 @@ struct ath11k_hw_regs { u32 hal_reo_status_ring_base_lsb; u32 hal_reo_status_hp; + + u32 hal_seq_wcss_umac_ce0_src_reg; + u32 hal_seq_wcss_umac_ce0_dst_reg; + u32 hal_seq_wcss_umac_ce1_src_reg; + u32 hal_seq_wcss_umac_ce1_dst_reg; + + u32 hal_wbm_idle_link_ring_base_lsb; + u32 hal_wbm_idle_link_ring_misc; + + u32 hal_wbm_release_ring_base_lsb; + + u32 hal_wbm0_release_ring_base_lsb; + u32 hal_wbm1_release_ring_base_lsb; + + u32 pcie_qserdes_sysclk_en_sel; + u32 pcie_pcs_osc_dtct_config_base; }; extern const struct ath11k_hw_regs ipq8074_regs; extern const struct ath11k_hw_regs qca6390_regs; +extern const struct ath11k_hw_regs qcn9074_regs; #endif diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index faa2e678e63e..8719e8550a40 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -829,6 +829,75 @@ static void ath11k_control_beaconing(struct ath11k_vif *arvif, ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id); } +static void ath11k_mac_handle_beacon_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct sk_buff *skb = data; + struct ieee80211_mgmt *mgmt = (void *)skb->data; + struct ath11k_vif *arvif = (void *)vif->drv_priv; + + if (vif->type != NL80211_IFTYPE_STATION) + return; + + if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid)) + return; + + cancel_delayed_work(&arvif->connection_loss_work); +} + +void ath11k_mac_handle_beacon(struct ath11k *ar, struct sk_buff *skb) +{ + ieee80211_iterate_active_interfaces_atomic(ar->hw, + IEEE80211_IFACE_ITER_NORMAL, + ath11k_mac_handle_beacon_iter, + skb); +} + +static void ath11k_mac_handle_beacon_miss_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + u32 *vdev_id = data; + struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k *ar = arvif->ar; + struct ieee80211_hw *hw = ar->hw; + + if (arvif->vdev_id != *vdev_id) + return; + + if (!arvif->is_up) + return; + + ieee80211_beacon_loss(vif); + + /* Firmware doesn't report beacon loss events repeatedly. If AP probe + * (done by mac80211) succeeds but beacons do not resume then it + * doesn't make sense to continue operation. Queue connection loss work + * which can be cancelled when beacon is received. 
+ */ + ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work, + ATH11K_CONNECTION_LOSS_HZ); +} + +void ath11k_mac_handle_beacon_miss(struct ath11k *ar, u32 vdev_id) +{ + ieee80211_iterate_active_interfaces_atomic(ar->hw, + IEEE80211_IFACE_ITER_NORMAL, + ath11k_mac_handle_beacon_miss_iter, + &vdev_id); +} + +static void ath11k_mac_vif_sta_connection_loss_work(struct work_struct *work) +{ + struct ath11k_vif *arvif = container_of(work, struct ath11k_vif, + connection_loss_work.work); + struct ieee80211_vif *vif = arvif->vif; + + if (!arvif->is_up) + return; + + ieee80211_connection_loss(vif); +} + static void ath11k_peer_assoc_h_basic(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -1760,7 +1829,7 @@ static void ath11k_bss_disassoc(struct ieee80211_hw *hw, arvif->is_up = false; - /* TODO: cancel connection_loss_work */ + cancel_delayed_work_sync(&arvif->connection_loss_work); } static u32 ath11k_mac_get_rate_hw_value(int bitrate) @@ -3919,8 +3988,6 @@ static int ath11k_mac_copy_he_cap(struct ath11k *ar, he_cap_elem->phy_cap_info[5] &= ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK; - he_cap_elem->phy_cap_info[5] &= - ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK; he_cap_elem->phy_cap_info[5] |= ar->num_tx_chains - 1; switch (i) { @@ -4213,7 +4280,7 @@ static int ath11k_mac_mgmt_tx(struct ath11k *ar, struct sk_buff *skb, return -ENOSPC; } - if (skb_queue_len(q) == ATH11K_TX_MGMT_NUM_PENDING_MAX) { + if (skb_queue_len_lockless(q) >= ATH11K_TX_MGMT_NUM_PENDING_MAX) { ath11k_warn(ar->ab, "mgmt tx queue is full\n"); return -ENOSPC; } @@ -4617,10 +4684,8 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw, arvif->vif = vif; INIT_LIST_HEAD(&arvif->list); - - /* Should we initialize any worker to handle connection loss indication - * from firmware in sta mode? - */ + INIT_DELAYED_WORK(&arvif->connection_loss_work, + ath11k_mac_vif_sta_connection_loss_work); for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) { arvif->bitrate_mask.control[i].legacy = 0xffffffff; @@ -4829,6 +4894,8 @@ static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw, int ret; int i; + cancel_delayed_work_sync(&arvif->connection_loss_work); + mutex_lock(&ar->conf_mutex); ath11k_dbg(ab, ATH11K_DBG_MAC, "mac remove interface (vdev %d)\n", @@ -5096,13 +5163,15 @@ ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif, arg.channel.chan_radar = !!(chandef->chan->flags & IEEE80211_CHAN_RADAR); + arg.channel.freq2_radar = + !!(chandef->chan->flags & IEEE80211_CHAN_RADAR); + arg.channel.passive = arg.channel.chan_radar; spin_lock_bh(&ab->base_lock); arg.regdomain = ar->ab->dfs_region; spin_unlock_bh(&ab->base_lock); - /* TODO: Notify if secondary 80Mhz also needs radar detection */ if (he_support) { ret = ath11k_set_he_mu_sounding_mode(ar, arvif); if (ret) { @@ -6082,6 +6151,7 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw, /* TODO: Use real NF instead of default one. 
*/ sinfo->signal = arsta->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); } static const struct ieee80211_ops ath11k_ops = { diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h index 455577905505..4bc59bdaf244 100644 --- a/drivers/net/wireless/ath/ath11k/mac.h +++ b/drivers/net/wireless/ath/ath11k/mac.h @@ -150,4 +150,6 @@ int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx); u8 ath11k_mac_bw_to_mac80211_bw(u8 bw); enum ath11k_supported_bw ath11k_mac_mac80211_bw_to_ath11k_bw(enum rate_info_bw bw); enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher); +void ath11k_mac_handle_beacon(struct ath11k *ar, struct sk_buff *skb); +void ath11k_mac_handle_beacon_miss(struct ath11k *ar, u32 vdev_id); #endif diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c index 09858e516903..626764da4d6f 100644 --- a/drivers/net/wireless/ath/ath11k/mhi.c +++ b/drivers/net/wireless/ath/ath11k/mhi.c @@ -7,10 +7,11 @@ #include "core.h" #include "debug.h" #include "mhi.h" +#include "pci.h" #define MHI_TIMEOUT_DEFAULT_MS 90000 -static struct mhi_channel_config ath11k_mhi_channels[] = { +static struct mhi_channel_config ath11k_mhi_channels_qca6390[] = { { .num = 0, .name = "LOOPBACK", @@ -69,7 +70,7 @@ static struct mhi_channel_config ath11k_mhi_channels[] = { }, }; -static struct mhi_event_config ath11k_mhi_events[] = { +static struct mhi_event_config ath11k_mhi_events_qca6390[] = { { .num_elements = 32, .irq_moderation_ms = 0, @@ -92,15 +93,108 @@ static struct mhi_event_config ath11k_mhi_events[] = { }, }; -static struct mhi_controller_config ath11k_mhi_config = { +static struct mhi_controller_config ath11k_mhi_config_qca6390 = { .max_channels = 128, .timeout_ms = 2000, .use_bounce_buf = false, .buf_len = 0, - .num_channels = ARRAY_SIZE(ath11k_mhi_channels), - .ch_cfg = ath11k_mhi_channels, - .num_events = ARRAY_SIZE(ath11k_mhi_events), - .event_cfg = ath11k_mhi_events, + .num_channels = ARRAY_SIZE(ath11k_mhi_channels_qca6390), + .ch_cfg = ath11k_mhi_channels_qca6390, + .num_events = ARRAY_SIZE(ath11k_mhi_events_qca6390), + .event_cfg = ath11k_mhi_events_qca6390, +}; + +static struct mhi_channel_config ath11k_mhi_channels_qcn9074[] = { + { + .num = 0, + .name = "LOOPBACK", + .num_elements = 32, + .event_ring = 1, + .dir = DMA_TO_DEVICE, + .ee_mask = 0x14, + .pollcfg = 0, + .doorbell = MHI_DB_BRST_DISABLE, + .lpm_notify = false, + .offload_channel = false, + .doorbell_mode_switch = false, + .auto_queue = false, + }, + { + .num = 1, + .name = "LOOPBACK", + .num_elements = 32, + .event_ring = 1, + .dir = DMA_FROM_DEVICE, + .ee_mask = 0x14, + .pollcfg = 0, + .doorbell = MHI_DB_BRST_DISABLE, + .lpm_notify = false, + .offload_channel = false, + .doorbell_mode_switch = false, + .auto_queue = false, + }, + { + .num = 20, + .name = "IPCR", + .num_elements = 32, + .event_ring = 1, + .dir = DMA_TO_DEVICE, + .ee_mask = 0x14, + .pollcfg = 0, + .doorbell = MHI_DB_BRST_DISABLE, + .lpm_notify = false, + .offload_channel = false, + .doorbell_mode_switch = false, + .auto_queue = false, + }, + { + .num = 21, + .name = "IPCR", + .num_elements = 32, + .event_ring = 1, + .dir = DMA_FROM_DEVICE, + .ee_mask = 0x14, + .pollcfg = 0, + .doorbell = MHI_DB_BRST_DISABLE, + .lpm_notify = false, + .offload_channel = false, + .doorbell_mode_switch = false, + .auto_queue = true, + }, +}; + +static struct mhi_event_config ath11k_mhi_events_qcn9074[] = { + { + .num_elements = 32, + .irq_moderation_ms = 0, 
+ .irq = 1, + .data_type = MHI_ER_CTRL, + .mode = MHI_DB_BRST_DISABLE, + .hardware_event = false, + .client_managed = false, + .offload_channel = false, + }, + { + .num_elements = 256, + .irq_moderation_ms = 1, + .irq = 2, + .mode = MHI_DB_BRST_DISABLE, + .priority = 1, + .hardware_event = false, + .client_managed = false, + .offload_channel = false, + }, +}; + +static struct mhi_controller_config ath11k_mhi_config_qcn9074 = { + .max_channels = 30, + .timeout_ms = 10000, + .use_bounce_buf = false, + .buf_len = 0, + .num_channels = ARRAY_SIZE(ath11k_mhi_channels_qcn9074), + .ch_cfg = ath11k_mhi_channels_qcn9074, + .num_events = ARRAY_SIZE(ath11k_mhi_events_qcn9074), + .event_cfg = ath11k_mhi_events_qcn9074, }; void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab) @@ -221,6 +315,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci) { struct ath11k_base *ab = ab_pci->ab; struct mhi_controller *mhi_ctrl; + struct mhi_controller_config *ath11k_mhi_config; int ret; mhi_ctrl = mhi_alloc_controller(); @@ -254,7 +349,12 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci) mhi_ctrl->read_reg = ath11k_mhi_op_read_reg; mhi_ctrl->write_reg = ath11k_mhi_op_write_reg; - ret = mhi_register_controller(mhi_ctrl, &ath11k_mhi_config); + if (ab->hw_rev == ATH11K_HW_QCA6390_HW20) + ath11k_mhi_config = &ath11k_mhi_config_qca6390; + else if (ab->hw_rev == ATH11K_HW_QCN9074_HW10) + ath11k_mhi_config = &ath11k_mhi_config_qcn9074; + + ret = mhi_register_controller(mhi_ctrl, ath11k_mhi_config); if (ret) { ath11k_err(ab, "failed to register to mhi bus, err = %d\n", ret); mhi_free_controller(mhi_ctrl); diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c index d14416816acc..0f31eb566fb6 100644 --- a/drivers/net/wireless/ath/ath11k/pci.c +++ b/drivers/net/wireless/ath/ath11k/pci.c @@ -35,9 +35,11 @@ #define ACCESS_ALWAYS_OFF 0xFE0 #define QCA6390_DEVICE_ID 0x1101 +#define QCN9074_DEVICE_ID 0x1104 static const struct pci_device_id ath11k_pci_id_table[] = { { PCI_VDEVICE(QCOM, QCA6390_DEVICE_ID) }, + /* TODO: add QCN9074_DEVICE_ID) once firmware issues are resolved */ {0} }; @@ -50,14 +52,25 @@ static const struct ath11k_bus_params ath11k_pci_bus_params = { .fixed_mem_region = false, }; -static const struct ath11k_msi_config msi_config = { - .total_vectors = 32, - .total_users = 4, - .users = (struct ath11k_msi_user[]) { - { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, - { .name = "CE", .num_vectors = 10, .base_vector = 3 }, - { .name = "WAKE", .num_vectors = 1, .base_vector = 13 }, - { .name = "DP", .num_vectors = 18, .base_vector = 14 }, +static const struct ath11k_msi_config ath11k_msi_config[] = { + { + .total_vectors = 32, + .total_users = 4, + .users = (struct ath11k_msi_user[]) { + { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, + { .name = "CE", .num_vectors = 10, .base_vector = 3 }, + { .name = "WAKE", .num_vectors = 1, .base_vector = 13 }, + { .name = "DP", .num_vectors = 18, .base_vector = 14 }, + }, + }, + { + .total_vectors = 16, + .total_users = 3, + .users = (struct ath11k_msi_user[]) { + { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, + { .name = "CE", .num_vectors = 5, .base_vector = 3 }, + { .name = "DP", .num_vectors = 8, .base_vector = 8 }, + }, }, }; @@ -131,9 +144,38 @@ static inline void ath11k_pci_select_window(struct ath11k_pci *ab_pci, u32 offse } } +static inline void ath11k_pci_select_static_window(struct ath11k_pci *ab_pci) +{ + u32 umac_window = FIELD_GET(WINDOW_VALUE_MASK, HAL_SEQ_WCSS_UMAC_OFFSET); + u32 ce_window = 
FIELD_GET(WINDOW_VALUE_MASK, HAL_CE_WFSS_CE_REG_BASE); + u32 window; + + window = (umac_window << 12) | (ce_window << 6); + + iowrite32(WINDOW_ENABLE_BIT | window, ab_pci->ab->mem + WINDOW_REG_ADDRESS); +} + +static inline u32 ath11k_pci_get_window_start(struct ath11k_base *ab, + u32 offset) +{ + u32 window_start; + + /* If offset lies within DP register range, use 3rd window */ + if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < WINDOW_RANGE_MASK) + window_start = 3 * WINDOW_START; + /* If offset lies within CE register range, use 2nd window */ + else if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < WINDOW_RANGE_MASK) + window_start = 2 * WINDOW_START; + else + window_start = WINDOW_START; + + return window_start; +} + void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value) { struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); + u32 window_start; /* for offset beyond BAR + 4K - 32, may * need to wakeup MHI to access. @@ -145,10 +187,21 @@ void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value) if (offset < WINDOW_START) { iowrite32(value, ab->mem + offset); } else { - spin_lock_bh(&ab_pci->window_lock); - ath11k_pci_select_window(ab_pci, offset); - iowrite32(value, ab->mem + WINDOW_START + (offset & WINDOW_RANGE_MASK)); - spin_unlock_bh(&ab_pci->window_lock); + if (ab->bus_params.static_window_map) + window_start = ath11k_pci_get_window_start(ab, offset); + else + window_start = WINDOW_START; + + if (window_start == WINDOW_START) { + spin_lock_bh(&ab_pci->window_lock); + ath11k_pci_select_window(ab_pci, offset); + iowrite32(value, ab->mem + window_start + + (offset & WINDOW_RANGE_MASK)); + spin_unlock_bh(&ab_pci->window_lock); + } else { + iowrite32(value, ab->mem + window_start + + (offset & WINDOW_RANGE_MASK)); + } } if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && @@ -159,7 +212,7 @@ void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value) u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset) { struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); - u32 val; + u32 val, window_start; /* for offset beyond BAR + 4K - 32, may * need to wakeup MHI to access. 
@@ -171,10 +224,21 @@ u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset) if (offset < WINDOW_START) { val = ioread32(ab->mem + offset); } else { - spin_lock_bh(&ab_pci->window_lock); - ath11k_pci_select_window(ab_pci, offset); - val = ioread32(ab->mem + WINDOW_START + (offset & WINDOW_RANGE_MASK)); - spin_unlock_bh(&ab_pci->window_lock); + if (ab->bus_params.static_window_map) + window_start = ath11k_pci_get_window_start(ab, offset); + else + window_start = WINDOW_START; + + if (window_start == WINDOW_START) { + spin_lock_bh(&ab_pci->window_lock); + ath11k_pci_select_window(ab_pci, offset); + val = ioread32(ab->mem + window_start + + (offset & WINDOW_RANGE_MASK)); + spin_unlock_bh(&ab_pci->window_lock); + } else { + val = ioread32(ab->mem + window_start + + (offset & WINDOW_RANGE_MASK)); + } } if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && @@ -271,7 +335,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab) int ret; ret = ath11k_pci_set_link_reg(ab, - PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG, + PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG(ab), PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL, PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK); if (ret) { @@ -280,27 +344,27 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab) } ret = ath11k_pci_set_link_reg(ab, - PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_REG, - PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_VAL, - PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK); + PCIE_PCS_OSC_DTCT_CONFIG1_REG(ab), + PCIE_PCS_OSC_DTCT_CONFIG1_VAL, + PCIE_PCS_OSC_DTCT_CONFIG_MSK); if (ret) { ath11k_warn(ab, "failed to set dtct config1 error: %d\n", ret); return ret; } ret = ath11k_pci_set_link_reg(ab, - PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_REG, - PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_VAL, - PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK); + PCIE_PCS_OSC_DTCT_CONFIG2_REG(ab), + PCIE_PCS_OSC_DTCT_CONFIG2_VAL, + PCIE_PCS_OSC_DTCT_CONFIG_MSK); if (ret) { ath11k_warn(ab, "failed to set dtct config2: %d\n", ret); return ret; } ret = ath11k_pci_set_link_reg(ab, - PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_REG, - PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_VAL, - PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK); + PCIE_PCS_OSC_DTCT_CONFIG4_REG(ab), + PCIE_PCS_OSC_DTCT_CONFIG4_VAL, + PCIE_PCS_OSC_DTCT_CONFIG_MSK); if (ret) { ath11k_warn(ab, "failed to set dtct config4: %d\n", ret); return ret; @@ -406,14 +470,15 @@ int ath11k_pci_get_user_msi_assignment(struct ath11k_pci *ab_pci, char *user_nam u32 *base_vector) { struct ath11k_base *ab = ab_pci->ab; + const struct ath11k_msi_config *msi_config = ab_pci->msi_config; int idx; - for (idx = 0; idx < msi_config.total_users; idx++) { - if (strcmp(user_name, msi_config.users[idx].name) == 0) { - *num_vectors = msi_config.users[idx].num_vectors; - *user_base_data = msi_config.users[idx].base_vector + for (idx = 0; idx < msi_config->total_users; idx++) { + if (strcmp(user_name, msi_config->users[idx].name) == 0) { + *num_vectors = msi_config->users[idx].num_vectors; + *user_base_data = msi_config->users[idx].base_vector + ab_pci->msi_ep_base_data; - *base_vector = msi_config.users[idx].base_vector; + *base_vector = msi_config->users[idx].base_vector; ath11k_dbg(ab, ATH11K_DBG_PCI, "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n", user_name, *num_vectors, *user_base_data, @@ -428,6 +493,23 @@ int ath11k_pci_get_user_msi_assignment(struct ath11k_pci *ab_pci, char *user_nam return -EINVAL; } +static void ath11k_pci_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, + u32 *msi_idx) +{ + u32 i, msi_data_idx; + + for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; 
i++) { + if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) + continue; + + if (ce_id == i) + break; + + msi_data_idx++; + } + *msi_idx = msi_data_idx; +} + static int ath11k_get_user_msi_assignment(struct ath11k_base *ab, char *user_name, int *num_vectors, u32 *user_base_data, u32 *base_vector) @@ -521,6 +603,9 @@ static irqreturn_t ath11k_pci_ce_interrupt_handler(int irq, void *arg) { struct ath11k_ce_pipe *ce_pipe = arg; + /* last interrupt received for this CE */ + ce_pipe->timestamp = jiffies; + ath11k_pci_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num); tasklet_schedule(&ce_pipe->intr_tq); @@ -615,6 +700,9 @@ static irqreturn_t ath11k_pci_ext_interrupt_handler(int irq, void *arg) ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq:%d\n", irq); + /* last interrupt received for this group */ + irq_grp->timestamp = jiffies; + ath11k_pci_ext_grp_disable(irq_grp); napi_schedule(&irq_grp->napi); @@ -625,8 +713,9 @@ static irqreturn_t ath11k_pci_ext_interrupt_handler(int irq, void *arg) static int ath11k_pci_ext_irq_config(struct ath11k_base *ab) { int i, j, ret, num_vectors = 0; - u32 user_base_data = 0, base_vector = 0; + u32 user_base_data = 0, base_vector = 0, base_idx; + base_idx = ATH11K_PCI_IRQ_CE0_OFFSET + CE_COUNT_MAX; ret = ath11k_pci_get_user_msi_assignment(ath11k_pci_priv(ab), "DP", &num_vectors, &user_base_data, @@ -656,7 +745,7 @@ static int ath11k_pci_ext_irq_config(struct ath11k_base *ab) } irq_grp->num_irq = num_irq; - irq_grp->irqs[0] = base_vector + i; + irq_grp->irqs[0] = base_idx + i; for (j = 0; j < irq_grp->num_irq; j++) { int irq_idx = irq_grp->irqs[j]; @@ -667,6 +756,8 @@ static int ath11k_pci_ext_irq_config(struct ath11k_base *ab) ath11k_dbg(ab, ATH11K_DBG_PCI, "irq:%d group:%d\n", irq, i); + + irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY); ret = request_irq(irq, ath11k_pci_ext_interrupt_handler, IRQF_SHARED, "DP_EXT_IRQ", irq_grp); @@ -687,7 +778,7 @@ static int ath11k_pci_config_irq(struct ath11k_base *ab) { struct ath11k_ce_pipe *ce_pipe; u32 msi_data_start; - u32 msi_data_count; + u32 msi_data_count, msi_data_idx; u32 msi_irq_start; unsigned int msi_data; int irq, i, ret, irq_idx; @@ -699,14 +790,14 @@ static int ath11k_pci_config_irq(struct ath11k_base *ab) return ret; /* Configure CE irqs */ - for (i = 0; i < ab->hw_params.ce_count; i++) { - msi_data = (i % msi_data_count) + msi_irq_start; - irq = ath11k_pci_get_msi_irq(ab->dev, msi_data); - ce_pipe = &ab->ce.ce_pipe[i]; - + for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) { if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) continue; + msi_data = (msi_data_idx % msi_data_count) + msi_irq_start; + irq = ath11k_pci_get_msi_irq(ab->dev, msi_data); + ce_pipe = &ab->ce.ce_pipe[i]; + irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i; tasklet_setup(&ce_pipe->intr_tq, ath11k_pci_ce_tasklet); @@ -721,6 +812,8 @@ static int ath11k_pci_config_irq(struct ath11k_base *ab) } ab->irq_num[irq_idx] = irq; + msi_data_idx++; + ath11k_pci_ce_irq_disable(ab, i); } @@ -740,7 +833,7 @@ static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab) cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map; cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len; - ab->qmi.service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390; + ab->qmi.service_ins_id = ab->hw_params.qmi_service_ins_id; ath11k_ce_get_shadow_config(ab, &cfg->shadow_reg_v2, &cfg->shadow_reg_v2_len); @@ -760,17 +853,18 @@ static void ath11k_pci_ce_irqs_enable(struct ath11k_base *ab) static int ath11k_pci_enable_msi(struct ath11k_pci *ab_pci) { struct 
ath11k_base *ab = ab_pci->ab; + const struct ath11k_msi_config *msi_config = ab_pci->msi_config; struct msi_desc *msi_desc; int num_vectors; int ret; num_vectors = pci_alloc_irq_vectors(ab_pci->pdev, - msi_config.total_vectors, - msi_config.total_vectors, + msi_config->total_vectors, + msi_config->total_vectors, PCI_IRQ_MSI); - if (num_vectors != msi_config.total_vectors) { + if (num_vectors != msi_config->total_vectors) { ath11k_err(ab, "failed to get %d MSI vectors, only %d available", - msi_config.total_vectors, num_vectors); + msi_config->total_vectors, num_vectors); if (num_vectors >= 0) return -EINVAL; @@ -932,6 +1026,9 @@ static int ath11k_pci_power_up(struct ath11k_base *ab) return ret; } + if (ab->bus_params.static_window_map) + ath11k_pci_select_static_window(ab_pci); + return 0; } @@ -1076,6 +1173,7 @@ static const struct ath11k_hif_ops ath11k_pci_hif_ops = { .map_service_to_pipe = ath11k_pci_map_service_to_pipe, .ce_irq_enable = ath11k_pci_hif_ce_irq_enable, .ce_irq_disable = ath11k_pci_hif_ce_irq_disable, + .get_ce_msi_idx = ath11k_pci_get_ce_msi_idx, }; static int ath11k_pci_probe(struct pci_dev *pdev, @@ -1130,6 +1228,12 @@ static int ath11k_pci_probe(struct pci_dev *pdev, ret = -EOPNOTSUPP; goto err_pci_free_region; } + ab_pci->msi_config = &ath11k_msi_config[0]; + break; + case QCN9074_DEVICE_ID: + ab_pci->msi_config = &ath11k_msi_config[1]; + ab->bus_params.static_window_map = true; + ab->hw_rev = ATH11K_HW_QCN9074_HW10; break; default: dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n", diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h index fe44d0dfce19..f3e645891d19 100644 --- a/drivers/net/wireless/ath/ath11k/pci.h +++ b/drivers/net/wireless/ath/ath11k/pci.h @@ -34,16 +34,20 @@ #define PCIE_SMLH_REQ_RST_LINK_DOWN 0x2 #define PCIE_INT_CLEAR_ALL 0xffffffff -#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG 0x01e0c0ac +#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG(x) \ + (ab->hw_params.regs->pcie_qserdes_sysclk_en_sel) #define PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL 0x10 #define PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK 0xffffffff -#define PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_REG 0x01e0c628 -#define PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_VAL 0x02 -#define PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_REG 0x01e0c62c -#define PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_VAL 0x52 -#define PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_REG 0x01e0c634 -#define PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_VAL 0xff -#define PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK 0x000000ff +#define PCIE_PCS_OSC_DTCT_CONFIG1_REG(x) \ + (ab->hw_params.regs->pcie_pcs_osc_dtct_config_base) +#define PCIE_PCS_OSC_DTCT_CONFIG1_VAL 0x02 +#define PCIE_PCS_OSC_DTCT_CONFIG2_REG(x) \ + (ab->hw_params.regs->pcie_pcs_osc_dtct_config_base + 0x4) +#define PCIE_PCS_OSC_DTCT_CONFIG2_VAL 0x52 +#define PCIE_PCS_OSC_DTCT_CONFIG4_REG(x) \ + (ab->hw_params.regs->pcie_pcs_osc_dtct_config_base + 0xc) +#define PCIE_PCS_OSC_DTCT_CONFIG4_VAL 0xff +#define PCIE_PCS_OSC_DTCT_CONFIG_MSK 0x000000ff #define WLAON_QFPROM_PWR_CTRL_REG 0x01f8031c #define QFPROM_PWR_CTRL_VDD4BLOW_MASK 0x4 @@ -73,6 +77,7 @@ struct ath11k_pci { char amss_path[100]; u32 msi_ep_base_data; struct mhi_controller *mhi_ctrl; + const struct ath11k_msi_config *msi_config; unsigned long mhi_state; u32 register_window; diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c index 7968fe4eda22..a612e279ea5b 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.c +++ b/drivers/net/wireless/ath/ath11k/qmi.c @@ -1556,6 +1556,8 @@ static int 
ath11k_qmi_host_cap_send(struct ath11k_base *ab) req.nm_modem |= SLEEP_CLOCK_SELECT_INTERNAL_BIT; } + ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi host cap request\n"); + ret = qmi_txn_init(&ab->qmi.handle, &txn, qmi_wlanfw_host_cap_resp_msg_v01_ei, &resp); if (ret < 0) @@ -1566,7 +1568,7 @@ static int ath11k_qmi_host_cap_send(struct ath11k_base *ab) QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_host_cap_req_msg_v01_ei, &req); if (ret < 0) { - ath11k_warn(ab, "Failed to send host capability request,err = %d\n", ret); + ath11k_warn(ab, "failed to send host capability request: %d\n", ret); goto out; } @@ -1575,7 +1577,7 @@ static int ath11k_qmi_host_cap_send(struct ath11k_base *ab) goto out; if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { - ath11k_warn(ab, "Host capability request failed, result: %d, err: %d\n", + ath11k_warn(ab, "host capability request failed: %d %d\n", resp.resp.result, resp.resp.error); ret = -EINVAL; goto out; @@ -1624,24 +1626,26 @@ static int ath11k_qmi_fw_ind_register_send(struct ath11k_base *ab) if (ret < 0) goto out; + ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi indication register request\n"); + ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_IND_REGISTER_REQ_V01, QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_ind_register_req_msg_v01_ei, req); if (ret < 0) { - ath11k_warn(ab, "Failed to send indication register request, err = %d\n", + ath11k_warn(ab, "failed to send indication register request: %d\n", ret); goto out; } ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); if (ret < 0) { - ath11k_warn(ab, "failed to register fw indication %d\n", ret); + ath11k_warn(ab, "failed to register fw indication: %d\n", ret); goto out; } if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { - ath11k_warn(ab, "FW Ind register request failed, result: %d, err: %d\n", + ath11k_warn(ab, "firmware indication register request failed: %d %d\n", resp->resp.result, resp->resp.error); ret = -EINVAL; goto out; @@ -1699,19 +1703,22 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab) if (ret < 0) goto out; + ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi respond memory request delayed %i\n", + delayed); + ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_RESPOND_MEM_REQ_V01, QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_respond_mem_req_msg_v01_ei, req); if (ret < 0) { - ath11k_warn(ab, "qmi failed to respond memory request, err = %d\n", + ath11k_warn(ab, "failed to respond qmi memory request: %d\n", ret); goto out; } ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); if (ret < 0) { - ath11k_warn(ab, "qmi failed memory request, err = %d\n", ret); + ath11k_warn(ab, "failed to wait qmi memory request: %d\n", ret); goto out; } @@ -1722,7 +1729,7 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab) if (delayed && resp.resp.error == 0) goto out; - ath11k_warn(ab, "Respond mem req failed, result: %d, err: %d\n", + ath11k_warn(ab, "qmi respond memory request failed: %d %d\n", resp.resp.result, resp.resp.error); ret = -EINVAL; goto out; @@ -1765,7 +1772,7 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab) &chunk->paddr, GFP_KERNEL); if (!chunk->vaddr) { - if (ab->qmi.mem_seg_count <= 2) { + if (ab->qmi.mem_seg_count <= ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT) { ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi dma allocation failed (%d B type %u), will try later with small size\n", chunk->size, @@ -1774,7 +1781,8 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab) 
ab->qmi.target_mem_delayed = true; return 0; } - ath11k_err(ab, "failed to alloc memory, size: 0x%x, type: %u\n", + + ath11k_err(ab, "failed to allocate dma memory for qmi (%d B type %u)\n", chunk->size, chunk->type); return -EINVAL; @@ -1843,24 +1851,26 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab) if (ret < 0) goto out; + ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi target cap request\n"); + ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_CAP_REQ_V01, QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_cap_req_msg_v01_ei, &req); if (ret < 0) { - ath11k_warn(ab, "qmi failed to send target cap request, err = %d\n", + ath11k_warn(ab, "failed to send qmi cap request: %d\n", ret); goto out; } ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); if (ret < 0) { - ath11k_warn(ab, "qmi failed target cap request %d\n", ret); + ath11k_warn(ab, "failed to wait qmi cap request: %d\n", ret); goto out; } if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { - ath11k_warn(ab, "qmi targetcap req failed, result: %d, err: %d\n", + ath11k_warn(ab, "qmi cap request failed: %d %d\n", resp.resp.result, resp.resp.error); ret = -EINVAL; goto out; @@ -1923,7 +1933,7 @@ ath11k_qmi_prepare_bdf_download(struct ath11k_base *ab, int type, ret = ath11k_core_fetch_bdf(ab, &bd); if (ret) { - ath11k_warn(ab, "qmi failed to load BDF\n"); + ath11k_warn(ab, "failed to load board file: %d\n", ret); return ret; } @@ -1971,7 +1981,7 @@ static int ath11k_qmi_load_bdf_fixed_addr(struct ath11k_base *ab) bdf_addr = ioremap(ab->hw_params.bdf_addr, ATH11K_QMI_BDF_MAX_SIZE); if (!bdf_addr) { - ath11k_warn(ab, "qmi ioremap error for BDF\n"); + ath11k_warn(ab, "failed ioremap for board file\n"); ret = -EIO; goto out; } @@ -2000,6 +2010,9 @@ static int ath11k_qmi_load_bdf_fixed_addr(struct ath11k_base *ab) if (ret < 0) goto out_qmi_bdf; + ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi bdf download req fixed addr type %d\n", + type); + ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_BDF_DOWNLOAD_REQ_V01, QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN, @@ -2014,7 +2027,7 @@ static int ath11k_qmi_load_bdf_fixed_addr(struct ath11k_base *ab) goto out_qmi_bdf; if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { - ath11k_warn(ab, "qmi BDF download failed, result: %d, err: %d\n", + ath11k_warn(ab, "board file download request failed: %d %d\n", resp.resp.result, resp.resp.error); ret = -EINVAL; goto out_qmi_bdf; @@ -2047,7 +2060,7 @@ static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab) memset(&bd, 0, sizeof(bd)); ret = ath11k_core_fetch_bdf(ab, &bd); if (ret) { - ath11k_warn(ab, "qmi failed to load bdf:\n"); + ath11k_warn(ab, "failed to fetch board file: %d\n", ret); goto out; } @@ -2090,6 +2103,9 @@ static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab) if (ret < 0) goto out_qmi_bdf; + ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi bdf download request remaining %i\n", + remaining); + ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_BDF_DOWNLOAD_REQ_V01, QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN, @@ -2104,7 +2120,7 @@ static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab) goto out_qmi_bdf; if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { - ath11k_warn(ab, "qmi BDF download failed, result: %d, err: %d\n", + ath11k_warn(ab, "bdf download request failed: %d %d\n", resp.resp.result, resp.resp.error); ret = resp.resp.result; goto out_qmi_bdf; @@ -2200,24 +2216,26 @@ static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab) if (ret < 0) goto out; + ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi 
m3 info req\n"); + ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_M3_INFO_REQ_V01, QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN, qmi_wlanfw_m3_info_req_msg_v01_ei, &req); if (ret < 0) { - ath11k_warn(ab, "qmi failed to send M3 information request, err = %d\n", + ath11k_warn(ab, "failed to send m3 information request: %d\n", ret); goto out; } ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); if (ret < 0) { - ath11k_warn(ab, "qmi failed M3 information request %d\n", ret); + ath11k_warn(ab, "failed to wait m3 information request: %d\n", ret); goto out; } if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { - ath11k_warn(ab, "qmi M3 info request failed, result: %d, err: %d\n", + ath11k_warn(ab, "m3 info request failed: %d %d\n", resp.resp.result, resp.resp.error); ret = -EINVAL; goto out; @@ -2246,12 +2264,14 @@ static int ath11k_qmi_wlanfw_mode_send(struct ath11k_base *ab, if (ret < 0) goto out; + ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi wlan mode req mode %d\n", mode); + ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_WLAN_MODE_REQ_V01, QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_wlan_mode_req_msg_v01_ei, &req); if (ret < 0) { - ath11k_warn(ab, "qmi failed to send mode request, mode: %d, err = %d\n", + ath11k_warn(ab, "failed to send wlan mode request (mode %d): %d\n", mode, ret); goto out; } @@ -2262,13 +2282,13 @@ static int ath11k_qmi_wlanfw_mode_send(struct ath11k_base *ab, ath11k_warn(ab, "WLFW service is dis-connected\n"); return 0; } - ath11k_warn(ab, "qmi failed set mode request, mode: %d, err = %d\n", + ath11k_warn(ab, "failed to wait wlan mode request (mode %d): %d\n", mode, ret); goto out; } if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { - ath11k_warn(ab, "Mode request failed, mode: %d, result: %d err: %d\n", + ath11k_warn(ab, "wlan mode request failed (mode: %d): %d %d\n", mode, resp.resp.result, resp.resp.error); ret = -EINVAL; goto out; @@ -2338,24 +2358,26 @@ static int ath11k_qmi_wlanfw_wlan_cfg_send(struct ath11k_base *ab) if (ret < 0) goto out; + ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi wlan cfg req\n"); + ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_WLAN_CFG_REQ_V01, QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_wlan_cfg_req_msg_v01_ei, req); if (ret < 0) { - ath11k_warn(ab, "qmi failed to send wlan config request, err = %d\n", + ath11k_warn(ab, "failed to send wlan config request: %d\n", ret); goto out; } ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); if (ret < 0) { - ath11k_warn(ab, "qmi failed wlan config request, err = %d\n", ret); + ath11k_warn(ab, "failed to wait wlan config request: %d\n", ret); goto out; } if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { - ath11k_warn(ab, "qmi wlan config request failed, result: %d, err: %d\n", + ath11k_warn(ab, "wlan config request failed: %d %d\n", resp.resp.result, resp.resp.error); ret = -EINVAL; goto out; @@ -2370,9 +2392,11 @@ void ath11k_qmi_firmware_stop(struct ath11k_base *ab) { int ret; + ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi firmware stop\n"); + ret = ath11k_qmi_wlanfw_mode_send(ab, ATH11K_FIRMWARE_MODE_OFF); if (ret < 0) { - ath11k_warn(ab, "qmi failed to send wlan mode off\n"); + ath11k_warn(ab, "qmi failed to send wlan mode off: %d\n", ret); return; } } @@ -2382,15 +2406,17 @@ int ath11k_qmi_firmware_start(struct ath11k_base *ab, { int ret; + ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi firmware start\n"); + ret = ath11k_qmi_wlanfw_wlan_cfg_send(ab); if (ret < 0) { - ath11k_warn(ab, "qmi failed to send wlan cfg:%d\n", ret); + 
ath11k_warn(ab, "qmi failed to send wlan cfg: %d\n", ret); return ret; } ret = ath11k_qmi_wlanfw_mode_send(ab, mode); if (ret < 0) { - ath11k_warn(ab, "qmi failed to send wlan fw mode:%d\n", ret); + ath11k_warn(ab, "qmi failed to send wlan fw mode: %d\n", ret); return ret; } @@ -2404,7 +2430,7 @@ static int ath11k_qmi_process_coldboot_calibration(struct ath11k_base *ab) ret = ath11k_qmi_wlanfw_mode_send(ab, ATH11K_FIRMWARE_MODE_COLD_BOOT); if (ret < 0) { - ath11k_warn(ab, "qmi failed to send wlan fw mode:%d\n", ret); + ath11k_warn(ab, "qmi failed to send wlan fw mode: %d\n", ret); return ret; } @@ -2414,7 +2440,7 @@ static int ath11k_qmi_process_coldboot_calibration(struct ath11k_base *ab) (ab->qmi.cal_done == 1), ATH11K_COLD_BOOT_FW_RESET_DELAY); if (timeout <= 0) { - ath11k_warn(ab, "Coldboot Calibration failed - wait ended\n"); + ath11k_warn(ab, "coldboot calibration timed out\n"); return 0; } @@ -2453,13 +2479,14 @@ static int ath11k_qmi_event_server_arrive(struct ath11k_qmi *qmi) ret = ath11k_qmi_fw_ind_register_send(ab); if (ret < 0) { - ath11k_warn(ab, "qmi failed to send FW indication QMI:%d\n", ret); + ath11k_warn(ab, "failed to send qmi firmware indication: %d\n", + ret); return ret; } ret = ath11k_qmi_host_cap_send(ab); if (ret < 0) { - ath11k_warn(ab, "qmi failed to send host cap QMI:%d\n", ret); + ath11k_warn(ab, "failed to send qmi host cap: %d\n", ret); return ret; } @@ -2473,7 +2500,7 @@ static int ath11k_qmi_event_mem_request(struct ath11k_qmi *qmi) ret = ath11k_qmi_respond_fw_mem_request(ab); if (ret < 0) { - ath11k_warn(ab, "qmi failed to respond fw mem req:%d\n", ret); + ath11k_warn(ab, "qmi failed to respond fw mem req: %d\n", ret); return ret; } @@ -2487,7 +2514,8 @@ static int ath11k_qmi_event_load_bdf(struct ath11k_qmi *qmi) ret = ath11k_qmi_request_target_cap(ab); if (ret < 0) { - ath11k_warn(ab, "qmi failed to req target capabilities:%d\n", ret); + ath11k_warn(ab, "failed to requeqst qmi target capabilities: %d\n", + ret); return ret; } @@ -2496,13 +2524,13 @@ static int ath11k_qmi_event_load_bdf(struct ath11k_qmi *qmi) else ret = ath11k_qmi_load_bdf_qmi(ab); if (ret < 0) { - ath11k_warn(ab, "qmi failed to load board data file:%d\n", ret); + ath11k_warn(ab, "failed to load board data file: %d\n", ret); return ret; } ret = ath11k_qmi_wlanfw_m3_info_send(ab); if (ret < 0) { - ath11k_warn(ab, "qmi failed to send m3 info req:%d\n", ret); + ath11k_warn(ab, "failed to send qmi m3 info req: %d\n", ret); return ret; } @@ -2523,7 +2551,7 @@ static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl, if (msg->mem_seg_len == 0 || msg->mem_seg_len > ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01) - ath11k_warn(ab, "Invalid memory segment length: %u\n", + ath11k_warn(ab, "invalid memory segment length: %u\n", msg->mem_seg_len); ab->qmi.mem_seg_count = msg->mem_seg_len; @@ -2538,14 +2566,14 @@ static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl, if (ab->bus_params.fixed_mem_region) { ret = ath11k_qmi_assign_target_mem_chunk(ab); if (ret) { - ath11k_warn(ab, "qmi failed to assign target memory: %d\n", + ath11k_warn(ab, "failed to assign qmi target memory: %d\n", ret); return; } } else { ret = ath11k_qmi_alloc_target_mem_chunk(ab); if (ret) { - ath11k_warn(ab, "qmi failed to alloc target memory: %d\n", + ath11k_warn(ab, "failed to allocate qmi target memory: %d\n", ret); return; } @@ -2639,7 +2667,7 @@ static int ath11k_qmi_ops_new_server(struct qmi_handle *qmi_hdl, ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)sq, sizeof(*sq), 0); if (ret) { - 
ath11k_warn(ab, "qmi failed to connect to remote service %d\n", ret); + ath11k_warn(ab, "failed to connect to qmi remote service: %d\n", ret); return ret; } @@ -2725,7 +2753,7 @@ static void ath11k_qmi_driver_event_work(struct work_struct *work) case ATH11K_QMI_EVENT_COLD_BOOT_CAL_DONE: break; default: - ath11k_warn(ab, "invalid event type: %d", event->type); + ath11k_warn(ab, "invalid qmi event type: %d", event->type); break; } kfree(event); @@ -2746,7 +2774,7 @@ int ath11k_qmi_init_service(struct ath11k_base *ab) ret = qmi_handle_init(&ab->qmi.handle, ATH11K_QMI_RESP_LEN_MAX, &ath11k_qmi_ops, ath11k_qmi_msg_handlers); if (ret < 0) { - ath11k_warn(ab, "failed to initialize qmi handle\n"); + ath11k_warn(ab, "failed to initialize qmi handle: %d\n", ret); return ret; } @@ -2765,7 +2793,7 @@ int ath11k_qmi_init_service(struct ath11k_base *ab) ATH11K_QMI_WLFW_SERVICE_VERS_V01, ab->qmi.service_ins_id); if (ret < 0) { - ath11k_warn(ab, "failed to add qmi lookup\n"); + ath11k_warn(ab, "failed to add qmi lookup: %d\n", ret); destroy_workqueue(ab->qmi.event_wq); return ret; } diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h index 7bad374cc23a..3d5930330703 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.h +++ b/drivers/net/wireless/ath/ath11k/qmi.h @@ -21,11 +21,13 @@ #define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01 0x02 #define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390 0x01 #define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074 0x02 +#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9074 0x07 #define ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 32 #define ATH11K_QMI_RESP_LEN_MAX 8192 -#define ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01 32 +#define ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01 52 #define ATH11K_QMI_CALDB_SIZE 0x480000 #define ATH11K_QMI_BDF_EXT_STR_LENGTH 0x20 +#define ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT 3 #define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035 #define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037 @@ -141,6 +143,7 @@ struct ath11k_qmi { #define QMI_IPQ8074_FW_MEM_MODE 0xFF #define HOST_DDR_REGION_TYPE 0x1 #define BDF_MEM_REGION_TYPE 0x2 +#define M3_DUMP_REGION_TYPE 0x3 #define CALDB_MEM_REGION_TYPE 0x4 struct qmi_wlanfw_host_cap_req_msg_v01 { @@ -216,8 +219,8 @@ struct qmi_wlanfw_ind_register_resp_msg_v01 { u64 fw_status; }; -#define QMI_WLANFW_REQUEST_MEM_IND_MSG_V01_MAX_LEN 1124 -#define QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN 548 +#define QMI_WLANFW_REQUEST_MEM_IND_MSG_V01_MAX_LEN 1824 +#define QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN 888 #define QMI_WLANFW_RESPOND_MEM_RESP_MSG_V01_MAX_LEN 7 #define QMI_WLANFW_REQUEST_MEM_IND_V01 0x0035 #define QMI_WLANFW_RESPOND_MEM_REQ_V01 0x0036 diff --git a/drivers/net/wireless/ath/ath11k/rx_desc.h b/drivers/net/wireless/ath/ath11k/rx_desc.h index 86494da1069a..0cdb4a1f816e 100644 --- a/drivers/net/wireless/ath/ath11k/rx_desc.h +++ b/drivers/net/wireless/ath/ath11k/rx_desc.h @@ -414,7 +414,7 @@ struct rx_attention { #define RX_MPDU_START_RAW_MPDU BIT(0) -struct rx_mpdu_start { +struct rx_mpdu_start_ipq8074 { __le16 info0; __le16 phy_ppdu_id; __le16 ast_index; @@ -440,6 +440,112 @@ struct rx_mpdu_start { __le32 raw; } __packed; +#define RX_MPDU_START_INFO7_REO_DEST_IND GENMASK(4, 0) +#define RX_MPDU_START_INFO7_LMAC_PEER_ID_MSB GENMASK(6, 5) +#define RX_MPDU_START_INFO7_FLOW_ID_TOEPLITZ BIT(7) +#define RX_MPDU_START_INFO7_PKT_SEL_FP_UCAST_DATA BIT(8) +#define RX_MPDU_START_INFO7_PKT_SEL_FP_MCAST_DATA BIT(9) +#define RX_MPDU_START_INFO7_PKT_SEL_FP_CTRL_BAR BIT(10) +#define RX_MPDU_START_INFO7_RXDMA0_SRC_RING_SEL GENMASK(12, 11) 
+#define RX_MPDU_START_INFO7_RXDMA0_DST_RING_SEL GENMASK(14, 13) + +#define RX_MPDU_START_INFO8_REO_QUEUE_DESC_HI GENMASK(7, 0) +#define RX_MPDU_START_INFO8_RECV_QUEUE_NUM GENMASK(23, 8) +#define RX_MPDU_START_INFO8_PRE_DELIM_ERR_WARN BIT(24) +#define RX_MPDU_START_INFO8_FIRST_DELIM_ERR BIT(25) + +#define RX_MPDU_START_INFO9_EPD_EN BIT(0) +#define RX_MPDU_START_INFO9_ALL_FRAME_ENCPD BIT(1) +#define RX_MPDU_START_INFO9_ENC_TYPE GENMASK(5, 2) +#define RX_MPDU_START_INFO9_VAR_WEP_KEY_WIDTH GENMASK(7, 6) +#define RX_MPDU_START_INFO9_MESH_STA GENMASK(9, 8) +#define RX_MPDU_START_INFO9_BSSID_HIT BIT(10) +#define RX_MPDU_START_INFO9_BSSID_NUM GENMASK(14, 11) +#define RX_MPDU_START_INFO9_TID GENMASK(18, 15) + +#define RX_MPDU_START_INFO10_RXPCU_MPDU_FLTR GENMASK(1, 0) +#define RX_MPDU_START_INFO10_SW_FRAME_GRP_ID GENMASK(8, 2) +#define RX_MPDU_START_INFO10_NDP_FRAME BIT(9) +#define RX_MPDU_START_INFO10_PHY_ERR BIT(10) +#define RX_MPDU_START_INFO10_PHY_ERR_MPDU_HDR BIT(11) +#define RX_MPDU_START_INFO10_PROTO_VER_ERR BIT(12) +#define RX_MPDU_START_INFO10_AST_LOOKUP_VALID BIT(13) + +#define RX_MPDU_START_INFO11_MPDU_FCTRL_VALID BIT(0) +#define RX_MPDU_START_INFO11_MPDU_DUR_VALID BIT(1) +#define RX_MPDU_START_INFO11_MAC_ADDR1_VALID BIT(2) +#define RX_MPDU_START_INFO11_MAC_ADDR2_VALID BIT(3) +#define RX_MPDU_START_INFO11_MAC_ADDR3_VALID BIT(4) +#define RX_MPDU_START_INFO11_MAC_ADDR4_VALID BIT(5) +#define RX_MPDU_START_INFO11_MPDU_SEQ_CTRL_VALID BIT(6) +#define RX_MPDU_START_INFO11_MPDU_QOS_CTRL_VALID BIT(7) +#define RX_MPDU_START_INFO11_MPDU_HT_CTRL_VALID BIT(8) +#define RX_MPDU_START_INFO11_ENCRYPT_INFO_VALID BIT(9) +#define RX_MPDU_START_INFO11_MPDU_FRAG_NUMBER GENMASK(13, 10) +#define RX_MPDU_START_INFO11_MORE_FRAG_FLAG BIT(14) +#define RX_MPDU_START_INFO11_FROM_DS BIT(16) +#define RX_MPDU_START_INFO11_TO_DS BIT(17) +#define RX_MPDU_START_INFO11_ENCRYPTED BIT(18) +#define RX_MPDU_START_INFO11_MPDU_RETRY BIT(19) +#define RX_MPDU_START_INFO11_MPDU_SEQ_NUM GENMASK(31, 20) + +#define RX_MPDU_START_INFO12_KEY_ID GENMASK(7, 0) +#define RX_MPDU_START_INFO12_NEW_PEER_ENTRY BIT(8) +#define RX_MPDU_START_INFO12_DECRYPT_NEEDED BIT(9) +#define RX_MPDU_START_INFO12_DECAP_TYPE GENMASK(11, 10) +#define RX_MPDU_START_INFO12_VLAN_TAG_C_PADDING BIT(12) +#define RX_MPDU_START_INFO12_VLAN_TAG_S_PADDING BIT(13) +#define RX_MPDU_START_INFO12_STRIP_VLAN_TAG_C BIT(14) +#define RX_MPDU_START_INFO12_STRIP_VLAN_TAG_S BIT(15) +#define RX_MPDU_START_INFO12_PRE_DELIM_COUNT GENMASK(27, 16) +#define RX_MPDU_START_INFO12_AMPDU_FLAG BIT(28) +#define RX_MPDU_START_INFO12_BAR_FRAME BIT(29) +#define RX_MPDU_START_INFO12_RAW_MPDU BIT(30) + +#define RX_MPDU_START_INFO13_MPDU_LEN GENMASK(13, 0) +#define RX_MPDU_START_INFO13_FIRST_MPDU BIT(14) +#define RX_MPDU_START_INFO13_MCAST_BCAST BIT(15) +#define RX_MPDU_START_INFO13_AST_IDX_NOT_FOUND BIT(16) +#define RX_MPDU_START_INFO13_AST_IDX_TIMEOUT BIT(17) +#define RX_MPDU_START_INFO13_POWER_MGMT BIT(18) +#define RX_MPDU_START_INFO13_NON_QOS BIT(19) +#define RX_MPDU_START_INFO13_NULL_DATA BIT(20) +#define RX_MPDU_START_INFO13_MGMT_TYPE BIT(21) +#define RX_MPDU_START_INFO13_CTRL_TYPE BIT(22) +#define RX_MPDU_START_INFO13_MORE_DATA BIT(23) +#define RX_MPDU_START_INFO13_EOSP BIT(24) +#define RX_MPDU_START_INFO13_FRAGMENT BIT(25) +#define RX_MPDU_START_INFO13_ORDER BIT(26) +#define RX_MPDU_START_INFO13_UAPSD_TRIGGER BIT(27) +#define RX_MPDU_START_INFO13_ENCRYPT_REQUIRED BIT(28) +#define RX_MPDU_START_INFO13_DIRECTED BIT(29) +#define RX_MPDU_START_INFO13_AMSDU_PRESENT BIT(30) + +struct 
rx_mpdu_start_qcn9074 { + __le32 info7; + __le32 reo_queue_desc_lo; + __le32 info8; + __le32 pn[4]; + __le32 info9; + __le32 peer_meta_data; + __le16 info10; + __le16 phy_ppdu_id; + __le16 ast_index; + __le16 sw_peer_id; + __le32 info11; + __le32 info12; + __le32 info13; + __le16 frame_ctrl; + __le16 duration; + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; + u8 addr3[ETH_ALEN]; + __le16 seq_ctrl; + u8 addr4[ETH_ALEN]; + __le16 qos_ctrl; + __le32 ht_ctrl; +} __packed; + /* rx_mpdu_start * * rxpcu_mpdu_filter_in_category @@ -672,7 +778,19 @@ enum rx_msdu_start_reception_type { #define RX_MSDU_START_INFO3_RECEPTION_TYPE GENMASK(23, 21) #define RX_MSDU_START_INFO3_MIMO_SS_BITMAP GENMASK(31, 24) -struct rx_msdu_start { +struct rx_msdu_start_ipq8074 { + __le16 info0; + __le16 phy_ppdu_id; + __le32 info1; + __le32 info2; + __le32 toeplitz_hash; + __le32 flow_id_toeplitz; + __le32 info3; + __le32 ppdu_start_timestamp; + __le32 phy_meta_data; +} __packed; + +struct rx_msdu_start_qcn9074 { __le16 info0; __le16 phy_ppdu_id; __le32 info1; @@ -682,6 +800,8 @@ struct rx_msdu_start { __le32 info3; __le32 ppdu_start_timestamp; __le32 phy_meta_data; + __le16 vlan_ctag_c1; + __le16 vlan_stag_c1; } __packed; /* rx_msdu_start @@ -894,7 +1014,7 @@ struct rx_msdu_start { #define RX_MSDU_END_INFO5_REO_DEST_IND GENMASK(5, 1) #define RX_MSDU_END_INFO5_FLOW_IDX GENMASK(25, 6) -struct rx_msdu_end { +struct rx_msdu_end_ipq8074 { __le16 info0; __le16 phy_ppdu_id; __le16 ip_hdr_cksum; @@ -917,6 +1037,58 @@ struct rx_msdu_end { __le16 sa_sw_peer_id; } __packed; +#define RX_MSDU_END_MPDU_LENGTH_INFO GENMASK(13, 0) + +#define RX_MSDU_END_INFO2_DA_OFFSET GENMASK(5, 0) +#define RX_MSDU_END_INFO2_SA_OFFSET GENMASK(11, 6) +#define RX_MSDU_END_INFO2_DA_OFFSET_VALID BIT(12) +#define RX_MSDU_END_INFO2_SA_OFFSET_VALID BIT(13) +#define RX_MSDU_END_INFO2_L3_TYPE GENMASK(31, 16) + +#define RX_MSDU_END_INFO4_SA_IDX_TIMEOUT BIT(0) +#define RX_MSDU_END_INFO4_DA_IDX_TIMEOUT BIT(1) +#define RX_MSDU_END_INFO4_MSDU_LIMIT_ERR BIT(2) +#define RX_MSDU_END_INFO4_FLOW_IDX_TIMEOUT BIT(3) +#define RX_MSDU_END_INFO4_FLOW_IDX_INVALID BIT(4) +#define RX_MSDU_END_INFO4_WIFI_PARSER_ERR BIT(5) +#define RX_MSDU_END_INFO4_AMSDU_PARSER_ERR BIT(6) +#define RX_MSDU_END_INFO4_SA_IS_VALID BIT(7) +#define RX_MSDU_END_INFO4_DA_IS_VALID BIT(8) +#define RX_MSDU_END_INFO4_DA_IS_MCBC BIT(9) +#define RX_MSDU_END_INFO4_L3_HDR_PADDING GENMASK(11, 10) +#define RX_MSDU_END_INFO4_FIRST_MSDU BIT(12) +#define RX_MSDU_END_INFO4_LAST_MSDU BIT(13) + +#define RX_MSDU_END_INFO6_AGGR_COUNT GENMASK(7, 0) +#define RX_MSDU_END_INFO6_FLOW_AGGR_CONTN BIT(8) +#define RX_MSDU_END_INFO6_FISA_TIMEOUT BIT(9) + +struct rx_msdu_end_qcn9074 { + __le16 info0; + __le16 phy_ppdu_id; + __le16 ip_hdr_cksum; + __le16 mpdu_length_info; + __le32 info1; + __le32 rule_indication[2]; + __le32 info2; + __le32 ipv6_options_crc; + __le32 tcp_seq_num; + __le32 tcp_ack_num; + __le16 info3; + __le16 window_size; + __le16 tcp_udp_cksum; + __le16 info4; + __le16 sa_idx; + __le16 da_idx; + __le32 info5; + __le32 fse_metadata; + __le16 cce_metadata; + __le16 sa_sw_peer_id; + __le32 info6; + __le16 cum_l4_cksum; + __le16 cum_ip_length; +} __packed; + /* rx_msdu_end * * rxpcu_mpdu_filter_in_category @@ -1190,16 +1362,16 @@ struct rx_mpdu_end { #define HAL_RX_DESC_HDR_STATUS_LEN 120 -struct hal_rx_desc { +struct hal_rx_desc_ipq8074 { __le32 msdu_end_tag; - struct rx_msdu_end msdu_end; + struct rx_msdu_end_ipq8074 msdu_end; __le32 rx_attn_tag; struct rx_attention attention; __le32 msdu_start_tag; - struct rx_msdu_start 
msdu_start; + struct rx_msdu_start_ipq8074 msdu_start; u8 rx_padding0[HAL_RX_DESC_PADDING0_BYTES]; __le32 mpdu_start_tag; - struct rx_mpdu_start mpdu_start; + struct rx_mpdu_start_ipq8074 mpdu_start; __le32 mpdu_end_tag; struct rx_mpdu_end mpdu_end; u8 rx_padding1[HAL_RX_DESC_PADDING1_BYTES]; @@ -1209,6 +1381,32 @@ struct hal_rx_desc { u8 msdu_payload[0]; } __packed; +struct hal_rx_desc_qcn9074 { + __le32 msdu_end_tag; + struct rx_msdu_end_qcn9074 msdu_end; + __le32 rx_attn_tag; + struct rx_attention attention; + __le32 msdu_start_tag; + struct rx_msdu_start_qcn9074 msdu_start; + u8 rx_padding0[HAL_RX_DESC_PADDING0_BYTES]; + __le32 mpdu_start_tag; + struct rx_mpdu_start_qcn9074 mpdu_start; + __le32 mpdu_end_tag; + struct rx_mpdu_end mpdu_end; + u8 rx_padding1[HAL_RX_DESC_PADDING1_BYTES]; + __le32 hdr_status_tag; + __le32 phy_ppdu_id; + u8 hdr_status[HAL_RX_DESC_HDR_STATUS_LEN]; + u8 msdu_payload[0]; +} __packed; + +struct hal_rx_desc { + union { + struct hal_rx_desc_ipq8074 ipq8074; + struct hal_rx_desc_qcn9074 qcn9074; + } u; +} __packed; + #define HAL_RX_RU_ALLOC_TYPE_MAX 6 #define RU_26 1 #define RU_52 2 diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index cccfd3bd4d27..5ca2d80679b6 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -5417,31 +5417,6 @@ int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb, return 0; } -static int -ath11k_pull_pdev_temp_ev(struct ath11k_base *ab, u8 *evt_buf, - u32 len, const struct wmi_pdev_temperature_event *ev) -{ - const void **tb; - int ret; - - tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC); - if (IS_ERR(tb)) { - ret = PTR_ERR(tb); - ath11k_warn(ab, "failed to parse tlv: %d\n", ret); - return ret; - } - - ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT]; - if (!ev) { - ath11k_warn(ab, "failed to fetch pdev temp ev"); - kfree(tb); - return -EPROTO; - } - - kfree(tb); - return 0; -} - size_t ath11k_wmi_fw_stats_num_vdevs(struct list_head *head) { struct ath11k_fw_stats_vdev *i; @@ -6196,10 +6171,8 @@ static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb) } } - /* TODO: Pending handle beacon implementation - *if (ieee80211_is_beacon(hdr->frame_control)) - * ath11k_mac_handle_beacon(ar, skb); - */ + if (ieee80211_is_beacon(hdr->frame_control)) + ath11k_mac_handle_beacon(ar, skb); ath11k_dbg(ab, ATH11K_DBG_MGMT, "event mgmt rx skb %pK len %d ftype %02x stype %02x\n", @@ -6418,10 +6391,7 @@ static void ath11k_roam_event(struct ath11k_base *ab, struct sk_buff *skb) switch (roam_ev.reason) { case WMI_ROAM_REASON_BEACON_MISS: - /* TODO: Pending beacon miss and connection_loss_work - * implementation - * ath11k_mac_handle_beacon_miss(ar, vdev_id); - */ + ath11k_mac_handle_beacon_miss(ar, roam_ev.vdev_id); break; case WMI_ROAM_REASON_BETTER_AP: case WMI_ROAM_REASON_LOW_RSSI: @@ -6849,23 +6819,37 @@ ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab, struct sk_buff *skb) { struct ath11k *ar; - struct wmi_pdev_temperature_event ev = {0}; + const void **tb; + const struct wmi_pdev_temperature_event *ev; + int ret; - if (ath11k_pull_pdev_temp_ev(ab, skb->data, skb->len, &ev) != 0) { - ath11k_warn(ab, "failed to extract pdev temperature event"); + tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath11k_warn(ab, "failed to parse tlv: %d\n", ret); + return; + } + + ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT]; + if (!ev) { + ath11k_warn(ab, "failed to fetch pdev 
temp ev"); + kfree(tb); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, - "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id); + "pdev temperature ev temp %d pdev_id %d\n", ev->temp, ev->pdev_id); - ar = ath11k_mac_get_ar_by_pdev_id(ab, ev.pdev_id); + ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id); if (!ar) { - ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id); + ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev->pdev_id); + kfree(tb); return; } - ath11k_thermal_event_temperature(ar, ev.temp); + ath11k_thermal_event_temperature(ar, ev->temp); + + kfree(tb); } static void ath11k_fils_discovery_event(struct ath11k_base *ab, diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 01f9c26f9bf3..e9a36dd7144f 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -617,7 +617,6 @@ static int ath9k_of_init(struct ath_softc *sc) struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); enum ath_bus_type bus_type = common->bus_ops->ath_bus_type; - const char *mac; char eeprom_name[100]; int ret; @@ -640,9 +639,7 @@ static int ath9k_of_init(struct ath_softc *sc) ah->ah_flags |= AH_NO_EEP_SWAP; } - mac = of_get_mac_address(np); - if (!IS_ERR(mac)) - ether_addr_copy(common->macaddr, mac); + of_get_mac_address(np, common->macaddr); return 0; } diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h index 0d38100d6e4f..84a8ce0784b1 100644 --- a/drivers/net/wireless/ath/carl9170/carl9170.h +++ b/drivers/net/wireless/ath/carl9170/carl9170.h @@ -631,14 +631,9 @@ static inline u16 carl9170_get_seq(struct sk_buff *skb) return get_seq_h(carl9170_get_hdr(skb)); } -static inline u16 get_tid_h(struct ieee80211_hdr *hdr) -{ - return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK; -} - static inline u16 carl9170_get_tid(struct sk_buff *skb) { - return get_tid_h(carl9170_get_hdr(skb)); + return ieee80211_get_tid(carl9170_get_hdr(skb)); } static inline struct ieee80211_vif * diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c index 6b8446ff48c8..88444fe6d1c6 100644 --- a/drivers/net/wireless/ath/carl9170/tx.c +++ b/drivers/net/wireless/ath/carl9170/tx.c @@ -394,7 +394,7 @@ static void carl9170_tx_status_process_ampdu(struct ar9170 *ar, if (unlikely(!sta)) goto out_rcu; - tid = get_tid_h(hdr); + tid = ieee80211_get_tid(hdr); sta_info = (void *) sta->drv_priv; tid_info = rcu_dereference(sta_info->agg[tid]); diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 823ec6e78a22..02ad44997e87 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -1456,7 +1456,7 @@ static void wil_link_stats_store_basic(struct wil6210_vif *vif, u8 cid = basic->cid; struct wil_sta_info *sta; - if (cid < 0 || cid >= wil->max_assoc_sta) { + if (cid >= wil->max_assoc_sta) { wil_err(wil, "invalid cid %d\n", cid); return; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h index 4146faeed344..44ba6f389fa9 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h @@ -112,7 +112,6 @@ do { \ extern int brcmf_msg_level; -struct brcmf_bus; struct brcmf_pub; #ifdef DEBUG struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr); diff --git 
a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.h index ee273e3bb101..e000ef78928c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.h @@ -28,7 +28,7 @@ struct brcmf_usbdev { int ntxq, nrxq, rxsize; u32 bus_mtu; int devid; - int chiprev; /* chip revsion number */ + int chiprev; /* chip revision number */ }; /* IO Request Block (IRB) */ diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index 60db38c38960..fd37d4d2983b 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -3817,6 +3817,68 @@ static inline void set_auth_type(struct airo_info *local, int auth_type) local->last_auth = auth_type; } +static int noinline_for_stack airo_readconfig(struct airo_info *ai, u8 *mac, int lock) +{ + int i, status; + /* large variables, so don't inline this function, + * maybe change to kmalloc + */ + tdsRssiRid rssi_rid; + CapabilityRid cap_rid; + + kfree(ai->SSID); + ai->SSID = NULL; + // general configuration (read/modify/write) + status = readConfigRid(ai, lock); + if (status != SUCCESS) return ERROR; + + status = readCapabilityRid(ai, &cap_rid, lock); + if (status != SUCCESS) return ERROR; + + status = PC4500_readrid(ai, RID_RSSI, &rssi_rid, sizeof(rssi_rid), lock); + if (status == SUCCESS) { + if (ai->rssi || (ai->rssi = kmalloc(512, GFP_KERNEL)) != NULL) + memcpy(ai->rssi, (u8*)&rssi_rid + 2, 512); /* Skip RID length member */ + } + else { + kfree(ai->rssi); + ai->rssi = NULL; + if (cap_rid.softCap & cpu_to_le16(8)) + ai->config.rmode |= RXMODE_NORMALIZED_RSSI; + else + airo_print_warn(ai->dev->name, "unknown received signal " + "level scale"); + } + ai->config.opmode = adhoc ? MODE_STA_IBSS : MODE_STA_ESS; + set_auth_type(ai, AUTH_OPEN); + ai->config.modulation = MOD_CCK; + + if (le16_to_cpu(cap_rid.len) >= sizeof(cap_rid) && + (cap_rid.extSoftCap & cpu_to_le16(1)) && + micsetup(ai) == SUCCESS) { + ai->config.opmode |= MODE_MIC; + set_bit(FLAG_MIC_CAPABLE, &ai->flags); + } + + /* Save off the MAC */ + for (i = 0; i < ETH_ALEN; i++) { + mac[i] = ai->config.macAddr[i]; + } + + /* Check to see if there are any insmod configured + rates to add */ + if (rates[0]) { + memset(ai->config.rates, 0, sizeof(ai->config.rates)); + for (i = 0; i < 8 && rates[i]; i++) { + ai->config.rates[i] = rates[i]; + } + } + set_bit (FLAG_COMMIT, &ai->flags); + + return SUCCESS; +} + + static u16 setup_card(struct airo_info *ai, u8 *mac, int lock) { Cmd cmd; @@ -3863,58 +3925,9 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock) if (lock) up(&ai->sem); if (ai->config.len == 0) { - int i; - tdsRssiRid rssi_rid; - CapabilityRid cap_rid; - - kfree(ai->SSID); - ai->SSID = NULL; - // general configuration (read/modify/write) - status = readConfigRid(ai, lock); - if (status != SUCCESS) return ERROR; - - status = readCapabilityRid(ai, &cap_rid, lock); - if (status != SUCCESS) return ERROR; - - status = PC4500_readrid(ai, RID_RSSI,&rssi_rid, sizeof(rssi_rid), lock); - if (status == SUCCESS) { - if (ai->rssi || (ai->rssi = kmalloc(512, GFP_KERNEL)) != NULL) - memcpy(ai->rssi, (u8*)&rssi_rid + 2, 512); /* Skip RID length member */ - } - else { - kfree(ai->rssi); - ai->rssi = NULL; - if (cap_rid.softCap & cpu_to_le16(8)) - ai->config.rmode |= RXMODE_NORMALIZED_RSSI; - else - airo_print_warn(ai->dev->name, "unknown received signal " - "level scale"); - } - ai->config.opmode = adhoc ? 
MODE_STA_IBSS : MODE_STA_ESS; - set_auth_type(ai, AUTH_OPEN); - ai->config.modulation = MOD_CCK; - - if (le16_to_cpu(cap_rid.len) >= sizeof(cap_rid) && - (cap_rid.extSoftCap & cpu_to_le16(1)) && - micsetup(ai) == SUCCESS) { - ai->config.opmode |= MODE_MIC; - set_bit(FLAG_MIC_CAPABLE, &ai->flags); - } - - /* Save off the MAC */ - for (i = 0; i < ETH_ALEN; i++) { - mac[i] = ai->config.macAddr[i]; - } - - /* Check to see if there are any insmod configured - rates to add */ - if (rates[0]) { - memset(ai->config.rates, 0, sizeof(ai->config.rates)); - for (i = 0; i < 8 && rates[i]; i++) { - ai->config.rates[i] = rates[i]; - } - } - set_bit (FLAG_COMMIT, &ai->flags); + status = airo_readconfig(ai, mac, lock); + if (status != SUCCESS) + return ERROR; } /* Setup the SSIDs if present */ diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c index a0cf78c418ac..903de34028ef 100644 --- a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c +++ b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c @@ -633,8 +633,10 @@ int libipw_wx_set_encodeext(struct libipw_device *ieee, } if (ext->alg != IW_ENCODE_ALG_NONE) { - memcpy(sec.keys[idx], ext->key, ext->key_len); - sec.key_sizes[idx] = ext->key_len; + int key_len = clamp_val(ext->key_len, 0, SCM_KEY_LEN); + + memcpy(sec.keys[idx], ext->key, key_len); + sec.key_sizes[idx] = key_len; sec.flags |= (1 << idx); if (ext->alg == IW_ENCODE_ALG_WEP) { sec.encode_alg[idx] = SEC_ALG_WEP; diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c index 4ca8212d4fa4..6ff2674f8466 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c @@ -751,9 +751,7 @@ il3945_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb) static void il3945_hdl_add_sta(struct il_priv *il, struct il_rx_buf *rxb) { -#ifdef CONFIG_IWLEGACY_DEBUG struct il_rx_pkt *pkt = rxb_addr(rxb); -#endif D_RX("Received C_ADD_STA: 0x%02X\n", pkt->u.status); } diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c index 0651a6a416d1..219fed91cac5 100644 --- a/drivers/net/wireless/intel/iwlegacy/common.c +++ b/drivers/net/wireless/intel/iwlegacy/common.c @@ -1430,10 +1430,8 @@ static void il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb) { -#ifdef CONFIG_IWLEGACY_DEBUG struct il_rx_pkt *pkt = rxb_addr(rxb); struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw; -#endif D_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n", scan_notif->scanned_channels, scan_notif->tsf_low, diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h index ea1b1bb7ddcb..40877ef1fbf2 100644 --- a/drivers/net/wireless/intel/iwlegacy/common.h +++ b/drivers/net/wireless/intel/iwlegacy/common.h @@ -2937,7 +2937,7 @@ do { \ } while (0) #else -#define IL_DBG(level, fmt, args...) +#define IL_DBG(level, fmt, args...) 
no_printk(fmt, ##args) static inline void il_print_hex_dump(struct il_priv *il, int level, const void *p, u32 len) { diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index 0a0e25a3c681..c2315dea9a23 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -9,7 +9,7 @@ #include "iwl-prph.h" /* Highest firmware API version supported */ -#define IWL_22000_UCODE_API_MAX 62 +#define IWL_22000_UCODE_API_MAX 63 /* Lowest firmware API version supported */ #define IWL_22000_UCODE_API_MIN 39 @@ -48,6 +48,10 @@ #define IWL_MA_A_GF4_A_FW_PRE "iwlwifi-ma-a0-gf4-a0-" #define IWL_MA_A_MR_A_FW_PRE "iwlwifi-ma-a0-mr-a0-" #define IWL_SNJ_A_MR_A_FW_PRE "iwlwifi-SoSnj-a0-mr-a0-" +#define IWL_BZ_A_HR_B_FW_PRE "iwlwifi-bz-a0-hr-b0-" +#define IWL_BZ_A_GF_A_FW_PRE "iwlwifi-bz-a0-gf-a0-" +#define IWL_BZ_A_GF4_A_FW_PRE "iwlwifi-bz-a0-gf4-a0-" +#define IWL_BZ_A_MR_A_FW_PRE "iwlwifi-bz-a0-mr-a0-" #define IWL_QU_B_HR_B_MODULE_FIRMWARE(api) \ IWL_QU_B_HR_B_FW_PRE __stringify(api) ".ucode" @@ -91,6 +95,14 @@ IWL_MA_A_MR_A_FW_PRE __stringify(api) ".ucode" #define IWL_SNJ_A_MR_A_MODULE_FIRMWARE(api) \ IWL_SNJ_A_MR_A_FW_PRE __stringify(api) ".ucode" +#define IWL_BZ_A_HR_B_MODULE_FIRMWARE(api) \ + IWL_BZ_A_HR_B_FW_PRE __stringify(api) ".ucode" +#define IWL_BZ_A_GF_A_MODULE_FIRMWARE(api) \ + IWL_BZ_A_GF_A_FW_PRE __stringify(api) ".ucode" +#define IWL_BZ_A_GF4_A_MODULE_FIRMWARE(api) \ + IWL_BZ_A_GF4_A_FW_PRE __stringify(api) ".ucode" +#define IWL_BZ_A_MR_A_MODULE_FIRMWARE(api) \ + IWL_BZ_A_MR_A_FW_PRE __stringify(api) ".ucode" static const struct iwl_base_params iwl_22000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, @@ -357,13 +369,27 @@ const struct iwl_cfg_trans_params iwl_ma_trans_cfg = { .umac_prph_offset = 0x300000 }; +const struct iwl_cfg_trans_params iwl_bz_trans_cfg = { + .device_family = IWL_DEVICE_FAMILY_AX210, + .base_params = &iwl_ax210_base_params, + .mq_rx_supported = true, + .use_tfh = true, + .rf_id = true, + .gen2 = true, + .integrated = true, + .umac_prph_offset = 0x300000, + .xtal_latency = 12000, + .low_latency_xtal = true, + .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, +}; + const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101"; const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz"; const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz"; const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203"; -const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6 AX211 160MHz"; -const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6 AX411 160MHz"; -const char iwl_ma_name[] = "Intel(R) Wi-Fi 6"; +const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz"; +const char iwl_ax221_name[] = "Intel(R) Wi-Fi 6E AX221 160MHz"; +const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6E AX411 160MHz"; const char iwl_ax200_killer_1650w_name[] = "Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)"; @@ -373,6 +399,10 @@ const char iwl_ax201_killer_1650s_name[] = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)"; const char iwl_ax201_killer_1650i_name[] = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)"; +const char iwl_ax210_killer_1675w_name[] = + "Killer(R) Wi-Fi 6E AX1675w 160MHz Wireless Network Adapter (210D2W)"; +const char iwl_ax210_killer_1675x_name[] = + "Killer(R) Wi-Fi 6E AX1675x 160MHz Wireless Network Adapter (210NGW)"; const struct iwl_cfg iwl_qu_b0_hr1_b0 = { .fw_name_pre = IWL_QU_B_HR_B_FW_PRE, @@ -578,7 +608,7 @@ const struct iwl_cfg 
iwl_qnj_b0_hr_b0_cfg = { .num_rbds = IWL_NUM_RBDS_22000_HE, }; -const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0 = { +const struct iwl_cfg iwlax210_2ax_cfg_so_jf_b0 = { .name = "Intel(R) Wireless-AC 9560 160MHz", .fw_name_pre = IWL_SO_A_JF_B_FW_PRE, IWL_DEVICE_AX210, @@ -719,6 +749,34 @@ const struct iwl_cfg iwl_cfg_quz_a0_hr_b0 = { .num_rbds = IWL_NUM_RBDS_22000_HE, }; +const struct iwl_cfg iwl_cfg_bz_a0_hr_b0 = { + .fw_name_pre = IWL_BZ_A_HR_B_FW_PRE, + .uhb_supported = true, + IWL_DEVICE_AX210, + .num_rbds = IWL_NUM_RBDS_AX210_HE, +}; + +const struct iwl_cfg iwl_cfg_bz_a0_gf_a0 = { + .fw_name_pre = IWL_BZ_A_GF_A_FW_PRE, + .uhb_supported = true, + IWL_DEVICE_AX210, + .num_rbds = IWL_NUM_RBDS_AX210_HE, +}; + +const struct iwl_cfg iwl_cfg_bz_a0_gf4_a0 = { + .fw_name_pre = IWL_BZ_A_GF4_A_FW_PRE, + .uhb_supported = true, + IWL_DEVICE_AX210, + .num_rbds = IWL_NUM_RBDS_AX210_HE, +}; + +const struct iwl_cfg iwl_cfg_bz_a0_mr_a0 = { + .fw_name_pre = IWL_BZ_A_MR_A_FW_PRE, + .uhb_supported = true, + IWL_DEVICE_AX210, + .num_rbds = IWL_NUM_RBDS_AX210_HE, +}; + MODULE_FIRMWARE(IWL_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QNJ_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); @@ -740,3 +798,7 @@ MODULE_FIRMWARE(IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_MA_A_GF4_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_SNJ_A_MR_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_BZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_BZ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_BZ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_BZ_A_MR_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index c4164bf508e5..df1297358379 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c @@ -168,7 +168,7 @@ const char iwl9462_160_name[] = "Intel(R) Wireless-AC 9462 160MHz"; const char iwl9560_160_name[] = "Intel(R) Wireless-AC 9560 160MHz"; const char iwl9260_killer_1550_name[] = - "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW)"; + "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW) 160MHz"; const char iwl9560_killer_1550i_name[] = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)"; const char iwl9560_killer_1550s_name[] = diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index 82a4f7e8ba54..e31bba836c6f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2019-2020 Intel Corporation + * Copyright (C) 2019-2021 Intel Corporation */ #include <linux/uuid.h> #include "iwl-drv.h" @@ -181,14 +181,13 @@ union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev, /* * We need at least two packages, one for the revision and one * for the data itself. Also check that the revision is valid - * (i.e. it is an integer smaller than 2, as we currently support only - * 2 revisions). + * (i.e. it is an integer (each caller has to check by itself + * if the returned revision is supported)). 
*/ if (data->type != ACPI_TYPE_PACKAGE || data->package.count < 2 || - data->package.elements[0].type != ACPI_TYPE_INTEGER || - data->package.elements[0].integer.value > 1) { - IWL_DEBUG_DEV_RADIO(dev, "Unsupported packages structure\n"); + data->package.elements[0].type != ACPI_TYPE_INTEGER) { + IWL_DEBUG_DEV_RADIO(dev, "Invalid packages structure\n"); return ERR_PTR(-EINVAL); } @@ -696,3 +695,70 @@ int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, return 0; } IWL_EXPORT_SYMBOL(iwl_sar_geo_init); + +static u32 iwl_acpi_eval_dsm_func(struct device *dev, enum iwl_dsm_funcs_rev_0 eval_func) +{ + union acpi_object *obj; + u32 ret; + + obj = iwl_acpi_get_dsm_object(dev, 0, + eval_func, NULL, + &iwl_guid); + + if (IS_ERR(obj)) { + IWL_DEBUG_DEV_RADIO(dev, + "ACPI: DSM func '%d': Got Error in obj = %ld\n", + eval_func, + PTR_ERR(obj)); + return 0; + } + + if (obj->type != ACPI_TYPE_INTEGER) { + IWL_DEBUG_DEV_RADIO(dev, + "ACPI: DSM func '%d' did not return a valid object, type=%d\n", + eval_func, + obj->type); + ret = 0; + goto out; + } + + ret = obj->integer.value; + IWL_DEBUG_DEV_RADIO(dev, + "ACPI: DSM method evaluated: func='%d', ret=%d\n", + eval_func, + ret); +out: + ACPI_FREE(obj); + return ret; +} + +__le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt) +{ + u32 ret; + __le32 config_bitmap = 0; + + /* + ** Evaluate func 'DSM_FUNC_ENABLE_INDONESIA_5G2' + */ + ret = iwl_acpi_eval_dsm_func(fwrt->dev, DSM_FUNC_ENABLE_INDONESIA_5G2); + + if (ret == DSM_VALUE_INDONESIA_ENABLE) + config_bitmap |= + cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK); + + /* + ** Evaluate func 'DSM_FUNC_DISABLE_SRD' + */ + ret = iwl_acpi_eval_dsm_func(fwrt->dev, DSM_FUNC_DISABLE_SRD); + + if (ret == DSM_VALUE_SRD_PASSIVE) + config_bitmap |= + cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK); + + else if (ret == DSM_VALUE_SRD_DISABLE) + config_bitmap |= + cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK); + + return config_bitmap; +} +IWL_EXPORT_SYMBOL(iwl_acpi_get_lari_config_bitmap); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h index 030c50082568..d16e6ec08c9f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h @@ -53,8 +53,8 @@ #define ACPI_WGDS_TABLE_SIZE 3 -#define ACPI_PPAG_WIFI_DATA_SIZE ((IWL_NUM_CHAIN_LIMITS * \ - IWL_NUM_SUB_BANDS) + 2) +#define ACPI_PPAG_WIFI_DATA_SIZE_V1 ((IWL_NUM_CHAIN_LIMITS * \ + IWL_NUM_SUB_BANDS_V1) + 2) #define ACPI_PPAG_WIFI_DATA_SIZE_V2 ((IWL_NUM_CHAIN_LIMITS * \ IWL_NUM_SUB_BANDS_V2) + 2) @@ -77,6 +77,7 @@ enum iwl_dsm_funcs_rev_0 { DSM_FUNC_QUERY = 0, DSM_FUNC_DISABLE_SRD = 1, DSM_FUNC_ENABLE_INDONESIA_5G2 = 2, + DSM_FUNC_11AX_ENABLEMENT = 6, }; enum iwl_dsm_values_srd { @@ -160,6 +161,8 @@ int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, __le32 *block_list_array, int *block_list_size); +__le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt); + #else /* CONFIG_ACPI */ static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method) @@ -235,5 +238,11 @@ static inline int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, { return -ENOENT; } + +static inline __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt) +{ + return 0; +} + #endif /* CONFIG_ACPI */ #endif /* __iwl_fw_acpi__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h index ceeef8749765..0e38eb1cd75d 100644 --- 
a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h @@ -147,6 +147,10 @@ enum iwl_tof_mcsi_enable { * @IWL_TOF_RESPONDER_CMD_VALID_RETRY_ON_ALGO_FAIL: retry on algorithm failure * is valid * @IWL_TOF_RESPONDER_CMD_VALID_STA_ID: station ID is valid + * @IWL_TOF_RESPONDER_CMD_VALID_NDP_SUPPORT: enable/disable NDP ranging support + * is valid + * @IWL_TOF_RESPONDER_CMD_VALID_NDP_PARAMS: NDP parameters are valid + * @IWL_TOF_RESPONDER_CMD_VALID_LMR_FEEDBACK: LMR feedback support is valid */ enum iwl_tof_responder_cmd_valid_field { IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO = BIT(0), @@ -162,6 +166,9 @@ enum iwl_tof_responder_cmd_valid_field { IWL_TOF_RESPONDER_CMD_VALID_FAST_ALGO_SUPPORT = BIT(10), IWL_TOF_RESPONDER_CMD_VALID_RETRY_ON_ALGO_FAIL = BIT(11), IWL_TOF_RESPONDER_CMD_VALID_STA_ID = BIT(12), + IWL_TOF_RESPONDER_CMD_VALID_NDP_SUPPORT = BIT(22), + IWL_TOF_RESPONDER_CMD_VALID_NDP_PARAMS = BIT(23), + IWL_TOF_RESPONDER_CMD_VALID_LMR_FEEDBACK = BIT(24), }; /** @@ -176,6 +183,9 @@ enum iwl_tof_responder_cmd_valid_field { * @IWL_TOF_RESPONDER_FLAGS_FAST_ALGO_SUPPORT: fast algorithm support * @IWL_TOF_RESPONDER_FLAGS_RETRY_ON_ALGO_FAIL: retry on algorithm fail * @IWL_TOF_RESPONDER_FLAGS_FTM_TX_ANT: TX antenna mask + * @IWL_TOF_RESPONDER_FLAGS_NDP_SUPPORT: support NDP ranging + * @IWL_TOF_RESPONDER_FLAGS_LMR_FEEDBACK: request for LMR feedback if the + * initiator supports it */ enum iwl_tof_responder_cfg_flags { IWL_TOF_RESPONDER_FLAGS_NON_ASAP_SUPPORT = BIT(0), @@ -188,6 +198,8 @@ enum iwl_tof_responder_cfg_flags { IWL_TOF_RESPONDER_FLAGS_FAST_ALGO_SUPPORT = BIT(9), IWL_TOF_RESPONDER_FLAGS_RETRY_ON_ALGO_FAIL = BIT(10), IWL_TOF_RESPONDER_FLAGS_FTM_TX_ANT = RATE_MCS_ANT_ABC_MSK, + IWL_TOF_RESPONDER_FLAGS_NDP_SUPPORT = BIT(24), + IWL_TOF_RESPONDER_FLAGS_LMR_FEEDBACK = BIT(25), }; /** @@ -226,7 +238,7 @@ struct iwl_tof_responder_config_cmd_v6 { } __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_6 */ /** - * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug) + * struct iwl_tof_responder_config_cmd_v7 - ToF AP mode (for debug) * @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field * @responder_cfg_flags: &iwl_tof_responder_cfg_flags * @format_bw: bits 0 - 3: &enum iwl_location_frame_format. @@ -245,7 +257,7 @@ struct iwl_tof_responder_config_cmd_v6 { * @bssid: Current AP BSSID * @reserved2: reserved */ -struct iwl_tof_responder_config_cmd { +struct iwl_tof_responder_config_cmd_v7 { __le32 cmd_valid_fields; __le32 responder_cfg_flags; u8 format_bw; @@ -259,7 +271,56 @@ struct iwl_tof_responder_config_cmd { __le16 specific_calib; u8 bssid[ETH_ALEN]; __le16 reserved2; -} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_6 */ +} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_7 */ + +#define IWL_RESPONDER_STS_POS 3 +#define IWL_RESPONDER_TOTAL_LTF_POS 6 + +/** + * struct iwl_tof_responder_config_cmd_v8 - ToF AP mode (for debug) + * @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field + * @responder_cfg_flags: &iwl_tof_responder_cfg_flags + * @format_bw: bits 0 - 3: &enum iwl_location_frame_format. + * bits 4 - 7: &enum iwl_location_bw. 
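A hedged aside before the rest of the v8 kernel-doc: the r2i/i2r NDP parameter bytes documented further down pack three subfields into eight bits, using the IWL_RESPONDER_STS_POS and IWL_RESPONDER_TOTAL_LTF_POS shifts introduced above. A hypothetical packing helper (names and bounds are the editor's, not driver code):

#include <linux/types.h>

/* Pack max LTF repetitions (bits 0-2), max spatial streams (bits 3-5)
 * and max total LTFs (bits 6-7) into one NDP params byte. */
static u8 example_pack_ndp_params(u8 ltf_rep, u8 sts, u8 total_ltf)
{
	return (ltf_rep & 0x7) |
	       ((sts & 0x7) << IWL_RESPONDER_STS_POS) |
	       ((total_ltf & 0x3) << IWL_RESPONDER_TOTAL_LTF_POS);
}

The v8 member documentation resumes below.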
+ * @rate: current AP rate + * @channel_num: current AP Channel + * @ctrl_ch_position: coding of the control channel position relative to + * the center frequency, see iwl_mvm_get_ctrl_pos() + * @sta_id: index of the AP STA when in AP mode + * @reserved1: reserved + * @toa_offset: Artificial addition [pSec] for the ToA - to be used for debug + * purposes, simulating station movement by adding various values + * to this field + * @common_calib: XVT: common calibration value + * @specific_calib: XVT: specific calibration value + * @bssid: Current AP BSSID + * @r2i_ndp_params: parameters for R2I NDP. + * bits 0 - 2: max number of LTF repetitions + * bits 3 - 5: max number of spatial streams (supported values are < 2) + * bits 6 - 7: max number of total LTFs + * (&enum ieee80211_range_params_max_total_ltf) + * @i2r_ndp_params: parameters for I2R NDP. + * bits 0 - 2: max number of LTF repetitions + * bits 3 - 5: max number of spatial streams + * bits 6 - 7: max number of total LTFs + * (&enum ieee80211_range_params_max_total_ltf) + */ +struct iwl_tof_responder_config_cmd_v8 { + __le32 cmd_valid_fields; + __le32 responder_cfg_flags; + u8 format_bw; + u8 rate; + u8 channel_num; + u8 ctrl_ch_position; + u8 sta_id; + u8 reserved1; + __le16 toa_offset; + __le16 common_calib; + __le16 specific_calib; + u8 bssid[ETH_ALEN]; + u8 r2i_ndp_params; + u8 i2r_ndp_params; +} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_8 */ #define IWL_LCI_CIVIC_IE_MAX_SIZE 400 @@ -422,10 +483,12 @@ struct iwl_tof_range_req_ap_entry_v2 { * driver. * @IWL_INITIATOR_AP_FLAGS_NON_TB: Use non trigger based flow * @IWL_INITIATOR_AP_FLAGS_TB: Use trigger based flow - * @IWL_INITIATOR_AP_FLAGS_SECURED: request secured measurement + * @IWL_INITIATOR_AP_FLAGS_SECURED: request secure LTF measurement * @IWL_INITIATOR_AP_FLAGS_LMR_FEEDBACK: Send LMR feedback * @IWL_INITIATOR_AP_FLAGS_USE_CALIB: Use calibration values from the request * instead of fw internal values. + * @IWL_INITIATOR_AP_FLAGS_PMF: request to protect the negotiation and LMR + * frames with protected management frames. */ enum iwl_initiator_ap_flags { IWL_INITIATOR_AP_FLAGS_ASAP = BIT(1), @@ -440,6 +503,7 @@ enum iwl_initiator_ap_flags { IWL_INITIATOR_AP_FLAGS_SECURED = BIT(11), IWL_INITIATOR_AP_FLAGS_LMR_FEEDBACK = BIT(12), IWL_INITIATOR_AP_FLAGS_USE_CALIB = BIT(13), + IWL_INITIATOR_AP_FLAGS_PMF = BIT(14), }; /** @@ -657,6 +721,79 @@ struct iwl_tof_range_req_ap_entry_v7 { u8 tx_pn[IEEE80211_CCMP_PN_LEN]; } __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_7 */ +#define IWL_LOCATION_MAX_STS_POS 3 + +/** + * struct iwl_tof_range_req_ap_entry_v8 - AP configuration parameters + * @initiator_ap_flags: see &enum iwl_initiator_ap_flags. + * @channel_num: AP Channel number + * @format_bw: bits 0 - 3: &enum iwl_location_frame_format. + * bits 4 - 7: &enum iwl_location_bw. + * @ctrl_ch_position: Coding of the control channel position relative to the + * center frequency, see iwl_mvm_get_ctrl_pos(). + * @ftmr_max_retries: Max number of retries to send the FTMR in case of no + * reply from the AP. + * @bssid: AP's BSSID + * @burst_period: Recommended value to be sent to the AP. Measurement + * periodicity In units of 100ms. ignored if num_of_bursts_exp = 0 + * @samples_per_burst: the number of FTMs pairs in single Burst (1-31); + * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of + * the number of measurement iterations (min 2^0 = 1, max 2^14) + * @sta_id: the station id of the AP. 
Only relevant when associated to the AP, + * otherwise should be set to &IWL_MVM_INVALID_STA. + * @cipher: pairwise cipher suite for secured measurement. + * &enum iwl_location_cipher. + * @hltk: HLTK to be used for secured 11az measurement + * @tk: TK to be used for secured 11az measurement + * @calib: An array of calibration values per FTM rx bandwidth. + * If &IWL_INITIATOR_AP_FLAGS_USE_CALIB is set, the fw will use the + * calibration value that corresponds to the rx bandwidth of the FTM + * frame. + * @beacon_interval: beacon interval of the AP in TUs. Only required if + * &IWL_INITIATOR_AP_FLAGS_TB is set. + * @rx_pn: the next expected PN for protected management frames Rx. LE byte + * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id + * is set to &IWL_MVM_INVALID_STA. + * @tx_pn: the next PN to use for protected management frames Tx. LE byte + * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id + * is set to &IWL_MVM_INVALID_STA. + * @r2i_ndp_params: parameters for R2I NDP ranging negotiation. + * bits 0 - 2: max LTF repetitions + * bits 3 - 5: max number of spatial streams + * bits 6 - 7: reserved + * @i2r_ndp_params: parameters for I2R NDP ranging negotiation. + * bits 0 - 2: max LTF repetitions + * bits 3 - 5: max number of spatial streams (supported values are < 2) + * bits 6 - 7: reserved + * @r2i_max_total_ltf: R2I Max Total LTFs for NDP ranging negotiation. + * One of &enum ieee80211_range_params_max_total_ltf. + * @i2r_max_total_ltf: I2R Max Total LTFs for NDP ranging negotiation. + * One of &enum ieee80211_range_params_max_total_ltf. + */ +struct iwl_tof_range_req_ap_entry_v8 { + __le32 initiator_ap_flags; + u8 channel_num; + u8 format_bw; + u8 ctrl_ch_position; + u8 ftmr_max_retries; + u8 bssid[ETH_ALEN]; + __le16 burst_period; + u8 samples_per_burst; + u8 num_of_bursts; + u8 sta_id; + u8 cipher; + u8 hltk[HLTK_11AZ_LEN]; + u8 tk[TK_11AZ_LEN]; + __le16 calib[IWL_TOF_BW_NUM]; + __le16 beacon_interval; + u8 rx_pn[IEEE80211_CCMP_PN_LEN]; + u8 tx_pn[IEEE80211_CCMP_PN_LEN]; + u8 r2i_ndp_params; + u8 i2r_ndp_params; + u8 r2i_max_total_ltf; + u8 i2r_max_total_ltf; +} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_8 */ + /** * enum iwl_tof_response_mode * @IWL_MVM_TOF_RESPONSE_ASAP: report each AP measurement separately as soon as @@ -878,6 +1015,34 @@ struct iwl_tof_range_req_cmd_v11 { struct iwl_tof_range_req_ap_entry_v7 ap[IWL_MVM_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_11 */ +/** + * struct iwl_tof_range_req_cmd_v12 - start measurement cmd + * @initiator_flags: see flags @ iwl_tof_initiator_flags + * @request_id: A Token incremented per request. The same Token will be + * sent back in the range response + * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) + * @range_req_bssid: ranging request BSSID + * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template. + * Bits set to 1 shall be randomized by the UMAC + * @macaddr_template: MAC address template to use for non-randomized bits + * @req_timeout_ms: Requested timeout of the response in units of milliseconds. + * This is the session time for completing the measurement. + * @tsf_mac_id: report the measurement start time for each ap in terms of the + * TSF of this mac id. 0xff to disable TSF reporting. + * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v2. 
+ */ +struct iwl_tof_range_req_cmd_v12 { + __le32 initiator_flags; + u8 request_id; + u8 num_of_ap; + u8 range_req_bssid[ETH_ALEN]; + u8 macaddr_mask[ETH_ALEN]; + u8 macaddr_template[ETH_ALEN]; + __le32 req_timeout_ms; + __le32 tsf_mac_id; + struct iwl_tof_range_req_ap_entry_v8 ap[IWL_MVM_TOF_MAX_APS]; +} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_12 */ + /* * enum iwl_tof_range_request_status - status of the sent request * @IWL_TOF_RANGE_REQUEST_STATUS_SUCCESSFUL - FW successfully received the diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h index fbca9dd872e7..dc8f2777e944 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -414,6 +414,9 @@ enum iwl_lari_config_masks { LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK = BIT(3), }; +#define IWL_11AX_UKRAINE_MASK 3 +#define IWL_11AX_UKRAINE_SHIFT 8 + /** * struct iwl_lari_config_change_cmd_v1 - change LARI configuration * @config_bitmap: bit map of the config commands. each bit will trigger a @@ -435,6 +438,21 @@ struct iwl_lari_config_change_cmd_v2 { } __packed; /* LARI_CHANGE_CONF_CMD_S_VER_2 */ /** + * struct iwl_lari_config_change_cmd_v3 - change LARI configuration + * @config_bitmap: bit map of the config commands. each bit will trigger a + * different predefined FW config operation + * @oem_uhb_allow_bitmap: bitmap of UHB enabled MCC sets + * @oem_11ax_allow_bitmap: bitmap of 11ax allowed MCCs. + * For each supported country, a pair of regulatory override bit and 11ax mode exist + * in the bit field. + */ +struct iwl_lari_config_change_cmd_v3 { + __le32 config_bitmap; + __le32 oem_uhb_allow_bitmap; + __le32 oem_11ax_allow_bitmap; +} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_3 */ + +/** * struct iwl_pnvm_init_complete_ntfy - PNVM initialization complete * @status: PNVM image loading status */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h index 798417182d54..86445385f072 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h @@ -274,7 +274,7 @@ enum iwl_dev_tx_power_cmd_mode { #define IWL_NUM_CHAIN_TABLES 1 #define IWL_NUM_CHAIN_TABLES_V2 2 #define IWL_NUM_CHAIN_LIMITS 2 -#define IWL_NUM_SUB_BANDS 5 +#define IWL_NUM_SUB_BANDS_V1 5 #define IWL_NUM_SUB_BANDS_V2 11 /** @@ -300,7 +300,7 @@ struct iwl_dev_tx_power_common { * @per_chain: per chain restrictions */ struct iwl_dev_tx_power_cmd_v3 { - __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS]; + __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V1]; } __packed; /* TX_REDUCED_POWER_API_S_VER_3 */ #define IWL_DEV_MAX_TX_POWER 0x7FFF @@ -313,7 +313,7 @@ struct iwl_dev_tx_power_cmd_v3 { * @reserved: reserved (padding) */ struct iwl_dev_tx_power_cmd_v4 { - __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS]; + __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V1]; u8 enable_ack_reduction; u8 reserved[3]; } __packed; /* TX_REDUCED_POWER_API_S_VER_4 */ @@ -332,7 +332,7 @@ struct iwl_dev_tx_power_cmd_v4 { * BIOS values. 
relevant if setMode is IWL_TX_POWER_MODE_SET_SAR_TIMER */ struct iwl_dev_tx_power_cmd_v5 { - __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS]; + __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V1]; u8 enable_ack_reduction; u8 per_chain_restriction_changed; u8 reserved[2]; @@ -454,21 +454,23 @@ struct iwl_geo_tx_power_profiles_resp { /** * union iwl_ppag_table_cmd - union for all versions of PPAG command - * @v1: version 1, table revision = 0 - * @v2: version 2, table revision = 1 + * @v1: version 1 + * @v2: version 2 * - * @enabled: 1 if PPAG is enabled, 0 otherwise + * @flags: bit 0 - indicates enablement of PPAG for ETSI + * bit 1 - indicates enablement of PPAG for CHINA BIOS + * bit 1 can be used only in v3 (identical to v2) * @gain: table of antenna gain values per chain and sub-band * @reserved: reserved */ union iwl_ppag_table_cmd { struct { - __le32 enabled; - s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS]; + __le32 flags; + s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V1]; s8 reserved[2]; } v1; struct { - __le32 enabled; + __le32 flags; s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2]; s8 reserved[2]; } v2; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h index 2c74db823778..3f13b572915a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h @@ -780,36 +780,6 @@ struct iwl_rxq_sync_notification { } __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */ /** - * enum iwl_mvm_rxq_notif_type - Internal message identifier - * - * @IWL_MVM_RXQ_EMPTY: empty sync notification - * @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA - * @IWL_MVM_RXQ_NSSN_SYNC: notify all the RSS queues with the new NSSN - */ -enum iwl_mvm_rxq_notif_type { - IWL_MVM_RXQ_EMPTY, - IWL_MVM_RXQ_NOTIF_DEL_BA, - IWL_MVM_RXQ_NSSN_SYNC, -}; - -/** - * struct iwl_mvm_internal_rxq_notif - Internal representation of the data sent - * in &iwl_rxq_sync_cmd. Should be DWORD aligned. - * FW is agnostic to the payload, so there are no endianity requirements. - * - * @type: value from &iwl_mvm_rxq_notif_type - * @sync: ctrl path is waiting for all notifications to be received - * @cookie: internal cookie to identify old notifications - * @data: payload - */ -struct iwl_mvm_internal_rxq_notif { - u16 type; - u16 sync; - u32 cookie; - u8 data[]; -} __packed; - -/** * enum iwl_mvm_pm_event - type of station PM event * @IWL_MVM_PM_EVENT_AWAKE: station woke up * @IWL_MVM_PM_EVENT_ASLEEP: station went to sleep diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h index 6b8ca35cec1a..b2605aefc290 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h @@ -634,6 +634,12 @@ enum iwl_umac_scan_general_flags2 { * @IWL_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN: at the end of 2.4GHz and * 5.2Ghz bands scan, trigger scan on 6GHz band to discover * the reported collocated APs + * @IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN: at the end of 2.4GHz and 5GHz + * bands scan, if no APs were discovered, allow scan to continue and scan + * 6GHz PSC channels in order to discover country information. + * @IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN_FILTER_IN: in case + * &IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN is enabled and scan is + * activated over 6GHz PSC channels, filter in beacons and probe responses.
*/ enum iwl_umac_scan_general_flags_v2 { IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC = BIT(0), @@ -649,6 +655,8 @@ enum iwl_umac_scan_general_flags_v2 { IWL_UMAC_SCAN_GEN_FLAGS_V2_MULTI_SSID = BIT(10), IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE = BIT(11), IWL_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN = BIT(12), + IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN = BIT(13), + IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN_FILTER_IN = BIT(14), }; /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index 504729663c35..cc4e18ca9566 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -2559,7 +2559,9 @@ int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, fwrt->dump.wks[idx].dump_data = *dump_data; - IWL_WARN(fwrt, "WRT: Collecting data: ini trigger %d fired.\n", tp_id); + IWL_WARN(fwrt, + "WRT: Collecting data: ini trigger %d fired (delay=%dms).\n", + tp_id, (u32)(delay / USEC_PER_MSEC)); schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay)); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index 35dffcaf5aba..f9c5cf538ad1 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -362,6 +362,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t; * @IWL_UCODE_TLV_CAPA_PROTECTED_TWT: Supports protection of TWT action frames * @IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE: Supports the firmware handshake in * reset flow + * @IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN: Support for passive scan on 6GHz PSC + * channels even when these are not enabled. * * @NUM_IWL_UCODE_TLV_CAPA: number of bits used */ @@ -408,6 +410,7 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD = (__force iwl_ucode_tlv_capa_t)54, IWL_UCODE_TLV_CAPA_PROTECTED_TWT = (__force iwl_ucode_tlv_capa_t)56, IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE = (__force iwl_ucode_tlv_capa_t)57, + IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN = (__force iwl_ucode_tlv_capa_t)58, /* set 2 */ IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h index 1dee4714e505..153a3529e77a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/img.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2020 Intel Corporation + * Copyright (C) 2005-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016 Intel Deutschland GmbH */ @@ -116,6 +116,9 @@ struct fw_img { #define PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS 0 #define PAGING_TLV_SECURE_MASK 1 +/* FW MSB Mask for regions/cache_control */ +#define FW_ADDR_CACHE_CONTROL 0xC0000000UL + /** * struct iwl_fw_paging * @fw_paging_phys: page phy pointer diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c index 986913f2fbd5..2ecec00db9da 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/init.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c @@ -10,6 +10,8 @@ #include "fw/api/soc.h" #include "fw/api/commands.h" +#include "fw/api/rx.h" +#include "fw/api/datapath.h" void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, const struct iwl_fw *fw, @@ -95,3 +97,60 @@ int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt) return ret; } 
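The iwl_configure_rxq() addition just below sizes a host command carrying a flexible data[] array with struct_size(). A minimal sketch of that overflow-safe allocation pattern, with a hypothetical command struct (not the driver's):

#include <linux/kernel.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_queue_cmd {
	__le32 num_queues;
	struct {
		__le64 fr_bd_cb;
	} data[];			/* one entry per RX queue */
};

static struct example_queue_cmd *example_alloc_queue_cmd(int num_queues)
{
	struct example_queue_cmd *cmd;

	/* struct_size() computes header + n * entry without integer overflow */
	cmd = kzalloc(struct_size(cmd, data, num_queues), GFP_KERNEL);
	if (cmd)
		cmd->num_queues = cpu_to_le32(num_queues);
	return cmd;
}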
IWL_EXPORT_SYMBOL(iwl_set_soc_latency); + +int iwl_configure_rxq(struct iwl_fw_runtime *fwrt) +{ + int i, num_queues, size, ret; + struct iwl_rfh_queue_config *cmd; + struct iwl_host_cmd hcmd = { + .id = WIDE_ID(DATA_PATH_GROUP, RFH_QUEUE_CONFIG_CMD), + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + }; + + /* + * The default queue is configured via context info, so if we + * have a single queue, there's nothing to do here. + */ + if (fwrt->trans->num_rx_queues == 1) + return 0; + + if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_22000) + return 0; + + /* skip the default queue */ + num_queues = fwrt->trans->num_rx_queues - 1; + + size = struct_size(cmd, data, num_queues); + + cmd = kzalloc(size, GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->num_queues = num_queues; + + for (i = 0; i < num_queues; i++) { + struct iwl_trans_rxq_dma_data data; + + cmd->data[i].q_num = i + 1; + iwl_trans_get_rxq_dma_data(fwrt->trans, i + 1, &data); + + cmd->data[i].fr_bd_cb = cpu_to_le64(data.fr_bd_cb); + cmd->data[i].urbd_stts_wrptr = + cpu_to_le64(data.urbd_stts_wrptr); + cmd->data[i].ur_bd_cb = cpu_to_le64(data.ur_bd_cb); + cmd->data[i].fr_bd_wid = cpu_to_le32(data.fr_bd_wid); + } + + hcmd.data[0] = cmd; + hcmd.len[0] = size; + + ret = iwl_trans_send_cmd(fwrt->trans, &hcmd); + + kfree(cmd); + + if (ret) + IWL_ERR(fwrt, "Failed to configure RX queues: %d\n", ret); + + return ret; +} +IWL_EXPORT_SYMBOL(iwl_configure_rxq); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index 0dba5444f2db..35af85a5430b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -190,5 +190,6 @@ void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt); void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt); int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt); +int iwl_configure_rxq(struct iwl_fw_runtime *fwrt); #endif /* __iwl_fw_runtime_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index c4f5da76f1c0..b35ffdfdf14b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -416,6 +416,7 @@ struct iwl_cfg { #define IWL_CFG_MAC_TYPE_SNJ 0x42 #define IWL_CFG_MAC_TYPE_SOF 0x43 #define IWL_CFG_MAC_TYPE_MA 0x44 +#define IWL_CFG_MAC_TYPE_BZ 0x46 #define IWL_CFG_RF_TYPE_TH 0x105 #define IWL_CFG_RF_TYPE_TH1 0x108 @@ -477,6 +478,7 @@ extern const struct iwl_cfg_trans_params iwl_snj_trans_cfg; extern const struct iwl_cfg_trans_params iwl_so_trans_cfg; extern const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg; extern const struct iwl_cfg_trans_params iwl_ma_trans_cfg; +extern const struct iwl_cfg_trans_params iwl_bz_trans_cfg; extern const char iwl9162_name[]; extern const char iwl9260_name[]; extern const char iwl9260_1_name[]; @@ -501,8 +503,10 @@ extern const char iwl_ax200_killer_1650w_name[]; extern const char iwl_ax200_killer_1650x_name[]; extern const char iwl_ax201_killer_1650s_name[]; extern const char iwl_ax201_killer_1650i_name[]; -extern const char iwl_ma_name[]; +extern const char iwl_ax210_killer_1675w_name[]; +extern const char iwl_ax210_killer_1675x_name[]; extern const char iwl_ax211_name[]; +extern const char iwl_ax221_name[]; extern const char iwl_ax411_name[]; #if IS_ENABLED(CONFIG_IWLDVM) extern const struct iwl_cfg iwl5300_agn_cfg; @@ -594,7 +598,7 @@ extern const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0; extern const struct iwl_cfg 
killer1650x_2ax_cfg; extern const struct iwl_cfg killer1650w_2ax_cfg; extern const struct iwl_cfg iwl_qnj_b0_hr_b0_cfg; -extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0; +extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_b0; extern const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0; extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0; extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long; @@ -612,6 +616,10 @@ extern const struct iwl_cfg iwl_cfg_ma_a0_mr_a0; extern const struct iwl_cfg iwl_cfg_snj_a0_mr_a0; extern const struct iwl_cfg iwl_cfg_so_a0_hr_a0; extern const struct iwl_cfg iwl_cfg_quz_a0_hr_b0; +extern const struct iwl_cfg iwl_cfg_bz_a0_hr_b0; +extern const struct iwl_cfg iwl_cfg_bz_a0_gf_a0; +extern const struct iwl_cfg iwl_cfg_bz_a0_gf4_a0; +extern const struct iwl_cfg iwl_cfg_bz_a0_mr_a0; #endif /* CONFIG_IWLMVM */ #endif /* __IWL_CONFIG_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index 6ccde7e30211..db312abd2e09 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -578,6 +578,9 @@ enum msix_fh_int_causes { MSIX_FH_INT_CAUSES_FH_ERR = BIT(21), }; +/* The low 16 bits are for rx data queue indication */ +#define MSIX_FH_INT_CAUSES_DATA_QUEUE 0xffff + /* * Causes for the HW register interrupts */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c index 579bc81cc0ae..4cd8c39cc3e9 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2021 Intel Corporation */ #include <linux/firmware.h> #include "iwl-drv.h" @@ -426,7 +426,8 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans) const struct firmware *fw; int res; - if (!iwlwifi_mod_params.enable_ini) + if (!iwlwifi_mod_params.enable_ini || + trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_9000) return; res = firmware_request_nowarn(&fw, "iwl-debug-yoyo.bin", dev); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index eb168dc535d4..884750bf7840 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2020 Intel Corporation + * Copyright (C) 2005-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -550,8 +550,6 @@ static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv, return 0; } -#define FW_ADDR_CACHE_CONTROL 0xC0000000 - static int iwl_parse_tlv_firmware(struct iwl_drv *drv, const struct firmware *ucode_raw, struct iwl_firmware_pieces *pieces, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h index e6d2e0994317..cf9c64090014 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2020 Intel Corporation + * Copyright (C) 2005-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015 Intel Deutschland GmbH */ @@ -176,6 +176,8 @@ 
iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode, bool state) static inline void iwl_op_mode_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) { + if (WARN_ON_ONCE(!op_mode)) + return; op_mode->ops->free_skb(op_mode, skb); } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c index 60e0db4a5e20..9236f9106826 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c @@ -2,7 +2,7 @@ /* * Copyright (C) 2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH - * Copyright (C) 2019-2020 Intel Corporation + * Copyright (C) 2019-2021 Intel Corporation */ #include <linux/kernel.h> #include <linux/bsearch.h> @@ -21,7 +21,6 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, const struct iwl_cfg_trans_params *cfg_trans) { struct iwl_trans *trans; - int txcmd_size, txcmd_align; #ifdef CONFIG_LOCKDEP static struct lock_class_key __key; #endif @@ -31,10 +30,40 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, return NULL; trans->trans_cfg = cfg_trans; - if (!cfg_trans->gen2) { + +#ifdef CONFIG_LOCKDEP + lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map", + &__key, 0); +#endif + + trans->dev = dev; + trans->ops = ops; + trans->num_rx_queues = 1; + + WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty); + + if (trans->trans_cfg->use_tfh) { + trans->txqs.tfd.addr_size = 64; + trans->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS; + trans->txqs.tfd.size = sizeof(struct iwl_tfh_tfd); + } else { + trans->txqs.tfd.addr_size = 36; + trans->txqs.tfd.max_tbs = IWL_NUM_OF_TBS; + trans->txqs.tfd.size = sizeof(struct iwl_tfd); + } + trans->max_skb_frags = IWL_TRANS_MAX_FRAGS(trans); + + return trans; +} + +int iwl_trans_init(struct iwl_trans *trans) +{ + int txcmd_size, txcmd_align; + + if (!trans->trans_cfg->gen2) { txcmd_size = sizeof(struct iwl_tx_cmd); txcmd_align = sizeof(void *); - } else if (cfg_trans->device_family < IWL_DEVICE_FAMILY_AX210) { + } else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) { txcmd_size = sizeof(struct iwl_tx_cmd_gen2); txcmd_align = 64; } else { @@ -46,17 +75,8 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, txcmd_size += 36; /* biggest possible 802.11 header */ /* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */ - if (WARN_ON(cfg_trans->gen2 && txcmd_size >= txcmd_align)) - return ERR_PTR(-EINVAL); - -#ifdef CONFIG_LOCKDEP - lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map", - &__key, 0); -#endif - - trans->dev = dev; - trans->ops = ops; - trans->num_rx_queues = 1; + if (WARN_ON(trans->trans_cfg->gen2 && txcmd_size >= txcmd_align)) + return -EINVAL; if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) trans->txqs.bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl); @@ -68,23 +88,16 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, * allocate here. 
*/ if (trans->trans_cfg->gen2) { - trans->txqs.bc_pool = dmam_pool_create("iwlwifi:bc", dev, + trans->txqs.bc_pool = dmam_pool_create("iwlwifi:bc", trans->dev, trans->txqs.bc_tbl_size, 256, 0); if (!trans->txqs.bc_pool) - return NULL; + return -ENOMEM; } - if (trans->trans_cfg->use_tfh) { - trans->txqs.tfd.addr_size = 64; - trans->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS; - trans->txqs.tfd.size = sizeof(struct iwl_tfh_tfd); - } else { - trans->txqs.tfd.addr_size = 36; - trans->txqs.tfd.max_tbs = IWL_NUM_OF_TBS; - trans->txqs.tfd.size = sizeof(struct iwl_tfd); - } - trans->max_skb_frags = IWL_TRANS_MAX_FRAGS(trans); + /* Some things must not change even if the config does */ + WARN_ON(trans->txqs.tfd.addr_size != + (trans->trans_cfg->use_tfh ? 64 : 36)); snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name), "iwl_cmd_pool:%s", dev_name(trans->dev)); @@ -93,35 +106,35 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, txcmd_size, txcmd_align, SLAB_HWCACHE_ALIGN, NULL); if (!trans->dev_cmd_pool) - return NULL; - - WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty); + return -ENOMEM; trans->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page); if (!trans->txqs.tso_hdr_page) { kmem_cache_destroy(trans->dev_cmd_pool); - return NULL; + return -ENOMEM; } /* Initialize the wait queue for commands */ init_waitqueue_head(&trans->wait_command_queue); - return trans; + return 0; } void iwl_trans_free(struct iwl_trans *trans) { int i; - for_each_possible_cpu(i) { - struct iwl_tso_hdr_page *p = - per_cpu_ptr(trans->txqs.tso_hdr_page, i); + if (trans->txqs.tso_hdr_page) { + for_each_possible_cpu(i) { + struct iwl_tso_hdr_page *p = + per_cpu_ptr(trans->txqs.tso_hdr_page, i); - if (p->page) - __free_page(p->page); - } + if (p && p->page) + __free_page(p->page); + } - free_percpu(trans->txqs.tso_hdr_page); + free_percpu(trans->txqs.tso_hdr_page); + } kmem_cache_destroy(trans->dev_cmd_pool); } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 4a5822c1be13..bf569f856ad8 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2020 Intel Corporation + * Copyright (C) 2005-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -1267,7 +1267,8 @@ static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans, if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty)) return -ENOTSUPP; - if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { + /* No need to wait if the firmware is not alive */ + if (trans->state != IWL_TRANS_FW_ALIVE) { IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); return -EIO; } @@ -1438,6 +1439,7 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, struct device *dev, const struct iwl_trans_ops *ops, const struct iwl_cfg_trans_params *cfg_trans); +int iwl_trans_init(struct iwl_trans *trans); void iwl_trans_free(struct iwl_trans *trans); /***************************************************** diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index 617b41ee5801..1343f25f1090 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -93,6 +93,15 @@ #define IWL_MVM_ENABLE_EBS 1 #define 
IWL_MVM_FTM_INITIATOR_ALGO IWL_TOF_ALGO_TYPE_MAX_LIKE #define IWL_MVM_FTM_INITIATOR_DYNACK true +#define IWL_MVM_FTM_R2I_MAX_REP 7 +#define IWL_MVM_FTM_I2R_MAX_REP 7 +#define IWL_MVM_FTM_R2I_MAX_STS 1 +#define IWL_MVM_FTM_I2R_MAX_STS 1 +#define IWL_MVM_FTM_R2I_MAX_TOTAL_LTF 3 +#define IWL_MVM_FTM_I2R_MAX_TOTAL_LTF 3 +#define IWL_MVM_FTM_INITIATOR_SECURE_LTF false +#define IWL_MVM_FTM_RESP_NDP_SUPPORT true +#define IWL_MVM_FTM_RESP_LMR_FEEDBACK_SUPPORT true #define IWL_MVM_D3_DEBUG false #define IWL_MVM_USE_TWT true #define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 10 @@ -108,5 +117,7 @@ #define IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT 20016 #define IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC 2 #define IWL_MVM_DISABLE_AP_FILS false +#define IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT 3000 /* in seconds */ +#define IWL_MVM_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT 60 /* in seconds */ #endif /* __MVM_CONSTANTS_H */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index a7dc85c704a9..2e28cf299ef4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -2028,6 +2028,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) mutex_lock(&mvm->mutex); + mvm->last_reset_or_resume_time_jiffies = jiffies; + /* get the BSS vif pointer again */ vif = iwl_mvm_get_bss_vif(mvm); if (IS_ERR_OR_NULL(vif)) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 34ddef97b099..63d65018d098 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -1210,10 +1210,10 @@ static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len) IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE)) return -EINVAL; - rcu_read_lock(); + mutex_lock(&mvm->mutex); for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) { - vif = iwl_mvm_rcu_dereference_vif_id(mvm, i, true); + vif = iwl_mvm_rcu_dereference_vif_id(mvm, i, false); if (!vif) continue; @@ -1253,18 +1253,16 @@ static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len) &beacon_cmd.tim_size, beacon->data, beacon->len); - mutex_lock(&mvm->mutex); iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, sizeof(beacon_cmd)); mutex_unlock(&mvm->mutex); dev_kfree_skb(beacon); - rcu_read_unlock(); return 0; out_err: - rcu_read_unlock(); + mutex_unlock(&mvm->mutex); return -EINVAL; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c index a4fd0bf9ba19..a456b8a0ae58 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c @@ -490,6 +490,15 @@ iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (vif->bss_conf.assoc && !memcmp(peer->addr, vif->bss_conf.bssid, ETH_ALEN)) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_sta *sta; + + rcu_read_lock(); + + sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]); + if (sta->mfp) + FTM_PUT_FLAG(PMF); + + rcu_read_unlock(); target->sta_id = mvmvif->ap_sta_id; } else { @@ -684,6 +693,19 @@ iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm 
*mvm, struct ieee80211_vif *vif, } } +static int +iwl_mvm_ftm_put_target_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct cfg80211_pmsr_request_peer *peer, + struct iwl_tof_range_req_ap_entry_v7 *target) +{ + int err = iwl_mvm_ftm_put_target(mvm, vif, peer, (void *)target); + if (err) + return err; + + iwl_mvm_ftm_set_secured_ranging(mvm, vif, target); + return err; +} + static int iwl_mvm_ftm_start_v11(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) @@ -704,11 +726,67 @@ static int iwl_mvm_ftm_start_v11(struct iwl_mvm *mvm, struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; struct iwl_tof_range_req_ap_entry_v7 *target = &cmd.ap[i]; - err = iwl_mvm_ftm_put_target(mvm, vif, peer, (void *)target); + err = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, target); + if (err) + return err; + } + + return iwl_mvm_ftm_send_cmd(mvm, &hcmd); +} + +static void +iwl_mvm_ftm_set_ndp_params(struct iwl_mvm *mvm, + struct iwl_tof_range_req_ap_entry_v8 *target) +{ + /* Only 2 STS are supported on Tx */ + u32 i2r_max_sts = IWL_MVM_FTM_I2R_MAX_STS > 1 ? 1 : + IWL_MVM_FTM_I2R_MAX_STS; + + target->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP | + (IWL_MVM_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS); + target->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP | + (i2r_max_sts << IWL_LOCATION_MAX_STS_POS); + target->r2i_max_total_ltf = IWL_MVM_FTM_R2I_MAX_TOTAL_LTF; + target->i2r_max_total_ltf = IWL_MVM_FTM_I2R_MAX_TOTAL_LTF; +} + +static int iwl_mvm_ftm_start_v12(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct cfg80211_pmsr_request *req) +{ + struct iwl_tof_range_req_cmd_v12 cmd; + struct iwl_host_cmd hcmd = { + .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0), + .dataflags[0] = IWL_HCMD_DFL_DUP, + .data[0] = &cmd, + .len[0] = sizeof(cmd), + }; + u8 i; + int err; + + iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req); + + for (i = 0; i < cmd.num_of_ap; i++) { + struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; + struct iwl_tof_range_req_ap_entry_v8 *target = &cmd.ap[i]; + u32 flags; + + err = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, (void *)target); if (err) return err; - iwl_mvm_ftm_set_secured_ranging(mvm, vif, target); + iwl_mvm_ftm_set_ndp_params(mvm, target); + + /* + * If secure LTF is turned off, replace the flag with PMF only + */ + flags = le32_to_cpu(target->initiator_ap_flags); + if ((flags & IWL_INITIATOR_AP_FLAGS_SECURED) && + !IWL_MVM_FTM_INITIATOR_SECURE_LTF) { + flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED; + flags |= IWL_INITIATOR_AP_FLAGS_PMF; + target->initiator_ap_flags = cpu_to_le32(flags); + } } return iwl_mvm_ftm_send_cmd(mvm, &hcmd); @@ -732,6 +810,9 @@ int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, IWL_FW_CMD_VER_UNKNOWN); switch (cmd_ver) { + case 12: + err = iwl_mvm_ftm_start_v12(mvm, vif, req); + break; case 11: err = iwl_mvm_ftm_start_v11(mvm, vif, req); break; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c index 996f45c19f10..5a249ea97eb2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c @@ -75,6 +75,24 @@ static int iwl_mvm_ftm_responder_set_bw_v2(struct cfg80211_chan_def *chandef, return 0; } +static void +iwl_mvm_ftm_responder_set_ndp(struct iwl_mvm *mvm, + struct iwl_tof_responder_config_cmd_v8 *cmd) +{ + /* Up to 2 R2I STS are allowed on the responder */ + u32 r2i_max_sts = IWL_MVM_FTM_R2I_MAX_STS < 2 ? 
+ IWL_MVM_FTM_R2I_MAX_STS : 1; + + cmd->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP | + (r2i_max_sts << IWL_RESPONDER_STS_POS) | + (IWL_MVM_FTM_R2I_MAX_TOTAL_LTF << IWL_RESPONDER_TOTAL_LTF_POS); + cmd->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP | + (IWL_MVM_FTM_I2R_MAX_STS << IWL_RESPONDER_STS_POS) | + (IWL_MVM_FTM_I2R_MAX_TOTAL_LTF << IWL_RESPONDER_TOTAL_LTF_POS); + cmd->cmd_valid_fields |= + cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_NDP_PARAMS); +} + static int iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, @@ -82,11 +100,11 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); /* - * The command structure is the same for versions 6 and 7, (only the + * The command structure is the same for versions 6, 7 and 8 (only the * field interpretation is different), so the same struct can be used * for all cases. */ - struct iwl_tof_responder_config_cmd cmd = { + struct iwl_tof_responder_config_cmd_v8 cmd = { .channel_num = chandef->chan->hw_value, .cmd_valid_fields = cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO | @@ -100,7 +118,10 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); - if (cmd_ver == 7) + if (cmd_ver == 8) + iwl_mvm_ftm_responder_set_ndp(mvm, &cmd); + + if (cmd_ver >= 7) err = iwl_mvm_ftm_responder_set_bw_v2(chandef, &cmd.format_bw, &cmd.ctrl_ch_position); else diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 5ee64f7f3c85..8aa5f1a2c58c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -29,6 +29,9 @@ #define UCODE_VALID_OK cpu_to_le32(0x1) +#define IWL_PPAG_MASK 3 +#define IWL_PPAG_ETSI_MASK BIT(0) + struct iwl_mvm_alive_data { bool valid; u32 scd_base_addr; @@ -70,56 +73,6 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm) return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); } -static int iwl_configure_rxq(struct iwl_mvm *mvm) -{ - int i, num_queues, size, ret; - struct iwl_rfh_queue_config *cmd; - struct iwl_host_cmd hcmd = { - .id = WIDE_ID(DATA_PATH_GROUP, RFH_QUEUE_CONFIG_CMD), - .dataflags[0] = IWL_HCMD_DFL_NOCOPY, - }; - - /* - * The default queue is configured via context info, so if we - * have a single queue, there's nothing to do here.
- */ - if (mvm->trans->num_rx_queues == 1) - return 0; - - /* skip the default queue */ - num_queues = mvm->trans->num_rx_queues - 1; - - size = struct_size(cmd, data, num_queues); - - cmd = kzalloc(size, GFP_KERNEL); - if (!cmd) - return -ENOMEM; - - cmd->num_queues = num_queues; - - for (i = 0; i < num_queues; i++) { - struct iwl_trans_rxq_dma_data data; - - cmd->data[i].q_num = i + 1; - iwl_trans_get_rxq_dma_data(mvm->trans, i + 1, &data); - - cmd->data[i].fr_bd_cb = cpu_to_le64(data.fr_bd_cb); - cmd->data[i].urbd_stts_wrptr = - cpu_to_le64(data.urbd_stts_wrptr); - cmd->data[i].ur_bd_cb = cpu_to_le64(data.ur_bd_cb); - cmd->data[i].fr_bd_wid = cpu_to_le32(data.fr_bd_wid); - } - - hcmd.data[0] = cmd; - hcmd.len[0] = size; - - ret = iwl_mvm_send_cmd(mvm, &hcmd); - - kfree(cmd); - - return ret; -} - static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm) { struct iwl_dqa_enable_cmd dqa_cmd = { @@ -233,7 +186,8 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, mvm->trans->dbg.lmac_error_event_table[1] = le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr); - umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr); + umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr) & + ~FW_ADDR_CACHE_CONTROL; if (umac_error_table) { if (umac_error_table >= @@ -773,16 +727,16 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) } else if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_REDUCE_TX_POWER)) { len = sizeof(cmd.v5); - n_subbands = IWL_NUM_SUB_BANDS; + n_subbands = IWL_NUM_SUB_BANDS_V1; per_chain = cmd.v5.per_chain[0][0]; } else if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) { len = sizeof(cmd.v4); - n_subbands = IWL_NUM_SUB_BANDS; + n_subbands = IWL_NUM_SUB_BANDS_V1; per_chain = cmd.v4.per_chain[0][0]; } else { len = sizeof(cmd.v3); - n_subbands = IWL_NUM_SUB_BANDS; + n_subbands = IWL_NUM_SUB_BANDS_V1; per_chain = cmd.v3.per_chain[0][0]; } @@ -909,46 +863,50 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm) { - union acpi_object *wifi_pkg, *data, *enabled; + union acpi_object *wifi_pkg, *data, *flags; int i, j, ret, tbl_rev, num_sub_bands; int idx = 2; s8 *gain; /* - * The 'enabled' field is the same in v1 and v2 so we can just + * The 'flags' field is the same in v1 and in v2 so we can just * use v1 to access it. 
*/ - mvm->fwrt.ppag_table.v1.enabled = cpu_to_le32(0); + mvm->fwrt.ppag_table.v1.flags = cpu_to_le32(0); + data = iwl_acpi_get_object(mvm->dev, ACPI_PPAG_METHOD); if (IS_ERR(data)) return PTR_ERR(data); - /* try to read ppag table revision 1 */ + /* try to read ppag table rev 2 or 1 (both have the same data size) */ wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data, ACPI_PPAG_WIFI_DATA_SIZE_V2, &tbl_rev); if (!IS_ERR(wifi_pkg)) { - if (tbl_rev != 1) { + if (tbl_rev == 1 || tbl_rev == 2) { + num_sub_bands = IWL_NUM_SUB_BANDS_V2; + gain = mvm->fwrt.ppag_table.v2.gain[0]; + mvm->fwrt.ppag_ver = tbl_rev; + IWL_DEBUG_RADIO(mvm, + "Reading PPAG table v2 (tbl_rev=%d)\n", + tbl_rev); + goto read_table; + } else { ret = -EINVAL; goto out_free; } - num_sub_bands = IWL_NUM_SUB_BANDS_V2; - gain = mvm->fwrt.ppag_table.v2.gain[0]; - mvm->fwrt.ppag_ver = 2; - IWL_DEBUG_RADIO(mvm, "Reading PPAG table v2 (tbl_rev=1)\n"); - goto read_table; } /* try to read ppag table revision 0 */ wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data, - ACPI_PPAG_WIFI_DATA_SIZE, &tbl_rev); + ACPI_PPAG_WIFI_DATA_SIZE_V1, &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev != 0) { ret = -EINVAL; goto out_free; } - num_sub_bands = IWL_NUM_SUB_BANDS; + num_sub_bands = IWL_NUM_SUB_BANDS_V1; gain = mvm->fwrt.ppag_table.v1.gain[0]; - mvm->fwrt.ppag_ver = 1; + mvm->fwrt.ppag_ver = 0; IWL_DEBUG_RADIO(mvm, "Reading PPAG table v1 (tbl_rev=0)\n"); goto read_table; } @@ -956,15 +914,17 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm) goto out_free; read_table: - enabled = &wifi_pkg->package.elements[1]; - if (enabled->type != ACPI_TYPE_INTEGER || - (enabled->integer.value != 0 && enabled->integer.value != 1)) { + flags = &wifi_pkg->package.elements[1]; + + if (flags->type != ACPI_TYPE_INTEGER) { ret = -EINVAL; goto out_free; } - mvm->fwrt.ppag_table.v1.enabled = cpu_to_le32(enabled->integer.value); - if (!mvm->fwrt.ppag_table.v1.enabled) { + mvm->fwrt.ppag_table.v1.flags = cpu_to_le32(flags->integer.value & + IWL_PPAG_MASK); + + if (!mvm->fwrt.ppag_table.v1.flags) { ret = 0; goto out_free; } @@ -992,12 +952,13 @@ read_table: (j != 0 && (gain[i * num_sub_bands + j] > ACPI_PPAG_MAX_HB || gain[i * num_sub_bands + j] < ACPI_PPAG_MIN_HB))) { - mvm->fwrt.ppag_table.v1.enabled = cpu_to_le32(0); + mvm->fwrt.ppag_table.v1.flags = cpu_to_le32(0); ret = -EINVAL; goto out_free; } } } + ret = 0; out_free: kfree(data); @@ -1015,7 +976,7 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm) "PPAG capability not supported by FW, command not sent.\n"); return 0; } - if (!mvm->fwrt.ppag_table.v1.enabled) { + if (!mvm->fwrt.ppag_table.v1.flags) { IWL_DEBUG_RADIO(mvm, "PPAG not enabled, command not sent.\n"); return 0; } @@ -1024,20 +985,28 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm) PER_PLATFORM_ANT_GAIN_CMD, IWL_FW_CMD_VER_UNKNOWN); if (cmd_ver == 1) { - num_sub_bands = IWL_NUM_SUB_BANDS; + num_sub_bands = IWL_NUM_SUB_BANDS_V1; gain = mvm->fwrt.ppag_table.v1.gain[0]; cmd_size = sizeof(mvm->fwrt.ppag_table.v1); - if (mvm->fwrt.ppag_ver == 2) { + if (mvm->fwrt.ppag_ver == 1 || mvm->fwrt.ppag_ver == 2) { IWL_DEBUG_RADIO(mvm, - "PPAG table is v2 but FW supports v1, sending truncated table\n"); + "PPAG table rev is %d but FW supports v1, sending truncated table\n", + mvm->fwrt.ppag_ver); + mvm->fwrt.ppag_table.v1.flags &= + cpu_to_le32(IWL_PPAG_ETSI_MASK); } - } else if (cmd_ver == 2) { + } else if (cmd_ver == 2 || cmd_ver == 3) { num_sub_bands = IWL_NUM_SUB_BANDS_V2; gain = mvm->fwrt.ppag_table.v2.gain[0]; cmd_size = sizeof(mvm->fwrt.ppag_table.v2); - if 
(mvm->fwrt.ppag_ver == 1) { + if (mvm->fwrt.ppag_ver == 0) { IWL_DEBUG_RADIO(mvm, "PPAG table is v1 but FW supports v2, sending padded table\n"); + } else if (cmd_ver == 2 && mvm->fwrt.ppag_ver == 2) { + IWL_DEBUG_RADIO(mvm, + "PPAG table is v3 but FW supports v2, sending partial bitmap.\n"); + mvm->fwrt.ppag_table.v1.flags &= + cpu_to_le32(IWL_PPAG_ETSI_MASK); } } else { IWL_DEBUG_RADIO(mvm, "Unsupported PPAG command version\n"); @@ -1102,7 +1071,7 @@ static int iwl_mvm_ppag_init(struct iwl_mvm *mvm) IWL_DEBUG_RADIO(mvm, "System vendor '%s' is not in the approved list, disabling PPAG.\n", dmi_get_system_info(DMI_SYS_VENDOR)); - mvm->fwrt.ppag_table.v1.enabled = cpu_to_le32(0); + mvm->fwrt.ppag_table.v1.flags = cpu_to_le32(0); return 0; } @@ -1144,33 +1113,6 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm) IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret); } -static u8 iwl_mvm_eval_dsm_indonesia_5g2(struct iwl_mvm *mvm) -{ - u8 value; - - int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0, - DSM_FUNC_ENABLE_INDONESIA_5G2, - &iwl_guid, &value); - - if (ret < 0) - IWL_DEBUG_RADIO(mvm, - "Failed to evaluate DSM function ENABLE_INDONESIA_5G2, ret=%d\n", - ret); - - else if (value >= DSM_VALUE_INDONESIA_MAX) - IWL_DEBUG_RADIO(mvm, - "DSM function ENABLE_INDONESIA_5G2 return invalid value, value=%d\n", - value); - - else if (value == DSM_VALUE_INDONESIA_ENABLE) { - IWL_DEBUG_RADIO(mvm, - "Evaluated DSM function ENABLE_INDONESIA_5G2: Enabling 5g2\n"); - return DSM_VALUE_INDONESIA_ENABLE; - } - /* default behaviour is disabled */ - return DSM_VALUE_INDONESIA_DISABLE; -} - static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm) { u8 value; @@ -1195,64 +1137,27 @@ static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm) return DSM_VALUE_RFI_DISABLE; } -static u8 iwl_mvm_eval_dsm_disable_srd(struct iwl_mvm *mvm) -{ - u8 value; - int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0, - DSM_FUNC_DISABLE_SRD, - &iwl_guid, &value); - - if (ret < 0) - IWL_DEBUG_RADIO(mvm, - "Failed to evaluate DSM function DISABLE_SRD, ret=%d\n", - ret); - - else if (value >= DSM_VALUE_SRD_MAX) - IWL_DEBUG_RADIO(mvm, - "DSM function DISABLE_SRD return invalid value, value=%d\n", - value); - - else if (value == DSM_VALUE_SRD_PASSIVE) { - IWL_DEBUG_RADIO(mvm, - "Evaluated DSM function DISABLE_SRD: setting SRD to passive\n"); - return DSM_VALUE_SRD_PASSIVE; - - } else if (value == DSM_VALUE_SRD_DISABLE) { - IWL_DEBUG_RADIO(mvm, - "Evaluated DSM function DISABLE_SRD: disabling SRD\n"); - return DSM_VALUE_SRD_DISABLE; - } - /* default behaviour is active */ - return DSM_VALUE_SRD_ACTIVE; -} - static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) { - u8 ret; int cmd_ret; - struct iwl_lari_config_change_cmd_v2 cmd = {}; - - if (iwl_mvm_eval_dsm_indonesia_5g2(mvm) == DSM_VALUE_INDONESIA_ENABLE) - cmd.config_bitmap |= - cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK); + struct iwl_lari_config_change_cmd_v3 cmd = {}; - ret = iwl_mvm_eval_dsm_disable_srd(mvm); - if (ret == DSM_VALUE_SRD_PASSIVE) - cmd.config_bitmap |= - cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK); - - else if (ret == DSM_VALUE_SRD_DISABLE) - cmd.config_bitmap |= - cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK); + cmd.config_bitmap = iwl_acpi_get_lari_config_bitmap(&mvm->fwrt); /* apply more config masks here */ if (cmd.config_bitmap) { - size_t cmd_size = iwl_fw_lookup_cmd_ver(mvm->fw, - REGULATORY_AND_NVM_GROUP, - LARI_CONFIG_CHANGE, 1) == 2 ? 
- sizeof(struct iwl_lari_config_change_cmd_v2) : - sizeof(struct iwl_lari_config_change_cmd_v1); + size_t cmd_size; + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, + REGULATORY_AND_NVM_GROUP, + LARI_CONFIG_CHANGE, 1); + if (cmd_ver == 3) + cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3); + else if (cmd_ver == 2) + cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2); + else + cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1); + IWL_DEBUG_RADIO(mvm, "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x\n", le32_to_cpu(cmd.config_bitmap)); @@ -1485,14 +1390,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm) } /* Init RSS configuration */ - if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) { - ret = iwl_configure_rxq(mvm); - if (ret) { - IWL_ERR(mvm, "Failed to configure RX queues: %d\n", - ret); - goto error; - } - } + ret = iwl_configure_rxq(&mvm->fwrt); + if (ret) + goto error; if (iwl_mvm_has_new_rx_api(mvm)) { ret = iwl_send_rss_cfg_cmd(mvm); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index baf7404c137d..607d5d564928 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -1099,6 +1099,8 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm) iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_PERIODIC, NULL); + mvm->last_reset_or_resume_time_jiffies = jiffies; + if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { /* Something went wrong - we need to finish some cleanup * that normally iwl_mvm_mac_restart_complete() below @@ -4610,6 +4612,16 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, break; case NL80211_IFTYPE_STATION: + /* + * We haven't configured the firmware to be associated yet since + * we don't know the dtim period. In this case, the firmware can't + * track the beacons. + */ + if (!vif->bss_conf.assoc || !vif->bss_conf.dtim_period) { + ret = -EBUSY; + goto out_unlock; + } + if (chsw->delay > IWL_MAX_CSA_BLOCK_TX) schedule_delayed_work(&mvmvif->csa_work, 0); @@ -5134,28 +5146,50 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, } void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, - struct iwl_mvm_internal_rxq_notif *notif, - u32 size) + enum iwl_mvm_rxq_notif_type type, + bool sync, + const void *data, u32 size) { - u32 qmask = BIT(mvm->trans->num_rx_queues) - 1; + struct { + struct iwl_rxq_sync_cmd cmd; + struct iwl_mvm_internal_rxq_notif notif; + } __packed cmd = { + .cmd.rxq_mask = cpu_to_le32(BIT(mvm->trans->num_rx_queues) - 1), + .cmd.count = + cpu_to_le32(sizeof(struct iwl_mvm_internal_rxq_notif) + + size), + .notif.type = type, + .notif.sync = sync, + }; + struct iwl_host_cmd hcmd = { + .id = WIDE_ID(DATA_PATH_GROUP, TRIGGER_RX_QUEUES_NOTIF_CMD), + .data[0] = &cmd, + .len[0] = sizeof(cmd), + .data[1] = data, + .len[1] = size, + .flags = sync ? 
0 : CMD_ASYNC, + }; int ret; + /* size must be a multiple of DWORD */ + if (WARN_ON(cmd.cmd.count & cpu_to_le32(3))) + return; if (!iwl_mvm_has_new_rx_api(mvm)) return; - if (notif->sync) { - notif->cookie = mvm->queue_sync_cookie; + if (sync) { + cmd.notif.cookie = mvm->queue_sync_cookie; mvm->queue_sync_state = (1 << mvm->trans->num_rx_queues) - 1; } - ret = iwl_mvm_notify_rx_queue(mvm, qmask, notif, size, !notif->sync); + ret = iwl_mvm_send_cmd(mvm, &hcmd); if (ret) { IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret); goto out; } - if (notif->sync) { + if (sync) { lockdep_assert_held(&mvm->mutex); ret = wait_event_timeout(mvm->rx_sync_waitq, READ_ONCE(mvm->queue_sync_state) == 0 || @@ -5167,21 +5201,18 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, } out: - mvm->queue_sync_state = 0; - if (notif->sync) + if (sync) { + mvm->queue_sync_state = 0; mvm->queue_sync_cookie++; + } } static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_internal_rxq_notif data = { - .type = IWL_MVM_RXQ_EMPTY, - .sync = 1, - }; mutex_lock(&mvm->mutex); - iwl_mvm_sync_rx_queues_internal(mvm, &data, sizeof(data)); + iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY, true, NULL, 0); mutex_unlock(&mvm->mutex); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 0a963d01b825..4d9d4d6892fc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -591,7 +591,6 @@ struct iwl_mvm_tcm { enum iwl_mvm_traffic_load global_load; bool low_latency[NUM_MAC_INDEX_DRIVER]; bool change[NUM_MAC_INDEX_DRIVER]; - bool global_change; } result; }; @@ -1096,6 +1095,9 @@ struct iwl_mvm { /* sniffer data to include in radiotap */ __le16 cur_aid; u8 cur_bssid[ETH_ALEN]; + + unsigned long last_6ghz_passive_scan_jiffies; + unsigned long last_reset_or_resume_time_jiffies; }; /* Extract MVM priv from op_mode and _hw */ @@ -1570,9 +1572,6 @@ void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); -int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask, - const struct iwl_mvm_internal_rxq_notif *notif, - u32 notif_size, bool async); void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); @@ -2001,8 +2000,9 @@ void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_tdls_ch_switch_work(struct work_struct *work); void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, - struct iwl_mvm_internal_rxq_notif *notif, - u32 size); + enum iwl_mvm_rxq_notif_type type, + bool sync, + const void *data, u32 size); void iwl_mvm_reorder_timer_expired(struct timer_list *t); struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm); struct ieee80211_vif *iwl_mvm_get_vif_by_macid(struct iwl_mvm *mvm, u32 macid); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c index 8772b65c9dab..2d58cb969918 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * 
Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2021 Intel Corporation */ #include "rs.h" #include "fw-api.h" @@ -72,19 +72,15 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm, bool vht_ena = vht_cap->vht_supported; u16 flags = 0; + /* get STBC flags */ if (mvm->cfg->ht_params->stbc && (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1)) { - if (he_cap->has_he) { - if (he_cap->he_cap_elem.phy_cap_info[2] & - IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ) - flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK; - - if (he_cap->he_cap_elem.phy_cap_info[7] & - IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ) - flags |= IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK; - } else if ((ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) || - (vht_ena && - (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))) + if (he_cap->has_he && he_cap->he_cap_elem.phy_cap_info[2] & + IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ) + flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK; + else if (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK) + flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK; + else if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 91b6541d579f..b97708cb869d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** * - * Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2014, 2018 - 2021 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * @@ -1926,9 +1926,7 @@ static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (is_ht(rate)) return index == IWL_RATE_MCS_7_INDEX; if (is_vht(rate)) - return index == IWL_RATE_MCS_7_INDEX || - index == IWL_RATE_MCS_8_INDEX || - index == IWL_RATE_MCS_9_INDEX; + return index == IWL_RATE_MCS_9_INDEX; WARN_ON_ONCE(1); return false; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index af5a6dd81c41..8e26422ca326 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -527,37 +527,6 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue, return false; } -int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask, - const struct iwl_mvm_internal_rxq_notif *notif, - u32 notif_size, bool async) -{ - u8 buf[sizeof(struct iwl_rxq_sync_cmd) + - sizeof(struct iwl_mvm_rss_sync_notif)]; - struct iwl_rxq_sync_cmd *cmd = (void *)buf; - u32 data_size = sizeof(*cmd) + notif_size; - int ret; - - /* - * size must be a multiple of DWORD - * Ensure we don't overflow buf - */ - if (WARN_ON(notif_size & 3 || - notif_size > sizeof(struct iwl_mvm_rss_sync_notif))) - return -EINVAL; - - cmd->rxq_mask = cpu_to_le32(rxq_mask); - cmd->count = cpu_to_le32(notif_size); - cmd->flags = 0; - memcpy(cmd->payload, notif, notif_size); - - ret = iwl_mvm_send_cmd_pdu(mvm, - WIDE_ID(DATA_PATH_GROUP, - TRIGGER_RX_QUEUES_NOTIF_CMD), - async ? CMD_ASYNC : 0, data_size, cmd); - - return ret; -} - /* * Returns true if sn2 - buffer_size < sn1 < sn2. * To be used only in order to compare reorder buffer head with NSSN. 
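The rxmq.c hunks in this area replace the old iwl_mvm_rss_sync_notif wrapper with a split header/payload scheme: the sender (see the mac80211.c hunk above) now prepends the iwl_mvm_internal_rxq_notif metadata itself and passes the caller's payload as a second command fragment, while the receiver (next hunk) strips both the firmware header and the internal header before dispatching on the type. A quick stand-alone check of the size bookkeeping is sketched below; the struct names and the main() harness are invented user-space mirrors of the kernel structures, not driver code:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* User-space mirror of struct iwl_mvm_internal_rxq_notif (defined in the
 * sta.h hunk further below): u16 type + u16 sync + u32 cookie, with the
 * caller payload following in data[].
 */
struct internal_rxq_notif {
	uint16_t type;
	uint16_t sync;
	uint32_t cookie;
	uint8_t data[];
} __attribute__((packed));

/* User-space mirror of struct iwl_mvm_delba_data. */
struct delba_data {
	uint32_t baid;
} __attribute__((packed));

int main(void)
{
	/* cmd.count as built by iwl_mvm_sync_rx_queues_internal():
	 * the internal header plus the caller payload.
	 */
	size_t count = sizeof(struct internal_rxq_notif) +
		       sizeof(struct delba_data);

	/* the sender WARNs and bails out unless count is DWORD aligned */
	assert((count & 3) == 0);

	/* the receiver strips both headers, so what remains must match
	 * the payload size exactly (the WARN_ONCE checks in the hunk below)
	 */
	printf("count=%zu payload=%zu\n",
	       count, count - sizeof(struct internal_rxq_notif));
	return 0;
}

Compiled with gcc, this prints count=12 payload=4, which lines up with the WARN_ONCE size checks in iwl_mvm_rx_queue_notif() for a del-BA notification.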
@@ -573,15 +542,13 @@ static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size) static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn) { if (IWL_MVM_USE_NSSN_SYNC) { - struct iwl_mvm_rss_sync_notif notif = { - .metadata.type = IWL_MVM_RXQ_NSSN_SYNC, - .metadata.sync = 0, - .nssn_sync.baid = baid, - .nssn_sync.nssn = nssn, + struct iwl_mvm_nssn_sync_data notif = { + .baid = baid, + .nssn = nssn, }; - iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, - sizeof(notif)); + iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_NSSN_SYNC, false, + &notif, sizeof(notif)); } } @@ -830,8 +797,7 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi, "invalid notification size %d (%d)", len, (int)(sizeof(*notif) + sizeof(*internal_notif)))) return; - /* remove only the firmware header, we want all of our payload below */ - len -= sizeof(*notif); + len -= sizeof(*notif) + sizeof(*internal_notif); if (internal_notif->sync && mvm->queue_sync_cookie != internal_notif->cookie) { @@ -841,21 +807,19 @@ switch (internal_notif->type) { case IWL_MVM_RXQ_EMPTY: - WARN_ONCE(len != sizeof(*internal_notif), - "invalid empty notification size %d (%d)", - len, (int)sizeof(*internal_notif)); + WARN_ONCE(len, "invalid empty notification size %d", len); break; case IWL_MVM_RXQ_NOTIF_DEL_BA: - if (WARN_ONCE(len != sizeof(struct iwl_mvm_rss_sync_notif), + if (WARN_ONCE(len != sizeof(struct iwl_mvm_delba_data), "invalid delba notification size %d (%d)", - len, (int)sizeof(struct iwl_mvm_rss_sync_notif))) + len, (int)sizeof(struct iwl_mvm_delba_data))) break; iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data); break; case IWL_MVM_RXQ_NSSN_SYNC: - if (WARN_ONCE(len != sizeof(struct iwl_mvm_rss_sync_notif), + if (WARN_ONCE(len != sizeof(struct iwl_mvm_nssn_sync_data), "invalid nssn sync notification size %d (%d)", - len, (int)sizeof(struct iwl_mvm_rss_sync_notif))) + len, (int)sizeof(struct iwl_mvm_nssn_sync_data))) break; iwl_mvm_nssn_sync(mvm, napi, queue, (void *)internal_notif->data); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index caf87f320094..5a0696c44f6d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -43,6 +43,9 @@ /* adaptive dwell number of APs override for social channels */ #define IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS 2 +/* minimal number of 2GHz and 5GHz channels in the regular scan request */ +#define IWL_MVM_6GHZ_PASSIVE_SCAN_MIN_CHANS 4 + struct iwl_mvm_scan_timing_params { u32 suspend_time; u32 max_out_time; @@ -94,6 +97,7 @@ struct iwl_mvm_scan_params { struct cfg80211_scan_6ghz_params *scan_6ghz_params; u32 n_6ghz_params; bool scan_6ghz; + bool enable_6ghz_passive; }; static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm) @@ -1873,6 +1877,98 @@ static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm, return flags; } +static void iwl_mvm_scan_6ghz_passive_scan(struct iwl_mvm *mvm, + struct iwl_mvm_scan_params *params, + struct ieee80211_vif *vif) +{ + struct ieee80211_supported_band *sband = + &mvm->nvm_data->bands[NL80211_BAND_6GHZ]; + u32 n_disabled, i; + + params->enable_6ghz_passive = false; + + if (params->scan_6ghz) + return; + + if (!fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN)) { + IWL_DEBUG_SCAN(mvm, + "6GHz passive scan: Not supported by FW\n"); + return; + } + + /* 6GHz passive scan allowed only on
station interface */ + if (vif->type != NL80211_IFTYPE_STATION) { + IWL_DEBUG_SCAN(mvm, + "6GHz passive scan: not station interface\n"); + return; + } + + /* + * 6GHz passive scan is allowed while associated in a defined time + * interval following HW reset or resume flow + */ + if (vif->bss_conf.assoc && + (time_before(mvm->last_reset_or_resume_time_jiffies + + (IWL_MVM_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT * HZ), + jiffies))) { + IWL_DEBUG_SCAN(mvm, "6GHz passive scan: associated\n"); + return; + } + + /* No need for 6GHz passive scan if not enough time elapsed */ + if (time_after(mvm->last_6ghz_passive_scan_jiffies + + (IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT * HZ), jiffies)) { + IWL_DEBUG_SCAN(mvm, + "6GHz passive scan: timeout did not expire\n"); + return; + } + + /* not enough channels in the regular scan request */ + if (params->n_channels < IWL_MVM_6GHZ_PASSIVE_SCAN_MIN_CHANS) { + IWL_DEBUG_SCAN(mvm, + "6GHz passive scan: not enough channels\n"); + return; + } + + for (i = 0; i < params->n_ssids; i++) { + if (!params->ssids[i].ssid_len) + break; + } + + /* not a wildcard scan, so cannot enable passive 6GHz scan */ + if (i == params->n_ssids) { + IWL_DEBUG_SCAN(mvm, + "6GHz passive scan: no wildcard SSID\n"); + return; + } + + if (!sband || !sband->n_channels) { + IWL_DEBUG_SCAN(mvm, + "6GHz passive scan: no 6GHz channels\n"); + return; + } + + for (i = 0, n_disabled = 0; i < sband->n_channels; i++) { + if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED)) + n_disabled++; + } + + /* + * Not all the 6GHz channels are disabled, so no need for 6GHz passive + * scan + */ + if (n_disabled != sband->n_channels) { + IWL_DEBUG_SCAN(mvm, + "6GHz passive scan: 6GHz channels enabled\n"); + return; + } + + /* all conditions to enable 6ghz passive scan are satisfied */ + IWL_DEBUG_SCAN(mvm, "6GHz passive scan: can be enabled\n"); + params->enable_6ghz_passive = true; +} + static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm, struct iwl_mvm_scan_params *params, struct ieee80211_vif *vif, @@ -1911,6 +2007,9 @@ static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm, params->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ) flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN; + if (params->enable_6ghz_passive) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN; + return flags; } @@ -2183,6 +2282,30 @@ iwl_mvm_scan_umac_fill_ch_p_v6(struct iwl_mvm *mvm, params->n_channels, channel_cfg_flags, vif->type); + + if (params->enable_6ghz_passive) { + struct ieee80211_supported_band *sband = + &mvm->nvm_data->bands[NL80211_BAND_6GHZ]; + u32 i; + + for (i = 0; i < sband->n_channels; i++) { + struct ieee80211_channel *channel = + &sband->channels[i]; + + struct iwl_scan_channel_cfg_umac *cfg = + &cp->channel_config[cp->count]; + + if (!cfg80211_channel_is_psc(channel)) + continue; + + cfg->flags = 0; + cfg->v2.channel_num = channel->hw_value; + cfg->v2.band = PHY_BAND_6; + cfg->v2.iter_count = 1; + cfg->v2.iter_interval = 0; + cp->count++; + } + } } static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif, @@ -2500,6 +2623,8 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_build_scan_probe(mvm, vif, ies, &params); + + iwl_mvm_scan_6ghz_passive_scan(mvm, &params, vif); + + uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params, IWL_MVM_SCAN_REGULAR); @@ -2524,6 +2649,9 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, mvm->scan_status |= IWL_MVM_SCAN_REGULAR; mvm->scan_vif = iwl_mvm_vif_from_mac80211(vif); + if (params.enable_6ghz_passive) +
mvm->last_6ghz_passive_scan_jiffies = jiffies; + schedule_delayed_work(&mvm->scan_timeout_dwork, msecs_to_jiffies(SCAN_TIMEOUT)); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 3a411bbda5fd..f618368eda83 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -2441,12 +2441,12 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid) { - struct iwl_mvm_rss_sync_notif notif = { - .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA, - .metadata.sync = 1, - .delba.baid = baid, + struct iwl_mvm_delba_data notif = { + .baid = baid, }; - iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif)); + + iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_NOTIF_DEL_BA, true, + &notif, sizeof(notif)); }; static void iwl_mvm_free_reorder(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 35a18b96aac5..32b4d1935788 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -281,6 +281,36 @@ struct iwl_mvm_key_pn { } ____cacheline_aligned_in_smp q[]; }; +/** + * enum iwl_mvm_rxq_notif_type - Internal message identifier + * + * @IWL_MVM_RXQ_EMPTY: empty sync notification + * @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA + * @IWL_MVM_RXQ_NSSN_SYNC: notify all the RSS queues with the new NSSN + */ +enum iwl_mvm_rxq_notif_type { + IWL_MVM_RXQ_EMPTY, + IWL_MVM_RXQ_NOTIF_DEL_BA, + IWL_MVM_RXQ_NSSN_SYNC, +}; + +/** + * struct iwl_mvm_internal_rxq_notif - Internal representation of the data sent + * in &iwl_rxq_sync_cmd. Should be DWORD aligned. + * FW is agnostic to the payload, so there are no endianness requirements.
+ * + * @type: value from &iwl_mvm_rxq_notif_type + * @sync: ctrl path is waiting for all notifications to be received + * @cookie: internal cookie to identify old notifications + * @data: payload + */ +struct iwl_mvm_internal_rxq_notif { + u16 type; + u16 sync; + u32 cookie; + u8 data[]; +} __packed; + struct iwl_mvm_delba_data { u32 baid; } __packed; @@ -290,14 +320,6 @@ struct iwl_mvm_nssn_sync_data { u32 nssn; } __packed; -struct iwl_mvm_rss_sync_notif { - struct iwl_mvm_internal_rxq_notif metadata; - union { - struct iwl_mvm_delba_data delba; - struct iwl_mvm_nssn_sync_data nssn_sync; - }; -} __packed; - /** * struct iwl_mvm_rxq_dup_data - per station per rx queue data * @last_seq: last sequence per tid for duplicate packet detection diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 0b012f8c9eb2..83342a6a6d5b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2017 Intel Deutschland GmbH */ @@ -151,6 +151,16 @@ static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm, if (errmsg) IWL_ERR(mvm, "%s\n", errmsg); + if (mvmvif->csa_bcn_pending) { + struct iwl_mvm_sta *mvmsta; + + rcu_read_lock(); + mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id); + if (!WARN_ON(!mvmsta)) + iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false); + rcu_read_unlock(); + } + iwl_mvm_connection_loss(mvm, vif, errmsg); return true; } @@ -285,6 +295,17 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, break; case NL80211_IFTYPE_STATION: /* + * If we are switching channel, don't disconnect + * if the time event is already done. Beacons can + * be delayed a bit after the switch. + */ + if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) { + IWL_DEBUG_TE(mvm, + "No beacon heard and the CS time event is over, don't disconnect\n"); + break; + } + + /* * By now, we should have finished association * and know the dtim period. 
*/ @@ -713,8 +734,8 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm, IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id)); ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0, sizeof(time_cmd), &time_cmd); - if (WARN_ON(ret)) - return; + if (ret) + IWL_ERR(mvm, "Couldn't remove the time event\n"); } /* diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index b6b481ff1518..c566be99a4c7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -1030,15 +1030,9 @@ iwl_mvm_tcm_load(struct iwl_mvm *mvm, u32 airtime, unsigned long elapsed) return IWL_MVM_TRAFFIC_LOW; } -struct iwl_mvm_tcm_iter_data { - struct iwl_mvm *mvm; - bool any_sent; -}; - static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) { - struct iwl_mvm_tcm_iter_data *data = _data; - struct iwl_mvm *mvm = data->mvm; + struct iwl_mvm *mvm = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); bool low_latency, prev = mvmvif->low_latency & LOW_LATENCY_TRAFFIC; @@ -1060,22 +1054,15 @@ static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) } else { iwl_mvm_update_quotas(mvm, false, NULL); } - - data->any_sent = true; } static void iwl_mvm_tcm_results(struct iwl_mvm *mvm) { - struct iwl_mvm_tcm_iter_data data = { - .mvm = mvm, - .any_sent = false, - }; - mutex_lock(&mvm->mutex); ieee80211_iterate_active_interfaces( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_tcm_iter, &data); + iwl_mvm_tcm_iter, mvm); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) iwl_mvm_config_scan(mvm); @@ -1257,7 +1244,6 @@ static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm, } load = iwl_mvm_tcm_load(mvm, total_airtime, elapsed); - mvm->tcm.result.global_change = load != mvm->tcm.result.global_load; mvm->tcm.result.global_load = load; for (i = 0; i < NUM_NL80211_BANDS; i++) { diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 558a0b2ef0fc..d94bd8d732e9 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2020 Intel Corporation + * Copyright (C) 2005-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -17,10 +17,20 @@ #include "iwl-prph.h" #include "internal.h" +#define TRANS_CFG_MARKER BIT(0) +#define _IS_A(cfg, _struct) __builtin_types_compatible_p(typeof(cfg), \ + struct _struct) +extern int _invalid_type; +#define _TRANS_CFG_MARKER(cfg) \ + (__builtin_choose_expr(_IS_A(cfg, iwl_cfg_trans_params), \ + TRANS_CFG_MARKER, \ + __builtin_choose_expr(_IS_A(cfg, iwl_cfg), 0, _invalid_type))) +#define _ASSIGN_CFG(cfg) (_TRANS_CFG_MARKER(cfg) + (kernel_ulong_t)&(cfg)) + #define IWL_PCI_DEVICE(dev, subdev, cfg) \ .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \ .subvendor = PCI_ANY_ID, .subdevice = (subdev), \ - .driver_data = (kernel_ulong_t)&(cfg) + .driver_data = _ASSIGN_CFG(cfg) /* Hardware specific file defines the PCI IDs table for that hardware module */ static const struct pci_device_id iwl_hw_card_ids[] = { @@ -490,6 +500,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2729, PCI_ANY_ID, iwl_ma_trans_cfg)}, {IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_trans_cfg)}, +/* Bz devices */ + 
{IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)}, #endif /* CONFIG_IWLMVM */ {0} @@ -607,6 +619,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_DEV_INFO(0x2725, 0x4020, iwlax210_2ax_cfg_ty_gf_a0, NULL), IWL_DEV_INFO(0x2725, 0x6020, iwlax210_2ax_cfg_ty_gf_a0, NULL), IWL_DEV_INFO(0x2725, 0x6024, iwlax210_2ax_cfg_ty_gf_a0, NULL), + IWL_DEV_INFO(0x2725, 0x1673, iwlax210_2ax_cfg_ty_gf_a0, iwl_ax210_killer_1675w_name), + IWL_DEV_INFO(0x2725, 0x1674, iwlax210_2ax_cfg_ty_gf_a0, iwl_ax210_killer_1675x_name), IWL_DEV_INFO(0x7A70, 0x0090, iwlax211_2ax_cfg_so_gf_a0_long, NULL), IWL_DEV_INFO(0x7A70, 0x0098, iwlax211_2ax_cfg_so_gf_a0_long, NULL), IWL_DEV_INFO(0x7A70, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0_long, NULL), @@ -1014,12 +1028,12 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_ma_a0_mr_a0, iwl_ma_name), + iwl_cfg_ma_a0_mr_a0, iwl_ax221_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_snj_a0_mr_a0, iwl_ma_name), + iwl_cfg_snj_a0_mr_a0, iwl_ax221_name), /* So with Hr */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, @@ -1067,6 +1081,35 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name), +/* Bz */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + iwl_cfg_bz_a0_hr_b0, iwl_ax201_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + iwl_cfg_bz_a0_gf_a0, iwl_ax211_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, + iwl_cfg_bz_a0_gf4_a0, iwl_ax211_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + iwl_cfg_bz_a0_mr_a0, iwl_ax211_name), + +/* So with GF */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name) + #endif /* CONFIG_IWLMVM */ }; @@ -1075,19 +1118,22 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - const struct iwl_cfg_trans_params *trans = - (struct iwl_cfg_trans_params *)(ent->driver_data); + const struct iwl_cfg_trans_params *trans; const struct iwl_cfg *cfg_7265d __maybe_unused = NULL; struct iwl_trans *iwl_trans; struct iwl_trans_pcie *trans_pcie; int i, ret; + const struct iwl_cfg *cfg; + + trans = (void *)(ent->driver_data & ~TRANS_CFG_MARKER); + /* * This is needed for backwards compatibility with the old * tables, so we don't need to change all the config structs * at the same time. The cfg is used to compare with the old * full cfg structs. 
*/ - const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); + cfg = (void *)(ent->driver_data & ~TRANS_CFG_MARKER); /* make sure trans is the first element in iwl_cfg */ BUILD_BUG_ON(offsetof(struct iwl_cfg, trans)); @@ -1165,7 +1211,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) iwl_trans->cfg = &iwlax210_2ax_cfg_ty_gf_a0; } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) == CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) { - iwl_trans->cfg = &iwlax210_2ax_cfg_so_jf_a0; + iwl_trans->cfg = &iwlax210_2ax_cfg_so_jf_b0; } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) == CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF)) { iwl_trans->cfg = &iwlax211_2ax_cfg_so_gf_a0; @@ -1202,11 +1248,19 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) #endif /* - * If we didn't set the cfg yet, assume the trans is actually - * a full cfg from the old tables. + * If we didn't set the cfg yet, the PCI ID table entry should have + * been a full config - if yes, use it, otherwise fail. */ - if (!iwl_trans->cfg) + if (!iwl_trans->cfg) { + if (ent->driver_data & TRANS_CFG_MARKER) { + pr_err("No config found for PCI dev %04x/%04x, rev=0x%x, rfid=0x%x\n", + pdev->device, pdev->subsystem_device, + iwl_trans->hw_rev, iwl_trans->hw_rf_id); + ret = -EINVAL; + goto out_free_trans; + } iwl_trans->cfg = cfg; + } /* if we don't have a name yet, copy name from the old cfg */ if (!iwl_trans->name) @@ -1222,6 +1276,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) trans_pcie->num_rx_bufs = RX_QUEUE_SIZE; } + ret = iwl_trans_init(iwl_trans); + if (ret) + goto out_free_trans; + pci_set_drvdata(pdev, iwl_trans); iwl_trans->drv = iwl_drv_start(iwl_trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index d9688c7bed07..76a512cd2e5c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -447,6 +447,11 @@ struct iwl_trans const struct iwl_cfg_trans_params *cfg_trans); void iwl_trans_pcie_free(struct iwl_trans *trans); +bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans); +#define _iwl_trans_pcie_grab_nic_access(trans) \ + __cond_lock(nic_access_nobh, \ + likely(__iwl_trans_pcie_grab_nic_access(trans))) + /***************************************************** * RX ******************************************************/ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 2bec97133119..fb8491412be4 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2003-2014, 2018-2020 Intel Corporation + * Copyright (C) 2003-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -1023,6 +1023,9 @@ static int iwl_pcie_napi_poll(struct napi_struct *napi, int budget) ret = iwl_pcie_rx_handle(trans, rxq->id, budget); + IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n", + rxq->id, ret, budget); + if (ret < budget) { spin_lock(&trans_pcie->irq_lock); if (test_bit(STATUS_INT_ENABLED, &trans->status)) @@ -1046,33 +1049,19 @@ static int iwl_pcie_napi_poll_msix(struct napi_struct *napi, int budget) trans = trans_pcie->trans; ret = iwl_pcie_rx_handle(trans, rxq->id, budget); + 
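/*
 * The poll path below folds the old iwl_pcie_napi_poll_msix_shared()
 * into this function by remapping the queue to its IRQ line. A minimal
 * standalone sketch of that rule (the flag value is assumed for the
 * sketch; only the qid == 1 special case comes from the patch):
 */
#include <stdio.h>

#define SHARED_IRQ_FIRST_RSS (1u << 1) /* assumed value */

static int rxq_irq_line(unsigned int shared_vec_mask, int qid)
{
	/* the first RSS queue shares MSI-X vector 0, so RX queue 1 must
	 * re-enable IRQ line 0 after polling instead of line 1 */
	if ((shared_vec_mask & SHARED_IRQ_FIRST_RSS) && qid == 1)
		return 0;
	return qid;
}

int main(void)
{
	printf("shared: qid 1 -> line %d\n",
	       rxq_irq_line(SHARED_IRQ_FIRST_RSS, 1));
	printf("dedicated: qid 1 -> line %d\n", rxq_irq_line(0, 1));
	return 0;
}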
IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n", rxq->id, ret, + budget); if (ret < budget) { - spin_lock(&trans_pcie->irq_lock); - iwl_pcie_clear_irq(trans, rxq->id); - spin_unlock(&trans_pcie->irq_lock); + int irq_line = rxq->id; - napi_complete_done(&rxq->napi, ret); - } + /* FIRST_RSS is shared with line 0 */ + if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS && + rxq->id == 1) + irq_line = 0; - return ret; -} - -static int iwl_pcie_napi_poll_msix_shared(struct napi_struct *napi, int budget) -{ - struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi); - struct iwl_trans_pcie *trans_pcie; - struct iwl_trans *trans; - int ret; - - trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev); - trans = trans_pcie->trans; - - ret = iwl_pcie_rx_handle(trans, rxq->id, budget); - - if (ret < budget) { spin_lock(&trans_pcie->irq_lock); - iwl_pcie_clear_irq(trans, 0); + iwl_pcie_clear_irq(trans, irq_line); spin_unlock(&trans_pcie->irq_lock); napi_complete_done(&rxq->napi, ret); @@ -1134,18 +1123,9 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans) if (!rxq->napi.poll) { int (*poll)(struct napi_struct *, int) = iwl_pcie_napi_poll; - if (trans_pcie->msix_enabled) { + if (trans_pcie->msix_enabled) poll = iwl_pcie_napi_poll_msix; - if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX && - i == 0) - poll = iwl_pcie_napi_poll_msix_shared; - - if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS && - i == 1) - poll = iwl_pcie_napi_poll_msix_shared; - } - netif_napi_add(&trans_pcie->napi_dev, &rxq->napi, poll, NAPI_POLL_WEIGHT); napi_enable(&rxq->napi); @@ -1659,10 +1639,13 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id) if (WARN_ON(entry->entry >= trans->num_rx_queues)) return IRQ_NONE; - if (WARN_ONCE(!rxq, "Got MSI-X interrupt before we have Rx queues")) + if (WARN_ONCE(!rxq, + "[%d] Got MSI-X interrupt before we have Rx queues", + entry->entry)) return IRQ_NONE; lock_map_acquire(&trans->sync_cmd_lockdep_map); + IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry); local_bh_disable(); if (napi_schedule_prep(&rxq->napi)) @@ -2194,9 +2177,16 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry); struct iwl_trans *trans = trans_pcie->trans; struct isr_statistics *isr_stats = &trans_pcie->isr_stats; + u32 inta_fh_msk = ~MSIX_FH_INT_CAUSES_DATA_QUEUE; u32 inta_fh, inta_hw; bool polling = false; + if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) + inta_fh_msk |= MSIX_FH_INT_CAUSES_Q0; + + if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) + inta_fh_msk |= MSIX_FH_INT_CAUSES_Q1; + lock_map_acquire(&trans->sync_cmd_lockdep_map); spin_lock_bh(&trans_pcie->irq_lock); @@ -2205,7 +2195,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) /* * Clear causes registers to avoid being handling the same cause. 
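With the mask below, a vector only acks the FH causes it owns: inta_fh is ANDed with inta_fh_msk before the write, while inta_hw is still cleared in full.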
*/ - iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh); + iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh & inta_fh_msk); iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw); spin_unlock_bh(&trans_pcie->irq_lock); @@ -2219,8 +2209,8 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) if (iwl_have_debug_level(IWL_DL_ISR)) { IWL_DEBUG_ISR(trans, - "ISR inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n", - inta_fh, trans_pcie->fh_mask, + "ISR[%d] inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n", + entry->entry, inta_fh, trans_pcie->fh_mask, iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD)); if (inta_fh & ~trans_pcie->fh_mask) IWL_DEBUG_ISR(trans, @@ -2275,8 +2265,8 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) /* After checking FH register check HW register */ if (iwl_have_debug_level(IWL_DL_ISR)) { IWL_DEBUG_ISR(trans, - "ISR inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n", - inta_hw, trans_pcie->hw_mask, + "ISR[%d] inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n", + entry->entry, inta_hw, trans_pcie->hw_mask, iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD)); if (inta_hw & ~trans_pcie->hw_mask) IWL_DEBUG_ISR(trans, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index 94ffc1ae484d..1bcd36e9e008 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2021 Intel Corporation */ #include "iwl-trans.h" #include "iwl-prph.h" @@ -108,8 +108,8 @@ static void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans) ret = wait_event_timeout(trans_pcie->fw_reset_waitq, trans_pcie->fw_reset_done, FW_RESET_TIMEOUT); if (!ret) - IWL_ERR(trans, - "firmware didn't ACK the reset - continue anyway\n"); + IWL_INFO(trans, + "firmware didn't ACK the reset - continue anyway\n"); } void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) @@ -143,7 +143,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) { IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n"); - iwl_txq_gen2_tx_stop(trans); + iwl_txq_gen2_tx_free(trans); iwl_pcie_rx_stop(trans); } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 1bf4c37fe960..239bc177a3e5 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1604,6 +1604,11 @@ iwl_pcie_set_interrupt_capa(struct pci_dev *pdev, } else { trans_pcie->trans->num_rx_queues = num_irqs - 1; } + + IWL_DEBUG_INFO(trans, + "MSI-X enabled with rx queues %d, vec mask 0x%x\n", + trans_pcie->trans->num_rx_queues, trans_pcie->shared_vec_mask); + WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES); trans_pcie->alloc_vecs = num_irqs; @@ -1973,12 +1978,16 @@ static void iwl_trans_pcie_removal_wk(struct work_struct *wk) module_put(THIS_MODULE); } -static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) +/* + * This version doesn't disable BHs but rather assumes they're + * already disabled. 
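On success it also returns with the reg_lock held; iwl_pcie_set_cmd_in_flight() relies on that when it takes over cmd_hold_nic_awake.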
+ */ +bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) { int ret; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - spin_lock_bh(&trans_pcie->reg_lock); + spin_lock(&trans_pcie->reg_lock); if (trans_pcie->cmd_hold_nic_awake) goto out; @@ -2063,7 +2072,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) } err: - spin_unlock_bh(&trans_pcie->reg_lock); + spin_unlock(&trans_pcie->reg_lock); return false; } @@ -2076,6 +2085,20 @@ out: return true; } +static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) +{ + bool ret; + + local_bh_disable(); + ret = __iwl_trans_pcie_grab_nic_access(trans); + if (ret) { + /* keep BHs disabled until iwl_trans_pcie_release_nic_access */ + return ret; + } + local_bh_enable(); + return false; +} + static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 7ae32491b5da..4f6c187eed69 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2003-2014, 2018-2020 Intel Corporation + * Copyright (C) 2003-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -181,16 +181,20 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - lockdep_assert_held(&trans_pcie->reg_lock); - if (!trans->trans_cfg->base_params->apmg_wake_up_wa) return; - if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) + + spin_lock(&trans_pcie->reg_lock); + + if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) { + spin_unlock(&trans_pcie->reg_lock); return; + } trans_pcie->cmd_hold_nic_awake = false; __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + spin_unlock(&trans_pcie->reg_lock); } /* @@ -198,7 +202,6 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) */ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans->txqs.txq[txq_id]; if (!txq) { @@ -222,12 +225,9 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) iwl_txq_free_tfd(trans, txq); txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); - if (txq->read_ptr == txq->write_ptr) { - spin_lock(&trans_pcie->reg_lock); - if (txq_id == trans->txqs.cmd.q_id) - iwl_pcie_clear_cmd_in_flight(trans); - spin_unlock(&trans_pcie->reg_lock); - } + if (txq->read_ptr == txq->write_ptr && + txq_id == trans->txqs.cmd.q_id) + iwl_pcie_clear_cmd_in_flight(trans); } while (!skb_queue_empty(&txq->overflow_q)) { @@ -629,38 +629,30 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, const struct iwl_host_cmd *cmd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int ret; - - lockdep_assert_held(&trans_pcie->reg_lock); /* Make sure the NIC is still alive in the bus */ if (test_bit(STATUS_TRANS_DEAD, &trans->status)) return -ENODEV; + if (!trans->trans_cfg->base_params->apmg_wake_up_wa) + return 0; + /* * wake up the NIC to make sure that the firmware will see the host * command - we will let the NIC sleep once all the host commands * returned. 
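The wake-up itself is done by grabbing NIC access below, which returns with the reg_lock held.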
This needs to be done only on NICs that have - * apmg_wake_up_wa set. + * apmg_wake_up_wa set (see above.) */ - if (trans->trans_cfg->base_params->apmg_wake_up_wa && - !trans_pcie->cmd_hold_nic_awake) { - __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - - ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, - (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | - CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), - 15000); - if (ret < 0) { - __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - IWL_ERR(trans, "Failed to wake NIC for hcmd\n"); - return -EIO; - } - trans_pcie->cmd_hold_nic_awake = true; - } + if (!_iwl_trans_pcie_grab_nic_access(trans)) + return -EIO; + + /* + * In iwl_trans_grab_nic_access(), we've acquired the reg_lock. + * There, we also returned immediately if cmd_hold_nic_awake is + * already true, so it's OK to unconditionally set it to true. + */ + trans_pcie->cmd_hold_nic_awake = true; + spin_unlock(&trans_pcie->reg_lock); return 0; } @@ -674,7 +666,6 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, */ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans->txqs.txq[txq_id]; int nfreed = 0; u16 r; @@ -705,12 +696,8 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) } } - if (txq->read_ptr == txq->write_ptr) { - /* BHs are also disabled due to txq->lock */ - spin_lock(&trans_pcie->reg_lock); + if (txq->read_ptr == txq->write_ptr) iwl_pcie_clear_cmd_in_flight(trans); - spin_unlock(&trans_pcie->reg_lock); - } iwl_txq_progress(txq); } @@ -914,7 +901,6 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; struct iwl_device_cmd *out_cmd; struct iwl_cmd_meta *out_meta; @@ -1161,19 +1147,16 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); - spin_lock(&trans_pcie->reg_lock); ret = iwl_pcie_set_cmd_in_flight(trans, cmd); if (ret < 0) { idx = ret; - goto unlock_reg; + goto out; } /* Increment and update queue's write index */ txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr); iwl_pcie_txq_inc_wr_ptr(trans, txq); - unlock_reg: - spin_unlock(&trans_pcie->reg_lock); out: spin_unlock_irqrestore(&txq->lock, flags); free_dup_buf: @@ -1367,7 +1350,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, /* this is the data left for this subframe */ unsigned int data_left = min_t(unsigned int, mss, total_len); - struct sk_buff *csum_skb = NULL; unsigned int hdr_tb_len; dma_addr_t hdr_tb_phys; u8 *subf_hdrs_start = hdr_page->pos; @@ -1398,10 +1380,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, hdr_tb_len = hdr_page->pos - start_hdr; hdr_tb_phys = dma_map_single(trans->dev, start_hdr, hdr_tb_len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) { - dev_kfree_skb(csum_skb); + if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) return -EINVAL; - } iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys, hdr_tb_len, false); trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, @@ -1420,10 +1400,8 @@ static 
int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, tb_phys = dma_map_single(trans->dev, tso.data, size, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { - dev_kfree_skb(csum_skb); + if (unlikely(dma_mapping_error(trans->dev, tb_phys))) return -EINVAL; - } iwl_pcie_txq_build_tfd(trans, txq, tb_phys, size, false); diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c index 833f43d1ca7a..451b06069350 100644 --- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2020 Intel Corporation + * Copyright (C) 2020-2021 Intel Corporation */ #include <net/tso.h> #include <linux/tcp.h> @@ -14,30 +14,6 @@ #include <linux/dmapool.h> /* - * iwl_txq_gen2_tx_stop - Stop all Tx DMA channels - */ -void iwl_txq_gen2_tx_stop(struct iwl_trans *trans) -{ - int txq_id; - - /* - * This function can be called before the op_mode disabled the - * queues. This happens when we have an rfkill interrupt. - * Since we stop Tx altogether - mark the queues as stopped. - */ - memset(trans->txqs.queue_stopped, 0, - sizeof(trans->txqs.queue_stopped)); - memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); - - /* Unmap DMA from host system and free skb's */ - for (txq_id = 0; txq_id < ARRAY_SIZE(trans->txqs.txq); txq_id++) { - if (!trans->txqs.txq[txq_id]) - continue; - iwl_txq_gen2_unmap(trans, txq_id); - } -} - -/* * iwl_txq_update_byte_tbl - Set up entry in Tx byte-count array */ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans, @@ -399,7 +375,6 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans, while (total_len) { /* this is the data left for this subframe */ unsigned int data_left = min_t(unsigned int, mss, total_len); - struct sk_buff *csum_skb = NULL; unsigned int tb_len; dma_addr_t tb_phys; u8 *subf_hdrs_start = hdr_page->pos; @@ -430,10 +405,8 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans, tb_len = hdr_page->pos - start_hdr; tb_phys = dma_map_single(trans->dev, start_hdr, tb_len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { - dev_kfree_skb(csum_skb); + if (unlikely(dma_mapping_error(trans->dev, tb_phys))) goto out_err; - } /* * No need for _with_wa, this is from the TSO page and * we leave some space at the end of it so can't hit @@ -458,10 +431,8 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans, ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, tso.data, tb_len, NULL); - if (ret) { - dev_kfree_skb(csum_skb); + if (ret) goto out_err; - } data_left -= tb_len; tso_build_data(skb, &tso, tb_len); @@ -1189,6 +1160,12 @@ static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq, goto error_free_resp; } + if (WARN_ONCE(trans->txqs.txq[qid], + "queue %d already allocated\n", qid)) { + ret = -EIO; + goto error_free_resp; + } + txq->id = qid; trans->txqs.txq[qid] = txq; wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1); diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.h b/drivers/net/wireless/intel/iwlwifi/queue/tx.h index af1dbdf5617a..20efc62acf13 100644 --- a/drivers/net/wireless/intel/iwlwifi/queue/tx.h +++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2020 Intel Corporation + * Copyright (C) 2020-2021 Intel Corporation */ #ifndef 
__iwl_trans_queue_tx_h__ #define __iwl_trans_queue_tx_h__ @@ -123,7 +123,6 @@ int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, void iwl_txq_dyn_free(struct iwl_trans *trans, int queue); void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq); void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq); -void iwl_txq_gen2_tx_stop(struct iwl_trans *trans); void iwl_txq_gen2_tx_free(struct iwl_trans *trans); int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, bool cmd_queue); diff --git a/drivers/net/wireless/marvell/libertas/decl.h b/drivers/net/wireless/marvell/libertas/decl.h index 5d1e30e0c5db..c1e0388ef01d 100644 --- a/drivers/net/wireless/marvell/libertas/decl.h +++ b/drivers/net/wireless/marvell/libertas/decl.h @@ -23,7 +23,6 @@ struct lbs_private; typedef void (*lbs_fw_cb)(struct lbs_private *priv, int ret, const struct firmware *helper, const struct firmware *mainfw); -struct lbs_private; struct sk_buff; struct net_device; struct cmd_ds_command; diff --git a/drivers/net/wireless/marvell/libertas/mesh.h b/drivers/net/wireless/marvell/libertas/mesh.h index d49717b20c09..44c4cd0230a8 100644 --- a/drivers/net/wireless/marvell/libertas/mesh.h +++ b/drivers/net/wireless/marvell/libertas/mesh.h @@ -60,13 +60,13 @@ void lbs_mesh_ethtool_get_strings(struct net_device *dev, #else -#define lbs_init_mesh(priv) -#define lbs_deinit_mesh(priv) -#define lbs_start_mesh(priv) -#define lbs_add_mesh(priv) -#define lbs_remove_mesh(priv) +#define lbs_init_mesh(priv) do { } while (0) +#define lbs_deinit_mesh(priv) do { } while (0) +#define lbs_start_mesh(priv) do { } while (0) +#define lbs_add_mesh(priv) do { } while (0) +#define lbs_remove_mesh(priv) do { } while (0) #define lbs_mesh_set_dev(priv, dev, rxpd) (dev) -#define lbs_mesh_set_txpd(priv, dev, txpd) +#define lbs_mesh_set_txpd(priv, dev, txpd) do { } while (0) #define lbs_mesh_set_channel(priv, channel) (0) #define lbs_mesh_activated(priv) (false) diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index a2ed268ce0da..0961f4a5e415 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -2300,8 +2300,7 @@ done: is_scanning_required = 1; } else { mwifiex_dbg(priv->adapter, MSG, - "info: trying to associate to '%.*s' bssid %pM\n", - req_ssid.ssid_len, (char *)req_ssid.ssid, + "info: trying to associate to bssid %pM\n", bss->bssid); memcpy(&priv->cfg_bssid, bss->bssid, ETH_ALEN); break; @@ -2378,8 +2377,7 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, } mwifiex_dbg(adapter, INFO, - "info: Trying to associate to %.*s and bssid %pM\n", - (int)sme->ssid_len, (char *)sme->ssid, sme->bssid); + "info: Trying to associate to bssid %pM\n", sme->bssid); if (!mwifiex_stop_bg_scan(priv)) cfg80211_sched_scan_stopped_locked(priv->wdev.wiphy, 0); @@ -2512,9 +2510,8 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, goto done; } - mwifiex_dbg(priv->adapter, MSG, - "info: trying to join to %.*s and bssid %pM\n", - params->ssid_len, (char *)params->ssid, params->bssid); + mwifiex_dbg(priv->adapter, MSG, "info: trying to join to bssid %pM\n", + params->bssid); mwifiex_set_ibss_params(priv, params); diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index c2a685f63e95..0b877f3f6b97 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ 
b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -1211,7 +1211,6 @@ mwifiex_ret_802_11_scan_get_tlv_ptrs(struct mwifiex_adapter *adapter, int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, struct mwifiex_bssdescriptor *bss_entry) { - int ret = 0; u8 element_id; struct ieee_types_fh_param_set *fh_param_set; struct ieee_types_ds_param_set *ds_param_set; @@ -1464,7 +1463,7 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, bytes_left -= total_ie_len; } /* while (bytes_left > 2) */ - return ret; + return 0; } /* diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c index c9f8c056aa51..84b32a5f01ee 100644 --- a/drivers/net/wireless/marvell/mwl8k.c +++ b/drivers/net/wireless/marvell/mwl8k.c @@ -1473,6 +1473,7 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index) if (txq->skb == NULL) { dma_free_coherent(&priv->pdev->dev, size, txq->txd, txq->txd_dma); + txq->txd = NULL; return -ENOMEM; } diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c index df25c00d9e06..72622220051b 100644 --- a/drivers/net/wireless/mediatek/mt76/agg-rx.c +++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c @@ -76,9 +76,9 @@ mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames) nframes--; status = (struct mt76_rx_status *)skb->cb; - if (!time_after(jiffies, - status->reorder_time + - mt76_aggr_tid_to_timeo(tid->num))) + if (!time_after32(jiffies, + status->reorder_time + + mt76_aggr_tid_to_timeo(tid->num))) continue; mt76_rx_aggr_release_frames(tid, frames, status->seqno); @@ -122,6 +122,7 @@ mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames) struct ieee80211_bar *bar = mt76_skb_get_hdr(skb); struct mt76_wcid *wcid = status->wcid; struct mt76_rx_tid *tid; + u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK; u16 seqno; if (!ieee80211_is_ctl(bar->frame_control)) @@ -130,9 +131,9 @@ mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames) if (!ieee80211_is_back_req(bar->frame_control)) return; - status->tid = le16_to_cpu(bar->control) >> 12; + status->qos_ctl = tidno = le16_to_cpu(bar->control) >> 12; seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num)); - tid = rcu_dereference(wcid->aggr[status->tid]); + tid = rcu_dereference(wcid->aggr[tidno]); if (!tid) return; @@ -147,12 +148,12 @@ mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames) void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames) { struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; - struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb); struct mt76_wcid *wcid = status->wcid; struct ieee80211_sta *sta; struct mt76_rx_tid *tid; bool sn_less; u16 seqno, head, size, idx; + u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK; u8 ackp; __skb_queue_tail(frames, skb); @@ -161,18 +162,18 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames) if (!sta) return; - if (!status->aggr) { + if (!status->aggr && !(status->flag & RX_FLAG_8023)) { mt76_rx_aggr_check_ctl(skb, frames); return; } /* not part of a BA session */ - ackp = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_ACK_POLICY_MASK; + ackp = status->qos_ctl & IEEE80211_QOS_CTL_ACK_POLICY_MASK; if (ackp != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK && ackp != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL) return; - tid = rcu_dereference(wcid->aggr[status->tid]); + tid = rcu_dereference(wcid->aggr[tidno]); if (!tid) return; diff --git 
a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index 2f27c43ad76d..6ea58aecca41 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -79,13 +79,38 @@ mt76_free_pending_txwi(struct mt76_dev *dev) local_bh_enable(); } +static void +mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q) +{ + writel(q->desc_dma, &q->regs->desc_base); + writel(q->ndesc, &q->regs->ring_size); + q->head = readl(&q->regs->dma_idx); + q->tail = q->head; +} + +static void +mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q) +{ + int i; + + if (!q) + return; + + /* clear descriptors */ + for (i = 0; i < q->ndesc; i++) + q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE); + + writel(0, &q->regs->cpu_idx); + writel(0, &q->regs->dma_idx); + mt76_dma_sync_idx(dev, q); +} + static int mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q, int idx, int n_desc, int bufsize, u32 ring_base) { int size; - int i; spin_lock_init(&q->lock); spin_lock_init(&q->cleanup_lock); @@ -105,14 +130,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q, if (!q->entry) return -ENOMEM; - /* clear descriptors */ - for (i = 0; i < q->ndesc; i++) - q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE); - - writel(q->desc_dma, &q->regs->desc_base); - writel(0, &q->regs->cpu_idx); - writel(0, &q->regs->dma_idx); - writel(q->ndesc, &q->regs->ring_size); + mt76_dma_queue_reset(dev, q); return 0; } @@ -202,15 +220,6 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx, } static void -mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q) -{ - writel(q->desc_dma, &q->regs->desc_base); - writel(q->ndesc, &q->regs->ring_size); - q->head = readl(&q->regs->dma_idx); - q->tail = q->head; -} - -static void mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q) { wmb(); @@ -309,7 +318,7 @@ static int mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q, struct sk_buff *skb, u32 tx_info) { - struct mt76_queue_buf buf; + struct mt76_queue_buf buf = {}; dma_addr_t addr; if (q->queued + 1 >= q->ndesc - 1) @@ -602,7 +611,6 @@ mt76_dma_rx_poll(struct napi_struct *napi, int budget) dev = container_of(napi->dev, struct mt76_dev, napi_dev); qid = napi - dev->napi; - local_bh_disable(); rcu_read_lock(); do { @@ -612,7 +620,6 @@ mt76_dma_rx_poll(struct napi_struct *napi, int budget) } while (cur && done < budget); rcu_read_unlock(); - local_bh_enable(); if (done < budget && napi_complete(napi)) dev->drv->rx_poll_complete(dev, qid); @@ -626,6 +633,10 @@ mt76_dma_init(struct mt76_dev *dev) int i; init_dummy_netdev(&dev->napi_dev); + init_dummy_netdev(&dev->tx_napi_dev); + snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s", + wiphy_name(dev->hw->wiphy)); + dev->napi_dev.threaded = 1; mt76_for_each_q_rx(dev, i) { netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll, @@ -640,9 +651,11 @@ mt76_dma_init(struct mt76_dev *dev) static const struct mt76_queue_ops mt76_dma_ops = { .init = mt76_dma_init, .alloc = mt76_dma_alloc_queue, + .reset_q = mt76_dma_queue_reset, .tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw, .tx_queue_skb = mt76_dma_tx_queue_skb, .tx_cleanup = mt76_dma_tx_cleanup, + .rx_cleanup = mt76_dma_rx_cleanup, .rx_reset = mt76_dma_rx_reset, .kick = mt76_dma_kick_queue, }; diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c index 665b54c5c8ae..6d895738222a 100644 --- 
a/drivers/net/wireless/mediatek/mt76/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/eeprom.c @@ -91,16 +91,9 @@ void mt76_eeprom_override(struct mt76_phy *phy) { struct mt76_dev *dev = phy->dev; - -#ifdef CONFIG_OF struct device_node *np = dev->dev->of_node; - const u8 *mac = NULL; - if (np) - mac = of_get_mac_address(np); - if (!IS_ERR_OR_NULL(mac)) - ether_addr_copy(phy->macaddr, mac); -#endif + of_get_mac_address(np, phy->macaddr); if (!is_valid_ether_addr(phy->macaddr)) { eth_random_addr(phy->macaddr); diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 696d00d1976c..29ef15ec22fe 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -508,6 +508,39 @@ void mt76_free_device(struct mt76_dev *dev) } EXPORT_SYMBOL_GPL(mt76_free_device); +static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q) +{ + struct sk_buff *skb = phy->rx_amsdu[q].head; + struct mt76_dev *dev = phy->dev; + + phy->rx_amsdu[q].head = NULL; + phy->rx_amsdu[q].tail = NULL; + __skb_queue_tail(&dev->rx_skb[q], skb); +} + +static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q, + struct sk_buff *skb) +{ + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; + + if (phy->rx_amsdu[q].head && + (!status->amsdu || status->first_amsdu || + status->seqno != phy->rx_amsdu[q].seqno)) + mt76_rx_release_amsdu(phy, q); + + if (!phy->rx_amsdu[q].head) { + phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list; + phy->rx_amsdu[q].seqno = status->seqno; + phy->rx_amsdu[q].head = skb; + } else { + *phy->rx_amsdu[q].tail = skb; + phy->rx_amsdu[q].tail = &skb->next; + } + + if (!status->amsdu || status->last_amsdu) + mt76_rx_release_amsdu(phy, q); +} + void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb) { struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; @@ -525,7 +558,8 @@ void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb) phy->test.rx_stats.fcs_error[q]++; } #endif - __skb_queue_tail(&dev->rx_skb[q], skb); + + mt76_rx_release_burst(phy, q, skb); } EXPORT_SYMBOL_GPL(mt76_rx); @@ -720,6 +754,8 @@ mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb, status->signal = mstat.signal; status->chains = mstat.chains; status->ampdu_reference = mstat.ampdu_ref; + status->device_timestamp = mstat.timestamp; + status->mactime = mstat.timestamp; BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb)); BUILD_BUG_ON(sizeof(status->chain_signal) != @@ -737,6 +773,7 @@ mt76_check_ccmp_pn(struct sk_buff *skb) struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; struct mt76_wcid *wcid = status->wcid; struct ieee80211_hdr *hdr; + u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK; int ret; if (!(status->flag & RX_FLAG_DECRYPTED)) @@ -757,12 +794,12 @@ mt76_check_ccmp_pn(struct sk_buff *skb) } BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0])); - ret = memcmp(status->iv, wcid->rx_key_pn[status->tid], + ret = memcmp(status->iv, wcid->rx_key_pn[tidno], sizeof(status->iv)); if (ret <= 0) return -EINVAL; /* replay */ - memcpy(wcid->rx_key_pn[status->tid], status->iv, sizeof(status->iv)); + memcpy(wcid->rx_key_pn[tidno], status->iv, sizeof(status->iv)); if (status->flag & RX_FLAG_IV_STRIPPED) status->flag |= RX_FLAG_PN_VALIDATED; @@ -785,6 +822,7 @@ mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status, }; struct ieee80211_sta *sta; u32 airtime; + u8 tidno = status->qos_ctl & 
IEEE80211_QOS_CTL_TID_MASK; airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len); spin_lock(&dev->cc_lock); @@ -795,7 +833,7 @@ mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status, return; sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv); - ieee80211_sta_register_airtime(sta, status->tid, 0, airtime); + ieee80211_sta_register_airtime(sta, tidno, 0, airtime); } static void @@ -823,7 +861,6 @@ mt76_airtime_flush_ampdu(struct mt76_dev *dev) static void mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb) { - struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb); struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; struct mt76_wcid *wcid = status->wcid; @@ -831,6 +868,11 @@ mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb) return; if (!wcid || !wcid->sta) { + struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb); + + if (status->flag & RX_FLAG_8023) + return; + if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr)) return; @@ -864,10 +906,12 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb) struct ieee80211_sta *sta; struct ieee80211_hw *hw; struct mt76_wcid *wcid = status->wcid; + u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK; bool ps; hw = mt76_phy_hw(dev, status->ext_phy); - if (ieee80211_is_pspoll(hdr->frame_control) && !wcid) { + if (ieee80211_is_pspoll(hdr->frame_control) && !wcid && + !(status->flag & RX_FLAG_8023)) { sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL); if (sta) wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv; @@ -885,6 +929,9 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb) wcid->inactive_count = 0; + if (status->flag & RX_FLAG_8023) + return; + if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags)) return; @@ -902,7 +949,7 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb) if (ps && (ieee80211_is_data_qos(hdr->frame_control) || ieee80211_is_qos_nullfunc(hdr->frame_control))) - ieee80211_sta_uapsd_trigger(sta, status->tid); + ieee80211_sta_uapsd_trigger(sta, tidno); if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps) return; @@ -926,13 +973,26 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames, spin_lock(&dev->rx_lock); while ((skb = __skb_dequeue(frames)) != NULL) { + struct sk_buff *nskb = skb_shinfo(skb)->frag_list; + if (mt76_check_ccmp_pn(skb)) { dev_kfree_skb(skb); continue; } + skb_shinfo(skb)->frag_list = NULL; mt76_rx_convert(dev, skb, &hw, &sta); ieee80211_rx_list(hw, sta, skb, &list); + + /* subsequent amsdu frames */ + while (nskb) { + skb = nskb; + nskb = nskb->next; + skb->next = NULL; + + mt76_rx_convert(dev, skb, &hw, &sta); + ieee80211_rx_list(hw, sta, skb, &list); + } } spin_unlock(&dev->rx_lock); diff --git a/drivers/net/wireless/mediatek/mt76/mcu.c b/drivers/net/wireless/mediatek/mt76/mcu.c index d3a5e2c4f12a..70624cd07449 100644 --- a/drivers/net/wireless/mediatek/mt76/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mcu.c @@ -99,6 +99,10 @@ int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb, dev_kfree_skb(skb); } while (ret == -EAGAIN); + /* notify driver code to reset the mcu */ + if (ret == -ETIMEDOUT && dev->mcu_ops->mcu_reset) + dev->mcu_ops->mcu_reset(dev); + out: mutex_unlock(&dev->mcu.mutex); diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 8bf45497cfca..d121c176c37c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -166,6 +166,7 @@ struct mt76_mcu_ops { int 
(*mcu_rd_rp)(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *rp, int len); int (*mcu_restart)(struct mt76_dev *dev); + void (*mcu_reset)(struct mt76_dev *dev); }; struct mt76_queue_ops { @@ -190,13 +191,18 @@ struct mt76_queue_ops { void (*tx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q, bool flush); + void (*rx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q); + void (*kick)(struct mt76_dev *dev, struct mt76_queue *q); + + void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q); }; enum mt76_wcid_flags { MT_WCID_FLAG_CHECK_PS, MT_WCID_FLAG_PS, MT_WCID_FLAG_4ADDR, + MT_WCID_FLAG_HDR_TRANS, }; #define MT76_N_WCIDS 288 @@ -222,6 +228,7 @@ struct mt76_wcid { u16 idx; u8 hw_key_idx; + u8 hw_key_idx2; u8 sta:1; u8 ext_phy:1; @@ -491,15 +498,16 @@ struct mt76_rx_status { u16 wcid_idx; }; - unsigned long reorder_time; + u32 reorder_time; u32 ampdu_ref; + u32 timestamp; u8 iv[6]; u8 ext_phy:1; u8 aggr:1; - u8 tid; + u8 qos_ctl; u16 seqno; u16 freq; @@ -507,6 +515,7 @@ struct mt76_rx_status { u8 enc_flags; u8 encoding:2, bw:3, he_ru:3; u8 he_gi:2, he_dcm:1; + u8 amsdu:1, first_amsdu:1, last_amsdu:1; u8 rate_idx; u8 nss; u8 band; @@ -600,6 +609,12 @@ struct mt76_phy { struct delayed_work mac_work; u8 mac_work_count; + + struct { + struct sk_buff *head; + struct sk_buff **tail; + u16 seqno; + } rx_amsdu[__MT_RXQ_MAX]; }; struct mt76_dev { @@ -628,6 +643,7 @@ struct mt76_dev { struct mt76_mcu mcu; struct net_device napi_dev; + struct net_device tx_napi_dev; spinlock_t rx_lock; struct napi_struct napi[__MT_RXQ_MAX]; struct sk_buff_head rx_skb[__MT_RXQ_MAX]; @@ -783,8 +799,10 @@ static inline u16 mt76_rev(struct mt76_dev *dev) #define mt76_tx_queue_skb_raw(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__) #define mt76_tx_queue_skb(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__) #define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__) -#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__) +#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__) +#define mt76_queue_rx_cleanup(dev, ...) (dev)->mt76.queue_ops->rx_cleanup(&((dev)->mt76), __VA_ARGS__) #define mt76_queue_kick(dev, ...) (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__) +#define mt76_queue_reset(dev, ...) 
(dev)->mt76.queue_ops->reset_q(&((dev)->mt76), __VA_ARGS__) #define mt76_for_each_q_rx(dev, i) \ for (i = 0; i < ARRAY_SIZE((dev)->q_rx) && \ diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c index 0086f18cb79a..2b6244116842 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c @@ -223,7 +223,7 @@ int mt7603_dma_init(struct mt7603_dev *dev) if (ret) return ret; - netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi, + netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, mt7603_poll_tx, NAPI_POLL_WEIGHT); napi_enable(&dev->mt76.tx_napi); diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c index f0b879c3eba8..e1b2cfa56074 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c @@ -548,6 +548,9 @@ int mt7603_register_device(struct mt7603_dev *dev) hw->max_report_rates = 7; hw->max_rate_tries = 11; + hw->radiotap_timestamp.units_pos = + IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US; + hw->sta_data_size = sizeof(struct mt7603_sta); hw->vif_data_size = sizeof(struct mt7603_vif); diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c index cc4e7bc48294..e3a9dd6fbd87 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c @@ -532,20 +532,6 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb) status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED; } - if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB | - MT_RXD2_NORMAL_NON_AMPDU))) { - status->flag |= RX_FLAG_AMPDU_DETAILS; - - /* all subframes of an A-MPDU have the same timestamp */ - if (dev->rx_ampdu_ts != rxd[12]) { - if (!++dev->ampdu_ref) - dev->ampdu_ref++; - } - dev->rx_ampdu_ts = rxd[12]; - - status->ampdu_ref = dev->ampdu_ref; - } - remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET; if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR) @@ -579,6 +565,23 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb) return -EINVAL; } if (rxd0 & MT_RXD0_NORMAL_GROUP_2) { + status->timestamp = le32_to_cpu(rxd[0]); + status->flag |= RX_FLAG_MACTIME_START; + + if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB | + MT_RXD2_NORMAL_NON_AMPDU))) { + status->flag |= RX_FLAG_AMPDU_DETAILS; + + /* all subframes of an A-MPDU have the same timestamp */ + if (dev->rx_ampdu_ts != status->timestamp) { + if (!++dev->ampdu_ref) + dev->ampdu_ref++; + } + dev->rx_ampdu_ts = status->timestamp; + + status->ampdu_ref = dev->ampdu_ref; + } + rxd += 2; if ((u8 *)rxd - skb->data >= skb->len) return -EINVAL; @@ -651,7 +654,7 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb) status->aggr = unicast && !ieee80211_is_qos_nullfunc(hdr->frame_control); - status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; + status->qos_ctl = *ieee80211_get_qos_ctl(hdr); status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); return 0; diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h index b787c56fd8d6..1df5b9fed2bb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h @@ -120,7 +120,7 @@ struct mt7603_dev { unsigned long last_cca_adj; u32 ampdu_ref; - __le32 rx_ampdu_ts; + u32 rx_ampdu_ts; u8 rssi_offset[3]; u8 slottime; diff --git 
a/drivers/net/wireless/mediatek/mt76/mt7603/pci.c b/drivers/net/wireless/mediatek/mt76/mt7603/pci.c index 06fa28f645f2..aa6cb668b012 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/pci.c @@ -7,7 +7,7 @@ #include "mt7603.h" static const struct pci_device_id mt76pci_device_table[] = { - { PCI_DEVICE(0x14c3, 0x7603) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7603) }, { }, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c index 7ae48b4fa564..1b414220521a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c @@ -517,18 +517,23 @@ int mt7615_init_debugfs(struct mt7615_dev *dev) &fops_pm_idle_timeout); debugfs_create_devm_seqfile(dev->mt76.dev, "radio", dir, mt7615_radio_read); - debugfs_create_u32("dfs_hw_pattern", 0400, dir, &dev->hw_pattern); - /* test pattern knobs */ - debugfs_create_u8("pattern_len", 0600, dir, - &dev->radar_pattern.n_pulses); - debugfs_create_u32("pulse_period", 0600, dir, - &dev->radar_pattern.period); - debugfs_create_u16("pulse_width", 0600, dir, - &dev->radar_pattern.width); - debugfs_create_u16("pulse_power", 0600, dir, - &dev->radar_pattern.power); - debugfs_create_file("radar_trigger", 0200, dir, dev, - &fops_radar_pattern); + + if (is_mt7615(&dev->mt76)) { + debugfs_create_u32("dfs_hw_pattern", 0400, dir, + &dev->hw_pattern); + /* test pattern knobs */ + debugfs_create_u8("pattern_len", 0600, dir, + &dev->radar_pattern.n_pulses); + debugfs_create_u32("pulse_period", 0600, dir, + &dev->radar_pattern.period); + debugfs_create_u16("pulse_width", 0600, dir, + &dev->radar_pattern.width); + debugfs_create_u16("pulse_power", 0600, dir, + &dev->radar_pattern.power); + debugfs_create_file("radar_trigger", 0200, dir, dev, + &fops_radar_pattern); + } + debugfs_create_file("reset_test", 0200, dir, dev, &fops_reset_test); debugfs_create_devm_seqfile(dev->mt76.dev, "temperature", dir, diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c index 25e3069cf2b1..0ec1c526f583 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c @@ -176,6 +176,21 @@ static void mt7663_dma_sched_init(struct mt7615_dev *dev) mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET1), 0xedcba987); } +void mt7615_dma_start(struct mt7615_dev *dev) +{ + /* start dma engine */ + mt76_set(dev, MT_WPDMA_GLO_CFG, + MT_WPDMA_GLO_CFG_TX_DMA_EN | + MT_WPDMA_GLO_CFG_RX_DMA_EN | + MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE); + + if (is_mt7622(&dev->mt76)) + mt7622_dma_sched_init(dev); + + if (is_mt7663(&dev->mt76)) + mt7663_dma_sched_init(dev); +} + int mt7615_dma_init(struct mt7615_dev *dev) { int rx_ring_size = MT7615_RX_RING_SIZE; @@ -245,7 +260,7 @@ int mt7615_dma_init(struct mt7615_dev *dev) if (ret < 0) return ret; - netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi, + netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, mt7615_poll_tx, NAPI_POLL_WEIGHT); napi_enable(&dev->mt76.tx_napi); @@ -253,20 +268,11 @@ int mt7615_dma_init(struct mt7615_dev *dev) MT_WPDMA_GLO_CFG_TX_DMA_BUSY | MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 1000); - /* start dma engine */ - mt76_set(dev, MT_WPDMA_GLO_CFG, - MT_WPDMA_GLO_CFG_TX_DMA_EN | - MT_WPDMA_GLO_CFG_RX_DMA_EN); - /* enable interrupts for TX/RX rings */ mt7615_irq_enable(dev, MT_INT_RX_DONE_ALL | mt7615_tx_mcu_int_mask(dev) | MT_INT_MCU_CMD); - if 
(is_mt7622(&dev->mt76)) - mt7622_dma_sched_init(dev); - - if (is_mt7663(&dev->mt76)) - mt7663_dma_sched_init(dev); + mt7615_dma_start(dev); return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c index 2eab23898c77..6dbaaf95ee38 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c @@ -86,6 +86,7 @@ static int mt7615_check_eeprom(struct mt76_dev *dev) switch (val) { case 0x7615: case 0x7622: + case 0x7663: return 0; default: return -EINVAL; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c index 571390fa4de7..1e418740c17b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c @@ -116,10 +116,10 @@ mt7615_mac_init(struct mt7615_dev *dev) mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_EN); mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0, MT_WF_RMAC_MIB_RXTIME_EN); - /* disable hdr translation and hw AMSDU */ mt76_wr(dev, MT_DMA_DCR0, FIELD_PREP(MT_DMA_DCR0_MAX_RX_LEN, 3072) | - MT_DMA_DCR0_RX_VEC_DROP); + MT_DMA_DCR0_RX_VEC_DROP | MT_DMA_DCR0_DAMSDU_EN | + MT_DMA_DCR0_RX_HDR_TRANS_EN); /* disable TDLS filtering */ mt76_clear(dev, MT_WF_PFCR, MT_WF_PFCR_TDLS_EN); mt76_set(dev, MT_WF_MIB_SCR0, MT_MIB_SCR0_AGG_CNT_RANGE_EN); @@ -129,6 +129,7 @@ mt7615_mac_init(struct mt7615_dev *dev) } else { mt7615_init_mac_chain(dev, 1); } + mt7615_mcu_set_rx_hdr_trans_blacklist(dev); } static void @@ -330,6 +331,10 @@ mt7615_init_wiphy(struct ieee80211_hw *hw) hw->max_rates = 3; hw->max_report_rates = 7; hw->max_rate_tries = 11; + hw->netdev_features = NETIF_F_RXCSUM; + + hw->radiotap_timestamp.units_pos = + IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US; phy->slottime = 9; @@ -360,11 +365,17 @@ mt7615_init_wiphy(struct ieee80211_hw *hw) ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN); ieee80211_hw_set(hw, WANT_MONITOR_VIF); + ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD); if (is_mt7615(&phy->dev->mt76)) hw->max_tx_fragments = MT_TXP_MAX_BUF_NUM; else hw->max_tx_fragments = MT_HW_TXP_MAX_BUF_NUM; + + phy->mt76->sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; + phy->mt76->sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; + phy->mt76->sband_5g.sband.vht_cap.cap |= + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; } static void @@ -496,16 +507,11 @@ void mt7615_init_device(struct mt7615_dev *dev) init_waitqueue_head(&dev->reset_wait); init_waitqueue_head(&dev->phy.roc_wait); - INIT_WORK(&dev->reset_work, mt7615_mac_reset_work); INIT_WORK(&dev->phy.roc_work, mt7615_roc_work); timer_setup(&dev->phy.roc_timer, mt7615_roc_timer, 0); mt7615_init_wiphy(hw); dev->pm.idle_timeout = MT7615_PM_TIMEOUT; - dev->mphy.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; - dev->mphy.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; - dev->mphy.sband_5g.sband.vht_cap.cap |= - IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; mt7615_cap_dbdc_disable(dev); dev->phy.dfs_state = -1; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index 59fdd0fc2ad4..005c2829d3df 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -234,11 +234,13 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb) u32 rxd0 = le32_to_cpu(rxd[0]); u32 rxd1 = le32_to_cpu(rxd[1]); 
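/*
 * The A-MPDU bookkeeping a little further down (and the matching mt7603
 * hunk above) keeps ampdu_ref non-zero by skipping zero when the counter
 * wraps. The idiom as a standalone sketch:
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t next_ampdu_ref(uint32_t ref)
{
	/* 0 is reserved as "no reference": "if (!++ref) ref++;" steps
	 * over it when the 32-bit counter wraps */
	if (!++ref)
		ref++;
	return ref;
}

int main(void)
{
	printf("%lu -> %lu\n", 41UL, (unsigned long)next_ampdu_ref(41));
	printf("%lu -> %lu\n", (unsigned long)UINT32_MAX,
	       (unsigned long)next_ampdu_ref(UINT32_MAX));
	return 0;
}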
u32 rxd2 = le32_to_cpu(rxd[2]); - __le32 rxd12 = rxd[12]; - bool unicast, remove_pad, insert_ccmp_hdr = false; + u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM; + bool unicast, hdr_trans, remove_pad, insert_ccmp_hdr = false; int phy_idx; int i, idx; - u8 chfreq; + u8 chfreq, amsdu_info, qos_ctl = 0; + u16 seq_ctrl = 0; + __le16 fc = 0; memset(status, 0, sizeof(*status)); @@ -254,8 +256,12 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb) else phy_idx = -1; + if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR) + return -EINVAL; + unicast = (rxd1 & MT_RXD1_NORMAL_ADDR_TYPE) == MT_RXD1_NORMAL_U2M; idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2); + hdr_trans = rxd1 & MT_RXD1_NORMAL_HDR_TRANS; status->wcid = mt7615_rx_get_wcid(dev, idx, unicast); if (status->wcid) { @@ -268,6 +274,9 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb) spin_unlock_bh(&dev->sta_poll_lock); } + if ((rxd0 & csum_mask) == csum_mask) + skb->ip_summed = CHECKSUM_UNNECESSARY; + if (rxd2 & MT_RXD2_NORMAL_FCS_ERR) status->flag |= RX_FLAG_FAILED_FCS_CRC; @@ -288,6 +297,13 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb) rxd += 4; if (rxd0 & MT_RXD0_NORMAL_GROUP_4) { + u32 v0 = le32_to_cpu(rxd[0]); + u32 v2 = le32_to_cpu(rxd[2]); + + fc = cpu_to_le16(FIELD_GET(MT_RXD4_FRAME_CONTROL, v0)); + qos_ctl = FIELD_GET(MT_RXD6_QOS_CTL, v2); + seq_ctrl = FIELD_GET(MT_RXD6_SEQ_CTRL, v2); + rxd += 4; if ((u8 *)rxd - skb->data >= skb->len) return -EINVAL; @@ -312,6 +328,23 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb) } if (rxd0 & MT_RXD0_NORMAL_GROUP_2) { + status->timestamp = le32_to_cpu(rxd[0]); + status->flag |= RX_FLAG_MACTIME_START; + + if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB | + MT_RXD2_NORMAL_NON_AMPDU))) { + status->flag |= RX_FLAG_AMPDU_DETAILS; + + /* all subframes of an A-MPDU have the same timestamp */ + if (phy->rx_ampdu_ts != status->timestamp) { + if (!++phy->ampdu_ref) + phy->ampdu_ref++; + } + phy->rx_ampdu_ts = status->timestamp; + + status->ampdu_ref = phy->ampdu_ref; + } + rxd += 2; if ((u8 *)rxd - skb->data >= skb->len) return -EINVAL; @@ -355,20 +388,6 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb) if (!sband->channels) return -EINVAL; - if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB | - MT_RXD2_NORMAL_NON_AMPDU))) { - status->flag |= RX_FLAG_AMPDU_DETAILS; - - /* all subframes of an A-MPDU have the same timestamp */ - if (phy->rx_ampdu_ts != rxd12) { - if (!++phy->ampdu_ref) - phy->ampdu_ref++; - } - phy->rx_ampdu_ts = rxd12; - - status->ampdu_ref = phy->ampdu_ref; - } - if (rxd0 & MT_RXD0_NORMAL_GROUP_3) { u32 rxdg0 = le32_to_cpu(rxd[0]); u32 rxdg1 = le32_to_cpu(rxd[1]); @@ -446,20 +465,42 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb) skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad); - if (insert_ccmp_hdr) { + amsdu_info = FIELD_GET(MT_RXD1_NORMAL_PAYLOAD_FORMAT, rxd1); + status->amsdu = !!amsdu_info; + if (status->amsdu) { + status->first_amsdu = amsdu_info == MT_RXD1_FIRST_AMSDU_FRAME; + status->last_amsdu = amsdu_info == MT_RXD1_LAST_AMSDU_FRAME; + if (!hdr_trans) { + memmove(skb->data + 2, skb->data, + ieee80211_get_hdrlen_from_skb(skb)); + skb_pull(skb, 2); + } + } + + if (insert_ccmp_hdr && !hdr_trans) { u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1); mt76_insert_ccmp_hdr(skb, key_id); } - hdr = (struct ieee80211_hdr *)skb->data; - if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control)) + if (!hdr_trans) { + 
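/* no hw header translation: the 802.11 header is still in place, so
 * take frame_control and, for QoS data, the sequence and QoS fields
 * straight from it; the else branch is the translated case, where the
 * frame is already 802.3 and mac80211 is told so via RX_FLAG_8023
 */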
hdr = (struct ieee80211_hdr *)skb->data; + fc = hdr->frame_control; + if (ieee80211_is_data_qos(fc)) { + seq_ctrl = le16_to_cpu(hdr->seq_ctrl); + qos_ctl = *ieee80211_get_qos_ctl(hdr); + } + } else { + status->flag |= RX_FLAG_8023; + } + + if (!status->wcid || !ieee80211_is_data_qos(fc)) return 0; status->aggr = unicast && - !ieee80211_is_qos_nullfunc(hdr->frame_control); - status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; - status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); + !ieee80211_is_qos_nullfunc(fc); + status->qos_ctl = qos_ctl; + status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl); return 0; } @@ -690,7 +731,7 @@ mt7615_txp_skb_unmap_fw(struct mt76_dev *dev, struct mt7615_fw_txp *txp) { int i; - for (i = 1; i < txp->nbuf; i++) + for (i = 0; i < txp->nbuf; i++) dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]), le16_to_cpu(txp->len[i]), DMA_TO_DEVICE); } @@ -966,6 +1007,7 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta, struct mt7615_dev *dev = phy->dev; struct mt7615_rate_desc rd; u32 w5, w27, addr; + u16 idx = sta->vif->mt76.omac_idx; if (!mt76_is_mmio(&dev->mt76)) { mt7615_mac_queue_rate_update(phy, sta, probe_rate, rates); @@ -1017,7 +1059,10 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta, mt76_wr(dev, addr + 27 * 4, w27); - mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */ + idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx; + addr = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx); + + mt76_set(dev, addr, MT_LPON_TCR_MODE); /* TSF read */ sta->rate_set_tsf = mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0); sta->rate_set_tsf |= rd.rateset; @@ -1033,7 +1078,7 @@ EXPORT_SYMBOL_GPL(mt7615_mac_set_rates); static int mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid, struct ieee80211_key_conf *key, - enum mt7615_cipher_type cipher, + enum mt7615_cipher_type cipher, u16 cipher_mask, enum set_key_cmd cmd) { u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4; @@ -1050,22 +1095,22 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid, memcpy(data + 16, key->key + 24, 8); memcpy(data + 24, key->key + 16, 8); } else { - if (cipher != MT_CIPHER_BIP_CMAC_128 && wcid->cipher) - memmove(data + 16, data, 16); - if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher) + if (cipher_mask == BIT(cipher)) memcpy(data, key->key, key->keylen); - else if (cipher == MT_CIPHER_BIP_CMAC_128) + else if (cipher != MT_CIPHER_BIP_CMAC_128) + memcpy(data, key->key, 16); + if (cipher == MT_CIPHER_BIP_CMAC_128) memcpy(data + 16, key->key, 16); } } else { - if (wcid->cipher & ~BIT(cipher)) { - if (cipher != MT_CIPHER_BIP_CMAC_128) - memmove(data, data + 16, 16); + if (cipher == MT_CIPHER_BIP_CMAC_128) memset(data + 16, 0, 16); - } else { + else if (cipher_mask) + memset(data, 0, 16); + if (!cipher_mask) memset(data, 0, sizeof(data)); - } } + mt76_wr_copy(dev, addr, data, sizeof(data)); return 0; @@ -1073,7 +1118,7 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid, static int mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid, - enum mt7615_cipher_type cipher, + enum mt7615_cipher_type cipher, u16 cipher_mask, int keyidx, enum set_key_cmd cmd) { u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1; @@ -1083,20 +1128,23 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid, w0 = mt76_rr(dev, addr); w1 = mt76_rr(dev, addr + 4); - if (cmd == SET_KEY) { - w0 |= MT_WTBL_W0_RX_KEY_VALID | - 
FIELD_PREP(MT_WTBL_W0_RX_IK_VALID, - cipher == MT_CIPHER_BIP_CMAC_128); - if (cipher != MT_CIPHER_BIP_CMAC_128 || - !wcid->cipher) - w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx); - } else { - if (!(wcid->cipher & ~BIT(cipher))) - w0 &= ~(MT_WTBL_W0_RX_KEY_VALID | - MT_WTBL_W0_KEY_IDX); - if (cipher == MT_CIPHER_BIP_CMAC_128) - w0 &= ~MT_WTBL_W0_RX_IK_VALID; + + if (cipher_mask) + w0 |= MT_WTBL_W0_RX_KEY_VALID; + else + w0 &= ~(MT_WTBL_W0_RX_KEY_VALID | MT_WTBL_W0_KEY_IDX); + if (cipher_mask & BIT(MT_CIPHER_BIP_CMAC_128)) + w0 |= MT_WTBL_W0_RX_IK_VALID; + else + w0 &= ~MT_WTBL_W0_RX_IK_VALID; + + if (cmd == SET_KEY && + (cipher != MT_CIPHER_BIP_CMAC_128 || + cipher_mask == BIT(cipher))) { + w0 &= ~MT_WTBL_W0_KEY_IDX; + w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx); } + mt76_wr(dev, MT_WTBL_RICR0, w0); mt76_wr(dev, MT_WTBL_RICR1, w1); @@ -1109,24 +1157,25 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid, static void mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid, - enum mt7615_cipher_type cipher, + enum mt7615_cipher_type cipher, u16 cipher_mask, enum set_key_cmd cmd) { u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx); - if (cmd == SET_KEY) { - if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher) - mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE, - FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher)); - } else { - if (cipher != MT_CIPHER_BIP_CMAC_128 && - wcid->cipher & BIT(MT_CIPHER_BIP_CMAC_128)) - mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE, - FIELD_PREP(MT_WTBL_W2_KEY_TYPE, - MT_CIPHER_BIP_CMAC_128)); - else if (!(wcid->cipher & ~BIT(cipher))) - mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE); + if (!cipher_mask) { + mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE); + return; } + + if (cmd != SET_KEY) + return; + + if (cipher == MT_CIPHER_BIP_CMAC_128 && + cipher_mask & ~BIT(MT_CIPHER_BIP_CMAC_128)) + return; + + mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE, + FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher)); } int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, @@ -1135,25 +1184,30 @@ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, enum set_key_cmd cmd) { enum mt7615_cipher_type cipher; + u16 cipher_mask = wcid->cipher; int err; cipher = mt7615_mac_get_cipher(key->cipher); if (cipher == MT_CIPHER_NONE) return -EOPNOTSUPP; - mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cmd); - err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cmd); + if (cmd == SET_KEY) + cipher_mask |= BIT(cipher); + else + cipher_mask &= ~BIT(cipher); + + mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cipher_mask, cmd); + err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cipher_mask, + cmd); if (err < 0) return err; - err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx, cmd); + err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, cipher_mask, + key->keyidx, cmd); if (err < 0) return err; - if (cmd == SET_KEY) - wcid->cipher |= BIT(cipher); - else - wcid->cipher &= ~BIT(cipher); + wcid->cipher = cipher_mask; return 0; } @@ -1821,10 +1875,8 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy) int i, aggr; u32 val, val2; - memset(mib, 0, sizeof(*mib)); - - mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(ext_phy), - MT_MIB_SDR3_FCS_ERR_MASK); + mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy), + MT_MIB_SDR3_FCS_ERR_MASK); val = mt76_get_field(dev, MT_MIB_SDR14(ext_phy), MT_MIB_AMPDU_MPDU_COUNT); @@ -1837,24 +1889,16 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy) aggr = ext_phy ? 
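/*
 * Editor's sketch of the WTBL word-0 update in mt7615_mac_wtbl_update_pk()
 * above: validity bits are now derived from the remaining cipher mask
 * instead of from the SET/DISABLE command, so removing one of two keys no
 * longer clears state the other key still needs. The field layout below
 * is invented for illustration.
 */
#include <stdbool.h>
#include <stdint.h>

#define W0_RX_KEY_VALID (1u << 0)   /* placeholder bit positions */
#define W0_RX_IK_VALID  (1u << 1)
#define W0_KEY_IDX      (3u << 2)
#define BIP_BIT         (1u << 7)   /* stand-in for BIT(MT_CIPHER_BIP_CMAC_128) */

static uint32_t wtbl_w0(uint32_t w0, uint16_t cipher_mask, bool set_key,
			bool key_is_bip, bool key_is_only_cipher,
			unsigned int keyidx)
{
	if (cipher_mask)
		w0 |= W0_RX_KEY_VALID;
	else
		w0 &= ~(W0_RX_KEY_VALID | W0_KEY_IDX);

	if (cipher_mask & BIP_BIT)
		w0 |= W0_RX_IK_VALID;
	else
		w0 &= ~W0_RX_IK_VALID;

	/* the key index tracks the pairwise key, or a BIP key only when
	 * it is the sole key installed */
	if (set_key && (!key_is_bip || key_is_only_cipher)) {
		w0 &= ~W0_KEY_IDX;
		w0 |= (keyidx << 2) & W0_KEY_IDX;
	}
	return w0;
}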
ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0; for (i = 0; i < 4; i++) { val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i)); - - val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val); - if (val2 > mib->ack_fail_cnt) - mib->ack_fail_cnt = val2; - - val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val); - if (val2 > mib->ba_miss_cnt) - mib->ba_miss_cnt = val2; + mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val); + mib->ack_fail_cnt += FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, + val); val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i)); - val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val); - if (val2 > mib->rts_retries_cnt) { - mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val); - mib->rts_retries_cnt = val2; - } + mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val); + mib->rts_retries_cnt += FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, + val); val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i)); - dev->mt76.aggr_stats[aggr++] += val & 0xffff; dev->mt76.aggr_stats[aggr++] += val >> 16; } @@ -1924,74 +1968,6 @@ void mt7615_mac_work(struct work_struct *work) MT7615_WATCHDOG_TIME); } -static bool -mt7615_wait_reset_state(struct mt7615_dev *dev, u32 state) -{ - bool ret; - - ret = wait_event_timeout(dev->reset_wait, - (READ_ONCE(dev->reset_state) & state), - MT7615_RESET_TIMEOUT); - WARN(!ret, "Timeout waiting for MCU reset state %x\n", state); - return ret; -} - -static void -mt7615_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif) -{ - struct ieee80211_hw *hw = priv; - struct mt7615_dev *dev = mt7615_hw_dev(hw); - - switch (vif->type) { - case NL80211_IFTYPE_MESH_POINT: - case NL80211_IFTYPE_ADHOC: - case NL80211_IFTYPE_AP: - mt7615_mcu_add_beacon(dev, hw, vif, - vif->bss_conf.enable_beacon); - break; - default: - break; - } -} - -static void -mt7615_update_beacons(struct mt7615_dev *dev) -{ - ieee80211_iterate_active_interfaces(dev->mt76.hw, - IEEE80211_IFACE_ITER_RESUME_ALL, - mt7615_update_vif_beacon, dev->mt76.hw); - - if (!dev->mt76.phy2) - return; - - ieee80211_iterate_active_interfaces(dev->mt76.phy2->hw, - IEEE80211_IFACE_ITER_RESUME_ALL, - mt7615_update_vif_beacon, dev->mt76.phy2->hw); -} - -void mt7615_dma_reset(struct mt7615_dev *dev) -{ - int i; - - mt76_clear(dev, MT_WPDMA_GLO_CFG, - MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN | - MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE); - usleep_range(1000, 2000); - - mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true); - for (i = 0; i < __MT_TXQ_MAX; i++) - mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true); - - mt76_for_each_q_rx(&dev->mt76, i) { - mt76_queue_rx_reset(dev, i); - } - - mt76_set(dev, MT_WPDMA_GLO_CFG, - MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN | - MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE); -} -EXPORT_SYMBOL_GPL(mt7615_dma_reset); - void mt7615_tx_token_put(struct mt7615_dev *dev) { struct mt76_txwi_cache *txwi; @@ -2009,94 +1985,6 @@ void mt7615_tx_token_put(struct mt7615_dev *dev) } EXPORT_SYMBOL_GPL(mt7615_tx_token_put); -void mt7615_mac_reset_work(struct work_struct *work) -{ - struct mt7615_phy *phy2; - struct mt76_phy *ext_phy; - struct mt7615_dev *dev; - - dev = container_of(work, struct mt7615_dev, reset_work); - ext_phy = dev->mt76.phy2; - phy2 = ext_phy ? 
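/*
 * Editor's sketch of the mt7615_mac_update_mib_stats() rework above:
 * instead of keeping the maximum value ever seen, each poll now adds the
 * hardware snapshot to a running software total (and mt7615_get_stats()
 * zeroes the totals after reporting). This model assumes clear-on-read
 * style counters; the "register" here is simulated.
 */
#include <stdint.h>

struct mib_totals {
	uint32_t ack_fail_cnt;
	uint32_t ba_miss_cnt;
};

static uint32_t read_and_clear(uint32_t *hw_counter)
{
	uint32_t val = *hw_counter;

	*hw_counter = 0;
	return val;
}

static void mib_poll(struct mib_totals *m, uint32_t *ack_reg, uint32_t *ba_reg)
{
	m->ack_fail_cnt += read_and_clear(ack_reg);
	m->ba_miss_cnt += read_and_clear(ba_reg);
}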
ext_phy->priv : NULL; - - if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_PDMA)) - return; - - ieee80211_stop_queues(mt76_hw(dev)); - if (ext_phy) - ieee80211_stop_queues(ext_phy->hw); - - set_bit(MT76_RESET, &dev->mphy.state); - set_bit(MT76_MCU_RESET, &dev->mphy.state); - wake_up(&dev->mt76.mcu.wait); - cancel_delayed_work_sync(&dev->mphy.mac_work); - del_timer_sync(&dev->phy.roc_timer); - cancel_work_sync(&dev->phy.roc_work); - if (phy2) { - cancel_delayed_work_sync(&phy2->mt76->mac_work); - del_timer_sync(&phy2->roc_timer); - cancel_work_sync(&phy2->roc_work); - } - - /* lock/unlock all queues to ensure that no tx is pending */ - mt76_txq_schedule_all(&dev->mphy); - if (ext_phy) - mt76_txq_schedule_all(ext_phy); - - mt76_worker_disable(&dev->mt76.tx_worker); - napi_disable(&dev->mt76.napi[0]); - napi_disable(&dev->mt76.napi[1]); - napi_disable(&dev->mt76.tx_napi); - - mt7615_mutex_acquire(dev); - - mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_PDMA_STOPPED); - - mt7615_tx_token_put(dev); - idr_init(&dev->token); - - if (mt7615_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) { - mt7615_dma_reset(dev); - - mt76_wr(dev, MT_WPDMA_MEM_RNG_ERR, 0); - - mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_PDMA_INIT); - mt7615_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE); - } - - clear_bit(MT76_MCU_RESET, &dev->mphy.state); - clear_bit(MT76_RESET, &dev->mphy.state); - - mt76_worker_enable(&dev->mt76.tx_worker); - napi_enable(&dev->mt76.tx_napi); - napi_schedule(&dev->mt76.tx_napi); - - napi_enable(&dev->mt76.napi[0]); - napi_schedule(&dev->mt76.napi[0]); - - napi_enable(&dev->mt76.napi[1]); - napi_schedule(&dev->mt76.napi[1]); - - ieee80211_wake_queues(mt76_hw(dev)); - if (ext_phy) - ieee80211_wake_queues(ext_phy->hw); - - mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE); - mt7615_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE); - - mt7615_update_beacons(dev); - - mt7615_mutex_release(dev); - - ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work, - MT7615_WATCHDOG_TIME); - if (phy2) - ieee80211_queue_delayed_work(ext_phy->hw, - &phy2->mt76->mac_work, - MT7615_WATCHDOG_TIME); - -} - static void mt7615_dfs_stop_radar_detector(struct mt7615_phy *phy) { struct mt7615_dev *dev = phy->dev; @@ -2304,8 +2192,10 @@ void mt7615_coredump_work(struct work_struct *work) break; skb_pull(skb, sizeof(struct mt7615_mcu_rxd)); - if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) - break; + if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) { + dev_kfree_skb(skb); + continue; + } memcpy(data, skb->data, skb->len); data += skb->len; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h index 169f4e17b5b4..6bf9da040196 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h @@ -33,6 +33,9 @@ enum rx_pkt_type { #define MT_RXD1_NORMAL_BSSID GENMASK(31, 26) #define MT_RXD1_NORMAL_PAYLOAD_FORMAT GENMASK(25, 24) +#define MT_RXD1_FIRST_AMSDU_FRAME GENMASK(1, 0) +#define MT_RXD1_MID_AMSDU_FRAME BIT(1) +#define MT_RXD1_LAST_AMSDU_FRAME BIT(0) #define MT_RXD1_NORMAL_HDR_TRANS BIT(23) #define MT_RXD1_NORMAL_HDR_OFFSET BIT(22) #define MT_RXD1_NORMAL_MAC_HDR_LEN GENMASK(21, 16) @@ -78,6 +81,11 @@ enum rx_pkt_type { #define MT_RXD3_NORMAL_TSF_COMPARE_LOSS BIT(8) #define MT_RXD3_NORMAL_RXV_SEQ GENMASK(7, 0) +#define MT_RXD4_FRAME_CONTROL GENMASK(15, 0) + +#define MT_RXD6_SEQ_CTRL GENMASK(15, 0) +#define MT_RXD6_QOS_CTL GENMASK(31, 16) + #define MT_RXV1_ACID_DET_H BIT(31) #define 
MT_RXV1_ACID_DET_L BIT(30) #define MT_RXV1_VHTA2_B8_B3 GENMASK(29, 24) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index 25faf486d279..e30b256784e0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -29,6 +29,7 @@ static int mt7615_start(struct ieee80211_hw *hw) struct mt7615_dev *dev = mt7615_hw_dev(hw); struct mt7615_phy *phy = mt7615_hw_phy(hw); bool running; + int ret; if (!mt7615_wait_for_mcu_init(dev)) return -EIO; @@ -38,21 +39,38 @@ static int mt7615_start(struct ieee80211_hw *hw) running = mt7615_dev_running(dev); if (!running) { - mt7615_mcu_set_pm(dev, 0, 0); - mt76_connac_mcu_set_mac_enable(&dev->mt76, 0, true, false); + ret = mt7615_mcu_set_pm(dev, 0, 0); + if (ret) + goto out; + + ret = mt76_connac_mcu_set_mac_enable(&dev->mt76, 0, true, false); + if (ret) + goto out; + mt7615_mac_enable_nf(dev, 0); } if (phy != &dev->phy) { - mt7615_mcu_set_pm(dev, 1, 0); - mt76_connac_mcu_set_mac_enable(&dev->mt76, 1, true, false); + ret = mt7615_mcu_set_pm(dev, 1, 0); + if (ret) + goto out; + + ret = mt76_connac_mcu_set_mac_enable(&dev->mt76, 1, true, false); + if (ret) + goto out; + mt7615_mac_enable_nf(dev, 1); } - if (mt7615_firmware_offload(dev)) - mt76_connac_mcu_set_channel_domain(phy->mt76); + if (mt7615_firmware_offload(dev)) { + ret = mt76_connac_mcu_set_channel_domain(phy->mt76); + if (ret) + goto out; + } - mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH); + ret = mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH); + if (ret) + goto out; set_bit(MT76_STATE_RUNNING, &phy->mt76->state); @@ -62,9 +80,10 @@ static int mt7615_start(struct ieee80211_hw *hw) if (!running) mt7615_mac_reset_counters(dev); +out: mt7615_mutex_release(dev); - return 0; + return ret; } static void mt7615_stop(struct ieee80211_hw *hw) @@ -197,7 +216,9 @@ static int mt7615_add_interface(struct ieee80211_hw *hw, dev->omac_mask |= BIT_ULL(mvif->mt76.omac_idx); phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx); - mt7615_mcu_set_dbdc(dev); + ret = mt7615_mcu_set_dbdc(dev); + if (ret) + goto out; idx = MT7615_WTBL_RESERVED - mvif->mt76.idx; @@ -217,8 +238,6 @@ static int mt7615_add_interface(struct ieee80211_hw *hw, ret = mt7615_mcu_add_dev_info(phy, vif, true); if (ret) goto out; - - mt7615_mac_set_beacon_filter(phy, vif, true); out: mt7615_mutex_release(dev); @@ -234,17 +253,17 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw, struct mt7615_phy *phy = mt7615_hw_phy(hw); int idx = msta->wcid.idx; - /* TODO: disable beacon for the bss */ - mt7615_mutex_acquire(dev); + mt7615_mcu_add_bss_info(phy, vif, NULL, false); + mt7615_mcu_sta_add(phy, vif, NULL, false); + mt76_testmode_reset(phy->mt76, true); if (vif == phy->monitor_vif) phy->monitor_vif = NULL; mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid); - mt7615_mac_set_beacon_filter(phy, vif, false); mt7615_mcu_add_dev_info(phy, vif, false); rcu_assign_pointer(dev->mt76.wcid[idx], NULL); @@ -296,8 +315,13 @@ int mt7615_set_channel(struct mt7615_phy *phy) mt76_set_channel(phy->mt76); if (is_mt7615(&dev->mt76) && dev->flash_eeprom) { - mt7615_mcu_apply_rx_dcoc(phy); - mt7615_mcu_apply_tx_dpd(phy); + ret = mt7615_mcu_apply_rx_dcoc(phy); + if (ret) + goto out; + + ret = mt7615_mcu_apply_tx_dpd(phy); + if (ret) + goto out; } ret = mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD_CHANNEL_SWITCH); @@ -306,8 +330,13 @@ int mt7615_set_channel(struct mt7615_phy *phy) mt7615_mac_set_timing(phy); ret = 
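/*
 * Editor's sketch of the control-flow pattern adopted in mt7615_start()
 * and mt7615_set_channel() above: every MCU call is checked, failures
 * jump to a single exit label, and the mutex is released exactly once.
 * step_a()/step_b()/lock()/unlock() are stand-ins for the real helpers.
 */
static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static void lock(void) { }
static void unlock(void) { }

static int start_locked(void)
{
	int ret;

	lock();

	ret = step_a();
	if (ret)
		goto out;

	ret = step_b();
	if (ret)
		goto out;

	/* ...success-only work... */
out:
	unlock();	/* single exit: unlock once, return the first error */
	return ret;
}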
mt7615_dfs_init_radar_detector(phy); + if (ret) + goto out; + mt7615_mac_cca_stats_reset(phy); - mt7615_mcu_set_sku_en(phy, true); + ret = mt7615_mcu_set_sku_en(phy, true); + if (ret) + goto out; mt7615_mac_reset_counters(dev); phy->noise = 0; @@ -337,7 +366,8 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct mt7615_sta *msta = sta ? (struct mt7615_sta *)sta->drv_priv : &mvif->sta; struct mt76_wcid *wcid = &msta->wcid; - int idx = key->keyidx, err; + int idx = key->keyidx, err = 0; + u8 *wcid_keyidx = &wcid->hw_key_idx; /* The hardware does not support per-STA RX GTK, fallback * to software mode for these. @@ -352,6 +382,7 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, /* fall back to sw encryption for unsupported ciphers */ switch (key->cipher) { case WLAN_CIPHER_SUITE_AES_CMAC: + wcid_keyidx = &wcid->hw_key_idx2; key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE; break; case WLAN_CIPHER_SUITE_TKIP: @@ -369,12 +400,13 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, mt7615_mutex_acquire(dev); - if (cmd == SET_KEY) { - key->hw_key_idx = wcid->idx; - wcid->hw_key_idx = idx; - } else if (idx == wcid->hw_key_idx) { - wcid->hw_key_idx = -1; - } + if (cmd == SET_KEY) + *wcid_keyidx = idx; + else if (idx == *wcid_keyidx) + *wcid_keyidx = -1; + else + goto out; + mt76_wcid_key_setup(&dev->mt76, wcid, cmd == SET_KEY ? key : NULL); @@ -383,6 +415,7 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, else err = __mt7615_mac_wtbl_set_key(dev, wcid, key, cmd); +out: mt7615_mutex_release(dev); return err; @@ -526,11 +559,11 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw, } } - if (changed & BSS_CHANGED_BEACON_ENABLED) { - mt7615_mcu_add_bss_info(phy, vif, NULL, info->enable_beacon); - mt7615_mcu_sta_add(phy, vif, NULL, info->enable_beacon); + if (changed & BSS_CHANGED_BEACON_ENABLED && info->enable_beacon) { + mt7615_mcu_add_bss_info(phy, vif, NULL, true); + mt7615_mcu_sta_add(phy, vif, NULL, true); - if (vif->p2p && info->enable_beacon) + if (vif->p2p) mt7615_mcu_set_p2p_oppps(hw, vif); } @@ -544,6 +577,9 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_ARP_FILTER) mt7615_mcu_update_arp_filter(hw, vif, info); + if (changed & BSS_CHANGED_ASSOC) + mt7615_mac_set_beacon_filter(phy, vif, info->assoc); + mt7615_mutex_release(dev); } @@ -583,15 +619,21 @@ int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, if (err) return err; - if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) - mt7615_mcu_add_bss_info(phy, vif, sta, true); + if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) { + err = mt7615_mcu_add_bss_info(phy, vif, sta, true); + if (err) + return err; + } + mt7615_mac_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); - mt7615_mcu_sta_add(&dev->phy, vif, sta, true); + err = mt7615_mcu_sta_add(&dev->phy, vif, sta, true); + if (err) + return err; mt76_connac_power_save_sched(phy->mt76, &dev->pm); - return 0; + return err; } EXPORT_SYMBOL_GPL(mt7615_mac_sta_add); @@ -711,13 +753,13 @@ static int mt7615_set_rts_threshold(struct ieee80211_hw *hw, u32 val) { struct mt7615_dev *dev = mt7615_hw_dev(hw); struct mt7615_phy *phy = mt7615_hw_phy(hw); - int band = phy != &dev->phy; + int err, band = phy != &dev->phy; mt7615_mutex_acquire(dev); - mt76_connac_mcu_set_rts_thresh(&dev->mt76, val, band); + err = mt76_connac_mcu_set_rts_thresh(&dev->mt76, val, band); mt7615_mutex_release(dev); - return 0; + return err; } 
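/*
 * Editor's sketch of the mt7615_set_key() change above: the BIP(CMAC)
 * key index is tracked in a second slot (hw_key_idx2), selected once via
 * a pointer so the SET/DISABLE logic stays cipher-agnostic. Types are
 * simplified for a standalone example.
 */
#include <stdbool.h>
#include <stdint.h>

struct wcid_keys {
	int8_t hw_key_idx;	/* pairwise/group data key */
	int8_t hw_key_idx2;	/* BIP (management) key */
};

static void track_key_idx(struct wcid_keys *w, bool is_cmac, bool set_key,
			  int8_t idx)
{
	int8_t *slot = is_cmac ? &w->hw_key_idx2 : &w->hw_key_idx;

	if (set_key)
		*slot = idx;
	else if (*slot == idx)
		*slot = -1;
	/* else: disabling a key we never tracked; skip the hw update */
}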
static int @@ -745,16 +787,16 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, case IEEE80211_AMPDU_RX_START: mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn, params->buf_size); - mt7615_mcu_add_rx_ba(dev, params, true); + ret = mt7615_mcu_add_rx_ba(dev, params, true); break; case IEEE80211_AMPDU_RX_STOP: mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid); - mt7615_mcu_add_rx_ba(dev, params, false); + ret = mt7615_mcu_add_rx_ba(dev, params, false); break; case IEEE80211_AMPDU_TX_OPERATIONAL: mtxq->aggr = true; mtxq->send_bar = false; - mt7615_mcu_add_tx_ba(dev, params, true); + ret = mt7615_mcu_add_tx_ba(dev, params, true); ssn = mt7615_mac_get_sta_tid_sn(dev, msta->wcid.idx, tid); ieee80211_send_bar(vif, sta->addr, tid, IEEE80211_SN_TO_SEQ(ssn)); @@ -762,7 +804,7 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: mtxq->aggr = false; - mt7615_mcu_add_tx_ba(dev, params, false); + ret = mt7615_mcu_add_tx_ba(dev, params, false); break; case IEEE80211_AMPDU_TX_START: ssn = mt7615_mac_get_sta_tid_sn(dev, msta->wcid.idx, tid); @@ -771,7 +813,7 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, break; case IEEE80211_AMPDU_TX_STOP_CONT: mtxq->aggr = false; - mt7615_mcu_add_tx_ba(dev, params, false); + ret = mt7615_mcu_add_tx_ba(dev, params, false); ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; } @@ -803,26 +845,38 @@ mt7615_get_stats(struct ieee80211_hw *hw, struct mt7615_phy *phy = mt7615_hw_phy(hw); struct mib_stats *mib = &phy->mib; + mt7615_mutex_acquire(phy->dev); + stats->dot11RTSSuccessCount = mib->rts_cnt; stats->dot11RTSFailureCount = mib->rts_retries_cnt; stats->dot11FCSErrorCount = mib->fcs_err_cnt; stats->dot11ACKFailureCount = mib->ack_fail_cnt; + memset(mib, 0, sizeof(*mib)); + + mt7615_mutex_release(phy->dev); + return 0; } static u64 mt7615_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; struct mt7615_dev *dev = mt7615_hw_dev(hw); union { u64 t64; u32 t32[2]; } tsf; + u16 idx = mvif->mt76.omac_idx; + u32 reg; + + idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx; + reg = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx); mt7615_mutex_acquire(dev); - mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */ + mt76_set(dev, reg, MT_LPON_TCR_MODE); /* TSF read */ tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0); tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1); @@ -835,18 +889,24 @@ static void mt7615_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u64 timestamp) { + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; struct mt7615_dev *dev = mt7615_hw_dev(hw); union { u64 t64; u32 t32[2]; } tsf = { .t64 = timestamp, }; + u16 idx = mvif->mt76.omac_idx; + u32 reg; + + idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx; + reg = idx > 1 ? 
MT_LPON_TCR2(idx): MT_LPON_TCR0(idx); mt7615_mutex_acquire(dev); mt76_wr(dev, MT_LPON_UTTR0, tsf.t32[0]); mt76_wr(dev, MT_LPON_UTTR1, tsf.t32[1]); /* TSF software overwrite */ - mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_WRITE); + mt76_set(dev, reg, MT_LPON_TCR_WRITE); mt7615_mutex_release(dev); } @@ -1069,6 +1129,7 @@ static int mt7615_cancel_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct mt7615_phy *phy = mt7615_hw_phy(hw); + int err; if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state)) return 0; @@ -1077,10 +1138,26 @@ static int mt7615_cancel_remain_on_channel(struct ieee80211_hw *hw, cancel_work_sync(&phy->roc_work); mt7615_mutex_acquire(phy->dev); - mt7615_mcu_set_roc(phy, vif, NULL, 0); + err = mt7615_mcu_set_roc(phy, vif, NULL, 0); mt7615_mutex_release(phy->dev); - return 0; + return err; +} + +static void mt7615_sta_set_decap_offload(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + bool enabled) +{ + struct mt7615_dev *dev = mt7615_hw_dev(hw); + struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv; + + if (enabled) + set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags); + else + clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags); + + mt7615_mcu_sta_update_hdr_trans(dev, vif, sta); } #ifdef CONFIG_PM @@ -1183,6 +1260,7 @@ const struct ieee80211_ops mt7615_ops = { .sta_remove = mt7615_sta_remove, .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove, .set_key = mt7615_set_key, + .sta_set_decap_offload = mt7615_sta_set_decap_offload, .ampdu_action = mt7615_ampdu_action, .set_rts_threshold = mt7615_set_rts_threshold, .wake_tx_queue = mt7615_wake_tx_queue, diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c index 631596fc2f36..9b9f8d88e9bb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c @@ -274,7 +274,7 @@ int mt7615_rf_wr(struct mt7615_dev *dev, u32 wf, u32 reg, u32 val) sizeof(req), false); } -static void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en) +void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en) { if (!is_mt7622(&dev->mt76)) return; @@ -283,6 +283,7 @@ static void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en) MT_INFRACFG_MISC_AP2CONN_WAKE, !en * MT_INFRACFG_MISC_AP2CONN_WAKE); } +EXPORT_SYMBOL_GPL(mt7622_trigger_hif_int); static int mt7615_mcu_drv_pmctrl(struct mt7615_dev *dev) { @@ -373,6 +374,23 @@ mt7615_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif) } static void +mt7615_mcu_rx_csa_notify(struct mt7615_dev *dev, struct sk_buff *skb) +{ + struct mt7615_phy *ext_phy = mt7615_ext_phy(dev); + struct mt76_phy *mphy = &dev->mt76.phy; + struct mt7615_mcu_csa_notify *c; + + c = (struct mt7615_mcu_csa_notify *)skb->data; + + if (ext_phy && ext_phy->omac_mask & BIT_ULL(c->omac_idx)) + mphy = dev->mt76.phy2; + + ieee80211_iterate_active_interfaces_atomic(mphy->hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7615_mcu_csa_finish, mphy->hw); +} + +static void mt7615_mcu_rx_radar_detected(struct mt7615_dev *dev, struct sk_buff *skb) { struct mt76_phy *mphy = &dev->mt76.phy; @@ -380,7 +398,7 @@ mt7615_mcu_rx_radar_detected(struct mt7615_dev *dev, struct sk_buff *skb) r = (struct mt7615_mcu_rdd_report *)skb->data; - if (r->idx && dev->mt76.phy2) + if (r->band_idx && dev->mt76.phy2) mphy = dev->mt76.phy2; ieee80211_radar_detected(mphy->hw); @@ -406,7 +424,8 @@ mt7615_mcu_rx_log_message(struct mt7615_dev *dev, struct sk_buff *skb) break; } - 
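/*
 * Editor's sketch of the per-BSSID TSF register selection used in
 * mt7615_get_tsf()/mt7615_set_tsf() above: HW BSSIDs 0 and 1 live in the
 * MT_LPON_TCR0 bank, indexes 2 and up in MT_LPON_TCR2, and extended-MBSS
 * indexes beyond the HW BSSID range fall back to HW_BSSID_0. The LPON
 * base and the HW_BSSID_MAX value are assumptions; the 0x010/0x0f8
 * strides come from the regs.h hunk further down.
 */
#include <stdint.h>

#define LPON_BASE    0x24200u				/* placeholder base */
#define LPON_TCR0(n) (LPON_BASE + 0x010 + (n) * 4)
#define LPON_TCR2(n) (LPON_BASE + 0x0f8 + ((n) - 2) * 4)
#define HW_BSSID_0   0
#define HW_BSSID_MAX 3					/* assumed */

static uint32_t tsf_ctrl_reg(unsigned int omac_idx)
{
	unsigned int idx = omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : omac_idx;

	return idx > 1 ? LPON_TCR2(idx) : LPON_TCR0(idx);
}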
wiphy_info(mt76_hw(dev)->wiphy, "%s: %s", type, data); + wiphy_info(mt76_hw(dev)->wiphy, "%s: %.*s", type, + (int)(skb->len - sizeof(*rxd)), data); } static void @@ -419,9 +438,7 @@ mt7615_mcu_rx_ext_event(struct mt7615_dev *dev, struct sk_buff *skb) mt7615_mcu_rx_radar_detected(dev, skb); break; case MCU_EXT_EVENT_CSA_NOTIFY: - ieee80211_iterate_active_interfaces_atomic(dev->mt76.hw, - IEEE80211_IFACE_ITER_RESUME_ALL, - mt7615_mcu_csa_finish, dev); + mt7615_mcu_rx_csa_notify(dev, skb); break; case MCU_EXT_EVENT_FW_LOG_2_HOST: mt7615_mcu_rx_log_message(dev, skb); @@ -685,6 +702,9 @@ mt7615_mcu_add_beacon_offload(struct mt7615_dev *dev, }; struct sk_buff *skb; + if (!enable) + goto out; + skb = ieee80211_beacon_get_template(hw, vif, &offs); if (!skb) return -EINVAL; @@ -714,6 +734,7 @@ mt7615_mcu_add_beacon_offload(struct mt7615_dev *dev, } dev_kfree_skb(skb); +out: return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_BCN_OFFLOAD, &req, sizeof(req), true); } @@ -973,7 +994,7 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif, mt76_connac_mcu_sta_basic_tlv(sskb, vif, sta, enable); if (enable && sta) - mt76_connac_mcu_sta_tlv(phy->mt76, sskb, sta, vif); + mt76_connac_mcu_sta_tlv(phy->mt76, sskb, sta, vif, 0); wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid, WTBL_RESET_AND_SET, NULL, @@ -987,6 +1008,8 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif, if (sta) mt76_connac_mcu_wtbl_ht_tlv(&dev->mt76, wskb, sta, NULL, wtbl_hdr); + mt76_connac_mcu_wtbl_hdr_trans_tlv(wskb, &msta->wcid, NULL, + wtbl_hdr); } cmd = enable ? MCU_EXT_CMD_WTBL_UPDATE : MCU_EXT_CMD_STA_REC_UPDATE; @@ -1040,6 +1063,9 @@ mt7615_mcu_sta_ba(struct mt7615_dev *dev, wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid, WTBL_SET, sta_wtbl, &skb); + if (IS_ERR(wtbl_hdr)) + return PTR_ERR(wtbl_hdr); + mt76_connac_mcu_wtbl_ba_tlv(&dev->mt76, skb, params, enable, tx, sta_wtbl, wtbl_hdr); @@ -1068,10 +1094,15 @@ __mt7615_mcu_add_sta(struct mt76_phy *phy, struct ieee80211_vif *vif, struct ieee80211_sta *sta, bool enable, int cmd) { struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; - struct mt76_wcid *wcid; + struct mt76_sta_cmd_info info = { + .sta = sta, + .vif = vif, + .enable = enable, + .cmd = cmd, + }; - wcid = sta ? (struct mt76_wcid *)sta->drv_priv : &mvif->sta.wcid; - return mt76_connac_mcu_add_sta_cmd(phy, vif, sta, wcid, enable, cmd); + info.wcid = sta ?
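/*
 * Editor's note on the "%.*s" fix above: firmware log payloads are not
 * guaranteed to be NUL-terminated, so the print must be bounded by the
 * remaining skb length ("." = precision, i.e. a maximum; a bare "%*s"
 * would only set a minimum field width and still read past the buffer).
 * Standalone illustration:
 */
#include <stdio.h>

int main(void)
{
	char buf[4] = { 'F', 'W', 'O', 'K' };	 /* no terminating NUL */

	printf("%.*s\n", (int)sizeof(buf), buf); /* reads at most 4 bytes */
	return 0;
}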
(struct mt76_wcid *)sta->drv_priv : &mvif->sta.wcid; + return mt76_connac_mcu_add_sta_cmd(phy, &info); } static int @@ -1094,6 +1125,25 @@ static const struct mt7615_mcu_ops sta_update_ops = { .set_fw_ctrl = mt7615_mcu_fw_pmctrl, }; +int mt7615_mcu_sta_update_hdr_trans(struct mt7615_dev *dev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv; + struct wtbl_req_hdr *wtbl_hdr; + struct sk_buff *skb = NULL; + + wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid, + WTBL_SET, NULL, &skb); + if (IS_ERR(wtbl_hdr)) + return PTR_ERR(wtbl_hdr); + + mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, &msta->wcid, NULL, wtbl_hdr); + + return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD_WTBL_UPDATE, + true); +} + static int mt7615_mcu_uni_ctrl_pm_state(struct mt7615_dev *dev, int band, int state) { @@ -1120,8 +1170,8 @@ mt7615_mcu_uni_add_beacon_offload(struct mt7615_dev *dev, __le16 tim_ie_pos; __le16 csa_ie_pos; __le16 bcc_ie_pos; - /* 0: enable beacon offload - * 1: disable beacon offload + /* 0: disable beacon offload + * 1: enable beacon offload * 2: update probe respond offload */ u8 enable; @@ -1144,6 +1194,9 @@ mt7615_mcu_uni_add_beacon_offload(struct mt7615_dev *dev, }; struct sk_buff *skb; + if (!enable) + goto out; + skb = ieee80211_beacon_get_template(mt76_hw(dev), vif, &offs); if (!skb) return -EINVAL; @@ -1168,6 +1221,7 @@ mt7615_mcu_uni_add_beacon_offload(struct mt7615_dev *dev, } dev_kfree_skb(skb); +out: return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_BSS_INFO_UPDATE, &req, sizeof(req), true); } @@ -1427,8 +1481,7 @@ static int mt7615_load_n9(struct mt7615_dev *dev, const char *name) sizeof(dev->mt76.hw->wiphy->fw_version), "%.10s-%.15s", hdr->fw_ver, hdr->build_date); - if (!is_mt7615(&dev->mt76) && - !strncmp(hdr->fw_ver, "2.0", sizeof(hdr->fw_ver))) { + if (!is_mt7615(&dev->mt76)) { dev->fw_ver = MT7615_FIRMWARE_V2; dev->mcu_ops = &sta_update_ops; } else { @@ -2155,7 +2208,7 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd) .center_chan2 = ieee80211_frequency_to_channel(freq2), }; - if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) + if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD; else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) && chandef->chan->dfs_state != NL80211_DFS_AVAILABLE) @@ -2497,6 +2550,26 @@ out: return ret; } +int mt7615_mcu_set_rx_hdr_trans_blacklist(struct mt7615_dev *dev) +{ + struct { + u8 operation; + u8 count; + u8 _rsv[2]; + u8 index; + u8 enable; + __le16 etype; + } req = { + .operation = 1, + .count = 1, + .enable = 1, + .etype = cpu_to_le16(ETH_P_PAE), + }; + + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_RX_HDR_TRANS, + &req, sizeof(req), false); +} + int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif, bool enable) { diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h index 3874f45da9eb..98c383e400a1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h @@ -176,10 +176,18 @@ struct mt7615_mcu_rxd { u8 s2d_index; }; +struct mt7615_mcu_csa_notify { + struct mt7615_mcu_rxd rxd; + + u8 omac_idx; + u8 csa_count; + u8 rsv[2]; +} __packed; + struct mt7615_mcu_rdd_report { struct mt7615_mcu_rxd rxd; - u8 idx; + u8 band_idx; u8 long_detected; u8 constant_prf_detected; u8 staggered_prf_detected; @@ -362,30 +370,6 @@ enum { BSS_INFO_MAX_NUM }; -#define 
MT7615_WTBL_UPDATE_MAX_SIZE (sizeof(struct wtbl_req_hdr) + \ - sizeof(struct wtbl_generic) + \ - sizeof(struct wtbl_rx) + \ - sizeof(struct wtbl_ht) + \ - sizeof(struct wtbl_vht) + \ - sizeof(struct wtbl_tx_ps) + \ - sizeof(struct wtbl_hdr_trans) +\ - sizeof(struct wtbl_ba) + \ - sizeof(struct wtbl_bf) + \ - sizeof(struct wtbl_smps) + \ - sizeof(struct wtbl_pn) + \ - sizeof(struct wtbl_spe)) - -#define MT7615_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \ - sizeof(struct sta_rec_basic) + \ - sizeof(struct sta_rec_ht) + \ - sizeof(struct sta_rec_vht) + \ - sizeof(struct sta_rec_uapsd) + \ - sizeof(struct tlv) + \ - MT7615_WTBL_UPDATE_MAX_SIZE) - -#define MT7615_WTBL_UPDATE_BA_SIZE (sizeof(struct wtbl_req_hdr) + \ - sizeof(struct wtbl_ba)) - enum { CH_SWITCH_NORMAL = 0, CH_SWITCH_SCAN = 3, diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c index a7f92fa0488f..eaa22752e7cd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c @@ -1,3 +1,6 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2020 MediaTek Inc. */ + #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h index 491841bc6291..6a50338ec9f5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h @@ -133,11 +133,11 @@ struct mt7615_vif { }; struct mib_stats { - u16 ack_fail_cnt; - u16 fcs_err_cnt; - u16 rts_cnt; - u16 rts_retries_cnt; - u16 ba_miss_cnt; + u32 ack_fail_cnt; + u32 fcs_err_cnt; + u32 rts_cnt; + u32 rts_retries_cnt; + u32 ba_miss_cnt; unsigned long aggr_per; }; @@ -168,7 +168,7 @@ struct mt7615_phy { u8 rdd_state; int dfs_state; - __le32 rx_ampdu_ts; + u32 rx_ampdu_ts; u32 ampdu_ref; struct mib_stats mib; @@ -376,6 +376,7 @@ int mt7615_eeprom_get_power_delta_index(struct mt7615_dev *dev, enum nl80211_band band); int mt7615_wait_pdma_busy(struct mt7615_dev *dev); int mt7615_dma_init(struct mt7615_dev *dev); +void mt7615_dma_start(struct mt7615_dev *dev); void mt7615_dma_cleanup(struct mt7615_dev *dev); int mt7615_mcu_init(struct mt7615_dev *dev); bool mt7615_wait_for_mcu_init(struct mt7615_dev *dev); @@ -408,11 +409,6 @@ static inline bool is_mt7615(struct mt76_dev *dev) return mt76_chip(dev) == 0x7615 || mt76_chip(dev) == 0x7611; } -static inline bool is_mt7663(struct mt76_dev *dev) -{ - return mt76_chip(dev) == 0x7663; -} - static inline bool is_mt7611(struct mt76_dev *dev) { return mt76_chip(dev) == 0x7611; @@ -524,6 +520,10 @@ void mt7615_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, void mt7615_mac_work(struct work_struct *work); void mt7615_txp_skb_unmap(struct mt76_dev *dev, struct mt76_txwi_cache *txwi); +int mt7615_mcu_sta_update_hdr_trans(struct mt7615_dev *dev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +int mt7615_mcu_set_rx_hdr_trans_blacklist(struct mt7615_dev *dev); int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val); int mt7615_mcu_set_pulse_th(struct mt7615_dev *dev, const struct mt7615_dfs_pulse *pulse); @@ -557,6 +557,8 @@ u32 mt7615_mcu_reg_rr(struct mt76_dev *dev, u32 offset); void mt7615_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val); void mt7615_coredump_work(struct work_struct *work); +void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en); + /* usb */ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev 
*mdev, void *txwi_ptr, enum mt76_txq_id qid, struct mt76_wcid *wcid, diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c index 71487f532f36..11f169cdd603 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c @@ -13,9 +13,9 @@ #include "mcu.h" static const struct pci_device_id mt7615_pci_device_table[] = { - { PCI_DEVICE(0x14c3, 0x7615) }, - { PCI_DEVICE(0x14c3, 0x7663) }, - { PCI_DEVICE(0x14c3, 0x7611) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7615) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7663) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7611) }, { }, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c index 72395925ddee..a629d9cb3806 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c @@ -126,6 +126,7 @@ int mt7615_register_device(struct mt7615_dev *dev) int ret; mt7615_init_device(dev); + INIT_WORK(&dev->reset_work, mt7615_mac_reset_work); /* init led callbacks */ if (IS_ENABLED(CONFIG_MT76_LEDS)) { diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c index 1b4cb145f38e..1b206ccdadf2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c @@ -181,3 +181,171 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, return 0; } + +void mt7615_dma_reset(struct mt7615_dev *dev) +{ + int i; + + mt76_clear(dev, MT_WPDMA_GLO_CFG, + MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN | + MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE); + + usleep_range(1000, 2000); + + for (i = 0; i < __MT_TXQ_MAX; i++) + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true); + + for (i = 0; i < __MT_MCUQ_MAX; i++) + mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true); + + mt76_for_each_q_rx(&dev->mt76, i) + mt76_queue_rx_reset(dev, i); + + mt7615_dma_start(dev); +} +EXPORT_SYMBOL_GPL(mt7615_dma_reset); + +static void +mt7615_hif_int_event_trigger(struct mt7615_dev *dev, u8 event) +{ + mt76_wr(dev, MT_MCU_INT_EVENT, event); + + mt7622_trigger_hif_int(dev, true); + mt7622_trigger_hif_int(dev, false); +} + +static bool +mt7615_wait_reset_state(struct mt7615_dev *dev, u32 state) +{ + bool ret; + + ret = wait_event_timeout(dev->reset_wait, + (READ_ONCE(dev->reset_state) & state), + MT7615_RESET_TIMEOUT); + WARN(!ret, "Timeout waiting for MCU reset state %x\n", state); + return ret; +} + +static void +mt7615_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif) +{ + struct ieee80211_hw *hw = priv; + struct mt7615_dev *dev = mt7615_hw_dev(hw); + + switch (vif->type) { + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_AP: + mt7615_mcu_add_beacon(dev, hw, vif, + vif->bss_conf.enable_beacon); + break; + default: + break; + } +} + +static void +mt7615_update_beacons(struct mt7615_dev *dev) +{ + ieee80211_iterate_active_interfaces(dev->mt76.hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7615_update_vif_beacon, dev->mt76.hw); + + if (!dev->mt76.phy2) + return; + + ieee80211_iterate_active_interfaces(dev->mt76.phy2->hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7615_update_vif_beacon, dev->mt76.phy2->hw); +} + +void mt7615_mac_reset_work(struct work_struct *work) +{ + struct mt7615_phy *phy2; + struct mt76_phy *ext_phy; + struct mt7615_dev *dev; + + dev = container_of(work, struct 
mt7615_dev, reset_work); + ext_phy = dev->mt76.phy2; + phy2 = ext_phy ? ext_phy->priv : NULL; + + if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_PDMA)) + return; + + ieee80211_stop_queues(mt76_hw(dev)); + if (ext_phy) + ieee80211_stop_queues(ext_phy->hw); + + set_bit(MT76_RESET, &dev->mphy.state); + set_bit(MT76_MCU_RESET, &dev->mphy.state); + wake_up(&dev->mt76.mcu.wait); + cancel_delayed_work_sync(&dev->mphy.mac_work); + del_timer_sync(&dev->phy.roc_timer); + cancel_work_sync(&dev->phy.roc_work); + if (phy2) { + set_bit(MT76_RESET, &phy2->mt76->state); + cancel_delayed_work_sync(&phy2->mt76->mac_work); + del_timer_sync(&phy2->roc_timer); + cancel_work_sync(&phy2->roc_work); + } + + /* lock/unlock all queues to ensure that no tx is pending */ + mt76_txq_schedule_all(&dev->mphy); + if (ext_phy) + mt76_txq_schedule_all(ext_phy); + + mt76_worker_disable(&dev->mt76.tx_worker); + napi_disable(&dev->mt76.napi[0]); + napi_disable(&dev->mt76.napi[1]); + napi_disable(&dev->mt76.tx_napi); + + mt7615_mutex_acquire(dev); + + mt7615_hif_int_event_trigger(dev, MT_MCU_INT_EVENT_PDMA_STOPPED); + + mt7615_tx_token_put(dev); + idr_init(&dev->token); + + if (mt7615_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) { + mt7615_dma_reset(dev); + + mt76_wr(dev, MT_WPDMA_MEM_RNG_ERR, 0); + + mt7615_hif_int_event_trigger(dev, MT_MCU_INT_EVENT_PDMA_INIT); + mt7615_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE); + } + + clear_bit(MT76_MCU_RESET, &dev->mphy.state); + clear_bit(MT76_RESET, &dev->mphy.state); + if (phy2) + clear_bit(MT76_RESET, &phy2->mt76->state); + + mt76_worker_enable(&dev->mt76.tx_worker); + napi_enable(&dev->mt76.tx_napi); + napi_schedule(&dev->mt76.tx_napi); + + napi_enable(&dev->mt76.napi[0]); + napi_schedule(&dev->mt76.napi[0]); + + napi_enable(&dev->mt76.napi[1]); + napi_schedule(&dev->mt76.napi[1]); + + ieee80211_wake_queues(mt76_hw(dev)); + if (ext_phy) + ieee80211_wake_queues(ext_phy->hw); + + mt7615_hif_int_event_trigger(dev, MT_MCU_INT_EVENT_RESET_DONE); + mt7615_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE); + + mt7615_update_beacons(dev); + + mt7615_mutex_release(dev); + + ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work, + MT7615_WATCHDOG_TIME); + if (phy2) + ieee80211_queue_delayed_work(ext_phy->hw, + &phy2->mt76->mac_work, + MT7615_WATCHDOG_TIME); + +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h index 6e5db015b32c..190a02670795 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h @@ -368,7 +368,9 @@ enum mt7615_reg_base { #define MT_DMA_DCR0 MT_WF_DMA(0x000) #define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 2) +#define MT_DMA_DCR0_DAMSDU_EN BIT(16) #define MT_DMA_DCR0_RX_VEC_DROP BIT(17) +#define MT_DMA_DCR0_RX_HDR_TRANS_EN BIT(19) #define MT_DMA_RCFR0(_band) MT_WF_DMA(0x070 + (_band) * 0x40) #define MT_DMA_RCFR0_MCU_RX_MGMT BIT(2) @@ -447,9 +449,10 @@ enum mt7615_reg_base { #define MT_LPON(_n) ((dev)->reg_map[MT_LPON_BASE] + (_n)) -#define MT_LPON_T0CR MT_LPON(0x010) -#define MT_LPON_T0CR_MODE GENMASK(1, 0) -#define MT_LPON_T0CR_WRITE BIT(0) +#define MT_LPON_TCR0(_n) MT_LPON(0x010 + ((_n) * 4)) +#define MT_LPON_TCR2(_n) MT_LPON(0x0f8 + ((_n) - 2) * 4) +#define MT_LPON_TCR_MODE GENMASK(1, 0) +#define MT_LPON_TCR_WRITE BIT(0) #define MT_LPON_UTTR0 MT_LPON(0x018) #define MT_LPON_UTTR1 MT_LPON(0x01c) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c index 
9fb506f2ace6..4393dd21ebbb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c @@ -218,12 +218,15 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q) int qid, err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0; bool mcu = q == dev->q_mcu[MT_MCUQ_WM]; struct mt76_sdio *sdio = &dev->sdio; + u8 pad; qid = mcu ? ARRAY_SIZE(sdio->xmit_buf) - 1 : q->qid; while (q->first != q->head) { struct mt76_queue_entry *e = &q->entry[q->first]; struct sk_buff *iter; + smp_rmb(); + if (!test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) { __skb_put_zero(e->skb, 4); err = __mt7663s_xmit_queue(dev, e->skb->data, @@ -234,7 +237,8 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q) goto next; } - if (len + e->skb->len + 4 > MT76S_XMIT_BUF_SZ) + pad = roundup(e->skb->len, 4) - e->skb->len; + if (len + e->skb->len + pad + 4 > MT76S_XMIT_BUF_SZ) break; if (mt7663s_tx_pick_quota(sdio, mcu, e->buf_sz, &pse_sz, @@ -252,6 +256,11 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q) len += iter->len; nframes++; } + + if (unlikely(pad)) { + memset(sdio->xmit_buf[qid] + len, 0, pad); + len += pad; + } next: q->first = (q->first + 1) % q->ndesc; e->done = true; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c index 203256862dfd..4a370b9f7a17 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c @@ -67,6 +67,7 @@ static int mt7663_usb_sdio_set_rates(struct mt7615_dev *dev, struct mt7615_rate_desc *rate = &wrd->rate; struct mt7615_sta *sta = wrd->sta; u32 w5, w27, addr, val; + u16 idx = sta->vif->mt76.omac_idx; lockdep_assert_held(&dev->mt76.mutex); @@ -118,7 +119,10 @@ static int mt7663_usb_sdio_set_rates(struct mt7615_dev *dev, sta->rate_probe = sta->rateset[rate->rateset].probe_rate.idx != -1; - mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */ + idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx; + addr = idx > 1 ? 
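/*
 * Editor's sketch of the SDIO aggregation-buffer padding fix above: each
 * frame copied into the shared xmit buffer is rounded up to a 4-byte
 * boundary, the capacity check accounts for that padding, and the pad
 * bytes are zeroed so stale buffer contents never reach the bus. The
 * buffer size is a stand-in for MT76S_XMIT_BUF_SZ.
 */
#include <stdint.h>
#include <string.h>

#define XMIT_BUF_SZ 16384	/* assumed value, for illustration only */

static int copy_frame_padded(uint8_t *buf, size_t *len,
			     const uint8_t *frame, size_t frame_len)
{
	size_t pad = ((frame_len + 3) & ~(size_t)3) - frame_len;

	if (*len + frame_len + pad + 4 > XMIT_BUF_SZ)
		return -1;	/* would overflow the aggregation buffer */

	memcpy(buf + *len, frame, frame_len);
	*len += frame_len;

	if (pad) {
		memset(buf + *len, 0, pad);	/* zero, don't leak stale bytes */
		*len += pad;
	}
	return 0;
}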
MT_LPON_TCR2(idx): MT_LPON_TCR0(idx); + + mt76_set(dev, addr, MT_LPON_TCR_MODE); /* TSF read */ val = mt76_rr(dev, MT_LPON_UTTR0); sta->rate_set_tsf = (val & ~BIT(0)) | rate->rateset; diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h index 0d58606391b0..b811f3c410a1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h @@ -73,6 +73,11 @@ static inline bool is_mt7921(struct mt76_dev *dev) return mt76_chip(dev) == 0x7961; } +static inline bool is_mt7663(struct mt76_dev *dev) +{ + return mt76_chip(dev) == 0x7663; +} + int mt76_connac_pm_wake(struct mt76_phy *phy, struct mt76_connac_pm *pm); void mt76_connac_power_save_sched(struct mt76_phy *phy, struct mt76_connac_pm *pm); diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c index 6cbccfb05f8b..443ec0a8bb7d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c @@ -287,7 +287,7 @@ mt76_connac_mcu_alloc_wtbl_req(struct mt76_dev *dev, struct mt76_wcid *wcid, &hdr.wlan_idx_hi); if (!nskb) { nskb = mt76_mcu_msg_alloc(dev, NULL, - MT76_CONNAC_WTBL_UPDATE_BA_SIZE); + MT76_CONNAC_WTBL_UPDATE_MAX_SIZE); if (!nskb) return ERR_PTR(-ENOMEM); @@ -392,6 +392,21 @@ mt76_connac_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif, uapsd->max_sp = sta->max_sp; } +void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb, + struct mt76_wcid *wcid, + void *sta_wtbl, void *wtbl_tlv) +{ + struct wtbl_hdr_trans *htr; + struct tlv *tlv; + + tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_HDR_TRANS, + sizeof(*htr), + wtbl_tlv, sta_wtbl); + htr = (struct wtbl_hdr_trans *)tlv; + htr->no_rx_trans = !test_bit(MT_WCID_FLAG_HDR_TRANS, &wcid->flags); +} +EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_hdr_trans_tlv); + void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev, struct sk_buff *skb, struct ieee80211_vif *vif, @@ -655,7 +670,8 @@ mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif, void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb, struct ieee80211_sta *sta, - struct ieee80211_vif *vif) + struct ieee80211_vif *vif, + u8 rcpi) { struct cfg80211_chan_def *chandef = &mphy->chandef; enum nl80211_band band = chandef->chan->band; @@ -704,6 +720,7 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb, phy = (struct sta_rec_phy *)tlv; phy->phy_type = mt76_connac_get_phy_mode_v2(mphy, vif, band, sta); phy->basic_rate = cpu_to_le16((u16)vif->bss_conf.basic_rates); + phy->rcpi = rcpi; tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra_info)); ra_info = (struct sta_rec_ra_info *)tlv; @@ -808,40 +825,42 @@ void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb, EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_ht_tlv); int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct mt76_wcid *wcid, - bool enable, int cmd) + struct mt76_sta_cmd_info *info) { - struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct mt76_vif *mvif = (struct mt76_vif *)info->vif->drv_priv; struct mt76_dev *dev = phy->dev; struct wtbl_req_hdr *wtbl_hdr; struct tlv *sta_wtbl; struct sk_buff *skb; - skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid); + skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, info->wcid); if (IS_ERR(skb)) return PTR_ERR(skb); - 
mt76_connac_mcu_sta_basic_tlv(skb, vif, sta, enable); - if (enable && sta) - mt76_connac_mcu_sta_tlv(phy, skb, sta, vif); + mt76_connac_mcu_sta_basic_tlv(skb, info->vif, info->sta, info->enable); + if (info->enable && info->sta) + mt76_connac_mcu_sta_tlv(phy, skb, info->sta, info->vif, + info->rcpi); sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv)); - wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, wcid, + wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, info->wcid, WTBL_RESET_AND_SET, sta_wtbl, &skb); - if (enable) { - mt76_connac_mcu_wtbl_generic_tlv(dev, skb, vif, sta, sta_wtbl, + if (IS_ERR(wtbl_hdr)) + return PTR_ERR(wtbl_hdr); + + if (info->enable) { + mt76_connac_mcu_wtbl_generic_tlv(dev, skb, info->vif, + info->sta, sta_wtbl, wtbl_hdr); - if (sta) - mt76_connac_mcu_wtbl_ht_tlv(dev, skb, sta, sta_wtbl, - wtbl_hdr); + if (info->sta) + mt76_connac_mcu_wtbl_ht_tlv(dev, skb, info->sta, + sta_wtbl, wtbl_hdr); } - return mt76_mcu_skb_send_msg(dev, skb, cmd, true); + return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true); } EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_sta_cmd); @@ -946,6 +965,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy, switch (vif->type) { case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_AP: basic_req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_AP); break; @@ -1195,6 +1215,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy, .center_chan = ieee80211_frequency_to_channel(freq1), .center_chan2 = ieee80211_frequency_to_channel(freq2), .tx_streams = hweight8(phy->antenna_mask), + .ht_op_info = 4, /* set HT 40M allowed */ .rx_streams = phy->chainmask, .short_st = true, }, @@ -1287,6 +1308,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy, case NL80211_CHAN_WIDTH_20: default: rlm_req.rlm.bw = CMD_CBW_20MHZ; + rlm_req.rlm.ht_op_info = 0; break; } @@ -1306,7 +1328,7 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, { struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; struct cfg80211_scan_request *sreq = &scan_req->req; - int n_ssids = 0, err, i, duration = MT76_CONNAC_SCAN_CHANNEL_TIME; + int n_ssids = 0, err, i, duration; int ext_channels_num = max_t(int, sreq->n_channels - 32, 0); struct ieee80211_channel **scan_list = sreq->channels; struct mt76_dev *mdev = phy->dev; @@ -1343,6 +1365,7 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, req->ssid_type_ext = n_ssids ? BIT(0) : 0; req->ssids_num = n_ssids; + duration = is_mt7921(phy->dev) ? 
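/*
 * Editor's sketch of the mt76_sta_cmd_info refactor above: the
 * six-argument mt76_connac_mcu_add_sta_cmd() call collapses into one
 * descriptor struct, so new fields (rcpi here) extend the interface
 * without touching every caller. Types reduced to a standalone shape.
 */
#include <stdbool.h>
#include <stdint.h>

struct sta_cmd_info {
	void *sta;
	void *wcid;
	void *vif;
	bool enable;
	int cmd;
	uint8_t rcpi;
};

static int add_sta_cmd(const struct sta_cmd_info *info)
{
	/* ...build the STA_REC/WTBL message from *info and send it... */
	(void)info;
	return 0;
}

/* callers use designated initializers; omitted fields default to zero */
static int example_caller(void *sta, void *vif, void *wcid)
{
	struct sta_cmd_info info = {
		.sta = sta,
		.vif = vif,
		.wcid = wcid,
		.enable = true,
		.cmd = 0,
	};

	return add_sta_cmd(&info);
}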
0 : MT76_CONNAC_SCAN_CHANNEL_TIME; /* increase channel time for passive scan */ if (!sreq->n_ssids) duration *= 2; @@ -1368,11 +1391,14 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, req->ies_len = cpu_to_le16(sreq->ie_len); } + if (is_mt7921(phy->dev)) + req->scan_func |= SCAN_FUNC_SPLIT_SCAN; + memcpy(req->bssid, sreq->bssid, ETH_ALEN); if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { get_random_mask_addr(req->random_mac, sreq->mac_addr, sreq->mac_addr_mask); - req->scan_func = 1; + req->scan_func |= SCAN_FUNC_RANDOM_MAC; } err = mt76_mcu_skb_send_msg(mdev, skb, MCU_CMD_START_HW_SCAN, false); @@ -1433,10 +1459,13 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy, req->version = 1; req->seq_num = mvif->scan_seq_num | ext_phy << 7; - if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { - get_random_mask_addr(req->random_mac, sreq->mac_addr, + if (is_mt7663(phy->dev) && + (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)) { + get_random_mask_addr(req->mt7663.random_mac, sreq->mac_addr, sreq->mac_addr_mask); req->scan_func = 1; + } else if (is_mt7921(phy->dev)) { + req->mt7921.bss_idx = mvif->idx; } req->ssids_num = sreq->n_ssids; diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h index c1e1df5f7cd7..587097450416 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h @@ -210,7 +210,7 @@ struct wtbl_hdr_trans { __le16 len; u8 to_ds; u8 from_ds; - u8 disable_rx_trans; + u8 no_rx_trans; u8 rsv; } __packed; @@ -304,9 +304,6 @@ struct wtbl_raw { sizeof(struct tlv) + \ MT76_CONNAC_WTBL_UPDATE_MAX_SIZE) -#define MT76_CONNAC_WTBL_UPDATE_BA_SIZE (sizeof(struct wtbl_req_hdr) + \ - sizeof(struct wtbl_ba)) - enum { STA_REC_BASIC, STA_REC_RA, @@ -365,6 +362,9 @@ enum { #define NETWORK_IBSS BIT(18) #define NETWORK_WDS BIT(21) +#define SCAN_FUNC_RANDOM_MAC BIT(0) +#define SCAN_FUNC_SPLIT_SCAN BIT(5) + #define CONNECTION_INFRA_STA (STA_TYPE_STA | NETWORK_INFRA) #define CONNECTION_INFRA_AP (STA_TYPE_AP | NETWORK_INFRA) #define CONNECTION_P2P_GC (STA_TYPE_STA | NETWORK_P2P) @@ -759,11 +759,19 @@ struct mt76_connac_sched_scan_req { u8 channel_type; u8 channels_num; u8 intervals_num; - u8 scan_func; /* BIT(0) eable random mac address */ + u8 scan_func; /* MT7663: BIT(0) eable random mac address */ struct mt76_connac_mcu_scan_channel channels[64]; __le16 intervals[MT76_CONNAC_MAX_SCHED_SCAN_INTERVAL]; - u8 random_mac[ETH_ALEN]; /* valid when BIT(0) in scan_func is set */ - u8 pad2[58]; + union { + struct { + u8 random_mac[ETH_ALEN]; + u8 pad2[58]; + } mt7663; + struct { + u8 bss_idx; + u8 pad2[63]; + } mt7921; + }; } __packed; struct mt76_connac_sched_scan_done { @@ -876,6 +884,17 @@ struct mt76_connac_suspend_tlv { u8 pad[5]; } __packed; +struct mt76_sta_cmd_info { + struct ieee80211_sta *sta; + struct mt76_wcid *wcid; + + struct ieee80211_vif *vif; + + bool enable; + int cmd; + u8 rcpi; +}; + #define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id) #define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id) @@ -917,9 +936,13 @@ void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev, struct sk_buff *skb, struct ieee80211_vif *vif, struct ieee80211_sta *sta, void *sta_wtbl, void *wtbl_tlv); +void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb, + struct mt76_wcid *wcid, + void *sta_wtbl, void *wtbl_tlv); void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb, struct ieee80211_sta *sta, - struct ieee80211_vif 
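/*
 * Editor's sketch of the mt76_connac_sched_scan_req tail union above:
 * the request keeps one fixed 64-byte tail, interpreted per chip (random
 * MAC address for MT7663, BSS index for MT7921), so the firmware ABI
 * size is unchanged. Standalone model:
 */
#include <stdint.h>

struct sched_scan_tail {
	union {
		struct {
			uint8_t random_mac[6];	/* ETH_ALEN */
			uint8_t pad2[58];
		} mt7663;
		struct {
			uint8_t bss_idx;
			uint8_t pad2[63];
		} mt7921;
	};
};

/* both arms are 64 bytes, so the layout stays compatible */
_Static_assert(sizeof(struct sched_scan_tail) == 64, "tail must stay 64B");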
*vif); + struct ieee80211_vif *vif, + u8 rcpi); void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb, struct ieee80211_sta *sta, void *sta_wtbl, void *wtbl_tlv); @@ -942,10 +965,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy, struct mt76_wcid *wcid, bool enable); int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct mt76_wcid *wcid, - bool enable, int cmd); + struct mt76_sta_cmd_info *info); void mt76_connac_mcu_beacon_loss_iter(void *priv, u8 *mac, struct ieee80211_vif *vif); int mt76_connac_mcu_set_rts_thresh(struct mt76_dev *dev, u32 val, u8 band); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c index 02d0aa0b815e..5847f943e8da 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c @@ -221,9 +221,9 @@ mt76x0e_remove(struct pci_dev *pdev) } static const struct pci_device_id mt76x0e_device_table[] = { - { PCI_DEVICE(0x14c3, 0x7610) }, - { PCI_DEVICE(0x14c3, 0x7630) }, - { PCI_DEVICE(0x14c3, 0x7650) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7610) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7630) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7650) }, { }, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c index a593a7796d23..f2b2fa733845 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c @@ -32,7 +32,8 @@ static struct usb_device_id mt76x0_device_table[] = { { USB_DEVICE(0x20f4, 0x806b) }, /* TRENDnet TEW-806UBH */ { USB_DEVICE(0x7392, 0xc711) }, /* Devolo Wifi ac Stick */ { USB_DEVICE(0x0df6, 0x0079) }, /* Sitecom Europe B.V. 
ac Stick */ - { USB_DEVICE(0x2357, 0x0123) }, /* TP-LINK T2UHP */ + { USB_DEVICE(0x2357, 0x0123) }, /* TP-LINK T2UHP_US_v1 */ + { USB_DEVICE(0x2357, 0x010b) }, /* TP-LINK T2UHP_UN_v1 */ /* TP-LINK Archer T1U */ { USB_DEVICE(0x2357, 0x0105), .driver_info = 1, }, /* MT7630U */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c index 771bad60e1bc..0da37867cb64 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c @@ -770,6 +770,7 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb, void *rxi) { struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; + struct ieee80211_hdr *hdr; struct mt76x02_rxwi *rxwi = rxi; struct mt76x02_sta *sta; u32 rxinfo = le32_to_cpu(rxwi->rxinfo); @@ -864,7 +865,8 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb, status->freq = dev->mphy.chandef.chan->center_freq; status->band = dev->mphy.chandef.chan->band; - status->tid = FIELD_GET(MT_RXWI_TID, tid_sn); + hdr = (struct ieee80211_hdr *)skb->data; + status->qos_ctl = *ieee80211_get_qos_ctl(hdr); status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn); return mt76x02_mac_process_rate(dev, status, rate); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c index e7a46ac97f51..fc12824ab74e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c @@ -230,7 +230,7 @@ int mt76x02_dma_init(struct mt76x02_dev *dev) if (ret) return ret; - netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi, + netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, mt76x02_poll_tx, NAPI_POLL_WEIGHT); napi_enable(&dev->mt76.tx_napi); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c index ab671e21f882..02db5d66735d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c @@ -447,6 +447,10 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) return -EOPNOTSUPP; + /* MT76x0 GTK offloading does not work with more than one VIF */ + if (is_mt76x0(dev) && !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + return -EOPNOTSUPP; + msta = sta ? (struct mt76x02_sta *)sta->drv_priv : NULL; wcid = msta ? 
&msta->wcid : &mvif->group_wcid; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c index ecaf85b483ac..adf288e50e21 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c @@ -10,9 +10,9 @@ #include "mt76x2.h" static const struct pci_device_id mt76x2e_device_table[] = { - { PCI_DEVICE(0x14c3, 0x7662) }, - { PCI_DEVICE(0x14c3, 0x7612) }, - { PCI_DEVICE(0x14c3, 0x7602) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7662) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7612) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7602) }, { }, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/Makefile b/drivers/net/wireless/mediatek/mt76/mt7915/Makefile index cc2054dffa98..40c8061787e9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/Makefile +++ b/drivers/net/wireless/mediatek/mt76/mt7915/Makefile @@ -3,6 +3,6 @@ obj-$(CONFIG_MT7915E) += mt7915e.o mt7915e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o \ - debugfs.o + debugfs.o mmio.o mt7915e-$(CONFIG_NL80211_TESTMODE) += testmode.o diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c index 7bef36feb9c7..587d55d240a1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c @@ -124,7 +124,7 @@ mt7915_ampdu_stat_read_phy(struct mt7915_phy *phy, range[i] = mt76_rr(dev, MT_MIB_ARNG(ext_phy, i)); for (i = 0; i < ARRAY_SIZE(bound); i++) - bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i) + 1; + bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i % 4) + 1; seq_printf(file, "\nPhy %d\n", ext_phy); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c index bf51304a770b..9fe09870f8f0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c @@ -110,121 +110,13 @@ void mt7915_dma_prefetch(struct mt7915_dev *dev) __mt7915_dma_prefetch(dev, MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE); } -static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr) -{ - static const struct { - u32 phys; - u32 mapped; - u32 size; - } fixed_map[] = { - { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */ - { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */ - { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */ - { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */ - { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */ - { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */ - { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */ - { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */ - { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */ - { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */ - { 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */ - { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */ - { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */ - { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */ - { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */ - { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */ - { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */ - { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */ - { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */ - { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */ - { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP 
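/*
 * Editor's sketch of the mt7915 debugfs fix above: each 32-bit
 * MT_MIB_ARNG register packs four aggregation-range fields, so entry i
 * lives in word i / 4 at field i % 4; the old code indexed the field
 * with i itself and read past the register once i reached 4. The 8-bit
 * field width is assumed for illustration.
 */
#include <stdint.h>

static uint8_t arncr_range(const uint32_t *range_words, unsigned int i)
{
	uint32_t word = range_words[i / 4];

	return (word >> ((i % 4) * 8)) & 0xff;
}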
BN0 (WF_DMA) */ - { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */ - { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */ - { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */ - { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */ - { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */ - { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */ - { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */ - { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */ - { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */ - { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */ - { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */ - { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */ - { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */ - { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */ - { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */ - { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */ - { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */ - }; - int i; - - if (addr < 0x100000) - return addr; - - for (i = 0; i < ARRAY_SIZE(fixed_map); i++) { - u32 ofs; - - if (addr < fixed_map[i].phys) - continue; - - ofs = addr - fixed_map[i].phys; - if (ofs > fixed_map[i].size) - continue; - - return fixed_map[i].mapped + ofs; - } - - if ((addr >= 0x18000000 && addr < 0x18c00000) || - (addr >= 0x70000000 && addr < 0x78000000) || - (addr >= 0x7c000000 && addr < 0x7c400000)) - return mt7915_reg_map_l1(dev, addr); - - return mt7915_reg_map_l2(dev, addr); -} - -static u32 mt7915_rr(struct mt76_dev *mdev, u32 offset) -{ - struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); - u32 addr = __mt7915_reg_addr(dev, offset); - - return dev->bus_ops->rr(mdev, addr); -} - -static void mt7915_wr(struct mt76_dev *mdev, u32 offset, u32 val) -{ - struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); - u32 addr = __mt7915_reg_addr(dev, offset); - - dev->bus_ops->wr(mdev, addr, val); -} - -static u32 mt7915_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val) -{ - struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); - u32 addr = __mt7915_reg_addr(dev, offset); - - return dev->bus_ops->rmw(mdev, addr, mask, val); -} - int mt7915_dma_init(struct mt7915_dev *dev) { /* Increase buffer size to receive large VHT/HE MPDUs */ - struct mt76_bus_ops *bus_ops; int rx_buf_size = MT_RX_BUF_SIZE * 2; u32 hif1_ofs = 0; int ret; - dev->bus_ops = dev->mt76.bus; - bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops), - GFP_KERNEL); - if (!bus_ops) - return -ENOMEM; - - bus_ops->rr = mt7915_rr; - bus_ops->wr = mt7915_wr; - bus_ops->rmw = mt7915_rmw; - dev->mt76.bus = bus_ops; - mt76_dma_attach(&dev->mt76); if (dev->hif2) @@ -325,7 +217,7 @@ int mt7915_dma_init(struct mt7915_dev *dev) if (ret < 0) return ret; - netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi, + netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, mt7915_poll_tx, NAPI_POLL_WEIGHT); napi_enable(&dev->mt76.tx_napi); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c index 660398ac53c2..738ecf8f4fa2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c @@ -124,7 +124,7 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev, struct ieee80211_channel *chan, u8 
chain_idx) { - int index; + int index, target_power; bool tssi_on; if (chain_idx > 3) @@ -133,15 +133,22 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev, tssi_on = mt7915_tssi_enabled(dev, chan->band); if (chan->band == NL80211_BAND_2GHZ) { - index = MT_EE_TX0_POWER_2G + chain_idx * 3 + !tssi_on; + index = MT_EE_TX0_POWER_2G + chain_idx * 3; + target_power = mt7915_eeprom_read(dev, index); + + if (!tssi_on) + target_power += mt7915_eeprom_read(dev, index + 1); } else { - int group = tssi_on ? - mt7915_get_channel_group(chan->hw_value) : 8; + int group = mt7915_get_channel_group(chan->hw_value); + + index = MT_EE_TX0_POWER_5G + chain_idx * 12; + target_power = mt7915_eeprom_read(dev, index + group); - index = MT_EE_TX0_POWER_5G + chain_idx * 12 + group; + if (!tssi_on) + target_power += mt7915_eeprom_read(dev, index + 8); } - return mt7915_eeprom_read(dev, index); + return target_power; } static const u8 sku_cck_delta_map[] = { diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c index ad4e5b95158b..2732899689fd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c @@ -4,6 +4,7 @@ #include <linux/etherdevice.h> #include "mt7915.h" #include "mac.h" +#include "mcu.h" #include "eeprom.h" #define CCK_RATE(_idx, _rate) { \ @@ -93,6 +94,10 @@ mt7915_init_wiphy(struct ieee80211_hw *hw) hw->queues = 4; hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; + hw->netdev_features = NETIF_F_RXCSUM; + + hw->radiotap_timestamp.units_pos = + IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US; phy->slottime = 9; @@ -108,9 +113,28 @@ mt7915_init_wiphy(struct ieee80211_hw *hw) ieee80211_hw_set(hw, HAS_RATE_CONTROL); ieee80211_hw_set(hw, SUPPORTS_TX_ENCAP_OFFLOAD); + ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD); ieee80211_hw_set(hw, WANT_MONITOR_VIF); hw->max_tx_fragments = 4; + + if (phy->mt76->cap.has_2ghz) + phy->mt76->sband_2g.sband.ht_cap.cap |= + IEEE80211_HT_CAP_LDPC_CODING | + IEEE80211_HT_CAP_MAX_AMSDU; + + if (phy->mt76->cap.has_5ghz) { + phy->mt76->sband_5g.sband.ht_cap.cap |= + IEEE80211_HT_CAP_LDPC_CODING | + IEEE80211_HT_CAP_MAX_AMSDU; + phy->mt76->sband_5g.sband.vht_cap.cap |= + IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 | + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; + } + + mt76_set_stream_caps(phy->mt76, true); + mt7915_set_stream_vht_txbf_caps(phy); + mt7915_set_stream_he_caps(phy); } static void @@ -153,16 +177,14 @@ static void mt7915_mac_init(struct mt7915_dev *dev) int i; mt76_rmw_field(dev, MT_MDP_DCR1, MT_MDP_DCR1_MAX_RX_LEN, 1536); - /* disable hardware de-agg */ - mt76_clear(dev, MT_MDP_DCR0, MT_MDP_DCR0_DAMSDU_EN); + /* enable hardware de-agg */ + mt76_set(dev, MT_MDP_DCR0, MT_MDP_DCR0_DAMSDU_EN); for (i = 0; i < MT7915_WTBL_SIZE; i++) mt7915_mac_wtbl_update(dev, i, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); for (i = 0; i < 2; i++) mt7915_mac_init_band(dev, i); - - mt7915_mcu_set_rts_thresh(&dev->phy, 0x92b); } static int mt7915_txbf_init(struct mt7915_dev *dev) @@ -238,22 +260,17 @@ static int mt7915_register_ext_phy(struct mt7915_dev *dev) phy->mt76 = mphy; mphy->chainmask = dev->chainmask & ~dev->mphy.chainmask; mphy->antenna_mask = BIT(hweight8(mphy->chainmask)) - 1; - mt7915_init_wiphy(mphy->hw); INIT_LIST_HEAD(&phy->stats_list); INIT_DELAYED_WORK(&mphy->mac_work, mt7915_mac_work); mt7915_eeprom_parse_band_config(phy); - mt7915_set_stream_vht_txbf_caps(phy); - mt7915_set_stream_he_caps(phy); + 
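/*
 * A worked model of the reworked 5 GHz target-power lookup above, with
 * fabricated EEPROM bytes and the base offset set to 0 for illustration
 * (the real MT_EE_TX0_POWER_5G value differs): the base power sits at
 * chain * 12 + group, and the delta at offset 8 within the same
 * per-chain block is added only when TSSI compensation is off.
 */
#include <stdio.h>
#include <stdint.h>

#define EE_TX0_POWER_5G	0	/* placeholder base offset */

static int target_power_5g(const uint8_t *ee, int chain, int group,
			   int tssi_on)
{
	int index = EE_TX0_POWER_5G + chain * 12;
	int power = ee[index + group];

	if (!tssi_on)
		power += ee[index + 8];
	return power;
}

int main(void)
{
	uint8_t ee[24] = { [3] = 20, [8] = 2, [15] = 18 };

	printf("chain 0, group 3, tssi off: %d\n",
	       target_power_5g(ee, 0, 3, 0));	/* 20 + 2 = 22 */
	printf("chain 1, group 3, tssi on:  %d\n",
	       target_power_5g(ee, 1, 3, 1));	/* 18 */
	return 0;
}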
mt7915_init_wiphy(mphy->hw); memcpy(mphy->macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR2, ETH_ALEN); mt76_eeprom_override(mphy); - /* The second interface does not get any packets unless it has a vif */ - ieee80211_hw_set(mphy->hw, WANT_MONITOR_VIF); - ret = mt7915_init_tx_queues(phy, MT7915_TXQ_BAND1, MT7915_TX_RING_SIZE); if (ret) @@ -280,7 +297,45 @@ static void mt7915_init_work(struct work_struct *work) mt7915_mac_init(dev); mt7915_init_txpower(dev); mt7915_txbf_init(dev); - mt7915_register_ext_phy(dev); +} + +static void mt7915_wfsys_reset(struct mt7915_dev *dev) +{ + u32 val = MT_TOP_PWR_KEY | MT_TOP_PWR_SW_PWR_ON | MT_TOP_PWR_PWR_ON; + +#define MT_MCU_DUMMY_RANDOM GENMASK(15, 0) +#define MT_MCU_DUMMY_DEFAULT GENMASK(31, 16) + + mt76_wr(dev, MT_MCU_WFDMA0_DUMMY_CR, MT_MCU_DUMMY_RANDOM); + + /* change to software control */ + val |= MT_TOP_PWR_SW_RST; + mt76_wr(dev, MT_TOP_PWR_CTRL, val); + + /* reset wfsys */ + val &= ~MT_TOP_PWR_SW_RST; + mt76_wr(dev, MT_TOP_PWR_CTRL, val); + + /* release wfsys then mcu re-excutes romcode */ + val |= MT_TOP_PWR_SW_RST; + mt76_wr(dev, MT_TOP_PWR_CTRL, val); + + /* switch to hw control */ + val &= ~MT_TOP_PWR_SW_RST; + val |= MT_TOP_PWR_HW_CTRL; + mt76_wr(dev, MT_TOP_PWR_CTRL, val); + + /* check whether mcu resets to default */ + if (!mt76_poll_msec(dev, MT_MCU_WFDMA0_DUMMY_CR, MT_MCU_DUMMY_DEFAULT, + MT_MCU_DUMMY_DEFAULT, 1000)) { + dev_err(dev->mt76.dev, "wifi subsystem reset failure\n"); + return; + } + + /* wfsys reset won't clear host registers */ + mt76_clear(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE); + + msleep(100); } static int mt7915_init_hardware(struct mt7915_dev *dev) @@ -293,7 +348,12 @@ static int mt7915_init_hardware(struct mt7915_dev *dev) spin_lock_init(&dev->token_lock); idr_init(&dev->token); - dev->dbdc_support = !!(mt7915_l1_rr(dev, MT_HW_BOUND) & BIT(5)); + dev->dbdc_support = !!(mt76_rr(dev, MT_HW_BOUND) & BIT(5)); + + /* If MCU was already running, it is likely in a bad state */ + if (mt76_get_field(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE) > + FW_STATE_FW_DOWNLOAD) + mt7915_wfsys_reset(dev); ret = mt7915_dma_init(dev); if (ret) @@ -308,8 +368,14 @@ static int mt7915_init_hardware(struct mt7915_dev *dev) mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE); ret = mt7915_mcu_init(dev); - if (ret) - return ret; + if (ret) { + /* Reset and try again */ + mt7915_wfsys_reset(dev); + + ret = mt7915_mcu_init(dev); + if (ret) + return ret; + } ret = mt7915_eeprom_init(dev); if (ret < 0) @@ -330,8 +396,14 @@ static int mt7915_init_hardware(struct mt7915_dev *dev) void mt7915_set_stream_vht_txbf_caps(struct mt7915_phy *phy) { - int nss = hweight8(phy->mt76->chainmask); - u32 *cap = &phy->mt76->sband_5g.sband.vht_cap.cap; + int nss; + u32 *cap; + + if (!phy->mt76->cap.has_5ghz) + return; + + nss = hweight8(phy->mt76->chainmask); + cap = &phy->mt76->sband_5g.sband.vht_cap.cap; *cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | @@ -635,25 +707,14 @@ int mt7915_register_device(struct mt7915_dev *dev) return ret; mt7915_init_wiphy(hw); - dev->mphy.sband_2g.sband.ht_cap.cap |= - IEEE80211_HT_CAP_LDPC_CODING | - IEEE80211_HT_CAP_MAX_AMSDU; - dev->mphy.sband_5g.sband.ht_cap.cap |= - IEEE80211_HT_CAP_LDPC_CODING | - IEEE80211_HT_CAP_MAX_AMSDU; - dev->mphy.sband_5g.sband.vht_cap.cap |= - IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 | - IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; + if (!dev->dbdc_support) dev->mphy.sband_5g.sband.vht_cap.cap |= IEEE80211_VHT_CAP_SHORT_GI_160 | 
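/*
 * The mt7915_wfsys_reset() sequence above leans on mt76_poll_msec() to
 * wait for the dummy register to return to its default pattern. A
 * userspace sketch of that poll-masked-register-until-timeout pattern;
 * the register read is simulated, and real code sleeps ~1 ms per loop.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define MCU_DUMMY_DEFAULT 0xffff0000u	/* GENMASK(31, 16), as in the patch */

static uint32_t sim_rr(int tick)
{
	return tick >= 3 ? MCU_DUMMY_DEFAULT : 0;	/* mcu "resets" at tick 3 */
}

static bool poll_msec(uint32_t mask, uint32_t val, int timeout_ms)
{
	int t;

	for (t = 0; t < timeout_ms; t++)
		if ((sim_rr(t) & mask) == val)
			return true;
	return false;
}

int main(void)
{
	printf("wfsys reset %s\n",
	       poll_msec(MCU_DUMMY_DEFAULT, MCU_DUMMY_DEFAULT, 1000) ?
	       "completed" : "timed out");
	return 0;
}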
IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ; + dev->mphy.hw->wiphy->available_antennas_rx = dev->mphy.chainmask; dev->mphy.hw->wiphy->available_antennas_tx = dev->mphy.chainmask; - - mt76_set_stream_caps(&dev->mphy, true); - mt7915_set_stream_vht_txbf_caps(&dev->phy); - mt7915_set_stream_he_caps(&dev->phy); dev->phy.dfs_state = -1; #ifdef CONFIG_NL80211_TESTMODE @@ -667,6 +728,10 @@ int mt7915_register_device(struct mt7915_dev *dev) ieee80211_queue_work(mt76_hw(dev), &dev->init_work); + ret = mt7915_register_ext_phy(dev); + if (ret) + return ret; + return mt7915_init_debugfs(dev); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c index e5a258958ac9..3f3bfead1ce7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c @@ -317,11 +317,18 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) __le32 *rxd = (__le32 *)skb->data; __le32 *rxv = NULL; u32 mode = 0; + u32 rxd0 = le32_to_cpu(rxd[0]); u32 rxd1 = le32_to_cpu(rxd[1]); u32 rxd2 = le32_to_cpu(rxd[2]); u32 rxd3 = le32_to_cpu(rxd[3]); + u32 rxd4 = le32_to_cpu(rxd[4]); + u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM; bool unicast, insert_ccmp_hdr = false; - u8 remove_pad; + u8 remove_pad, amsdu_info; + bool hdr_trans; + u16 seq_ctrl = 0; + u8 qos_ctl = 0; + __le16 fc = 0; int i, idx; memset(status, 0, sizeof(*status)); @@ -338,8 +345,12 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) if (!test_bit(MT76_STATE_RUNNING, &mphy->state)) return -EINVAL; + if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR) + return -EINVAL; + unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M; idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1); + hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS; status->wcid = mt7915_rx_get_wcid(dev, idx, unicast); if (status->wcid) { @@ -362,6 +373,9 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) if (!sband->channels) return -EINVAL; + if ((rxd0 & csum_mask) == csum_mask) + skb->ip_summed = CHECKSUM_UNNECESSARY; + if (rxd1 & MT_RXD1_NORMAL_FCS_ERR) status->flag |= RX_FLAG_FAILED_FCS_CRC; @@ -375,19 +389,6 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED; } - if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) { - status->flag |= RX_FLAG_AMPDU_DETAILS; - - /* all subframes of an A-MPDU have the same timestamp */ - if (phy->rx_ampdu_ts != rxd[14]) { - if (!++phy->ampdu_ref) - phy->ampdu_ref++; - } - phy->rx_ampdu_ts = rxd[14]; - - status->ampdu_ref = phy->ampdu_ref; - } - remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2); if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR) @@ -395,6 +396,13 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) rxd += 6; if (rxd1 & MT_RXD1_NORMAL_GROUP_4) { + u32 v0 = le32_to_cpu(rxd[0]); + u32 v2 = le32_to_cpu(rxd[2]); + + fc = cpu_to_le16(FIELD_GET(MT_RXD6_FRAME_CONTROL, v0)); + qos_ctl = FIELD_GET(MT_RXD8_QOS_CTL, v2); + seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, v2); + rxd += 4; if ((u8 *)rxd - skb->data >= skb->len) return -EINVAL; @@ -419,6 +427,22 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) } if (rxd1 & MT_RXD1_NORMAL_GROUP_2) { + status->timestamp = le32_to_cpu(rxd[0]); + status->flag |= RX_FLAG_MACTIME_START; + + if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) { + status->flag |= RX_FLAG_AMPDU_DETAILS; + + /* all subframes of an A-MPDU have the same timestamp */ + if (phy->rx_ampdu_ts 
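/*
 * Sketch of the RXD0 checksum check introduced in mt7915_mac_fill_rx()
 * above: CHECKSUM_UNNECESSARY is only claimed when both the IP and
 * TCP/UDP checksum bits are set, hence "== csum_mask" rather than
 * "!= 0". Bit positions here are placeholders; the real masks are
 * MT_RXD0_NORMAL_IP_SUM and MT_RXD0_NORMAL_UDP_TCP_SUM in mt7915/mac.h.
 */
#include <stdio.h>
#include <stdint.h>

#define RXD0_IP_SUM		(1u << 4)	/* placeholder bit */
#define RXD0_UDP_TCP_SUM	(1u << 5)	/* placeholder bit */

static int csum_unnecessary(uint32_t rxd0)
{
	uint32_t csum_mask = RXD0_IP_SUM | RXD0_UDP_TCP_SUM;

	return (rxd0 & csum_mask) == csum_mask;
}

int main(void)
{
	printf("ip bit only: %d\n", csum_unnecessary(RXD0_IP_SUM));
	printf("both bits:   %d\n",
	       csum_unnecessary(RXD0_IP_SUM | RXD0_UDP_TCP_SUM));
	return 0;
}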
!= status->timestamp) { + if (!++phy->ampdu_ref) + phy->ampdu_ref++; + } + phy->rx_ampdu_ts = status->timestamp; + + status->ampdu_ref = phy->ampdu_ref; + } + rxd += 2; if ((u8 *)rxd - skb->data >= skb->len) return -EINVAL; @@ -541,23 +565,47 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad); - if (insert_ccmp_hdr) { + amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4); + status->amsdu = !!amsdu_info; + if (status->amsdu) { + status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME; + status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME; + if (!hdr_trans) { + memmove(skb->data + 2, skb->data, + ieee80211_get_hdrlen_from_skb(skb)); + skb_pull(skb, 2); + } + } + + if (insert_ccmp_hdr && !hdr_trans) { u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1); mt76_insert_ccmp_hdr(skb, key_id); } + if (!hdr_trans) { + hdr = mt76_skb_get_hdr(skb); + fc = hdr->frame_control; + if (ieee80211_is_data_qos(fc)) { + seq_ctrl = le16_to_cpu(hdr->seq_ctrl); + qos_ctl = *ieee80211_get_qos_ctl(hdr); + } + } else { + status->flag &= ~(RX_FLAG_RADIOTAP_HE | + RX_FLAG_RADIOTAP_HE_MU); + status->flag |= RX_FLAG_8023; + } + if (rxv && status->flag & RX_FLAG_RADIOTAP_HE) mt7915_mac_decode_he_radiotap(skb, status, rxv, mode); - hdr = mt76_skb_get_hdr(skb); - if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control)) + if (!status->wcid || !ieee80211_is_data_qos(fc)) return 0; status->aggr = unicast && - !ieee80211_is_qos_nullfunc(hdr->frame_control); - status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; - status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); + !ieee80211_is_qos_nullfunc(fc); + status->qos_ctl = qos_ctl; + status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl); return 0; } @@ -1091,7 +1139,7 @@ void mt7915_txp_skb_unmap(struct mt76_dev *dev, int i; txp = mt7915_txwi_to_txp(dev, t); - for (i = 1; i < txp->nbuf; i++) + for (i = 0; i < txp->nbuf; i++) dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]), le16_to_cpu(txp->len[i]), DMA_TO_DEVICE); } @@ -1252,8 +1300,8 @@ void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy) bool ext_phy = phy != &dev->phy; u32 reg = MT_WF_PHY_RX_CTRL1(ext_phy); - mt7915_l2_clear(dev, reg, MT_WF_PHY_RX_CTRL1_STSCNT_EN); - mt7915_l2_set(dev, reg, BIT(11) | BIT(9)); + mt76_clear(dev, reg, MT_WF_PHY_RX_CTRL1_STSCNT_EN); + mt76_set(dev, reg, BIT(11) | BIT(9)); } void mt7915_mac_reset_counters(struct mt7915_phy *phy) @@ -1346,12 +1394,12 @@ void mt7915_mac_set_timing(struct mt7915_phy *phy) void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool ext_phy) { - mt7915_l2_set(dev, MT_WF_PHY_RXTD12(ext_phy), - MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY | - MT_WF_PHY_RXTD12_IRPI_SW_CLR); + mt76_set(dev, MT_WF_PHY_RXTD12(ext_phy), + MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY | + MT_WF_PHY_RXTD12_IRPI_SW_CLR); - mt7915_l2_set(dev, MT_WF_PHY_RX_CTRL1(ext_phy), - FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5)); + mt76_set(dev, MT_WF_PHY_RX_CTRL1(ext_phy), + FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5)); } static u8 @@ -1366,7 +1414,7 @@ mt7915_phy_get_nf(struct mt7915_phy *phy, int idx) u32 reg = MT_WF_IRPI(nss + (idx << dev->dbdc_support)); for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) { - val = mt7915_l2_rr(dev, reg); + val = mt76_rr(dev, reg); sum += val * nf_power[i]; n += val; } @@ -1470,9 +1518,8 @@ mt7915_update_beacons(struct mt7915_dev *dev) } static void -mt7915_dma_reset(struct mt7915_phy *phy) +mt7915_dma_reset(struct mt7915_dev *dev) { - struct mt7915_dev *dev = phy->dev; 
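/*
 * The A-MPDU grouping moved into the GROUP_2 parsing above keys on the
 * RX timestamp: every new timestamp bumps ampdu_ref, and the double
 * increment skips zero on wraparound so a valid reference is never 0.
 * A compact demonstration of that logic:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t ampdu_ref = 0xffffffff;	/* about to wrap */
	uint32_t rx_ampdu_ts = 0;
	uint32_t ts[] = { 100, 100, 200 };	/* two subframes, then a new A-MPDU */
	unsigned int i;

	for (i = 0; i < sizeof(ts) / sizeof(ts[0]); i++) {
		if (rx_ampdu_ts != ts[i]) {
			if (!++ampdu_ref)	/* wrapped to 0? */
				ampdu_ref++;	/* skip the invalid value */
			rx_ampdu_ts = ts[i];
		}
		printf("ts %u -> ampdu_ref %u\n", ts[i], ampdu_ref);
	}
	return 0;
}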
struct mt76_phy *mphy_ext = dev->mt76.phy2; u32 hif1_ofs = MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE; int i; @@ -1489,18 +1536,20 @@ mt7915_dma_reset(struct mt7915_phy *phy) (MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN)); } + usleep_range(1000, 2000); - mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], true); for (i = 0; i < __MT_TXQ_MAX; i++) { - mt76_queue_tx_cleanup(dev, phy->mt76->q_tx[i], true); + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true); if (mphy_ext) mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[i], true); } - mt76_for_each_q_rx(&dev->mt76, i) { + for (i = 0; i < __MT_MCUQ_MAX; i++) + mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true); + + mt76_for_each_q_rx(&dev->mt76, i) mt76_queue_rx_reset(dev, i); - } /* re-init prefetch settings after reset */ mt7915_dma_prefetch(dev); @@ -1562,9 +1611,10 @@ void mt7915_mac_reset_work(struct work_struct *work) set_bit(MT76_MCU_RESET, &dev->mphy.state); wake_up(&dev->mt76.mcu.wait); cancel_delayed_work_sync(&dev->mphy.mac_work); - if (phy2) + if (phy2) { + set_bit(MT76_RESET, &phy2->mt76->state); cancel_delayed_work_sync(&phy2->mt76->mac_work); - + } /* lock/unlock all queues to ensure that no tx is pending */ mt76_txq_schedule_all(&dev->mphy); if (ext_phy) @@ -1584,7 +1634,7 @@ void mt7915_mac_reset_work(struct work_struct *work) idr_init(&dev->token); if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) { - mt7915_dma_reset(&dev->phy); + mt7915_dma_reset(dev); mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT); mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE); @@ -1592,6 +1642,8 @@ void mt7915_mac_reset_work(struct work_struct *work) clear_bit(MT76_MCU_RESET, &dev->mphy.state); clear_bit(MT76_RESET, &dev->mphy.state); + if (phy2) + clear_bit(MT76_RESET, &phy2->mt76->state); mt76_worker_enable(&dev->mt76.tx_worker); napi_enable(&dev->mt76.tx_napi); @@ -1633,39 +1685,30 @@ mt7915_mac_update_mib_stats(struct mt7915_phy *phy) bool ext_phy = phy != &dev->phy; int i, aggr0, aggr1; - memset(mib, 0, sizeof(*mib)); - - mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(ext_phy), - MT_MIB_SDR3_FCS_ERR_MASK); + mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy), + MT_MIB_SDR3_FCS_ERR_MASK); aggr0 = ext_phy ? 
ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0; for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) { - u32 val, val2; + u32 val; val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i)); - - val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val); - if (val2 > mib->ack_fail_cnt) - mib->ack_fail_cnt = val2; - - val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val); - if (val2 > mib->ba_miss_cnt) - mib->ba_miss_cnt = val2; + mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val); + mib->ack_fail_cnt += + FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val); val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i)); - val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val); - if (val2 > mib->rts_retries_cnt) { - mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val); - mib->rts_retries_cnt = val2; - } + mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val); + mib->rts_retries_cnt += + FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val); val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i)); - val2 = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i)); - dev->mt76.aggr_stats[aggr0++] += val & 0xffff; dev->mt76.aggr_stats[aggr0++] += val >> 16; - dev->mt76.aggr_stats[aggr1++] += val2 & 0xffff; - dev->mt76.aggr_stats[aggr1++] += val2 >> 16; + + val = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i)); + dev->mt76.aggr_stats[aggr1++] += val & 0xffff; + dev->mt76.aggr_stats[aggr1++] += val >> 16; } } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h index 96ff3fb0d1f3..0f929fb53027 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h @@ -86,6 +86,10 @@ enum rx_pkt_type { /* RXD DW4 */ #define MT_RXD4_NORMAL_PAYLOAD_FORMAT GENMASK(1, 0) +#define MT_RXD4_FIRST_AMSDU_FRAME GENMASK(1, 0) +#define MT_RXD4_MID_AMSDU_FRAME BIT(1) +#define MT_RXD4_LAST_AMSDU_FRAME BIT(0) + #define MT_RXD4_NORMAL_PATTERN_DROP BIT(9) #define MT_RXD4_NORMAL_CLS BIT(10) #define MT_RXD4_NORMAL_OFLD GENMASK(12, 11) @@ -97,6 +101,17 @@ enum rx_pkt_type { #define MT_RXV_HDR_BAND_IDX BIT(24) +/* RXD GROUP4 */ +#define MT_RXD6_FRAME_CONTROL GENMASK(15, 0) +#define MT_RXD6_TA_LO GENMASK(31, 16) + +#define MT_RXD7_TA_HI GENMASK(31, 0) + +#define MT_RXD8_SEQ_CTRL GENMASK(15, 0) +#define MT_RXD8_QOS_CTL GENMASK(31, 16) + +#define MT_RXD9_HT_CONTROL GENMASK(31, 0) + /* P-RXV */ #define MT_PRXV_TX_RATE GENMASK(6, 0) #define MT_PRXV_TX_DCM BIT(4) diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c index d4969b2e1ffb..2fd87987312e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c @@ -25,6 +25,7 @@ static int mt7915_start(struct ieee80211_hw *hw) struct mt7915_dev *dev = mt7915_hw_dev(hw); struct mt7915_phy *phy = mt7915_hw_phy(hw); bool running; + int ret; flush_work(&dev->init_work); @@ -33,21 +34,48 @@ static int mt7915_start(struct ieee80211_hw *hw) running = mt7915_dev_running(dev); if (!running) { - mt7915_mcu_set_pm(dev, 0, 0); - mt7915_mcu_set_mac(dev, 0, true, false); - mt7915_mcu_set_scs(dev, 0, true); + ret = mt7915_mcu_set_pm(dev, 0, 0); + if (ret) + goto out; + + ret = mt7915_mcu_set_mac(dev, 0, true, true); + if (ret) + goto out; + + ret = mt7915_mcu_set_scs(dev, 0, true); + if (ret) + goto out; + mt7915_mac_enable_nf(dev, 0); } if (phy != &dev->phy) { - mt7915_mcu_set_pm(dev, 1, 0); - mt7915_mcu_set_mac(dev, 1, true, false); - mt7915_mcu_set_scs(dev, 1, true); + ret = mt7915_mcu_set_pm(dev, 1, 0); + if (ret) + goto out; + + ret = mt7915_mcu_set_mac(dev, 1, true, 
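/*
 * Sketch of the MIB accumulation rework above: each MT_TX_AGG_CNT
 * register packs two 16-bit buckets, split with "& 0xffff" and ">> 16",
 * and counters are now summed on every poll instead of snapshotted,
 * since get_stats() clears them when user space reads them.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t aggr_stats[4] = { 0 };
	uint32_t regs[2] = { 0x00050003, 0x00020007 };	/* fake register reads */
	int i, j = 0;

	for (i = 0; i < 2; i++) {
		aggr_stats[j++] += regs[i] & 0xffff;	/* low 16-bit bucket */
		aggr_stats[j++] += regs[i] >> 16;	/* high 16-bit bucket */
	}
	for (i = 0; i < 4; i++)
		printf("aggr_stats[%d] = %u\n", i, aggr_stats[i]);
	return 0;
}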
true); + if (ret) + goto out; + + ret = mt7915_mcu_set_scs(dev, 1, true); + if (ret) + goto out; + mt7915_mac_enable_nf(dev, 1); } - mt7915_mcu_set_sku_en(phy, true); - mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH)); + ret = mt7915_mcu_set_rts_thresh(phy, 0x92b); + if (ret) + goto out; + + ret = mt7915_mcu_set_sku_en(phy, true); + if (ret) + goto out; + + ret = mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH)); + if (ret) + goto out; set_bit(MT76_STATE_RUNNING, &phy->mt76->state); @@ -58,9 +86,10 @@ static int mt7915_start(struct ieee80211_hw *hw) if (!running) mt7915_mac_reset_counters(phy); +out: mutex_unlock(&dev->mt76.mutex); - return 0; + return ret; } static void mt7915_stop(struct ieee80211_hw *hw) @@ -227,7 +256,8 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw, struct mt7915_phy *phy = mt7915_hw_phy(hw); int idx = msta->wcid.idx; - /* TODO: disable beacon for the bss */ + mt7915_mcu_add_bss_info(phy, vif, false); + mt7915_mcu_add_sta(dev, vif, NULL, false); mutex_lock(&dev->mt76.mutex); mt76_testmode_reset(phy->mt76, true); @@ -317,7 +347,9 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct mt7915_sta *msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta; struct mt76_wcid *wcid = &msta->wcid; + u8 *wcid_keyidx = &wcid->hw_key_idx; int idx = key->keyidx; + int err = 0; /* The hardware does not support per-STA RX GTK, fallback * to software mode for these. @@ -332,6 +364,7 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, /* fall back to sw encryption for unsupported ciphers */ switch (key->cipher) { case WLAN_CIPHER_SUITE_AES_CMAC: + wcid_keyidx = &wcid->hw_key_idx2; key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE; break; case WLAN_CIPHER_SUITE_TKIP: @@ -347,16 +380,24 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, return -EOPNOTSUPP; } - if (cmd == SET_KEY) { - key->hw_key_idx = wcid->idx; - wcid->hw_key_idx = idx; - } else if (idx == wcid->hw_key_idx) { - wcid->hw_key_idx = -1; - } + mutex_lock(&dev->mt76.mutex); + + if (cmd == SET_KEY) + *wcid_keyidx = idx; + else if (idx == *wcid_keyidx) + *wcid_keyidx = -1; + else + goto out; + mt76_wcid_key_setup(&dev->mt76, wcid, cmd == SET_KEY ? 
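/*
 * The mt7915_set_key() rework above selects the destination slot
 * through a pointer: wcid_keyidx normally aims at hw_key_idx, but
 * AES-CMAC (the IGTK) is steered into hw_key_idx2, so one SET/DISABLE
 * path updates whichever slot applies. A structural sketch:
 */
#include <stdio.h>

struct wcid { int hw_key_idx; int hw_key_idx2; };

static void set_key(struct wcid *wcid, int idx, int is_cmac, int set)
{
	int *wcid_keyidx = is_cmac ? &wcid->hw_key_idx2 : &wcid->hw_key_idx;

	if (set)
		*wcid_keyidx = idx;	/* SET_KEY */
	else if (*wcid_keyidx == idx)
		*wcid_keyidx = -1;	/* DISABLE_KEY for the active slot */
}

int main(void)
{
	struct wcid w = { -1, -1 };

	set_key(&w, 0, 0, 1);	/* pairwise key in slot 0 */
	set_key(&w, 4, 1, 1);	/* IGTK in slot 4 */
	printf("hw_key_idx %d, hw_key_idx2 %d\n", w.hw_key_idx, w.hw_key_idx2);
	return 0;
}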
key : NULL); - return mt7915_mcu_add_key(dev, vif, msta, key, cmd); + err = mt7915_mcu_add_key(dev, vif, msta, key, cmd); + +out: + mutex_unlock(&dev->mt76.mutex); + + return err; } static int mt7915_config(struct ieee80211_hw *hw, u32 changed) @@ -515,9 +556,9 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw, } } - if (changed & BSS_CHANGED_BEACON_ENABLED) { - mt7915_mcu_add_bss_info(phy, vif, info->enable_beacon); - mt7915_mcu_add_sta(dev, vif, NULL, info->enable_beacon); + if (changed & BSS_CHANGED_BEACON_ENABLED && info->enable_beacon) { + mt7915_mcu_add_bss_info(phy, vif, true); + mt7915_mcu_add_sta(dev, vif, NULL, true); } /* ensure that enable txcmd_mode after bss_info */ @@ -631,12 +672,13 @@ static int mt7915_set_rts_threshold(struct ieee80211_hw *hw, u32 val) { struct mt7915_dev *dev = mt7915_hw_dev(hw); struct mt7915_phy *phy = mt7915_hw_phy(hw); + int ret; mutex_lock(&dev->mt76.mutex); - mt7915_mcu_set_rts_thresh(phy, val); + ret = mt7915_mcu_set_rts_thresh(phy, val); mutex_unlock(&dev->mt76.mutex); - return 0; + return ret; } static int @@ -663,22 +705,22 @@ mt7915_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, case IEEE80211_AMPDU_RX_START: mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn, params->buf_size); - mt7915_mcu_add_rx_ba(dev, params, true); + ret = mt7915_mcu_add_rx_ba(dev, params, true); break; case IEEE80211_AMPDU_RX_STOP: mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid); - mt7915_mcu_add_rx_ba(dev, params, false); + ret = mt7915_mcu_add_rx_ba(dev, params, false); break; case IEEE80211_AMPDU_TX_OPERATIONAL: mtxq->aggr = true; mtxq->send_bar = false; - mt7915_mcu_add_tx_ba(dev, params, true); + ret = mt7915_mcu_add_tx_ba(dev, params, true); break; case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: mtxq->aggr = false; clear_bit(tid, &msta->ampdu_state); - mt7915_mcu_add_tx_ba(dev, params, false); + ret = mt7915_mcu_add_tx_ba(dev, params, false); break; case IEEE80211_AMPDU_TX_START: set_bit(tid, &msta->ampdu_state); @@ -687,7 +729,7 @@ mt7915_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, case IEEE80211_AMPDU_TX_STOP_CONT: mtxq->aggr = false; clear_bit(tid, &msta->ampdu_state); - mt7915_mcu_add_tx_ba(dev, params, false); + ret = mt7915_mcu_add_tx_ba(dev, params, false); ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; } @@ -717,13 +759,19 @@ mt7915_get_stats(struct ieee80211_hw *hw, struct ieee80211_low_level_stats *stats) { struct mt7915_phy *phy = mt7915_hw_phy(hw); + struct mt7915_dev *dev = mt7915_hw_dev(hw); struct mib_stats *mib = &phy->mib; + mutex_lock(&dev->mt76.mutex); stats->dot11RTSSuccessCount = mib->rts_cnt; stats->dot11RTSFailureCount = mib->rts_retries_cnt; stats->dot11FCSErrorCount = mib->fcs_err_cnt; stats->dot11ACKFailureCount = mib->ack_fail_cnt; + memset(mib, 0, sizeof(*mib)); + + mutex_unlock(&dev->mt76.mutex); + return 0; } @@ -833,9 +881,12 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw, struct mt7915_phy *phy = mt7915_hw_phy(hw); struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; struct mt7915_sta_stats *stats = &msta->stats; + struct rate_info rxrate = {}; - if (mt7915_mcu_get_rx_rate(phy, vif, sta, &sinfo->rxrate) == 0) + if (!mt7915_mcu_get_rx_rate(phy, vif, sta, &rxrate)) { + sinfo->rxrate = rxrate; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE); + } if (!stats->tx_rate.legacy && !stats->tx_rate.flags) return; @@ -888,6 +939,22 @@ static void mt7915_sta_set_4addr(struct ieee80211_hw *hw, 
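/*
 * mt7915_get_stats() above turns the MIB block into read-and-clear
 * counters: the snapshot is reported and then zeroed under the same
 * mutex the update path takes, so increments are never lost between
 * the read and the reset. A pthread-based model of the pattern:
 */
#include <stdio.h>
#include <string.h>
#include <pthread.h>

static pthread_mutex_t mib_lock = PTHREAD_MUTEX_INITIALIZER;
static struct { unsigned int rts_cnt; unsigned int fcs_err_cnt; } mib;

static void mib_update(void)
{
	pthread_mutex_lock(&mib_lock);
	mib.rts_cnt++;			/* periodic poll accumulates */
	pthread_mutex_unlock(&mib_lock);
}

static unsigned int mib_read_rts(void)
{
	unsigned int val;

	pthread_mutex_lock(&mib_lock);
	val = mib.rts_cnt;
	memset(&mib, 0, sizeof(mib));	/* counters restart at zero */
	pthread_mutex_unlock(&mib_lock);
	return val;
}

int main(void)
{
	unsigned int first, second;

	mib_update();
	mib_update();
	first = mib_read_rts();
	second = mib_read_rts();
	printf("first read %u, second read %u\n", first, second);	/* 2, 0 */
	return 0;
}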
mt7915_mcu_sta_update_hdr_trans(dev, vif, sta); } +static void mt7915_sta_set_decap_offload(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + bool enabled) +{ + struct mt7915_dev *dev = mt7915_hw_dev(hw); + struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; + + if (enabled) + set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags); + else + clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags); + + mt7915_mcu_sta_update_hdr_trans(dev, vif, sta); +} + const struct ieee80211_ops mt7915_ops = { .tx = mt7915_tx, .start = mt7915_start, @@ -920,6 +987,7 @@ const struct ieee80211_ops mt7915_ops = { .set_coverage_class = mt7915_set_coverage_class, .sta_statistics = mt7915_sta_statistics, .sta_set_4addr = mt7915_sta_set_4addr, + .sta_set_decap_offload = mt7915_sta_set_decap_offload, CFG80211_TESTMODE_CMD(mt76_testmode_cmd) CFG80211_TESTMODE_DUMP(mt76_testmode_dump) #ifdef CONFIG_MAC80211_DEBUGFS diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index 195929242b72..0b739ed8ce33 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -147,9 +147,10 @@ mt7915_get_he_phy_cap(struct mt7915_phy *phy, struct ieee80211_vif *vif) } static u8 -mt7915_get_phy_mode(struct mt7915_dev *dev, struct ieee80211_vif *vif, - enum nl80211_band band, struct ieee80211_sta *sta) +mt7915_get_phy_mode(struct mt76_phy *mphy, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) { + enum nl80211_band band = mphy->chandef.chan->band; struct ieee80211_sta_ht_cap *ht_cap; struct ieee80211_sta_vht_cap *vht_cap; const struct ieee80211_sta_he_cap *he_cap; @@ -161,12 +162,8 @@ mt7915_get_phy_mode(struct mt7915_dev *dev, struct ieee80211_vif *vif, he_cap = &sta->he_cap; } else { struct ieee80211_supported_band *sband; - struct mt7915_phy *phy; - struct mt7915_vif *mvif; - mvif = (struct mt7915_vif *)vif->drv_priv; - phy = mvif->band_idx ? 
mt7915_ext_phy(dev) : &dev->phy; - sband = phy->mt76->hw->wiphy->bands[band]; + sband = mphy->hw->wiphy->bands[band]; ht_cap = &sband->ht_cap; vht_cap = &sband->vht_cap; @@ -337,6 +334,22 @@ mt7915_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif) } static void +mt7915_mcu_rx_csa_notify(struct mt7915_dev *dev, struct sk_buff *skb) +{ + struct mt76_phy *mphy = &dev->mt76.phy; + struct mt7915_mcu_csa_notify *c; + + c = (struct mt7915_mcu_csa_notify *)skb->data; + + if (c->band_idx && dev->mt76.phy2) + mphy = dev->mt76.phy2; + + ieee80211_iterate_active_interfaces_atomic(mphy->hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7915_mcu_csa_finish, mphy->hw); +} + +static void mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb) { struct mt76_phy *mphy = &dev->mt76.phy; @@ -344,61 +357,69 @@ mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb) r = (struct mt7915_mcu_rdd_report *)skb->data; - if (r->idx && dev->mt76.phy2) + if (r->band_idx && dev->mt76.phy2) mphy = dev->mt76.phy2; ieee80211_radar_detected(mphy->hw); dev->hw_pattern++; } -static void +static int mt7915_mcu_tx_rate_parse(struct mt76_phy *mphy, struct mt7915_mcu_ra_info *ra, struct rate_info *rate, u16 r) { struct ieee80211_supported_band *sband; u16 ru_idx = le16_to_cpu(ra->ru_idx); - u16 flags = 0; + bool cck = false; rate->mcs = FIELD_GET(MT_RA_RATE_MCS, r); rate->nss = FIELD_GET(MT_RA_RATE_NSS, r) + 1; switch (FIELD_GET(MT_RA_RATE_TX_MODE, r)) { case MT_PHY_TYPE_CCK: + cck = true; + fallthrough; case MT_PHY_TYPE_OFDM: if (mphy->chandef.chan->band == NL80211_BAND_5GHZ) sband = &mphy->sband_5g.sband; else sband = &mphy->sband_2g.sband; + rate->mcs = mt76_get_rate(mphy->dev, sband, rate->mcs, cck); rate->legacy = sband->bitrates[rate->mcs].bitrate; break; case MT_PHY_TYPE_HT: case MT_PHY_TYPE_HT_GF: rate->mcs += (rate->nss - 1) * 8; - flags |= RATE_INFO_FLAGS_MCS; + if (rate->mcs > 31) + return -EINVAL; + rate->flags = RATE_INFO_FLAGS_MCS; if (ra->gi) - flags |= RATE_INFO_FLAGS_SHORT_GI; + rate->flags |= RATE_INFO_FLAGS_SHORT_GI; break; case MT_PHY_TYPE_VHT: - flags |= RATE_INFO_FLAGS_VHT_MCS; + if (rate->mcs > 9) + return -EINVAL; + rate->flags = RATE_INFO_FLAGS_VHT_MCS; if (ra->gi) - flags |= RATE_INFO_FLAGS_SHORT_GI; + rate->flags |= RATE_INFO_FLAGS_SHORT_GI; break; case MT_PHY_TYPE_HE_SU: case MT_PHY_TYPE_HE_EXT_SU: case MT_PHY_TYPE_HE_TB: case MT_PHY_TYPE_HE_MU: + if (ra->gi > NL80211_RATE_INFO_HE_GI_3_2 || rate->mcs > 11) + return -EINVAL; + rate->he_gi = ra->gi; rate->he_dcm = FIELD_GET(MT_RA_RATE_DCM_EN, r); - - flags |= RATE_INFO_FLAGS_HE_MCS; + rate->flags = RATE_INFO_FLAGS_HE_MCS; break; default: - break; + return -EINVAL; } - rate->flags = flags; if (ru_idx) { switch (ru_idx) { @@ -435,6 +456,8 @@ mt7915_mcu_tx_rate_parse(struct mt76_phy *mphy, struct mt7915_mcu_ra_info *ra, break; } } + + return 0; } static void @@ -465,12 +488,12 @@ mt7915_mcu_tx_rate_report(struct mt7915_dev *dev, struct sk_buff *skb) mphy = dev->mt76.phy2; /* current rate */ - mt7915_mcu_tx_rate_parse(mphy, ra, &rate, curr); - stats->tx_rate = rate; + if (!mt7915_mcu_tx_rate_parse(mphy, ra, &rate, curr)) + stats->tx_rate = rate; /* probing rate */ - mt7915_mcu_tx_rate_parse(mphy, ra, &prob_rate, probe); - stats->prob_rate = prob_rate; + if (!mt7915_mcu_tx_rate_parse(mphy, ra, &prob_rate, probe)) + stats->prob_rate = prob_rate; if (attempts) { u16 success = le16_to_cpu(ra->success); @@ -498,7 +521,8 @@ mt7915_mcu_rx_log_message(struct mt7915_dev *dev, struct sk_buff *skb) break; } - 
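/*
 * The tx-rate parser above now fails out on impossible MCS indices
 * rather than handing mac80211 a bogus rate_info; the bounds follow the
 * patch (HT <= 31, VHT <= 9, HE <= 11, HE GI <= 3.2 us). A reduced
 * sketch of the per-PHY-type validation:
 */
#include <stdio.h>

enum phy_type { PHY_HT, PHY_VHT, PHY_HE };

static int validate_mcs(enum phy_type type, unsigned int mcs)
{
	switch (type) {
	case PHY_HT:
		return mcs <= 31 ? 0 : -1;
	case PHY_VHT:
		return mcs <= 9 ? 0 : -1;
	case PHY_HE:
		return mcs <= 11 ? 0 : -1;
	}
	return -1;
}

int main(void)
{
	printf("VHT mcs 9:  %d\n", validate_mcs(PHY_VHT, 9));	/* accepted */
	printf("VHT mcs 10: %d\n", validate_mcs(PHY_VHT, 10));	/* rejected */
	return 0;
}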
wiphy_info(mt76_hw(dev)->wiphy, "%s: %s", type, data); + wiphy_info(mt76_hw(dev)->wiphy, "%s: %*s", type, + (int)(skb->len - sizeof(*rxd)), data); } static void @@ -511,9 +535,7 @@ mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb) mt7915_mcu_rx_radar_detected(dev, skb); break; case MCU_EXT_EVENT_CSA_NOTIFY: - ieee80211_iterate_active_interfaces_atomic(dev->mt76.hw, - IEEE80211_IFACE_ITER_RESUME_ALL, - mt7915_mcu_csa_finish, dev); + mt7915_mcu_rx_csa_notify(dev, skb); break; case MCU_EXT_EVENT_RATE_REPORT: mt7915_mcu_tx_rate_report(dev, skb); @@ -592,7 +614,7 @@ mt7915_mcu_alloc_wtbl_req(struct mt7915_dev *dev, struct mt7915_sta *msta, if (!nskb) { nskb = mt76_mcu_msg_alloc(&dev->mt76, NULL, - MT7915_WTBL_UPDATE_BA_SIZE); + MT7915_WTBL_UPDATE_MAX_SIZE); if (!nskb) return ERR_PTR(-ENOMEM); @@ -662,8 +684,6 @@ mt7915_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, struct mt7915_phy *phy, bool enable) { struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; - struct cfg80211_chan_def *chandef = &phy->mt76->chandef; - enum nl80211_band band = chandef->chan->band; struct bss_info_basic *bss; u16 wlan_idx = mvif->sta.wcid.idx; u32 type = NETWORK_INFRA; @@ -713,7 +733,7 @@ mt7915_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN); bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int); bss->dtim_period = vif->bss_conf.dtim_period; - bss->phy_mode = mt7915_get_phy_mode(phy->dev, vif, band, NULL); + bss->phy_mode = mt7915_get_phy_mode(phy->mt76, vif, NULL); } else { memcpy(bss->bssid, phy->mt76->macaddr, ETH_ALEN); } @@ -989,8 +1009,10 @@ int mt7915_mcu_add_bss_info(struct mt7915_phy *phy, struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; struct sk_buff *skb; - if (mvif->omac_idx >= REPEATER_BSSID_START) + if (mvif->omac_idx >= REPEATER_BSSID_START) { + mt7915_mcu_muar_config(phy, vif, false, enable); mt7915_mcu_muar_config(phy, vif, true, enable); + } skb = mt7915_mcu_alloc_sta_req(phy->dev, mvif, NULL, MT7915_BSS_UPDATE_MAX_SIZE); @@ -1188,6 +1210,9 @@ mt7915_mcu_sta_ba(struct mt7915_dev *dev, wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl, &skb); + if (IS_ERR(wtbl_hdr)) + return PTR_ERR(wtbl_hdr); + mt7915_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr); ret = mt76_mcu_skb_send_msg(&dev->mt76, skb, @@ -1685,6 +1710,7 @@ mt7915_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, return; msta = (struct mt7915_sta *)sta->drv_priv; + htr->no_rx_trans = !test_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags); if (test_bit(MT_WCID_FLAG_4ADDR, &msta->wcid.flags)) { htr->to_ds = true; htr->from_ds = true; @@ -1704,6 +1730,9 @@ int mt7915_mcu_sta_update_hdr_trans(struct mt7915_dev *dev, return -ENOMEM; wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, NULL, &skb); + if (IS_ERR(wtbl_hdr)) + return PTR_ERR(wtbl_hdr); + mt7915_mcu_wtbl_hdr_trans_tlv(skb, vif, sta, NULL, wtbl_hdr); return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD(WTBL_UPDATE), @@ -1728,6 +1757,9 @@ int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif, wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl, &skb); + if (IS_ERR(wtbl_hdr)) + return PTR_ERR(wtbl_hdr); + mt7915_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_hdr); return mt76_mcu_skb_send_msg(&dev->mt76, skb, @@ -2045,25 +2077,30 @@ mt7915_mcu_add_txbf(struct mt7915_dev *dev, struct ieee80211_vif *vif, static void mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, 
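/*
 * On the log-message hunk above: the MCU payload is not NUL-terminated,
 * so the print must be bounded by the remaining skb length. In printf
 * semantics the precision form "%.*s" is what caps how many bytes of
 * the string are consumed, while the width form "%*s" only pads short
 * output. A userspace illustration of the difference:
 */
#include <stdio.h>

int main(void)
{
	const char buf[] = { 'M', 'C', 'U', ' ', 'l', 'o', 'g' };	/* no NUL */
	int len = (int)sizeof(buf);

	printf("bounded: %.*s\n", len, buf);	/* reads at most len bytes */
	printf("padded: [%*s]\n", 10, "hi");	/* width pads to 10 columns */
	return 0;
}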
struct mt7915_dev *dev, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) + struct ieee80211_vif *vif, struct ieee80211_sta *sta) { - struct cfg80211_chan_def *chandef = &dev->mphy.chandef; + struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; + struct mt76_phy *mphy = &dev->mphy; + enum nl80211_band band; struct sta_rec_ra *ra; struct tlv *tlv; - enum nl80211_band band = chandef->chan->band; - u32 supp_rate = sta->supp_rates[band]; - int n_rates = hweight32(supp_rate); - u32 cap = sta->wme ? STA_CAP_WMM : 0; + u32 supp_rate, n_rates, cap = sta->wme ? STA_CAP_WMM : 0; u8 i, nss = sta->rx_nss, mcs = 0; tlv = mt7915_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra)); - ra = (struct sta_rec_ra *)tlv; + + if (msta->wcid.ext_phy && dev->mt76.phy2) + mphy = dev->mt76.phy2; + + band = mphy->chandef.chan->band; + supp_rate = sta->supp_rates[band]; + n_rates = hweight32(supp_rate); + ra->valid = true; ra->auto_rate = true; - ra->phy_mode = mt7915_get_phy_mode(dev, vif, band, sta); - ra->channel = chandef->chan->hw_value; + ra->phy_mode = mt7915_get_phy_mode(mphy, vif, sta); + ra->channel = mphy->chandef.chan->hw_value; ra->bw = sta->bandwidth; ra->rate_len = n_rates; ra->phy.bw = sta->bandwidth; @@ -2253,6 +2290,9 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif, wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_RESET_AND_SET, sta_wtbl, &skb); + if (IS_ERR(wtbl_hdr)) + return PTR_ERR(wtbl_hdr); + if (enable) { mt7915_mcu_wtbl_generic_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr); mt7915_mcu_wtbl_hdr_trans_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr); @@ -2411,6 +2451,17 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct bss_info_bcn *bcn; int len = MT7915_BEACON_UPDATE_SIZE + MAX_BEACON_SIZE; + rskb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len); + if (IS_ERR(rskb)) + return PTR_ERR(rskb); + + tlv = mt7915_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn)); + bcn = (struct bss_info_bcn *)tlv; + bcn->enable = en; + + if (!en) + goto out; + skb = ieee80211_beacon_get_template(hw, vif, &offs); if (!skb) return -EINVAL; @@ -2421,16 +2472,6 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, return -EINVAL; } - rskb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len); - if (IS_ERR(rskb)) { - dev_kfree_skb(skb); - return PTR_ERR(rskb); - } - - tlv = mt7915_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn)); - bcn = (struct bss_info_bcn *)tlv; - bcn->enable = en; - if (mvif->band_idx) { info = IEEE80211_SKB_CB(skb); info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY; @@ -2441,6 +2482,7 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, mt7915_mcu_beacon_cont(dev, rskb, skb, bcn, &offs); dev_kfree_skb(skb); +out: return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb, MCU_EXT_CMD(BSS_INFO_UPDATE), true); } @@ -2500,11 +2542,9 @@ static int mt7915_mcu_start_patch(struct mt7915_dev *dev) static int mt7915_driver_own(struct mt7915_dev *dev) { - u32 reg = mt7915_reg_map_l1(dev, MT_TOP_LPCR_HOST_BAND0); - - mt76_wr(dev, reg, MT_TOP_LPCR_HOST_DRV_OWN); - if (!mt76_poll_msec(dev, reg, MT_TOP_LPCR_HOST_FW_OWN, - 0, 500)) { + mt76_wr(dev, MT_TOP_LPCR_HOST_BAND0, MT_TOP_LPCR_HOST_DRV_OWN); + if (!mt76_poll_msec(dev, MT_TOP_LPCR_HOST_BAND0, + MT_TOP_LPCR_HOST_FW_OWN, 0, 500)) { dev_err(dev->mt76.dev, "Timeout for driver own\n"); return -EIO; } @@ -2743,20 +2783,6 @@ out: static int mt7915_load_firmware(struct mt7915_dev *dev) { int ret; - u32 val, reg = mt7915_reg_map_l1(dev, MT_TOP_MISC); - - val = FIELD_PREP(MT_TOP_MISC_FW_STATE, FW_STATE_FW_DOWNLOAD); - - if (!mt76_poll_msec(dev, reg, 
MT_TOP_MISC_FW_STATE, val, 1000)) { - /* restart firmware once */ - __mt76_mcu_restart(&dev->mt76); - if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE, - val, 1000)) { - dev_err(dev->mt76.dev, - "Firmware is not ready for download\n"); - return -EIO; - } - } ret = mt7915_load_patch(dev); if (ret) @@ -2766,7 +2792,7 @@ static int mt7915_load_firmware(struct mt7915_dev *dev) if (ret) return ret; - if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE, + if (!mt76_poll_msec(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE, FIELD_PREP(MT_TOP_MISC_FW_STATE, FW_STATE_WACPU_RDY), 1000)) { dev_err(dev->mt76.dev, "Timeout for initializing firmware\n"); @@ -2854,21 +2880,39 @@ int mt7915_mcu_init(struct mt7915_dev *dev) void mt7915_mcu_exit(struct mt7915_dev *dev) { - u32 reg = mt7915_reg_map_l1(dev, MT_TOP_MISC); - __mt76_mcu_restart(&dev->mt76); - if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE, + if (!mt76_poll_msec(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE, FIELD_PREP(MT_TOP_MISC_FW_STATE, FW_STATE_FW_DOWNLOAD), 1000)) { dev_err(dev->mt76.dev, "Failed to exit mcu\n"); return; } - reg = mt7915_reg_map_l1(dev, MT_TOP_LPCR_HOST_BAND0); - mt76_wr(dev, reg, MT_TOP_LPCR_HOST_FW_OWN); + mt76_wr(dev, MT_TOP_LPCR_HOST_BAND0, MT_TOP_LPCR_HOST_FW_OWN); skb_queue_purge(&dev->mt76.mcu.res_q); } +static int +mt7915_mcu_set_rx_hdr_trans_blacklist(struct mt7915_dev *dev, int band) +{ + struct { + u8 operation; + u8 count; + u8 _rsv[2]; + u8 index; + u8 enable; + __le16 etype; + } req = { + .operation = 1, + .count = 1, + .enable = 1, + .etype = cpu_to_le16(ETH_P_PAE), + }; + + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RX_HDR_TRANS), + &req, sizeof(req), false); +} + int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band, bool enable, bool hdr_trans) { @@ -2899,6 +2943,9 @@ int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band, if (ret) return ret; + if (hdr_trans) + mt7915_mcu_set_rx_hdr_trans_blacklist(dev, band); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MAC_INIT_CTRL), &req_mac, sizeof(req_mac), true); } @@ -3182,7 +3229,7 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd) } #endif - if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) + if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD; else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) && chandef->chan->dfs_state != NL80211_DFS_AVAILABLE) @@ -3501,9 +3548,8 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif, struct ieee80211_supported_band *sband; struct mt7915_mcu_phy_rx_info *res; struct sk_buff *skb; - u16 flags = 0; int ret; - int i; + bool cck = false; ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(PHY_STAT_INFO), &req, sizeof(req), true, &skb); @@ -3517,48 +3563,53 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif, switch (res->mode) { case MT_PHY_TYPE_CCK: + cck = true; + fallthrough; case MT_PHY_TYPE_OFDM: if (mphy->chandef.chan->band == NL80211_BAND_5GHZ) sband = &mphy->sband_5g.sband; else sband = &mphy->sband_2g.sband; - for (i = 0; i < sband->n_bitrates; i++) { - if (rate->mcs != (sband->bitrates[i].hw_value & 0xf)) - continue; - - rate->legacy = sband->bitrates[i].bitrate; - break; - } + rate->mcs = mt76_get_rate(&dev->mt76, sband, rate->mcs, cck); + rate->legacy = sband->bitrates[rate->mcs].bitrate; break; case MT_PHY_TYPE_HT: case MT_PHY_TYPE_HT_GF: - if (rate->mcs > 31) - return -EINVAL; - - flags |= RATE_INFO_FLAGS_MCS; + if (rate->mcs > 31) { + ret = -EINVAL; + goto out; + } + 
rate->flags = RATE_INFO_FLAGS_MCS; if (res->gi) - flags |= RATE_INFO_FLAGS_SHORT_GI; + rate->flags |= RATE_INFO_FLAGS_SHORT_GI; break; case MT_PHY_TYPE_VHT: - flags |= RATE_INFO_FLAGS_VHT_MCS; + if (rate->mcs > 9) { + ret = -EINVAL; + goto out; + } + rate->flags = RATE_INFO_FLAGS_VHT_MCS; if (res->gi) - flags |= RATE_INFO_FLAGS_SHORT_GI; + rate->flags |= RATE_INFO_FLAGS_SHORT_GI; break; case MT_PHY_TYPE_HE_SU: case MT_PHY_TYPE_HE_EXT_SU: case MT_PHY_TYPE_HE_TB: case MT_PHY_TYPE_HE_MU: + if (res->gi > NL80211_RATE_INFO_HE_GI_3_2 || rate->mcs > 11) { + ret = -EINVAL; + goto out; + } rate->he_gi = res->gi; - - flags |= RATE_INFO_FLAGS_HE_MCS; + rate->flags = RATE_INFO_FLAGS_HE_MCS; break; default: - break; + ret = -EINVAL; + goto out; } - rate->flags = flags; switch (res->bw) { case IEEE80211_STA_RX_BW_160: @@ -3575,7 +3626,8 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif, break; } +out: dev_kfree_skb(skb); - return 0; + return ret; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h index 2d584142c27b..4a932140a7c3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h @@ -68,10 +68,19 @@ struct mt7915_mcu_rxd { u8 s2d_index; }; +struct mt7915_mcu_csa_notify { + struct mt7915_mcu_rxd rxd; + + u8 omac_idx; + u8 csa_count; + u8 band_idx; + u8 rsv; +} __packed; + struct mt7915_mcu_rdd_report { struct mt7915_mcu_rxd rxd; - u8 idx; + u8 band_idx; u8 long_detected; u8 constant_prf_detected; u8 staggered_prf_detected; @@ -1080,9 +1089,6 @@ enum { sizeof(struct tlv) + \ MT7915_WTBL_UPDATE_MAX_SIZE) -#define MT7915_WTBL_UPDATE_BA_SIZE (sizeof(struct wtbl_req_hdr) + \ - sizeof(struct wtbl_ba)) - #define MT7915_BSS_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \ sizeof(struct bss_info_omac) + \ sizeof(struct bss_info_basic) +\ diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c new file mode 100644 index 000000000000..af712a936ef6 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c @@ -0,0 +1,152 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2020 MediaTek Inc. 
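/*
 * The RX_HDR_TRANS blacklist request above is a fixed-layout,
 * little-endian wire message; cpu_to_le16(ETH_P_PAE) keeps EAPOL frames
 * out of hardware header translation regardless of host byte order. A
 * portable userspace sketch of serializing the same layout:
 */
#include <stdio.h>
#include <stdint.h>

#define ETH_P_PAE 0x888e	/* EAPOL ethertype */

static void put_le16(uint8_t *p, uint16_t v)
{
	p[0] = v & 0xff;	/* least significant byte first */
	p[1] = v >> 8;
}

int main(void)
{
	uint8_t req[8] = { 0 };
	size_t i;

	req[0] = 1;			/* operation */
	req[1] = 1;			/* count */
	/* req[2..3] reserved */
	req[4] = 0;			/* index */
	req[5] = 1;			/* enable */
	put_le16(&req[6], ETH_P_PAE);	/* etype, little endian */

	for (i = 0; i < sizeof(req); i++)
		printf("%02x ", req[i]);
	printf("\n");
	return 0;
}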
*/ + +#include "mt7915.h" + +static u32 mt7915_reg_map_l1(struct mt7915_dev *dev, u32 addr) +{ + u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr); + u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr); + + mt76_rmw_field(dev, MT_HIF_REMAP_L1, MT_HIF_REMAP_L1_MASK, base); + /* use read to push write */ + mt76_rr(dev, MT_HIF_REMAP_L1); + + return MT_HIF_REMAP_BASE_L1 + offset; +} + +static u32 mt7915_reg_map_l2(struct mt7915_dev *dev, u32 addr) +{ + u32 offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET, addr); + u32 base = FIELD_GET(MT_HIF_REMAP_L2_BASE, addr); + + mt76_rmw_field(dev, MT_HIF_REMAP_L2, MT_HIF_REMAP_L2_MASK, base); + /* use read to push write */ + mt76_rr(dev, MT_HIF_REMAP_L2); + + return MT_HIF_REMAP_BASE_L2 + offset; +} + +static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr) +{ + static const struct { + u32 phys; + u32 mapped; + u32 size; + } fixed_map[] = { + { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */ + { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */ + { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */ + { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */ + { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */ + { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */ + { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */ + { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */ + { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */ + { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */ + { 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */ + { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */ + { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */ + { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */ + { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */ + { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */ + { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */ + { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */ + { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */ + { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */ + { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */ + { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */ + { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */ + { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */ + { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */ + { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */ + { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */ + { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */ + { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */ + { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */ + { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */ + { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */ + { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */ + { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */ + { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */ + { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */ + { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */ + { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */ + }; + int i; + + if (addr < 0x100000) + return addr; + + for (i = 0; i < ARRAY_SIZE(fixed_map); i++) { + u32 ofs; + + if (addr < fixed_map[i].phys) + continue; + + ofs = addr - fixed_map[i].phys; + if (ofs > 
fixed_map[i].size) + continue; + + return fixed_map[i].mapped + ofs; + } + + if ((addr >= 0x18000000 && addr < 0x18c00000) || + (addr >= 0x70000000 && addr < 0x78000000) || + (addr >= 0x7c000000 && addr < 0x7c400000)) + return mt7915_reg_map_l1(dev, addr); + + return mt7915_reg_map_l2(dev, addr); +} + +static u32 mt7915_rr(struct mt76_dev *mdev, u32 offset) +{ + struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); + u32 addr = __mt7915_reg_addr(dev, offset); + + return dev->bus_ops->rr(mdev, addr); +} + +static void mt7915_wr(struct mt76_dev *mdev, u32 offset, u32 val) +{ + struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); + u32 addr = __mt7915_reg_addr(dev, offset); + + dev->bus_ops->wr(mdev, addr, val); +} + +static u32 mt7915_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val) +{ + struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); + u32 addr = __mt7915_reg_addr(dev, offset); + + return dev->bus_ops->rmw(mdev, addr, mask, val); +} + +int mt7915_mmio_init(struct mt76_dev *mdev, void __iomem *mem_base, int irq) +{ + struct mt76_bus_ops *bus_ops; + struct mt7915_dev *dev; + + dev = container_of(mdev, struct mt7915_dev, mt76); + mt76_mmio_init(&dev->mt76, mem_base); + + dev->bus_ops = dev->mt76.bus; + bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops), + GFP_KERNEL); + if (!bus_ops) + return -ENOMEM; + + bus_ops->rr = mt7915_rr; + bus_ops->wr = mt7915_wr; + bus_ops->rmw = mt7915_rmw; + dev->mt76.bus = bus_ops; + + mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) | + (mt76_rr(dev, MT_HW_REV) & 0xff); + dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev); + + mt76_wr(dev, MT_INT_MASK_CSR, 0); + + return 0; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h index 5c7eefdf2013..dbc70c0d6668 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h @@ -108,11 +108,11 @@ struct mt7915_vif { }; struct mib_stats { - u16 ack_fail_cnt; - u16 fcs_err_cnt; - u16 rts_cnt; - u16 rts_retries_cnt; - u16 ba_miss_cnt; + u32 ack_fail_cnt; + u32 fcs_err_cnt; + u32 rts_cnt; + u32 rts_retries_cnt; + u32 ba_miss_cnt; }; struct mt7915_hif { @@ -142,7 +142,7 @@ struct mt7915_phy { u8 rdd_state; int dfs_state; - __le32 rx_ampdu_ts; + u32 rx_ampdu_ts; u32 ampdu_ref; struct mib_stats mib; @@ -394,80 +394,6 @@ static inline void mt7915_irq_disable(struct mt7915_dev *dev, u32 mask) mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0); } -static inline u32 -mt7915_reg_map_l1(struct mt7915_dev *dev, u32 addr) -{ - u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr); - u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr); - - mt76_rmw_field(dev, MT_HIF_REMAP_L1, MT_HIF_REMAP_L1_MASK, base); - /* use read to push write */ - mt76_rr(dev, MT_HIF_REMAP_L1); - - return MT_HIF_REMAP_BASE_L1 + offset; -} - -static inline u32 -mt7915_l1_rr(struct mt7915_dev *dev, u32 addr) -{ - return mt76_rr(dev, mt7915_reg_map_l1(dev, addr)); -} - -static inline void -mt7915_l1_wr(struct mt7915_dev *dev, u32 addr, u32 val) -{ - mt76_wr(dev, mt7915_reg_map_l1(dev, addr), val); -} - -static inline u32 -mt7915_l1_rmw(struct mt7915_dev *dev, u32 addr, u32 mask, u32 val) -{ - val |= mt7915_l1_rr(dev, addr) & ~mask; - mt7915_l1_wr(dev, addr, val); - - return val; -} - -#define mt7915_l1_set(dev, addr, val) mt7915_l1_rmw(dev, addr, 0, val) -#define mt7915_l1_clear(dev, addr, val) mt7915_l1_rmw(dev, addr, val, 0) - -static inline u32 
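/*
 * A self-contained model of the fixed_map lookup consolidated into
 * mmio.c above: physical addresses falling inside a known window map to
 * mapped + offset, anything below 0x100000 is already a bus offset, and
 * everything else would fall back to the L1/L2 remap registers. Two
 * sample windows are copied from the table; the rest are elided.
 */
#include <stdio.h>
#include <stdint.h>

struct reg_window { uint32_t phys, mapped, size; };

static const struct reg_window fixed_map[] = {
	{ 0x54000000, 0x02000, 0x1000 },	/* WFDMA PCIE0 MCU DMA0 */
	{ 0x820e0000, 0x20000, 0x0400 },	/* WF_LMAC_TOP BN0 (WF_CFG) */
};

static uint32_t reg_addr(uint32_t addr)
{
	unsigned int i;

	if (addr < 0x100000)
		return addr;

	for (i = 0; i < sizeof(fixed_map) / sizeof(fixed_map[0]); i++) {
		uint32_t ofs;

		if (addr < fixed_map[i].phys)
			continue;

		ofs = addr - fixed_map[i].phys;
		if (ofs > fixed_map[i].size)
			continue;

		return fixed_map[i].mapped + ofs;
	}
	return 0;	/* would go through the dynamic remap instead */
}

int main(void)
{
	printf("0x54000120 -> 0x%05x\n", reg_addr(0x54000120));	/* 0x02120 */
	printf("0x820e0004 -> 0x%05x\n", reg_addr(0x820e0004));	/* 0x20004 */
	return 0;
}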
-mt7915_reg_map_l2(struct mt7915_dev *dev, u32 addr) -{ - u32 offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET, addr); - u32 base = FIELD_GET(MT_HIF_REMAP_L2_BASE, addr); - - mt76_rmw_field(dev, MT_HIF_REMAP_L2, MT_HIF_REMAP_L2_MASK, base); - /* use read to push write */ - mt76_rr(dev, MT_HIF_REMAP_L2); - - return MT_HIF_REMAP_BASE_L2 + offset; -} - -static inline u32 -mt7915_l2_rr(struct mt7915_dev *dev, u32 addr) -{ - return mt76_rr(dev, mt7915_reg_map_l2(dev, addr)); -} - -static inline void -mt7915_l2_wr(struct mt7915_dev *dev, u32 addr, u32 val) -{ - mt76_wr(dev, mt7915_reg_map_l2(dev, addr), val); -} - -static inline u32 -mt7915_l2_rmw(struct mt7915_dev *dev, u32 addr, u32 mask, u32 val) -{ - val |= mt7915_l2_rr(dev, addr) & ~mask; - mt7915_l2_wr(dev, addr, val); - - return val; -} - -#define mt7915_l2_set(dev, addr, val) mt7915_l2_rmw(dev, addr, 0, val) -#define mt7915_l2_clear(dev, addr, val) mt7915_l2_rmw(dev, addr, val, 0) - bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask); void mt7915_mac_reset_counters(struct mt7915_phy *phy); void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy); @@ -486,6 +412,7 @@ void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, void mt7915_mac_work(struct work_struct *work); void mt7915_mac_reset_work(struct work_struct *work); void mt7915_mac_sta_rc_work(struct work_struct *work); +int mt7915_mmio_init(struct mt76_dev *mdev, void __iomem *mem_base, int irq); int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, enum mt76_txq_id qid, struct mt76_wcid *wcid, struct ieee80211_sta *sta, diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c index 13880cc9c9e8..75769595d1e1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c @@ -201,7 +201,7 @@ static void mt7915_pci_init_hif2(struct mt7915_dev *dev) } /* master switch of PCIe tnterrupt enable */ - mt7915_l1_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff); + mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff); } static int mt7915_pci_hif2_probe(struct pci_dev *pdev) @@ -274,15 +274,12 @@ static int mt7915_pci_probe(struct pci_dev *pdev, if (ret) goto error; - mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]); - mdev->rev = (mt7915_l1_rr(dev, MT_HW_CHIPID) << 16) | - (mt7915_l1_rr(dev, MT_HW_REV) & 0xff); - dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev); - - mt76_wr(dev, MT_INT_MASK_CSR, 0); + ret = mt7915_mmio_init(mdev, pcim_iomap_table(pdev)[0], pdev->irq); + if (ret) + goto error; /* master switch of PCIe tnterrupt enable */ - mt7915_l1_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); + mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); ret = devm_request_irq(mdev->dev, pdev->irq, mt7915_irq_handler, IRQF_SHARED, KBUILD_MODNAME, dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h index ed0c9a24bb53..dfb8880657bf 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h @@ -4,6 +4,11 @@ #ifndef __MT7915_REGS_H #define __MT7915_REGS_H +/* MCU WFDMA0 */ +#define MT_MCU_WFDMA0_BASE 0x2000 +#define MT_MCU_WFDMA0(ofs) (MT_MCU_WFDMA0_BASE + (ofs)) +#define MT_MCU_WFDMA0_DUMMY_CR MT_MCU_WFDMA0(0x120) + /* MCU WFDMA1 */ #define MT_MCU_WFDMA1_BASE 0x3000 #define MT_MCU_WFDMA1(ofs) (MT_MCU_WFDMA1_BASE + (ofs)) @@ -396,6 +401,14 @@ #define MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1 BIT(1) #define MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO BIT(2) +#define 
MT_TOP_RGU_BASE 0xf0000 +#define MT_TOP_PWR_CTRL (MT_TOP_RGU_BASE + (0x0)) +#define MT_TOP_PWR_KEY (0x5746 << 16) +#define MT_TOP_PWR_SW_RST BIT(0) +#define MT_TOP_PWR_SW_PWR_ON GENMASK(3, 2) +#define MT_TOP_PWR_HW_CTRL BIT(4) +#define MT_TOP_PWR_PWR_ON BIT(7) + #define MT_INFRA_CFG_BASE 0xf1000 #define MT_INFRA(ofs) (MT_INFRA_CFG_BASE + (ofs)) diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/Makefile b/drivers/net/wireless/mediatek/mt76/mt7921/Makefile index 09d1446ad933..e531666f9fb4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/Makefile +++ b/drivers/net/wireless/mediatek/mt76/mt7921/Makefile @@ -2,4 +2,6 @@ obj-$(CONFIG_MT7921E) += mt7921e.o -mt7921e-y := pci.o mac.o mcu.o dma.o eeprom.o main.o init.o debugfs.o +CFLAGS_trace.o := -I$(src) + +mt7921e-y := pci.o mac.o mcu.o dma.o eeprom.o main.o init.o debugfs.o trace.o diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c index c1a64ff6a197..94f0320ab2d4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c @@ -44,14 +44,13 @@ mt7921_ampdu_stat_read_phy(struct mt7921_phy *phy, range[i] = mt76_rr(dev, MT_MIB_ARNG(0, i)); for (i = 0; i < ARRAY_SIZE(bound); i++) - bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i) + 1; + bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i % 4) + 1; seq_printf(file, "\nPhy0\n"); seq_printf(file, "Length: %8d | ", bound[0]); for (i = 0; i < ARRAY_SIZE(bound) - 1; i++) - seq_printf(file, "%3d -%3d | ", - bound[i] + 1, bound[i + 1]); + seq_printf(file, "%3d %3d | ", bound[i] + 1, bound[i + 1]); seq_puts(file, "\nCount: "); for (i = 0; i < ARRAY_SIZE(bound); i++) @@ -152,7 +151,6 @@ mt7921_pm_set(void *data, u64 val) { struct mt7921_dev *dev = data; struct mt76_phy *mphy = dev->phy.mt76; - int ret = 0; mt7921_mutex_acquire(dev); @@ -163,7 +161,7 @@ mt7921_pm_set(void *data, u64 val) mt7921_pm_interface_iter, mphy->priv); mt7921_mutex_release(dev); - return ret; + return 0; } static int diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c index cd9665610284..992faf82ad09 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c @@ -299,7 +299,7 @@ int mt7921_dma_init(struct mt7921_dev *dev) if (ret < 0) return ret; - netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi, + netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, mt7921_poll_tx, NAPI_POLL_WEIGHT); napi_enable(&dev->mt76.tx_napi); @@ -323,7 +323,7 @@ int mt7921_dma_init(struct mt7921_dev *dev) mt76_set(dev, MT_WFDMA0_GLO_CFG, MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN); - mt76_set(dev, 0x54000120, BIT(1)); + mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT); /* enable interrupts for TX/RX rings */ mt7921_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL | diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c index 89a13b4a74a4..5bb0a7b9e9e5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c @@ -77,6 +77,9 @@ mt7921_init_wiphy(struct ieee80211_hw *hw) hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; + hw->radiotap_timestamp.units_pos = + IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US; + phy->slottime = 9; hw->sta_data_size = sizeof(struct mt7921_sta); @@ -95,6 +98,7 @@ 
mt7921_init_wiphy(struct ieee80211_hw *hw) wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; wiphy->reg_notifier = mt7921_regd_notifier; + wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL); ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); @@ -142,7 +146,7 @@ mt7921_mac_init_band(struct mt7921_dev *dev, u8 band) mt76_clear(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN); } -static void mt7921_mac_init(struct mt7921_dev *dev) +void mt7921_mac_init(struct mt7921_dev *dev) { int i; @@ -232,7 +236,6 @@ int mt7921_register_device(struct mt7921_dev *dev) INIT_LIST_HEAD(&dev->sta_poll_list); spin_lock_init(&dev->sta_poll_lock); - init_waitqueue_head(&dev->reset_wait); INIT_WORK(&dev->reset_work, mt7921_mac_reset_work); ret = mt7921_init_hardware(dev); @@ -250,9 +253,6 @@ int mt7921_register_device(struct mt7921_dev *dev) dev->mphy.sband_5g.sband.vht_cap.cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; - dev->mphy.sband_5g.sband.vht_cap.cap |= - IEEE80211_VHT_CAP_SHORT_GI_160 | - IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ; dev->mphy.hw->wiphy->available_antennas_rx = dev->mphy.chainmask; dev->mphy.hw->wiphy->available_antennas_tx = dev->mphy.chainmask; @@ -273,8 +273,6 @@ void mt7921_unregister_device(struct mt7921_dev *dev) { mt76_unregister_device(&dev->mt76); mt7921_mcu_exit(dev); - mt7921_dma_cleanup(dev); - mt7921_tx_token_put(dev); tasklet_disable(&dev->irq_tasklet); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c index 3f9097481a5e..b507f3917830 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c @@ -9,8 +9,6 @@ #include "mac.h" #include "mcu.h" -#define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2) - #define HE_BITS(f) cpu_to_le16(IEEE80211_RADIOTAP_HE_##f) #define HE_PREP(f, m, v) le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\ IEEE80211_RADIOTAP_HE_##f) @@ -51,14 +49,6 @@ bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask) 0, 5000); } -static u32 mt7921_mac_wtbl_lmac_addr(struct mt7921_dev *dev, u16 wcid) -{ - mt76_wr(dev, MT_WTBLON_TOP_WDUCR, - FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7))); - - return MT_WTBL_LMAC_OFFS(wcid, 0); -} - static void mt7921_mac_sta_poll(struct mt7921_dev *dev) { static const u8 ac_to_tid[] = { @@ -95,7 +85,7 @@ static void mt7921_mac_sta_poll(struct mt7921_dev *dev) spin_unlock_bh(&dev->sta_poll_lock); idx = msta->wcid.idx; - addr = mt7921_mac_wtbl_lmac_addr(dev, idx) + 20 * 4; + addr = MT_WTBL_LMAC_OFFS(idx, 0) + 20 * 4; for (i = 0; i < IEEE80211_NUM_ACS; i++) { u32 tx_last = msta->airtime_ac[i]; @@ -285,6 +275,37 @@ mt7921_get_status_freq_info(struct mt7921_dev *dev, struct mt76_phy *mphy, status->freq = ieee80211_channel_to_frequency(chfreq, status->band); } +static void +mt7921_mac_rssi_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) +{ + struct sk_buff *skb = priv; + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb); + + if (status->signal > 0) + return; + + if (!ether_addr_equal(vif->addr, hdr->addr1)) + return; + + ewma_rssi_add(&mvif->rssi, -status->signal); +} + +static void +mt7921_mac_assoc_rssi(struct mt7921_dev *dev, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb); + + if 
(!ieee80211_is_assoc_resp(hdr->frame_control) && + !ieee80211_is_auth(hdr->frame_control)) + return; + + ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7921_mac_rssi_iter, skb); +} + int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) { struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; @@ -349,19 +370,6 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED; } - if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) { - status->flag |= RX_FLAG_AMPDU_DETAILS; - - /* all subframes of an A-MPDU have the same timestamp */ - if (phy->rx_ampdu_ts != rxd[14]) { - if (!++phy->ampdu_ref) - phy->ampdu_ref++; - } - phy->rx_ampdu_ts = rxd[14]; - - status->ampdu_ref = phy->ampdu_ref; - } - remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2); if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR) @@ -393,6 +401,22 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) } if (rxd1 & MT_RXD1_NORMAL_GROUP_2) { + status->timestamp = le32_to_cpu(rxd[0]); + status->flag |= RX_FLAG_MACTIME_START; + + if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) { + status->flag |= RX_FLAG_AMPDU_DETAILS; + + /* all subframes of an A-MPDU have the same timestamp */ + if (phy->rx_ampdu_ts != status->timestamp) { + if (!++phy->ampdu_ref) + phy->ampdu_ref++; + } + phy->rx_ampdu_ts = status->timestamp; + + status->ampdu_ref = phy->ampdu_ref; + } + rxd += 2; if ((u8 *)rxd - skb->data >= skb->len) return -EINVAL; @@ -400,7 +424,9 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) /* RXD Group 3 - P-RXV */ if (rxd1 & MT_RXD1_NORMAL_GROUP_3) { - u32 v0, v1, v2; + u8 stbc, gi; + u32 v0, v1; + bool cck; rxv = rxd; rxd += 2; @@ -409,7 +435,6 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) v0 = le32_to_cpu(rxv[0]); v1 = le32_to_cpu(rxv[1]); - v2 = le32_to_cpu(rxv[2]); if (v0 & MT_PRXV_HT_AD_CODE) status->enc_flags |= RX_ENC_FLAG_LDPC; @@ -429,87 +454,87 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) status->chain_signal[i]); } - /* RXD Group 5 - C-RXV */ - if (rxd1 & MT_RXD1_NORMAL_GROUP_5) { - u8 stbc = FIELD_GET(MT_CRXV_HT_STBC, v2); - u8 gi = FIELD_GET(MT_CRXV_HT_SHORT_GI, v2); - bool cck = false; + stbc = FIELD_GET(MT_PRXV_STBC, v0); + gi = FIELD_GET(MT_PRXV_SGI, v0); + cck = false; - rxd += 18; - if ((u8 *)rxd - skb->data >= skb->len) - return -EINVAL; + idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0); + mode = FIELD_GET(MT_PRXV_TX_MODE, v0); - idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0); - mode = FIELD_GET(MT_CRXV_TX_MODE, v2); - - switch (mode) { - case MT_PHY_TYPE_CCK: - cck = true; - fallthrough; - case MT_PHY_TYPE_OFDM: - i = mt76_get_rate(&dev->mt76, sband, i, cck); - break; - case MT_PHY_TYPE_HT_GF: - case MT_PHY_TYPE_HT: - status->encoding = RX_ENC_HT; - if (i > 31) - return -EINVAL; - break; - case MT_PHY_TYPE_VHT: - status->nss = - FIELD_GET(MT_PRXV_NSTS, v0) + 1; - status->encoding = RX_ENC_VHT; - if (i > 9) - return -EINVAL; - break; - case MT_PHY_TYPE_HE_MU: - status->flag |= RX_FLAG_RADIOTAP_HE_MU; - fallthrough; - case MT_PHY_TYPE_HE_SU: - case MT_PHY_TYPE_HE_EXT_SU: - case MT_PHY_TYPE_HE_TB: - status->nss = - FIELD_GET(MT_PRXV_NSTS, v0) + 1; - status->encoding = RX_ENC_HE; - status->flag |= RX_FLAG_RADIOTAP_HE; - i &= GENMASK(3, 0); - - if (gi <= NL80211_RATE_INFO_HE_GI_3_2) - status->he_gi = gi; - - status->he_dcm = !!(idx & MT_PRXV_TX_DCM); - break; - default: + switch (mode) { + case MT_PHY_TYPE_CCK: + cck = true; + 
fallthrough; + case MT_PHY_TYPE_OFDM: + i = mt76_get_rate(&dev->mt76, sband, i, cck); + break; + case MT_PHY_TYPE_HT_GF: + case MT_PHY_TYPE_HT: + status->encoding = RX_ENC_HT; + if (i > 31) return -EINVAL; - } - status->rate_idx = i; - - switch (FIELD_GET(MT_CRXV_FRAME_MODE, v2)) { - case IEEE80211_STA_RX_BW_20: - break; - case IEEE80211_STA_RX_BW_40: - if (mode & MT_PHY_TYPE_HE_EXT_SU && - (idx & MT_PRXV_TX_ER_SU_106T)) { - status->bw = RATE_INFO_BW_HE_RU; - status->he_ru = - NL80211_RATE_INFO_HE_RU_ALLOC_106; - } else { - status->bw = RATE_INFO_BW_40; - } - break; - case IEEE80211_STA_RX_BW_80: - status->bw = RATE_INFO_BW_80; - break; - case IEEE80211_STA_RX_BW_160: - status->bw = RATE_INFO_BW_160; - break; - default: + break; + case MT_PHY_TYPE_VHT: + status->nss = + FIELD_GET(MT_PRXV_NSTS, v0) + 1; + status->encoding = RX_ENC_VHT; + if (i > 9) return -EINVAL; + break; + case MT_PHY_TYPE_HE_MU: + status->flag |= RX_FLAG_RADIOTAP_HE_MU; + fallthrough; + case MT_PHY_TYPE_HE_SU: + case MT_PHY_TYPE_HE_EXT_SU: + case MT_PHY_TYPE_HE_TB: + status->nss = + FIELD_GET(MT_PRXV_NSTS, v0) + 1; + status->encoding = RX_ENC_HE; + status->flag |= RX_FLAG_RADIOTAP_HE; + i &= GENMASK(3, 0); + + if (gi <= NL80211_RATE_INFO_HE_GI_3_2) + status->he_gi = gi; + + status->he_dcm = !!(idx & MT_PRXV_TX_DCM); + break; + default: + return -EINVAL; + } + + status->rate_idx = i; + + switch (FIELD_GET(MT_PRXV_FRAME_MODE, v0)) { + case IEEE80211_STA_RX_BW_20: + break; + case IEEE80211_STA_RX_BW_40: + if (mode & MT_PHY_TYPE_HE_EXT_SU && + (idx & MT_PRXV_TX_ER_SU_106T)) { + status->bw = RATE_INFO_BW_HE_RU; + status->he_ru = + NL80211_RATE_INFO_HE_RU_ALLOC_106; + } else { + status->bw = RATE_INFO_BW_40; } + break; + case IEEE80211_STA_RX_BW_80: + status->bw = RATE_INFO_BW_80; + break; + case IEEE80211_STA_RX_BW_160: + status->bw = RATE_INFO_BW_160; + break; + default: + return -EINVAL; + } - status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc; - if (mode < MT_PHY_TYPE_HE_SU && gi) - status->enc_flags |= RX_ENC_FLAG_SHORT_GI; + status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc; + if (mode < MT_PHY_TYPE_HE_SU && gi) + status->enc_flags |= RX_ENC_FLAG_SHORT_GI; + + if (rxd1 & MT_RXD1_NORMAL_GROUP_5) { + rxd += 18; + if ((u8 *)rxd - skb->data >= skb->len) + return -EINVAL; } } @@ -521,6 +546,8 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) mt76_insert_ccmp_hdr(skb, key_id); } + mt7921_mac_assoc_rssi(dev, skb); + if (rxv && status->flag & RX_FLAG_RADIOTAP_HE) mt7921_mac_decode_he_radiotap(skb, status, rxv, mode); @@ -530,7 +557,7 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) status->aggr = unicast && !ieee80211_is_qos_nullfunc(hdr->frame_control); - status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; + status->qos_ctl = *ieee80211_get_qos_ctl(hdr); status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); return 0; @@ -1183,43 +1210,76 @@ void mt7921_update_channel(struct mt76_dev *mdev) mt76_connac_power_save_sched(&dev->mphy, &dev->pm); } -static bool -mt7921_wait_reset_state(struct mt7921_dev *dev, u32 state) +int mt7921_wfsys_reset(struct mt7921_dev *dev) { - bool ret; + mt76_set(dev, 0x70002600, BIT(0)); + msleep(200); + mt76_clear(dev, 0x70002600, BIT(0)); - ret = wait_event_timeout(dev->reset_wait, - (READ_ONCE(dev->reset_state) & state), - MT7921_RESET_TIMEOUT); - - WARN(!ret, "Timeout waiting for MCU reset state %x\n", state); - return ret; + return __mt76_poll_msec(&dev->mt76, MT_WFSYS_SW_RST_B, + WFSYS_SW_INIT_DONE, WFSYS_SW_INIT_DONE, 500); } 
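The mt7921_wfsys_reset() helper added above toggles the WFSYS software-reset bit and then polls MT_WFSYS_SW_RST_B until WFSYS_SW_INIT_DONE latches. The bounded register poll is the load-bearing part of that sequence; below is a minimal, self-contained sketch of the same pattern, with a fake register file standing in for MMIO (poll_reg(), reg_read() and friends are illustrative names, not the mt76 API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Fake register file so the sketch runs anywhere; a real driver would go
 * through its bus accessors (mt76_rr()/mt76_set() in the code above). */
static uint32_t fake_regs[16];

static uint32_t reg_read(unsigned int idx) { return fake_regs[idx]; }
static void reg_set(unsigned int idx, uint32_t bits) { fake_regs[idx] |= bits; }

/* Bounded poll: succeed once (reg & mask) == expected, give up after
 * max_tries checks -- the same shape as the __mt76_poll_msec() call in
 * mt7921_wfsys_reset(), minus the per-iteration sleep a driver would add. */
static bool poll_reg(unsigned int idx, uint32_t mask, uint32_t expected,
		     unsigned int max_tries)
{
	while (max_tries--) {
		if ((reg_read(idx) & mask) == expected)
			return true;
		/* a real driver sleeps here, e.g. usleep_range() */
	}
	return false;
}

int main(void)
{
	reg_set(0, 1u << 4); /* pretend an INIT_DONE-style flag came up */
	printf("init done: %d\n", poll_reg(0, 1u << 4, 1u << 4, 500));
	return 0;
}

Returning a plain boolean keeps the caller's error handling to a single branch, which is why mt7921_wfsys_reset() can hand the poll result straight back as its return value.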
static void -mt7921_dma_reset(struct mt7921_phy *phy) +mt7921_dma_reset(struct mt7921_dev *dev) { - struct mt7921_dev *dev = phy->dev; int i; + /* reset */ + mt76_clear(dev, MT_WFDMA0_RST, + MT_WFDMA0_RST_DMASHDL_ALL_RST | MT_WFDMA0_RST_LOGIC_RST); + + mt76_set(dev, MT_WFDMA0_RST, + MT_WFDMA0_RST_DMASHDL_ALL_RST | MT_WFDMA0_RST_LOGIC_RST); + + /* disable WFDMA0 */ mt76_clear(dev, MT_WFDMA0_GLO_CFG, - MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN); + MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN | + MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN | + MT_WFDMA0_GLO_CFG_OMIT_TX_INFO | + MT_WFDMA0_GLO_CFG_OMIT_RX_INFO | + MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2); - usleep_range(1000, 2000); + mt76_poll(dev, MT_WFDMA0_GLO_CFG, + MT_WFDMA0_GLO_CFG_TX_DMA_BUSY | + MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 1000); - mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], true); + /* reset hw queues */ for (i = 0; i < __MT_TXQ_MAX; i++) - mt76_queue_tx_cleanup(dev, phy->mt76->q_tx[i], true); + mt76_queue_reset(dev, dev->mphy.q_tx[i]); - mt76_for_each_q_rx(&dev->mt76, i) { - mt76_queue_rx_reset(dev, i); - } + for (i = 0; i < __MT_MCUQ_MAX; i++) + mt76_queue_reset(dev, dev->mt76.q_mcu[i]); + + mt76_for_each_q_rx(&dev->mt76, i) + mt76_queue_reset(dev, &dev->mt76.q_rx[i]); - /* re-init prefetch settings after reset */ + /* configure prefetch settings */ mt7921_dma_prefetch(dev); + /* reset dma idx */ + mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0); + + /* configure delay interrupt */ + mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0); + + mt76_set(dev, MT_WFDMA0_GLO_CFG, + MT_WFDMA0_GLO_CFG_TX_WB_DDONE | + MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN | + MT_WFDMA0_GLO_CFG_CLK_GAT_DIS | + MT_WFDMA0_GLO_CFG_OMIT_TX_INFO | + MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN | + MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2); + mt76_set(dev, MT_WFDMA0_GLO_CFG, MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN); + + mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT); + + /* enable interrupts for TX/RX rings */ + mt7921_irq_enable(dev, + MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL | + MT_INT_MCU_CMD); } void mt7921_tx_token_put(struct mt7921_dev *dev) @@ -1243,71 +1303,125 @@ void mt7921_tx_token_put(struct mt7921_dev *dev) idr_destroy(&dev->token); } -/* system error recovery */ -void mt7921_mac_reset_work(struct work_struct *work) +static void +mt7921_vif_connect_iter(void *priv, u8 *mac, + struct ieee80211_vif *vif) { - struct mt7921_dev *dev; + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct mt7921_dev *dev = mvif->phy->dev; - dev = container_of(work, struct mt7921_dev, reset_work); + ieee80211_disconnect(vif, true); - if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_DMA)) - return; + mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, true); + mt7921_mcu_set_tx(dev, vif); +} - ieee80211_stop_queues(mt76_hw(dev)); +static int +mt7921_mac_reset(struct mt7921_dev *dev) +{ + int i, err; + + mt76_connac_free_pending_tx_skbs(&dev->pm, NULL); + + mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0); + mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0); - set_bit(MT76_RESET, &dev->mphy.state); set_bit(MT76_MCU_RESET, &dev->mphy.state); wake_up(&dev->mt76.mcu.wait); - cancel_delayed_work_sync(&dev->mphy.mac_work); + skb_queue_purge(&dev->mt76.mcu.res_q); - /* lock/unlock all queues to ensure that no tx is pending */ mt76_txq_schedule_all(&dev->mphy); mt76_worker_disable(&dev->mt76.tx_worker); - napi_disable(&dev->mt76.napi[0]); - napi_disable(&dev->mt76.napi[1]); - napi_disable(&dev->mt76.napi[2]); +
napi_disable(&dev->mt76.napi[MT_RXQ_MAIN]); + napi_disable(&dev->mt76.napi[MT_RXQ_MCU]); + napi_disable(&dev->mt76.napi[MT_RXQ_MCU_WA]); napi_disable(&dev->mt76.tx_napi); - mt7921_mutex_acquire(dev); - - mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED); - mt7921_tx_token_put(dev); idr_init(&dev->token); - if (mt7921_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) { - mt7921_dma_reset(&dev->phy); + /* clean up hw queues */ + for (i = 0; i < ARRAY_SIZE(dev->mt76.phy.q_tx); i++) + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true); - mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT); - mt7921_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE); - } + for (i = 0; i < ARRAY_SIZE(dev->mt76.q_mcu); i++) + mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true); - clear_bit(MT76_MCU_RESET, &dev->mphy.state); - clear_bit(MT76_RESET, &dev->mphy.state); + mt76_for_each_q_rx(&dev->mt76, i) + mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]); + + mt7921_wfsys_reset(dev); + mt7921_dma_reset(dev); + + mt76_for_each_q_rx(&dev->mt76, i) { + mt76_queue_rx_reset(dev, i); + napi_enable(&dev->mt76.napi[i]); + napi_schedule(&dev->mt76.napi[i]); + } - mt76_worker_enable(&dev->mt76.tx_worker); napi_enable(&dev->mt76.tx_napi); napi_schedule(&dev->mt76.tx_napi); + mt76_worker_enable(&dev->mt76.tx_worker); + + clear_bit(MT76_MCU_RESET, &dev->mphy.state); + + mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0); + mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); + mt7921_irq_enable(dev, + MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL | + MT_INT_MCU_CMD); + + err = mt7921_run_firmware(dev); + if (err) + return err; - napi_enable(&dev->mt76.napi[0]); - napi_schedule(&dev->mt76.napi[0]); + err = mt7921_mcu_set_eeprom(dev); + if (err) + return err; - napi_enable(&dev->mt76.napi[1]); - napi_schedule(&dev->mt76.napi[1]); + mt7921_mac_init(dev); + return __mt7921_start(&dev->phy); +} - napi_enable(&dev->mt76.napi[2]); - napi_schedule(&dev->mt76.napi[2]); +/* system error recovery */ +void mt7921_mac_reset_work(struct work_struct *work) +{ + struct ieee80211_hw *hw; + struct mt7921_dev *dev; + int i; - ieee80211_wake_queues(mt76_hw(dev)); + dev = container_of(work, struct mt7921_dev, reset_work); + hw = mt76_hw(dev); - mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE); - mt7921_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE); + dev_err(dev->mt76.dev, "chip reset\n"); + ieee80211_stop_queues(hw); - mt7921_mutex_release(dev); + cancel_delayed_work_sync(&dev->mphy.mac_work); + cancel_delayed_work_sync(&dev->pm.ps_work); + cancel_work_sync(&dev->pm.wake_work); - ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work, - MT7921_WATCHDOG_TIME); + mutex_lock(&dev->mt76.mutex); + for (i = 0; i < 10; i++) { + if (!mt7921_mac_reset(dev)) + break; + } + mutex_unlock(&dev->mt76.mutex); + + if (i == 10) + dev_err(dev->mt76.dev, "chip reset failed\n"); + + ieee80211_wake_queues(hw); + ieee80211_iterate_active_interfaces(hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7921_vif_connect_iter, 0); +} + +void mt7921_reset(struct mt76_dev *mdev) +{ + struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + + queue_work(dev->mt76.wq, &dev->reset_work); } static void @@ -1317,31 +1431,20 @@ mt7921_mac_update_mib_stats(struct mt7921_phy *phy) struct mib_stats *mib = &phy->mib; int i, aggr0 = 0, aggr1; - memset(mib, 0, sizeof(*mib)); - - mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(0), - MT_MIB_SDR3_FCS_ERR_MASK); + mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(0), + MT_MIB_SDR3_FCS_ERR_MASK); + mib->ack_fail_cnt += 
mt76_get_field(dev, MT_MIB_MB_BSDR3(0), + MT_MIB_ACK_FAIL_COUNT_MASK); + mib->ba_miss_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR2(0), + MT_MIB_BA_FAIL_COUNT_MASK); + mib->rts_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR0(0), + MT_MIB_RTS_COUNT_MASK); + mib->rts_retries_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR1(0), + MT_MIB_RTS_FAIL_COUNT_MASK); for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) { u32 val, val2; - val = mt76_rr(dev, MT_MIB_MB_SDR1(0, i)); - - val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val); - if (val2 > mib->ack_fail_cnt) - mib->ack_fail_cnt = val2; - - val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val); - if (val2 > mib->ba_miss_cnt) - mib->ba_miss_cnt = val2; - - val = mt76_rr(dev, MT_MIB_MB_SDR0(0, i)); - val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val); - if (val2 > mib->rts_retries_cnt) { - mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val); - mib->rts_retries_cnt = val2; - } - val = mt76_rr(dev, MT_TX_AGG_CNT(0, i)); val2 = mt76_rr(dev, MT_TX_AGG_CNT2(0, i)); @@ -1399,7 +1502,7 @@ void mt7921_mac_work(struct work_struct *work) if (++phy->sta_work_count == 10) { phy->sta_work_count = 0; mt7921_mac_sta_stats_work(phy); - }; + } mt7921_mutex_release(phy->dev); @@ -1503,8 +1606,10 @@ void mt7921_coredump_work(struct work_struct *work) break; skb_pull(skb, sizeof(struct mt7921_mcu_rxd)); - if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) - break; + if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) { + dev_kfree_skb(skb); + continue; + } memcpy(data, skb->data, skb->len); data += skb->len; @@ -1513,4 +1618,5 @@ void mt7921_coredump_work(struct work_struct *work) } dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ, GFP_KERNEL); + mt7921_reset(&dev->mt76); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h index a0c1fa0f20e4..109c8849d106 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h @@ -97,18 +97,24 @@ enum rx_pkt_type { #define MT_RXD3_NORMAL_PF_MODE BIT(29) #define MT_RXD3_NORMAL_PF_STS GENMASK(31, 30) -/* P-RXV */ +/* P-RXV DW0 */ #define MT_PRXV_TX_RATE GENMASK(6, 0) #define MT_PRXV_TX_DCM BIT(4) #define MT_PRXV_TX_ER_SU_106T BIT(5) #define MT_PRXV_NSTS GENMASK(9, 7) #define MT_PRXV_HT_AD_CODE BIT(11) +#define MT_PRXV_FRAME_MODE GENMASK(14, 12) +#define MT_PRXV_SGI GENMASK(16, 15) +#define MT_PRXV_STBC GENMASK(23, 22) +#define MT_PRXV_TX_MODE GENMASK(27, 24) #define MT_PRXV_HE_RU_ALLOC_L GENMASK(31, 28) -#define MT_PRXV_HE_RU_ALLOC_H GENMASK(3, 0) + +/* P-RXV DW1 */ #define MT_PRXV_RCPI3 GENMASK(31, 24) #define MT_PRXV_RCPI2 GENMASK(23, 16) #define MT_PRXV_RCPI1 GENMASK(15, 8) #define MT_PRXV_RCPI0 GENMASK(7, 0) +#define MT_PRXV_HE_RU_ALLOC_H GENMASK(3, 0) /* C-RXV */ #define MT_CRXV_HT_STBC GENMASK(1, 0) diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c index 729f6c42cdde..d9ec7d9bb1c4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c @@ -125,10 +125,6 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band, he_mcs->rx_mcs_80 = cpu_to_le16(mcs_map); he_mcs->tx_mcs_80 = cpu_to_le16(mcs_map); - he_mcs->rx_mcs_160 = cpu_to_le16(mcs_map); - he_mcs->tx_mcs_160 = cpu_to_le16(mcs_map); - he_mcs->rx_mcs_80p80 = cpu_to_le16(mcs_map); - he_mcs->tx_mcs_80p80 = cpu_to_le16(mcs_map); memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres)); if (he_cap_elem->phy_cap_info[6] & @@ -169,28 +165,44 @@ void 
mt7921_set_stream_he_caps(struct mt7921_phy *phy) } } -static int mt7921_start(struct ieee80211_hw *hw) +int __mt7921_start(struct mt7921_phy *phy) { - struct mt7921_dev *dev = mt7921_hw_dev(hw); - struct mt7921_phy *phy = mt7921_hw_phy(hw); + struct mt76_phy *mphy = phy->mt76; + int err; - mt7921_mutex_acquire(dev); + err = mt76_connac_mcu_set_mac_enable(mphy->dev, 0, true, false); + if (err) + return err; - mt76_connac_mcu_set_mac_enable(&dev->mt76, 0, true, false); - mt76_connac_mcu_set_channel_domain(phy->mt76); + err = mt76_connac_mcu_set_channel_domain(mphy); + if (err) + return err; + + err = mt7921_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH); + if (err) + return err; - mt7921_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH); mt7921_mac_reset_counters(phy); - set_bit(MT76_STATE_RUNNING, &phy->mt76->state); + set_bit(MT76_STATE_RUNNING, &mphy->state); - ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work, + ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, MT7921_WATCHDOG_TIME); - mt7921_mutex_release(dev); - return 0; } +static int mt7921_start(struct ieee80211_hw *hw) +{ + struct mt7921_phy *phy = mt7921_hw_phy(hw); + int err; + + mt7921_mutex_acquire(phy->dev); + err = __mt7921_start(phy); + mt7921_mutex_release(phy->dev); + + return err; +} + static void mt7921_stop(struct ieee80211_hw *hw) { struct mt7921_dev *dev = mt7921_hw_dev(hw); @@ -224,9 +236,6 @@ static int get_omac_idx(enum nl80211_iftype type, u64 mask) if (i) return i - 1; - if (type != NL80211_IFTYPE_STATION) - break; - /* next, try to find a free repeater entry for the sta */ i = get_free_idx(mask >> REPEATER_BSSID_START, 0, REPEATER_BSSID_MAX - REPEATER_BSSID_START); @@ -295,15 +304,6 @@ static int mt7921_add_interface(struct ieee80211_hw *hw, if (ret) goto out; - if (dev->pm.enable) { - ret = mt7921_mcu_set_bss_pm(dev, vif, true); - if (ret) - goto out; - - vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; - mt76_set(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON); - } - dev->mt76.vif_mask |= BIT(mvif->mt76.idx); phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx); @@ -318,6 +318,8 @@ static int mt7921_add_interface(struct ieee80211_hw *hw, mt7921_mac_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + ewma_rssi_init(&mvif->rssi); + rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid); if (vif->txq) { mtxq = (struct mt76_txq *)vif->txq->drv_priv; @@ -348,19 +350,12 @@ static void mt7921_remove_interface(struct ieee80211_hw *hw, if (vif == phy->monitor_vif) phy->monitor_vif = NULL; + mt7921_mutex_acquire(dev); mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid); - - if (dev->pm.enable) { - mt7921_mcu_set_bss_pm(dev, vif, false); - mt76_clear(dev, MT_WF_RFCR(0), - MT_WF_RFCR_DROP_OTHER_BEACON); - } - mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, false); rcu_assign_pointer(dev->mt76.wcid[idx], NULL); - mt7921_mutex_acquire(dev); dev->mt76.vif_mask &= ~BIT(mvif->mt76.idx); phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx); mt7921_mutex_release(dev); @@ -413,7 +408,8 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct mt7921_sta *msta = sta ? (struct mt7921_sta *)sta->drv_priv : &mvif->sta; struct mt76_wcid *wcid = &msta->wcid; - int idx = key->keyidx; + u8 *wcid_keyidx = &wcid->hw_key_idx; + int idx = key->keyidx, err = 0; /* The hardware does not support per-STA RX GTK, fallback * to software mode for these. 
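The __mt7921_start()/mt7921_start() split earlier in this file's diff follows a common kernel convention: a double-underscore helper that assumes the device mutex is already held, wrapped by a thin public entry point that takes and releases it, so already-locked paths (such as mt7921_mac_reset() in mac.c above) can reuse the helper without deadlocking. A minimal sketch of that convention, using plain pthreads instead of the driver's mt7921_mutex_acquire()/release() helpers (all names here are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;
static int dev_running;

/* Double-underscore worker: caller must already hold dev_mutex, so other
 * locked paths can call it directly without recursive locking. */
static int __start_device(void)
{
	dev_running = 1;
	return 0;
}

/* Public entry point: take the lock, delegate, release. */
static int start_device(void)
{
	int err;

	pthread_mutex_lock(&dev_mutex);
	err = __start_device();
	pthread_mutex_unlock(&dev_mutex);

	return err;
}

int main(void)
{
	printf("start: %d, running: %d\n", start_device(), dev_running);
	return 0;
}

Propagating the helper's error code through the wrapper, as the reworked mt7921_start() now does, also means MCU failures are no longer silently dropped the way the old unconditional "return 0" dropped them.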
@@ -429,6 +425,7 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, switch (key->cipher) { case WLAN_CIPHER_SUITE_AES_CMAC: key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE; + wcid_keyidx = &wcid->hw_key_idx2; break; case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_CCMP: @@ -443,23 +440,29 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, return -EOPNOTSUPP; } - if (cmd == SET_KEY) { - key->hw_key_idx = wcid->idx; - wcid->hw_key_idx = idx; - } else if (idx == wcid->hw_key_idx) { - wcid->hw_key_idx = -1; - } + mt7921_mutex_acquire(dev); + + if (cmd == SET_KEY) + *wcid_keyidx = idx; + else if (idx == *wcid_keyidx) + *wcid_keyidx = -1; + else + goto out; + mt76_wcid_key_setup(&dev->mt76, wcid, cmd == SET_KEY ? key : NULL); - return mt7921_mcu_add_key(dev, vif, msta, key, cmd); + err = mt7921_mcu_add_key(dev, vif, msta, key, cmd); +out: + mt7921_mutex_release(dev); + + return err; } static int mt7921_config(struct ieee80211_hw *hw, u32 changed) { struct mt7921_dev *dev = mt7921_hw_dev(hw); struct mt7921_phy *phy = mt7921_hw_phy(hw); - bool band = phy != &dev->phy; int ret; if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { @@ -480,9 +483,9 @@ static int mt7921_config(struct ieee80211_hw *hw, u32 changed) else phy->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC; - mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN, + mt76_rmw_field(dev, MT_DMA_DCR0(0), MT_DMA_DCR0_RXD_G5_EN, enabled); - mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter); + mt76_wr(dev, MT_WF_RFCR(0), phy->rxfilter); } mt7921_mutex_release(dev); @@ -511,7 +514,6 @@ static void mt7921_configure_filter(struct ieee80211_hw *hw, { struct mt7921_dev *dev = mt7921_hw_dev(hw); struct mt7921_phy *phy = mt7921_hw_phy(hw); - bool band = phy != &dev->phy; u32 ctl_flags = MT_WF_RFCR1_DROP_ACK | MT_WF_RFCR1_DROP_BF_POLL | MT_WF_RFCR1_DROP_BA | @@ -551,16 +553,46 @@ static void mt7921_configure_filter(struct ieee80211_hw *hw, MT_WF_RFCR_DROP_NDPA); *total_flags = flags; - mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter); + mt76_wr(dev, MT_WF_RFCR(0), phy->rxfilter); if (*total_flags & FIF_CONTROL) - mt76_clear(dev, MT_WF_RFCR1(band), ctl_flags); + mt76_clear(dev, MT_WF_RFCR1(0), ctl_flags); else - mt76_set(dev, MT_WF_RFCR1(band), ctl_flags); + mt76_set(dev, MT_WF_RFCR1(0), ctl_flags); mt7921_mutex_release(dev); } +static int +mt7921_bss_bcnft_apply(struct mt7921_dev *dev, struct ieee80211_vif *vif, + bool assoc) +{ + int ret; + + if (!dev->pm.enable) + return 0; + + if (assoc) { + ret = mt7921_mcu_uni_bss_bcnft(dev, vif, true); + if (ret) + return ret; + + vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; + mt76_set(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON); + + return 0; + } + + ret = mt7921_mcu_set_bss_pm(dev, vif, false); + if (ret) + return ret; + + vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER; + mt76_clear(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON); + + return 0; +} + static void mt7921_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *info, @@ -587,6 +619,12 @@ static void mt7921_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_PS) mt7921_mcu_uni_bss_ps(dev, vif); + if (changed & BSS_CHANGED_ASSOC) + mt7921_bss_bcnft_apply(dev, vif, info->assoc); + + if (changed & BSS_CHANGED_ARP_FILTER) + mt7921_mcu_update_arp_filter(hw, vif, info); + mt7921_mutex_release(dev); } @@ -596,6 +634,15 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct mt7921_dev *dev = container_of(mdev, struct 
mt7921_dev, mt76); struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv; struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + int rssi = -ewma_rssi_read(&mvif->rssi); + struct mt76_sta_cmd_info info = { + .sta = sta, + .vif = vif, + .enable = true, + .cmd = MCU_UNI_CMD_STA_REC_UPDATE, + .wcid = &msta->wcid, + .rcpi = to_rcpi(rssi), + }; int ret, idx; idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7921_WTBL_STA - 1); @@ -622,8 +669,7 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, mt7921_mac_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); - ret = mt76_connac_mcu_add_sta_cmd(&dev->mphy, vif, sta, &msta->wcid, - true, MCU_UNI_CMD_STA_REC_UPDATE); + ret = mt76_connac_mcu_add_sta_cmd(&dev->mphy, &info); if (ret) return ret; @@ -637,21 +683,28 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, { struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv; + struct mt76_sta_cmd_info info = { + .sta = sta, + .vif = vif, + .cmd = MCU_UNI_CMD_STA_REC_UPDATE, + .wcid = &msta->wcid, + }; mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid); mt76_connac_pm_wake(&dev->mphy, &dev->pm); - mt76_connac_mcu_add_sta_cmd(&dev->mphy, vif, sta, &msta->wcid, false, - MCU_UNI_CMD_STA_REC_UPDATE); + mt76_connac_mcu_add_sta_cmd(&dev->mphy, &info); mt7921_mac_wtbl_update(dev, msta->wcid.idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); - if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) { + if (vif->type == NL80211_IFTYPE_STATION) { struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, &mvif->sta.wcid, - false); + ewma_rssi_init(&mvif->rssi); + if (!sta->tdls) + mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, + &mvif->sta.wcid, false); } spin_lock_bh(&dev->sta_poll_lock); @@ -814,11 +867,17 @@ mt7921_get_stats(struct ieee80211_hw *hw, struct mt7921_phy *phy = mt7921_hw_phy(hw); struct mib_stats *mib = &phy->mib; + mt7921_mutex_acquire(phy->dev); + stats->dot11RTSSuccessCount = mib->rts_cnt; stats->dot11RTSFailureCount = mib->rts_retries_cnt; stats->dot11FCSErrorCount = mib->fcs_err_cnt; stats->dot11ACKFailureCount = mib->ack_fail_cnt; + memset(mib, 0, sizeof(*mib)); + + mt7921_mutex_release(phy->dev); + return 0; } @@ -827,9 +886,7 @@ mt7921_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; struct mt7921_dev *dev = mt7921_hw_dev(hw); - struct mt7921_phy *phy = mt7921_hw_phy(hw); u8 omac_idx = mvif->mt76.omac_idx; - bool band = phy != &dev->phy; union { u64 t64; u32 t32[2]; @@ -840,9 +897,9 @@ mt7921_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) n = omac_idx > HW_BSSID_MAX ? 
HW_BSSID_0 : omac_idx; /* TSF software read */ - mt76_set(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE); - tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0(band)); - tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1(band)); + mt76_set(dev, MT_LPON_TCR(0, n), MT_LPON_TCR_SW_MODE); + tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0(0)); + tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1(0)); mt7921_mutex_release(dev); @@ -855,9 +912,7 @@ mt7921_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, { struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; struct mt7921_dev *dev = mt7921_hw_dev(hw); - struct mt7921_phy *phy = mt7921_hw_phy(hw); u8 omac_idx = mvif->mt76.omac_idx; - bool band = phy != &dev->phy; union { u64 t64; u32 t32[2]; @@ -867,10 +922,10 @@ mt7921_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mt7921_mutex_acquire(dev); n = omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : omac_idx; - mt76_wr(dev, MT_LPON_UTTR0(band), tsf.t32[0]); - mt76_wr(dev, MT_LPON_UTTR1(band), tsf.t32[1]); + mt76_wr(dev, MT_LPON_UTTR0(0), tsf.t32[0]); + mt76_wr(dev, MT_LPON_UTTR1(0), tsf.t32[1]); /* TSF software overwrite */ - mt76_set(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_WRITE); + mt76_set(dev, MT_LPON_TCR(0, n), MT_LPON_TCR_SW_WRITE); mt7921_mutex_release(dev); } @@ -1008,14 +1063,6 @@ mt7921_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) return 0; } -static void -mt7921_sta_rc_update(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - u32 changed) -{ -} - static void mt7921_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -1120,6 +1167,15 @@ static void mt7921_set_rekey_data(struct ieee80211_hw *hw, } #endif /* CONFIG_PM */ +static void mt7921_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop) +{ + struct mt7921_dev *dev = mt7921_hw_dev(hw); + + wait_event_timeout(dev->mt76.tx_wait, !mt76_has_tx_pending(&dev->mphy), + HZ / 2); +} + const struct ieee80211_ops mt7921_ops = { .tx = mt7921_tx, .start = mt7921_start, @@ -1133,7 +1189,6 @@ const struct ieee80211_ops mt7921_ops = { .sta_add = mt7921_sta_add, .sta_remove = mt7921_sta_remove, .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove, - .sta_rc_update = mt7921_sta_rc_update, .set_key = mt7921_set_key, .ampdu_action = mt7921_ampdu_action, .set_rts_threshold = mt7921_set_rts_threshold, @@ -1158,4 +1213,5 @@ const struct ieee80211_ops mt7921_ops = { .set_wakeup = mt7921_set_wakeup, .set_rekey_data = mt7921_set_rekey_data, #endif /* CONFIG_PM */ + .flush = mt7921_flush, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c index b5cc72e7e81c..aa55667b6ed7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c @@ -4,6 +4,7 @@ #include <linux/firmware.h> #include <linux/fs.h> #include "mt7921.h" +#include "mt7921_trace.h" #include "mcu.h" #include "mac.h" @@ -222,8 +223,16 @@ mt7921_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, u32 val; u8 seq; - /* TODO: make dynamic based on msg type */ - mdev->mcu.timeout = 20 * HZ; + switch (cmd) { + case MCU_UNI_CMD_HIF_CTRL: + case MCU_UNI_CMD_SUSPEND: + case MCU_UNI_CMD_OFFLOAD: + mdev->mcu.timeout = HZ / 3; + break; + default: + mdev->mcu.timeout = 3 * HZ; + break; + } seq = ++dev->mt76.mcu.msg_seq & 0xf; if (!seq) @@ -404,9 +413,12 @@ mt7921_mcu_tx_rate_report(struct mt7921_dev *dev, struct sk_buff *skb, if (wlan_idx >= MT76_N_WCIDS) return; + + rcu_read_lock(); + wcid = 
rcu_dereference(dev->mt76.wcid[wlan_idx]); if (!wcid) - return; + goto out; msta = container_of(wcid, struct mt7921_sta, wcid); stats = &msta->stats; @@ -414,6 +426,8 @@ mt7921_mcu_tx_rate_report(struct mt7921_dev *dev, struct sk_buff *skb, /* current rate */ mt7921_mcu_tx_rate_parse(mphy, &peer, &rate, curr); stats->tx_rate = rate; +out: + rcu_read_unlock(); } static void @@ -466,33 +480,45 @@ mt7921_mcu_bss_event(struct mt7921_dev *dev, struct sk_buff *skb) static void mt7921_mcu_debug_msg_event(struct mt7921_dev *dev, struct sk_buff *skb) { - struct mt7921_mcu_rxd *rxd = (struct mt7921_mcu_rxd *)skb->data; - struct debug_msg { + struct mt7921_debug_msg { __le16 id; u8 type; u8 flag; __le32 value; __le16 len; u8 content[512]; - } __packed * debug_msg; - u16 cur_len; - int i; - - skb_pull(skb, sizeof(*rxd)); - debug_msg = (struct debug_msg *)skb->data; + } __packed * msg; - cur_len = min_t(u16, le16_to_cpu(debug_msg->len), 512); + skb_pull(skb, sizeof(struct mt7921_mcu_rxd)); + msg = (struct mt7921_debug_msg *)skb->data; - if (debug_msg->type == 0x3) { - for (i = 0 ; i < cur_len; i++) - if (!debug_msg->content[i]) - debug_msg->content[i] = ' '; + if (msg->type == 3) { /* fw log */ + u16 len = min_t(u16, le16_to_cpu(msg->len), 512); + int i; - dev_dbg(dev->mt76.dev, "%s", debug_msg->content); + for (i = 0 ; i < len; i++) { + if (!msg->content[i]) + msg->content[i] = ' '; + } + wiphy_info(mt76_hw(dev)->wiphy, "%*s", len, msg->content); } } static void +mt7921_mcu_low_power_event(struct mt7921_dev *dev, struct sk_buff *skb) +{ + struct mt7921_mcu_lp_event { + u8 state; + u8 reserved[3]; + } __packed * event; + + skb_pull(skb, sizeof(struct mt7921_mcu_rxd)); + event = (struct mt7921_mcu_lp_event *)skb->data; + + trace_lp_event(dev, event->state); +} + +static void mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb) { struct mt7921_mcu_rxd *rxd = (struct mt7921_mcu_rxd *)skb->data; @@ -515,6 +541,9 @@ mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb) mt76_connac_mcu_coredump_event(&dev->mt76, skb, &dev->coredump); return; + case MCU_EVENT_LP_INFO: + mt7921_mcu_low_power_event(dev, skb); + break; default: break; } @@ -537,6 +566,7 @@ void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb) rxd->eid == MCU_EVENT_SCAN_DONE || rxd->eid == MCU_EVENT_DBG_MSG || rxd->eid == MCU_EVENT_COREDUMP || + rxd->eid == MCU_EVENT_LP_INFO || !rxd->seq) mt7921_mcu_rx_unsolicited_event(dev, skb); else @@ -919,6 +949,24 @@ int mt7921_mcu_fw_log_2_host(struct mt7921_dev *dev, u8 ctrl) sizeof(data), false); } +int mt7921_run_firmware(struct mt7921_dev *dev) +{ + int err; + + err = mt7921_driver_own(dev); + if (err) + return err; + + err = mt7921_load_firmware(dev); + if (err) + return err; + + set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state); + mt7921_mcu_fw_log_2_host(dev, 1); + + return 0; +} + int mt7921_mcu_init(struct mt7921_dev *dev) { static const struct mt76_mcu_ops mt7921_mcu_ops = { @@ -926,39 +974,17 @@ int mt7921_mcu_init(struct mt7921_dev *dev) .mcu_skb_send_msg = mt7921_mcu_send_message, .mcu_parse_response = mt7921_mcu_parse_response, .mcu_restart = mt7921_mcu_restart, + .mcu_reset = mt7921_reset, }; - int ret; dev->mt76.mcu_ops = &mt7921_mcu_ops; - ret = mt7921_driver_own(dev); - if (ret) - return ret; - - ret = mt7921_load_firmware(dev); - if (ret) - return ret; - - set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state); - mt7921_mcu_fw_log_2_host(dev, 1); - - return 0; + return mt7921_run_firmware(dev); } void mt7921_mcu_exit(struct 
mt7921_dev *dev) { - u32 reg = mt7921_reg_map_l1(dev, MT_TOP_MISC); - - __mt76_mcu_restart(&dev->mt76); - if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE, - FIELD_PREP(MT_TOP_MISC_FW_STATE, - FW_STATE_FW_DOWNLOAD), 1000)) { - dev_err(dev->mt76.dev, "Failed to exit mcu\n"); - return; - } - - reg = mt7921_reg_map_l1(dev, MT_TOP_LPCR_HOST_BAND0); - mt76_wr(dev, reg, MT_TOP_LPCR_HOST_FW_OWN); + mt7921_wfsys_reset(dev); skb_queue_purge(&dev->mt76.mcu.res_q); } @@ -1255,6 +1281,7 @@ int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev) if (i == MT7921_DRV_OWN_RETRY_COUNT) { dev_err(dev->mt76.dev, "driver own failed\n"); + mt7921_reset(&dev->mt76); return -EIO; } @@ -1281,6 +1308,7 @@ int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev) if (i == MT7921_DRV_OWN_RETRY_COUNT) { dev_err(dev->mt76.dev, "firmware own failed\n"); + mt7921_reset(&dev->mt76); return -EIO; } @@ -1292,8 +1320,14 @@ mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) { struct mt7921_phy *phy = priv; struct mt7921_dev *dev = phy->dev; + int ret; + + if (dev->pm.enable) + ret = mt7921_mcu_uni_bss_bcnft(dev, vif, true); + else + ret = mt7921_mcu_set_bss_pm(dev, vif, false); - if (mt7921_mcu_set_bss_pm(dev, vif, dev->pm.enable)) + if (ret) return; if (dev->pm.enable) { @@ -1304,3 +1338,47 @@ mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) mt76_clear(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON); } } + +int mt7921_mcu_update_arp_filter(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info) +{ + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct sk_buff *skb; + int i, len = min_t(int, info->arp_addr_cnt, + IEEE80211_BSS_ARP_ADDR_LIST_LEN); + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct mt76_connac_arpns_tlv arp; + } req_hdr = { + .hdr = { + .bss_idx = mvif->mt76.idx, + }, + .arp = { + .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP), + .len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)), + .ips_num = len, + .mode = 2, /* update */ + .option = 1, + }, + }; + + skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, + sizeof(req_hdr) + len * sizeof(__be32)); + if (!skb) + return -ENOMEM; + + skb_put_data(skb, &req_hdr, sizeof(req_hdr)); + for (i = 0; i < len; i++) { + u8 *addr = (u8 *)skb_put(skb, sizeof(__be32)); + + memcpy(addr, &info->arp_addr_list[i], sizeof(__be32)); + } + + return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_UNI_CMD_OFFLOAD, + true); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h index 2fdc62367b3f..13e125f32283 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h @@ -79,6 +79,7 @@ struct mt7921_uni_txd { /* event table */ enum { MCU_EVENT_REG_ACCESS = 0x05, + MCU_EVENT_LP_INFO = 0x07, MCU_EVENT_SCAN_DONE = 0x0d, MCU_EVENT_BSS_ABSENCE = 0x11, MCU_EVENT_BSS_BEACON_LOSS = 0x13, @@ -177,25 +178,6 @@ enum { MCU_PHY_STATE_OFDMLQ_CNINFO, }; -#define STA_TYPE_STA BIT(0) -#define STA_TYPE_AP BIT(1) -#define STA_TYPE_ADHOC BIT(2) -#define STA_TYPE_WDS BIT(4) -#define STA_TYPE_BC BIT(5) - -#define NETWORK_INFRA BIT(16) -#define NETWORK_P2P BIT(17) -#define NETWORK_IBSS BIT(18) -#define NETWORK_WDS BIT(21) - -#define CONNECTION_INFRA_STA (STA_TYPE_STA | NETWORK_INFRA) -#define CONNECTION_INFRA_AP (STA_TYPE_AP | NETWORK_INFRA) -#define CONNECTION_P2P_GC (STA_TYPE_STA | NETWORK_P2P) -#define CONNECTION_P2P_GO (STA_TYPE_AP | NETWORK_P2P) 
-#define CONNECTION_IBSS_ADHOC (STA_TYPE_ADHOC | NETWORK_IBSS) -#define CONNECTION_WDS (STA_TYPE_WDS | NETWORK_WDS) -#define CONNECTION_INFRA_BC (STA_TYPE_BC | NETWORK_INFRA) - struct sec_key { u8 cipher_id; u8 cipher_len; @@ -251,29 +233,6 @@ enum { MT_IBF = BIT(1) /* implicit beamforming */ }; -#define MT7921_WTBL_UPDATE_MAX_SIZE (sizeof(struct wtbl_req_hdr) + \ - sizeof(struct wtbl_generic) + \ - sizeof(struct wtbl_rx) + \ - sizeof(struct wtbl_ht) + \ - sizeof(struct wtbl_vht) + \ - sizeof(struct wtbl_hdr_trans) +\ - sizeof(struct wtbl_ba) + \ - sizeof(struct wtbl_smps)) - -#define MT7921_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \ - sizeof(struct sta_rec_basic) + \ - sizeof(struct sta_rec_ht) + \ - sizeof(struct sta_rec_he) + \ - sizeof(struct sta_rec_ba) + \ - sizeof(struct sta_rec_vht) + \ - sizeof(struct sta_rec_uapsd) + \ - sizeof(struct sta_rec_amsdu) + \ - sizeof(struct tlv) + \ - MT7921_WTBL_UPDATE_MAX_SIZE) - -#define MT7921_WTBL_UPDATE_BA_SIZE (sizeof(struct wtbl_req_hdr) + \ - sizeof(struct wtbl_ba)) - #define STA_CAP_WMM BIT(0) #define STA_CAP_SGI_20 BIT(4) #define STA_CAP_SGI_40 BIT(5) diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h index 46e6aeec35ae..e3d83d3d954c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h @@ -46,6 +46,9 @@ #define MT7921_SKU_MAX_DELTA_IDX MT7921_SKU_RATE_NUM #define MT7921_SKU_TABLE_SIZE (MT7921_SKU_RATE_NUM + 1) +#define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2) +#define to_rcpi(rssi) (2 * (rssi) + 220) + struct mt7921_vif; struct mt7921_sta; @@ -92,21 +95,25 @@ struct mt7921_sta { struct mt7921_sta_key_conf bip; }; +DECLARE_EWMA(rssi, 10, 8); + struct mt7921_vif { struct mt76_vif mt76; /* must be first */ struct mt7921_sta sta; struct mt7921_phy *phy; + struct ewma_rssi rssi; + struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS]; }; struct mib_stats { - u16 ack_fail_cnt; - u16 fcs_err_cnt; - u16 rts_cnt; - u16 rts_retries_cnt; - u16 ba_miss_cnt; + u32 ack_fail_cnt; + u32 fcs_err_cnt; + u32 rts_cnt; + u32 rts_retries_cnt; + u32 ba_miss_cnt; }; struct mt7921_phy { @@ -125,7 +132,7 @@ struct mt7921_phy { s16 coverage_class; u8 slottime; - __le32 rx_ampdu_ts; + u32 rx_ampdu_ts; u32 ampdu_ref; struct mib_stats mib; @@ -151,8 +158,6 @@ struct mt7921_dev { struct work_struct init_work; struct work_struct reset_work; - wait_queue_head_t reset_wait; - u32 reset_state; struct list_head sta_poll_list; spinlock_t sta_poll_lock; @@ -209,6 +214,7 @@ extern struct pci_driver mt7921_pci_driver; u32 mt7921_reg_map(struct mt7921_dev *dev, u32 addr); +int __mt7921_start(struct mt7921_phy *phy); int mt7921_register_device(struct mt7921_dev *dev); void mt7921_unregister_device(struct mt7921_dev *dev); int mt7921_eeprom_init(struct mt7921_dev *dev); @@ -220,6 +226,7 @@ void mt7921_eeprom_init_sku(struct mt7921_dev *dev); int mt7921_dma_init(struct mt7921_dev *dev); void mt7921_dma_prefetch(struct mt7921_dev *dev); void mt7921_dma_cleanup(struct mt7921_dev *dev); +int mt7921_run_firmware(struct mt7921_dev *dev); int mt7921_mcu_init(struct mt7921_dev *dev); int mt7921_mcu_add_bss_info(struct mt7921_phy *phy, struct ieee80211_vif *vif, int enable); @@ -281,6 +288,7 @@ mt7921_l1_rmw(struct mt7921_dev *dev, u32 addr, u32 mask, u32 val) #define mt7921_l1_set(dev, addr, val) mt7921_l1_rmw(dev, addr, 0, val) #define mt7921_l1_clear(dev, addr, val) mt7921_l1_rmw(dev, addr, val, 0) +void 
mt7921_mac_init(struct mt7921_dev *dev); bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask); void mt7921_mac_reset_counters(struct mt7921_phy *phy); void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi, @@ -296,6 +304,7 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); void mt7921_mac_work(struct work_struct *work); void mt7921_mac_reset_work(struct work_struct *work); +void mt7921_reset(struct mt76_dev *mdev); int mt7921_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, enum mt76_txq_id qid, struct mt76_wcid *wcid, struct ieee80211_sta *sta, @@ -339,4 +348,8 @@ int mt7921_mac_set_beacon_filter(struct mt7921_phy *phy, bool enable); void mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif); void mt7921_coredump_work(struct work_struct *work); +int mt7921_mcu_update_arp_filter(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info); +int mt7921_wfsys_reset(struct mt7921_dev *dev); #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921_trace.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921_trace.h new file mode 100644 index 000000000000..9bc4db67f352 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921_trace.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (C) 2021 Lorenzo Bianconi <lorenzo@kernel.org> + */ + +#if !defined(__MT7921_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define __MT7921_TRACE_H + +#include <linux/tracepoint.h> +#include "mt7921.h" + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mt7921 + +#define MAXNAME 32 +#define DEV_ENTRY __array(char, wiphy_name, 32) +#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \ + wiphy_name(mt76_hw(dev)->wiphy), MAXNAME) +#define DEV_PR_FMT "%s" +#define DEV_PR_ARG __entry->wiphy_name +#define LP_STATE_PR_ARG __entry->lp_state ? "lp ready" : "lp not ready" + +TRACE_EVENT(lp_event, + TP_PROTO(struct mt7921_dev *dev, u8 lp_state), + + TP_ARGS(dev, lp_state), + + TP_STRUCT__entry( + DEV_ENTRY + __field(u8, lp_state) + ), + + TP_fast_assign( + DEV_ASSIGN; + __entry->lp_state = lp_state; + ), + + TP_printk( + DEV_PR_FMT " %s", + DEV_PR_ARG, LP_STATE_PR_ARG + ) +); + +#endif + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
+#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE mt7921_trace + +#include <trace/define_trace.h> diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c index 5570b4a50531..40e2086d075c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c @@ -13,7 +13,7 @@ #include "../trace.h" static const struct pci_device_id mt7921_pci_device_table[] = { - { PCI_DEVICE(0x14c3, 0x7961) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7961) }, { }, }; @@ -137,7 +137,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev, mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0); - mt7921_l1_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); + mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); ret = devm_request_irq(mdev->dev, pdev->irq, mt7921_irq_handler, IRQF_SHARED, KBUILD_MODNAME, dev); @@ -146,10 +146,12 @@ static int mt7921_pci_probe(struct pci_dev *pdev, ret = mt7921_register_device(dev); if (ret) - goto err_free_dev; + goto err_free_irq; return 0; +err_free_irq: + devm_free_irq(&pdev->dev, pdev->irq, dev); err_free_dev: mt76_free_device(&dev->mt76); err_free_pci_vec: @@ -209,12 +211,12 @@ static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state) /* disable interrupt */ mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0); - pci_save_state(pdev); - err = pci_set_power_state(pdev, pci_choose_state(pdev, state)); + err = mt7921_mcu_fw_pmctrl(dev); if (err) goto restore; - err = mt7921_mcu_drv_pmctrl(dev); + pci_save_state(pdev); + err = pci_set_power_state(pdev, pci_choose_state(pdev, state)); if (err) goto restore; @@ -237,18 +239,18 @@ static int mt7921_pci_resume(struct pci_dev *pdev) struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); int i, err; - err = mt7921_mcu_fw_pmctrl(dev); - if (err < 0) - return err; - err = pci_set_power_state(pdev, PCI_D0); if (err) return err; pci_restore_state(pdev); + err = mt7921_mcu_drv_pmctrl(dev); + if (err < 0) + return err; + /* enable interrupt */ - mt7921_l1_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); + mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); mt7921_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL | MT_INT_MCU_CMD); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h index 6dad7f6ab09d..76ecfea21dce 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h @@ -96,8 +96,8 @@ #define MT_WF_MIB_BASE(_band) ((_band) ? 
0xa4800 : 0x24800) #define MT_WF_MIB(_band, ofs) (MT_WF_MIB_BASE(_band) + (ofs)) -#define MT_MIB_SDR3(_band) MT_WF_MIB(_band, 0x014) -#define MT_MIB_SDR3_FCS_ERR_MASK GENMASK(15, 0) +#define MT_MIB_SDR3(_band) MT_WF_MIB(_band, 0x698) +#define MT_MIB_SDR3_FCS_ERR_MASK GENMASK(31, 16) #define MT_MIB_SDR9(_band) MT_WF_MIB(_band, 0x02c) #define MT_MIB_SDR9_BUSY_MASK GENMASK(23, 0) @@ -121,16 +121,21 @@ #define MT_MIB_RTS_RETRIES_COUNT_MASK GENMASK(31, 16) #define MT_MIB_RTS_COUNT_MASK GENMASK(15, 0) -#define MT_MIB_MB_SDR1(_band, n) MT_WF_MIB(_band, 0x104 + ((n) << 4)) -#define MT_MIB_BA_MISS_COUNT_MASK GENMASK(15, 0) -#define MT_MIB_ACK_FAIL_COUNT_MASK GENMASK(31, 16) +#define MT_MIB_MB_BSDR0(_band) MT_WF_MIB(_band, 0x688) +#define MT_MIB_RTS_COUNT_MASK GENMASK(15, 0) +#define MT_MIB_MB_BSDR1(_band) MT_WF_MIB(_band, 0x690) +#define MT_MIB_RTS_FAIL_COUNT_MASK GENMASK(15, 0) +#define MT_MIB_MB_BSDR2(_band) MT_WF_MIB(_band, 0x518) +#define MT_MIB_BA_FAIL_COUNT_MASK GENMASK(15, 0) +#define MT_MIB_MB_BSDR3(_band) MT_WF_MIB(_band, 0x520) +#define MT_MIB_ACK_FAIL_COUNT_MASK GENMASK(15, 0) #define MT_MIB_MB_SDR2(_band, n) MT_WF_MIB(_band, 0x108 + ((n) << 4)) #define MT_MIB_FRAME_RETRIES_COUNT_MASK GENMASK(15, 0) -#define MT_TX_AGG_CNT(_band, n) MT_WF_MIB(_band, 0x0a8 + ((n) << 2)) -#define MT_TX_AGG_CNT2(_band, n) MT_WF_MIB(_band, 0x164 + ((n) << 2)) -#define MT_MIB_ARNG(_band, n) MT_WF_MIB(_band, 0x4b8 + ((n) << 2)) +#define MT_TX_AGG_CNT(_band, n) MT_WF_MIB(_band, 0x7dc + ((n) << 2)) +#define MT_TX_AGG_CNT2(_band, n) MT_WF_MIB(_band, 0x7ec + ((n) << 2)) +#define MT_MIB_ARNG(_band, n) MT_WF_MIB(_band, 0x0b0 + ((n) << 2)) #define MT_MIB_ARNCR_RANGE(val, n) (((val) >> ((n) << 3)) & GENMASK(7, 0)) #define MT_WTBLON_TOP_BASE 0x34000 @@ -357,11 +362,11 @@ #define MT_INFRA_CFG_BASE 0xfe000 #define MT_INFRA(ofs) (MT_INFRA_CFG_BASE + (ofs)) -#define MT_HIF_REMAP_L1 MT_INFRA(0x260) +#define MT_HIF_REMAP_L1 MT_INFRA(0x24c) #define MT_HIF_REMAP_L1_MASK GENMASK(15, 0) #define MT_HIF_REMAP_L1_OFFSET GENMASK(15, 0) #define MT_HIF_REMAP_L1_BASE GENMASK(31, 16) -#define MT_HIF_REMAP_BASE_L1 0xe0000 +#define MT_HIF_REMAP_BASE_L1 0x40000 #define MT_SWDEF_BASE 0x41f200 #define MT_SWDEF(ofs) (MT_SWDEF_BASE + (ofs)) @@ -380,11 +385,17 @@ #define MT_TOP_MISC MT_TOP(0xf0) #define MT_TOP_MISC_FW_STATE GENMASK(2, 0) +#define MT_MCU_WPDMA0_BASE 0x54000000 +#define MT_MCU_WPDMA0(ofs) (MT_MCU_WPDMA0_BASE + (ofs)) + +#define MT_WFDMA_DUMMY_CR MT_MCU_WPDMA0(0x120) +#define MT_WFDMA_NEED_REINIT BIT(1) + #define MT_HW_BOUND 0x70010020 #define MT_HW_CHIPID 0x70010200 #define MT_HW_REV 0x70010204 -#define MT_PCIE_MAC_BASE 0x74030000 +#define MT_PCIE_MAC_BASE 0x10000 #define MT_PCIE_MAC(ofs) (MT_PCIE_MAC_BASE + (ofs)) #define MT_PCIE_MAC_INT_ENABLE MT_PCIE_MAC(0x188) @@ -413,6 +424,10 @@ #define PCIE_LPCR_HOST_CLR_OWN BIT(1) #define PCIE_LPCR_HOST_SET_OWN BIT(0) +#define MT_WFSYS_SW_RST_B 0x18000140 +#define WFSYS_SW_RST_B BIT(0) +#define WFSYS_SW_INIT_DONE BIT(4) + #define MT_CONN_ON_MISC 0x7c0600f0 #define MT_TOP_MISC2_FW_N9_RDY GENMASK(1, 0) diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/trace.c b/drivers/net/wireless/mediatek/mt76/mt7921/trace.c new file mode 100644 index 000000000000..4dc3c7b89ebd --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7921/trace.c @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (C) 2021 Lorenzo Bianconi <lorenzo@kernel.org> + */ + +#include <linux/module.h> + +#ifndef __CHECKER__ +#define CREATE_TRACE_POINTS +#include "mt7921_trace.h" + +#endif diff --git 
a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c index 0b6facb17ff7..a18d2896ee1f 100644 --- a/drivers/net/wireless/mediatek/mt76/sdio.c +++ b/drivers/net/wireless/mediatek/mt76/sdio.c @@ -256,6 +256,9 @@ mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, q->entry[q->head].skb = tx_info.skb; q->entry[q->head].buf_sz = len; + + smp_wmb(); + q->head = (q->head + 1) % q->ndesc; q->queued++; diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index b8fe8adc43a3..451ed60c6296 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -461,11 +461,11 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid) int ret = 0; while (1) { + int n_frames = 0; + if (test_bit(MT76_STATE_PM, &phy->state) || - test_bit(MT76_RESET, &phy->state)) { - ret = -EBUSY; - break; - } + test_bit(MT76_RESET, &phy->state)) + return -EBUSY; if (dev->queue_ops->tx_cleanup && q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) { @@ -497,11 +497,16 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid) } if (!mt76_txq_stopped(q)) - ret += mt76_txq_send_burst(phy, q, mtxq); + n_frames = mt76_txq_send_burst(phy, q, mtxq); spin_unlock_bh(&q->lock); ieee80211_return_txq(phy->hw, txq, false); + + if (unlikely(n_frames < 0)) + return n_frames; + + ret += n_frames; } return ret; diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.c b/drivers/net/wireless/mediatek/mt7601u/eeprom.c index c868582c5d22..aa3b64902cf9 100644 --- a/drivers/net/wireless/mediatek/mt7601u/eeprom.c +++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.c @@ -99,7 +99,7 @@ mt7601u_has_tssi(struct mt7601u_dev *dev, u8 *eeprom) { u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1); - return ~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN); + return (u16)~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN); } static void diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c index cada48800928..5d9e952b2966 100644 --- a/drivers/net/wireless/mediatek/mt7601u/init.c +++ b/drivers/net/wireless/mediatek/mt7601u/init.c @@ -610,6 +610,7 @@ int mt7601u_register_device(struct mt7601u_dev *dev) wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR; wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); + wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); diff --git a/drivers/net/wireless/microchip/wilc1000/Kconfig b/drivers/net/wireless/microchip/wilc1000/Kconfig index 7f15e42602dd..62cfcdc9aacc 100644 --- a/drivers/net/wireless/microchip/wilc1000/Kconfig +++ b/drivers/net/wireless/microchip/wilc1000/Kconfig @@ -27,6 +27,7 @@ config WILC1000_SPI depends on CFG80211 && INET && SPI select WILC1000 select CRC7 + select CRC_ITU_T help This module adds support for the SPI interface of adapters using WILC1000 chipset. 
The Atmel WILC1000 has a Serial Peripheral diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c index 1b205e7d97a8..7e4d9235251c 100644 --- a/drivers/net/wireless/microchip/wilc1000/netdev.c +++ b/drivers/net/wireless/microchip/wilc1000/netdev.c @@ -24,12 +24,10 @@ static irqreturn_t isr_uh_routine(int irq, void *user_data) { - struct net_device *dev = user_data; - struct wilc_vif *vif = netdev_priv(dev); - struct wilc *wilc = vif->wilc; + struct wilc *wilc = user_data; if (wilc->close) { - netdev_err(dev, "Can't handle UH interrupt\n"); + pr_err("Can't handle UH interrupt"); return IRQ_HANDLED; } return IRQ_WAKE_THREAD; @@ -37,12 +35,10 @@ static irqreturn_t isr_uh_routine(int irq, void *user_data) static irqreturn_t isr_bh_routine(int irq, void *userdata) { - struct net_device *dev = userdata; - struct wilc_vif *vif = netdev_priv(userdata); - struct wilc *wilc = vif->wilc; + struct wilc *wilc = userdata; if (wilc->close) { - netdev_err(dev, "Can't handle BH interrupt\n"); + pr_err("Can't handle BH interrupt\n"); return IRQ_HANDLED; } @@ -60,7 +56,7 @@ static int init_irq(struct net_device *dev) ret = request_threaded_irq(wl->dev_irq_num, isr_uh_routine, isr_bh_routine, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, - "WILC_IRQ", dev); + "WILC_IRQ", wl); if (ret) { netdev_err(dev, "Failed to request IRQ [%d]\n", ret); return ret; @@ -575,7 +571,6 @@ static int wilc_mac_open(struct net_device *ndev) { struct wilc_vif *vif = netdev_priv(ndev); struct wilc *wl = vif->wilc; - unsigned char mac_add[ETH_ALEN] = {0}; int ret = 0; struct mgmt_frame_regs mgmt_regs = {}; @@ -598,9 +593,12 @@ static int wilc_mac_open(struct net_device *ndev) wilc_set_operation_mode(vif, wilc_get_vif_idx(vif), vif->iftype, vif->idx); - wilc_get_mac_address(vif, mac_add); - netdev_dbg(ndev, "Mac address: %pM\n", mac_add); - ether_addr_copy(ndev->dev_addr, mac_add); + + if (is_valid_ether_addr(ndev->dev_addr)) + wilc_set_mac_address(vif, ndev->dev_addr); + else + wilc_get_mac_address(vif, ndev->dev_addr); + netdev_dbg(ndev, "Mac address: %pM\n", ndev->dev_addr); if (!is_valid_ether_addr(ndev->dev_addr)) { netdev_err(ndev, "Wrong MAC address\n"); @@ -639,7 +637,14 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p) int srcu_idx; if (!is_valid_ether_addr(addr->sa_data)) - return -EINVAL; + return -EADDRNOTAVAIL; + + if (!vif->mac_opened) { + eth_commit_mac_addr_change(dev, p); + return 0; + } + + /* Verify MAC Address is not already in use: */ srcu_idx = srcu_read_lock(&wilc->srcu); list_for_each_entry_rcu(tmp_vif, &wilc->vif_list, list) { @@ -647,7 +652,7 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p) if (ether_addr_equal(addr->sa_data, mac_addr)) { if (vif != tmp_vif) { srcu_read_unlock(&wilc->srcu, srcu_idx); - return -EINVAL; + return -EADDRNOTAVAIL; } srcu_read_unlock(&wilc->srcu, srcu_idx); return 0; @@ -659,9 +664,7 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p) if (result) return result; - ether_addr_copy(vif->bssid, addr->sa_data); - ether_addr_copy(vif->ndev->dev_addr, addr->sa_data); - + eth_commit_mac_addr_change(dev, p); return result; } diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c index 351ff909ab1c..e14b9fc2c67a 100644 --- a/drivers/net/wireless/microchip/wilc1000/sdio.c +++ b/drivers/net/wireless/microchip/wilc1000/sdio.c @@ -947,7 +947,7 @@ static int wilc_sdio_sync_ext(struct wilc *wilc, int nint) for (i = 0; (i < 3) && (nint > 0); i++, 
nint--) reg |= BIT(i); - ret = wilc_sdio_read_reg(wilc, WILC_INTR2_ENABLE, &reg); + ret = wilc_sdio_write_reg(wilc, WILC_INTR2_ENABLE, reg); if (ret) { dev_err(&func->dev, "Failed write reg (%08x)...\n", diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c index be732929322c..1472e9843896 100644 --- a/drivers/net/wireless/microchip/wilc1000/spi.c +++ b/drivers/net/wireless/microchip/wilc1000/spi.c @@ -7,12 +7,41 @@ #include <linux/clk.h> #include <linux/spi/spi.h> #include <linux/crc7.h> +#include <linux/crc-itu-t.h> #include "netdev.h" #include "cfg80211.h" +static bool enable_crc7; /* protect SPI commands with CRC7 */ +module_param(enable_crc7, bool, 0644); +MODULE_PARM_DESC(enable_crc7, + "Enable CRC7 checksum to protect command transfers\n" + "\t\t\tagainst corruption during the SPI transfer.\n" + "\t\t\tCommand transfers are short and the CPU-cycle cost\n" + "\t\t\tof enabling this is small."); + +static bool enable_crc16; /* protect SPI data with CRC16 */ +module_param(enable_crc16, bool, 0644); +MODULE_PARM_DESC(enable_crc16, + "Enable CRC16 checksum to protect data transfers\n" + "\t\t\tagainst corruption during the SPI transfer.\n" + "\t\t\tData transfers can be large and the CPU-cycle cost\n" + "\t\t\tof enabling this may be substantial."); + +/* + * For CMD_SINGLE_READ and CMD_INTERNAL_READ, WILC may insert one or + * more zero bytes between the command response and the DATA Start tag + * (0xf3). This behavior appears to be undocumented in "ATWILC1000 + * USER GUIDE" (https://tinyurl.com/4hhshdts) but we have observed 1-4 + * zero bytes when the SPI bus operates at 48MHz and none when it + * operates at 1MHz. + */ +#define WILC_SPI_RSP_HDR_EXTRA_DATA 8 + struct wilc_spi { - int crc_off; + bool probing_crc; /* true if we're probing chip's CRC config */ + bool crc7_enabled; /* true if crc7 is currently enabled */ + bool crc16_enabled; /* true if crc16 is currently enabled */ }; static const struct wilc_hif_func wilc_hif_spi; @@ -36,12 +65,36 @@ static const struct wilc_hif_func wilc_hif_spi; #define CMD_RESET 0xcf #define SPI_ENABLE_VMM_RETRY_LIMIT 2 -#define DATA_PKT_SZ_256 256 -#define DATA_PKT_SZ_512 512 -#define DATA_PKT_SZ_1K 1024 -#define DATA_PKT_SZ_4K (4 * 1024) -#define DATA_PKT_SZ_8K (8 * 1024) -#define DATA_PKT_SZ DATA_PKT_SZ_8K + +/* SPI response fields (section 11.1.2 in ATWILC1000 User Guide): */ +#define RSP_START_FIELD GENMASK(7, 4) +#define RSP_TYPE_FIELD GENMASK(3, 0) + +/* SPI response values for the response fields: */ +#define RSP_START_TAG 0xc +#define RSP_TYPE_FIRST_PACKET 0x1 +#define RSP_TYPE_INNER_PACKET 0x2 +#define RSP_TYPE_LAST_PACKET 0x3 +#define RSP_STATE_NO_ERROR 0x00 + +#define PROTOCOL_REG_PKT_SZ_MASK GENMASK(6, 4) +#define PROTOCOL_REG_CRC16_MASK GENMASK(3, 3) +#define PROTOCOL_REG_CRC7_MASK GENMASK(2, 2) + +/* + * The SPI data packet size may be any integer power of two in the + * range from 256 to 8192 bytes. + */ +#define DATA_PKT_LOG_SZ_MIN 8 /* 256 B */ +#define DATA_PKT_LOG_SZ_MAX 13 /* 8 KiB */ + +/* + * Select the data packet size (log2 of number of bytes): Use the + * maximum data packet size. We only retransmit complete packets, so + * there is no benefit from using smaller data packets. 
+ */ +#define DATA_PKT_LOG_SZ DATA_PKT_LOG_SZ_MAX +#define DATA_PKT_SZ (1 << DATA_PKT_LOG_SZ) #define USE_SPI_DMA 0 @@ -79,16 +132,15 @@ struct wilc_spi_cmd { } __packed; struct wilc_spi_read_rsp_data { - u8 rsp_cmd_type; - u8 status; - u8 resp_header; - u8 resp_data[4]; + u8 header; + u8 data[4]; u8 crc[]; } __packed; struct wilc_spi_rsp_data { u8 rsp_cmd_type; u8 status; + u8 data[]; } __packed; static int wilc_bus_probe(struct spi_device *spi) @@ -281,7 +333,8 @@ static int spi_data_write(struct wilc *wilc, u8 *b, u32 sz) struct wilc_spi *spi_priv = wilc->bus_data; int ix, nbytes; int result = 0; - u8 cmd, order, crc[2] = {0}; + u8 cmd, order, crc[2]; + u16 crc_calc; /* * Data @@ -323,9 +376,12 @@ static int spi_data_write(struct wilc *wilc, u8 *b, u32 sz) } /* - * Write Crc + * Write CRC */ - if (!spi_priv->crc_off) { + if (spi_priv->crc16_enabled) { + crc_calc = crc_itu_t(0xffff, &b[ix], nbytes); + crc[0] = crc_calc >> 8; + crc[1] = crc_calc; if (wilc_spi_tx(wilc, crc, 2)) { dev_err(&spi->dev, "Failed data block crc write, bus error...\n"); result = -EINVAL; @@ -359,10 +415,11 @@ static int wilc_spi_single_read(struct wilc *wilc, u8 cmd, u32 adr, void *b, struct spi_device *spi = to_spi_device(wilc->dev); struct wilc_spi *spi_priv = wilc->bus_data; u8 wb[32], rb[32]; - int cmd_len, resp_len; - u8 crc[2]; + int cmd_len, resp_len, i; + u16 crc_calc, crc_recv; struct wilc_spi_cmd *c; - struct wilc_spi_read_rsp_data *r; + struct wilc_spi_rsp_data *r; + struct wilc_spi_read_rsp_data *r_data; memset(wb, 0x0, sizeof(wb)); memset(rb, 0x0, sizeof(rb)); @@ -384,8 +441,9 @@ static int wilc_spi_single_read(struct wilc *wilc, u8 cmd, u32 adr, void *b, } cmd_len = offsetof(struct wilc_spi_cmd, u.simple_cmd.crc); - resp_len = sizeof(*r); - if (!spi_priv->crc_off) { + resp_len = sizeof(*r) + sizeof(*r_data) + WILC_SPI_RSP_HDR_EXTRA_DATA; + + if (spi_priv->crc7_enabled) { c->u.simple_cmd.crc[0] = wilc_get_crc7(wb, cmd_len); cmd_len += 1; resp_len += 2; @@ -403,11 +461,12 @@ static int wilc_spi_single_read(struct wilc *wilc, u8 cmd, u32 adr, void *b, return -EINVAL; } - r = (struct wilc_spi_read_rsp_data *)&rb[cmd_len]; + r = (struct wilc_spi_rsp_data *)&rb[cmd_len]; if (r->rsp_cmd_type != cmd) { - dev_err(&spi->dev, - "Failed cmd response, cmd (%02x), resp (%02x)\n", - cmd, r->rsp_cmd_type); + if (!spi_priv->probing_crc) + dev_err(&spi->dev, + "Failed cmd, cmd (%02x), resp (%02x)\n", + cmd, r->rsp_cmd_type); return -EINVAL; } @@ -417,17 +476,30 @@ static int wilc_spi_single_read(struct wilc *wilc, u8 cmd, u32 adr, void *b, return -EINVAL; } - if (WILC_GET_RESP_HDR_START(r->resp_header) != 0xf) { - dev_err(&spi->dev, "Error, data read response (%02x)\n", - r->resp_header); + for (i = 0; i < WILC_SPI_RSP_HDR_EXTRA_DATA; ++i) + if (WILC_GET_RESP_HDR_START(r->data[i]) == 0xf) + break; + + if (i >= WILC_SPI_RSP_HDR_EXTRA_DATA) { + dev_err(&spi->dev, "Error, data start missing\n"); return -EINVAL; } - if (b) - memcpy(b, r->resp_data, 4); + r_data = (struct wilc_spi_read_rsp_data *)&r->data[i]; - if (!spi_priv->crc_off) - memcpy(crc, r->crc, 2); + if (b) + memcpy(b, r_data->data, 4); + + if (!clockless && spi_priv->crc16_enabled) { + crc_recv = (r_data->crc[0] << 8) | r_data->crc[1]; + crc_calc = crc_itu_t(0xffff, r_data->data, 4); + if (crc_recv != crc_calc) { + dev_err(&spi->dev, "%s: bad CRC 0x%04x " + "(calculated 0x%04x)\n", __func__, + crc_recv, crc_calc); + return -EINVAL; + } + } return 0; } @@ -454,7 +526,7 @@ static int wilc_spi_write_cmd(struct wilc *wilc, u8 cmd, u32 adr, u32 data, 
c->u.internal_w_cmd.addr[1] = adr; c->u.internal_w_cmd.data = cpu_to_be32(data); cmd_len = offsetof(struct wilc_spi_cmd, u.internal_w_cmd.crc); - if (!spi_priv->crc_off) + if (spi_priv->crc7_enabled) c->u.internal_w_cmd.crc[0] = wilc_get_crc7(wb, cmd_len); } else if (cmd == CMD_SINGLE_WRITE) { c->u.w_cmd.addr[0] = adr >> 16; @@ -462,14 +534,14 @@ static int wilc_spi_write_cmd(struct wilc *wilc, u8 cmd, u32 adr, u32 data, c->u.w_cmd.addr[2] = adr; c->u.w_cmd.data = cpu_to_be32(data); cmd_len = offsetof(struct wilc_spi_cmd, u.w_cmd.crc); - if (!spi_priv->crc_off) + if (spi_priv->crc7_enabled) c->u.w_cmd.crc[0] = wilc_get_crc7(wb, cmd_len); } else { dev_err(&spi->dev, "write cmd [%x] not supported\n", cmd); return -EINVAL; } - if (!spi_priv->crc_off) + if (spi_priv->crc7_enabled) cmd_len += 1; resp_len = sizeof(*r); @@ -507,6 +579,7 @@ static int wilc_spi_dma_rw(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz) { struct spi_device *spi = to_spi_device(wilc->dev); struct wilc_spi *spi_priv = wilc->bus_data; + u16 crc_recv, crc_calc; u8 wb[32], rb[32]; int cmd_len, resp_len; int retry, ix = 0; @@ -525,7 +598,7 @@ static int wilc_spi_dma_rw(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz) c->u.dma_cmd.size[0] = sz >> 8; c->u.dma_cmd.size[1] = sz; cmd_len = offsetof(struct wilc_spi_cmd, u.dma_cmd.crc); - if (!spi_priv->crc_off) + if (spi_priv->crc7_enabled) c->u.dma_cmd.crc[0] = wilc_get_crc7(wb, cmd_len); } else if (cmd == CMD_DMA_EXT_WRITE || cmd == CMD_DMA_EXT_READ) { c->u.dma_cmd_ext.addr[0] = adr >> 16; @@ -535,14 +608,14 @@ static int wilc_spi_dma_rw(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz) c->u.dma_cmd_ext.size[1] = sz >> 8; c->u.dma_cmd_ext.size[2] = sz; cmd_len = offsetof(struct wilc_spi_cmd, u.dma_cmd_ext.crc); - if (!spi_priv->crc_off) + if (spi_priv->crc7_enabled) c->u.dma_cmd_ext.crc[0] = wilc_get_crc7(wb, cmd_len); } else { dev_err(&spi->dev, "dma read write cmd [%x] not supported\n", cmd); return -EINVAL; } - if (!spi_priv->crc_off) + if (spi_priv->crc7_enabled) cmd_len += 1; resp_len = sizeof(*r); @@ -608,12 +681,22 @@ static int wilc_spi_dma_rw(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz) } /* - * Read Crc + * Read CRC */ - if (!spi_priv->crc_off && wilc_spi_rx(wilc, crc, 2)) { - dev_err(&spi->dev, - "Failed block crc read, bus err\n"); - return -EINVAL; + if (spi_priv->crc16_enabled) { + if (wilc_spi_rx(wilc, crc, 2)) { + dev_err(&spi->dev, + "Failed block CRC read, bus err\n"); + return -EINVAL; + } + crc_recv = (crc[0] << 8) | crc[1]; + crc_calc = crc_itu_t(0xffff, &b[ix], nbytes); + if (crc_recv != crc_calc) { + dev_err(&spi->dev, "%s: bad CRC 0x%04x " + "(calculated 0x%04x)\n", __func__, + crc_recv, crc_calc); + return -EINVAL; + } } ix += nbytes; @@ -680,11 +763,13 @@ static int spi_internal_write(struct wilc *wilc, u32 adr, u32 dat) static int spi_internal_read(struct wilc *wilc, u32 adr, u32 *data) { struct spi_device *spi = to_spi_device(wilc->dev); + struct wilc_spi *spi_priv = wilc->bus_data; int result; result = wilc_spi_single_read(wilc, CMD_INTERNAL_READ, adr, data, 0); if (result) { - dev_err(&spi->dev, "Failed internal read cmd...\n"); + if (!spi_priv->probing_crc) + dev_err(&spi->dev, "Failed internal read cmd...\n"); return result; } @@ -721,6 +806,52 @@ static int wilc_spi_write_reg(struct wilc *wilc, u32 addr, u32 data) return 0; } +static int spi_data_rsp(struct wilc *wilc, u8 cmd) +{ + struct spi_device *spi = to_spi_device(wilc->dev); + int result, i; + u8 rsp[4]; + + /* + * The response to data packets is two bytes long. 
For + * efficiency's sake, wilc_spi_write() wisely ignores the + * responses for all packets but the final one. The downside + * of that optimization is that when the final data packet is + * short, we may receive (part of) the response to the + * second-to-last packet before the one for the final packet. + * To handle this, we always read 4 bytes and then search for + * the last byte that contains the "Response Start" code (0xc + * in the top 4 bits). We then know that this byte is the + * first response byte of the final data packet. + */ + result = wilc_spi_rx(wilc, rsp, sizeof(rsp)); + if (result) { + dev_err(&spi->dev, "Failed bus error...\n"); + return result; + } + + for (i = sizeof(rsp) - 2; i >= 0; --i) + if (FIELD_GET(RSP_START_FIELD, rsp[i]) == RSP_START_TAG) + break; + + if (i < 0) { + dev_err(&spi->dev, + "Data packet response missing (%02x %02x %02x %02x)\n", + rsp[0], rsp[1], rsp[2], rsp[3]); + return -1; + } + + /* rsp[i] is the last response start byte */ + + if (FIELD_GET(RSP_TYPE_FIELD, rsp[i]) != RSP_TYPE_LAST_PACKET + || rsp[i + 1] != RSP_STATE_NO_ERROR) { + dev_err(&spi->dev, "Data response error (%02x %02x)\n", + rsp[i], rsp[i + 1]); + return -1; + } + return 0; +} + static int wilc_spi_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size) { struct spi_device *spi = to_spi_device(wilc->dev); @@ -748,7 +879,10 @@ static int wilc_spi_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size) return result; } - return 0; + /* + * Data response + */ + return spi_data_rsp(wilc, CMD_DMA_EXT_WRITE); } /******************************************** @@ -772,7 +906,7 @@ static int wilc_spi_init(struct wilc *wilc, bool resume) u32 reg; u32 chipid; static int isinit; - int ret; + int ret, i; if (isinit) { ret = wilc_spi_read_reg(wilc, WILC_CHIPID, &chipid); @@ -787,42 +921,54 @@ static int wilc_spi_init(struct wilc *wilc, bool resume) */ /* - * TODO: We can remove the CRC trials if there is a definite - * way to reset + * Infer the CRC settings that are currently in effect. This + * is necessary because we can't be sure that the chip has + * been RESET (e.g, after module unload and reload). */ - /* the SPI to it's initial value. */ - ret = spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg); - if (ret) { - /* - * Read failed. Try with CRC off. 
This might happen when module - * is removed but chip isn't reset - */ - spi_priv->crc_off = 1; - dev_err(&spi->dev, - "Failed read with CRC on, retrying with CRC off\n"); + spi_priv->probing_crc = true; + spi_priv->crc7_enabled = enable_crc7; + spi_priv->crc16_enabled = false; /* don't check CRC16 during probing */ + for (i = 0; i < 2; ++i) { ret = spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg); - if (ret) { - /* - * Read failed with both CRC on and off, - * something went bad - */ - dev_err(&spi->dev, "Failed internal read protocol\n"); - return ret; - } + if (ret == 0) + break; + spi_priv->crc7_enabled = !enable_crc7; } - if (spi_priv->crc_off == 0) { - reg &= ~0xc; /* disable crc checking */ - reg &= ~0x70; - reg |= (0x5 << 4); - ret = spi_internal_write(wilc, WILC_SPI_PROTOCOL_OFFSET, reg); - if (ret) { - dev_err(&spi->dev, - "[wilc spi %d]: Failed internal write reg\n", - __LINE__); - return ret; - } - spi_priv->crc_off = 1; + if (ret) { + dev_err(&spi->dev, "Failed with CRC7 on and off.\n"); + return ret; + } + + /* set up the desired CRC configuration: */ + reg &= ~(PROTOCOL_REG_CRC7_MASK | PROTOCOL_REG_CRC16_MASK); + if (enable_crc7) + reg |= PROTOCOL_REG_CRC7_MASK; + if (enable_crc16) + reg |= PROTOCOL_REG_CRC16_MASK; + + /* set up the data packet size: */ + BUILD_BUG_ON(DATA_PKT_LOG_SZ < DATA_PKT_LOG_SZ_MIN + || DATA_PKT_LOG_SZ > DATA_PKT_LOG_SZ_MAX); + reg &= ~PROTOCOL_REG_PKT_SZ_MASK; + reg |= FIELD_PREP(PROTOCOL_REG_PKT_SZ_MASK, + DATA_PKT_LOG_SZ - DATA_PKT_LOG_SZ_MIN); + + /* establish the new setup: */ + ret = spi_internal_write(wilc, WILC_SPI_PROTOCOL_OFFSET, reg); + if (ret) { + dev_err(&spi->dev, + "[wilc spi %d]: Failed internal write reg\n", + __LINE__); + return ret; } + /* update our state to match new protocol settings: */ + spi_priv->crc7_enabled = enable_crc7; + spi_priv->crc16_enabled = enable_crc16; + + /* re-read to make sure new settings are in effect: */ + spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg); + + spi_priv->probing_crc = false; /* * make sure can read back chip id correctly diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c index 31d51385ba93..2030fc7f53ca 100644 --- a/drivers/net/wireless/microchip/wilc1000/wlan.c +++ b/drivers/net/wireless/microchip/wilc1000/wlan.c @@ -552,12 +552,60 @@ static struct rxq_entry_t *wilc_wlan_rxq_remove(struct wilc *wilc) void chip_allow_sleep(struct wilc *wilc) { u32 reg = 0; + const struct wilc_hif_func *hif_func = wilc->hif_func; + u32 wakeup_reg, wakeup_bit; + u32 to_host_from_fw_reg, to_host_from_fw_bit; + u32 from_host_to_fw_reg, from_host_to_fw_bit; + u32 trials = 100; + int ret; + + if (wilc->io_type == WILC_HIF_SDIO) { + wakeup_reg = WILC_SDIO_WAKEUP_REG; + wakeup_bit = WILC_SDIO_WAKEUP_BIT; + from_host_to_fw_reg = WILC_SDIO_HOST_TO_FW_REG; + from_host_to_fw_bit = WILC_SDIO_HOST_TO_FW_BIT; + to_host_from_fw_reg = WILC_SDIO_FW_TO_HOST_REG; + to_host_from_fw_bit = WILC_SDIO_FW_TO_HOST_BIT; + } else { + wakeup_reg = WILC_SPI_WAKEUP_REG; + wakeup_bit = WILC_SPI_WAKEUP_BIT; + from_host_to_fw_reg = WILC_SPI_HOST_TO_FW_REG; + from_host_to_fw_bit = WILC_SPI_HOST_TO_FW_BIT; + to_host_from_fw_reg = WILC_SPI_FW_TO_HOST_REG; + to_host_from_fw_bit = WILC_SPI_FW_TO_HOST_BIT; + } + + while (--trials) { + ret = hif_func->hif_read_reg(wilc, to_host_from_fw_reg, &reg); + if (ret) + return; + if ((reg & to_host_from_fw_bit) == 0) + break; + } + if (!trials) + pr_warn("FW not responding\n") - wilc->hif_func->hif_read_reg(wilc, WILC_SDIO_WAKEUP_REG, &reg); + /* 
Clear bit 1 */ + ret = hif_func->hif_read_reg(wilc, wakeup_reg, &reg); + if (ret) + return; + if (reg & wakeup_bit) { + reg &= ~wakeup_bit; + ret = hif_func->hif_write_reg(wilc, wakeup_reg, reg); + if (ret) + return; + } - wilc->hif_func->hif_write_reg(wilc, WILC_SDIO_WAKEUP_REG, - reg & ~WILC_SDIO_WAKEUP_BIT); - wilc->hif_func->hif_write_reg(wilc, WILC_SDIO_HOST_TO_FW_REG, 0); + ret = hif_func->hif_read_reg(wilc, from_host_to_fw_reg, &reg); + if (ret) + return; + if (reg & from_host_to_fw_bit) { + reg &= ~from_host_to_fw_bit; + ret = hif_func->hif_write_reg(wilc, from_host_to_fw_reg, reg); + if (ret) + return; + + } } EXPORT_SYMBOL_GPL(chip_allow_sleep); diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.h b/drivers/net/wireless/microchip/wilc1000/wlan.h index d55eb6b3a12a..771c25fa849b 100644 --- a/drivers/net/wireless/microchip/wilc1000/wlan.h +++ b/drivers/net/wireless/microchip/wilc1000/wlan.h @@ -97,6 +97,12 @@ #define WILC_SPI_WAKEUP_REG 0x1 #define WILC_SPI_WAKEUP_BIT BIT(1) +#define WILC_SPI_HOST_TO_FW_REG 0x0b +#define WILC_SPI_HOST_TO_FW_BIT BIT(0) + +#define WILC_SPI_FW_TO_HOST_REG 0x10 +#define WILC_SPI_FW_TO_HOST_BIT BIT(0) + #define WILC_SPI_PROTOCOL_OFFSET (WILC_SPI_PROTOCOL_CONFIG - \ WILC_SPI_REG_BASE) @@ -392,7 +398,6 @@ struct wilc_cfg_rsp { u8 seq_no; }; -struct wilc; struct wilc_vif; int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer, diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index 504b4d0b98c4..84b15a655eab 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -680,13 +680,10 @@ qtnf_connect(struct wiphy *wiphy, struct net_device *dev, eth_zero_addr(vif->bssid); ret = qtnf_cmd_send_connect(vif, sme); - if (ret) { + if (ret) pr_err("VIF%u.%u: failed to connect\n", vif->mac->macid, vif->vifid); - goto out; - } -out: return ret; } @@ -702,13 +699,10 @@ qtnf_external_auth(struct wiphy *wiphy, struct net_device *dev, pr_warn("unexpected bssid: %pM", auth->bssid); ret = qtnf_cmd_send_external_auth(vif, auth); - if (ret) { + if (ret) pr_err("VIF%u.%u: failed to report external auth\n", vif->mac->macid, vif->vifid); - goto out; - } -out: return ret; } @@ -727,8 +721,7 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev, } if (vif->wdev.iftype != NL80211_IFTYPE_STATION) { - ret = -EOPNOTSUPP; - goto out; + return -EOPNOTSUPP; } ret = qtnf_cmd_send_disconnect(vif, reason_code); @@ -742,7 +735,6 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev, NULL, 0, true, GFP_KERNEL); } -out: return ret; } @@ -935,13 +927,10 @@ static int qtnf_update_owe_info(struct wiphy *wiphy, struct net_device *dev, return -EOPNOTSUPP; ret = qtnf_cmd_send_update_owe(vif, owe_info); - if (ret) { + if (ret) pr_err("VIF%u.%u: failed to update owe info\n", vif->mac->macid, vif->vifid); - goto out; - } -out: return ret; } @@ -987,18 +976,14 @@ static int qtnf_resume(struct wiphy *wiphy) vif = qtnf_mac_get_base_vif(mac); if (!vif) { pr_err("MAC%u: primary VIF is not configured\n", mac->macid); - ret = -EFAULT; - goto exit; + return -EFAULT; } ret = qtnf_cmd_send_wowlan_set(vif, NULL); - if (ret) { + if (ret) pr_err("MAC%u: failed to reset WoWLAN triggers\n", mac->macid); - goto exit; - } -exit: return ret; } diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index f3ccbd2b1084..c68563c83098 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ 
b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -379,10 +379,6 @@ int qtnf_cmd_send_stop_ap(struct qtnf_vif *vif) qtnf_bus_lock(vif->mac->bus); ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); - if (ret) - goto out; - -out: qtnf_bus_unlock(vif->mac->bus); return ret; @@ -407,10 +403,7 @@ int qtnf_cmd_send_register_mgmt(struct qtnf_vif *vif, u16 frame_type, bool reg) cmd->do_register = reg; ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); - if (ret) - goto out; -out: qtnf_bus_unlock(vif->mac->bus); return ret; @@ -446,10 +439,7 @@ int qtnf_cmd_send_frame(struct qtnf_vif *vif, u32 cookie, u16 flags, qtnf_cmd_skb_put_buffer(cmd_skb, buf, len); ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); - if (ret) - goto out; -out: qtnf_bus_unlock(vif->mac->bus); return ret; @@ -477,10 +467,6 @@ int qtnf_cmd_send_mgmt_set_appie(struct qtnf_vif *vif, u8 frame_type, qtnf_bus_lock(vif->mac->bus); ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); - if (ret) - goto out; - -out: qtnf_bus_unlock(vif->mac->bus); return ret; @@ -1677,10 +1663,7 @@ int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed) wiphy->retry_short); ret = qtnf_cmd_send(mac->bus, cmd_skb); - if (ret) - goto out; -out: qtnf_bus_unlock(mac->bus); return ret; @@ -1772,10 +1755,7 @@ int qtnf_cmd_send_add_key(struct qtnf_vif *vif, u8 key_index, bool pairwise, params->seq_len); ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); - if (ret) - goto out; -out: qtnf_bus_unlock(vif->mac->bus); return ret; @@ -1807,10 +1787,7 @@ int qtnf_cmd_send_del_key(struct qtnf_vif *vif, u8 key_index, bool pairwise, cmd->pairwise = pairwise; ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); - if (ret) - goto out; -out: qtnf_bus_unlock(vif->mac->bus); return ret; @@ -1837,10 +1814,7 @@ int qtnf_cmd_send_set_default_key(struct qtnf_vif *vif, u8 key_index, cmd->multicast = multicast; ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); - if (ret) - goto out; -out: qtnf_bus_unlock(vif->mac->bus); return ret; @@ -1864,10 +1838,7 @@ int qtnf_cmd_send_set_default_mgmt_key(struct qtnf_vif *vif, u8 key_index) cmd->key_index = key_index; ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); - if (ret) - goto out; -out: qtnf_bus_unlock(vif->mac->bus); return ret; @@ -1931,8 +1902,6 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac, } ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); - if (ret) - goto out; out: qtnf_bus_unlock(vif->mac->bus); @@ -1966,10 +1935,7 @@ int qtnf_cmd_send_del_sta(struct qtnf_vif *vif, cmd->reason_code = cpu_to_le16(params->reason_code); ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); - if (ret) - goto out; -out: qtnf_bus_unlock(vif->mac->bus); return ret; @@ -2189,10 +2155,6 @@ int qtnf_cmd_send_connect(struct qtnf_vif *vif, qtnf_bus_lock(vif->mac->bus); ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); - if (ret) - goto out; - -out: qtnf_bus_unlock(vif->mac->bus); return ret; @@ -2218,10 +2180,6 @@ int qtnf_cmd_send_external_auth(struct qtnf_vif *vif, qtnf_bus_lock(vif->mac->bus); ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); - if (ret) - goto out; - -out: qtnf_bus_unlock(vif->mac->bus); return ret; @@ -2245,10 +2203,7 @@ int qtnf_cmd_send_disconnect(struct qtnf_vif *vif, u16 reason_code) cmd->reason = cpu_to_le16(reason_code); ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); - if (ret) - goto out; -out: qtnf_bus_unlock(vif->mac->bus); return ret; @@ -2271,10 +2226,6 @@ int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif, bool up) qtnf_bus_lock(vif->mac->bus); ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); - if (ret) - goto out; - -out: qtnf_bus_unlock(vif->mac->bus); 
return ret; @@ -2580,10 +2531,6 @@ int qtnf_cmd_start_cac(const struct qtnf_vif *vif, qtnf_bus_lock(bus); ret = qtnf_cmd_send(bus, cmd_skb); - if (ret) - goto out; - -out: qtnf_bus_unlock(bus); return ret; @@ -2611,10 +2558,6 @@ int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif, qtnf_bus_lock(bus); ret = qtnf_cmd_send(bus, cmd_skb); - if (ret) - goto out; - -out: qtnf_bus_unlock(bus); return ret; @@ -2639,10 +2582,7 @@ int qtnf_cmd_send_pm_set(const struct qtnf_vif *vif, u8 pm_mode, int timeout) qtnf_bus_lock(bus); ret = qtnf_cmd_send(bus, cmd_skb); - if (ret) - goto out; -out: qtnf_bus_unlock(bus); return ret; @@ -2754,10 +2694,7 @@ int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif, cmd->triggers = cpu_to_le32(triggers); ret = qtnf_cmd_send(bus, cmd_skb); - if (ret) - goto out; -out: qtnf_bus_unlock(bus); return ret; } @@ -2821,10 +2758,6 @@ int qtnf_cmd_send_update_owe(struct qtnf_vif *vif, qtnf_bus_lock(vif->mac->bus); ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); - if (ret) - goto out; - -out: qtnf_bus_unlock(vif->mac->bus); return ret; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c index 61a4f1ad31e2..e95c101c2711 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c @@ -989,11 +989,7 @@ static void rt2x00lib_rate(struct ieee80211_rate *entry, void rt2x00lib_set_mac_address(struct rt2x00_dev *rt2x00dev, u8 *eeprom_mac_addr) { - const char *mac_addr; - - mac_addr = of_get_mac_address(rt2x00dev->dev->of_node); - if (!IS_ERR(mac_addr)) - ether_addr_copy(eeprom_mac_addr, mac_addr); + of_get_mac_address(rt2x00dev->dev->of_node, eeprom_mac_addr); if (!is_valid_ether_addr(eeprom_mac_addr)) { eth_random_addr(eeprom_mac_addr); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index d6d1be4169e5..d1a566cc0c9e 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1391,7 +1391,6 @@ struct rtl8xxxu_priv { struct delayed_work ra_watchdog; struct work_struct c2hcmd_work; struct sk_buff_head c2hcmd_queue; - spinlock_t c2hcmd_lock; struct rtl8xxxu_btcoex bt_coex; struct rtl8xxxu_ra_report ra_report; }; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 5cd7ef3625c5..9ff09cf7eb62 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -1145,7 +1145,7 @@ void rtl8xxxu_gen1_config_channel(struct ieee80211_hw *hw) switch (hw->conf.chandef.width) { case NL80211_CHAN_WIDTH_20_NOHT: ht = false; - /* fall through */ + fallthrough; case NL80211_CHAN_WIDTH_20: opmode |= BW_OPMODE_20MHZ; rtl8xxxu_write8(priv, REG_BW_OPMODE, opmode); @@ -1272,7 +1272,7 @@ void rtl8xxxu_gen2_config_channel(struct ieee80211_hw *hw) switch (hw->conf.chandef.width) { case NL80211_CHAN_WIDTH_20_NOHT: ht = false; - /* fall through */ + fallthrough; case NL80211_CHAN_WIDTH_20: rf_mode_bw |= WMAC_TRXPTCL_CTL_BW_20; subchannel = 0; @@ -1741,11 +1741,11 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) case 3: priv->ep_tx_low_queue = 1; priv->ep_tx_count++; - /* fall through */ + fallthrough; case 2: priv->ep_tx_normal_queue = 1; priv->ep_tx_count++; - /* fall through */ + fallthrough; case 1: priv->ep_tx_high_queue = 1; priv->ep_tx_count++; @@ -5423,7 +5423,6 @@ static void rtl8xxxu_c2hcmd_callback(struct work_struct 
*work) struct rtl8xxxu_priv *priv; struct rtl8723bu_c2h *c2h; struct sk_buff *skb = NULL; - unsigned long flags; u8 bt_info = 0; struct rtl8xxxu_btcoex *btcoex; struct rtl8xxxu_ra_report *rarpt; @@ -5439,9 +5438,7 @@ static void rtl8xxxu_c2hcmd_callback(struct work_struct *work) goto out; while (!skb_queue_empty(&priv->c2hcmd_queue)) { - spin_lock_irqsave(&priv->c2hcmd_lock, flags); - skb = __skb_dequeue(&priv->c2hcmd_queue); - spin_unlock_irqrestore(&priv->c2hcmd_lock, flags); + skb = skb_dequeue(&priv->c2hcmd_queue); c2h = (struct rtl8723bu_c2h *)skb->data; @@ -5499,7 +5496,6 @@ static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv, struct rtl8723bu_c2h *c2h = (struct rtl8723bu_c2h *)skb->data; struct device *dev = &priv->udev->dev; int len; - unsigned long flags; len = skb->len - 2; @@ -5538,9 +5534,7 @@ static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv, break; } - spin_lock_irqsave(&priv->c2hcmd_lock, flags); - __skb_queue_tail(&priv->c2hcmd_queue, skb); - spin_unlock_irqrestore(&priv->c2hcmd_lock, flags); + skb_queue_tail(&priv->c2hcmd_queue, skb); schedule_work(&priv->c2hcmd_work); } @@ -6606,7 +6600,6 @@ static int rtl8xxxu_probe(struct usb_interface *interface, spin_lock_init(&priv->rx_urb_lock); INIT_WORK(&priv->rx_urb_wq, rtl8xxxu_rx_urb_work); INIT_DELAYED_WORK(&priv->ra_watchdog, rtl8xxxu_watchdog_callback); - spin_lock_init(&priv->c2hcmd_lock); INIT_WORK(&priv->c2hcmd_work, rtl8xxxu_c2hcmd_callback); skb_queue_head_init(&priv->c2hcmd_queue); diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index 6e8bd99e8911..2a7ee90a3f54 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -551,7 +551,6 @@ int rtl_init_core(struct ieee80211_hw *hw) spin_lock_init(&rtlpriv->locks.rf_lock); spin_lock_init(&rtlpriv->locks.waitq_lock); spin_lock_init(&rtlpriv->locks.entry_list_lock); - spin_lock_init(&rtlpriv->locks.c2hcmd_lock); spin_lock_init(&rtlpriv->locks.scan_list_lock); spin_lock_init(&rtlpriv->locks.cck_and_rw_pagea_lock); spin_lock_init(&rtlpriv->locks.fw_ps_lock); @@ -2269,7 +2268,6 @@ static bool rtl_c2h_fast_cmd(struct ieee80211_hw *hw, struct sk_buff *skb) void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); - unsigned long flags; if (rtl_c2h_fast_cmd(hw, skb)) { rtl_c2h_content_parsing(hw, skb); @@ -2278,11 +2276,7 @@ void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, struct sk_buff *skb) } /* enqueue */ - spin_lock_irqsave(&rtlpriv->locks.c2hcmd_lock, flags); - - __skb_queue_tail(&rtlpriv->c2hcmd_queue, skb); - - spin_unlock_irqrestore(&rtlpriv->locks.c2hcmd_lock, flags); + skb_queue_tail(&rtlpriv->c2hcmd_queue, skb); /* wake up wq */ queue_delayed_work(rtlpriv->works.rtl_wq, &rtlpriv->works.c2hcmd_wq, 0); @@ -2340,16 +2334,11 @@ void rtl_c2hcmd_launcher(struct ieee80211_hw *hw, int exec) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct sk_buff *skb; - unsigned long flags; int i; for (i = 0; i < 200; i++) { /* dequeue a task */ - spin_lock_irqsave(&rtlpriv->locks.c2hcmd_lock, flags); - - skb = __skb_dequeue(&rtlpriv->c2hcmd_queue); - - spin_unlock_irqrestore(&rtlpriv->locks.c2hcmd_lock, flags); + skb = skb_dequeue(&rtlpriv->c2hcmd_queue); /* do it */ if (!skb) diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index 965bd9589045..c9b6ee81dcb9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c 
@@ -564,7 +564,7 @@ static int rtl_op_resume(struct ieee80211_hw *hw) rtlhal->enter_pnp_sleep = false; rtlhal->wake_from_pnp_sleep = true; - /* to resovle s4 can not wake up*/ + /* to resolve s4 can not wake up*/ now = ktime_get_real_seconds(); if (now - rtlhal->last_suspend_sec < 5) return -1; @@ -806,7 +806,7 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw, if (0 == changed_flags) return; - /*TODO: we disable broadcase now, so enable here */ + /*TODO: we disable broadcast now, so enable here */ if (changed_flags & FIF_ALLMULTI) { if (*new_flags & FIF_ALLMULTI) { mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AM] | @@ -1796,7 +1796,7 @@ bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version, value |= (GET_PWR_CFG_VALUE(cfg_cmd) & GET_PWR_CFG_MASK(cfg_cmd)); - /*Write the value back to sytem register*/ + /*Write the value back to system register*/ rtl_write_byte(rtlpriv, offset, value); break; case PWR_CMD_POLLING: diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c index 861cc663ca93..bf686a916acb 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c @@ -2466,8 +2466,6 @@ void rtl8188ee_bt_reg_init(struct ieee80211_hw *hw) /* 0:Low, 1:High, 2:From Efuse. */ rtlpriv->btcoexist.reg_bt_iso = 2; - /* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter. */ - rtlpriv->btcoexist.reg_bt_sco = 3; /* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */ rtlpriv->btcoexist.reg_bt_sco = 0; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c index 1dbdddce0823..a74724c971b9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c @@ -372,18 +372,14 @@ static struct pci_driver rtl92de_driver = { /* add global spin lock to solve the problem that * Dul mac register operation on the same time */ -spinlock_t globalmutex_power; -spinlock_t globalmutex_for_fwdownload; -spinlock_t globalmutex_for_power_and_efuse; +DEFINE_SPINLOCK(globalmutex_power); +DEFINE_SPINLOCK(globalmutex_for_fwdownload); +DEFINE_SPINLOCK(globalmutex_for_power_and_efuse); static int __init rtl92de_module_init(void) { int ret = 0; - spin_lock_init(&globalmutex_power); - spin_lock_init(&globalmutex_for_fwdownload); - spin_lock_init(&globalmutex_for_power_and_efuse); - ret = pci_register_driver(&rtl92de_driver); if (ret) WARN_ONCE(true, "rtl8192de: No device found\n"); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c index 27c8a5d96520..fcaaf664cbec 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c @@ -249,7 +249,7 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = { 0x824, 0x00030FE0, 0x828, 0x00000000, 0x82C, 0x002081DD, - 0x830, 0x2AAA8E24, + 0x830, 0x2AAAEEC8, 0x834, 0x0037A706, 0x838, 0x06489B44, 0x83C, 0x0000095B, @@ -324,10 +324,10 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = { 0x9D8, 0x00000000, 0x9DC, 0x00000000, 0x9E0, 0x00005D00, - 0x9E4, 0x00000002, + 0x9E4, 0x00000003, 0x9E8, 0x00000001, 0xA00, 0x00D047C8, - 0xA04, 0x01FF000C, + 0xA04, 0x01FF800C, 0xA08, 0x8C8A8300, 0xA0C, 0x2E68000F, 0xA10, 0x9500BB78, @@ -1320,7 +1320,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x083, 0x00021800, 0x084, 0x00028000, 0x085, 0x00048000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x0009483A, + 0xA0000000, 0x00000000, 
0x086, 0x00094838, + 0xB0000000, 0x00000000, 0x087, 0x00044980, 0x088, 0x00048000, 0x089, 0x0000D480, @@ -1409,36 +1413,32 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x03C, 0x000CA000, 0x0EF, 0x00000000, 0x0EF, 0x00001100, - 0xFF0F0104, 0xABCD, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0004ADF3, 0x034, 0x00049DF0, - 0xFF0F0204, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0004ADF3, 0x034, 0x00049DF0, - 0xFF0F0404, 0xCDEF, - 0x034, 0x0004ADF3, - 0x034, 0x00049DF0, - 0xFF0F0200, 0xCDEF, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0004ADF5, 0x034, 0x00049DF2, - 0xFF0F02C0, 0xCDEF, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A0F3, + 0x034, 0x000490B1, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0004A0F3, 0x034, 0x000490B1, - 0xCDCDCDCD, 0xCDCD, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004ADF5, + 0x034, 0x00049DF2, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004ADF3, + 0x034, 0x00049DF0, + 0xA0000000, 0x00000000, 0x034, 0x0004ADF7, 0x034, 0x00049DF3, - 0xFF0F0104, 0xDEAD, - 0xFF0F0104, 0xABCD, - 0x034, 0x00048DED, - 0x034, 0x00047DEA, - 0x034, 0x00046DE7, - 0x034, 0x00045CE9, - 0x034, 0x00044CE6, - 0x034, 0x000438C6, - 0x034, 0x00042886, - 0x034, 0x00041486, - 0x034, 0x00040447, - 0xFF0F0204, 0xCDEF, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x00048DED, 0x034, 0x00047DEA, 0x034, 0x00046DE7, @@ -1448,7 +1448,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x00042886, 0x034, 0x00041486, 0x034, 0x00040447, - 0xFF0F0404, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x00048DED, 0x034, 0x00047DEA, 0x034, 0x00046DE7, @@ -1458,7 +1458,17 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x00042886, 0x034, 0x00041486, 0x034, 0x00040447, - 0xFF0F02C0, 0xCDEF, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000480AE, + 0x034, 0x000470AB, + 0x034, 0x0004608B, + 0x034, 0x00045069, + 0x034, 0x00044048, + 0x034, 0x00043045, + 0x034, 0x00042026, + 0x034, 0x00041023, + 0x034, 0x00040002, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x000480AE, 0x034, 0x000470AB, 0x034, 0x0004608B, @@ -1468,7 +1478,17 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x00042026, 0x034, 0x00041023, 0x034, 0x00040002, - 0xCDCDCDCD, 0xCDCD, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x00048DED, + 0x034, 0x00047DEA, + 0x034, 0x00046DE7, + 0x034, 0x00045CE9, + 0x034, 0x00044CE6, + 0x034, 0x000438C6, + 0x034, 0x00042886, + 0x034, 0x00041486, + 0x034, 0x00040447, + 0xA0000000, 0x00000000, 0x034, 0x00048DEF, 0x034, 0x00047DEC, 0x034, 0x00046DE9, @@ -1478,38 +1498,36 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x0004248A, 0x034, 0x0004108D, 0x034, 0x0004008A, - 0xFF0F0104, 0xDEAD, - 0xFF0F0200, 0xABCD, + 0xB0000000, 0x00000000, + 0x80000210, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0002ADF4, - 0xFF0F02C0, 0xCDEF, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A0F3, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0002A0F3, - 0xCDCDCDCD, 0xCDCD, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002ADF4, + 0xA0000000, 0x00000000, 0x034, 0x0002ADF7, - 0xFF0F0200, 0xDEAD, - 0xFF0F0104, 0xABCD, - 0x034, 0x00029DF4, - 0xFF0F0204, 0xCDEF, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x00029DF4, - 0xFF0F0404, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x00029DF4, - 0xFF0F0200, 0xCDEF, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, 0x034, 
0x00029DF1, - 0xFF0F02C0, 0xCDEF, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000290F0, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x000290F0, - 0xCDCDCDCD, 0xCDCD, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x00029DF1, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x00029DF4, + 0xA0000000, 0x00000000, 0x034, 0x00029DF2, - 0xFF0F0104, 0xDEAD, - 0xFF0F0104, 0xABCD, - 0x034, 0x00028DF1, - 0x034, 0x00027DEE, - 0x034, 0x00026DEB, - 0x034, 0x00025CEC, - 0x034, 0x00024CE9, - 0x034, 0x000238CA, - 0x034, 0x00022889, - 0x034, 0x00021489, - 0x034, 0x0002044A, - 0xFF0F0204, 0xCDEF, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x00028DF1, 0x034, 0x00027DEE, 0x034, 0x00026DEB, @@ -1519,7 +1537,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x00022889, 0x034, 0x00021489, 0x034, 0x0002044A, - 0xFF0F0404, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x00028DF1, 0x034, 0x00027DEE, 0x034, 0x00026DEB, @@ -1529,7 +1547,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x00022889, 0x034, 0x00021489, 0x034, 0x0002044A, - 0xFF0F02C0, 0xCDEF, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x000280AF, 0x034, 0x000270AC, 0x034, 0x0002608B, @@ -1539,7 +1557,27 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x00022026, 0x034, 0x00021023, 0x034, 0x00020002, - 0xCDCDCDCD, 0xCDCD, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000280AF, + 0x034, 0x000270AC, + 0x034, 0x0002608B, + 0x034, 0x00025069, + 0x034, 0x00024048, + 0x034, 0x00023045, + 0x034, 0x00022026, + 0x034, 0x00021023, + 0x034, 0x00020002, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x00028DF1, + 0x034, 0x00027DEE, + 0x034, 0x00026DEB, + 0x034, 0x00025CEC, + 0x034, 0x00024CE9, + 0x034, 0x000238CA, + 0x034, 0x00022889, + 0x034, 0x00021489, + 0x034, 0x0002044A, + 0xA0000000, 0x00000000, 0x034, 0x00028DEE, 0x034, 0x00027DEB, 0x034, 0x00026CCD, @@ -1549,27 +1587,24 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x00022849, 0x034, 0x00021449, 0x034, 0x0002004D, - 0xFF0F0104, 0xDEAD, - 0xFF0F02C0, 0xABCD, + 0xB0000000, 0x00000000, + 0x8000020c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D7, + 0x034, 0x000090D3, + 0x034, 0x000080B1, + 0x034, 0x000070AE, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0000A0D7, 0x034, 0x000090D3, 0x034, 0x000080B1, 0x034, 0x000070AE, - 0xCDCDCDCD, 0xCDCD, + 0xA0000000, 0x00000000, 0x034, 0x0000ADF7, 0x034, 0x00009DF4, 0x034, 0x00008DF1, 0x034, 0x00007DEE, - 0xFF0F02C0, 0xDEAD, - 0xFF0F0104, 0xABCD, - 0x034, 0x00006DEB, - 0x034, 0x00005CEC, - 0x034, 0x00004CE9, - 0x034, 0x000038CA, - 0x034, 0x00002889, - 0x034, 0x00001489, - 0x034, 0x0000044A, - 0xFF0F0204, 0xCDEF, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x00006DEB, 0x034, 0x00005CEC, 0x034, 0x00004CE9, @@ -1577,7 +1612,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x00002889, 0x034, 0x00001489, 0x034, 0x0000044A, - 0xFF0F0404, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x00006DEB, 0x034, 0x00005CEC, 0x034, 0x00004CE9, @@ -1585,7 +1620,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x00002889, 0x034, 0x00001489, 0x034, 0x0000044A, - 0xFF0F02C0, 0xCDEF, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0000608D, 0x034, 0x0000506B, 0x034, 0x0000404A, @@ -1593,7 +1628,23 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x00002044, 0x034, 0x00001025, 0x034, 0x00000004, - 0xCDCDCDCD, 0xCDCD, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000608D, + 
0x034, 0x0000506B, + 0x034, 0x0000404A, + 0x034, 0x00003047, + 0x034, 0x00002044, + 0x034, 0x00001025, + 0x034, 0x00000004, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x00006DEB, + 0x034, 0x00005CEC, + 0x034, 0x00004CE9, + 0x034, 0x000038CA, + 0x034, 0x00002889, + 0x034, 0x00001489, + 0x034, 0x0000044A, + 0xA0000000, 0x00000000, 0x034, 0x00006DCD, 0x034, 0x00005CCD, 0x034, 0x00004CCA, @@ -1601,11 +1652,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x00002888, 0x034, 0x00001488, 0x034, 0x00000486, - 0xFF0F0104, 0xDEAD, + 0xB0000000, 0x00000000, 0x0EF, 0x00000000, 0x018, 0x0001712A, 0x0EF, 0x00000040, - 0xFF0F0104, 0xABCD, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x035, 0x00000187, 0x035, 0x00008187, 0x035, 0x00010187, @@ -1615,7 +1666,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x035, 0x00040188, 0x035, 0x00048188, 0x035, 0x00050188, - 0xFF0F0204, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x035, 0x00000187, 0x035, 0x00008187, 0x035, 0x00010187, @@ -1625,7 +1676,37 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x035, 0x00040188, 0x035, 0x00048188, 0x035, 0x00050188, - 0xFF0F0404, 0xCDEF, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x00000128, + 0x035, 0x00008128, + 0x035, 0x00010128, + 0x035, 0x000201C8, + 0x035, 0x000281C8, + 0x035, 0x000301C8, + 0x035, 0x000401C8, + 0x035, 0x000481C8, + 0x035, 0x000501C8, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x00000145, + 0x035, 0x00008145, + 0x035, 0x00010145, + 0x035, 0x00020196, + 0x035, 0x00028196, + 0x035, 0x00030196, + 0x035, 0x000401C7, + 0x035, 0x000481C7, + 0x035, 0x000501C7, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x00000128, + 0x035, 0x00008128, + 0x035, 0x00010128, + 0x035, 0x000201C8, + 0x035, 0x000281C8, + 0x035, 0x000301C8, + 0x035, 0x000401C8, + 0x035, 0x000481C8, + 0x035, 0x000501C8, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, 0x035, 0x00000187, 0x035, 0x00008187, 0x035, 0x00010187, @@ -1635,7 +1716,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x035, 0x00040188, 0x035, 0x00048188, 0x035, 0x00050188, - 0xCDCDCDCD, 0xCDCD, + 0xA0000000, 0x00000000, 0x035, 0x00000145, 0x035, 0x00008145, 0x035, 0x00010145, @@ -1645,11 +1726,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x035, 0x000401C7, 0x035, 0x000481C7, 0x035, 0x000501C7, - 0xFF0F0104, 0xDEAD, + 0xB0000000, 0x00000000, 0x0EF, 0x00000000, 0x018, 0x0001712A, 0x0EF, 0x00000010, - 0xFF0F0104, 0xABCD, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x036, 0x00085733, 0x036, 0x0008D733, 0x036, 0x00095733, @@ -1662,7 +1743,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x036, 0x000CE4B4, 0x036, 0x000D64B4, 0x036, 0x000DE4B4, - 0xFF0F0204, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x036, 0x00085733, 0x036, 0x0008D733, 0x036, 0x00095733, @@ -1675,7 +1756,46 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x036, 0x000CE4B4, 0x036, 0x000D64B4, 0x036, 0x000DE4B4, - 0xFF0F0404, 0xCDEF, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x000063B5, + 0x036, 0x0000E3B5, + 0x036, 0x000163B5, + 0x036, 0x0001E3B5, + 0x036, 0x000263B5, + 0x036, 0x0002E3B5, + 0x036, 0x000363B5, + 0x036, 0x0003E3B5, + 0x036, 0x000463B5, + 0x036, 0x0004E3B5, + 0x036, 0x000563B5, + 0x036, 0x0005E3B5, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x000056B3, + 0x036, 0x0000D6B3, + 0x036, 0x000156B3, + 0x036, 0x0001D6B3, + 0x036, 0x00026634, + 0x036, 0x0002E634, + 0x036, 0x00036634, + 0x036, 0x0003E634, + 0x036, 0x000467B4, + 0x036, 0x0004E7B4, + 0x036, 0x000567B4, + 0x036, 0x0005E7B4, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 
0x036, 0x000063B5, + 0x036, 0x0000E3B5, + 0x036, 0x000163B5, + 0x036, 0x0001E3B5, + 0x036, 0x000263B5, + 0x036, 0x0002E3B5, + 0x036, 0x000363B5, + 0x036, 0x0003E3B5, + 0x036, 0x000463B5, + 0x036, 0x0004E3B5, + 0x036, 0x000563B5, + 0x036, 0x0005E3B5, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, 0x036, 0x00085733, 0x036, 0x0008D733, 0x036, 0x00095733, @@ -1688,7 +1808,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x036, 0x000CE4B4, 0x036, 0x000D64B4, 0x036, 0x000DE4B4, - 0xCDCDCDCD, 0xCDCD, + 0xA0000000, 0x00000000, 0x036, 0x000056B3, 0x036, 0x0000D6B3, 0x036, 0x000156B3, @@ -1701,103 +1821,162 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x036, 0x0004E7B4, 0x036, 0x000567B4, 0x036, 0x0005E7B4, - 0xFF0F0104, 0xDEAD, + 0xB0000000, 0x00000000, 0x0EF, 0x00000000, 0x0EF, 0x00000008, - 0xFF0F0104, 0xABCD, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x03C, 0x000001C8, 0x03C, 0x00000492, - 0xFF0F0204, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x03C, 0x000001C8, 0x03C, 0x00000492, - 0xFF0F0404, 0xCDEF, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x000001B6, + 0x03C, 0x00000492, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0000022A, + 0x03C, 0x00000594, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x000001B6, + 0x03C, 0x00000492, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, 0x03C, 0x000001C8, 0x03C, 0x00000492, - 0xCDCDCDCD, 0xCDCD, + 0xA0000000, 0x00000000, 0x03C, 0x0000022A, 0x03C, 0x00000594, - 0xFF0F0104, 0xDEAD, - 0xFF0F0104, 0xABCD, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x03C, 0x00000800, - 0xFF0F0204, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x03C, 0x00000800, - 0xFF0F0404, 0xCDEF, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, 0x03C, 0x00000800, - 0xFF0F02C0, 0xCDEF, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, 0x03C, 0x00000820, - 0xCDCDCDCD, 0xCDCD, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000820, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000800, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000800, + 0xA0000000, 0x00000000, 0x03C, 0x00000900, - 0xFF0F0104, 0xDEAD, + 0xB0000000, 0x00000000, 0x0EF, 0x00000000, 0x018, 0x0001712A, 0x0EF, 0x00000002, - 0xFF0F0104, 0xABCD, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x008, 0x0004E400, - 0xFF0F0204, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x008, 0x0004E400, - 0xFF0F0404, 0xCDEF, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, + 0x008, 0x00002000, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, + 0x008, 0x00002000, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x008, 0x00002000, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x008, 0x00002000, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, 0x008, 0x0004E400, - 0xCDCDCDCD, 0xCDCD, + 0xA0000000, 0x00000000, 0x008, 0x00002000, - 0xFF0F0104, 0xDEAD, + 0xB0000000, 0x00000000, 0x0EF, 0x00000000, 0x0DF, 0x000000C0, - 0x01F, 0x00040064, - 0xFF0F0104, 0xABCD, + 0x01F, 0x00000064, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x058, 0x000A7284, 0x059, 0x000600EC, - 0xFF0F0204, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x058, 0x000A7284, 0x059, 0x000600EC, - 0xFF0F0404, 0xCDEF, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, + 0x058, 0x00081184, + 0x059, 0x0006016C, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x058, 0x00081184, + 0x059, 0x0006016C, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x058, 0x00081184, + 0x059, 0x0006016C, + 0x90000410, 
0x00000000, 0x40000000, 0x00000000, 0x058, 0x000A7284, 0x059, 0x000600EC, - 0xCDCDCDCD, 0xCDCD, + 0xA0000000, 0x00000000, 0x058, 0x00081184, 0x059, 0x0006016C, - 0xFF0F0104, 0xDEAD, - 0xFF0F0104, 0xABCD, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x061, 0x000E8D73, 0x062, 0x00093FC5, - 0xFF0F0204, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x061, 0x000E8D73, 0x062, 0x00093FC5, - 0xFF0F0404, 0xCDEF, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, + 0x061, 0x000EFD83, + 0x062, 0x00093FCC, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x061, 0x000EAD53, + 0x062, 0x00093BC4, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x061, 0x000EFD83, + 0x062, 0x00093FCC, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, 0x061, 0x000E8D73, 0x062, 0x00093FC5, - 0xCDCDCDCD, 0xCDCD, + 0xA0000000, 0x00000000, 0x061, 0x000EAD53, 0x062, 0x00093BC4, - 0xFF0F0104, 0xDEAD, - 0xFF0F0104, 0xABCD, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x063, 0x000110E9, - 0xFF0F0204, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x063, 0x000110E9, - 0xFF0F0404, 0xCDEF, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000110EB, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, 0x063, 0x000110E9, - 0xFF0F0200, 0xCDEF, - 0x063, 0x000710E9, - 0xFF0F02C0, 0xCDEF, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, 0x063, 0x000110E9, - 0xCDCDCDCD, 0xCDCD, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000110EB, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000110E9, + 0xA0000000, 0x00000000, 0x063, 0x000714E9, - 0xFF0F0104, 0xDEAD, - 0xFF0F0104, 0xABCD, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, + 0x064, 0x0001C27C, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, + 0x064, 0x0001C27C, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, 0x064, 0x0001C27C, - 0xFF0F0204, 0xCDEF, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x064, 0x0001C67C, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, 0x064, 0x0001C27C, - 0xFF0F0404, 0xCDEF, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, 0x064, 0x0001C27C, - 0xCDCDCDCD, 0xCDCD, + 0xA0000000, 0x00000000, 0x064, 0x0001C67C, - 0xFF0F0104, 0xDEAD, - 0xFF0F0200, 0xABCD, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x00091016, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x00091016, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, 0x065, 0x00093016, - 0xFF0F02C0, 0xCDEF, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, 0x065, 0x00093015, - 0xCDCDCDCD, 0xCDCD, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x00093015, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x00093016, + 0xA0000000, 0x00000000, 0x065, 0x00091016, - 0xFF0F0200, 0xDEAD, + 0xB0000000, 0x00000000, 0x018, 0x00000006, 0x0EF, 0x00002000, 0x03B, 0x0003824B, @@ -1895,9 +2074,10 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x0B4, 0x0001214C, 0x0B7, 0x0003000C, 0x01C, 0x000539D2, + 0x0C4, 0x000AFE00, 0x018, 0x0001F12A, - 0x0FE, 0x00000000, - 0x0FE, 0x00000000, + 0xFFE, 0x00000000, + 0xFFE, 0x00000000, 0x018, 0x0001712A, }; @@ -2017,6 +2197,7 @@ u32 RTL8812AE_MAC_REG_ARRAY[] = { u32 RTL8812AE_MAC_1T_ARRAYLEN = ARRAY_SIZE(RTL8812AE_MAC_REG_ARRAY); u32 RTL8821AE_MAC_REG_ARRAY[] = { + 0x421, 0x0000000F, 0x428, 0x0000000A, 0x429, 0x00000010, 0x430, 0x00000000, @@ -2485,7 +2666,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = { 0x81C, 0xA6360001, 0x81C, 0xA5380001, 0x81C, 0xA43A0001, - 0x81C, 0xA33C0001, + 
0x81C, 0x683C0001, 0x81C, 0x673E0001, 0x81C, 0x66400001, 0x81C, 0x65420001, @@ -2519,7 +2700,66 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = { 0x81C, 0x017A0001, 0x81C, 0x017C0001, 0x81C, 0x017E0001, - 0xFF0F02C0, 0xABCD, + 0x8000020c, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFB000101, + 0x81C, 0xFA020101, + 0x81C, 0xF9040101, + 0x81C, 0xF8060101, + 0x81C, 0xF7080101, + 0x81C, 0xF60A0101, + 0x81C, 0xF50C0101, + 0x81C, 0xF40E0101, + 0x81C, 0xF3100101, + 0x81C, 0xF2120101, + 0x81C, 0xF1140101, + 0x81C, 0xF0160101, + 0x81C, 0xEF180101, + 0x81C, 0xEE1A0101, + 0x81C, 0xED1C0101, + 0x81C, 0xEC1E0101, + 0x81C, 0xEB200101, + 0x81C, 0xEA220101, + 0x81C, 0xE9240101, + 0x81C, 0xE8260101, + 0x81C, 0xE7280101, + 0x81C, 0xE62A0101, + 0x81C, 0xE52C0101, + 0x81C, 0xE42E0101, + 0x81C, 0xE3300101, + 0x81C, 0xA5320101, + 0x81C, 0xA4340101, + 0x81C, 0xA3360101, + 0x81C, 0x87380101, + 0x81C, 0x863A0101, + 0x81C, 0x853C0101, + 0x81C, 0x843E0101, + 0x81C, 0x69400101, + 0x81C, 0x68420101, + 0x81C, 0x67440101, + 0x81C, 0x66460101, + 0x81C, 0x49480101, + 0x81C, 0x484A0101, + 0x81C, 0x474C0101, + 0x81C, 0x2A4E0101, + 0x81C, 0x29500101, + 0x81C, 0x28520101, + 0x81C, 0x27540101, + 0x81C, 0x26560101, + 0x81C, 0x25580101, + 0x81C, 0x245A0101, + 0x81C, 0x235C0101, + 0x81C, 0x055E0101, + 0x81C, 0x04600101, + 0x81C, 0x03620101, + 0x81C, 0x02640101, + 0x81C, 0x01660101, + 0x81C, 0x01680101, + 0x81C, 0x016A0101, + 0x81C, 0x016C0101, + 0x81C, 0x016E0101, + 0x81C, 0x01700101, + 0x81C, 0x01720101, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, 0x81C, 0xFB000101, 0x81C, 0xFA020101, 0x81C, 0xF9040101, @@ -2578,7 +2818,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = { 0x81C, 0x016E0101, 0x81C, 0x01700101, 0x81C, 0x01720101, - 0xCDCDCDCD, 0xCDCD, + 0xA0000000, 0x00000000, 0x81C, 0xFF000101, 0x81C, 0xFF020101, 0x81C, 0xFE040101, @@ -2637,7 +2877,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = { 0x81C, 0x046E0101, 0x81C, 0x03700101, 0x81C, 0x02720101, - 0xFF0F02C0, 0xDEAD, + 0xB0000000, 0x00000000, 0x81C, 0x01740101, 0x81C, 0x01760101, 0x81C, 0x01780101, diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index 6c5e242b1bc5..37a9a03123f3 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -1070,7 +1070,6 @@ int rtl_usb_probe(struct usb_interface *intf, err = ieee80211_register_hw(hw); if (err) { pr_err("Can't register mac80211 hw.\n"); - err = -ENODEV; goto error_out; } rtlpriv->mac80211.mac80211_registered = 1; diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index fdccfd29fd61..877ed6a1589f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -2450,7 +2450,6 @@ struct rtl_locks { spinlock_t waitq_lock; spinlock_t entry_list_lock; spinlock_t usb_lock; - spinlock_t c2hcmd_lock; spinlock_t scan_list_lock; /* lock for the scan list */ /*FW clock change */ @@ -3086,14 +3085,9 @@ static inline __le16 rtl_get_fc(struct sk_buff *skb) return rtl_get_hdr(skb)->frame_control; } -static inline u16 rtl_get_tid_h(struct ieee80211_hdr *hdr) -{ - return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK; -} - static inline u16 rtl_get_tid(struct sk_buff *skb) { - return rtl_get_tid_h(rtl_get_hdr(skb)); + return ieee80211_get_tid(rtl_get_hdr(skb)); } static inline struct ieee80211_sta *get_sta(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c index ea2be1e25065..cedbf3825848 
100644 --- a/drivers/net/wireless/realtek/rtw88/coex.c +++ b/drivers/net/wireless/realtek/rtw88/coex.c @@ -787,7 +787,6 @@ static void rtw_coex_update_wl_ch_info(struct rtw_dev *rtwdev, u8 type) { struct rtw_chip_info *chip = rtwdev->chip; struct rtw_coex_dm *coex_dm = &rtwdev->coex.dm; - struct rtw_efuse *efuse = &rtwdev->efuse; u8 link = 0; u8 center_chan = 0; u8 bw; @@ -798,7 +797,7 @@ static void rtw_coex_update_wl_ch_info(struct rtw_dev *rtwdev, u8 type) if (type != COEX_MEDIA_DISCONNECT) center_chan = rtwdev->hal.current_channel; - if (center_chan == 0 || (efuse->share_ant && center_chan <= 14)) { + if (center_chan == 0) { link = 0; center_chan = 0; bw = 0; @@ -2325,8 +2324,11 @@ static void rtw_coex_action_wl_linkscan(struct rtw_dev *rtwdev) if (efuse->share_ant) { /* Shared-Ant */ if (coex_stat->bt_a2dp_exist) { slot_type = TDMA_4SLOT; - table_case = 9; tdma_case = 11; + if (coex_stat->wl_gl_busy) + table_case = 26; + else + table_case = 9; } else { table_case = 9; tdma_case = 7; @@ -2646,6 +2648,11 @@ void rtw_coex_power_on_setting(struct rtw_dev *rtwdev) rtw_coex_set_gnt_debug(rtwdev); } +void rtw_coex_power_off_setting(struct rtw_dev *rtwdev) +{ + rtw_write16(rtwdev, REG_WIFI_BT_INFO, BIT_BT_INT_EN); +} + void rtw_coex_init_hw_config(struct rtw_dev *rtwdev, bool wifi_only) { __rtw_coex_init_hw_config(rtwdev, wifi_only); diff --git a/drivers/net/wireless/realtek/rtw88/coex.h b/drivers/net/wireless/realtek/rtw88/coex.h index 8ab9852ec9ed..fc61a0cab3e4 100644 --- a/drivers/net/wireless/realtek/rtw88/coex.h +++ b/drivers/net/wireless/realtek/rtw88/coex.h @@ -393,6 +393,7 @@ void rtw_coex_bt_multi_link_remain_work(struct work_struct *work); void rtw_coex_wl_ccklock_work(struct work_struct *work); void rtw_coex_power_on_setting(struct rtw_dev *rtwdev); +void rtw_coex_power_off_setting(struct rtw_dev *rtwdev); void rtw_coex_init_hw_config(struct rtw_dev *rtwdev, bool wifi_only); void rtw_coex_ips_notify(struct rtw_dev *rtwdev, u8 type); void rtw_coex_lps_notify(struct rtw_dev *rtwdev, u8 type); @@ -405,4 +406,12 @@ void rtw_coex_switchband_notify(struct rtw_dev *rtwdev, u8 type); void rtw_coex_wl_status_change_notify(struct rtw_dev *rtwdev, u32 type); void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m); +static inline bool rtw_coex_disabled(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + + return coex_stat->bt_disabled; +} + #endif diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c index 948cb79050ea..5c44fa87ed2e 100644 --- a/drivers/net/wireless/realtek/rtw88/debug.c +++ b/drivers/net/wireless/realtek/rtw88/debug.c @@ -10,6 +10,7 @@ #include "fw.h" #include "debug.h" #include "phy.h" +#include "reg.h" #ifdef CONFIG_RTW88_DEBUGFS @@ -270,7 +271,7 @@ static ssize_t rtw_debugfs_set_rsvd_page(struct file *filp, if (num != 2) { rtw_warn(rtwdev, "invalid arguments\n"); - return num; + return -EINVAL; } debugfs_priv->rsvd_page.page_offset = offset; @@ -818,6 +819,40 @@ static int rtw_debugfs_get_coex_enable(struct seq_file *m, void *v) return 0; } +static ssize_t rtw_debugfs_set_fw_crash(struct file *filp, + const char __user *buffer, + size_t count, loff_t *loff) +{ + struct seq_file *seqpriv = (struct seq_file *)filp->private_data; + struct rtw_debugfs_priv *debugfs_priv = seqpriv->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + char tmp[32 + 1]; + bool input; + int ret; + + rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 
1); + + ret = kstrtobool(tmp, &input); + if (ret) + return -EINVAL; + + if (!input) + return -EINVAL; + + rtw_write8(rtwdev, REG_HRCV_MSG, 1); + + return count; +} + +static int rtw_debugfs_get_fw_crash(struct seq_file *m, void *v) +{ + struct rtw_debugfs_priv *debugfs_priv = m->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + + seq_printf(m, "%d\n", test_bit(RTW_FLAG_RESTARTING, rtwdev->flags)); + return 0; +} + #define rtw_debug_impl_mac(page, addr) \ static struct rtw_debugfs_priv rtw_debug_priv_mac_ ##page = { \ .cb_read = rtw_debug_get_mac_page, \ @@ -921,6 +956,11 @@ static struct rtw_debugfs_priv rtw_debug_priv_coex_info = { .cb_read = rtw_debugfs_get_coex_info, }; +static struct rtw_debugfs_priv rtw_debug_priv_fw_crash = { + .cb_write = rtw_debugfs_set_fw_crash, + .cb_read = rtw_debugfs_get_fw_crash, +}; + #define rtw_debugfs_add_core(name, mode, fopname, parent) \ do { \ rtw_debug_priv_ ##name.rtwdev = rtwdev; \ @@ -994,6 +1034,7 @@ void rtw_debugfs_init(struct rtw_dev *rtwdev) } rtw_debugfs_add_r(rf_dump); rtw_debugfs_add_r(tx_pwr_tbl); + rtw_debugfs_add_rw(fw_crash); } #endif /* CONFIG_RTW88_DEBUGFS */ diff --git a/drivers/net/wireless/realtek/rtw88/debug.h b/drivers/net/wireless/realtek/rtw88/debug.h index e16e0da26e77..c8efd1900a34 100644 --- a/drivers/net/wireless/realtek/rtw88/debug.h +++ b/drivers/net/wireless/realtek/rtw88/debug.h @@ -19,6 +19,7 @@ enum rtw_debug_mask { RTW_DBG_PS = 0x00000400, RTW_DBG_BF = 0x00000800, RTW_DBG_WOW = 0x00001000, + RTW_DBG_CFO = 0x00002000, RTW_DBG_ALL = 0xffffffff }; diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c index 6649b84f6b1e..c46b34fbe8bf 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.c +++ b/drivers/net/wireless/realtek/rtw88/fw.c @@ -500,6 +500,21 @@ void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool connect) rtw_fw_send_h2c_command(rtwdev, h2c_pkt); } +void rtw_fw_update_wl_phy_info(struct rtw_dev *rtwdev) +{ + struct rtw_traffic_stats *stats = &rtwdev->stats; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WL_PHY_INFO); + SET_WL_PHY_INFO_TX_TP(h2c_pkt, stats->tx_throughput); + SET_WL_PHY_INFO_RX_TP(h2c_pkt, stats->rx_throughput); + SET_WL_PHY_INFO_TX_RATE_DESC(h2c_pkt, dm_info->tx_rate); + SET_WL_PHY_INFO_RX_RATE_DESC(h2c_pkt, dm_info->curr_rx_rate); + SET_WL_PHY_INFO_RX_EVM(h2c_pkt, dm_info->rx_evm_dbm[RF_PATH_A]); + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} + void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev) { struct rtw_lps_conf *conf = &rtwdev->lps_conf; diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h index 39c905c1b1d8..5c89a54475dd 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.h +++ b/drivers/net/wireless/realtek/rtw88/fw.h @@ -345,6 +345,7 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id) #define H2C_CMD_LPS_PG_INFO 0x2b #define H2C_CMD_RA_INFO 0x40 #define H2C_CMD_RSSI_MONITOR 0x42 +#define H2C_CMD_WL_PHY_INFO 0x58 #define H2C_CMD_COEX_TDMA_TYPE 0x60 #define H2C_CMD_QUERY_BT_INFO 0x61 @@ -369,6 +370,17 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id) #define MEDIA_STATUS_RPT_SET_MACID(h2c_pkt, value) \ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16)) +#define SET_WL_PHY_INFO_TX_TP(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(17, 8)) +#define SET_WL_PHY_INFO_RX_TP(h2c_pkt, value) \ + le32p_replace_bits((__le32 
*)(h2c_pkt) + 0x00, value, GENMASK(27, 18)) +#define SET_WL_PHY_INFO_TX_RATE_DESC(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(7, 0)) +#define SET_WL_PHY_INFO_RX_RATE_DESC(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 8)) +#define SET_WL_PHY_INFO_RX_EVM(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(23, 16)) + #define SET_PWR_MODE_SET_MODE(h2c_pkt, value) \ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(14, 8)) #define SET_PWR_MODE_SET_RLBM(h2c_pkt, value) \ @@ -559,6 +571,7 @@ void rtw_fw_bt_wifi_control(struct rtw_dev *rtwdev, u8 op_code, u8 *data); void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si); void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si); void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool conn); +void rtw_fw_update_wl_phy_info(struct rtw_dev *rtwdev); int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr, u8 *buf, u32 size); void rtw_remove_rsvd_page(struct rtw_dev *rtwdev, diff --git a/drivers/net/wireless/realtek/rtw88/hci.h b/drivers/net/wireless/realtek/rtw88/hci.h index 2cba327e6218..4c6fc6fb3f83 100644 --- a/drivers/net/wireless/realtek/rtw88/hci.h +++ b/drivers/net/wireless/realtek/rtw88/hci.h @@ -11,6 +11,7 @@ struct rtw_hci_ops { struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb); void (*tx_kick_off)(struct rtw_dev *rtwdev); + void (*flush_queues)(struct rtw_dev *rtwdev, u32 queues, bool drop); int (*setup)(struct rtw_dev *rtwdev); int (*start)(struct rtw_dev *rtwdev); void (*stop)(struct rtw_dev *rtwdev); @@ -258,4 +259,19 @@ static inline enum rtw_hci_type rtw_hci_type(struct rtw_dev *rtwdev) return rtwdev->hci.type; } +static inline void rtw_hci_flush_queues(struct rtw_dev *rtwdev, u32 queues, + bool drop) +{ + if (rtwdev->hci.ops->flush_queues) + rtwdev->hci.ops->flush_queues(rtwdev, queues, drop); +} + +static inline void rtw_hci_flush_all_queues(struct rtw_dev *rtwdev, bool drop) +{ + if (rtwdev->hci.ops->flush_queues) + rtwdev->hci.ops->flush_queues(rtwdev, + BIT(rtwdev->hw->queues) - 1, + drop); +} + #endif diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c index 59028b121b00..d1678aed9d9c 100644 --- a/drivers/net/wireless/realtek/rtw88/mac.c +++ b/drivers/net/wireless/realtek/rtw88/mac.c @@ -530,6 +530,25 @@ static int iddma_download_firmware(struct rtw_dev *rtwdev, u32 src, u32 dst, return 0; } +int rtw_ddma_to_fw_fifo(struct rtw_dev *rtwdev, u32 ocp_src, u32 size) +{ + u32 ch0_ctrl = BIT_DDMACH0_OWN | BIT_DDMACH0_DDMA_MODE; + + if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0)) { + rtw_dbg(rtwdev, RTW_DBG_FW, "busy to start ddma\n"); + return -EBUSY; + } + + ch0_ctrl |= size & BIT_MASK_DDMACH0_DLEN; + + if (iddma_enable(rtwdev, ocp_src, OCPBASE_RXBUF_FW_88XX, ch0_ctrl)) { + rtw_dbg(rtwdev, RTW_DBG_FW, "busy to complete ddma\n"); + return -EBUSY; + } + + return 0; +} + static bool check_fw_checksum(struct rtw_dev *rtwdev, u32 addr) { diff --git a/drivers/net/wireless/realtek/rtw88/mac.h b/drivers/net/wireless/realtek/rtw88/mac.h index ce64cdf7a565..3172aa5ac4de 100644 --- a/drivers/net/wireless/realtek/rtw88/mac.h +++ b/drivers/net/wireless/realtek/rtw88/mac.h @@ -15,7 +15,10 @@ #define ILLEGAL_KEY_GROUP 0xFAAAAA00 /* HW memory address */ +#define OCPBASE_RXBUF_FW_88XX 0x18680000 #define OCPBASE_TXBUF_88XX 0x18780000 +#define OCPBASE_ROM_88XX 0x00000000 +#define OCPBASE_IMEM_88XX 
0x00030000 #define OCPBASE_DMEM_88XX 0x00200000 #define OCPBASE_EMEM_88XX 0x00100000 @@ -33,6 +36,7 @@ void rtw_mac_power_off(struct rtw_dev *rtwdev); int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw); int rtw_mac_init(struct rtw_dev *rtwdev); void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop); +int rtw_ddma_to_fw_fifo(struct rtw_dev *rtwdev, u32 ocp_src, u32 size); static inline void rtw_mac_flush_all_queues(struct rtw_dev *rtwdev, bool drop) { diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c index 2351dfb0d2e2..333df6b38113 100644 --- a/drivers/net/wireless/realtek/rtw88/mac80211.c +++ b/drivers/net/wireless/realtek/rtw88/mac80211.c @@ -520,6 +520,7 @@ static int rtw_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, hw_key_type, hw_key_idx); break; case DISABLE_KEY: + rtw_hci_flush_all_queues(rtwdev, false); rtw_mac_flush_all_queues(rtwdev, false); rtw_sec_clear_cam(rtwdev, sec, key->hw_key_idx); break; @@ -670,6 +671,7 @@ static void rtw_ops_flush(struct ieee80211_hw *hw, mutex_lock(&rtwdev->mutex); rtw_leave_lps_deep(rtwdev); + rtw_hci_flush_queues(rtwdev, queues, drop); rtw_mac_flush_queues(rtwdev, queues, drop); mutex_unlock(&rtwdev->mutex); } diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index e6989c0525cc..f3a3a86fa9b5 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -345,15 +345,9 @@ static bool rtw_fw_dump_crash_log(struct rtw_dev *rtwdev) "fw crash dump's seq is wrong: %d\n", seq); goto free_buf; } - if (seq == 0 && - (GET_FW_DUMP_TLV_TYPE(buf) != FW_CD_TYPE || - GET_FW_DUMP_TLV_LEN(buf) != FW_CD_LEN || - GET_FW_DUMP_TLV_VAL(buf) != FW_CD_VAL)) { - rtw_dbg(rtwdev, RTW_DBG_FW, "fw crash dump's tlv is wrong\n"); - goto free_buf; - } - print_hex_dump_bytes("rtw88 fw dump: ", DUMP_PREFIX_OFFSET, buf, size); + print_hex_dump(KERN_ERR, "rtw88 fw dump: ", DUMP_PREFIX_OFFSET, 16, 1, + buf, size, true); if (GET_FW_DUMP_MORE(buf) == 1) { rtwdev->fw.prev_dump_seq = seq; @@ -368,6 +362,78 @@ exit: return ret; } +int rtw_dump_fw(struct rtw_dev *rtwdev, const u32 ocp_src, u32 size, + const char *prefix_str) +{ + u32 rxff = rtwdev->chip->fw_rxff_size; + u32 dump_size, done_size = 0; + u8 *buf; + int ret; + + buf = vzalloc(size); + if (!buf) + return -ENOMEM; + + while (size) { + dump_size = size > rxff ? 
rxff : size; + + ret = rtw_ddma_to_fw_fifo(rtwdev, ocp_src + done_size, + dump_size); + if (ret) { + rtw_err(rtwdev, + "ddma fw 0x%x [+0x%x] to fw fifo fail\n", + ocp_src, done_size); + goto exit; + } + + ret = rtw_fw_dump_fifo(rtwdev, RTW_FW_FIFO_SEL_RXBUF_FW, 0, + dump_size, (u32 *)(buf + done_size)); + if (ret) { + rtw_err(rtwdev, + "dump fw 0x%x [+0x%x] from fw fifo fail\n", + ocp_src, done_size); + goto exit; + } + + size -= dump_size; + done_size += dump_size; + } + + print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 1, + buf, done_size, true); + +exit: + vfree(buf); + return ret; +} +EXPORT_SYMBOL(rtw_dump_fw); + +int rtw_dump_reg(struct rtw_dev *rtwdev, const u32 addr, const u32 size, + const char *prefix_str) +{ + u8 *buf; + u32 i; + + if (addr & 0x3) { + WARN(1, "should be 4-byte aligned, addr = 0x%08x\n", addr); + return -EINVAL; + } + + buf = vzalloc(size); + if (!buf) + return -ENOMEM; + + for (i = 0; i < size; i += 4) + *(u32 *)(buf + i) = rtw_read32(rtwdev, addr + i); + + print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, + size, true); + + vfree(buf); + return 0; +} +EXPORT_SYMBOL(rtw_dump_reg); + void rtw_vif_assoc_changed(struct rtw_vif *rtwvif, struct ieee80211_bss_conf *conf) { @@ -419,10 +485,8 @@ void rtw_fw_recovery(struct rtw_dev *rtwdev) ieee80211_queue_work(rtwdev->hw, &rtwdev->fw_recovery_work); } -static void rtw_fw_recovery_work(struct work_struct *work) +static void __fw_recovery_work(struct rtw_dev *rtwdev) { - struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, - fw_recovery_work); /* rtw_fw_dump_crash_log() returning false indicates that there is * still more log to dump. The driver sets 0x1cf[7:0] = 0x1 to tell firmware @@ -435,18 +499,26 @@ } rtwdev->fw.prev_dump_seq = 0; - WARN(1, "firmware crash, start reset and recover\n"); + set_bit(RTW_FLAG_RESTARTING, rtwdev->flags); + rtw_chip_dump_fw_crash(rtwdev); - mutex_lock(&rtwdev->mutex); + WARN(1, "firmware crash, start reset and recover\n"); - set_bit(RTW_FLAG_RESTARTING, rtwdev->flags); rcu_read_lock(); rtw_iterate_keys_rcu(rtwdev, NULL, rtw_reset_key_iter, rtwdev); rcu_read_unlock(); rtw_iterate_stas_atomic(rtwdev, rtw_reset_sta_iter, rtwdev); rtw_iterate_vifs_atomic(rtwdev, rtw_reset_vif_iter, rtwdev); rtw_enter_ips(rtwdev); +} +static void rtw_fw_recovery_work(struct work_struct *work) +{ + struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, + fw_recovery_work); + + mutex_lock(&rtwdev->mutex); + __fw_recovery_work(rtwdev); mutex_unlock(&rtwdev->mutex); ieee80211_restart_hw(rtwdev->hw); @@ -1138,6 +1210,7 @@ int rtw_core_start(struct rtw_dev *rtwdev) static void rtw_power_off(struct rtw_dev *rtwdev) { rtw_hci_stop(rtwdev); + rtw_coex_power_off_setting(rtwdev); rtw_mac_power_off(rtwdev); } @@ -1393,7 +1466,6 @@ static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev) struct rtw_chip_info *chip = rtwdev->chip; struct rtw_hal *hal = &rtwdev->hal; struct rtw_efuse *efuse = &rtwdev->efuse; - int ret = 0; switch (rtw_hci_type(rtwdev)) { case RTW_HCI_TYPE_PCIE: @@ -1431,7 +1503,7 @@ static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev) hal->bfee_sts_cap = 3; - return ret; + return 0; } static int rtw_chip_efuse_enable(struct rtw_dev *rtwdev) diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h index 35afea91fd29..56a19b5a00fc 100644 --- a/drivers/net/wireless/realtek/rtw88/main.h +++ b/drivers/net/wireless/realtek/rtw88/main.h @@ -625,6 +625,7 @@ struct 
rtw_rx_pkt_stat { struct rtw_sta_info *si; struct ieee80211_vif *vif; + struct ieee80211_hdr *hdr; }; DECLARE_EWMA(tp, 10, 2); @@ -805,6 +806,7 @@ struct rtw_regulatory { struct rtw_chip_ops { int (*mac_init)(struct rtw_dev *rtwdev); + void (*dump_fw_crash)(struct rtw_dev *rtwdev); void (*shutdown)(struct rtw_dev *rtwdev); int (*read_efuse)(struct rtw_dev *rtwdev, u8 *map); void (*phy_set_param)(struct rtw_dev *rtwdev); @@ -837,6 +839,8 @@ struct rtw_chip_ops { struct ieee80211_bss_conf *conf); void (*cfg_csi_rate)(struct rtw_dev *rtwdev, u8 rssi, u8 cur_rate, u8 fixrate_en, u8 *new_rate); + void (*cfo_init)(struct rtw_dev *rtwdev); + void (*cfo_track)(struct rtw_dev *rtwdev); /* for coex */ void (*coex_set_init)(struct rtw_dev *rtwdev); @@ -1166,6 +1170,7 @@ struct rtw_chip_info { bool en_dis_dpd; u16 dpd_ratemask; u8 iqk_threshold; + u8 lck_threshold; const struct rtw_pwr_track_tbl *pwr_track_tbl; u8 bfer_su_max_num; @@ -1497,6 +1502,15 @@ struct rtw_iqk_info { } result; }; +struct rtw_cfo_track { + bool is_adjust; + u8 crystal_cap; + s32 cfo_tail[RTW_RF_PATH_MAX]; + s32 cfo_cnt[RTW_RF_PATH_MAX]; + u32 packet_count; + u32 packet_count_pre; +}; + #define RRSR_INIT_2G 0x15f #define RRSR_INIT_5G 0x150 @@ -1534,6 +1548,7 @@ struct rtw_dm_info { u32 rrsr_mask_min; u8 thermal_avg[RTW_RF_PATH_MAX]; u8 thermal_meter_k; + u8 thermal_meter_lck; s8 delta_power_index[RTW_RF_PATH_MAX]; s8 delta_power_index_last[RTW_RF_PATH_MAX]; u8 default_ofdm_index; @@ -1549,6 +1564,7 @@ struct rtw_dm_info { u8 dack_dck[RTW_RF_PATH_MAX][2][DACK_DCK_BACKUP_NUM]; struct rtw_dpk_info dpk_info; + struct rtw_cfo_track cfo_track; /* [bandwidth 0:20M/1:40M][number of path] */ u8 cck_pd_lv[2][RTW_RF_PATH_MAX]; @@ -1876,6 +1892,12 @@ static inline void rtw_release_macid(struct rtw_dev *rtwdev, u8 mac_id) clear_bit(mac_id, rtwdev->mac_id_map); } +static inline void rtw_chip_dump_fw_crash(struct rtw_dev *rtwdev) +{ + if (rtwdev->chip->ops->dump_fw_crash) + rtwdev->chip->ops->dump_fw_crash(rtwdev); +} + void rtw_get_channel_params(struct cfg80211_chan_def *chandef, struct rtw_channel_params *ch_param); bool check_hw_ready(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target); @@ -1905,5 +1927,9 @@ int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta, void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta, bool fw_exist); void rtw_fw_recovery(struct rtw_dev *rtwdev); +int rtw_dump_fw(struct rtw_dev *rtwdev, const u32 ocp_src, u32 size, + const char *prefix_str); +int rtw_dump_reg(struct rtw_dev *rtwdev, const u32 addr, const u32 size, + const char *prefix_str); #endif diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c index 786a48649946..b8115b31839e 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.c +++ b/drivers/net/wireless/realtek/rtw88/pci.c @@ -671,6 +671,8 @@ static u8 ac_to_hwq[] = { [IEEE80211_AC_BK] = RTW_TX_QUEUE_BK, }; +static_assert(ARRAY_SIZE(ac_to_hwq) == IEEE80211_NUM_ACS); + static u8 rtw_hw_queue_mapping(struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; @@ -727,6 +729,72 @@ static void rtw_pci_dma_check(struct rtw_dev *rtwdev, rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX; } +static u32 __pci_get_hw_tx_ring_rp(struct rtw_dev *rtwdev, u8 pci_q) +{ + u32 bd_idx_addr = rtw_pci_tx_queue_idx_addr[pci_q]; + u32 bd_idx = rtw_read16(rtwdev, bd_idx_addr + 2); + + return FIELD_GET(TRX_BD_IDX_MASK, bd_idx); +} + +static void __pci_flush_queue(struct rtw_dev *rtwdev, u8 pci_q, bool drop) +{ + struct 
rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + struct rtw_pci_tx_ring *ring = &rtwpci->tx_rings[pci_q]; + u32 cur_rp; + u8 i; + + /* Because the time taken by the I/O in __pci_get_hw_tx_ring_rp is a + * bit dynamic, it's hard to define a reasonable fixed total timeout to + * use a read_poll_timeout* helper. Instead, we can cap the number of + * polling iterations, so we just use a for loop with udelay() here. + */ + for (i = 0; i < 30; i++) { + cur_rp = __pci_get_hw_tx_ring_rp(rtwdev, pci_q); + if (cur_rp == ring->r.wp) + return; + + udelay(1); + } + + if (!drop) + rtw_warn(rtwdev, "timed out to flush pci tx ring[%d]\n", pci_q); +} + +static void __rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 pci_queues, + bool drop) +{ + u8 q; + + for (q = 0; q < RTK_MAX_TX_QUEUE_NUM; q++) { + /* It may not be necessary to flush the BCN and H2C tx queues. */ + if (q == RTW_TX_QUEUE_BCN || q == RTW_TX_QUEUE_H2C) + continue; + + if (pci_queues & BIT(q)) + __pci_flush_queue(rtwdev, q, drop); + } +} + +static void rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop) +{ + u32 pci_queues = 0; + u8 i; + + /* If all of the hardware queues are requested to be flushed, + * flush all of the pci queues. + */ + if (queues == BIT(rtwdev->hw->queues) - 1) { + pci_queues = BIT(RTK_MAX_TX_QUEUE_NUM) - 1; + } else { + for (i = 0; i < rtwdev->hw->queues; i++) + if (queues & BIT(i)) + pci_queues |= BIT(ac_to_hwq[i]); + } + + __rtw_pci_flush_queues(rtwdev, pci_queues, drop); +} + static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev, u8 queue) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; @@ -1490,6 +1558,7 @@ static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev) static struct rtw_hci_ops rtw_pci_ops = { .tx_write = rtw_pci_tx_write, .tx_kick_off = rtw_pci_tx_kick_off, + .flush_queues = rtw_pci_flush_queues, .setup = rtw_pci_setup, .start = rtw_pci_start, .stop = rtw_pci_stop, diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c index e114ddecac09..8146acaf1893 100644 --- a/drivers/net/wireless/realtek/rtw88/phy.c +++ b/drivers/net/wireless/realtek/rtw88/phy.c @@ -119,6 +119,14 @@ static void rtw_phy_cck_pd_init(struct rtw_dev *rtwdev) dm_info->cck_fa_avg = CCK_FA_AVG_RESET; } +static void rtw_phy_cfo_init(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + + if (chip->ops->cfo_init) + chip->ops->cfo_init(rtwdev); +} + void rtw_phy_init(struct rtw_dev *rtwdev) { struct rtw_chip_info *chip = rtwdev->chip; @@ -140,6 +148,7 @@ void rtw_phy_init(struct rtw_dev *rtwdev) rtw_phy_cck_pd_init(rtwdev); dm_info->iqk.done = false; + rtw_phy_cfo_init(rtwdev); } EXPORT_SYMBOL(rtw_phy_init); @@ -316,7 +325,8 @@ rtw_phy_dig_check_damping(struct rtw_dm_info *dm_info) return damping; } -static void rtw_phy_dig_get_boundary(struct rtw_dm_info *dm_info, +static void rtw_phy_dig_get_boundary(struct rtw_dev *rtwdev, + struct rtw_dm_info *dm_info, u8 *upper, u8 *lower, bool linked) { u8 dig_max, dig_min, dig_mid; @@ -325,8 +335,7 @@ static void rtw_phy_dig_get_boundary(struct rtw_dm_info *dm_info, if (linked) { dig_max = DIG_PERF_MAX; dig_mid = DIG_PERF_MID; - /* 22B=0x1c, 22C=0x20 */ - dig_min = 0x1c; + dig_min = rtwdev->chip->dig_min; min_rssi = max_t(u8, dm_info->min_rssi, dig_min); } else { dig_max = DIG_CVRG_MAX; @@ -437,7 +446,8 @@ static void rtw_phy_dig(struct rtw_dev *rtwdev) * the peers connected with us, meanwhile make sure the igi value does * not go beyond the hardware limitation */ - rtw_phy_dig_get_boundary(dm_info, 
&upper_bound, &lower_bound, linked); + rtw_phy_dig_get_boundary(rtwdev, dm_info, &upper_bound, &lower_bound, + linked); cur_igi = clamp_t(u8, cur_igi, lower_bound, upper_bound); /* record current igi value and false alarm statistics for further @@ -527,6 +537,62 @@ static void rtw_phy_dpk_track(struct rtw_dev *rtwdev) chip->ops->dpk_track(rtwdev); } +struct rtw_rx_addr_match_data { + struct rtw_dev *rtwdev; + struct ieee80211_hdr *hdr; + struct rtw_rx_pkt_stat *pkt_stat; + u8 *bssid; +}; + +static void rtw_phy_parsing_cfo_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct rtw_rx_addr_match_data *iter_data = data; + struct rtw_dev *rtwdev = iter_data->rtwdev; + struct rtw_rx_pkt_stat *pkt_stat = iter_data->pkt_stat; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_cfo_track *cfo = &dm_info->cfo_track; + u8 *bssid = iter_data->bssid; + u8 i; + + if (!ether_addr_equal(vif->bss_conf.bssid, bssid)) + return; + + for (i = 0; i < rtwdev->hal.rf_path_num; i++) { + cfo->cfo_tail[i] += pkt_stat->cfo_tail[i]; + cfo->cfo_cnt[i]++; + } + + cfo->packet_count++; +} + +void rtw_phy_parsing_cfo(struct rtw_dev *rtwdev, + struct rtw_rx_pkt_stat *pkt_stat) +{ + struct ieee80211_hdr *hdr = pkt_stat->hdr; + struct rtw_rx_addr_match_data data = {}; + + if (pkt_stat->crc_err || pkt_stat->icv_err || !pkt_stat->phy_status || + ieee80211_is_ctl(hdr->frame_control)) + return; + + data.rtwdev = rtwdev; + data.hdr = hdr; + data.pkt_stat = pkt_stat; + data.bssid = get_hdr_bssid(hdr); + + rtw_iterate_vifs_atomic(rtwdev, rtw_phy_parsing_cfo_iter, &data); +} +EXPORT_SYMBOL(rtw_phy_parsing_cfo); + +static void rtw_phy_cfo_track(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + + if (chip->ops->cfo_track) + chip->ops->cfo_track(rtwdev); +} + #define CCK_PD_FA_LV1_MIN 1000 #define CCK_PD_FA_LV0_MAX 500 @@ -617,6 +683,7 @@ static void rtw_phy_pwr_track(struct rtw_dev *rtwdev) static void rtw_phy_ra_track(struct rtw_dev *rtwdev) { + rtw_fw_update_wl_phy_info(rtwdev); rtw_phy_ra_info_update(rtwdev); rtw_phy_rrsr_update(rtwdev); } @@ -628,6 +695,7 @@ void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev) rtw_phy_dig(rtwdev); rtw_phy_cck_pd(rtwdev); rtw_phy_ra_track(rtwdev); + rtw_phy_cfo_track(rtwdev); rtw_phy_dpk_track(rtwdev); rtw_phy_pwr_track(rtwdev); } @@ -1584,7 +1652,7 @@ void rtw_phy_load_tables(struct rtw_dev *rtwdev) } EXPORT_SYMBOL(rtw_phy_load_tables); -static u8 rtw_get_channel_group(u8 channel) +static u8 rtw_get_channel_group(u8 channel, u8 rate) { switch (channel) { default: @@ -1628,6 +1696,7 @@ static u8 rtw_get_channel_group(u8 channel) case 106: return 4; case 14: + return rate <= DESC_RATE11M ? 
5 : 4; case 108: case 110: case 112: @@ -1879,7 +1948,7 @@ void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw, s8 *remnant = &pwr_param->pwr_remnant; pwr_idx = &rtwdev->efuse.txpwr_idx_table[path]; - group = rtw_get_channel_group(ch); + group = rtw_get_channel_group(ch, rate); /* base power index for 2.4G/5G */ if (IS_CH_2G_BAND(ch)) { @@ -2219,6 +2288,20 @@ s8 rtw_phy_pwrtrack_get_pwridx(struct rtw_dev *rtwdev, } EXPORT_SYMBOL(rtw_phy_pwrtrack_get_pwridx); +bool rtw_phy_pwrtrack_need_lck(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u8 delta_lck; + + delta_lck = abs(dm_info->thermal_avg[0] - dm_info->thermal_meter_lck); + if (delta_lck >= rtwdev->chip->lck_threshold) { + dm_info->thermal_meter_lck = dm_info->thermal_avg[0]; + return true; + } + return false; +} +EXPORT_SYMBOL(rtw_phy_pwrtrack_need_lck); + bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev) { struct rtw_dm_info *dm_info = &rtwdev->dm_info; diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h index a4fcfb878550..0b6f2fc8193c 100644 --- a/drivers/net/wireless/realtek/rtw88/phy.h +++ b/drivers/net/wireless/realtek/rtw88/phy.h @@ -55,9 +55,12 @@ u8 rtw_phy_pwrtrack_get_delta(struct rtw_dev *rtwdev, u8 path); s8 rtw_phy_pwrtrack_get_pwridx(struct rtw_dev *rtwdev, struct rtw_swing_table *swing_table, u8 tbl_path, u8 therm_path, u8 delta); +bool rtw_phy_pwrtrack_need_lck(struct rtw_dev *rtwdev); bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev); void rtw_phy_config_swing_table(struct rtw_dev *rtwdev, struct rtw_swing_table *swing_table); +void rtw_phy_parsing_cfo(struct rtw_dev *rtwdev, + struct rtw_rx_pkt_stat *pkt_stat); struct rtw_txpwr_lmt_cfg_pair { u8 regd; diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h index ea518aa78552..95d29f522e27 100644 --- a/drivers/net/wireless/realtek/rtw88/reg.h +++ b/drivers/net/wireless/realtek/rtw88/reg.h @@ -516,6 +516,7 @@ #define BIT_RFE_BUF_EN BIT(3) #define REG_ANAPAR_XTAL_0 0x1040 +#define BIT_XCAP_0 GENMASK(23, 10) #define REG_CPU_DMEM_CON 0x1080 #define BIT_WL_PLATFORM_RST BIT(16) #define BIT_WL_SECURITY_CLK BIT(15) @@ -534,6 +535,7 @@ #define BIT_DDMACH0_OWN BIT(31) #define BIT_DDMACH0_CHKSUM_EN BIT(29) #define BIT_DDMACH0_CHKSUM_STS BIT(27) +#define BIT_DDMACH0_DDMA_MODE BIT(26) #define BIT_DDMACH0_RESET_CHKSUM_STS BIT(25) #define BIT_DDMACH0_CHKSUM_CONT BIT(24) #define BIT_MASK_DDMACH0_DLEN 0x3ffff @@ -652,8 +654,13 @@ #define RF_TXATANK 0x64 #define RF_TRXIQ 0x66 #define RF_RXIQGEN 0x8d +#define RF_SYN_PFD 0xb0 #define RF_XTALX2 0xb8 +#define RF_SYN_CTRL 0xbb #define RF_MALSEL 0xbe +#define RF_SYN_AAC 0xc9 +#define RF_AAC_CTRL 0xca +#define RF_FAST_LCK 0xcc #define RF_RCKD 0xde #define RF_TXADBG 0xde #define RF_LUTDBG 0xdf diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c index dd560c28abb2..9f05c60c8a03 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c @@ -17,6 +17,7 @@ #include "util.h" #include "bf.h" #include "efuse.h" +#include "coex.h" #define IQK_DONE_8822C 0xaa @@ -39,7 +40,7 @@ static int rtw8822c_read_efuse(struct rtw_dev *rtwdev, u8 *log_map) efuse->rfe_option = map->rfe_option; efuse->rf_board_option = map->rf_board_option; - efuse->crystal_cap = map->xtal_k; + efuse->crystal_cap = map->xtal_k & XCAP_MASK; efuse->channel_plan = map->channel_plan; efuse->country_code[0] = map->country_code[0]; 
efuse->country_code[1] = map->country_code[1]; @@ -1126,6 +1127,7 @@ static void rtw8822c_pwrtrack_init(struct rtw_dev *rtwdev) dm_info->pwr_trk_triggered = false; dm_info->thermal_meter_k = rtwdev->efuse.thermal_meter_k; + dm_info->thermal_meter_lck = rtwdev->efuse.thermal_meter_k; } static void rtw8822c_phy_set_param(struct rtw_dev *rtwdev) @@ -1396,6 +1398,15 @@ static int rtw8822c_mac_init(struct rtw_dev *rtwdev) return 0; } +static void rtw8822c_dump_fw_crash(struct rtw_dev *rtwdev) +{ + rtw_dump_reg(rtwdev, 0x0, 0x2000, "rtw8822c reg_"); + rtw_dump_fw(rtwdev, OCPBASE_DMEM_88XX, 0x10000, "rtw8822c DMEM_"); + rtw_dump_fw(rtwdev, OCPBASE_IMEM_88XX, 0x10000, "rtw8822c IMEM_"); + rtw_dump_fw(rtwdev, OCPBASE_EMEM_88XX, 0x20000, "rtw8822c EMEM_"); + rtw_dump_fw(rtwdev, OCPBASE_ROM_88XX, 0x10000, "rtw8822c ROM_"); +} + static void rtw8822c_rstb_3wire(struct rtw_dev *rtwdev, bool enable) { if (enable) { @@ -1856,6 +1867,7 @@ static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status, } dm_info->rx_evm_dbm[path] = evm_dbm; } + rtw_phy_parsing_cfo(rtwdev, pkt_stat); } static void query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status, @@ -1911,6 +1923,7 @@ static void rtw8822c_query_rx_desc(struct rtw_dev *rtwdev, u8 *rx_desc, hdr = (struct ieee80211_hdr *)(rx_desc + desc_sz + pkt_stat->shift + pkt_stat->drv_info_sz); + pkt_stat->hdr = hdr; if (pkt_stat->phy_status) { phy_status = rx_desc + desc_sz + pkt_stat->shift; query_phy_status(rtwdev, phy_status, pkt_stat); @@ -2108,6 +2121,26 @@ static void rtw8822c_false_alarm_statistics(struct rtw_dev *rtwdev) rtw_write32_set(rtwdev, REG_RX_BREAK, BIT_COM_RX_GCK_EN); } +static void rtw8822c_do_lck(struct rtw_dev *rtwdev) +{ + u32 val; + + rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_CTRL, RFREG_MASK, 0x80010); + rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_PFD, RFREG_MASK, 0x1F0FA); + fsleep(1); + rtw_write_rf(rtwdev, RF_PATH_A, RF_AAC_CTRL, RFREG_MASK, 0x80000); + rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_AAC, RFREG_MASK, 0x80001); + read_poll_timeout(rtw_read_rf, val, val != 0x1, 1000, 100000, + true, rtwdev, RF_PATH_A, RF_AAC_CTRL, 0x1000); + rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_PFD, RFREG_MASK, 0x1F0F8); + rtw_write_rf(rtwdev, RF_PATH_B, RF_SYN_CTRL, RFREG_MASK, 0x80010); + + rtw_write_rf(rtwdev, RF_PATH_A, RF_FAST_LCK, RFREG_MASK, 0x0f000); + rtw_write_rf(rtwdev, RF_PATH_A, RF_FAST_LCK, RFREG_MASK, 0x4f000); + fsleep(1); + rtw_write_rf(rtwdev, RF_PATH_A, RF_FAST_LCK, RFREG_MASK, 0x0f000); +} + static void rtw8822c_do_iqk(struct rtw_dev *rtwdev) { struct rtw_iqk_para para = {0}; @@ -3406,6 +3439,128 @@ static void rtw8822c_dpk_track(struct rtw_dev *rtwdev) } } +#define XCAP_EXTEND(val) ({typeof(val) _v = (val); _v | _v << 7; }) +static void rtw8822c_set_crystal_cap_reg(struct rtw_dev *rtwdev, u8 crystal_cap) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_cfo_track *cfo = &dm_info->cfo_track; + u32 val = 0; + + val = XCAP_EXTEND(crystal_cap); + cfo->crystal_cap = crystal_cap; + rtw_write32_mask(rtwdev, REG_ANAPAR_XTAL_0, BIT_XCAP_0, val); +} + +static void rtw8822c_set_crystal_cap(struct rtw_dev *rtwdev, u8 crystal_cap) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_cfo_track *cfo = &dm_info->cfo_track; + + if (cfo->crystal_cap == crystal_cap) + return; + + rtw8822c_set_crystal_cap_reg(rtwdev, crystal_cap); +} + +static void rtw8822c_cfo_tracking_reset(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_cfo_track *cfo = &dm_info->cfo_track; + + cfo->is_adjust = true; + + 
if (cfo->crystal_cap > rtwdev->efuse.crystal_cap) + rtw8822c_set_crystal_cap(rtwdev, cfo->crystal_cap - 1); + else if (cfo->crystal_cap < rtwdev->efuse.crystal_cap) + rtw8822c_set_crystal_cap(rtwdev, cfo->crystal_cap + 1); +} + +static void rtw8822c_cfo_init(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_cfo_track *cfo = &dm_info->cfo_track; + + cfo->crystal_cap = rtwdev->efuse.crystal_cap; + cfo->is_adjust = true; +} + +#define REPORT_TO_KHZ(val) ({typeof(val) _v = (val); (_v << 1) + (_v >> 1); }) +static s32 rtw8822c_cfo_calc_avg(struct rtw_dev *rtwdev, u8 path_num) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_cfo_track *cfo = &dm_info->cfo_track; + s32 cfo_avg, cfo_path_sum = 0, cfo_rpt_sum; + u8 i; + + for (i = 0; i < path_num; i++) { + cfo_rpt_sum = REPORT_TO_KHZ(cfo->cfo_tail[i]); + + if (cfo->cfo_cnt[i]) + cfo_avg = cfo_rpt_sum / cfo->cfo_cnt[i]; + else + cfo_avg = 0; + + cfo_path_sum += cfo_avg; + } + + for (i = 0; i < path_num; i++) { + cfo->cfo_tail[i] = 0; + cfo->cfo_cnt[i] = 0; + } + + return cfo_path_sum / path_num; +} + +static void rtw8822c_cfo_need_adjust(struct rtw_dev *rtwdev, s32 cfo_avg) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_cfo_track *cfo = &dm_info->cfo_track; + + if (!cfo->is_adjust) { + if (abs(cfo_avg) > CFO_TRK_ENABLE_TH) + cfo->is_adjust = true; + } else { + if (abs(cfo_avg) <= CFO_TRK_STOP_TH) + cfo->is_adjust = false; + } + + if (!rtw_coex_disabled(rtwdev)) { + cfo->is_adjust = false; + rtw8822c_set_crystal_cap(rtwdev, rtwdev->efuse.crystal_cap); + } +} + +static void rtw8822c_cfo_track(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_cfo_track *cfo = &dm_info->cfo_track; + u8 path_num = rtwdev->hal.rf_path_num; + s8 crystal_cap = cfo->crystal_cap; + s32 cfo_avg = 0; + + if (rtwdev->sta_cnt != 1) { + rtw8822c_cfo_tracking_reset(rtwdev); + return; + } + + if (cfo->packet_count == cfo->packet_count_pre) + return; + + cfo->packet_count_pre = cfo->packet_count; + cfo_avg = rtw8822c_cfo_calc_avg(rtwdev, path_num); + rtw8822c_cfo_need_adjust(rtwdev, cfo_avg); + + if (cfo->is_adjust) { + if (cfo_avg > CFO_TRK_ADJ_TH) + crystal_cap++; + else if (cfo_avg < -CFO_TRK_ADJ_TH) + crystal_cap--; + + crystal_cap = clamp_t(s8, crystal_cap, 0, XCAP_MASK); + rtw8822c_set_crystal_cap(rtwdev, (u8)crystal_cap); + } +} + static const struct rtw_phy_cck_pd_reg rtw8822c_cck_pd_reg[RTW_CHANNEL_WIDTH_40 + 1][RTW_RF_PATH_MAX] = { { @@ -3538,11 +3693,12 @@ static void __rtw8822c_pwr_track(struct rtw_dev *rtwdev) rtw_phy_config_swing_table(rtwdev, &swing_table); + if (rtw_phy_pwrtrack_need_lck(rtwdev)) + rtw8822c_do_lck(rtwdev); + for (i = 0; i < rtwdev->hal.rf_path_num; i++) rtw8822c_pwr_track_path(rtwdev, &swing_table, i); - if (rtw_phy_pwrtrack_need_iqk(rtwdev)) - rtw8822c_do_iqk(rtwdev); } static void rtw8822c_pwr_track(struct rtw_dev *rtwdev) @@ -3971,6 +4127,7 @@ static struct rtw_chip_ops rtw8822c_ops = { .query_rx_desc = rtw8822c_query_rx_desc, .set_channel = rtw8822c_set_channel, .mac_init = rtw8822c_mac_init, + .dump_fw_crash = rtw8822c_dump_fw_crash, .read_rf = rtw_phy_read_rf, .write_rf = rtw_phy_write_rf_reg_mix, .set_tx_power_index = rtw8822c_set_tx_power_index, @@ -3984,6 +4141,8 @@ static struct rtw_chip_ops rtw8822c_ops = { .config_bfee = rtw8822c_bf_config_bfee, .set_gid_table = rtw_bf_set_gid_table, .cfg_csi_rate = rtw_bf_cfg_csi_rate, + .cfo_init = rtw8822c_cfo_init, + .cfo_track = rtw8822c_cfo_track, .coex_set_init = 
rtw8822c_coex_cfg_init, .coex_set_ant_switch = NULL, @@ -4351,6 +4510,7 @@ struct rtw_chip_info rtw8822c_hw_spec = { .dpd_ratemask = DIS_DPD_RATEALL, .pwr_track_tbl = &rtw8822c_rtw_pwr_track_tbl, .iqk_threshold = 8, + .lck_threshold = 8, .bfer_su_max_num = 2, .bfer_mu_max_num = 1, .rx_ldpc = true, @@ -4360,7 +4520,7 @@ struct rtw_chip_info rtw8822c_hw_spec = { .wowlan_stub = &rtw_wowlan_stub_8822c, .max_sched_scan_ssids = 4, #endif - .coex_para_ver = 0x201029, + .coex_para_ver = 0x2103181c, .bt_desired_ver = 0x1c, .scbd_support = true, .new_scbd10_def = true, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.h b/drivers/net/wireless/realtek/rtw88/rtw8822c.h index bb2495b8609e..e2b134ce0b3f 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.h +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.h @@ -165,6 +165,11 @@ const struct rtw_table name ## _tbl = { \ #define REG_ANAPARLDO_POW_MAC 0x0029 #define BIT_LDOE25_PON BIT(0) +#define XCAP_MASK GENMASK(6, 0) +#define CFO_TRK_ENABLE_TH 20 +#define CFO_TRK_STOP_TH 10 +#define CFO_TRK_ADJ_TH 10 + #define REG_TXDFIR0 0x808 #define REG_DFIRBW 0x810 #define REG_ANTMAP0 0x820 diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c index ad5715c65de3..822f3da91f1b 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c @@ -40863,7 +40863,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 8, 1, 0, 1, 144, 76, }, { 9, 1, 0, 1, 144, 127, }, { 0, 1, 0, 1, 149, 76, }, - { 2, 1, 0, 1, 149, -128, }, + { 2, 1, 0, 1, 149, 54, }, { 1, 1, 0, 1, 149, 127, }, { 3, 1, 0, 1, 149, 76, }, { 4, 1, 0, 1, 149, 74, }, @@ -40871,9 +40871,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 1, 149, 76, }, { 7, 1, 0, 1, 149, 54, }, { 8, 1, 0, 1, 149, 76, }, - { 9, 1, 0, 1, 149, -128, }, + { 9, 1, 0, 1, 149, 54, }, { 0, 1, 0, 1, 153, 76, }, - { 2, 1, 0, 1, 153, -128, }, + { 2, 1, 0, 1, 153, 54, }, { 1, 1, 0, 1, 153, 127, }, { 3, 1, 0, 1, 153, 76, }, { 4, 1, 0, 1, 153, 74, }, @@ -40881,9 +40881,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 1, 153, 76, }, { 7, 1, 0, 1, 153, 54, }, { 8, 1, 0, 1, 153, 76, }, - { 9, 1, 0, 1, 153, -128, }, + { 9, 1, 0, 1, 153, 54, }, { 0, 1, 0, 1, 157, 76, }, - { 2, 1, 0, 1, 157, -128, }, + { 2, 1, 0, 1, 157, 54, }, { 1, 1, 0, 1, 157, 127, }, { 3, 1, 0, 1, 157, 76, }, { 4, 1, 0, 1, 157, 74, }, @@ -40891,9 +40891,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 1, 157, 76, }, { 7, 1, 0, 1, 157, 54, }, { 8, 1, 0, 1, 157, 76, }, - { 9, 1, 0, 1, 157, -128, }, + { 9, 1, 0, 1, 157, 54, }, { 0, 1, 0, 1, 161, 76, }, - { 2, 1, 0, 1, 161, -128, }, + { 2, 1, 0, 1, 161, 54, }, { 1, 1, 0, 1, 161, 127, }, { 3, 1, 0, 1, 161, 76, }, { 4, 1, 0, 1, 161, 74, }, @@ -40901,9 +40901,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 1, 161, 76, }, { 7, 1, 0, 1, 161, 54, }, { 8, 1, 0, 1, 161, 76, }, - { 9, 1, 0, 1, 161, -128, }, + { 9, 1, 0, 1, 161, 54, }, { 0, 1, 0, 1, 165, 76, }, - { 2, 1, 0, 1, 165, -128, }, + { 2, 1, 0, 1, 165, 54, }, { 1, 1, 0, 1, 165, 127, }, { 3, 1, 0, 1, 165, 76, }, { 4, 1, 0, 1, 165, 74, }, @@ -40911,7 +40911,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 1, 165, 76, }, { 7, 1, 0, 1, 165, 54, }, { 8, 1, 0, 1, 165, 76, }, - { 9, 1, 0, 1, 165, -128, }, + { 9, 1, 0, 1, 165, 54, }, 
{ 0, 1, 0, 2, 36, 72, }, { 2, 1, 0, 2, 36, 62, }, { 1, 1, 0, 2, 36, 62, }, @@ -41113,7 +41113,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 8, 1, 0, 2, 144, 76, }, { 9, 1, 0, 2, 144, 127, }, { 0, 1, 0, 2, 149, 76, }, - { 2, 1, 0, 2, 149, -128, }, + { 2, 1, 0, 2, 149, 54, }, { 1, 1, 0, 2, 149, 127, }, { 3, 1, 0, 2, 149, 76, }, { 4, 1, 0, 2, 149, 74, }, @@ -41121,9 +41121,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 2, 149, 76, }, { 7, 1, 0, 2, 149, 54, }, { 8, 1, 0, 2, 149, 76, }, - { 9, 1, 0, 2, 149, -128, }, + { 9, 1, 0, 2, 149, 54, }, { 0, 1, 0, 2, 153, 76, }, - { 2, 1, 0, 2, 153, -128, }, + { 2, 1, 0, 2, 153, 54, }, { 1, 1, 0, 2, 153, 127, }, { 3, 1, 0, 2, 153, 76, }, { 4, 1, 0, 2, 153, 74, }, @@ -41131,9 +41131,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 2, 153, 76, }, { 7, 1, 0, 2, 153, 54, }, { 8, 1, 0, 2, 153, 76, }, - { 9, 1, 0, 2, 153, -128, }, + { 9, 1, 0, 2, 153, 54, }, { 0, 1, 0, 2, 157, 76, }, - { 2, 1, 0, 2, 157, -128, }, + { 2, 1, 0, 2, 157, 54, }, { 1, 1, 0, 2, 157, 127, }, { 3, 1, 0, 2, 157, 76, }, { 4, 1, 0, 2, 157, 74, }, @@ -41141,9 +41141,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 2, 157, 76, }, { 7, 1, 0, 2, 157, 54, }, { 8, 1, 0, 2, 157, 76, }, - { 9, 1, 0, 2, 157, -128, }, + { 9, 1, 0, 2, 157, 54, }, { 0, 1, 0, 2, 161, 76, }, - { 2, 1, 0, 2, 161, -128, }, + { 2, 1, 0, 2, 161, 54, }, { 1, 1, 0, 2, 161, 127, }, { 3, 1, 0, 2, 161, 76, }, { 4, 1, 0, 2, 161, 74, }, @@ -41151,9 +41151,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 2, 161, 76, }, { 7, 1, 0, 2, 161, 54, }, { 8, 1, 0, 2, 161, 76, }, - { 9, 1, 0, 2, 161, -128, }, + { 9, 1, 0, 2, 161, 54, }, { 0, 1, 0, 2, 165, 76, }, - { 2, 1, 0, 2, 165, -128, }, + { 2, 1, 0, 2, 165, 54, }, { 1, 1, 0, 2, 165, 127, }, { 3, 1, 0, 2, 165, 76, }, { 4, 1, 0, 2, 165, 74, }, @@ -41161,7 +41161,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 2, 165, 76, }, { 7, 1, 0, 2, 165, 54, }, { 8, 1, 0, 2, 165, 76, }, - { 9, 1, 0, 2, 165, -128, }, + { 9, 1, 0, 2, 165, 54, }, { 0, 1, 0, 3, 36, 68, }, { 2, 1, 0, 3, 36, 38, }, { 1, 1, 0, 3, 36, 50, }, @@ -41363,7 +41363,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 8, 1, 0, 3, 144, 68, }, { 9, 1, 0, 3, 144, 127, }, { 0, 1, 0, 3, 149, 76, }, - { 2, 1, 0, 3, 149, -128, }, + { 2, 1, 0, 3, 149, 30, }, { 1, 1, 0, 3, 149, 127, }, { 3, 1, 0, 3, 149, 76, }, { 4, 1, 0, 3, 149, 60, }, @@ -41371,9 +41371,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 3, 149, 76, }, { 7, 1, 0, 3, 149, 30, }, { 8, 1, 0, 3, 149, 72, }, - { 9, 1, 0, 3, 149, -128, }, + { 9, 1, 0, 3, 149, 30, }, { 0, 1, 0, 3, 153, 76, }, - { 2, 1, 0, 3, 153, -128, }, + { 2, 1, 0, 3, 153, 30, }, { 1, 1, 0, 3, 153, 127, }, { 3, 1, 0, 3, 153, 76, }, { 4, 1, 0, 3, 153, 60, }, @@ -41381,9 +41381,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 3, 153, 76, }, { 7, 1, 0, 3, 153, 30, }, { 8, 1, 0, 3, 153, 76, }, - { 9, 1, 0, 3, 153, -128, }, + { 9, 1, 0, 3, 153, 30, }, { 0, 1, 0, 3, 157, 76, }, - { 2, 1, 0, 3, 157, -128, }, + { 2, 1, 0, 3, 157, 30, }, { 1, 1, 0, 3, 157, 127, }, { 3, 1, 0, 3, 157, 76, }, { 4, 1, 0, 3, 157, 60, }, @@ -41391,9 +41391,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 3, 157, 76, }, { 7, 1, 0, 3, 157, 30, }, { 8, 1, 0, 
3, 157, 76, }, - { 9, 1, 0, 3, 157, -128, }, + { 9, 1, 0, 3, 157, 30, }, { 0, 1, 0, 3, 161, 76, }, - { 2, 1, 0, 3, 161, -128, }, + { 2, 1, 0, 3, 161, 30, }, { 1, 1, 0, 3, 161, 127, }, { 3, 1, 0, 3, 161, 76, }, { 4, 1, 0, 3, 161, 60, }, @@ -41401,9 +41401,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 3, 161, 76, }, { 7, 1, 0, 3, 161, 30, }, { 8, 1, 0, 3, 161, 76, }, - { 9, 1, 0, 3, 161, -128, }, + { 9, 1, 0, 3, 161, 30, }, { 0, 1, 0, 3, 165, 76, }, - { 2, 1, 0, 3, 165, -128, }, + { 2, 1, 0, 3, 165, 30, }, { 1, 1, 0, 3, 165, 127, }, { 3, 1, 0, 3, 165, 76, }, { 4, 1, 0, 3, 165, 60, }, @@ -41411,7 +41411,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 3, 165, 76, }, { 7, 1, 0, 3, 165, 30, }, { 8, 1, 0, 3, 165, 76, }, - { 9, 1, 0, 3, 165, -128, }, + { 9, 1, 0, 3, 165, 30, }, { 0, 1, 1, 2, 38, 66, }, { 2, 1, 1, 2, 38, 64, }, { 1, 1, 1, 2, 38, 62, }, @@ -41513,7 +41513,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 8, 1, 1, 2, 142, 72, }, { 9, 1, 1, 2, 142, 127, }, { 0, 1, 1, 2, 151, 72, }, - { 2, 1, 1, 2, 151, -128, }, + { 2, 1, 1, 2, 151, 54, }, { 1, 1, 1, 2, 151, 127, }, { 3, 1, 1, 2, 151, 72, }, { 4, 1, 1, 2, 151, 72, }, @@ -41521,9 +41521,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 1, 2, 151, 72, }, { 7, 1, 1, 2, 151, 54, }, { 8, 1, 1, 2, 151, 72, }, - { 9, 1, 1, 2, 151, -128, }, + { 9, 1, 1, 2, 151, 54, }, { 0, 1, 1, 2, 159, 72, }, - { 2, 1, 1, 2, 159, -128, }, + { 2, 1, 1, 2, 159, 54, }, { 1, 1, 1, 2, 159, 127, }, { 3, 1, 1, 2, 159, 72, }, { 4, 1, 1, 2, 159, 72, }, @@ -41531,7 +41531,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 1, 2, 159, 72, }, { 7, 1, 1, 2, 159, 54, }, { 8, 1, 1, 2, 159, 72, }, - { 9, 1, 1, 2, 159, -128, }, + { 9, 1, 1, 2, 159, 54, }, { 0, 1, 1, 3, 38, 60, }, { 2, 1, 1, 3, 38, 40, }, { 1, 1, 1, 3, 38, 50, }, @@ -41633,7 +41633,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 8, 1, 1, 3, 142, 68, }, { 9, 1, 1, 3, 142, 127, }, { 0, 1, 1, 3, 151, 72, }, - { 2, 1, 1, 3, 151, -128, }, + { 2, 1, 1, 3, 151, 30, }, { 1, 1, 1, 3, 151, 127, }, { 3, 1, 1, 3, 151, 72, }, { 4, 1, 1, 3, 151, 66, }, @@ -41641,9 +41641,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 1, 3, 151, 72, }, { 7, 1, 1, 3, 151, 30, }, { 8, 1, 1, 3, 151, 68, }, - { 9, 1, 1, 3, 151, -128, }, + { 9, 1, 1, 3, 151, 30, }, { 0, 1, 1, 3, 159, 72, }, - { 2, 1, 1, 3, 159, -128, }, + { 2, 1, 1, 3, 159, 30, }, { 1, 1, 1, 3, 159, 127, }, { 3, 1, 1, 3, 159, 72, }, { 4, 1, 1, 3, 159, 66, }, @@ -41651,7 +41651,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 1, 3, 159, 72, }, { 7, 1, 1, 3, 159, 30, }, { 8, 1, 1, 3, 159, 72, }, - { 9, 1, 1, 3, 159, -128, }, + { 9, 1, 1, 3, 159, 30, }, { 0, 1, 2, 4, 42, 64, }, { 2, 1, 2, 4, 42, 64, }, { 1, 1, 2, 4, 42, 64, }, @@ -41703,7 +41703,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 8, 1, 2, 4, 138, 72, }, { 9, 1, 2, 4, 138, 127, }, { 0, 1, 2, 4, 155, 72, }, - { 2, 1, 2, 4, 155, -128, }, + { 2, 1, 2, 4, 155, 54, }, { 1, 1, 2, 4, 155, 127, }, { 3, 1, 2, 4, 155, 72, }, { 4, 1, 2, 4, 155, 68, }, @@ -41711,7 +41711,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 2, 4, 155, 72, }, { 7, 1, 2, 4, 155, 54, }, { 8, 1, 2, 4, 155, 68, }, - { 9, 1, 2, 4, 155, -128, }, + { 9, 1, 2, 4, 155, 54, }, { 0, 1, 2, 5, 42, 54, }, 
{ 2, 1, 2, 5, 42, 40, }, { 1, 1, 2, 5, 42, 50, }, @@ -41763,7 +41763,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 8, 1, 2, 5, 138, 66, }, { 9, 1, 2, 5, 138, 127, }, { 0, 1, 2, 5, 155, 62, }, - { 2, 1, 2, 5, 155, -128, }, + { 2, 1, 2, 5, 155, 30, }, { 1, 1, 2, 5, 155, 127, }, { 3, 1, 2, 5, 155, 62, }, { 4, 1, 2, 5, 155, 58, }, @@ -41771,145 +41771,145 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 2, 5, 155, 62, }, { 7, 1, 2, 5, 155, 30, }, { 8, 1, 2, 5, 155, 62, }, - { 9, 1, 2, 5, 155, -128, }, + { 9, 1, 2, 5, 155, 30, }, }; RTW_DECL_TABLE_TXPWR_LMT(rtw8822c_txpwr_lmt_type0); static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 0, 0, 0, 0, 1, 72, }, - { 2, 0, 0, 0, 1, 60, }, - { 1, 0, 0, 0, 1, 68, }, + { 2, 0, 0, 0, 1, 56, }, + { 1, 0, 0, 0, 1, 72, }, { 3, 0, 0, 0, 1, 72, }, { 4, 0, 0, 0, 1, 76, }, - { 5, 0, 0, 0, 1, 60, }, + { 5, 0, 0, 0, 1, 56, }, { 6, 0, 0, 0, 1, 72, }, { 7, 0, 0, 0, 1, 60, }, { 8, 0, 0, 0, 1, 72, }, { 9, 0, 0, 0, 1, 60, }, { 0, 0, 0, 0, 2, 72, }, - { 2, 0, 0, 0, 2, 60, }, - { 1, 0, 0, 0, 2, 68, }, + { 2, 0, 0, 0, 2, 56, }, + { 1, 0, 0, 0, 2, 72, }, { 3, 0, 0, 0, 2, 72, }, { 4, 0, 0, 0, 2, 76, }, - { 5, 0, 0, 0, 2, 60, }, + { 5, 0, 0, 0, 2, 56, }, { 6, 0, 0, 0, 2, 72, }, { 7, 0, 0, 0, 2, 60, }, { 8, 0, 0, 0, 2, 72, }, { 9, 0, 0, 0, 2, 60, }, { 0, 0, 0, 0, 3, 76, }, - { 2, 0, 0, 0, 3, 60, }, - { 1, 0, 0, 0, 3, 68, }, + { 2, 0, 0, 0, 3, 56, }, + { 1, 0, 0, 0, 3, 72, }, { 3, 0, 0, 0, 3, 76, }, { 4, 0, 0, 0, 3, 76, }, - { 5, 0, 0, 0, 3, 60, }, + { 5, 0, 0, 0, 3, 56, }, { 6, 0, 0, 0, 3, 76, }, { 7, 0, 0, 0, 3, 60, }, { 8, 0, 0, 0, 3, 76, }, { 9, 0, 0, 0, 3, 60, }, { 0, 0, 0, 0, 4, 76, }, - { 2, 0, 0, 0, 4, 60, }, - { 1, 0, 0, 0, 4, 68, }, + { 2, 0, 0, 0, 4, 56, }, + { 1, 0, 0, 0, 4, 72, }, { 3, 0, 0, 0, 4, 76, }, { 4, 0, 0, 0, 4, 76, }, - { 5, 0, 0, 0, 4, 60, }, + { 5, 0, 0, 0, 4, 56, }, { 6, 0, 0, 0, 4, 76, }, { 7, 0, 0, 0, 4, 60, }, { 8, 0, 0, 0, 4, 76, }, { 9, 0, 0, 0, 4, 60, }, { 0, 0, 0, 0, 5, 76, }, - { 2, 0, 0, 0, 5, 60, }, - { 1, 0, 0, 0, 5, 68, }, + { 2, 0, 0, 0, 5, 56, }, + { 1, 0, 0, 0, 5, 72, }, { 3, 0, 0, 0, 5, 76, }, { 4, 0, 0, 0, 5, 76, }, - { 5, 0, 0, 0, 5, 60, }, + { 5, 0, 0, 0, 5, 56, }, { 6, 0, 0, 0, 5, 76, }, { 7, 0, 0, 0, 5, 60, }, { 8, 0, 0, 0, 5, 76, }, { 9, 0, 0, 0, 5, 60, }, { 0, 0, 0, 0, 6, 76, }, - { 2, 0, 0, 0, 6, 60, }, - { 1, 0, 0, 0, 6, 68, }, + { 2, 0, 0, 0, 6, 56, }, + { 1, 0, 0, 0, 6, 72, }, { 3, 0, 0, 0, 6, 76, }, { 4, 0, 0, 0, 6, 76, }, - { 5, 0, 0, 0, 6, 60, }, + { 5, 0, 0, 0, 6, 56, }, { 6, 0, 0, 0, 6, 76, }, { 7, 0, 0, 0, 6, 60, }, { 8, 0, 0, 0, 6, 76, }, { 9, 0, 0, 0, 6, 60, }, { 0, 0, 0, 0, 7, 76, }, - { 2, 0, 0, 0, 7, 60, }, - { 1, 0, 0, 0, 7, 68, }, + { 2, 0, 0, 0, 7, 56, }, + { 1, 0, 0, 0, 7, 72, }, { 3, 0, 0, 0, 7, 76, }, { 4, 0, 0, 0, 7, 76, }, - { 5, 0, 0, 0, 7, 60, }, + { 5, 0, 0, 0, 7, 56, }, { 6, 0, 0, 0, 7, 76, }, { 7, 0, 0, 0, 7, 60, }, { 8, 0, 0, 0, 7, 76, }, { 9, 0, 0, 0, 7, 60, }, { 0, 0, 0, 0, 8, 76, }, - { 2, 0, 0, 0, 8, 60, }, - { 1, 0, 0, 0, 8, 68, }, + { 2, 0, 0, 0, 8, 56, }, + { 1, 0, 0, 0, 8, 72, }, { 3, 0, 0, 0, 8, 76, }, { 4, 0, 0, 0, 8, 76, }, - { 5, 0, 0, 0, 8, 60, }, + { 5, 0, 0, 0, 8, 56, }, { 6, 0, 0, 0, 8, 76, }, { 7, 0, 0, 0, 8, 60, }, { 8, 0, 0, 0, 8, 76, }, { 9, 0, 0, 0, 8, 60, }, { 0, 0, 0, 0, 9, 76, }, - { 2, 0, 0, 0, 9, 60, }, - { 1, 0, 0, 0, 9, 68, }, + { 2, 0, 0, 0, 9, 56, }, + { 1, 0, 0, 0, 9, 72, }, { 3, 0, 0, 0, 9, 76, }, { 4, 0, 0, 0, 9, 76, }, - { 5, 0, 0, 0, 9, 60, }, + { 5, 0, 0, 0, 9, 56, }, { 
6, 0, 0, 0, 9, 76, }, { 7, 0, 0, 0, 9, 60, }, { 8, 0, 0, 0, 9, 76, }, { 9, 0, 0, 0, 9, 60, }, { 0, 0, 0, 0, 10, 72, }, - { 2, 0, 0, 0, 10, 60, }, - { 1, 0, 0, 0, 10, 68, }, + { 2, 0, 0, 0, 10, 56, }, + { 1, 0, 0, 0, 10, 72, }, { 3, 0, 0, 0, 10, 72, }, { 4, 0, 0, 0, 10, 76, }, - { 5, 0, 0, 0, 10, 60, }, + { 5, 0, 0, 0, 10, 56, }, { 6, 0, 0, 0, 10, 72, }, { 7, 0, 0, 0, 10, 60, }, { 8, 0, 0, 0, 10, 72, }, { 9, 0, 0, 0, 10, 60, }, { 0, 0, 0, 0, 11, 72, }, - { 2, 0, 0, 0, 11, 60, }, - { 1, 0, 0, 0, 11, 68, }, + { 2, 0, 0, 0, 11, 56, }, + { 1, 0, 0, 0, 11, 72, }, { 3, 0, 0, 0, 11, 72, }, { 4, 0, 0, 0, 11, 76, }, - { 5, 0, 0, 0, 11, 60, }, + { 5, 0, 0, 0, 11, 56, }, { 6, 0, 0, 0, 11, 72, }, { 7, 0, 0, 0, 11, 60, }, { 8, 0, 0, 0, 11, 72, }, { 9, 0, 0, 0, 11, 60, }, { 0, 0, 0, 0, 12, 44, }, - { 2, 0, 0, 0, 12, 60, }, - { 1, 0, 0, 0, 12, 68, }, + { 2, 0, 0, 0, 12, 56, }, + { 1, 0, 0, 0, 12, 72, }, { 3, 0, 0, 0, 12, 52, }, { 4, 0, 0, 0, 12, 76, }, - { 5, 0, 0, 0, 12, 60, }, + { 5, 0, 0, 0, 12, 56, }, { 6, 0, 0, 0, 12, 52, }, { 7, 0, 0, 0, 12, 60, }, { 8, 0, 0, 0, 12, 52, }, { 9, 0, 0, 0, 12, 60, }, { 0, 0, 0, 0, 13, 40, }, - { 2, 0, 0, 0, 13, 60, }, - { 1, 0, 0, 0, 13, 68, }, + { 2, 0, 0, 0, 13, 56, }, + { 1, 0, 0, 0, 13, 72, }, { 3, 0, 0, 0, 13, 48, }, { 4, 0, 0, 0, 13, 76, }, - { 5, 0, 0, 0, 13, 60, }, + { 5, 0, 0, 0, 13, 56, }, { 6, 0, 0, 0, 13, 48, }, { 7, 0, 0, 0, 13, 60, }, { 8, 0, 0, 0, 13, 48, }, { 9, 0, 0, 0, 13, 60, }, { 0, 0, 0, 0, 14, 127, }, { 2, 0, 0, 0, 14, 127, }, - { 1, 0, 0, 0, 14, 68, }, + { 1, 0, 0, 0, 14, 72, }, { 3, 0, 0, 0, 14, 127, }, { 4, 0, 0, 0, 14, 127, }, { 5, 0, 0, 0, 14, 127, }, @@ -42041,7 +42041,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 0, 0, 1, 13, 60, }, { 1, 0, 0, 1, 13, 76, }, { 3, 0, 0, 1, 13, 28, }, - { 4, 0, 0, 1, 13, 70, }, + { 4, 0, 0, 1, 13, 74, }, { 5, 0, 0, 1, 13, 60, }, { 6, 0, 0, 1, 13, 28, }, { 7, 0, 0, 1, 13, 60, }, @@ -42181,7 +42181,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 0, 0, 2, 13, 60, }, { 1, 0, 0, 2, 13, 76, }, { 3, 0, 0, 2, 13, 28, }, - { 4, 0, 0, 2, 13, 72, }, + { 4, 0, 0, 2, 13, 74, }, { 5, 0, 0, 2, 13, 60, }, { 6, 0, 0, 2, 13, 28, }, { 7, 0, 0, 2, 13, 60, }, @@ -42201,7 +42201,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 0, 0, 3, 1, 36, }, { 1, 0, 0, 3, 1, 66, }, { 3, 0, 0, 3, 1, 52, }, - { 4, 0, 0, 3, 1, 68, }, + { 4, 0, 0, 3, 1, 72, }, { 5, 0, 0, 3, 1, 36, }, { 6, 0, 0, 3, 1, 52, }, { 7, 0, 0, 3, 1, 36, }, @@ -42211,7 +42211,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 0, 0, 3, 2, 36, }, { 1, 0, 0, 3, 2, 66, }, { 3, 0, 0, 3, 2, 60, }, - { 4, 0, 0, 3, 2, 70, }, + { 4, 0, 0, 3, 2, 72, }, { 5, 0, 0, 3, 2, 36, }, { 6, 0, 0, 3, 2, 60, }, { 7, 0, 0, 3, 2, 36, }, @@ -42221,7 +42221,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 0, 0, 3, 3, 36, }, { 1, 0, 0, 3, 3, 66, }, { 3, 0, 0, 3, 3, 64, }, - { 4, 0, 0, 3, 3, 70, }, + { 4, 0, 0, 3, 3, 72, }, { 5, 0, 0, 3, 3, 36, }, { 6, 0, 0, 3, 3, 64, }, { 7, 0, 0, 3, 3, 36, }, @@ -42231,7 +42231,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 0, 0, 3, 4, 36, }, { 1, 0, 0, 3, 4, 66, }, { 3, 0, 0, 3, 4, 68, }, - { 4, 0, 0, 3, 4, 70, }, + { 4, 0, 0, 3, 4, 72, }, { 5, 0, 0, 3, 4, 36, }, { 6, 0, 0, 3, 4, 68, }, { 7, 0, 0, 3, 4, 36, }, @@ -42241,7 +42241,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 0, 0, 3, 5, 36, }, { 1, 0, 0, 3, 5, 66, }, { 
3, 0, 0, 3, 5, 76, }, - { 4, 0, 0, 3, 5, 70, }, + { 4, 0, 0, 3, 5, 72, }, { 5, 0, 0, 3, 5, 36, }, { 6, 0, 0, 3, 5, 76, }, { 7, 0, 0, 3, 5, 36, }, @@ -42251,7 +42251,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 0, 0, 3, 6, 36, }, { 1, 0, 0, 3, 6, 66, }, { 3, 0, 0, 3, 6, 76, }, - { 4, 0, 0, 3, 6, 70, }, + { 4, 0, 0, 3, 6, 72, }, { 5, 0, 0, 3, 6, 36, }, { 6, 0, 0, 3, 6, 76, }, { 7, 0, 0, 3, 6, 36, }, @@ -42261,7 +42261,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 0, 0, 3, 7, 36, }, { 1, 0, 0, 3, 7, 66, }, { 3, 0, 0, 3, 7, 76, }, - { 4, 0, 0, 3, 7, 70, }, + { 4, 0, 0, 3, 7, 72, }, { 5, 0, 0, 3, 7, 36, }, { 6, 0, 0, 3, 7, 76, }, { 7, 0, 0, 3, 7, 36, }, @@ -42271,7 +42271,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 0, 0, 3, 8, 36, }, { 1, 0, 0, 3, 8, 66, }, { 3, 0, 0, 3, 8, 68, }, - { 4, 0, 0, 3, 8, 70, }, + { 4, 0, 0, 3, 8, 72, }, { 5, 0, 0, 3, 8, 36, }, { 6, 0, 0, 3, 8, 68, }, { 7, 0, 0, 3, 8, 36, }, @@ -42281,7 +42281,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 0, 0, 3, 9, 36, }, { 1, 0, 0, 3, 9, 66, }, { 3, 0, 0, 3, 9, 64, }, - { 4, 0, 0, 3, 9, 70, }, + { 4, 0, 0, 3, 9, 72, }, { 5, 0, 0, 3, 9, 36, }, { 6, 0, 0, 3, 9, 64, }, { 7, 0, 0, 3, 9, 36, }, @@ -42291,7 +42291,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 0, 0, 3, 10, 36, }, { 1, 0, 0, 3, 10, 66, }, { 3, 0, 0, 3, 10, 60, }, - { 4, 0, 0, 3, 10, 70, }, + { 4, 0, 0, 3, 10, 72, }, { 5, 0, 0, 3, 10, 36, }, { 6, 0, 0, 3, 10, 60, }, { 7, 0, 0, 3, 10, 36, }, @@ -42301,7 +42301,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 0, 0, 3, 11, 36, }, { 1, 0, 0, 3, 11, 66, }, { 3, 0, 0, 3, 11, 52, }, - { 4, 0, 0, 3, 11, 70, }, + { 4, 0, 0, 3, 11, 72, }, { 5, 0, 0, 3, 11, 36, }, { 6, 0, 0, 3, 11, 52, }, { 7, 0, 0, 3, 11, 36, }, @@ -42311,7 +42311,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 0, 0, 3, 12, 36, }, { 1, 0, 0, 3, 12, 66, }, { 3, 0, 0, 3, 12, 40, }, - { 4, 0, 0, 3, 12, 70, }, + { 4, 0, 0, 3, 12, 72, }, { 5, 0, 0, 3, 12, 36, }, { 6, 0, 0, 3, 12, 40, }, { 7, 0, 0, 3, 12, 36, }, @@ -42321,7 +42321,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 0, 0, 3, 13, 36, }, { 1, 0, 0, 3, 13, 66, }, { 3, 0, 0, 3, 13, 28, }, - { 4, 0, 0, 3, 13, 62, }, + { 4, 0, 0, 3, 13, 68, }, { 5, 0, 0, 3, 13, 36, }, { 6, 0, 0, 3, 13, 28, }, { 7, 0, 0, 3, 13, 36, }, @@ -42501,7 +42501,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 0, 1, 3, 3, 36, }, { 1, 0, 1, 3, 3, 66, }, { 3, 0, 1, 3, 3, 48, }, - { 4, 0, 1, 3, 3, 66, }, + { 4, 0, 1, 3, 3, 68, }, { 5, 0, 1, 3, 3, 36, }, { 6, 0, 1, 3, 3, 48, }, { 7, 0, 1, 3, 3, 36, }, @@ -42618,137 +42618,137 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 8, 0, 1, 3, 14, 127, }, { 9, 0, 1, 3, 14, 127, }, { 0, 1, 0, 1, 36, 74, }, - { 2, 1, 0, 1, 36, 62, }, - { 1, 1, 0, 1, 36, 60, }, + { 2, 1, 0, 1, 36, 58, }, + { 1, 1, 0, 1, 36, 62, }, { 3, 1, 0, 1, 36, 62, }, - { 4, 1, 0, 1, 36, 76, }, - { 5, 1, 0, 1, 36, 62, }, + { 4, 1, 0, 1, 36, 74, }, + { 5, 1, 0, 1, 36, 58, }, { 6, 1, 0, 1, 36, 64, }, { 7, 1, 0, 1, 36, 54, }, { 8, 1, 0, 1, 36, 62, }, { 9, 1, 0, 1, 36, 62, }, { 0, 1, 0, 1, 40, 76, }, - { 2, 1, 0, 1, 40, 62, }, + { 2, 1, 0, 1, 40, 58, }, { 1, 1, 0, 1, 40, 62, }, { 3, 1, 0, 1, 40, 62, }, { 4, 1, 0, 1, 40, 76, }, - { 5, 1, 0, 1, 40, 62, }, + { 5, 1, 0, 1, 40, 58, }, { 
6, 1, 0, 1, 40, 64, }, { 7, 1, 0, 1, 40, 54, }, { 8, 1, 0, 1, 40, 62, }, { 9, 1, 0, 1, 40, 62, }, { 0, 1, 0, 1, 44, 76, }, - { 2, 1, 0, 1, 44, 62, }, + { 2, 1, 0, 1, 44, 58, }, { 1, 1, 0, 1, 44, 62, }, { 3, 1, 0, 1, 44, 62, }, { 4, 1, 0, 1, 44, 76, }, - { 5, 1, 0, 1, 44, 62, }, + { 5, 1, 0, 1, 44, 58, }, { 6, 1, 0, 1, 44, 64, }, { 7, 1, 0, 1, 44, 54, }, { 8, 1, 0, 1, 44, 62, }, { 9, 1, 0, 1, 44, 62, }, { 0, 1, 0, 1, 48, 76, }, - { 2, 1, 0, 1, 48, 62, }, + { 2, 1, 0, 1, 48, 58, }, { 1, 1, 0, 1, 48, 62, }, { 3, 1, 0, 1, 48, 62, }, - { 4, 1, 0, 1, 48, 54, }, - { 5, 1, 0, 1, 48, 62, }, + { 4, 1, 0, 1, 48, 58, }, + { 5, 1, 0, 1, 48, 58, }, { 6, 1, 0, 1, 48, 64, }, { 7, 1, 0, 1, 48, 54, }, { 8, 1, 0, 1, 48, 62, }, { 9, 1, 0, 1, 48, 62, }, { 0, 1, 0, 1, 52, 76, }, - { 2, 1, 0, 1, 52, 62, }, + { 2, 1, 0, 1, 52, 58, }, { 1, 1, 0, 1, 52, 62, }, { 3, 1, 0, 1, 52, 64, }, { 4, 1, 0, 1, 52, 76, }, - { 5, 1, 0, 1, 52, 62, }, + { 5, 1, 0, 1, 52, 58, }, { 6, 1, 0, 1, 52, 76, }, { 7, 1, 0, 1, 52, 54, }, { 8, 1, 0, 1, 52, 76, }, { 9, 1, 0, 1, 52, 62, }, { 0, 1, 0, 1, 56, 76, }, - { 2, 1, 0, 1, 56, 62, }, + { 2, 1, 0, 1, 56, 58, }, { 1, 1, 0, 1, 56, 62, }, { 3, 1, 0, 1, 56, 64, }, { 4, 1, 0, 1, 56, 76, }, - { 5, 1, 0, 1, 56, 62, }, + { 5, 1, 0, 1, 56, 58, }, { 6, 1, 0, 1, 56, 76, }, { 7, 1, 0, 1, 56, 54, }, { 8, 1, 0, 1, 56, 76, }, { 9, 1, 0, 1, 56, 62, }, { 0, 1, 0, 1, 60, 76, }, - { 2, 1, 0, 1, 60, 62, }, + { 2, 1, 0, 1, 60, 58, }, { 1, 1, 0, 1, 60, 62, }, { 3, 1, 0, 1, 60, 64, }, { 4, 1, 0, 1, 60, 76, }, - { 5, 1, 0, 1, 60, 62, }, + { 5, 1, 0, 1, 60, 58, }, { 6, 1, 0, 1, 60, 76, }, { 7, 1, 0, 1, 60, 54, }, { 8, 1, 0, 1, 60, 76, }, { 9, 1, 0, 1, 60, 62, }, - { 0, 1, 0, 1, 64, 74, }, - { 2, 1, 0, 1, 64, 62, }, - { 1, 1, 0, 1, 64, 60, }, + { 0, 1, 0, 1, 64, 76, }, + { 2, 1, 0, 1, 64, 58, }, + { 1, 1, 0, 1, 64, 62, }, { 3, 1, 0, 1, 64, 64, }, { 4, 1, 0, 1, 64, 76, }, - { 5, 1, 0, 1, 64, 62, }, + { 5, 1, 0, 1, 64, 58, }, { 6, 1, 0, 1, 64, 74, }, { 7, 1, 0, 1, 64, 54, }, { 8, 1, 0, 1, 64, 74, }, { 9, 1, 0, 1, 64, 62, }, - { 0, 1, 0, 1, 100, 72, }, - { 2, 1, 0, 1, 100, 62, }, + { 0, 1, 0, 1, 100, 68, }, + { 2, 1, 0, 1, 100, 58, }, { 1, 1, 0, 1, 100, 76, }, - { 3, 1, 0, 1, 100, 72, }, + { 3, 1, 0, 1, 100, 68, }, { 4, 1, 0, 1, 100, 76, }, - { 5, 1, 0, 1, 100, 62, }, + { 5, 1, 0, 1, 100, 58, }, { 6, 1, 0, 1, 100, 72, }, { 7, 1, 0, 1, 100, 54, }, { 8, 1, 0, 1, 100, 72, }, { 9, 1, 0, 1, 100, 127, }, { 0, 1, 0, 1, 104, 76, }, - { 2, 1, 0, 1, 104, 62, }, + { 2, 1, 0, 1, 104, 58, }, { 1, 1, 0, 1, 104, 76, }, { 3, 1, 0, 1, 104, 76, }, { 4, 1, 0, 1, 104, 76, }, - { 5, 1, 0, 1, 104, 62, }, + { 5, 1, 0, 1, 104, 58, }, { 6, 1, 0, 1, 104, 76, }, { 7, 1, 0, 1, 104, 54, }, { 8, 1, 0, 1, 104, 76, }, { 9, 1, 0, 1, 104, 127, }, { 0, 1, 0, 1, 108, 76, }, - { 2, 1, 0, 1, 108, 62, }, + { 2, 1, 0, 1, 108, 58, }, { 1, 1, 0, 1, 108, 76, }, { 3, 1, 0, 1, 108, 76, }, { 4, 1, 0, 1, 108, 76, }, - { 5, 1, 0, 1, 108, 62, }, + { 5, 1, 0, 1, 108, 58, }, { 6, 1, 0, 1, 108, 76, }, { 7, 1, 0, 1, 108, 54, }, { 8, 1, 0, 1, 108, 76, }, { 9, 1, 0, 1, 108, 127, }, { 0, 1, 0, 1, 112, 76, }, - { 2, 1, 0, 1, 112, 62, }, + { 2, 1, 0, 1, 112, 58, }, { 1, 1, 0, 1, 112, 76, }, { 3, 1, 0, 1, 112, 76, }, { 4, 1, 0, 1, 112, 76, }, - { 5, 1, 0, 1, 112, 62, }, + { 5, 1, 0, 1, 112, 58, }, { 6, 1, 0, 1, 112, 76, }, { 7, 1, 0, 1, 112, 54, }, { 8, 1, 0, 1, 112, 76, }, { 9, 1, 0, 1, 112, 127, }, { 0, 1, 0, 1, 116, 76, }, - { 2, 1, 0, 1, 116, 62, }, + { 2, 1, 0, 1, 116, 58, }, { 1, 1, 0, 1, 116, 76, }, { 3, 1, 0, 1, 116, 76, }, { 4, 1, 0, 1, 116, 76, }, - { 5, 1, 0, 1, 
116, 62, }, + { 5, 1, 0, 1, 116, 58, }, { 6, 1, 0, 1, 116, 76, }, { 7, 1, 0, 1, 116, 54, }, { 8, 1, 0, 1, 116, 76, }, { 9, 1, 0, 1, 116, 127, }, { 0, 1, 0, 1, 120, 76, }, - { 2, 1, 0, 1, 120, 62, }, + { 2, 1, 0, 1, 120, 58, }, { 1, 1, 0, 1, 120, 76, }, { 3, 1, 0, 1, 120, 127, }, { 4, 1, 0, 1, 120, 76, }, @@ -42758,7 +42758,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 8, 1, 0, 1, 120, 76, }, { 9, 1, 0, 1, 120, 127, }, { 0, 1, 0, 1, 124, 76, }, - { 2, 1, 0, 1, 124, 62, }, + { 2, 1, 0, 1, 124, 58, }, { 1, 1, 0, 1, 124, 76, }, { 3, 1, 0, 1, 124, 127, }, { 4, 1, 0, 1, 124, 76, }, @@ -42768,7 +42768,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 8, 1, 0, 1, 124, 76, }, { 9, 1, 0, 1, 124, 127, }, { 0, 1, 0, 1, 128, 76, }, - { 2, 1, 0, 1, 128, 62, }, + { 2, 1, 0, 1, 128, 58, }, { 1, 1, 0, 1, 128, 76, }, { 3, 1, 0, 1, 128, 127, }, { 4, 1, 0, 1, 128, 76, }, @@ -42778,38 +42778,38 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 8, 1, 0, 1, 128, 76, }, { 9, 1, 0, 1, 128, 127, }, { 0, 1, 0, 1, 132, 76, }, - { 2, 1, 0, 1, 132, 62, }, + { 2, 1, 0, 1, 132, 58, }, { 1, 1, 0, 1, 132, 76, }, { 3, 1, 0, 1, 132, 76, }, { 4, 1, 0, 1, 132, 76, }, - { 5, 1, 0, 1, 132, 62, }, + { 5, 1, 0, 1, 132, 58, }, { 6, 1, 0, 1, 132, 76, }, { 7, 1, 0, 1, 132, 54, }, { 8, 1, 0, 1, 132, 76, }, { 9, 1, 0, 1, 132, 127, }, { 0, 1, 0, 1, 136, 76, }, - { 2, 1, 0, 1, 136, 62, }, + { 2, 1, 0, 1, 136, 58, }, { 1, 1, 0, 1, 136, 76, }, { 3, 1, 0, 1, 136, 76, }, { 4, 1, 0, 1, 136, 76, }, - { 5, 1, 0, 1, 136, 62, }, + { 5, 1, 0, 1, 136, 58, }, { 6, 1, 0, 1, 136, 76, }, { 7, 1, 0, 1, 136, 54, }, { 8, 1, 0, 1, 136, 76, }, { 9, 1, 0, 1, 136, 127, }, - { 0, 1, 0, 1, 140, 72, }, - { 2, 1, 0, 1, 140, 62, }, + { 0, 1, 0, 1, 140, 74, }, + { 2, 1, 0, 1, 140, 58, }, { 1, 1, 0, 1, 140, 76, }, - { 3, 1, 0, 1, 140, 72, }, + { 3, 1, 0, 1, 140, 74, }, { 4, 1, 0, 1, 140, 76, }, - { 5, 1, 0, 1, 140, 62, }, + { 5, 1, 0, 1, 140, 58, }, { 6, 1, 0, 1, 140, 72, }, { 7, 1, 0, 1, 140, 54, }, { 8, 1, 0, 1, 140, 72, }, { 9, 1, 0, 1, 140, 127, }, { 0, 1, 0, 1, 144, 76, }, { 2, 1, 0, 1, 144, 127, }, - { 1, 1, 0, 1, 144, 127, }, + { 1, 1, 0, 1, 144, 76, }, { 3, 1, 0, 1, 144, 76, }, { 4, 1, 0, 1, 144, 76, }, { 5, 1, 0, 1, 144, 127, }, @@ -42818,7 +42818,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 8, 1, 0, 1, 144, 76, }, { 9, 1, 0, 1, 144, 127, }, { 0, 1, 0, 1, 149, 76, }, - { 2, 1, 0, 1, 149, -128, }, + { 2, 1, 0, 1, 149, 28, }, { 1, 1, 0, 1, 149, 127, }, { 3, 1, 0, 1, 149, 76, }, { 4, 1, 0, 1, 149, 74, }, @@ -42826,9 +42826,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 6, 1, 0, 1, 149, 76, }, { 7, 1, 0, 1, 149, 54, }, { 8, 1, 0, 1, 149, 76, }, - { 9, 1, 0, 1, 149, -128, }, + { 9, 1, 0, 1, 149, 28, }, { 0, 1, 0, 1, 153, 76, }, - { 2, 1, 0, 1, 153, -128, }, + { 2, 1, 0, 1, 153, 28, }, { 1, 1, 0, 1, 153, 127, }, { 3, 1, 0, 1, 153, 76, }, { 4, 1, 0, 1, 153, 74, }, @@ -42836,9 +42836,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 6, 1, 0, 1, 153, 76, }, { 7, 1, 0, 1, 153, 54, }, { 8, 1, 0, 1, 153, 76, }, - { 9, 1, 0, 1, 153, -128, }, + { 9, 1, 0, 1, 153, 28, }, { 0, 1, 0, 1, 157, 76, }, - { 2, 1, 0, 1, 157, -128, }, + { 2, 1, 0, 1, 157, 28, }, { 1, 1, 0, 1, 157, 127, }, { 3, 1, 0, 1, 157, 76, }, { 4, 1, 0, 1, 157, 74, }, @@ -42846,9 +42846,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 6, 1, 0, 1, 157, 76, }, { 7, 1, 0, 1, 157, 54, }, { 
8, 1, 0, 1, 157, 76, }, - { 9, 1, 0, 1, 157, -128, }, + { 9, 1, 0, 1, 157, 28, }, { 0, 1, 0, 1, 161, 76, }, - { 2, 1, 0, 1, 161, -128, }, + { 2, 1, 0, 1, 161, 28, }, { 1, 1, 0, 1, 161, 127, }, { 3, 1, 0, 1, 161, 76, }, { 4, 1, 0, 1, 161, 74, }, @@ -42856,9 +42856,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 6, 1, 0, 1, 161, 76, }, { 7, 1, 0, 1, 161, 54, }, { 8, 1, 0, 1, 161, 76, }, - { 9, 1, 0, 1, 161, -128, }, + { 9, 1, 0, 1, 161, 28, }, { 0, 1, 0, 1, 165, 76, }, - { 2, 1, 0, 1, 165, -128, }, + { 2, 1, 0, 1, 165, 28, }, { 1, 1, 0, 1, 165, 127, }, { 3, 1, 0, 1, 165, 76, }, { 4, 1, 0, 1, 165, 74, }, @@ -42866,139 +42866,139 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 6, 1, 0, 1, 165, 76, }, { 7, 1, 0, 1, 165, 54, }, { 8, 1, 0, 1, 165, 76, }, - { 9, 1, 0, 1, 165, -128, }, - { 0, 1, 0, 2, 36, 72, }, - { 2, 1, 0, 2, 36, 62, }, - { 1, 1, 0, 2, 36, 62, }, + { 9, 1, 0, 1, 165, 28, }, + { 0, 1, 0, 2, 36, 70, }, + { 2, 1, 0, 2, 36, 58, }, + { 1, 1, 0, 2, 36, 64, }, { 3, 1, 0, 2, 36, 62, }, { 4, 1, 0, 2, 36, 76, }, - { 5, 1, 0, 2, 36, 62, }, + { 5, 1, 0, 2, 36, 58, }, { 6, 1, 0, 2, 36, 64, }, { 7, 1, 0, 2, 36, 54, }, { 8, 1, 0, 2, 36, 62, }, { 9, 1, 0, 2, 36, 62, }, { 0, 1, 0, 2, 40, 76, }, - { 2, 1, 0, 2, 40, 62, }, + { 2, 1, 0, 2, 40, 58, }, { 1, 1, 0, 2, 40, 62, }, { 3, 1, 0, 2, 40, 62, }, { 4, 1, 0, 2, 40, 76, }, - { 5, 1, 0, 2, 40, 62, }, + { 5, 1, 0, 2, 40, 58, }, { 6, 1, 0, 2, 40, 64, }, { 7, 1, 0, 2, 40, 54, }, { 8, 1, 0, 2, 40, 62, }, { 9, 1, 0, 2, 40, 62, }, { 0, 1, 0, 2, 44, 76, }, - { 2, 1, 0, 2, 44, 62, }, + { 2, 1, 0, 2, 44, 58, }, { 1, 1, 0, 2, 44, 62, }, { 3, 1, 0, 2, 44, 62, }, { 4, 1, 0, 2, 44, 76, }, - { 5, 1, 0, 2, 44, 62, }, + { 5, 1, 0, 2, 44, 58, }, { 6, 1, 0, 2, 44, 64, }, { 7, 1, 0, 2, 44, 54, }, { 8, 1, 0, 2, 44, 62, }, { 9, 1, 0, 2, 44, 62, }, { 0, 1, 0, 2, 48, 76, }, - { 2, 1, 0, 2, 48, 62, }, + { 2, 1, 0, 2, 48, 58, }, { 1, 1, 0, 2, 48, 62, }, { 3, 1, 0, 2, 48, 62, }, - { 4, 1, 0, 2, 48, 54, }, - { 5, 1, 0, 2, 48, 62, }, + { 4, 1, 0, 2, 48, 58, }, + { 5, 1, 0, 2, 48, 58, }, { 6, 1, 0, 2, 48, 64, }, { 7, 1, 0, 2, 48, 54, }, { 8, 1, 0, 2, 48, 62, }, { 9, 1, 0, 2, 48, 62, }, { 0, 1, 0, 2, 52, 76, }, - { 2, 1, 0, 2, 52, 62, }, + { 2, 1, 0, 2, 52, 58, }, { 1, 1, 0, 2, 52, 62, }, { 3, 1, 0, 2, 52, 64, }, { 4, 1, 0, 2, 52, 76, }, - { 5, 1, 0, 2, 52, 62, }, + { 5, 1, 0, 2, 52, 58, }, { 6, 1, 0, 2, 52, 76, }, { 7, 1, 0, 2, 52, 54, }, { 8, 1, 0, 2, 52, 76, }, { 9, 1, 0, 2, 52, 62, }, { 0, 1, 0, 2, 56, 76, }, - { 2, 1, 0, 2, 56, 62, }, + { 2, 1, 0, 2, 56, 58, }, { 1, 1, 0, 2, 56, 62, }, { 3, 1, 0, 2, 56, 64, }, { 4, 1, 0, 2, 56, 76, }, - { 5, 1, 0, 2, 56, 62, }, + { 5, 1, 0, 2, 56, 58, }, { 6, 1, 0, 2, 56, 76, }, { 7, 1, 0, 2, 56, 54, }, { 8, 1, 0, 2, 56, 76, }, { 9, 1, 0, 2, 56, 62, }, { 0, 1, 0, 2, 60, 76, }, - { 2, 1, 0, 2, 60, 62, }, + { 2, 1, 0, 2, 60, 58, }, { 1, 1, 0, 2, 60, 62, }, { 3, 1, 0, 2, 60, 64, }, { 4, 1, 0, 2, 60, 76, }, - { 5, 1, 0, 2, 60, 62, }, + { 5, 1, 0, 2, 60, 58, }, { 6, 1, 0, 2, 60, 76, }, { 7, 1, 0, 2, 60, 54, }, { 8, 1, 0, 2, 60, 76, }, { 9, 1, 0, 2, 60, 62, }, - { 0, 1, 0, 2, 64, 74, }, - { 2, 1, 0, 2, 64, 62, }, - { 1, 1, 0, 2, 64, 60, }, + { 0, 1, 0, 2, 64, 70, }, + { 2, 1, 0, 2, 64, 58, }, + { 1, 1, 0, 2, 64, 62, }, { 3, 1, 0, 2, 64, 64, }, { 4, 1, 0, 2, 64, 74, }, - { 5, 1, 0, 2, 64, 62, }, + { 5, 1, 0, 2, 64, 58, }, { 6, 1, 0, 2, 64, 74, }, { 7, 1, 0, 2, 64, 54, }, { 8, 1, 0, 2, 64, 74, }, { 9, 1, 0, 2, 64, 62, }, - { 0, 1, 0, 2, 100, 70, }, - { 2, 1, 0, 2, 100, 62, }, + { 0, 1, 
0, 2, 100, 66, }, + { 2, 1, 0, 2, 100, 58, }, { 1, 1, 0, 2, 100, 76, }, - { 3, 1, 0, 2, 100, 70, }, + { 3, 1, 0, 2, 100, 66, }, { 4, 1, 0, 2, 100, 76, }, - { 5, 1, 0, 2, 100, 62, }, + { 5, 1, 0, 2, 100, 58, }, { 6, 1, 0, 2, 100, 70, }, { 7, 1, 0, 2, 100, 54, }, { 8, 1, 0, 2, 100, 70, }, { 9, 1, 0, 2, 100, 127, }, { 0, 1, 0, 2, 104, 76, }, - { 2, 1, 0, 2, 104, 62, }, + { 2, 1, 0, 2, 104, 58, }, { 1, 1, 0, 2, 104, 76, }, { 3, 1, 0, 2, 104, 76, }, { 4, 1, 0, 2, 104, 76, }, - { 5, 1, 0, 2, 104, 62, }, + { 5, 1, 0, 2, 104, 58, }, { 6, 1, 0, 2, 104, 76, }, { 7, 1, 0, 2, 104, 54, }, { 8, 1, 0, 2, 104, 76, }, { 9, 1, 0, 2, 104, 127, }, { 0, 1, 0, 2, 108, 76, }, - { 2, 1, 0, 2, 108, 62, }, + { 2, 1, 0, 2, 108, 58, }, { 1, 1, 0, 2, 108, 76, }, { 3, 1, 0, 2, 108, 76, }, { 4, 1, 0, 2, 108, 76, }, - { 5, 1, 0, 2, 108, 62, }, + { 5, 1, 0, 2, 108, 58, }, { 6, 1, 0, 2, 108, 76, }, { 7, 1, 0, 2, 108, 54, }, { 8, 1, 0, 2, 108, 76, }, { 9, 1, 0, 2, 108, 127, }, { 0, 1, 0, 2, 112, 76, }, - { 2, 1, 0, 2, 112, 62, }, + { 2, 1, 0, 2, 112, 58, }, { 1, 1, 0, 2, 112, 76, }, { 3, 1, 0, 2, 112, 76, }, { 4, 1, 0, 2, 112, 76, }, - { 5, 1, 0, 2, 112, 62, }, + { 5, 1, 0, 2, 112, 58, }, { 6, 1, 0, 2, 112, 76, }, { 7, 1, 0, 2, 112, 54, }, { 8, 1, 0, 2, 112, 76, }, { 9, 1, 0, 2, 112, 127, }, { 0, 1, 0, 2, 116, 76, }, - { 2, 1, 0, 2, 116, 62, }, + { 2, 1, 0, 2, 116, 58, }, { 1, 1, 0, 2, 116, 76, }, { 3, 1, 0, 2, 116, 76, }, { 4, 1, 0, 2, 116, 76, }, - { 5, 1, 0, 2, 116, 62, }, + { 5, 1, 0, 2, 116, 58, }, { 6, 1, 0, 2, 116, 76, }, { 7, 1, 0, 2, 116, 54, }, { 8, 1, 0, 2, 116, 76, }, { 9, 1, 0, 2, 116, 127, }, { 0, 1, 0, 2, 120, 76, }, - { 2, 1, 0, 2, 120, 62, }, + { 2, 1, 0, 2, 120, 58, }, { 1, 1, 0, 2, 120, 76, }, { 3, 1, 0, 2, 120, 127, }, { 4, 1, 0, 2, 120, 76, }, @@ -43008,7 +43008,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 8, 1, 0, 2, 120, 76, }, { 9, 1, 0, 2, 120, 127, }, { 0, 1, 0, 2, 124, 76, }, - { 2, 1, 0, 2, 124, 62, }, + { 2, 1, 0, 2, 124, 58, }, { 1, 1, 0, 2, 124, 76, }, { 3, 1, 0, 2, 124, 127, }, { 4, 1, 0, 2, 124, 76, }, @@ -43018,7 +43018,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 8, 1, 0, 2, 124, 76, }, { 9, 1, 0, 2, 124, 127, }, { 0, 1, 0, 2, 128, 76, }, - { 2, 1, 0, 2, 128, 62, }, + { 2, 1, 0, 2, 128, 58, }, { 1, 1, 0, 2, 128, 76, }, { 3, 1, 0, 2, 128, 127, }, { 4, 1, 0, 2, 128, 76, }, @@ -43028,38 +43028,38 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 8, 1, 0, 2, 128, 76, }, { 9, 1, 0, 2, 128, 127, }, { 0, 1, 0, 2, 132, 76, }, - { 2, 1, 0, 2, 132, 62, }, + { 2, 1, 0, 2, 132, 58, }, { 1, 1, 0, 2, 132, 76, }, { 3, 1, 0, 2, 132, 76, }, { 4, 1, 0, 2, 132, 76, }, - { 5, 1, 0, 2, 132, 62, }, + { 5, 1, 0, 2, 132, 58, }, { 6, 1, 0, 2, 132, 76, }, { 7, 1, 0, 2, 132, 54, }, { 8, 1, 0, 2, 132, 76, }, { 9, 1, 0, 2, 132, 127, }, { 0, 1, 0, 2, 136, 76, }, - { 2, 1, 0, 2, 136, 62, }, + { 2, 1, 0, 2, 136, 58, }, { 1, 1, 0, 2, 136, 76, }, { 3, 1, 0, 2, 136, 76, }, { 4, 1, 0, 2, 136, 76, }, - { 5, 1, 0, 2, 136, 62, }, + { 5, 1, 0, 2, 136, 58, }, { 6, 1, 0, 2, 136, 76, }, { 7, 1, 0, 2, 136, 54, }, { 8, 1, 0, 2, 136, 76, }, { 9, 1, 0, 2, 136, 127, }, - { 0, 1, 0, 2, 140, 70, }, - { 2, 1, 0, 2, 140, 62, }, + { 0, 1, 0, 2, 140, 66, }, + { 2, 1, 0, 2, 140, 58, }, { 1, 1, 0, 2, 140, 76, }, - { 3, 1, 0, 2, 140, 70, }, + { 3, 1, 0, 2, 140, 66, }, { 4, 1, 0, 2, 140, 76, }, - { 5, 1, 0, 2, 140, 62, }, + { 5, 1, 0, 2, 140, 58, }, { 6, 1, 0, 2, 140, 70, }, { 7, 1, 0, 2, 140, 54, }, { 8, 1, 0, 2, 140, 70, }, { 9, 1, 0, 2, 140, 
127, }, { 0, 1, 0, 2, 144, 76, }, { 2, 1, 0, 2, 144, 127, }, - { 1, 1, 0, 2, 144, 127, }, + { 1, 1, 0, 2, 144, 76, }, { 3, 1, 0, 2, 144, 76, }, { 4, 1, 0, 2, 144, 76, }, { 5, 1, 0, 2, 144, 127, }, @@ -43068,7 +43068,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 8, 1, 0, 2, 144, 76, }, { 9, 1, 0, 2, 144, 127, }, { 0, 1, 0, 2, 149, 76, }, - { 2, 1, 0, 2, 149, -128, }, + { 2, 1, 0, 2, 149, 28, }, { 1, 1, 0, 2, 149, 127, }, { 3, 1, 0, 2, 149, 76, }, { 4, 1, 0, 2, 149, 74, }, @@ -43076,9 +43076,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 6, 1, 0, 2, 149, 76, }, { 7, 1, 0, 2, 149, 54, }, { 8, 1, 0, 2, 149, 76, }, - { 9, 1, 0, 2, 149, -128, }, + { 9, 1, 0, 2, 149, 28, }, { 0, 1, 0, 2, 153, 76, }, - { 2, 1, 0, 2, 153, -128, }, + { 2, 1, 0, 2, 153, 28, }, { 1, 1, 0, 2, 153, 127, }, { 3, 1, 0, 2, 153, 76, }, { 4, 1, 0, 2, 153, 74, }, @@ -43086,9 +43086,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 6, 1, 0, 2, 153, 76, }, { 7, 1, 0, 2, 153, 54, }, { 8, 1, 0, 2, 153, 76, }, - { 9, 1, 0, 2, 153, -128, }, + { 9, 1, 0, 2, 153, 28, }, { 0, 1, 0, 2, 157, 76, }, - { 2, 1, 0, 2, 157, -128, }, + { 2, 1, 0, 2, 157, 28, }, { 1, 1, 0, 2, 157, 127, }, { 3, 1, 0, 2, 157, 76, }, { 4, 1, 0, 2, 157, 74, }, @@ -43096,9 +43096,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 6, 1, 0, 2, 157, 76, }, { 7, 1, 0, 2, 157, 54, }, { 8, 1, 0, 2, 157, 76, }, - { 9, 1, 0, 2, 157, -128, }, + { 9, 1, 0, 2, 157, 28, }, { 0, 1, 0, 2, 161, 76, }, - { 2, 1, 0, 2, 161, -128, }, + { 2, 1, 0, 2, 161, 28, }, { 1, 1, 0, 2, 161, 127, }, { 3, 1, 0, 2, 161, 76, }, { 4, 1, 0, 2, 161, 74, }, @@ -43106,9 +43106,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 6, 1, 0, 2, 161, 76, }, { 7, 1, 0, 2, 161, 54, }, { 8, 1, 0, 2, 161, 76, }, - { 9, 1, 0, 2, 161, -128, }, + { 9, 1, 0, 2, 161, 28, }, { 0, 1, 0, 2, 165, 76, }, - { 2, 1, 0, 2, 165, -128, }, + { 2, 1, 0, 2, 165, 28, }, { 1, 1, 0, 2, 165, 127, }, { 3, 1, 0, 2, 165, 76, }, { 4, 1, 0, 2, 165, 74, }, @@ -43116,262 +43116,262 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 6, 1, 0, 2, 165, 76, }, { 7, 1, 0, 2, 165, 54, }, { 8, 1, 0, 2, 165, 76, }, - { 9, 1, 0, 2, 165, -128, }, - { 0, 1, 0, 3, 36, 68, }, - { 2, 1, 0, 3, 36, 38, }, + { 9, 1, 0, 2, 165, 28, }, + { 0, 1, 0, 3, 36, 64, }, + { 2, 1, 0, 3, 36, 36, }, { 1, 1, 0, 3, 36, 50, }, { 3, 1, 0, 3, 36, 38, }, { 4, 1, 0, 3, 36, 66, }, - { 5, 1, 0, 3, 36, 38, }, + { 5, 1, 0, 3, 36, 36, }, { 6, 1, 0, 3, 36, 52, }, { 7, 1, 0, 3, 36, 30, }, { 8, 1, 0, 3, 36, 50, }, { 9, 1, 0, 3, 36, 38, }, { 0, 1, 0, 3, 40, 68, }, - { 2, 1, 0, 3, 40, 38, }, + { 2, 1, 0, 3, 40, 36, }, { 1, 1, 0, 3, 40, 50, }, { 3, 1, 0, 3, 40, 38, }, { 4, 1, 0, 3, 40, 66, }, - { 5, 1, 0, 3, 40, 38, }, + { 5, 1, 0, 3, 40, 36, }, { 6, 1, 0, 3, 40, 52, }, { 7, 1, 0, 3, 40, 30, }, { 8, 1, 0, 3, 40, 50, }, { 9, 1, 0, 3, 40, 38, }, { 0, 1, 0, 3, 44, 68, }, - { 2, 1, 0, 3, 44, 38, }, + { 2, 1, 0, 3, 44, 36, }, { 1, 1, 0, 3, 44, 50, }, { 3, 1, 0, 3, 44, 38, }, { 4, 1, 0, 3, 44, 66, }, - { 5, 1, 0, 3, 44, 38, }, + { 5, 1, 0, 3, 44, 36, }, { 6, 1, 0, 3, 44, 52, }, { 7, 1, 0, 3, 44, 30, }, { 8, 1, 0, 3, 44, 50, }, { 9, 1, 0, 3, 44, 38, }, { 0, 1, 0, 3, 48, 68, }, - { 2, 1, 0, 3, 48, 38, }, + { 2, 1, 0, 3, 48, 36, }, { 1, 1, 0, 3, 48, 50, }, { 3, 1, 0, 3, 48, 38, }, - { 4, 1, 0, 3, 48, 36, }, - { 5, 1, 0, 3, 48, 38, }, + { 4, 1, 0, 3, 48, 42, }, + { 5, 1, 0, 3, 48, 36, }, { 6, 1, 0, 3, 48, 52, }, { 
7, 1, 0, 3, 48, 30, }, { 8, 1, 0, 3, 48, 50, }, { 9, 1, 0, 3, 48, 38, }, { 0, 1, 0, 3, 52, 68, }, - { 2, 1, 0, 3, 52, 38, }, + { 2, 1, 0, 3, 52, 36, }, { 1, 1, 0, 3, 52, 50, }, { 3, 1, 0, 3, 52, 40, }, { 4, 1, 0, 3, 52, 66, }, - { 5, 1, 0, 3, 52, 38, }, + { 5, 1, 0, 3, 52, 36, }, { 6, 1, 0, 3, 52, 68, }, { 7, 1, 0, 3, 52, 30, }, { 8, 1, 0, 3, 52, 68, }, { 9, 1, 0, 3, 52, 38, }, { 0, 1, 0, 3, 56, 68, }, - { 2, 1, 0, 3, 56, 38, }, + { 2, 1, 0, 3, 56, 36, }, { 1, 1, 0, 3, 56, 50, }, { 3, 1, 0, 3, 56, 40, }, { 4, 1, 0, 3, 56, 66, }, - { 5, 1, 0, 3, 56, 38, }, + { 5, 1, 0, 3, 56, 36, }, { 6, 1, 0, 3, 56, 68, }, { 7, 1, 0, 3, 56, 30, }, { 8, 1, 0, 3, 56, 68, }, { 9, 1, 0, 3, 56, 38, }, - { 0, 1, 0, 3, 60, 66, }, - { 2, 1, 0, 3, 60, 38, }, + { 0, 1, 0, 3, 60, 68, }, + { 2, 1, 0, 3, 60, 36, }, { 1, 1, 0, 3, 60, 50, }, { 3, 1, 0, 3, 60, 40, }, { 4, 1, 0, 3, 60, 66, }, - { 5, 1, 0, 3, 60, 38, }, + { 5, 1, 0, 3, 60, 36, }, { 6, 1, 0, 3, 60, 66, }, { 7, 1, 0, 3, 60, 30, }, { 8, 1, 0, 3, 60, 66, }, { 9, 1, 0, 3, 60, 38, }, - { 0, 1, 0, 3, 64, 68, }, - { 2, 1, 0, 3, 64, 38, }, + { 0, 1, 0, 3, 64, 66, }, + { 2, 1, 0, 3, 64, 36, }, { 1, 1, 0, 3, 64, 50, }, { 3, 1, 0, 3, 64, 40, }, { 4, 1, 0, 3, 64, 66, }, - { 5, 1, 0, 3, 64, 38, }, + { 5, 1, 0, 3, 64, 36, }, { 6, 1, 0, 3, 64, 68, }, { 7, 1, 0, 3, 64, 30, }, { 8, 1, 0, 3, 64, 68, }, { 9, 1, 0, 3, 64, 38, }, - { 0, 1, 0, 3, 100, 60, }, - { 2, 1, 0, 3, 100, 38, }, + { 0, 1, 0, 3, 100, 64, }, + { 2, 1, 0, 3, 100, 36, }, { 1, 1, 0, 3, 100, 70, }, - { 3, 1, 0, 3, 100, 60, }, - { 4, 1, 0, 3, 100, 64, }, - { 5, 1, 0, 3, 100, 38, }, + { 3, 1, 0, 3, 100, 64, }, + { 4, 1, 0, 3, 100, 66, }, + { 5, 1, 0, 3, 100, 36, }, { 6, 1, 0, 3, 100, 60, }, { 7, 1, 0, 3, 100, 30, }, { 8, 1, 0, 3, 100, 60, }, { 9, 1, 0, 3, 100, 127, }, { 0, 1, 0, 3, 104, 68, }, - { 2, 1, 0, 3, 104, 38, }, + { 2, 1, 0, 3, 104, 36, }, { 1, 1, 0, 3, 104, 70, }, { 3, 1, 0, 3, 104, 68, }, - { 4, 1, 0, 3, 104, 64, }, - { 5, 1, 0, 3, 104, 38, }, + { 4, 1, 0, 3, 104, 66, }, + { 5, 1, 0, 3, 104, 36, }, { 6, 1, 0, 3, 104, 68, }, { 7, 1, 0, 3, 104, 30, }, { 8, 1, 0, 3, 104, 68, }, { 9, 1, 0, 3, 104, 127, }, { 0, 1, 0, 3, 108, 68, }, - { 2, 1, 0, 3, 108, 38, }, + { 2, 1, 0, 3, 108, 36, }, { 1, 1, 0, 3, 108, 70, }, { 3, 1, 0, 3, 108, 68, }, - { 4, 1, 0, 3, 108, 64, }, - { 5, 1, 0, 3, 108, 38, }, + { 4, 1, 0, 3, 108, 66, }, + { 5, 1, 0, 3, 108, 36, }, { 6, 1, 0, 3, 108, 68, }, { 7, 1, 0, 3, 108, 30, }, { 8, 1, 0, 3, 108, 68, }, { 9, 1, 0, 3, 108, 127, }, { 0, 1, 0, 3, 112, 68, }, - { 2, 1, 0, 3, 112, 38, }, + { 2, 1, 0, 3, 112, 36, }, { 1, 1, 0, 3, 112, 70, }, { 3, 1, 0, 3, 112, 68, }, - { 4, 1, 0, 3, 112, 64, }, - { 5, 1, 0, 3, 112, 38, }, + { 4, 1, 0, 3, 112, 66, }, + { 5, 1, 0, 3, 112, 36, }, { 6, 1, 0, 3, 112, 68, }, { 7, 1, 0, 3, 112, 30, }, { 8, 1, 0, 3, 112, 68, }, { 9, 1, 0, 3, 112, 127, }, { 0, 1, 0, 3, 116, 68, }, - { 2, 1, 0, 3, 116, 38, }, + { 2, 1, 0, 3, 116, 36, }, { 1, 1, 0, 3, 116, 70, }, { 3, 1, 0, 3, 116, 68, }, - { 4, 1, 0, 3, 116, 64, }, - { 5, 1, 0, 3, 116, 38, }, + { 4, 1, 0, 3, 116, 66, }, + { 5, 1, 0, 3, 116, 36, }, { 6, 1, 0, 3, 116, 68, }, { 7, 1, 0, 3, 116, 30, }, { 8, 1, 0, 3, 116, 68, }, { 9, 1, 0, 3, 116, 127, }, { 0, 1, 0, 3, 120, 68, }, - { 2, 1, 0, 3, 120, 38, }, + { 2, 1, 0, 3, 120, 36, }, { 1, 1, 0, 3, 120, 70, }, { 3, 1, 0, 3, 120, 127, }, - { 4, 1, 0, 3, 120, 64, }, + { 4, 1, 0, 3, 120, 66, }, { 5, 1, 0, 3, 120, 127, }, { 6, 1, 0, 3, 120, 68, }, { 7, 1, 0, 3, 120, 30, }, { 8, 1, 0, 3, 120, 68, }, { 9, 1, 0, 3, 120, 127, }, { 0, 1, 0, 3, 124, 68, }, - { 2, 1, 0, 3, 124, 38, }, 
+ { 2, 1, 0, 3, 124, 36, }, { 1, 1, 0, 3, 124, 70, }, { 3, 1, 0, 3, 124, 127, }, - { 4, 1, 0, 3, 124, 64, }, + { 4, 1, 0, 3, 124, 66, }, { 5, 1, 0, 3, 124, 127, }, { 6, 1, 0, 3, 124, 68, }, { 7, 1, 0, 3, 124, 30, }, { 8, 1, 0, 3, 124, 68, }, { 9, 1, 0, 3, 124, 127, }, { 0, 1, 0, 3, 128, 68, }, - { 2, 1, 0, 3, 128, 38, }, + { 2, 1, 0, 3, 128, 36, }, { 1, 1, 0, 3, 128, 70, }, { 3, 1, 0, 3, 128, 127, }, - { 4, 1, 0, 3, 128, 64, }, + { 4, 1, 0, 3, 128, 66, }, { 5, 1, 0, 3, 128, 127, }, { 6, 1, 0, 3, 128, 68, }, { 7, 1, 0, 3, 128, 30, }, { 8, 1, 0, 3, 128, 68, }, { 9, 1, 0, 3, 128, 127, }, { 0, 1, 0, 3, 132, 68, }, - { 2, 1, 0, 3, 132, 38, }, + { 2, 1, 0, 3, 132, 36, }, { 1, 1, 0, 3, 132, 70, }, { 3, 1, 0, 3, 132, 68, }, - { 4, 1, 0, 3, 132, 64, }, - { 5, 1, 0, 3, 132, 38, }, + { 4, 1, 0, 3, 132, 66, }, + { 5, 1, 0, 3, 132, 36, }, { 6, 1, 0, 3, 132, 68, }, { 7, 1, 0, 3, 132, 30, }, { 8, 1, 0, 3, 132, 68, }, { 9, 1, 0, 3, 132, 127, }, { 0, 1, 0, 3, 136, 68, }, - { 2, 1, 0, 3, 136, 38, }, + { 2, 1, 0, 3, 136, 36, }, { 1, 1, 0, 3, 136, 70, }, { 3, 1, 0, 3, 136, 68, }, - { 4, 1, 0, 3, 136, 64, }, - { 5, 1, 0, 3, 136, 38, }, + { 4, 1, 0, 3, 136, 66, }, + { 5, 1, 0, 3, 136, 36, }, { 6, 1, 0, 3, 136, 68, }, { 7, 1, 0, 3, 136, 30, }, { 8, 1, 0, 3, 136, 68, }, { 9, 1, 0, 3, 136, 127, }, - { 0, 1, 0, 3, 140, 60, }, - { 2, 1, 0, 3, 140, 38, }, + { 0, 1, 0, 3, 140, 58, }, + { 2, 1, 0, 3, 140, 36, }, { 1, 1, 0, 3, 140, 70, }, - { 3, 1, 0, 3, 140, 60, }, - { 4, 1, 0, 3, 140, 64, }, - { 5, 1, 0, 3, 140, 38, }, + { 3, 1, 0, 3, 140, 58, }, + { 4, 1, 0, 3, 140, 66, }, + { 5, 1, 0, 3, 140, 36, }, { 6, 1, 0, 3, 140, 60, }, { 7, 1, 0, 3, 140, 30, }, { 8, 1, 0, 3, 140, 60, }, { 9, 1, 0, 3, 140, 127, }, { 0, 1, 0, 3, 144, 68, }, { 2, 1, 0, 3, 144, 127, }, - { 1, 1, 0, 3, 144, 127, }, + { 1, 1, 0, 3, 144, 70, }, { 3, 1, 0, 3, 144, 68, }, - { 4, 1, 0, 3, 144, 64, }, + { 4, 1, 0, 3, 144, 66, }, { 5, 1, 0, 3, 144, 127, }, { 6, 1, 0, 3, 144, 68, }, { 7, 1, 0, 3, 144, 127, }, { 8, 1, 0, 3, 144, 68, }, { 9, 1, 0, 3, 144, 127, }, { 0, 1, 0, 3, 149, 76, }, - { 2, 1, 0, 3, 149, -128, }, + { 2, 1, 0, 3, 149, 4, }, { 1, 1, 0, 3, 149, 127, }, { 3, 1, 0, 3, 149, 76, }, - { 4, 1, 0, 3, 149, 60, }, + { 4, 1, 0, 3, 149, 62, }, { 5, 1, 0, 3, 149, 76, }, { 6, 1, 0, 3, 149, 76, }, { 7, 1, 0, 3, 149, 30, }, { 8, 1, 0, 3, 149, 72, }, - { 9, 1, 0, 3, 149, -128, }, + { 9, 1, 0, 3, 149, 4, }, { 0, 1, 0, 3, 153, 76, }, - { 2, 1, 0, 3, 153, -128, }, + { 2, 1, 0, 3, 153, 4, }, { 1, 1, 0, 3, 153, 127, }, { 3, 1, 0, 3, 153, 76, }, - { 4, 1, 0, 3, 153, 60, }, + { 4, 1, 0, 3, 153, 62, }, { 5, 1, 0, 3, 153, 76, }, { 6, 1, 0, 3, 153, 76, }, { 7, 1, 0, 3, 153, 30, }, { 8, 1, 0, 3, 153, 76, }, - { 9, 1, 0, 3, 153, -128, }, + { 9, 1, 0, 3, 153, 4, }, { 0, 1, 0, 3, 157, 76, }, - { 2, 1, 0, 3, 157, -128, }, + { 2, 1, 0, 3, 157, 4, }, { 1, 1, 0, 3, 157, 127, }, { 3, 1, 0, 3, 157, 76, }, - { 4, 1, 0, 3, 157, 60, }, + { 4, 1, 0, 3, 157, 62, }, { 5, 1, 0, 3, 157, 76, }, { 6, 1, 0, 3, 157, 76, }, { 7, 1, 0, 3, 157, 30, }, { 8, 1, 0, 3, 157, 76, }, - { 9, 1, 0, 3, 157, -128, }, + { 9, 1, 0, 3, 157, 4, }, { 0, 1, 0, 3, 161, 76, }, - { 2, 1, 0, 3, 161, -128, }, + { 2, 1, 0, 3, 161, 4, }, { 1, 1, 0, 3, 161, 127, }, { 3, 1, 0, 3, 161, 76, }, - { 4, 1, 0, 3, 161, 60, }, + { 4, 1, 0, 3, 161, 62, }, { 5, 1, 0, 3, 161, 76, }, { 6, 1, 0, 3, 161, 76, }, { 7, 1, 0, 3, 161, 30, }, { 8, 1, 0, 3, 161, 76, }, - { 9, 1, 0, 3, 161, -128, }, + { 9, 1, 0, 3, 161, 4, }, { 0, 1, 0, 3, 165, 76, }, - { 2, 1, 0, 3, 165, -128, }, + { 2, 1, 0, 3, 165, 4, }, { 1, 1, 0, 3, 165, 127, }, { 
3, 1, 0, 3, 165, 76, }, - { 4, 1, 0, 3, 165, 60, }, + { 4, 1, 0, 3, 165, 62, }, { 5, 1, 0, 3, 165, 76, }, { 6, 1, 0, 3, 165, 76, }, { 7, 1, 0, 3, 165, 30, }, { 8, 1, 0, 3, 165, 76, }, - { 9, 1, 0, 3, 165, -128, }, + { 9, 1, 0, 3, 165, 4, }, { 0, 1, 1, 2, 38, 66, }, { 2, 1, 1, 2, 38, 64, }, - { 1, 1, 1, 2, 38, 62, }, + { 1, 1, 1, 2, 38, 64, }, { 3, 1, 1, 2, 38, 64, }, - { 4, 1, 1, 2, 38, 72, }, + { 4, 1, 1, 2, 38, 64, }, { 5, 1, 1, 2, 38, 64, }, { 6, 1, 1, 2, 38, 64, }, { 7, 1, 1, 2, 38, 54, }, @@ -43379,9 +43379,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 9, 1, 1, 2, 38, 64, }, { 0, 1, 1, 2, 46, 72, }, { 2, 1, 1, 2, 46, 64, }, - { 1, 1, 1, 2, 46, 62, }, + { 1, 1, 1, 2, 46, 64, }, { 3, 1, 1, 2, 46, 64, }, - { 4, 1, 1, 2, 46, 60, }, + { 4, 1, 1, 2, 46, 70, }, { 5, 1, 1, 2, 46, 64, }, { 6, 1, 1, 2, 46, 64, }, { 7, 1, 1, 2, 46, 54, }, @@ -43389,7 +43389,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 9, 1, 1, 2, 46, 64, }, { 0, 1, 1, 2, 54, 72, }, { 2, 1, 1, 2, 54, 64, }, - { 1, 1, 1, 2, 54, 62, }, + { 1, 1, 1, 2, 54, 64, }, { 3, 1, 1, 2, 54, 64, }, { 4, 1, 1, 2, 54, 72, }, { 5, 1, 1, 2, 54, 64, }, @@ -43397,21 +43397,21 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 7, 1, 1, 2, 54, 54, }, { 8, 1, 1, 2, 54, 72, }, { 9, 1, 1, 2, 54, 64, }, - { 0, 1, 1, 2, 62, 64, }, + { 0, 1, 1, 2, 62, 60, }, { 2, 1, 1, 2, 62, 64, }, { 1, 1, 1, 2, 62, 62, }, - { 3, 1, 1, 2, 62, 64, }, - { 4, 1, 1, 2, 62, 70, }, + { 3, 1, 1, 2, 62, 60, }, + { 4, 1, 1, 2, 62, 60, }, { 5, 1, 1, 2, 62, 64, }, { 6, 1, 1, 2, 62, 64, }, { 7, 1, 1, 2, 62, 54, }, { 8, 1, 1, 2, 62, 64, }, { 9, 1, 1, 2, 62, 64, }, - { 0, 1, 1, 2, 102, 58, }, + { 0, 1, 1, 2, 102, 60, }, { 2, 1, 1, 2, 102, 64, }, { 1, 1, 1, 2, 102, 72, }, - { 3, 1, 1, 2, 102, 58, }, - { 4, 1, 1, 2, 102, 72, }, + { 3, 1, 1, 2, 102, 60, }, + { 4, 1, 1, 2, 102, 64, }, { 5, 1, 1, 2, 102, 64, }, { 6, 1, 1, 2, 102, 58, }, { 7, 1, 1, 2, 102, 54, }, @@ -43459,7 +43459,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 9, 1, 1, 2, 134, 127, }, { 0, 1, 1, 2, 142, 72, }, { 2, 1, 1, 2, 142, 127, }, - { 1, 1, 1, 2, 142, 127, }, + { 1, 1, 1, 2, 142, 72, }, { 3, 1, 1, 2, 142, 72, }, { 4, 1, 1, 2, 142, 72, }, { 5, 1, 1, 2, 142, 127, }, @@ -43468,7 +43468,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 8, 1, 1, 2, 142, 72, }, { 9, 1, 1, 2, 142, 127, }, { 0, 1, 1, 2, 151, 72, }, - { 2, 1, 1, 2, 151, -128, }, + { 2, 1, 1, 2, 151, 28, }, { 1, 1, 1, 2, 151, 127, }, { 3, 1, 1, 2, 151, 72, }, { 4, 1, 1, 2, 151, 72, }, @@ -43476,9 +43476,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 6, 1, 1, 2, 151, 72, }, { 7, 1, 1, 2, 151, 54, }, { 8, 1, 1, 2, 151, 72, }, - { 9, 1, 1, 2, 151, -128, }, + { 9, 1, 1, 2, 151, 28, }, { 0, 1, 1, 2, 159, 72, }, - { 2, 1, 1, 2, 159, -128, }, + { 2, 1, 1, 2, 159, 28, }, { 1, 1, 1, 2, 159, 127, }, { 3, 1, 1, 2, 159, 72, }, { 4, 1, 1, 2, 159, 72, }, @@ -43486,12 +43486,12 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 6, 1, 1, 2, 159, 72, }, { 7, 1, 1, 2, 159, 54, }, { 8, 1, 1, 2, 159, 72, }, - { 9, 1, 1, 2, 159, -128, }, + { 9, 1, 1, 2, 159, 28, }, { 0, 1, 1, 3, 38, 60, }, { 2, 1, 1, 3, 38, 40, }, { 1, 1, 1, 3, 38, 50, }, { 3, 1, 1, 3, 38, 40, }, - { 4, 1, 1, 3, 38, 62, }, + { 4, 1, 1, 3, 38, 54, }, { 5, 1, 1, 3, 38, 40, }, { 6, 1, 1, 3, 38, 52, }, { 7, 1, 1, 3, 38, 30, }, @@ -43501,7 +43501,7 @@ static const struct rtw_txpwr_lmt_cfg_pair 
rtw8822c_txpwr_lmt_type5[] = { { 2, 1, 1, 3, 46, 40, }, { 1, 1, 1, 3, 46, 50, }, { 3, 1, 1, 3, 46, 40, }, - { 4, 1, 1, 3, 46, 46, }, + { 4, 1, 1, 3, 46, 54, }, { 5, 1, 1, 3, 46, 40, }, { 6, 1, 1, 3, 46, 52, }, { 7, 1, 1, 3, 46, 30, }, @@ -43511,7 +43511,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 1, 1, 3, 54, 40, }, { 1, 1, 1, 3, 54, 50, }, { 3, 1, 1, 3, 54, 40, }, - { 4, 1, 1, 3, 54, 62, }, + { 4, 1, 1, 3, 54, 66, }, { 5, 1, 1, 3, 54, 40, }, { 6, 1, 1, 3, 54, 68, }, { 7, 1, 1, 3, 54, 30, }, @@ -43521,17 +43521,17 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 1, 1, 3, 62, 40, }, { 1, 1, 1, 3, 62, 48, }, { 3, 1, 1, 3, 62, 40, }, - { 4, 1, 1, 3, 62, 58, }, + { 4, 1, 1, 3, 62, 50, }, { 5, 1, 1, 3, 62, 40, }, { 6, 1, 1, 3, 62, 58, }, { 7, 1, 1, 3, 62, 30, }, { 8, 1, 1, 3, 62, 58, }, { 9, 1, 1, 3, 62, 40, }, - { 0, 1, 1, 3, 102, 54, }, + { 0, 1, 1, 3, 102, 56, }, { 2, 1, 1, 3, 102, 40, }, { 1, 1, 1, 3, 102, 70, }, - { 3, 1, 1, 3, 102, 54, }, - { 4, 1, 1, 3, 102, 64, }, + { 3, 1, 1, 3, 102, 56, }, + { 4, 1, 1, 3, 102, 54, }, { 5, 1, 1, 3, 102, 40, }, { 6, 1, 1, 3, 102, 54, }, { 7, 1, 1, 3, 102, 30, }, @@ -43541,7 +43541,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 1, 1, 3, 110, 40, }, { 1, 1, 1, 3, 110, 70, }, { 3, 1, 1, 3, 110, 68, }, - { 4, 1, 1, 3, 110, 64, }, + { 4, 1, 1, 3, 110, 66, }, { 5, 1, 1, 3, 110, 40, }, { 6, 1, 1, 3, 110, 68, }, { 7, 1, 1, 3, 110, 30, }, @@ -43551,7 +43551,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 1, 1, 3, 118, 40, }, { 1, 1, 1, 3, 118, 70, }, { 3, 1, 1, 3, 118, 127, }, - { 4, 1, 1, 3, 118, 64, }, + { 4, 1, 1, 3, 118, 66, }, { 5, 1, 1, 3, 118, 127, }, { 6, 1, 1, 3, 118, 68, }, { 7, 1, 1, 3, 118, 30, }, @@ -43561,7 +43561,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 1, 1, 3, 126, 40, }, { 1, 1, 1, 3, 126, 70, }, { 3, 1, 1, 3, 126, 127, }, - { 4, 1, 1, 3, 126, 64, }, + { 4, 1, 1, 3, 126, 66, }, { 5, 1, 1, 3, 126, 127, }, { 6, 1, 1, 3, 126, 68, }, { 7, 1, 1, 3, 126, 30, }, @@ -43571,7 +43571,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 2, 1, 1, 3, 134, 40, }, { 1, 1, 1, 3, 134, 70, }, { 3, 1, 1, 3, 134, 68, }, - { 4, 1, 1, 3, 134, 64, }, + { 4, 1, 1, 3, 134, 66, }, { 5, 1, 1, 3, 134, 40, }, { 6, 1, 1, 3, 134, 68, }, { 7, 1, 1, 3, 134, 30, }, @@ -43579,16 +43579,16 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 9, 1, 1, 3, 134, 127, }, { 0, 1, 1, 3, 142, 68, }, { 2, 1, 1, 3, 142, 127, }, - { 1, 1, 1, 3, 142, 127, }, + { 1, 1, 1, 3, 142, 70, }, { 3, 1, 1, 3, 142, 68, }, - { 4, 1, 1, 3, 142, 64, }, + { 4, 1, 1, 3, 142, 66, }, { 5, 1, 1, 3, 142, 127, }, { 6, 1, 1, 3, 142, 68, }, { 7, 1, 1, 3, 142, 127, }, { 8, 1, 1, 3, 142, 68, }, { 9, 1, 1, 3, 142, 127, }, { 0, 1, 1, 3, 151, 72, }, - { 2, 1, 1, 3, 151, -128, }, + { 2, 1, 1, 3, 151, 4, }, { 1, 1, 1, 3, 151, 127, }, { 3, 1, 1, 3, 151, 72, }, { 4, 1, 1, 3, 151, 66, }, @@ -43596,9 +43596,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 6, 1, 1, 3, 151, 72, }, { 7, 1, 1, 3, 151, 30, }, { 8, 1, 1, 3, 151, 68, }, - { 9, 1, 1, 3, 151, -128, }, + { 9, 1, 1, 3, 151, 4, }, { 0, 1, 1, 3, 159, 72, }, - { 2, 1, 1, 3, 159, -128, }, + { 2, 1, 1, 3, 159, 4, }, { 1, 1, 1, 3, 159, 127, }, { 3, 1, 1, 3, 159, 72, }, { 4, 1, 1, 3, 159, 66, }, @@ -43606,32 +43606,32 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 6, 1, 1, 
3, 159, 72, }, { 7, 1, 1, 3, 159, 30, }, { 8, 1, 1, 3, 159, 72, }, - { 9, 1, 1, 3, 159, -128, }, - { 0, 1, 2, 4, 42, 64, }, + { 9, 1, 1, 3, 159, 4, }, + { 0, 1, 2, 4, 42, 68, }, { 2, 1, 2, 4, 42, 64, }, { 1, 1, 2, 4, 42, 64, }, { 3, 1, 2, 4, 42, 64, }, - { 4, 1, 2, 4, 42, 68, }, + { 4, 1, 2, 4, 42, 60, }, { 5, 1, 2, 4, 42, 64, }, { 6, 1, 2, 4, 42, 64, }, { 7, 1, 2, 4, 42, 54, }, { 8, 1, 2, 4, 42, 62, }, { 9, 1, 2, 4, 42, 64, }, - { 0, 1, 2, 4, 58, 62, }, + { 0, 1, 2, 4, 58, 60, }, { 2, 1, 2, 4, 58, 64, }, { 1, 1, 2, 4, 58, 64, }, - { 3, 1, 2, 4, 58, 62, }, - { 4, 1, 2, 4, 58, 64, }, + { 3, 1, 2, 4, 58, 60, }, + { 4, 1, 2, 4, 58, 56, }, { 5, 1, 2, 4, 58, 64, }, { 6, 1, 2, 4, 58, 62, }, { 7, 1, 2, 4, 58, 54, }, { 8, 1, 2, 4, 58, 62, }, { 9, 1, 2, 4, 58, 64, }, - { 0, 1, 2, 4, 106, 58, }, + { 0, 1, 2, 4, 106, 60, }, { 2, 1, 2, 4, 106, 64, }, { 1, 1, 2, 4, 106, 72, }, - { 3, 1, 2, 4, 106, 58, }, - { 4, 1, 2, 4, 106, 66, }, + { 3, 1, 2, 4, 106, 60, }, + { 4, 1, 2, 4, 106, 58, }, { 5, 1, 2, 4, 106, 64, }, { 6, 1, 2, 4, 106, 58, }, { 7, 1, 2, 4, 106, 54, }, @@ -43649,84 +43649,84 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 9, 1, 2, 4, 122, 127, }, { 0, 1, 2, 4, 138, 72, }, { 2, 1, 2, 4, 138, 127, }, - { 1, 1, 2, 4, 138, 127, }, + { 1, 1, 2, 4, 138, 72, }, { 3, 1, 2, 4, 138, 72, }, - { 4, 1, 2, 4, 138, 68, }, + { 4, 1, 2, 4, 138, 70, }, { 5, 1, 2, 4, 138, 127, }, { 6, 1, 2, 4, 138, 72, }, { 7, 1, 2, 4, 138, 127, }, { 8, 1, 2, 4, 138, 72, }, { 9, 1, 2, 4, 138, 127, }, { 0, 1, 2, 4, 155, 72, }, - { 2, 1, 2, 4, 155, -128, }, + { 2, 1, 2, 4, 155, 28, }, { 1, 1, 2, 4, 155, 127, }, { 3, 1, 2, 4, 155, 72, }, - { 4, 1, 2, 4, 155, 68, }, + { 4, 1, 2, 4, 155, 62, }, { 5, 1, 2, 4, 155, 72, }, { 6, 1, 2, 4, 155, 72, }, { 7, 1, 2, 4, 155, 54, }, { 8, 1, 2, 4, 155, 68, }, - { 9, 1, 2, 4, 155, -128, }, - { 0, 1, 2, 5, 42, 54, }, + { 9, 1, 2, 4, 155, 28, }, + { 0, 1, 2, 5, 42, 56, }, { 2, 1, 2, 5, 42, 40, }, { 1, 1, 2, 5, 42, 50, }, { 3, 1, 2, 5, 42, 40, }, - { 4, 1, 2, 5, 42, 58, }, + { 4, 1, 2, 5, 42, 50, }, { 5, 1, 2, 5, 42, 40, }, { 6, 1, 2, 5, 42, 52, }, { 7, 1, 2, 5, 42, 30, }, { 8, 1, 2, 5, 42, 50, }, { 9, 1, 2, 5, 42, 40, }, - { 0, 1, 2, 5, 58, 52, }, + { 0, 1, 2, 5, 58, 54, }, { 2, 1, 2, 5, 58, 40, }, { 1, 1, 2, 5, 58, 50, }, { 3, 1, 2, 5, 58, 40, }, - { 4, 1, 2, 5, 58, 56, }, + { 4, 1, 2, 5, 58, 46, }, { 5, 1, 2, 5, 58, 40, }, { 6, 1, 2, 5, 58, 52, }, { 7, 1, 2, 5, 58, 30, }, { 8, 1, 2, 5, 58, 52, }, { 9, 1, 2, 5, 58, 40, }, - { 0, 1, 2, 5, 106, 50, }, + { 0, 1, 2, 5, 106, 48, }, { 2, 1, 2, 5, 106, 40, }, { 1, 1, 2, 5, 106, 72, }, - { 3, 1, 2, 5, 106, 50, }, - { 4, 1, 2, 5, 106, 56, }, + { 3, 1, 2, 5, 106, 48, }, + { 4, 1, 2, 5, 106, 50, }, { 5, 1, 2, 5, 106, 40, }, { 6, 1, 2, 5, 106, 50, }, { 7, 1, 2, 5, 106, 30, }, { 8, 1, 2, 5, 106, 50, }, { 9, 1, 2, 5, 106, 127, }, - { 0, 1, 2, 5, 122, 66, }, + { 0, 1, 2, 5, 122, 70, }, { 2, 1, 2, 5, 122, 40, }, { 1, 1, 2, 5, 122, 72, }, { 3, 1, 2, 5, 122, 127, }, - { 4, 1, 2, 5, 122, 56, }, + { 4, 1, 2, 5, 122, 62, }, { 5, 1, 2, 5, 122, 127, }, { 6, 1, 2, 5, 122, 66, }, { 7, 1, 2, 5, 122, 30, }, { 8, 1, 2, 5, 122, 66, }, { 9, 1, 2, 5, 122, 127, }, - { 0, 1, 2, 5, 138, 66, }, + { 0, 1, 2, 5, 138, 70, }, { 2, 1, 2, 5, 138, 127, }, - { 1, 1, 2, 5, 138, 127, }, - { 3, 1, 2, 5, 138, 66, }, - { 4, 1, 2, 5, 138, 58, }, + { 1, 1, 2, 5, 138, 72, }, + { 3, 1, 2, 5, 138, 70, }, + { 4, 1, 2, 5, 138, 62, }, { 5, 1, 2, 5, 138, 127, }, { 6, 1, 2, 5, 138, 66, }, { 7, 1, 2, 5, 138, 127, }, { 8, 1, 2, 5, 138, 66, }, { 9, 1, 2, 5, 138, 127, }, - { 
0, 1, 2, 5, 155, 62, }, - { 2, 1, 2, 5, 155, -128, }, + { 0, 1, 2, 5, 155, 72, }, + { 2, 1, 2, 5, 155, 4, }, { 1, 1, 2, 5, 155, 127, }, - { 3, 1, 2, 5, 155, 62, }, - { 4, 1, 2, 5, 155, 58, }, + { 3, 1, 2, 5, 155, 72, }, + { 4, 1, 2, 5, 155, 52, }, { 5, 1, 2, 5, 155, 72, }, { 6, 1, 2, 5, 155, 62, }, { 7, 1, 2, 5, 155, 30, }, { 8, 1, 2, 5, 155, 62, }, - { 9, 1, 2, 5, 155, -128, }, + { 9, 1, 2, 5, 155, 4, }, }; RTW_DECL_TABLE_TXPWR_LMT(rtw8822c_txpwr_lmt_type5); diff --git a/drivers/net/wireless/rsi/rsi_91x_ps.c b/drivers/net/wireless/rsi/rsi_91x_ps.c index fdaa5a7260dd..a02904921729 100644 --- a/drivers/net/wireless/rsi/rsi_91x_ps.c +++ b/drivers/net/wireless/rsi/rsi_91x_ps.c @@ -16,7 +16,6 @@ #include <linux/etherdevice.h> #include <linux/if.h> -#include <linux/version.h> #include "rsi_debugfs.h" #include "rsi_mgmt.h" #include "rsi_common.h" diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c index fe0287b22a25..e0c502bc4270 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c @@ -1513,7 +1513,7 @@ static int rsi_restore(struct device *dev) } static const struct dev_pm_ops rsi_pm_ops = { .suspend = rsi_suspend, - .resume = rsi_resume, + .resume_noirq = rsi_resume, .freeze = rsi_freeze, .thaw = rsi_thaw, .restore = rsi_restore, diff --git a/drivers/net/wireless/rsi/rsi_boot_params.h b/drivers/net/wireless/rsi/rsi_boot_params.h index c1cf19d1e376..30e03aa6a529 100644 --- a/drivers/net/wireless/rsi/rsi_boot_params.h +++ b/drivers/net/wireless/rsi/rsi_boot_params.h @@ -1,4 +1,4 @@ -/** +/* * Copyright (c) 2014 Redpine Signals Inc. * * Permission to use, copy, modify, and/or distribute this software for any diff --git a/drivers/net/wireless/rsi/rsi_coex.h b/drivers/net/wireless/rsi/rsi_coex.h index 0fdc67f37a56..2c14e4c651b9 100644 --- a/drivers/net/wireless/rsi/rsi_coex.h +++ b/drivers/net/wireless/rsi/rsi_coex.h @@ -1,4 +1,4 @@ -/** +/* * Copyright (c) 2018 Redpine Signals Inc. * * Permission to use, copy, modify, and/or distribute this software for any diff --git a/drivers/net/wireless/rsi/rsi_common.h b/drivers/net/wireless/rsi/rsi_common.h index 60f1f286b030..7aa5124575cf 100644 --- a/drivers/net/wireless/rsi/rsi_common.h +++ b/drivers/net/wireless/rsi/rsi_common.h @@ -1,4 +1,4 @@ -/** +/* * Copyright (c) 2014 Redpine Signals Inc. * * Permission to use, copy, modify, and/or distribute this software for any diff --git a/drivers/net/wireless/rsi/rsi_debugfs.h b/drivers/net/wireless/rsi/rsi_debugfs.h index 580ad3b3f710..a6a28640ad40 100644 --- a/drivers/net/wireless/rsi/rsi_debugfs.h +++ b/drivers/net/wireless/rsi/rsi_debugfs.h @@ -1,4 +1,4 @@ -/** +/* * Copyright (c) 2014 Redpine Signals Inc. * * Permission to use, copy, modify, and/or distribute this software for any diff --git a/drivers/net/wireless/rsi/rsi_hal.h b/drivers/net/wireless/rsi/rsi_hal.h index 46e36df9e8e3..d044a440fa08 100644 --- a/drivers/net/wireless/rsi/rsi_hal.h +++ b/drivers/net/wireless/rsi/rsi_hal.h @@ -1,4 +1,4 @@ -/** +/* * Copyright (c) 2017 Redpine Signals Inc. * * Permission to use, copy, modify, and/or distribute this software for any diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h index 73a19e43106b..a1065e5a92b4 100644 --- a/drivers/net/wireless/rsi/rsi_main.h +++ b/drivers/net/wireless/rsi/rsi_main.h @@ -1,4 +1,4 @@ -/** +/* * Copyright (c) 2014 Redpine Signals Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index 2ce2dcf57441..236b21482f38 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -1,4 +1,4 @@ -/** +/* * Copyright (c) 2014 Redpine Signals Inc. * * Permission to use, copy, modify, and/or distribute this software for any diff --git a/drivers/net/wireless/rsi/rsi_ps.h b/drivers/net/wireless/rsi/rsi_ps.h index 98ff6a4ced57..0be2f1e201e5 100644 --- a/drivers/net/wireless/rsi/rsi_ps.h +++ b/drivers/net/wireless/rsi/rsi_ps.h @@ -1,4 +1,4 @@ -/** +/* * Copyright (c) 2017 Redpine Signals Inc. * * Permission to use, copy, modify, and/or distribute this software for any diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h index 1c756263cf15..7c91b126b350 100644 --- a/drivers/net/wireless/rsi/rsi_sdio.h +++ b/drivers/net/wireless/rsi/rsi_sdio.h @@ -1,4 +1,4 @@ -/** +/* * @section LICENSE * Copyright (c) 2014 Redpine Signals Inc. * diff --git a/drivers/net/wireless/rsi/rsi_usb.h b/drivers/net/wireless/rsi/rsi_usb.h index 8702f434b569..254d19b66412 100644 --- a/drivers/net/wireless/rsi/rsi_usb.h +++ b/drivers/net/wireless/rsi/rsi_usb.h @@ -1,4 +1,4 @@ -/** +/* * @section LICENSE * Copyright (c) 2014 Redpine Signals Inc. * diff --git a/drivers/net/wireless/st/cw1200/bh.c b/drivers/net/wireless/st/cw1200/bh.c index c364a3987618..8bade5d89f12 100644 --- a/drivers/net/wireless/st/cw1200/bh.c +++ b/drivers/net/wireless/st/cw1200/bh.c @@ -42,9 +42,6 @@ enum cw1200_bh_pm_state { CW1200_BH_RESUME, }; -typedef int (*cw1200_wsm_handler)(struct cw1200_common *priv, - u8 *data, size_t size); - static void cw1200_bh_work(struct work_struct *work) { struct cw1200_common *priv = diff --git a/drivers/net/wireless/st/cw1200/wsm.h b/drivers/net/wireless/st/cw1200/wsm.h index 1ffa47994bb9..89fdc9115e9d 100644 --- a/drivers/net/wireless/st/cw1200/wsm.h +++ b/drivers/net/wireless/st/cw1200/wsm.h @@ -785,8 +785,6 @@ struct wsm_tx_confirm { }; /* 3.15 */ -typedef void (*wsm_tx_confirm_cb) (struct cw1200_common *priv, - struct wsm_tx_confirm *arg); /* Note that ideology of wsm_tx struct is different against the rest of * WSM API. wsm_hdr is /not/ a caller-adapted struct to be used as an input @@ -862,9 +860,6 @@ struct wsm_rx { /* = sizeof(generic hi hdr) + sizeof(wsm hdr) */ #define WSM_RX_EXTRA_HEADROOM (16) -typedef void (*wsm_rx_cb) (struct cw1200_common *priv, struct wsm_rx *arg, - struct sk_buff **skb_p); - /* 3.17 */ struct wsm_event { /* WSM_STATUS_... */ @@ -1180,8 +1175,6 @@ struct wsm_switch_channel { int wsm_switch_channel(struct cw1200_common *priv, const struct wsm_switch_channel *arg); -typedef void (*wsm_channel_switch_cb) (struct cw1200_common *priv); - #define WSM_START_REQ_ID 0x0017 #define WSM_START_RESP_ID 0x0417 @@ -1240,8 +1233,6 @@ int wsm_start_find(struct cw1200_common *priv); int wsm_stop_find(struct cw1200_common *priv); -typedef void (*wsm_find_complete_cb) (struct cw1200_common *priv, u32 status); - struct wsm_suspend_resume { /* See 3.52 */ /* Link ID */ @@ -1256,9 +1247,6 @@ struct wsm_suspend_resume { /* [out] */ int queue; }; -typedef void (*wsm_suspend_resume_cb) (struct cw1200_common *priv, - struct wsm_suspend_resume *arg); - /* 3.54 Update-IE request. */ struct wsm_update_ie { /* WSM_UPDATE_IE_... 
*/ diff --git a/drivers/net/wireless/ti/wlcore/boot.c b/drivers/net/wireless/ti/wlcore/boot.c index e14d88e558f0..85abd0a2d1c9 100644 --- a/drivers/net/wireless/ti/wlcore/boot.c +++ b/drivers/net/wireless/ti/wlcore/boot.c @@ -72,6 +72,7 @@ static int wlcore_validate_fw_ver(struct wl1271 *wl) unsigned int *min_ver = (wl->fw_type == WL12XX_FW_TYPE_MULTI) ? wl->min_mr_fw_ver : wl->min_sr_fw_ver; char min_fw_str[32] = ""; + int off = 0; int i; /* the chip must be exactly equal */ @@ -105,13 +106,15 @@ static int wlcore_validate_fw_ver(struct wl1271 *wl) return 0; fail: - for (i = 0; i < NUM_FW_VER; i++) + for (i = 0; i < NUM_FW_VER && off < sizeof(min_fw_str); i++) if (min_ver[i] == WLCORE_FW_VER_IGNORE) - snprintf(min_fw_str, sizeof(min_fw_str), - "%s*.", min_fw_str); + off += snprintf(min_fw_str + off, + sizeof(min_fw_str) - off, + "*."); else - snprintf(min_fw_str, sizeof(min_fw_str), - "%s%u.", min_fw_str, min_ver[i]); + off += snprintf(min_fw_str + off, + sizeof(min_fw_str) - off, + "%u.", min_ver[i]); wl1271_error("Your WiFi FW version (%u.%u.%u.%u.%u) is invalid.\n" "Please use at least FW %s\n" diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h index b143293e694f..715edfa5f89f 100644 --- a/drivers/net/wireless/ti/wlcore/debugfs.h +++ b/drivers/net/wireless/ti/wlcore/debugfs.h @@ -78,13 +78,14 @@ static ssize_t sub## _ ##name## _read(struct file *file, \ struct wl1271 *wl = file->private_data; \ struct struct_type *stats = wl->stats.fw_stats; \ char buf[DEBUGFS_FORMAT_BUFFER_SIZE] = ""; \ + int pos = 0; \ int i; \ \ wl1271_debugfs_update_stats(wl); \ \ - for (i = 0; i < len; i++) \ - snprintf(buf, sizeof(buf), "%s[%d] = %d\n", \ - buf, i, stats->sub.name[i]); \ + for (i = 0; i < len && pos < sizeof(buf); i++) \ + pos += snprintf(buf + pos, sizeof(buf) - pos, \ + "[%d] = %d\n", i, stats->sub.name[i]); \ \ return wl1271_format_buffer(userbuf, count, ppos, "%s", buf); \ } \ diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h index e98e04ee9a2c..5779ffbe5d0f 100644 --- a/drivers/net/wireless/wl3501.h +++ b/drivers/net/wireless/wl3501.h @@ -240,7 +240,7 @@ struct iw_mgmt_essid_pset { } __packed; /* - * According to 802.11 Wireless Netowors, the definitive guide - O'Reilly + * According to 802.11 Wireless Networks, the definitive guide - O'Reilly * Pg 75 */ #define IW_DATA_RATE_MAX_LABELS 8 diff --git a/drivers/net/wwan/Kconfig b/drivers/net/wwan/Kconfig new file mode 100644 index 000000000000..7ad1920120bc --- /dev/null +++ b/drivers/net/wwan/Kconfig @@ -0,0 +1,37 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Wireless WAN device configuration +# + +menuconfig WWAN + bool "Wireless WAN" + help + This section contains Wireless WAN configuration for the WWAN framework + and drivers. + +if WWAN + +config WWAN_CORE + tristate "WWAN Driver Core" + help + Say Y here if you want to use the WWAN driver core. This driver + provides a common framework for WWAN drivers. + + To compile this driver as a module, choose M here: the module will be + called wwan. + +config MHI_WWAN_CTRL + tristate "MHI WWAN control driver for QCOM-based PCIe modems" + select WWAN_CORE + depends on MHI_BUS + help + MHI WWAN CTRL allows QCOM-based PCIe modems to expose different modem + control protocols/ports to userspace, including AT, MBIM, QMI, DIAG + and FIREHOSE. These protocols can be accessed directly from userspace + (e.g. AT commands) or via libraries/tools (e.g. libmbim, libqmi, + libqcdm...). 
+ + To compile this driver as a module, choose M here: the module will be + called mhi_wwan_ctrl. + +endif # WWAN diff --git a/drivers/net/wwan/Makefile b/drivers/net/wwan/Makefile new file mode 100644 index 000000000000..556cd90958ca --- /dev/null +++ b/drivers/net/wwan/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Linux WWAN device drivers. +# + +obj-$(CONFIG_WWAN_CORE) += wwan.o +wwan-objs += wwan_core.o + +obj-$(CONFIG_MHI_WWAN_CTRL) += mhi_wwan_ctrl.o diff --git a/drivers/net/wwan/mhi_wwan_ctrl.c b/drivers/net/wwan/mhi_wwan_ctrl.c new file mode 100644 index 000000000000..11475ade4be5 --- /dev/null +++ b/drivers/net/wwan/mhi_wwan_ctrl.c @@ -0,0 +1,282 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2021, Linaro Ltd <loic.poulain@linaro.org> */ +#include <linux/kernel.h> +#include <linux/mhi.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/wwan.h> + +/* MHI wwan flags */ +#define MHI_WWAN_DL_CAP BIT(0) +#define MHI_WWAN_UL_CAP BIT(1) +#define MHI_WWAN_RX_REFILL BIT(2) + +#define MHI_WWAN_MAX_MTU 0x8000 + +struct mhi_wwan_dev { + /* Lower level is an MHI dev, upper level is a WWAN port */ + struct mhi_device *mhi_dev; + struct wwan_port *wwan_port; + + /* State and capabilities */ + unsigned long flags; + size_t mtu; + + /* Protect against concurrent TX and TX-completion (bh) */ + spinlock_t tx_lock; + + /* Protect RX budget and rx_refill scheduling */ + spinlock_t rx_lock; + struct work_struct rx_refill; + + /* RX budget is initially set to the size of the MHI RX queue and is + * used to limit the number of allocated and queued packets. It is + * decremented on data queueing and incremented on data release. + */ + unsigned int rx_budget; +}; + +/* Increment RX budget and schedule RX refill if necessary */ +static void mhi_wwan_rx_budget_inc(struct mhi_wwan_dev *mhiwwan) +{ + spin_lock(&mhiwwan->rx_lock); + + mhiwwan->rx_budget++; + + if (test_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags)) + schedule_work(&mhiwwan->rx_refill); + + spin_unlock(&mhiwwan->rx_lock); +} + +/* Decrement RX budget if non-zero and return true on success */ +static bool mhi_wwan_rx_budget_dec(struct mhi_wwan_dev *mhiwwan) +{ + bool ret = false; + + spin_lock(&mhiwwan->rx_lock); + + if (mhiwwan->rx_budget) + mhiwwan->rx_budget--; + + if (mhiwwan->rx_budget && test_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags)) + ret = true; + + spin_unlock(&mhiwwan->rx_lock); + + return ret; +} + +static void __mhi_skb_destructor(struct sk_buff *skb) +{ + /* RX buffer has been consumed, increase the allowed budget */ + mhi_wwan_rx_budget_inc(skb_shinfo(skb)->destructor_arg); +} + +static void mhi_wwan_ctrl_refill_work(struct work_struct *work) +{ + struct mhi_wwan_dev *mhiwwan = container_of(work, struct mhi_wwan_dev, rx_refill); + struct mhi_device *mhi_dev = mhiwwan->mhi_dev; + + while (mhi_wwan_rx_budget_dec(mhiwwan)) { + struct sk_buff *skb; + + skb = alloc_skb(mhiwwan->mtu, GFP_KERNEL); + if (!skb) { + mhi_wwan_rx_budget_inc(mhiwwan); + break; + } + + /* To prevent unlimited buffer allocation if nothing consumes + * the RX buffers (passed to the WWAN core), track their lifespan + * to avoid allocating more than the allowed budget. 
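+ * The destructor_arg set below carries the mhi_wwan_dev pointer so that + * __mhi_skb_destructor() can return the budget (and reschedule this + * refill work if needed) once the consumer frees the buffer.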
+ */ + skb->destructor = __mhi_skb_destructor; + skb_shinfo(skb)->destructor_arg = mhiwwan; + + if (mhi_queue_skb(mhi_dev, DMA_FROM_DEVICE, skb, mhiwwan->mtu, MHI_EOT)) { + dev_err(&mhi_dev->dev, "Failed to queue buffer\n"); + kfree_skb(skb); + break; + } + } +} + +static int mhi_wwan_ctrl_start(struct wwan_port *port) +{ + struct mhi_wwan_dev *mhiwwan = wwan_port_get_drvdata(port); + int ret; + + /* Start the MHI device's channel(s) */ + ret = mhi_prepare_for_transfer(mhiwwan->mhi_dev); + if (ret) + return ret; + + /* Don't allocate more buffers than the MHI channel queue size */ + mhiwwan->rx_budget = mhi_get_free_desc_count(mhiwwan->mhi_dev, DMA_FROM_DEVICE); + + /* Add buffers to the MHI inbound queue */ + if (test_bit(MHI_WWAN_DL_CAP, &mhiwwan->flags)) { + set_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags); + mhi_wwan_ctrl_refill_work(&mhiwwan->rx_refill); + } + + return 0; +} + +static void mhi_wwan_ctrl_stop(struct wwan_port *port) +{ + struct mhi_wwan_dev *mhiwwan = wwan_port_get_drvdata(port); + + spin_lock(&mhiwwan->rx_lock); + clear_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags); + spin_unlock(&mhiwwan->rx_lock); + + cancel_work_sync(&mhiwwan->rx_refill); + + mhi_unprepare_from_transfer(mhiwwan->mhi_dev); +} + +static int mhi_wwan_ctrl_tx(struct wwan_port *port, struct sk_buff *skb) +{ + struct mhi_wwan_dev *mhiwwan = wwan_port_get_drvdata(port); + int ret; + + if (skb->len > mhiwwan->mtu) + return -EMSGSIZE; + + if (!test_bit(MHI_WWAN_UL_CAP, &mhiwwan->flags)) + return -EOPNOTSUPP; + + /* Queue the packet for MHI transfer and check fullness of the queue */ + spin_lock_bh(&mhiwwan->tx_lock); + ret = mhi_queue_skb(mhiwwan->mhi_dev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT); + if (mhi_queue_is_full(mhiwwan->mhi_dev, DMA_TO_DEVICE)) + wwan_port_txoff(port); + spin_unlock_bh(&mhiwwan->tx_lock); + + return ret; +} + +static const struct wwan_port_ops wwan_pops = { + .start = mhi_wwan_ctrl_start, + .stop = mhi_wwan_ctrl_stop, + .tx = mhi_wwan_ctrl_tx, +}; + +static void mhi_ul_xfer_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct mhi_wwan_dev *mhiwwan = dev_get_drvdata(&mhi_dev->dev); + struct wwan_port *port = mhiwwan->wwan_port; + struct sk_buff *skb = mhi_result->buf_addr; + + dev_dbg(&mhi_dev->dev, "%s: status: %d xfer_len: %zu\n", __func__, + mhi_result->transaction_status, mhi_result->bytes_xferd); + + /* MHI core is done with the buffer, release it */ + consume_skb(skb); + + /* There is likely a new slot available in the MHI queue, re-allow TX */ + spin_lock_bh(&mhiwwan->tx_lock); + if (!mhi_queue_is_full(mhiwwan->mhi_dev, DMA_TO_DEVICE)) + wwan_port_txon(port); + spin_unlock_bh(&mhiwwan->tx_lock); +} + +static void mhi_dl_xfer_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct mhi_wwan_dev *mhiwwan = dev_get_drvdata(&mhi_dev->dev); + struct wwan_port *port = mhiwwan->wwan_port; + struct sk_buff *skb = mhi_result->buf_addr; + + dev_dbg(&mhi_dev->dev, "%s: status: %d receive_len: %zu\n", __func__, + mhi_result->transaction_status, mhi_result->bytes_xferd); + + if (mhi_result->transaction_status && + mhi_result->transaction_status != -EOVERFLOW) { + kfree_skb(skb); + return; + } + + /* MHI core does not update skb->len, do it before forwarding */ + skb_put(skb, mhi_result->bytes_xferd); + wwan_port_rx(port, skb); + + /* Do not increment rx budget nor refill RX buffers now, wait for the + * buffer to be consumed. Done from __mhi_skb_destructor(). 
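+ * Tying refill to the destructor throttles skb allocation to the rate + * at which the consumer actually frees the RX buffers.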
+ */ +static int mhi_wwan_ctrl_probe(struct mhi_device *mhi_dev, + const struct mhi_device_id *id) +{ + struct mhi_controller *cntrl = mhi_dev->mhi_cntrl; + struct mhi_wwan_dev *mhiwwan; + struct wwan_port *port; + + mhiwwan = kzalloc(sizeof(*mhiwwan), GFP_KERNEL); + if (!mhiwwan) + return -ENOMEM; + + mhiwwan->mhi_dev = mhi_dev; + mhiwwan->mtu = MHI_WWAN_MAX_MTU; + INIT_WORK(&mhiwwan->rx_refill, mhi_wwan_ctrl_refill_work); + spin_lock_init(&mhiwwan->tx_lock); + spin_lock_init(&mhiwwan->rx_lock); + + if (mhi_dev->dl_chan) + set_bit(MHI_WWAN_DL_CAP, &mhiwwan->flags); + if (mhi_dev->ul_chan) + set_bit(MHI_WWAN_UL_CAP, &mhiwwan->flags); + + dev_set_drvdata(&mhi_dev->dev, mhiwwan); + + /* Register as a WWAN port; id->driver_data contains the WWAN port type */ + port = wwan_create_port(&cntrl->mhi_dev->dev, id->driver_data, + &wwan_pops, mhiwwan); + if (IS_ERR(port)) { + kfree(mhiwwan); + return PTR_ERR(port); + } + + mhiwwan->wwan_port = port; + + return 0; +} + +static void mhi_wwan_ctrl_remove(struct mhi_device *mhi_dev) +{ + struct mhi_wwan_dev *mhiwwan = dev_get_drvdata(&mhi_dev->dev); + + wwan_remove_port(mhiwwan->wwan_port); + kfree(mhiwwan); +} + +static const struct mhi_device_id mhi_wwan_ctrl_match_table[] = { + { .chan = "DUN", .driver_data = WWAN_PORT_AT }, + { .chan = "MBIM", .driver_data = WWAN_PORT_MBIM }, + { .chan = "QMI", .driver_data = WWAN_PORT_QMI }, + { .chan = "DIAG", .driver_data = WWAN_PORT_QCDM }, + { .chan = "FIREHOSE", .driver_data = WWAN_PORT_FIREHOSE }, + {}, +}; +MODULE_DEVICE_TABLE(mhi, mhi_wwan_ctrl_match_table); + +static struct mhi_driver mhi_wwan_ctrl_driver = { + .id_table = mhi_wwan_ctrl_match_table, + .remove = mhi_wwan_ctrl_remove, + .probe = mhi_wwan_ctrl_probe, + .ul_xfer_cb = mhi_ul_xfer_cb, + .dl_xfer_cb = mhi_dl_xfer_cb, + .driver = { + .name = "mhi_wwan_ctrl", + }, +}; + +module_mhi_driver(mhi_wwan_ctrl_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MHI WWAN CTRL Driver"); +MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>"); diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c new file mode 100644 index 000000000000..b618b7937846 --- /dev/null +++ b/drivers/net/wwan/wwan_core.c @@ -0,0 +1,552 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2021, Linaro Ltd <loic.poulain@linaro.org> */ + +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/idr.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/poll.h> +#include <linux/skbuff.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/wwan.h> + +#define WWAN_MAX_MINORS 256 /* 256 minors allowed with register_chrdev() */ + +static DEFINE_MUTEX(wwan_register_lock); /* WWAN device create|remove lock */ +static DEFINE_IDA(minors); /* minors for WWAN port chardevs */ +static DEFINE_IDA(wwan_dev_ids); /* for unique WWAN device IDs */ +static struct class *wwan_class; +static int wwan_major; + +#define to_wwan_dev(d) container_of(d, struct wwan_device, dev) +#define to_wwan_port(d) container_of(d, struct wwan_port, dev) + +/* WWAN port flags */ +#define WWAN_PORT_TX_OFF BIT(0) + +/** + * struct wwan_device - The structure that defines a WWAN device + * + * @id: WWAN device unique ID. + * @dev: Underlying device. + * @port_id: Current available port ID to pick. 
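+ * + * Note: @port_id is only ever incremented (atomic_inc_return() at port + * creation time), which keeps generated port names unique over the + * lifetime of the device.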
+ */ +struct wwan_device { + unsigned int id; + struct device dev; + atomic_t port_id; +}; + +/** + * struct wwan_port - The structure that defines a WWAN port + * @type: Port type + * @start_count: Port start counter + * @flags: Store port state and capabilities + * @ops: Pointer to WWAN port operations + * @ops_lock: Protect port ops + * @dev: Underlying device + * @rxq: Buffer inbound queue + * @waitqueue: The waitqueue for port fops (read/write/poll) + */ +struct wwan_port { + enum wwan_port_type type; + unsigned int start_count; + unsigned long flags; + const struct wwan_port_ops *ops; + struct mutex ops_lock; /* Serialize ops + protect against removal */ + struct device dev; + struct sk_buff_head rxq; + wait_queue_head_t waitqueue; +}; + +static void wwan_dev_destroy(struct device *dev) +{ + struct wwan_device *wwandev = to_wwan_dev(dev); + + ida_free(&wwan_dev_ids, wwandev->id); + kfree(wwandev); +} + +static const struct device_type wwan_dev_type = { + .name = "wwan_dev", + .release = wwan_dev_destroy, +}; + +static int wwan_dev_parent_match(struct device *dev, const void *parent) +{ + return (dev->type == &wwan_dev_type && dev->parent == parent); +} + +static struct wwan_device *wwan_dev_get_by_parent(struct device *parent) +{ + struct device *dev; + + dev = class_find_device(wwan_class, NULL, parent, wwan_dev_parent_match); + if (!dev) + return ERR_PTR(-ENODEV); + + return to_wwan_dev(dev); +} + +/* This function allocates and registers a new WWAN device OR, if a WWAN device + * already exists for the given parent, it gets a reference and returns it. + * This function is not exported (for now); it is called indirectly via + * wwan_create_port(). + */ +static struct wwan_device *wwan_create_dev(struct device *parent) +{ + struct wwan_device *wwandev; + int err, id; + + /* The 'find-alloc-register' operation must be protected against + * concurrent execution; a WWAN device may be shared between + * multiple callers or concurrently unregistered from wwan_remove_dev(). + */ + mutex_lock(&wwan_register_lock); + + /* If wwandev already exists, return it */ + wwandev = wwan_dev_get_by_parent(parent); + if (!IS_ERR(wwandev)) + goto done_unlock; + + id = ida_alloc(&wwan_dev_ids, GFP_KERNEL); + if (id < 0) + goto done_unlock; + + wwandev = kzalloc(sizeof(*wwandev), GFP_KERNEL); + if (!wwandev) { + ida_free(&wwan_dev_ids, id); + goto done_unlock; + } + + wwandev->dev.parent = parent; + wwandev->dev.class = wwan_class; + wwandev->dev.type = &wwan_dev_type; + wwandev->id = id; + dev_set_name(&wwandev->dev, "wwan%d", wwandev->id); + + err = device_register(&wwandev->dev); + if (err) { + put_device(&wwandev->dev); + wwandev = NULL; + } + +done_unlock: + mutex_unlock(&wwan_register_lock); + + return wwandev; +} + +static int is_wwan_child(struct device *dev, void *data) +{ + return dev->class == wwan_class; +} + +static void wwan_remove_dev(struct wwan_device *wwandev) +{ + int ret; + + /* Prevent concurrent picking from wwan_create_dev */ + mutex_lock(&wwan_register_lock); + + /* A WWAN device is created and registered (get+add) along with its first + * child port, and subsequent port registrations only grab a reference + * (get). The WWAN device must then be unregistered (del+put) along with + * its last port, and the reference simply dropped (put) otherwise.
+ */ + ret = device_for_each_child(&wwandev->dev, NULL, is_wwan_child); + if (!ret) + device_unregister(&wwandev->dev); + else + put_device(&wwandev->dev); + + mutex_unlock(&wwan_register_lock); +} + +/* ------- WWAN port management ------- */ + +static void wwan_port_destroy(struct device *dev) +{ + struct wwan_port *port = to_wwan_port(dev); + + ida_free(&minors, MINOR(port->dev.devt)); + skb_queue_purge(&port->rxq); + mutex_destroy(&port->ops_lock); + kfree(port); +} + +static const struct device_type wwan_port_dev_type = { + .name = "wwan_port", + .release = wwan_port_destroy, +}; + +static int wwan_port_minor_match(struct device *dev, const void *minor) +{ + return (dev->type == &wwan_port_dev_type && + MINOR(dev->devt) == *(unsigned int *)minor); +} + +static struct wwan_port *wwan_port_get_by_minor(unsigned int minor) +{ + struct device *dev; + + dev = class_find_device(wwan_class, NULL, &minor, wwan_port_minor_match); + if (!dev) + return ERR_PTR(-ENODEV); + + return to_wwan_port(dev); +} + +/* Keep aligned with wwan_port_type enum */ +static const char * const wwan_port_type_str[] = { + "AT", + "MBIM", + "QMI", + "QCDM", + "FIREHOSE" +}; + +struct wwan_port *wwan_create_port(struct device *parent, + enum wwan_port_type type, + const struct wwan_port_ops *ops, + void *drvdata) +{ + struct wwan_device *wwandev; + struct wwan_port *port; + int minor, err = -ENOMEM; + + if (type >= WWAN_PORT_MAX || !ops) + return ERR_PTR(-EINVAL); + + /* A port is always a child of a WWAN device, retrieve (allocate or + * pick) the WWAN device based on the provided parent device. + */ + wwandev = wwan_create_dev(parent); + if (IS_ERR(wwandev)) + return ERR_CAST(wwandev); + + /* A port is exposed as character device, get a minor */ + minor = ida_alloc_range(&minors, 0, WWAN_MAX_MINORS - 1, GFP_KERNEL); + if (minor < 0) + goto error_wwandev_remove; + + port = kzalloc(sizeof(*port), GFP_KERNEL); + if (!port) { + ida_free(&minors, minor); + goto error_wwandev_remove; + } + + port->type = type; + port->ops = ops; + mutex_init(&port->ops_lock); + skb_queue_head_init(&port->rxq); + init_waitqueue_head(&port->waitqueue); + + port->dev.parent = &wwandev->dev; + port->dev.class = wwan_class; + port->dev.type = &wwan_port_dev_type; + port->dev.devt = MKDEV(wwan_major, minor); + dev_set_drvdata(&port->dev, drvdata); + + /* create unique name based on wwan device id, port index and type */ + dev_set_name(&port->dev, "wwan%up%u%s", wwandev->id, + atomic_inc_return(&wwandev->port_id), + wwan_port_type_str[port->type]); + + err = device_register(&port->dev); + if (err) + goto error_put_device; + + return port; + +error_put_device: + put_device(&port->dev); +error_wwandev_remove: + wwan_remove_dev(wwandev); + + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(wwan_create_port); + +void wwan_remove_port(struct wwan_port *port) +{ + struct wwan_device *wwandev = to_wwan_dev(port->dev.parent); + + mutex_lock(&port->ops_lock); + if (port->start_count) + port->ops->stop(port); + port->ops = NULL; /* Prevent any new port operations (e.g. 
from fops) */ + mutex_unlock(&port->ops_lock); + + wake_up_interruptible(&port->waitqueue); + + skb_queue_purge(&port->rxq); + dev_set_drvdata(&port->dev, NULL); + device_unregister(&port->dev); + + /* Release related wwan device */ + wwan_remove_dev(wwandev); +} +EXPORT_SYMBOL_GPL(wwan_remove_port); + +void wwan_port_rx(struct wwan_port *port, struct sk_buff *skb) +{ + skb_queue_tail(&port->rxq, skb); + wake_up_interruptible(&port->waitqueue); +} +EXPORT_SYMBOL_GPL(wwan_port_rx); + +void wwan_port_txon(struct wwan_port *port) +{ + clear_bit(WWAN_PORT_TX_OFF, &port->flags); + wake_up_interruptible(&port->waitqueue); +} +EXPORT_SYMBOL_GPL(wwan_port_txon); + +void wwan_port_txoff(struct wwan_port *port) +{ + set_bit(WWAN_PORT_TX_OFF, &port->flags); +} +EXPORT_SYMBOL_GPL(wwan_port_txoff); + +void *wwan_port_get_drvdata(struct wwan_port *port) +{ + return dev_get_drvdata(&port->dev); +} +EXPORT_SYMBOL_GPL(wwan_port_get_drvdata); + +static int wwan_port_op_start(struct wwan_port *port) +{ + int ret = 0; + + mutex_lock(&port->ops_lock); + if (!port->ops) { /* Port got unplugged */ + ret = -ENODEV; + goto out_unlock; + } + + /* If port is already started, don't start again */ + if (!port->start_count) + ret = port->ops->start(port); + + if (!ret) + port->start_count++; + +out_unlock: + mutex_unlock(&port->ops_lock); + + return ret; +} + +static void wwan_port_op_stop(struct wwan_port *port) +{ + mutex_lock(&port->ops_lock); + port->start_count--; + if (port->ops && !port->start_count) + port->ops->stop(port); + mutex_unlock(&port->ops_lock); +} + +static int wwan_port_op_tx(struct wwan_port *port, struct sk_buff *skb) +{ + int ret; + + mutex_lock(&port->ops_lock); + if (!port->ops) { /* Port got unplugged */ + ret = -ENODEV; + goto out_unlock; + } + + ret = port->ops->tx(port, skb); + +out_unlock: + mutex_unlock(&port->ops_lock); + + return ret; +} + +static bool is_read_blocked(struct wwan_port *port) +{ + return skb_queue_empty(&port->rxq) && port->ops; +} + +static bool is_write_blocked(struct wwan_port *port) +{ + return test_bit(WWAN_PORT_TX_OFF, &port->flags) && port->ops; +} + +static int wwan_wait_rx(struct wwan_port *port, bool nonblock) +{ + if (!is_read_blocked(port)) + return 0; + + if (nonblock) + return -EAGAIN; + + if (wait_event_interruptible(port->waitqueue, !is_read_blocked(port))) + return -ERESTARTSYS; + + return 0; +} + +static int wwan_wait_tx(struct wwan_port *port, bool nonblock) +{ + if (!is_write_blocked(port)) + return 0; + + if (nonblock) + return -EAGAIN; + + if (wait_event_interruptible(port->waitqueue, !is_write_blocked(port))) + return -ERESTARTSYS; + + return 0; +} + +static int wwan_port_fops_open(struct inode *inode, struct file *file) +{ + struct wwan_port *port; + int err = 0; + + port = wwan_port_get_by_minor(iminor(inode)); + if (IS_ERR(port)) + return PTR_ERR(port); + + file->private_data = port; + stream_open(inode, file); + + err = wwan_port_op_start(port); + if (err) + put_device(&port->dev); + + return err; +} + +static int wwan_port_fops_release(struct inode *inode, struct file *filp) +{ + struct wwan_port *port = filp->private_data; + + wwan_port_op_stop(port); + put_device(&port->dev); + + return 0; +} + +static ssize_t wwan_port_fops_read(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + struct wwan_port *port = filp->private_data; + struct sk_buff *skb; + size_t copied; + int ret; + + ret = wwan_wait_rx(port, !!(filp->f_flags & O_NONBLOCK)); + if (ret) + return ret; + + skb = skb_dequeue(&port->rxq); + if (!skb) + return -EIO; 
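+ + /* A datagram can be larger than the user's buffer: copy out at most + * count bytes here; any unread remainder is requeued below so that the + * next read() continues with it. + */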
+ + copied = min_t(size_t, count, skb->len); + if (copy_to_user(buf, skb->data, copied)) { + kfree_skb(skb); + return -EFAULT; + } + skb_pull(skb, copied); + + /* skb is not fully consumed, keep it in the queue */ + if (skb->len) + skb_queue_head(&port->rxq, skb); + else + consume_skb(skb); + + return copied; +} + +static ssize_t wwan_port_fops_write(struct file *filp, const char __user *buf, + size_t count, loff_t *offp) +{ + struct wwan_port *port = filp->private_data; + struct sk_buff *skb; + int ret; + + ret = wwan_wait_tx(port, !!(filp->f_flags & O_NONBLOCK)); + if (ret) + return ret; + + skb = alloc_skb(count, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + if (copy_from_user(skb_put(skb, count), buf, count)) { + kfree_skb(skb); + return -EFAULT; + } + + ret = wwan_port_op_tx(port, skb); + if (ret) { + kfree_skb(skb); + return ret; + } + + return count; +} + +static __poll_t wwan_port_fops_poll(struct file *filp, poll_table *wait) +{ + struct wwan_port *port = filp->private_data; + __poll_t mask = 0; + + poll_wait(filp, &port->waitqueue, wait); + + if (!is_write_blocked(port)) + mask |= EPOLLOUT | EPOLLWRNORM; + if (!is_read_blocked(port)) + mask |= EPOLLIN | EPOLLRDNORM; + + return mask; +} + +static const struct file_operations wwan_port_fops = { + .owner = THIS_MODULE, + .open = wwan_port_fops_open, + .release = wwan_port_fops_release, + .read = wwan_port_fops_read, + .write = wwan_port_fops_write, + .poll = wwan_port_fops_poll, + .llseek = noop_llseek, +}; + +static int __init wwan_init(void) +{ + wwan_class = class_create(THIS_MODULE, "wwan"); + if (IS_ERR(wwan_class)) + return PTR_ERR(wwan_class); + + /* chrdev used for wwan ports */ + wwan_major = register_chrdev(0, "wwan_port", &wwan_port_fops); + if (wwan_major < 0) { + class_destroy(wwan_class); + return wwan_major; + } + + return 0; +} + +static void __exit wwan_exit(void) +{ + unregister_chrdev(wwan_major, "wwan_port"); + class_destroy(wwan_class); +} + +module_init(wwan_init); +module_exit(wwan_exit); + +MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>"); +MODULE_DESCRIPTION("WWAN core"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index a5439c130130..d24b7a7993aa 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -824,11 +824,15 @@ static void connect(struct backend_info *be) xenvif_carrier_on(be->vif); unregister_hotplug_status_watch(be); - err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL, - hotplug_status_changed, - "%s/%s", dev->nodename, "hotplug-status"); - if (!err) + if (xenbus_exists(XBT_NIL, dev->nodename, "hotplug-status")) { + err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, + NULL, hotplug_status_changed, + "%s/%s", dev->nodename, + "hotplug-status"); + if (err) + goto err; be->have_hotplug_status_watch = 1; + } netif_tx_wake_all_queues(be->vif->dev); diff --git a/drivers/nfc/st-nci/spi.c b/drivers/nfc/st-nci/spi.c index 8db323adebf0..09df6ea65840 100644 --- a/drivers/nfc/st-nci/spi.c +++ b/drivers/nfc/st-nci/spi.c @@ -95,17 +95,14 @@ static int st_nci_spi_write(void *phy_id, struct sk_buff *skb) */ if (!r) { skb_rx = alloc_skb(skb->len, GFP_KERNEL); - if (!skb_rx) { - r = -ENOMEM; - goto exit; - } + if (!skb_rx) + return -ENOMEM; skb_put(skb_rx, skb->len); memcpy(skb_rx->data, buf, skb->len); ndlc_recv(phy->ndlc, skb_rx); } -exit: return r; } diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index 48f0985ca8a0..3a777d0073b7 100644 --- a/drivers/nvdimm/bus.c +++ 
b/drivers/nvdimm/bus.c @@ -631,16 +631,14 @@ void nvdimm_check_and_set_ro(struct gendisk *disk) struct nd_region *nd_region = to_nd_region(dev->parent); int disk_ro = get_disk_ro(disk); - /* - * Upgrade to read-only if the region is read-only preserve as - * read-only if the disk is already read-only. - */ - if (disk_ro || nd_region->ro == disk_ro) + /* catch the disk up with the region ro state */ + if (disk_ro == nd_region->ro) return; - dev_info(dev, "%s read-only, marking %s read-only\n", - dev_name(&nd_region->dev), disk->disk_name); - set_disk_ro(disk, 1); + dev_info(dev, "%s read-%s, marking %s read-%s\n", + dev_name(&nd_region->dev), nd_region->ro ? "only" : "write", + disk->disk_name, nd_region->ro ? "only" : "write"); + set_disk_ro(disk, nd_region->ro); } EXPORT_SYMBOL(nvdimm_check_and_set_ro); diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index b8a85bfb2e95..7daac795db39 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -26,6 +26,7 @@ #include <linux/mm.h> #include <asm/cacheflush.h> #include "pmem.h" +#include "btt.h" #include "pfn.h" #include "nd.h" @@ -585,7 +586,7 @@ static void nd_pmem_shutdown(struct device *dev) nvdimm_flush(to_nd_region(dev->parent), NULL); } -static void nd_pmem_notify(struct device *dev, enum nvdimm_event event) +static void pmem_revalidate_poison(struct device *dev) { struct nd_region *nd_region; resource_size_t offset = 0, end_trunc = 0; @@ -595,9 +596,6 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event) struct range range; struct kernfs_node *bb_state; - if (event != NVDIMM_REVALIDATE_POISON) - return; - if (is_nd_btt(dev)) { struct nd_btt *nd_btt = to_nd_btt(dev); @@ -635,6 +633,37 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event) sysfs_notify_dirent(bb_state); } +static void pmem_revalidate_region(struct device *dev) +{ + struct pmem_device *pmem; + + if (is_nd_btt(dev)) { + struct nd_btt *nd_btt = to_nd_btt(dev); + struct btt *btt = nd_btt->btt; + + nvdimm_check_and_set_ro(btt->btt_disk); + return; + } + + pmem = dev_get_drvdata(dev); + nvdimm_check_and_set_ro(pmem->disk); +} + +static void nd_pmem_notify(struct device *dev, enum nvdimm_event event) +{ + switch (event) { + case NVDIMM_REVALIDATE_POISON: + pmem_revalidate_poison(dev); + break; + case NVDIMM_REVALIDATE_REGION: + pmem_revalidate_region(dev); + break; + default: + dev_WARN_ONCE(dev, 1, "notify: unknown event: %d\n", event); + break; + } +} + MODULE_ALIAS("pmem"); MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO); MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM); diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index ef23119db574..9ccf3d608799 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -518,6 +518,12 @@ static ssize_t read_only_show(struct device *dev, return sprintf(buf, "%d\n", nd_region->ro); } +static int revalidate_read_only(struct device *dev, void *data) +{ + nd_device_notify(dev, NVDIMM_REVALIDATE_REGION); + return 0; +} + static ssize_t read_only_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { @@ -529,6 +535,7 @@ static ssize_t read_only_store(struct device *dev, return rc; nd_region->ro = ro; + device_for_each_child(dev, NULL, revalidate_read_only); return len; } static DEVICE_ATTR_RW(read_only); @@ -1239,6 +1246,11 @@ int nvdimm_has_flush(struct nd_region *nd_region) || !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API)) return -ENXIO; + /* Test if an explicit flush function is defined */ + if 
(test_bit(ND_REGION_ASYNC, &nd_region->flags) && nd_region->flush) + return 1; + + /* Test if any flush hints for the region are available */ for (i = 0; i < nd_region->ndr_mappings; i++) { struct nd_mapping *nd_mapping = &nd_region->mapping[i]; struct nvdimm *nvdimm = nd_mapping->nvdimm; @@ -1249,8 +1261,8 @@ int nvdimm_has_flush(struct nd_region *nd_region) } /* - * The platform defines dimm devices without hints, assume - * platform persistence mechanism like ADR + * The platform defines dimm devices without hints or an explicit flush, + * so assume a platform persistence mechanism like ADR */ return 0; } diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c index bc0a27de69d4..dbac3a172a11 100644 --- a/drivers/of/of_net.c +++ b/drivers/of/of_net.c @@ -11,6 +11,7 @@ #include <linux/phy.h> #include <linux/export.h> #include <linux/device.h> +#include <linux/nvmem-consumer.h> /** * of_get_phy_mode - Get phy mode for given device_node @@ -45,42 +46,59 @@ int of_get_phy_mode(struct device_node *np, phy_interface_t *interface) } EXPORT_SYMBOL_GPL(of_get_phy_mode); -static const void *of_get_mac_addr(struct device_node *np, const char *name) +static int of_get_mac_addr(struct device_node *np, const char *name, u8 *addr) { struct property *pp = of_find_property(np, name, NULL); - if (pp && pp->length == ETH_ALEN && is_valid_ether_addr(pp->value)) - return pp->value; - return NULL; + if (pp && pp->length == ETH_ALEN && is_valid_ether_addr(pp->value)) { + memcpy(addr, pp->value, ETH_ALEN); + return 0; + } + return -ENODEV; } -static const void *of_get_mac_addr_nvmem(struct device_node *np) +static int of_get_mac_addr_nvmem(struct device_node *np, u8 *addr) { - int ret; - const void *mac; - u8 nvmem_mac[ETH_ALEN]; struct platform_device *pdev = of_find_device_by_node(np); + struct nvmem_cell *cell; + const void *mac; + size_t len; + int ret; - if (!pdev) - return ERR_PTR(-ENODEV); - - ret = nvmem_get_mac_address(&pdev->dev, &nvmem_mac); - if (ret) { + /* Try a lookup by device first; there might be an nvmem_cell_lookup + * associated with a given device. + */ + if (pdev) { + ret = nvmem_get_mac_address(&pdev->dev, addr); put_device(&pdev->dev); - return ERR_PTR(ret); + return ret; } - mac = devm_kmemdup(&pdev->dev, nvmem_mac, ETH_ALEN, GFP_KERNEL); - put_device(&pdev->dev); - if (!mac) - return ERR_PTR(-ENOMEM); + cell = of_nvmem_cell_get(np, "mac-address"); + if (IS_ERR(cell)) + return PTR_ERR(cell); - return mac; + mac = nvmem_cell_read(cell, &len); + nvmem_cell_put(cell); + + if (IS_ERR(mac)) + return PTR_ERR(mac); + + if (len != ETH_ALEN || !is_valid_ether_addr(mac)) { + kfree(mac); + return -EINVAL; + } + + memcpy(addr, mac, ETH_ALEN); + kfree(mac); + + return 0; } /** * of_get_mac_address() * @np: Caller's Device Node + * @addr: Pointer to a six-byte array for the result * * Search the device tree for the best MAC address to use. 'mac-address' is * checked first, because that is supposed to contain to "most recent" MAC @@ -101,24 +119,27 @@ static const void *of_get_mac_addr_nvmem(struct device_node *np) * this case, the real MAC is in 'local-mac-address', and 'mac-address' exists * but is all zeros. * - * Return: Will be a valid pointer on success and ERR_PTR in case of error. + * Return: 0 on success and errno in case of error.
*/ -const void *of_get_mac_address(struct device_node *np) +int of_get_mac_address(struct device_node *np, u8 *addr) { - const void *addr; + int ret; + + if (!np) + return -ENODEV; - addr = of_get_mac_addr(np, "mac-address"); - if (addr) - return addr; + ret = of_get_mac_addr(np, "mac-address", addr); + if (!ret) + return 0; - addr = of_get_mac_addr(np, "local-mac-address"); - if (addr) - return addr; + ret = of_get_mac_addr(np, "local-mac-address", addr); + if (!ret) + return 0; - addr = of_get_mac_addr(np, "address"); - if (addr) - return addr; + ret = of_get_mac_addr(np, "address", addr); + if (!ret) + return 0; - return of_get_mac_addr_nvmem(np); + return of_get_mac_addr_nvmem(np, addr); } EXPORT_SYMBOL(of_get_mac_address); diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c index ddecf25b5dd4..d7894f178bd4 100644 --- a/drivers/ras/cec.c +++ b/drivers/ras/cec.c @@ -309,11 +309,20 @@ static bool sanity_check(struct ce_array *ca) return ret; } +/** + * cec_add_elem - Add an element to the CEC array. + * @pfn: page frame number to insert + * + * Return values: + * - <0: on error + * - 0: on success + * - >0: when the inserted pfn was offlined + */ static int cec_add_elem(u64 pfn) { struct ce_array *ca = &ce_arr; + int count, err, ret = 0; unsigned int to = 0; - int count, ret = 0; /* * We can be called very early on the identify_cpu() path where we are @@ -330,8 +339,8 @@ static int cec_add_elem(u64 pfn) if (ca->n == MAX_ELEMS) WARN_ON(!del_lru_elem_unlocked(ca)); - ret = find_elem(ca, pfn, &to); - if (ret < 0) { + err = find_elem(ca, pfn, &to); + if (err < 0) { /* * Shift range [to-end] to make room for one more element. */ diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h index d126bb877250..ba6a3aa8d954 100644 --- a/drivers/scsi/hpsa_cmd.h +++ b/drivers/scsi/hpsa_cmd.h @@ -20,6 +20,11 @@ #ifndef HPSA_CMD_H #define HPSA_CMD_H +#include <linux/compiler.h> + +#include <linux/build_bug.h> /* static_assert */ +#include <linux/stddef.h> /* offsetof */ + /* general boundary defintions */ #define SENSEINFOBYTES 32 /* may vary between hbas */ #define SG_ENTRIES_IN_CMD 32 /* Max SG entries excluding chain blocks */ @@ -200,12 +205,10 @@ union u64bit { MAX_EXT_TARGETS + 1) /* + 1 is for the controller itself */ /* SCSI-3 Commands */ -#pragma pack(1) - #define HPSA_INQUIRY 0x12 struct InquiryData { u8 data_byte[36]; -}; +} __packed; #define HPSA_REPORT_LOG 0xc2 /* Report Logical LUNs */ #define HPSA_REPORT_PHYS 0xc3 /* Report Physical LUNs */ @@ -221,7 +224,7 @@ struct raid_map_disk_data { u8 xor_mult[2]; /**< XOR multipliers for this position, * valid for data disks only */ u8 reserved[2]; -}; +} __packed; struct raid_map_data { __le32 structure_size; /* Size of entire structure in bytes */ @@ -247,14 +250,14 @@ struct raid_map_data { __le16 dekindex; /* Data encryption key index. 
*/ u8 reserved[16]; struct raid_map_disk_data data[RAID_MAP_MAX_ENTRIES]; -}; +} __packed; struct ReportLUNdata { u8 LUNListLength[4]; u8 extended_response_flag; u8 reserved[3]; u8 LUN[HPSA_MAX_LUN][8]; -}; +} __packed; struct ext_report_lun_entry { u8 lunid[8]; @@ -269,20 +272,20 @@ struct ext_report_lun_entry { u8 lun_count; /* multi-lun device, how many luns */ u8 redundant_paths; u32 ioaccel_handle; /* ioaccel1 only uses lower 16 bits */ -}; +} __packed; struct ReportExtendedLUNdata { u8 LUNListLength[4]; u8 extended_response_flag; u8 reserved[3]; struct ext_report_lun_entry LUN[HPSA_MAX_PHYS_LUN]; -}; +} __packed; struct SenseSubsystem_info { u8 reserved[36]; u8 portname[8]; u8 reserved1[1108]; -}; +} __packed; /* BMIC commands */ #define BMIC_READ 0x26 @@ -317,7 +320,7 @@ union SCSI3Addr { u8 Targ:6; u8 Mode:2; /* b10 */ } LogUnit; -}; +} __packed; struct PhysDevAddr { u32 TargetId:24; @@ -325,20 +328,20 @@ struct PhysDevAddr { u32 Mode:2; /* 2 level target device addr */ union SCSI3Addr Target[2]; -}; +} __packed; struct LogDevAddr { u32 VolId:30; u32 Mode:2; u8 reserved[4]; -}; +} __packed; union LUNAddr { u8 LunAddrBytes[8]; union SCSI3Addr SCSI3Lun[4]; struct PhysDevAddr PhysDev; struct LogDevAddr LogDev; -}; +} __packed; struct CommandListHeader { u8 ReplyQueue; @@ -346,7 +349,7 @@ struct CommandListHeader { __le16 SGTotal; __le64 tag; union LUNAddr LUN; -}; +} __packed; struct RequestBlock { u8 CDBLen; @@ -365,18 +368,18 @@ struct RequestBlock { #define GET_DIR(tad) (((tad) >> 6) & 0x03) u16 Timeout; u8 CDB[16]; -}; +} __packed; struct ErrDescriptor { __le64 Addr; __le32 Len; -}; +} __packed; struct SGDescriptor { __le64 Addr; __le32 Len; __le32 Ext; -}; +} __packed; union MoreErrInfo { struct { @@ -390,7 +393,8 @@ union MoreErrInfo { u8 offense_num; /* byte # of offense 0-base */ u32 offense_value; } Invalid_Cmd; -}; +} __packed; + struct ErrorInfo { u8 ScsiStatus; u8 SenseLen; @@ -398,7 +402,7 @@ struct ErrorInfo { u32 ResidualCnt; union MoreErrInfo MoreErrInfo; u8 SenseInfo[SENSEINFOBYTES]; -}; +} __packed; /* Command types */ #define CMD_IOCTL_PEND 0x01 #define CMD_SCSI 0x03 @@ -453,6 +457,15 @@ struct CommandList { atomic_t refcount; /* Must be last to avoid memset in hpsa_cmd_init() */ } __aligned(COMMANDLIST_ALIGNMENT); +/* + * Make sure our embedded atomic variable is aligned. Otherwise we break atomic + * operations on architectures that don't support unaligned atomics like IA64. + * + * The assert guards against reintroducing an unwanted __packed to + * the struct CommandList.
+ */ +static_assert(offsetof(struct CommandList, refcount) % __alignof__(atomic_t) == 0); + /* Max S/G elements in I/O accelerator command */ #define IOACCEL1_MAXSGENTRIES 24 #define IOACCEL2_MAXSGENTRIES 28 @@ -489,7 +502,7 @@ struct io_accel1_cmd { __le64 host_addr; /* 0x70 - 0x77 */ u8 CISS_LUN[8]; /* 0x78 - 0x7F */ struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES]; -} __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT); +} __packed __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT); #define IOACCEL1_FUNCTION_SCSIIO 0x00 #define IOACCEL1_SGLOFFSET 32 @@ -519,7 +532,7 @@ struct ioaccel2_sg_element { u8 chain_indicator; #define IOACCEL2_CHAIN 0x80 #define IOACCEL2_LAST_SG 0x40 -}; +} __packed; /* * SCSI Response Format structure for IO Accelerator Mode 2 @@ -559,7 +572,7 @@ struct io_accel2_scsi_response { u8 sense_data_len; /* sense/response data length */ u8 resid_cnt[4]; /* residual count */ u8 sense_data_buff[32]; /* sense/response data buffer */ -}; +} __packed; /* * Structure for I/O accelerator (mode 2 or m2) commands. @@ -592,7 +605,7 @@ struct io_accel2_cmd { __le32 tweak_upper; /* Encryption tweak, upper 4 bytes */ struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES]; struct io_accel2_scsi_response error_data; -} __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT); +} __packed __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT); /* * defines for Mode 2 command struct @@ -618,7 +631,7 @@ struct hpsa_tmf_struct { __le64 abort_tag; /* cciss tag of SCSI cmd or TMF to abort */ __le64 error_ptr; /* Error Pointer */ __le32 error_len; /* Error Length */ -} __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT); +} __packed __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT); /* Configuration Table Structure */ struct HostWrite { @@ -626,7 +639,7 @@ struct HostWrite { __le32 command_pool_addr_hi; __le32 CoalIntDelay; __le32 CoalIntCount; -}; +} __packed; #define SIMPLE_MODE 0x02 #define PERFORMANT_MODE 0x04 @@ -675,7 +688,7 @@ struct CfgTable { #define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE (1 << 30) #define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE (1 << 31) __le32 clear_event_notify; -}; +} __packed; #define NUM_BLOCKFETCH_ENTRIES 8 struct TransTable_struct { @@ -686,14 +699,14 @@ struct TransTable_struct { __le32 RepQCtrAddrHigh32; #define MAX_REPLY_QUEUES 64 struct vals32 RepQAddr[MAX_REPLY_QUEUES]; -}; +} __packed; struct hpsa_pci_info { unsigned char bus; unsigned char dev_fn; unsigned short domain; u32 board_id; -}; +} __packed; struct bmic_identify_controller { u8 configured_logical_drive_count; /* offset 0 */ @@ -702,7 +715,7 @@ struct bmic_identify_controller { u8 pad2[136]; u8 controller_mode; /* offset 292 */ u8 pad3[32]; -}; +} __packed; struct bmic_identify_physical_device { @@ -845,7 +858,7 @@ struct bmic_identify_physical_device { u8 max_link_rate[256]; u8 neg_phys_link_rate[256]; u8 box_conn_name[8]; -} __attribute((aligned(512))); +} __packed __attribute((aligned(512))); struct bmic_sense_subsystem_info { u8 primary_slot_number; @@ -858,7 +871,7 @@ struct bmic_sense_subsystem_info { u8 secondary_array_serial_number[32]; u8 secondary_cache_serial_number[32]; u8 pad[332]; -}; +} __packed; struct bmic_sense_storage_box_params { u8 reserved[36]; @@ -870,7 +883,6 @@ struct bmic_sense_storage_box_params { u8 reserver_3[84]; u8 phys_connector[2]; u8 reserved_4[296]; -}; +} __packed; -#pragma pack() #endif /* HPSA_CMD_H */ diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index 49bf2f70a470..31e5455d280c 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -223,7 
+223,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) PM8001_EVENT_LOG_SIZE; pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option = 0x01; pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt = 0x01; - for (i = 0; i < PM8001_MAX_INB_NUM; i++) { + for (i = 0; i < pm8001_ha->max_q_num; i++) { pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30); pm8001_ha->inbnd_q_tbl[i].upper_base_addr = @@ -249,7 +249,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) pm8001_ha->inbnd_q_tbl[i].producer_idx = 0; pm8001_ha->inbnd_q_tbl[i].consumer_index = 0; } - for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) { + for (i = 0; i < pm8001_ha->max_q_num; i++) { pm8001_ha->outbnd_q_tbl[i].element_size_cnt = PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30); pm8001_ha->outbnd_q_tbl[i].upper_base_addr = @@ -671,9 +671,9 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha) read_outbnd_queue_table(pm8001_ha); /* update main config table ,inbound table and outbound table */ update_main_config_table(pm8001_ha); - for (i = 0; i < PM8001_MAX_INB_NUM; i++) + for (i = 0; i < pm8001_ha->max_q_num; i++) update_inbnd_queue_table(pm8001_ha, i); - for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) + for (i = 0; i < pm8001_ha->max_q_num; i++) update_outbnd_queue_table(pm8001_ha, i); /* 8081 controller donot require these operations */ if (deviceid != 0x8081 && deviceid != 0x0042) { diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c index 1e939a2a387f..98a34ed10f1a 100644 --- a/drivers/scsi/scsi_transport_srp.c +++ b/drivers/scsi/scsi_transport_srp.c @@ -541,7 +541,7 @@ int srp_reconnect_rport(struct srp_rport *rport) res = mutex_lock_interruptible(&rport->mutex); if (res) goto out; - if (rport->state != SRP_RPORT_FAIL_FAST) + if (rport->state != SRP_RPORT_FAIL_FAST && rport->state != SRP_RPORT_LOST) /* * sdev state must be SDEV_TRANSPORT_OFFLINE, transition * to SDEV_BLOCK is illegal. Calling scsi_target_unblock() diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index c86760788c72..d3d05e997c13 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -6386,37 +6386,34 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, DECLARE_COMPLETION_ONSTACK(wait); struct request *req; unsigned long flags; - int free_slot, task_tag, err; + int task_tag, err; /* - * Get free slot, sleep if slots are unavailable. - * Even though we use wait_event() which sleeps indefinitely, - * the maximum wait time is bounded by %TM_CMD_TIMEOUT. + * blk_get_request() is used here only to get a free tag. 
*/ req = blk_get_request(q, REQ_OP_DRV_OUT, 0); if (IS_ERR(req)) return PTR_ERR(req); req->end_io_data = &wait; - free_slot = req->tag; - WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs); ufshcd_hold(hba, false); spin_lock_irqsave(host->host_lock, flags); - task_tag = hba->nutrs + free_slot; + blk_mq_start_request(req); + task_tag = req->tag; treq->req_header.dword_0 |= cpu_to_be32(task_tag); - memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq)); - ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function); + memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq)); + ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function); /* send command to the controller */ - __set_bit(free_slot, &hba->outstanding_tasks); + __set_bit(task_tag, &hba->outstanding_tasks); /* Make sure descriptors are ready before ringing the task doorbell */ wmb(); - ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL); + ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL); /* Make sure that doorbell is committed immediately */ wmb(); @@ -6436,24 +6433,24 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR); dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n", __func__, tm_function); - if (ufshcd_clear_tm_cmd(hba, free_slot)) - dev_WARN(hba->dev, "%s: unable clear tm cmd (slot %d) after timeout\n", - __func__, free_slot); + if (ufshcd_clear_tm_cmd(hba, task_tag)) + dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n", + __func__, task_tag); err = -ETIMEDOUT; } else { err = 0; - memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq)); + memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq)); ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP); } spin_lock_irqsave(hba->host->host_lock, flags); - __clear_bit(free_slot, &hba->outstanding_tasks); + __clear_bit(task_tag, &hba->outstanding_tasks); spin_unlock_irqrestore(hba->host->host_lock, flags); + ufshcd_release(hba); blk_put_request(req); - ufshcd_release(hba); return err; } diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c index 5dea6e96ec90..da7c2cd8ebb8 100644 --- a/drivers/staging/octeon/ethernet.c +++ b/drivers/staging/octeon/ethernet.c @@ -407,14 +407,10 @@ static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr) int cvm_oct_common_init(struct net_device *dev) { struct octeon_ethernet *priv = netdev_priv(dev); - const u8 *mac = NULL; + int ret; - if (priv->of_node) - mac = of_get_mac_address(priv->of_node); - - if (!IS_ERR_OR_NULL(mac)) - ether_addr_copy(dev->dev_addr, mac); - else + ret = of_get_mac_address(priv->of_node, dev->dev_addr); + if (ret) eth_hw_addr_random(dev); /* diff --git a/drivers/staging/wfx/main.c b/drivers/staging/wfx/main.c index e7bc1988124a..4b9fdf99981b 100644 --- a/drivers/staging/wfx/main.c +++ b/drivers/staging/wfx/main.c @@ -334,7 +334,6 @@ int wfx_probe(struct wfx_dev *wdev) { int i; int err; - const void *macaddr; struct gpio_desc *gpio_saved; // During first part of boot, gpio_wakeup cannot yet been used. 
So @@ -423,9 +422,9 @@ int wfx_probe(struct wfx_dev *wdev) for (i = 0; i < ARRAY_SIZE(wdev->addresses); i++) { eth_zero_addr(wdev->addresses[i].addr); - macaddr = of_get_mac_address(wdev->dev->of_node); - if (!IS_ERR_OR_NULL(macaddr)) { - ether_addr_copy(wdev->addresses[i].addr, macaddr); + err = of_get_mac_address(wdev->dev->of_node, + wdev->addresses[i].addr); + if (!err) { wdev->addresses[i].addr[ETH_ALEN - 1] += i; } else { ether_addr_copy(wdev->addresses[i].addr, diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index d0e7ed8f28cc..e5c443bfbdf9 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -1166,6 +1166,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, target_get_sess_cmd(&cmd->se_cmd, true); + cmd->se_cmd.tag = (__force u32)cmd->init_task_tag; cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, hdr->cdb); if (cmd->sense_reason) { if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) { @@ -1180,8 +1181,6 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, if (cmd->sense_reason) goto attach_cmd; - /* only used for printks or comparing with ->ref_task_tag */ - cmd->se_cmd.tag = (__force u32)cmd->init_task_tag; cmd->sense_reason = target_cmd_parse_cdb(&cmd->se_cmd); if (cmd->sense_reason) goto attach_cmd; diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c index 620bcf586ee2..c44fad2b9fbb 100644 --- a/drivers/thunderbolt/retimer.c +++ b/drivers/thunderbolt/retimer.c @@ -347,7 +347,7 @@ static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status) ret = tb_retimer_nvm_add(rt); if (ret) { dev_err(&rt->dev, "failed to add NVM devices: %d\n", ret); - device_del(&rt->dev); + device_unregister(&rt->dev); return ret; } @@ -406,7 +406,7 @@ static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index) */ int tb_retimer_scan(struct tb_port *port) { - u32 status[TB_MAX_RETIMER_INDEX] = {}; + u32 status[TB_MAX_RETIMER_INDEX + 1] = {}; int ret, i, last_idx = 0; if (!port->cap_usb4) diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c index f2ebbacd932e..d7d4bdd57f46 100644 --- a/drivers/usb/cdns3/cdnsp-gadget.c +++ b/drivers/usb/cdns3/cdnsp-gadget.c @@ -1128,6 +1128,10 @@ static int cdnsp_gadget_ep_dequeue(struct usb_ep *ep, return -ESHUTDOWN; } + /* Requests have already been dequeued while disabling the endpoint.
*/ + if (!(pep->ep_state & EP_ENABLED)) + return 0; + spin_lock_irqsave(&pdev->lock, flags); ret = cdnsp_ep_dequeue(pep, to_cdnsp_request(request)); spin_unlock_irqrestore(&pdev->lock, flags); diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c index 8f1de1fbbeed..d8d3892e5a69 100644 --- a/drivers/usb/usbip/stub_dev.c +++ b/drivers/usb/usbip/stub_dev.c @@ -63,6 +63,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a dev_info(dev, "stub up\n"); + mutex_lock(&sdev->ud.sysfs_lock); spin_lock_irq(&sdev->ud.lock); if (sdev->ud.status != SDEV_ST_AVAILABLE) { @@ -87,13 +88,13 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a tcp_rx = kthread_create(stub_rx_loop, &sdev->ud, "stub_rx"); if (IS_ERR(tcp_rx)) { sockfd_put(socket); - return -EINVAL; + goto unlock_mutex; } tcp_tx = kthread_create(stub_tx_loop, &sdev->ud, "stub_tx"); if (IS_ERR(tcp_tx)) { kthread_stop(tcp_rx); sockfd_put(socket); - return -EINVAL; + goto unlock_mutex; } /* get task structs now */ @@ -112,6 +113,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a wake_up_process(sdev->ud.tcp_rx); wake_up_process(sdev->ud.tcp_tx); + mutex_unlock(&sdev->ud.sysfs_lock); + } else { dev_info(dev, "stub down\n"); @@ -122,6 +125,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a spin_unlock_irq(&sdev->ud.lock); usbip_event_add(&sdev->ud, SDEV_EVENT_DOWN); + mutex_unlock(&sdev->ud.sysfs_lock); } return count; @@ -130,6 +134,8 @@ sock_err: sockfd_put(socket); err: spin_unlock_irq(&sdev->ud.lock); +unlock_mutex: + mutex_unlock(&sdev->ud.sysfs_lock); return -EINVAL; } static DEVICE_ATTR_WO(usbip_sockfd); @@ -270,6 +276,7 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev) sdev->ud.side = USBIP_STUB; sdev->ud.status = SDEV_ST_AVAILABLE; spin_lock_init(&sdev->ud.lock); + mutex_init(&sdev->ud.sysfs_lock); sdev->ud.tcp_socket = NULL; sdev->ud.sockfd = -1; diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h index d60ce17d3dd2..ea2a20e6d27d 100644 --- a/drivers/usb/usbip/usbip_common.h +++ b/drivers/usb/usbip/usbip_common.h @@ -263,6 +263,9 @@ struct usbip_device { /* lock for status */ spinlock_t lock; + /* mutex for synchronizing sysfs store paths */ + struct mutex sysfs_lock; + int sockfd; struct socket *tcp_socket; diff --git a/drivers/usb/usbip/usbip_event.c b/drivers/usb/usbip/usbip_event.c index 5d88917c9631..086ca76dd053 100644 --- a/drivers/usb/usbip/usbip_event.c +++ b/drivers/usb/usbip/usbip_event.c @@ -70,6 +70,7 @@ static void event_handler(struct work_struct *work) while ((ud = get_event()) != NULL) { usbip_dbg_eh("pending event %lx\n", ud->event); + mutex_lock(&ud->sysfs_lock); /* * NOTE: shutdown must come first. * Shutdown the device. 
@@ -90,6 +91,7 @@ static void event_handler(struct work_struct *work) ud->eh_ops.unusable(ud); unset_event(ud, USBIP_EH_UNUSABLE); } + mutex_unlock(&ud->sysfs_lock); wake_up(&ud->eh_waitq); } diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index a20a8380ca0c..4ba6bcdaa8e9 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c @@ -1101,6 +1101,7 @@ static void vhci_device_init(struct vhci_device *vdev) vdev->ud.side = USBIP_VHCI; vdev->ud.status = VDEV_ST_NULL; spin_lock_init(&vdev->ud.lock); + mutex_init(&vdev->ud.sysfs_lock); INIT_LIST_HEAD(&vdev->priv_rx); INIT_LIST_HEAD(&vdev->priv_tx); diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c index c4b4256e5dad..e2847cd3e6e3 100644 --- a/drivers/usb/usbip/vhci_sysfs.c +++ b/drivers/usb/usbip/vhci_sysfs.c @@ -185,6 +185,8 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport) usbip_dbg_vhci_sysfs("enter\n"); + mutex_lock(&vdev->ud.sysfs_lock); + /* lock */ spin_lock_irqsave(&vhci->lock, flags); spin_lock(&vdev->ud.lock); @@ -195,6 +197,7 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport) /* unlock */ spin_unlock(&vdev->ud.lock); spin_unlock_irqrestore(&vhci->lock, flags); + mutex_unlock(&vdev->ud.sysfs_lock); return -EINVAL; } @@ -205,6 +208,8 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport) usbip_event_add(&vdev->ud, VDEV_EVENT_DOWN); + mutex_unlock(&vdev->ud.sysfs_lock); + return 0; } @@ -349,30 +354,36 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr, else vdev = &vhci->vhci_hcd_hs->vdev[rhport]; + mutex_lock(&vdev->ud.sysfs_lock); + /* Extract socket from fd. */ socket = sockfd_lookup(sockfd, &err); if (!socket) { dev_err(dev, "failed to lookup sock"); - return -EINVAL; + err = -EINVAL; + goto unlock_mutex; } if (socket->type != SOCK_STREAM) { dev_err(dev, "Expecting SOCK_STREAM - found %d", socket->type); sockfd_put(socket); - return -EINVAL; + err = -EINVAL; + goto unlock_mutex; } /* create threads before locking */ tcp_rx = kthread_create(vhci_rx_loop, &vdev->ud, "vhci_rx"); if (IS_ERR(tcp_rx)) { sockfd_put(socket); - return -EINVAL; + err = -EINVAL; + goto unlock_mutex; } tcp_tx = kthread_create(vhci_tx_loop, &vdev->ud, "vhci_tx"); if (IS_ERR(tcp_tx)) { kthread_stop(tcp_rx); sockfd_put(socket); - return -EINVAL; + err = -EINVAL; + goto unlock_mutex; } /* get task structs now */ @@ -397,7 +408,8 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr, * Will be retried from userspace * if there's another free port. 
*/ - return -EBUSY; + err = -EBUSY; + goto unlock_mutex; } dev_info(dev, "pdev(%u) rhport(%u) sockfd(%d)\n", @@ -423,7 +435,15 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr, rh_port_connect(vdev, speed); + dev_info(dev, "Device attached\n"); + + mutex_unlock(&vdev->ud.sysfs_lock); + return count; + +unlock_mutex: + mutex_unlock(&vdev->ud.sysfs_lock); + return err; } static DEVICE_ATTR_WO(attach); diff --git a/drivers/usb/usbip/vudc_dev.c b/drivers/usb/usbip/vudc_dev.c index c8eeabdd9b56..2bc428f2e261 100644 --- a/drivers/usb/usbip/vudc_dev.c +++ b/drivers/usb/usbip/vudc_dev.c @@ -572,6 +572,7 @@ static int init_vudc_hw(struct vudc *udc) init_waitqueue_head(&udc->tx_waitq); spin_lock_init(&ud->lock); + mutex_init(&ud->sysfs_lock); ud->status = SDEV_ST_AVAILABLE; ud->side = USBIP_VUDC; diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c index 7383a543c6d1..f7633ee655a1 100644 --- a/drivers/usb/usbip/vudc_sysfs.c +++ b/drivers/usb/usbip/vudc_sysfs.c @@ -112,6 +112,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, dev_err(dev, "no device"); return -ENODEV; } + mutex_lock(&udc->ud.sysfs_lock); spin_lock_irqsave(&udc->lock, flags); /* Don't export what we don't have */ if (!udc->driver || !udc->pullup) { @@ -187,6 +188,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, wake_up_process(udc->ud.tcp_rx); wake_up_process(udc->ud.tcp_tx); + + mutex_unlock(&udc->ud.sysfs_lock); return count; } else { @@ -207,6 +210,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, } spin_unlock_irqrestore(&udc->lock, flags); + mutex_unlock(&udc->ud.sysfs_lock); return count; @@ -216,6 +220,7 @@ unlock_ud: spin_unlock_irq(&udc->ud.lock); unlock: spin_unlock_irqrestore(&udc->lock, flags); + mutex_unlock(&udc->ud.sysfs_lock); return ret; } diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 65e7e6b44578..5023e23db3bc 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -1656,6 +1656,8 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma) index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT); + if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) + return -EINVAL; if (vma->vm_end < vma->vm_start) return -EINVAL; if ((vma->vm_flags & VM_SHARED) == 0) @@ -1664,7 +1666,7 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma) int regnum = index - VFIO_PCI_NUM_REGIONS; struct vfio_pci_region *region = vdev->region + regnum; - if (region && region->ops && region->ops->mmap && + if (region->ops && region->ops->mmap && (region->flags & VFIO_REGION_INFO_FLAG_MMAP)) return region->ops->mmap(vdev, region, vma); return -EINVAL; diff --git a/drivers/watchdog/armada_37xx_wdt.c b/drivers/watchdog/armada_37xx_wdt.c index e5dcb26d85f0..1635f421ef2c 100644 --- a/drivers/watchdog/armada_37xx_wdt.c +++ b/drivers/watchdog/armada_37xx_wdt.c @@ -2,7 +2,7 @@ /* * Watchdog driver for Marvell Armada 37xx SoCs * - * Author: Marek Behun <marek.behun@nic.cz> + * Author: Marek Behún <kabel@kernel.org> */ #include <linux/clk.h> @@ -366,7 +366,7 @@ static struct platform_driver armada_37xx_wdt_driver = { module_platform_driver(armada_37xx_wdt_driver); -MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>"); +MODULE_AUTHOR("Marek Behun <kabel@kernel.org>"); MODULE_DESCRIPTION("Armada 37xx CPU Watchdog"); MODULE_LICENSE("GPL v2");