Diffstat (limited to 'drivers'): 521 files changed, 4511 insertions, 2704 deletions
diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c index ece8c1a921cc..88c8af455ea3 100644 --- a/drivers/acpi/acpi_configfs.c +++ b/drivers/acpi/acpi_configfs.c @@ -11,6 +11,7 @@ #include <linux/module.h> #include <linux/configfs.h> #include <linux/acpi.h> +#include <linux/security.h> #include "acpica/accommon.h" #include "acpica/actables.h" @@ -28,7 +29,10 @@ static ssize_t acpi_table_aml_write(struct config_item *cfg, { const struct acpi_table_header *header = data; struct acpi_table *table; - int ret; + int ret = security_locked_down(LOCKDOWN_ACPI_TABLES); + + if (ret) + return ret; table = container_of(cfg, struct acpi_table, cfg); diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c index 5fab7e350db8..92b996a564d0 100644 --- a/drivers/acpi/dptf/dptf_power.c +++ b/drivers/acpi/dptf/dptf_power.c @@ -228,6 +228,7 @@ static const struct acpi_device_id int3407_device_ids[] = { {"INT3407", 0}, {"INT3532", 0}, {"INTC1047", 0}, + {"INTC1050", 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, int3407_device_ids); diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c index 873e039ad4b7..62873388b24f 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c @@ -25,8 +25,8 @@ static int acpi_fan_remove(struct platform_device *pdev); static const struct acpi_device_id fan_device_ids[] = { {"PNP0C0B", 0}, - {"INT1044", 0}, {"INT3404", 0}, + {"INTC1044", 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, fan_device_ids); diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index 3a89909b50a6..76c668c05fa0 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c @@ -938,13 +938,13 @@ static void __exit interrupt_stats_exit(void) } static ssize_t -acpi_show_profile(struct device *dev, struct device_attribute *attr, +acpi_show_profile(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile); } -static const struct device_attribute pm_profile_attr = +static const struct kobj_attribute pm_profile_attr = __ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL); static ssize_t hotplug_enabled_show(struct kobject *kobj, diff --git a/drivers/android/binder.c b/drivers/android/binder.c index e47c8a4c83db..f50c5f182bb5 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -4686,8 +4686,15 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc) static void binder_free_proc(struct binder_proc *proc) { + struct binder_device *device; + BUG_ON(!list_empty(&proc->todo)); BUG_ON(!list_empty(&proc->delivered_death)); + device = container_of(proc->context, struct binder_device, context); + if (refcount_dec_and_test(&device->ref)) { + kfree(proc->context->name); + kfree(device); + } binder_alloc_deferred_release(&proc->alloc); put_task_struct(proc->tsk); binder_stats_deleted(BINDER_STAT_PROC); @@ -5406,7 +5413,6 @@ static int binder_node_release(struct binder_node *node, int refs) static void binder_deferred_release(struct binder_proc *proc) { struct binder_context *context = proc->context; - struct binder_device *device; struct rb_node *n; int threads, nodes, incoming_refs, outgoing_refs, active_transactions; @@ -5423,12 +5429,6 @@ static void binder_deferred_release(struct binder_proc *proc) context->binder_context_mgr_node = NULL; } mutex_unlock(&context->context_mgr_node_lock); - device = container_of(proc->context, struct binder_device, context); - if (refcount_dec_and_test(&device->ref)) { - kfree(context->name); - kfree(device); - } - proc->context = NULL; 
binder_inner_proc_lock(proc); /* * Make sure proc stays alive after we diff --git a/drivers/base/base.h b/drivers/base/base.h index 95c22c0f9036..40fb069a8a7e 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -153,7 +153,6 @@ extern char *make_class_name(const char *name, struct kobject *kobj); extern int devres_release_all(struct device *dev); extern void device_block_probing(void); extern void device_unblock_probing(void); -extern void driver_deferred_probe_force_trigger(void); /* /sys/devices directory */ extern struct kset *devices_kset; diff --git a/drivers/base/core.c b/drivers/base/core.c index 67d39a90b45c..05d414e9e8a4 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -50,6 +50,7 @@ static DEFINE_MUTEX(wfs_lock); static LIST_HEAD(deferred_sync); static unsigned int defer_sync_state_count = 1; static unsigned int defer_fw_devlink_count; +static LIST_HEAD(deferred_fw_devlink); static DEFINE_MUTEX(defer_fw_devlink_lock); static bool fw_devlink_is_permissive(void); @@ -754,11 +755,11 @@ static void __device_links_queue_sync_state(struct device *dev, */ dev->state_synced = true; - if (WARN_ON(!list_empty(&dev->links.defer_sync))) + if (WARN_ON(!list_empty(&dev->links.defer_hook))) return; get_device(dev); - list_add_tail(&dev->links.defer_sync, list); + list_add_tail(&dev->links.defer_hook, list); } /** @@ -776,8 +777,8 @@ static void device_links_flush_sync_list(struct list_head *list, { struct device *dev, *tmp; - list_for_each_entry_safe(dev, tmp, list, links.defer_sync) { - list_del_init(&dev->links.defer_sync); + list_for_each_entry_safe(dev, tmp, list, links.defer_hook) { + list_del_init(&dev->links.defer_hook); if (dev != dont_lock_dev) device_lock(dev); @@ -815,12 +816,12 @@ void device_links_supplier_sync_state_resume(void) if (defer_sync_state_count) goto out; - list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_sync) { + list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_hook) { /* * Delete from deferred_sync list before queuing it to - * sync_list because defer_sync is used for both lists. + * sync_list because defer_hook is used for both lists. */ - list_del_init(&dev->links.defer_sync); + list_del_init(&dev->links.defer_hook); __device_links_queue_sync_state(dev, &sync_list); } out: @@ -838,8 +839,8 @@ late_initcall(sync_state_resume_initcall); static void __device_links_supplier_defer_sync(struct device *sup) { - if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup)) - list_add_tail(&sup->links.defer_sync, &deferred_sync); + if (list_empty(&sup->links.defer_hook) && dev_has_sync_state(sup)) + list_add_tail(&sup->links.defer_hook, &deferred_sync); } static void device_link_drop_managed(struct device_link *link) @@ -1052,7 +1053,7 @@ void device_links_driver_cleanup(struct device *dev) WRITE_ONCE(link->status, DL_STATE_DORMANT); } - list_del_init(&dev->links.defer_sync); + list_del_init(&dev->links.defer_hook); __device_links_no_driver(dev); device_links_write_unlock(); @@ -1244,6 +1245,12 @@ static void fw_devlink_link_device(struct device *dev) fw_ret = -EAGAIN; } else { fw_ret = -ENODEV; + /* + * defer_hook is not used to add device to deferred_sync list + * until device is bound. Since deferred fw devlink also blocks + * probing, same list hook can be used for deferred_fw_devlink. 
+ */ + list_add_tail(&dev->links.defer_hook, &deferred_fw_devlink); } if (fw_ret == -ENODEV) @@ -1312,6 +1319,9 @@ void fw_devlink_pause(void) */ void fw_devlink_resume(void) { + struct device *dev, *tmp; + LIST_HEAD(probe_list); + mutex_lock(&defer_fw_devlink_lock); if (!defer_fw_devlink_count) { WARN(true, "Unmatched fw_devlink pause/resume!"); @@ -1323,9 +1333,19 @@ void fw_devlink_resume(void) goto out; device_link_add_missing_supplier_links(); - driver_deferred_probe_force_trigger(); + list_splice_tail_init(&deferred_fw_devlink, &probe_list); out: mutex_unlock(&defer_fw_devlink_lock); + + /* + * bus_probe_device() can cause new devices to get added and they'll + * try to grab defer_fw_devlink_lock. So, this needs to be done outside + * the defer_fw_devlink_lock. + */ + list_for_each_entry_safe(dev, tmp, &probe_list, links.defer_hook) { + list_del_init(&dev->links.defer_hook); + bus_probe_device(dev); + } } /* Device links support end. */ @@ -2172,7 +2192,7 @@ void device_initialize(struct device *dev) INIT_LIST_HEAD(&dev->links.consumers); INIT_LIST_HEAD(&dev->links.suppliers); INIT_LIST_HEAD(&dev->links.needs_suppliers); - INIT_LIST_HEAD(&dev->links.defer_sync); + INIT_LIST_HEAD(&dev->links.defer_hook); dev->links.status = DL_DEV_NO_DRIVER; } EXPORT_SYMBOL_GPL(device_initialize); diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 9a1d940342ac..48ca81cb8ebc 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -164,11 +164,6 @@ static void driver_deferred_probe_trigger(void) if (!driver_deferred_probe_enable) return; - driver_deferred_probe_force_trigger(); -} - -void driver_deferred_probe_force_trigger(void) -{ /* * A successful probe means that all the devices in the pending list * should be triggered to be reprobed. Move all the deferred devices diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c index 977d27bd1a22..a97f33d0c59f 100644 --- a/drivers/base/power/trace.c +++ b/drivers/base/power/trace.c @@ -265,14 +265,14 @@ static struct notifier_block pm_trace_nb = { .notifier_call = pm_trace_notify, }; -static int early_resume_init(void) +static int __init early_resume_init(void) { hash_value_early_read = read_magic_time(); register_pm_notifier(&pm_trace_nb); return 0; } -static int late_resume_init(void) +static int __init late_resume_init(void) { unsigned int val = hash_value_early_read; unsigned int user, file, dev; diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig index 0fd6f97ee523..1d1d26b0d279 100644 --- a/drivers/base/regmap/Kconfig +++ b/drivers/base/regmap/Kconfig @@ -4,7 +4,7 @@ # subsystems should select the appropriate symbols. 
config REGMAP - default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SCCB || REGMAP_I3C) + default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SCCB || REGMAP_I3C) select IRQ_DOMAIN if REGMAP_IRQ bool diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index 089e5dc7144a..f58baff2be0a 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c @@ -463,29 +463,31 @@ static ssize_t regmap_cache_only_write_file(struct file *file, { struct regmap *map = container_of(file->private_data, struct regmap, cache_only); - ssize_t result; - bool was_enabled, require_sync = false; + bool new_val, require_sync = false; int err; - map->lock(map->lock_arg); + err = kstrtobool_from_user(user_buf, count, &new_val); + /* Ignore malforned data like debugfs_write_file_bool() */ + if (err) + return count; - was_enabled = map->cache_only; + err = debugfs_file_get(file->f_path.dentry); + if (err) + return err; - result = debugfs_write_file_bool(file, user_buf, count, ppos); - if (result < 0) { - map->unlock(map->lock_arg); - return result; - } + map->lock(map->lock_arg); - if (map->cache_only && !was_enabled) { + if (new_val && !map->cache_only) { dev_warn(map->dev, "debugfs cache_only=Y forced\n"); add_taint(TAINT_USER, LOCKDEP_STILL_OK); - } else if (!map->cache_only && was_enabled) { + } else if (!new_val && map->cache_only) { dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n"); require_sync = true; } + map->cache_only = new_val; map->unlock(map->lock_arg); + debugfs_file_put(file->f_path.dentry); if (require_sync) { err = regcache_sync(map); @@ -493,7 +495,7 @@ static ssize_t regmap_cache_only_write_file(struct file *file, dev_err(map->dev, "Failed to sync cache %d\n", err); } - return result; + return count; } static const struct file_operations regmap_cache_only_fops = { @@ -508,28 +510,32 @@ static ssize_t regmap_cache_bypass_write_file(struct file *file, { struct regmap *map = container_of(file->private_data, struct regmap, cache_bypass); - ssize_t result; - bool was_enabled; + bool new_val; + int err; - map->lock(map->lock_arg); + err = kstrtobool_from_user(user_buf, count, &new_val); + /* Ignore malforned data like debugfs_write_file_bool() */ + if (err) + return count; - was_enabled = map->cache_bypass; + err = debugfs_file_get(file->f_path.dentry); + if (err) + return err; - result = debugfs_write_file_bool(file, user_buf, count, ppos); - if (result < 0) - goto out; + map->lock(map->lock_arg); - if (map->cache_bypass && !was_enabled) { + if (new_val && !map->cache_bypass) { dev_warn(map->dev, "debugfs cache_bypass=Y forced\n"); add_taint(TAINT_USER, LOCKDEP_STILL_OK); - } else if (!map->cache_bypass && was_enabled) { + } else if (!new_val && map->cache_bypass) { dev_warn(map->dev, "debugfs cache_bypass=N forced\n"); } + map->cache_bypass = new_val; -out: map->unlock(map->lock_arg); + debugfs_file_put(file->f_path.dentry); - return result; + return count; } static const struct file_operations regmap_cache_bypass_fops = { diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index c472f624382d..795a62a04022 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -17,6 +17,7 @@ #include <linux/delay.h> #include <linux/log2.h> #include <linux/hwspinlock.h> +#include <asm/unaligned.h> #define CREATE_TRACE_POINTS #include 
"trace.h" @@ -249,22 +250,20 @@ static void regmap_format_8(void *buf, unsigned int val, unsigned int shift) static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift) { - __be16 *b = buf; - - b[0] = cpu_to_be16(val << shift); + put_unaligned_be16(val << shift, buf); } static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift) { - __le16 *b = buf; - - b[0] = cpu_to_le16(val << shift); + put_unaligned_le16(val << shift, buf); } static void regmap_format_16_native(void *buf, unsigned int val, unsigned int shift) { - *(u16 *)buf = val << shift; + u16 v = val << shift; + + memcpy(buf, &v, sizeof(v)); } static void regmap_format_24(void *buf, unsigned int val, unsigned int shift) @@ -280,43 +279,39 @@ static void regmap_format_24(void *buf, unsigned int val, unsigned int shift) static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift) { - __be32 *b = buf; - - b[0] = cpu_to_be32(val << shift); + put_unaligned_be32(val << shift, buf); } static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift) { - __le32 *b = buf; - - b[0] = cpu_to_le32(val << shift); + put_unaligned_le32(val << shift, buf); } static void regmap_format_32_native(void *buf, unsigned int val, unsigned int shift) { - *(u32 *)buf = val << shift; + u32 v = val << shift; + + memcpy(buf, &v, sizeof(v)); } #ifdef CONFIG_64BIT static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift) { - __be64 *b = buf; - - b[0] = cpu_to_be64((u64)val << shift); + put_unaligned_be64((u64) val << shift, buf); } static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift) { - __le64 *b = buf; - - b[0] = cpu_to_le64((u64)val << shift); + put_unaligned_le64((u64) val << shift, buf); } static void regmap_format_64_native(void *buf, unsigned int val, unsigned int shift) { - *(u64 *)buf = (u64)val << shift; + u64 v = (u64) val << shift; + + memcpy(buf, &v, sizeof(v)); } #endif @@ -333,35 +328,34 @@ static unsigned int regmap_parse_8(const void *buf) static unsigned int regmap_parse_16_be(const void *buf) { - const __be16 *b = buf; - - return be16_to_cpu(b[0]); + return get_unaligned_be16(buf); } static unsigned int regmap_parse_16_le(const void *buf) { - const __le16 *b = buf; - - return le16_to_cpu(b[0]); + return get_unaligned_le16(buf); } static void regmap_parse_16_be_inplace(void *buf) { - __be16 *b = buf; + u16 v = get_unaligned_be16(buf); - b[0] = be16_to_cpu(b[0]); + memcpy(buf, &v, sizeof(v)); } static void regmap_parse_16_le_inplace(void *buf) { - __le16 *b = buf; + u16 v = get_unaligned_le16(buf); - b[0] = le16_to_cpu(b[0]); + memcpy(buf, &v, sizeof(v)); } static unsigned int regmap_parse_16_native(const void *buf) { - return *(u16 *)buf; + u16 v; + + memcpy(&v, buf, sizeof(v)); + return v; } static unsigned int regmap_parse_24(const void *buf) @@ -376,69 +370,67 @@ static unsigned int regmap_parse_24(const void *buf) static unsigned int regmap_parse_32_be(const void *buf) { - const __be32 *b = buf; - - return be32_to_cpu(b[0]); + return get_unaligned_be32(buf); } static unsigned int regmap_parse_32_le(const void *buf) { - const __le32 *b = buf; - - return le32_to_cpu(b[0]); + return get_unaligned_le32(buf); } static void regmap_parse_32_be_inplace(void *buf) { - __be32 *b = buf; + u32 v = get_unaligned_be32(buf); - b[0] = be32_to_cpu(b[0]); + memcpy(buf, &v, sizeof(v)); } static void regmap_parse_32_le_inplace(void *buf) { - __le32 *b = buf; + u32 v = get_unaligned_le32(buf); - b[0] = le32_to_cpu(b[0]); + memcpy(buf, 
&v, sizeof(v)); } static unsigned int regmap_parse_32_native(const void *buf) { - return *(u32 *)buf; + u32 v; + + memcpy(&v, buf, sizeof(v)); + return v; } #ifdef CONFIG_64BIT static unsigned int regmap_parse_64_be(const void *buf) { - const __be64 *b = buf; - - return be64_to_cpu(b[0]); + return get_unaligned_be64(buf); } static unsigned int regmap_parse_64_le(const void *buf) { - const __le64 *b = buf; - - return le64_to_cpu(b[0]); + return get_unaligned_le64(buf); } static void regmap_parse_64_be_inplace(void *buf) { - __be64 *b = buf; + u64 v = get_unaligned_be64(buf); - b[0] = be64_to_cpu(b[0]); + memcpy(buf, &v, sizeof(v)); } static void regmap_parse_64_le_inplace(void *buf) { - __le64 *b = buf; + u64 v = get_unaligned_le64(buf); - b[0] = le64_to_cpu(b[0]); + memcpy(buf, &v, sizeof(v)); } static unsigned int regmap_parse_64_native(const void *buf) { - return *(u64 *)buf; + u64 v; + + memcpy(&v, buf, sizeof(v)); + return v; } #endif @@ -1357,6 +1349,7 @@ void regmap_exit(struct regmap *map) if (map->hwlock) hwspin_lock_free(map->hwlock); kfree_const(map->name); + kfree(map->patch); kfree(map); } EXPORT_SYMBOL_GPL(regmap_exit); @@ -1371,7 +1364,7 @@ static int dev_get_regmap_match(struct device *dev, void *res, void *data) /* If the user didn't specify a name match any */ if (data) - return (*r)->name == data; + return !strcmp((*r)->name, data); else return 1; } @@ -2944,8 +2937,9 @@ EXPORT_SYMBOL_GPL(regmap_update_bits_base); * @reg: Register to read from * @bits: Bits to test * - * Returns -1 if the underlying regmap_read() fails, 0 if at least one of the - * tested bits is not set and 1 if all tested bits are set. + * Returns 0 if at least one of the tested bits is not set, 1 if all tested + * bits are set and a negative error number if the underlying regmap_read() + * fails. 
*/ int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits) { diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 43cff01a5a67..ce7e9f223b20 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -1033,25 +1033,26 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, test_bit(NBD_RT_BOUND, &config->runtime_flags))) { dev_err(disk_to_dev(nbd->disk), "Device being setup by another task"); - sockfd_put(sock); - return -EBUSY; + err = -EBUSY; + goto put_socket; + } + + nsock = kzalloc(sizeof(*nsock), GFP_KERNEL); + if (!nsock) { + err = -ENOMEM; + goto put_socket; } socks = krealloc(config->socks, (config->num_connections + 1) * sizeof(struct nbd_sock *), GFP_KERNEL); if (!socks) { - sockfd_put(sock); - return -ENOMEM; + kfree(nsock); + err = -ENOMEM; + goto put_socket; } config->socks = socks; - nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL); - if (!nsock) { - sockfd_put(sock); - return -ENOMEM; - } - nsock->fallback_index = -1; nsock->dead = false; mutex_init(&nsock->tx_lock); @@ -1063,6 +1064,10 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, atomic_inc(&config->live_connections); return 0; + +put_socket: + sockfd_put(sock); + return err; } static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg) diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 9d21bf0f155e..980df853ee49 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -878,6 +878,7 @@ out_put_disk: put_disk(vblk->disk); out_free_vq: vdev->config->del_vqs(vdev); + kfree(vblk->vqs); out_free_vblk: kfree(vblk); out_free_index: diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 6e2ad90b17a3..270dd810be54 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -2021,7 +2021,8 @@ static ssize_t hot_add_show(struct class *class, return ret; return scnprintf(buf, PAGE_SIZE, "%d\n", ret); } -static CLASS_ATTR_RO(hot_add); +static struct class_attribute class_attr_hot_add = + __ATTR(hot_add, 0400, hot_add_show, NULL); static ssize_t hot_remove_store(struct class *class, struct class_attribute *attr, diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 3affd180baac..4f513fa3362f 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -221,6 +221,34 @@ static u32 sysc_read_sysstatus(struct sysc *ddata) return sysc_read(ddata, offset); } +/* Poll on reset status */ +static int sysc_wait_softreset(struct sysc *ddata) +{ + u32 sysc_mask, syss_done, rstval; + int syss_offset, error = 0; + + syss_offset = ddata->offsets[SYSC_SYSSTATUS]; + sysc_mask = BIT(ddata->cap->regbits->srst_shift); + + if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED) + syss_done = 0; + else + syss_done = ddata->cfg.syss_mask; + + if (syss_offset >= 0) { + error = readx_poll_timeout_atomic(sysc_read_sysstatus, ddata, + rstval, (rstval & ddata->cfg.syss_mask) == + syss_done, 100, MAX_MODULE_SOFTRESET_WAIT); + + } else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) { + error = readx_poll_timeout_atomic(sysc_read_sysconfig, ddata, + rstval, !(rstval & sysc_mask), + 100, MAX_MODULE_SOFTRESET_WAIT); + } + + return error; +} + static int sysc_add_named_clock_from_child(struct sysc *ddata, const char *name, const char *optfck_name) @@ -925,18 +953,47 @@ static int sysc_enable_module(struct device *dev) struct sysc *ddata; const struct sysc_regbits *regbits; u32 reg, idlemodes, best_mode; + int error; ddata = dev_get_drvdata(dev); + + /* + * Some modules 
like DSS reset automatically on idle. Enable optional + * reset clocks and wait for OCP softreset to complete. + */ + if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET) { + error = sysc_enable_opt_clocks(ddata); + if (error) { + dev_err(ddata->dev, + "Optional clocks failed for enable: %i\n", + error); + return error; + } + } + error = sysc_wait_softreset(ddata); + if (error) + dev_warn(ddata->dev, "OCP softreset timed out\n"); + if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET) + sysc_disable_opt_clocks(ddata); + + /* + * Some subsystem private interconnects, like DSS top level module, + * need only the automatic OCP softreset handling with no sysconfig + * register bits to configure. + */ if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV) return 0; regbits = ddata->cap->regbits; reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]); - /* Set CLOCKACTIVITY, we only use it for ick */ + /* + * Set CLOCKACTIVITY, we only use it for ick. And we only configure it + * based on the SYSC_QUIRK_USE_CLOCKACT flag, not based on the hardware + * capabilities. See the old HWMOD_SET_DEFAULT_CLOCKACT flag. + */ if (regbits->clkact_shift >= 0 && - (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT || - ddata->cfg.sysc_val & BIT(regbits->clkact_shift))) + (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT)) reg |= SYSC_CLOCACT_ICK << regbits->clkact_shift; /* Set SIDLE mode */ @@ -991,6 +1048,9 @@ set_autoidle: sysc_write_sysconfig(ddata, reg); } + /* Flush posted write */ + sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]); + if (ddata->module_enable_quirk) ddata->module_enable_quirk(ddata); @@ -1071,6 +1131,9 @@ set_sidle: reg |= 1 << regbits->autoidle_shift; sysc_write_sysconfig(ddata, reg); + /* Flush posted write */ + sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]); + return 0; } @@ -1215,7 +1278,8 @@ static int __maybe_unused sysc_noirq_suspend(struct device *dev) ddata = dev_get_drvdata(dev); - if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE) + if (ddata->cfg.quirks & + (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE)) return 0; return pm_runtime_force_suspend(dev); @@ -1227,7 +1291,8 @@ static int __maybe_unused sysc_noirq_resume(struct device *dev) ddata = dev_get_drvdata(dev); - if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE) + if (ddata->cfg.quirks & + (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE)) return 0; return pm_runtime_force_resume(dev); @@ -1488,7 +1553,7 @@ static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset, bool lcd_en, digit_en, lcd2_en = false, lcd3_en = false; const int lcd_en_mask = BIT(0), digit_en_mask = BIT(1); int manager_count; - bool framedonetv_irq; + bool framedonetv_irq = true; u32 val, irq_mask = 0; switch (sysc_soc->soc) { @@ -1505,6 +1570,7 @@ static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset, break; case SOC_AM4: manager_count = 1; + framedonetv_irq = false; break; case SOC_UNKNOWN: default: @@ -1663,8 +1729,8 @@ static void sysc_quirk_rtc(struct sysc *ddata, bool lock) local_irq_save(flags); /* RTC_STATUS BUSY bit may stay active for 1/32768 seconds (~30 usec) */ - error = readl_poll_timeout(ddata->module_va + 0x44, val, - !(val & BIT(0)), 100, 50); + error = readl_poll_timeout_atomic(ddata->module_va + 0x44, val, + !(val & BIT(0)), 100, 50); if (error) dev_warn(ddata->dev, "rtc busy timeout\n"); /* Now we have ~15 microseconds to read/write various registers */ @@ -1822,11 +1888,10 @@ static int sysc_legacy_init(struct sysc *ddata) */ static int sysc_reset(struct sysc *ddata) { - int sysc_offset, syss_offset, sysc_val, rstval, error = 0; - u32 
sysc_mask, syss_done; + int sysc_offset, sysc_val, error; + u32 sysc_mask; sysc_offset = ddata->offsets[SYSC_SYSCONFIG]; - syss_offset = ddata->offsets[SYSC_SYSSTATUS]; if (ddata->legacy_mode || ddata->cap->regbits->srst_shift < 0 || @@ -1835,11 +1900,6 @@ static int sysc_reset(struct sysc *ddata) sysc_mask = BIT(ddata->cap->regbits->srst_shift); - if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED) - syss_done = 0; - else - syss_done = ddata->cfg.syss_mask; - if (ddata->pre_reset_quirk) ddata->pre_reset_quirk(ddata); @@ -1856,18 +1916,9 @@ static int sysc_reset(struct sysc *ddata) if (ddata->post_reset_quirk) ddata->post_reset_quirk(ddata); - /* Poll on reset status */ - if (syss_offset >= 0) { - error = readx_poll_timeout(sysc_read_sysstatus, ddata, rstval, - (rstval & ddata->cfg.syss_mask) == - syss_done, - 100, MAX_MODULE_SOFTRESET_WAIT); - - } else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) { - error = readx_poll_timeout(sysc_read_sysconfig, ddata, rstval, - !(rstval & sysc_mask), - 100, MAX_MODULE_SOFTRESET_WAIT); - } + error = sysc_wait_softreset(ddata); + if (error) + dev_warn(ddata->dev, "OCP softreset timed out\n"); if (ddata->reset_done_quirk) ddata->reset_done_quirk(ddata); diff --git a/drivers/char/tpm/st33zp24/i2c.c b/drivers/char/tpm/st33zp24/i2c.c index 35333b65acd1..7c617edff4ca 100644 --- a/drivers/char/tpm/st33zp24/i2c.c +++ b/drivers/char/tpm/st33zp24/i2c.c @@ -210,7 +210,7 @@ static int st33zp24_i2c_request_resources(struct i2c_client *client) /* * st33zp24_i2c_probe initialize the TPM device - * @param: client, the i2c_client drescription (TPM I2C description). + * @param: client, the i2c_client description (TPM I2C description). * @param: id, the i2c_device_id struct. * @return: 0 in case of success. * -1 in other case. diff --git a/drivers/char/tpm/st33zp24/spi.c b/drivers/char/tpm/st33zp24/spi.c index 26e09de50f1e..a75dafd39445 100644 --- a/drivers/char/tpm/st33zp24/spi.c +++ b/drivers/char/tpm/st33zp24/spi.c @@ -329,7 +329,7 @@ static int st33zp24_spi_request_resources(struct spi_device *dev) /* * st33zp24_spi_probe initialize the TPM device - * @param: dev, the spi_device drescription (TPM SPI description). + * @param: dev, the spi_device description (TPM SPI description). * @return: 0 in case of success. * or a negative value describing the error. */ @@ -378,7 +378,7 @@ static int st33zp24_spi_probe(struct spi_device *dev) /* * st33zp24_spi_remove remove the TPM device - * @param: client, the spi_device drescription (TPM SPI description). + * @param: client, the spi_device description (TPM SPI description). * @return: 0 in case of success. */ static int st33zp24_spi_remove(struct spi_device *dev) diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c index 37bb13f516be..4ec10ab5e576 100644 --- a/drivers/char/tpm/st33zp24/st33zp24.c +++ b/drivers/char/tpm/st33zp24/st33zp24.c @@ -502,7 +502,7 @@ static const struct tpm_class_ops st33zp24_tpm = { /* * st33zp24_probe initialize the TPM device - * @param: client, the i2c_client drescription (TPM I2C description). + * @param: client, the i2c_client description (TPM I2C description). * @param: id, the i2c_device_id struct. * @return: 0 in case of success. * -1 in other case. 
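The regmap changes earlier in this diff replace open-coded casts through __be16/__le16/__be32 pointers with the put_unaligned_*() and get_unaligned_*() helpers from <asm/unaligned.h>, so the value buffers no longer need natural alignment. Below is a minimal sketch of that pattern for the 16-bit big-endian case; it assumes kernel context, and the demo_* names are illustrative rather than taken from the patch.

/*
 * Sketch of the <asm/unaligned.h> pattern used by the regmap
 * format/parse conversion in this diff. The demo_* names are
 * hypothetical and not part of the patch.
 */
#include <linux/types.h>
#include <asm/unaligned.h>

/* Store a shifted 16-bit value big-endian at a possibly unaligned address. */
static void demo_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be16(val << shift, buf);
}

/* Read it back without dereferencing a potentially misaligned __be16 pointer. */
static unsigned int demo_parse_16_be(const void *buf)
{
	return get_unaligned_be16(buf);
}

The native-endian variants in the patch take the same precaution by copying through a correctly typed local variable with memcpy() instead of casting the buffer pointer.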
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c index 87f449340202..1784530b8387 100644 --- a/drivers/char/tpm/tpm-dev-common.c +++ b/drivers/char/tpm/tpm-dev-common.c @@ -189,15 +189,6 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf, goto out; } - /* atomic tpm command send and result receive. We only hold the ops - * lock during this period so that the tpm can be unregistered even if - * the char dev is held open. - */ - if (tpm_try_get_ops(priv->chip)) { - ret = -EPIPE; - goto out; - } - priv->response_length = 0; priv->response_read = false; *off = 0; @@ -211,11 +202,19 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf, if (file->f_flags & O_NONBLOCK) { priv->command_enqueued = true; queue_work(tpm_dev_wq, &priv->async_work); - tpm_put_ops(priv->chip); mutex_unlock(&priv->buffer_mutex); return size; } + /* atomic tpm command send and result receive. We only hold the ops + * lock during this period so that the tpm can be unregistered even if + * the char dev is held open. + */ + if (tpm_try_get_ops(priv->chip)) { + ret = -EPIPE; + goto out; + } + ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer, sizeof(priv->data_buffer)); tpm_put_ops(priv->chip); diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c index 09fe45246b8c..994385bf37c0 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.c +++ b/drivers/char/tpm/tpm_ibmvtpm.c @@ -683,13 +683,6 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev, if (rc) goto init_irq_cleanup; - if (!strcmp(id->compat, "IBM,vtpm20")) { - chip->flags |= TPM_CHIP_FLAG_TPM2; - rc = tpm2_get_cc_attrs_tbl(chip); - if (rc) - goto init_irq_cleanup; - } - if (!wait_event_timeout(ibmvtpm->crq_queue.wq, ibmvtpm->rtce_buf != NULL, HZ)) { @@ -697,6 +690,13 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev, goto init_irq_cleanup; } + if (!strcmp(id->compat, "IBM,vtpm20")) { + chip->flags |= TPM_CHIP_FLAG_TPM2; + rc = tpm2_get_cc_attrs_tbl(chip); + if (rc) + goto init_irq_cleanup; + } + return tpm_chip_register(chip); init_irq_cleanup: do { diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index e7df342a317d..0b214963539d 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -235,6 +235,13 @@ static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev, return tpm_tis_init(&pnp_dev->dev, &tpm_info); } +/* + * There is a known bug caused by 93e1b7d42e1e ("[PATCH] tpm: add HID module + * parameter"). This commit added IFX0102 device ID, which is also used by + * tpm_infineon but ignored to add quirks to probe which driver ought to be + * used. 
+ */ + static struct pnp_device_id tpm_pnp_tbl[] = { {"PNP0C31", 0}, /* TPM */ {"ATM1200", 0}, /* Atmel */ diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index 2435216bd10a..65ab1b027949 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -1085,7 +1085,7 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, return 0; out_err: - if ((chip->ops != NULL) && (chip->ops->clk_enable != NULL)) + if (chip->ops->clk_enable != NULL) chip->ops->clk_enable(chip, false); tpm_tis_remove(chip); diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c index d96755935529..3856f6ebcb34 100644 --- a/drivers/char/tpm/tpm_tis_spi_main.c +++ b/drivers/char/tpm/tpm_tis_spi_main.c @@ -53,8 +53,6 @@ static int tpm_tis_spi_flow_control(struct tpm_tis_spi_phy *phy, if ((phy->iobuf[3] & 0x01) == 0) { // handle SPI wait states - phy->iobuf[0] = 0; - for (i = 0; i < TPM_RETRY; i++) { spi_xfer->len = 1; spi_message_init(&m); @@ -104,6 +102,8 @@ int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len, if (ret < 0) goto exit; + /* Flow control transfers are receive only */ + spi_xfer.tx_buf = NULL; ret = phy->flow_control(phy, &spi_xfer); if (ret < 0) goto exit; @@ -113,9 +113,8 @@ int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len, spi_xfer.delay.value = 5; spi_xfer.delay.unit = SPI_DELAY_UNIT_USECS; - if (in) { - spi_xfer.tx_buf = NULL; - } else if (out) { + if (out) { + spi_xfer.tx_buf = phy->iobuf; spi_xfer.rx_buf = NULL; memcpy(phy->iobuf, out, transfer_len); out += transfer_len; @@ -288,6 +287,7 @@ static struct spi_driver tpm_tis_spi_driver = { .pm = &tpm_tis_pm, .of_match_table = of_match_ptr(of_tis_spi_match), .acpi_match_table = ACPI_PTR(acpi_tis_spi_match), + .probe_type = PROBE_PREFER_ASYNCHRONOUS, }, .probe = tpm_tis_spi_driver_probe, .remove = tpm_tis_spi_remove, diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 00c5e3acee46..ca691bce9791 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -2116,6 +2116,7 @@ static struct virtio_device_id id_table[] = { { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID }, { 0 }, }; +MODULE_DEVICE_TABLE(virtio, id_table); static unsigned int features[] = { VIRTIO_CONSOLE_F_SIZE, @@ -2128,6 +2129,7 @@ static struct virtio_device_id rproc_serial_id_table[] = { #endif { 0 }, }; +MODULE_DEVICE_TABLE(virtio, rproc_serial_id_table); static unsigned int rproc_serial_features[] = { }; @@ -2280,6 +2282,5 @@ static void __exit fini(void) module_init(init); module_exit(fini); -MODULE_DEVICE_TABLE(virtio, id_table); MODULE_DESCRIPTION("Virtio console driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 69934c0c3dd8..326f91b2dda9 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -50,6 +50,7 @@ source "drivers/clk/versatile/Kconfig" config CLK_HSDK bool "PLL Driver for HSDK platform" depends on OF || COMPILE_TEST + depends on IOMEM help This driver supports the HSDK core, system, ddr, tunnel and hdmi PLLs control. 
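The virtio_console hunk above adds a MODULE_DEVICE_TABLE() entry for each of its two id tables and keeps the macro next to the table it describes, so module aliases are generated for the rproc serial table as well. A minimal sketch of that convention follows; it assumes kernel context, and demo_id_table is an illustrative name, not one from the patch.

/*
 * Keep MODULE_DEVICE_TABLE() adjacent to the table it exports so alias
 * generation is not missed when further tables are added later. The
 * demo_id_table name is hypothetical.
 */
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>

static struct virtio_device_id demo_id_table[] = {
	{ VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
MODULE_DEVICE_TABLE(virtio, demo_id_table);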
diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c index 99afc949925f..177368cac6dd 100644 --- a/drivers/clk/clk-ast2600.c +++ b/drivers/clk/clk-ast2600.c @@ -131,6 +131,18 @@ static const struct clk_div_table ast2600_eclk_div_table[] = { { 0 } }; +static const struct clk_div_table ast2600_emmc_extclk_div_table[] = { + { 0x0, 2 }, + { 0x1, 4 }, + { 0x2, 6 }, + { 0x3, 8 }, + { 0x4, 10 }, + { 0x5, 12 }, + { 0x6, 14 }, + { 0x7, 16 }, + { 0 } +}; + static const struct clk_div_table ast2600_mac_div_table[] = { { 0x0, 4 }, { 0x1, 4 }, @@ -390,6 +402,11 @@ static struct clk_hw *aspeed_g6_clk_hw_register_gate(struct device *dev, return hw; } +static const char *const emmc_extclk_parent_names[] = { + "emmc_extclk_hpll_in", + "mpll", +}; + static const char * const vclk_parent_names[] = { "dpll", "d1pll", @@ -459,16 +476,32 @@ static int aspeed_g6_clk_probe(struct platform_device *pdev) return PTR_ERR(hw); aspeed_g6_clk_data->hws[ASPEED_CLK_UARTX] = hw; - /* EMMC ext clock divider */ - hw = clk_hw_register_gate(dev, "emmc_extclk_gate", "hpll", 0, - scu_g6_base + ASPEED_G6_CLK_SELECTION1, 15, 0, - &aspeed_g6_clk_lock); + /* EMMC ext clock */ + hw = clk_hw_register_fixed_factor(dev, "emmc_extclk_hpll_in", "hpll", + 0, 1, 2); if (IS_ERR(hw)) return PTR_ERR(hw); - hw = clk_hw_register_divider_table(dev, "emmc_extclk", "emmc_extclk_gate", 0, - scu_g6_base + ASPEED_G6_CLK_SELECTION1, 12, 3, 0, - ast2600_div_table, - &aspeed_g6_clk_lock); + + hw = clk_hw_register_mux(dev, "emmc_extclk_mux", + emmc_extclk_parent_names, + ARRAY_SIZE(emmc_extclk_parent_names), 0, + scu_g6_base + ASPEED_G6_CLK_SELECTION1, 11, 1, + 0, &aspeed_g6_clk_lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hw = clk_hw_register_gate(dev, "emmc_extclk_gate", "emmc_extclk_mux", + 0, scu_g6_base + ASPEED_G6_CLK_SELECTION1, + 15, 0, &aspeed_g6_clk_lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hw = clk_hw_register_divider_table(dev, "emmc_extclk", + "emmc_extclk_gate", 0, + scu_g6_base + + ASPEED_G6_CLK_SELECTION1, 12, + 3, 0, ast2600_emmc_extclk_div_table, + &aspeed_g6_clk_lock); if (IS_ERR(hw)) return PTR_ERR(hw); aspeed_g6_clk_data->hws[ASPEED_CLK_EMMC] = hw; diff --git a/drivers/clk/mvebu/Kconfig b/drivers/clk/mvebu/Kconfig index ded07b0bd0d5..557d6213783c 100644 --- a/drivers/clk/mvebu/Kconfig +++ b/drivers/clk/mvebu/Kconfig @@ -42,6 +42,7 @@ config ARMADA_AP806_SYSCON config ARMADA_AP_CPU_CLK bool + select ARMADA_AP_CP_HELPER config ARMADA_CP110_SYSCON bool diff --git a/drivers/clk/sifive/fu540-prci.c b/drivers/clk/sifive/fu540-prci.c index 6282ee2f361c..a8901f90a61a 100644 --- a/drivers/clk/sifive/fu540-prci.c +++ b/drivers/clk/sifive/fu540-prci.c @@ -586,7 +586,10 @@ static int sifive_fu540_prci_probe(struct platform_device *pdev) struct __prci_data *pd; int r; - pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); + pd = devm_kzalloc(dev, + struct_size(pd, hw_clks.hws, + ARRAY_SIZE(__prci_init_clocks)), + GFP_KERNEL); if (!pd) return -ENOMEM; diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index ecf7b7db2d05..6c3e84180146 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -480,6 +480,14 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = { .set_next_event_virt = erratum_set_next_event_tval_virt, }, #endif +#ifdef CONFIG_ARM64_ERRATUM_1418040 + { + .match_type = ate_match_local_cap_id, + .id = (void *)ARM64_WORKAROUND_1418040, + .desc = "ARM erratum 1418040", + .disable_compat_vdso = true, + }, +#endif }; 
typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *, @@ -566,6 +574,9 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa if (wa->read_cntvct_el0) { clocksource_counter.vdso_clock_mode = VDSO_CLOCKMODE_NONE; vdso_default = VDSO_CLOCKMODE_NONE; + } else if (wa->disable_compat_vdso && vdso_default != VDSO_CLOCKMODE_NONE) { + vdso_default = VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT; + clocksource_counter.vdso_clock_mode = vdso_default; } } diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c index aa13708c2bc3..d22cfae1b019 100644 --- a/drivers/counter/104-quad-8.c +++ b/drivers/counter/104-quad-8.c @@ -1274,18 +1274,26 @@ static ssize_t quad8_signal_cable_fault_read(struct counter_device *counter, struct counter_signal *signal, void *private, char *buf) { - const struct quad8_iio *const priv = counter->priv; + struct quad8_iio *const priv = counter->priv; const size_t channel_id = signal->id / 2; - const bool disabled = !(priv->cable_fault_enable & BIT(channel_id)); + bool disabled; unsigned int status; unsigned int fault; - if (disabled) + mutex_lock(&priv->lock); + + disabled = !(priv->cable_fault_enable & BIT(channel_id)); + + if (disabled) { + mutex_unlock(&priv->lock); return -EINVAL; + } /* Logic 0 = cable fault */ status = inb(priv->base + QUAD8_DIFF_ENCODER_CABLE_STATUS); + mutex_unlock(&priv->lock); + /* Mask respective channel and invert logic */ fault = !(status & BIT(channel_id)); @@ -1317,6 +1325,8 @@ static ssize_t quad8_signal_cable_fault_enable_write( if (ret) return ret; + mutex_lock(&priv->lock); + if (enable) priv->cable_fault_enable |= BIT(channel_id); else @@ -1327,6 +1337,8 @@ static ssize_t quad8_signal_cable_fault_enable_write( outb(cable_fault_enable, priv->base + QUAD8_DIFF_ENCODER_CABLE_STATUS); + mutex_unlock(&priv->lock); + return len; } @@ -1353,6 +1365,8 @@ static ssize_t quad8_signal_fck_prescaler_write(struct counter_device *counter, if (ret) return ret; + mutex_lock(&priv->lock); + priv->fck_prescaler[channel_id] = prescaler; /* Reset Byte Pointer */ @@ -1363,6 +1377,8 @@ static ssize_t quad8_signal_fck_prescaler_write(struct counter_device *counter, outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_PRESET_PSC, base_offset + 1); + mutex_unlock(&priv->lock); + return len; } diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 8e23a698ce04..7e0f7880b21a 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -2464,7 +2464,7 @@ static struct cpufreq_driver intel_cpufreq = { .name = "intel_cpufreq", }; -static struct cpufreq_driver *default_driver = &intel_pstate; +static struct cpufreq_driver *default_driver; static void intel_pstate_driver_cleanup(void) { @@ -2677,6 +2677,8 @@ static struct acpi_platform_list plat_info[] __initdata = { { } /* End */ }; +#define BITMASK_OOB (BIT(8) | BIT(18)) + static bool __init intel_pstate_platform_pwr_mgmt_exists(void) { const struct x86_cpu_id *id; @@ -2686,8 +2688,9 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void) id = x86_match_cpu(intel_pstate_cpu_oob_ids); if (id) { rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr); - if (misc_pwr & (1 << 8)) { - pr_debug("Bit 8 in the MISC_PWR_MGMT MSR set\n"); + if (misc_pwr & BITMASK_OOB) { + pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n"); + pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n"); return true; } } @@ -2755,6 +2758,7 @@ static int __init intel_pstate_init(void) hwp_active++; hwp_mode_bdw = 
id->driver_data; intel_pstate.attr = hwp_cpufreq_attrs; + default_driver = &intel_pstate; goto hwp_cpu_matched; } } else { @@ -2772,7 +2776,8 @@ static int __init intel_pstate_init(void) return -ENODEV; } /* Without HWP start in the passive mode. */ - default_driver = &intel_cpufreq; + if (!default_driver) + default_driver = &intel_cpufreq; hwp_cpu_matched: /* @@ -2817,6 +2822,8 @@ static int __init intel_pstate_setup(char *str) if (!strcmp(str, "disable")) { no_load = 1; + } else if (!strcmp(str, "active")) { + default_driver = &intel_pstate; } else if (!strcmp(str, "passive")) { default_driver = &intel_cpufreq; no_hwp = 1; diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index c149d9e20dfd..87197319ab06 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -186,9 +186,10 @@ int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev) * be frozen safely. */ index = find_deepest_state(drv, dev, U64_MAX, 0, true); - if (index > 0) + if (index > 0) { enter_s2idle_proper(drv, dev, index); - + local_irq_enable(); + } return index; } #endif /* CONFIG_SUSPEND */ diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index e018ef80451e..1699a8e309ef 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -45,46 +45,20 @@ static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen) size_t ret = 0; dmabuf = dentry->d_fsdata; - dma_resv_lock(dmabuf->resv, NULL); + spin_lock(&dmabuf->name_lock); if (dmabuf->name) ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN); - dma_resv_unlock(dmabuf->resv); + spin_unlock(&dmabuf->name_lock); return dynamic_dname(dentry, buffer, buflen, "/%s:%s", dentry->d_name.name, ret > 0 ? name : ""); } -static const struct dentry_operations dma_buf_dentry_ops = { - .d_dname = dmabuffs_dname, -}; - -static struct vfsmount *dma_buf_mnt; - -static int dma_buf_fs_init_context(struct fs_context *fc) -{ - struct pseudo_fs_context *ctx; - - ctx = init_pseudo(fc, DMA_BUF_MAGIC); - if (!ctx) - return -ENOMEM; - ctx->dops = &dma_buf_dentry_ops; - return 0; -} - -static struct file_system_type dma_buf_fs_type = { - .name = "dmabuf", - .init_fs_context = dma_buf_fs_init_context, - .kill_sb = kill_anon_super, -}; - -static int dma_buf_release(struct inode *inode, struct file *file) +static void dma_buf_release(struct dentry *dentry) { struct dma_buf *dmabuf; - if (!is_dma_buf_file(file)) - return -EINVAL; - - dmabuf = file->private_data; + dmabuf = dentry->d_fsdata; BUG_ON(dmabuf->vmapping_counter); @@ -110,9 +84,32 @@ static int dma_buf_release(struct inode *inode, struct file *file) module_put(dmabuf->owner); kfree(dmabuf->name); kfree(dmabuf); +} + +static const struct dentry_operations dma_buf_dentry_ops = { + .d_dname = dmabuffs_dname, + .d_release = dma_buf_release, +}; + +static struct vfsmount *dma_buf_mnt; + +static int dma_buf_fs_init_context(struct fs_context *fc) +{ + struct pseudo_fs_context *ctx; + + ctx = init_pseudo(fc, DMA_BUF_MAGIC); + if (!ctx) + return -ENOMEM; + ctx->dops = &dma_buf_dentry_ops; return 0; } +static struct file_system_type dma_buf_fs_type = { + .name = "dmabuf", + .init_fs_context = dma_buf_fs_init_context, + .kill_sb = kill_anon_super, +}; + static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma) { struct dma_buf *dmabuf; @@ -341,8 +338,10 @@ static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf) kfree(name); goto out_unlock; } + spin_lock(&dmabuf->name_lock); kfree(dmabuf->name); dmabuf->name = name; 
+ spin_unlock(&dmabuf->name_lock); out_unlock: dma_resv_unlock(dmabuf->resv); @@ -405,14 +404,13 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file) /* Don't count the temporary reference taken inside procfs seq_show */ seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1); seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name); - dma_resv_lock(dmabuf->resv, NULL); + spin_lock(&dmabuf->name_lock); if (dmabuf->name) seq_printf(m, "name:\t%s\n", dmabuf->name); - dma_resv_unlock(dmabuf->resv); + spin_unlock(&dmabuf->name_lock); } static const struct file_operations dma_buf_fops = { - .release = dma_buf_release, .mmap = dma_buf_mmap_internal, .llseek = dma_buf_llseek, .poll = dma_buf_poll, @@ -546,6 +544,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) dmabuf->size = exp_info->size; dmabuf->exp_name = exp_info->exp_name; dmabuf->owner = exp_info->owner; + spin_lock_init(&dmabuf->name_lock); init_waitqueue_head(&dmabuf->poll); dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll; dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0; diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index b175229a4b01..604f80357931 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -1176,6 +1176,8 @@ static int dmatest_run_set(const char *val, const struct kernel_param *kp) } else if (dmatest_run) { if (!is_threaded_test_pending(info)) { pr_info("No channels configured, continue with any\n"); + if (!is_threaded_test_run(info)) + stop_threaded_test(info); add_threaded_test(info); } start_threaded_tests(info); diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 21cb2a58dbd2..a1b56f52db2f 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -118,16 +118,11 @@ static void dwc_initialize(struct dw_dma_chan *dwc) { struct dw_dma *dw = to_dw_dma(dwc->chan.device); - if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags)) - return; - dw->initialize_chan(dwc); /* Enable interrupts */ channel_set_bit(dw, MASK.XFER, dwc->mask); channel_set_bit(dw, MASK.ERROR, dwc->mask); - - set_bit(DW_DMA_IS_INITIALIZED, &dwc->flags); } /*----------------------------------------------------------------------*/ @@ -954,8 +949,6 @@ static void dwc_issue_pending(struct dma_chan *chan) void do_dw_dma_off(struct dw_dma *dw) { - unsigned int i; - dma_writel(dw, CFG, 0); channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); @@ -966,9 +959,6 @@ void do_dw_dma_off(struct dw_dma *dw) while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) cpu_relax(); - - for (i = 0; i < dw->dma.chancnt; i++) - clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags); } void do_dw_dma_on(struct dw_dma *dw) @@ -1032,8 +1022,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan) /* Clear custom channel configuration */ memset(&dwc->dws, 0, sizeof(struct dw_dma_slave)); - clear_bit(DW_DMA_IS_INITIALIZED, &dwc->flags); - /* Disable interrupts */ channel_clear_bit(dw, MASK.XFER, dwc->mask); channel_clear_bit(dw, MASK.BLOCK, dwc->mask); diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c index 5697c3622699..930ae268c497 100644 --- a/drivers/dma/fsl-edma-common.c +++ b/drivers/dma/fsl-edma-common.c @@ -352,26 +352,28 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan, /* * TCD parameters are stored in struct fsl_edma_hw_tcd in little * endian format. However, we need to load the TCD registers in - * big- or little-endian obeying the eDMA engine model endian. 
+ * big- or little-endian obeying the eDMA engine model endian, + * and this is performed from specific edma_write functions */ edma_writew(edma, 0, ®s->tcd[ch].csr); - edma_writel(edma, le32_to_cpu(tcd->saddr), ®s->tcd[ch].saddr); - edma_writel(edma, le32_to_cpu(tcd->daddr), ®s->tcd[ch].daddr); - edma_writew(edma, le16_to_cpu(tcd->attr), ®s->tcd[ch].attr); - edma_writew(edma, le16_to_cpu(tcd->soff), ®s->tcd[ch].soff); + edma_writel(edma, (s32)tcd->saddr, ®s->tcd[ch].saddr); + edma_writel(edma, (s32)tcd->daddr, ®s->tcd[ch].daddr); - edma_writel(edma, le32_to_cpu(tcd->nbytes), ®s->tcd[ch].nbytes); - edma_writel(edma, le32_to_cpu(tcd->slast), ®s->tcd[ch].slast); + edma_writew(edma, (s16)tcd->attr, ®s->tcd[ch].attr); + edma_writew(edma, tcd->soff, ®s->tcd[ch].soff); - edma_writew(edma, le16_to_cpu(tcd->citer), ®s->tcd[ch].citer); - edma_writew(edma, le16_to_cpu(tcd->biter), ®s->tcd[ch].biter); - edma_writew(edma, le16_to_cpu(tcd->doff), ®s->tcd[ch].doff); + edma_writel(edma, (s32)tcd->nbytes, ®s->tcd[ch].nbytes); + edma_writel(edma, (s32)tcd->slast, ®s->tcd[ch].slast); - edma_writel(edma, le32_to_cpu(tcd->dlast_sga), + edma_writew(edma, (s16)tcd->citer, ®s->tcd[ch].citer); + edma_writew(edma, (s16)tcd->biter, ®s->tcd[ch].biter); + edma_writew(edma, (s16)tcd->doff, ®s->tcd[ch].doff); + + edma_writel(edma, (s32)tcd->dlast_sga, ®s->tcd[ch].dlast_sga); - edma_writew(edma, le16_to_cpu(tcd->csr), ®s->tcd[ch].csr); + edma_writew(edma, (s16)tcd->csr, ®s->tcd[ch].csr); } static inline @@ -589,6 +591,8 @@ void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan) { struct virt_dma_desc *vdesc; + lockdep_assert_held(&fsl_chan->vchan.lock); + vdesc = vchan_next_desc(&fsl_chan->vchan); if (!vdesc) return; diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h index 67e422590c9a..ec1169741de1 100644 --- a/drivers/dma/fsl-edma-common.h +++ b/drivers/dma/fsl-edma-common.h @@ -33,7 +33,7 @@ #define EDMA_TCD_ATTR_DSIZE_16BIT BIT(0) #define EDMA_TCD_ATTR_DSIZE_32BIT BIT(1) #define EDMA_TCD_ATTR_DSIZE_64BIT (BIT(0) | BIT(1)) -#define EDMA_TCD_ATTR_DSIZE_32BYTE (BIT(3) | BIT(0)) +#define EDMA_TCD_ATTR_DSIZE_32BYTE (BIT(2) | BIT(0)) #define EDMA_TCD_ATTR_SSIZE_8BIT 0 #define EDMA_TCD_ATTR_SSIZE_16BIT (EDMA_TCD_ATTR_DSIZE_16BIT << 8) #define EDMA_TCD_ATTR_SSIZE_32BIT (EDMA_TCD_ATTR_DSIZE_32BIT << 8) diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index eff7ebd8cf35..90bb72af306c 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c @@ -45,6 +45,13 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id) fsl_chan = &fsl_edma->chans[ch]; spin_lock(&fsl_chan->vchan.lock); + + if (!fsl_chan->edesc) { + /* terminate_all called before */ + spin_unlock(&fsl_chan->vchan.lock); + continue; + } + if (!fsl_chan->edesc->iscyclic) { list_del(&fsl_chan->edesc->vdesc.node); vchan_cookie_complete(&fsl_chan->edesc->vdesc); diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c index ff49847e37a8..cb376cf6a2d2 100644 --- a/drivers/dma/idxd/cdev.c +++ b/drivers/dma/idxd/cdev.c @@ -74,6 +74,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp) struct idxd_device *idxd; struct idxd_wq *wq; struct device *dev; + int rc = 0; wq = inode_wq(inode); idxd = wq->idxd; @@ -81,17 +82,27 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp) dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq)); - if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq)) - return -EBUSY; - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; + 
mutex_lock(&wq->wq_lock); + + if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq)) { + rc = -EBUSY; + goto failed; + } + ctx->wq = wq; filp->private_data = ctx; idxd_wq_get(wq); + mutex_unlock(&wq->wq_lock); return 0; + + failed: + mutex_unlock(&wq->wq_lock); + kfree(ctx); + return rc; } static int idxd_cdev_release(struct inode *node, struct file *filep) @@ -105,7 +116,9 @@ static int idxd_cdev_release(struct inode *node, struct file *filep) filep->private_data = NULL; kfree(ctx); + mutex_lock(&wq->wq_lock); idxd_wq_put(wq); + mutex_unlock(&wq->wq_lock); return 0; } diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index 8d79a8787104..8d2718c585dc 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -320,6 +320,31 @@ void idxd_wq_unmap_portal(struct idxd_wq *wq) devm_iounmap(dev, wq->dportal); } +void idxd_wq_disable_cleanup(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct device *dev = &idxd->pdev->dev; + int i, wq_offset; + + lockdep_assert_held(&idxd->dev_lock); + memset(&wq->wqcfg, 0, sizeof(wq->wqcfg)); + wq->type = IDXD_WQT_NONE; + wq->size = 0; + wq->group = NULL; + wq->threshold = 0; + wq->priority = 0; + clear_bit(WQ_FLAG_DEDICATED, &wq->flags); + memset(wq->name, 0, WQ_NAME_SIZE); + + for (i = 0; i < 8; i++) { + wq_offset = idxd->wqcfg_offset + wq->id * 32 + i * sizeof(u32); + iowrite32(0, idxd->reg_base + wq_offset); + dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", + wq->id, i, wq_offset, + ioread32(idxd->reg_base + wq_offset)); + } +} + /* Device control bits */ static inline bool idxd_is_enabled(struct idxd_device *idxd) { diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h index b8f8a363b4a7..908c8d0ef3ab 100644 --- a/drivers/dma/idxd/idxd.h +++ b/drivers/dma/idxd/idxd.h @@ -290,6 +290,7 @@ int idxd_wq_enable(struct idxd_wq *wq); int idxd_wq_disable(struct idxd_wq *wq); int idxd_wq_map_portal(struct idxd_wq *wq); void idxd_wq_unmap_portal(struct idxd_wq *wq); +void idxd_wq_disable_cleanup(struct idxd_wq *wq); /* submission */ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc); diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c index 6510791b9921..8a35f58da689 100644 --- a/drivers/dma/idxd/irq.c +++ b/drivers/dma/idxd/irq.c @@ -141,7 +141,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data) iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET); if (!err) - return IRQ_HANDLED; + goto out; gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET); if (gensts.state == IDXD_DEVICE_STATE_HALT) { @@ -162,6 +162,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data) spin_unlock_bh(&idxd->dev_lock); } + out: idxd_unmask_msix_vector(idxd, irq_entry->id); return IRQ_HANDLED; } diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c index 052dae5d6ddd..2e2c5082f322 100644 --- a/drivers/dma/idxd/sysfs.c +++ b/drivers/dma/idxd/sysfs.c @@ -315,6 +315,11 @@ static int idxd_config_bus_remove(struct device *dev) idxd_unregister_dma_device(idxd); spin_lock_irqsave(&idxd->dev_lock, flags); rc = idxd_device_disable(idxd); + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + idxd_wq_disable_cleanup(wq); + } spin_unlock_irqrestore(&idxd->dev_lock, flags); module_put(THIS_MODULE); if (rc < 0) diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 91774039ae5d..270992c4fe47 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -1331,8 +1331,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan) sdma_channel_synchronize(chan); - if 
(sdmac->event_id0 >= 0) - sdma_event_disable(sdmac, sdmac->event_id0); + sdma_event_disable(sdmac, sdmac->event_id0); if (sdmac->event_id1) sdma_event_disable(sdmac, sdmac->event_id1); @@ -1632,11 +1631,9 @@ static int sdma_config(struct dma_chan *chan, memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg)); /* Set ENBLn earlier to make sure dma request triggered after that */ - if (sdmac->event_id0 >= 0) { - if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events) - return -EINVAL; - sdma_event_enable(sdmac, sdmac->event_id0); - } + if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events) + return -EINVAL; + sdma_event_enable(sdmac, sdmac->event_id0); if (sdmac->event_id1) { if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 8ad0ad861c86..fd782aee02d9 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -26,6 +26,18 @@ #include "../dmaengine.h" +int completion_timeout = 200; +module_param(completion_timeout, int, 0644); +MODULE_PARM_DESC(completion_timeout, + "set ioat completion timeout [msec] (default 200 [msec])"); +int idle_timeout = 2000; +module_param(idle_timeout, int, 0644); +MODULE_PARM_DESC(idle_timeout, + "set ioat idel timeout [msec] (default 2000 [msec])"); + +#define IDLE_TIMEOUT msecs_to_jiffies(idle_timeout) +#define COMPLETION_TIMEOUT msecs_to_jiffies(completion_timeout) + static char *chanerr_str[] = { "DMA Transfer Source Address Error", "DMA Transfer Destination Address Error", diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index e6b622e1ba92..f7f31fdf14cf 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -104,8 +104,6 @@ struct ioatdma_chan { #define IOAT_RUN 5 #define IOAT_CHAN_ACTIVE 6 struct timer_list timer; - #define COMPLETION_TIMEOUT msecs_to_jiffies(100) - #define IDLE_TIMEOUT msecs_to_jiffies(2000) #define RESET_DELAY msecs_to_jiffies(100) struct ioatdma_device *ioat_dma; dma_addr_t completion_dma; diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma.c index e15bd15a9ef6..e12b754e6398 100644 --- a/drivers/dma/mcf-edma.c +++ b/drivers/dma/mcf-edma.c @@ -35,6 +35,13 @@ static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id) mcf_chan = &mcf_edma->chans[ch]; spin_lock(&mcf_chan->vchan.lock); + + if (!mcf_chan->edesc) { + /* terminate_all called before */ + spin_unlock(&mcf_chan->vchan.lock); + continue; + } + if (!mcf_chan->edesc->iscyclic) { list_del(&mcf_chan->edesc->vdesc.node); vchan_cookie_complete(&mcf_chan->edesc->vdesc); diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c index b218a013c260..8f7ceb698226 100644 --- a/drivers/dma/sh/usb-dmac.c +++ b/drivers/dma/sh/usb-dmac.c @@ -586,6 +586,8 @@ static void usb_dmac_isr_transfer_end(struct usb_dmac_chan *chan) desc->residue = usb_dmac_get_current_residue(chan, desc, desc->sg_index - 1); desc->done_cookie = desc->vd.tx.cookie; + desc->vd.tx_result.result = DMA_TRANS_NOERROR; + desc->vd.tx_result.residue = desc->residue; vchan_cookie_complete(&desc->vd); /* Restart the next transfer if this driver has a next desc */ diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index db58d7e4f9fe..c5fa2ef74abc 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c @@ -658,6 +658,7 @@ static int tegra_adma_alloc_chan_resources(struct dma_chan *dc) ret = pm_runtime_get_sync(tdc2dev(tdc)); if (ret < 0) { + pm_runtime_put_noidle(tdc2dev(tdc)); free_irq(tdc->irq, tdc); return ret; } @@ -869,8 +870,10 @@ static int 
tegra_adma_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); ret = pm_runtime_get_sync(&pdev->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(&pdev->dev); goto rpm_disable; + } ret = tegra_adma_init(tdma); if (ret) diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c index 0b8f3dd6b146..77e8e67d995b 100644 --- a/drivers/dma/ti/k3-udma-private.c +++ b/drivers/dma/ti/k3-udma-private.c @@ -42,6 +42,7 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property) ud = platform_get_drvdata(pdev); if (!ud) { pr_debug("UDMA has not been probed\n"); + put_device(&pdev->dev); return ERR_PTR(-EPROBE_DEFER); } diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index c91e2dc1bb72..6c879a734360 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -1753,7 +1753,8 @@ static int udma_alloc_chan_resources(struct dma_chan *chan) dev_err(ud->ddev.dev, "Descriptor pool allocation failed\n"); uc->use_dma_pool = false; - return -ENOMEM; + ret = -ENOMEM; + goto err_cleanup; } } @@ -1773,16 +1774,18 @@ static int udma_alloc_chan_resources(struct dma_chan *chan) ret = udma_get_chan_pair(uc); if (ret) - return ret; + goto err_cleanup; ret = udma_alloc_tx_resources(uc); - if (ret) - return ret; + if (ret) { + udma_put_rchan(uc); + goto err_cleanup; + } ret = udma_alloc_rx_resources(uc); if (ret) { udma_free_tx_resources(uc); - return ret; + goto err_cleanup; } uc->config.src_thread = ud->psil_base + uc->tchan->id; @@ -1800,10 +1803,8 @@ static int udma_alloc_chan_resources(struct dma_chan *chan) uc->id); ret = udma_alloc_tx_resources(uc); - if (ret) { - uc->config.remote_thread_id = -1; - return ret; - } + if (ret) + goto err_cleanup; uc->config.src_thread = ud->psil_base + uc->tchan->id; uc->config.dst_thread = uc->config.remote_thread_id; @@ -1820,10 +1821,8 @@ static int udma_alloc_chan_resources(struct dma_chan *chan) uc->id); ret = udma_alloc_rx_resources(uc); - if (ret) { - uc->config.remote_thread_id = -1; - return ret; - } + if (ret) + goto err_cleanup; uc->config.src_thread = uc->config.remote_thread_id; uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | @@ -1838,7 +1837,9 @@ static int udma_alloc_chan_resources(struct dma_chan *chan) /* Can not happen */ dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n", __func__, uc->id, uc->config.dir); - return -EINVAL; + ret = -EINVAL; + goto err_cleanup; + } /* check if the channel configuration was successful */ @@ -1847,7 +1848,7 @@ static int udma_alloc_chan_resources(struct dma_chan *chan) if (udma_is_chan_running(uc)) { dev_warn(ud->dev, "chan%d: is running!\n", uc->id); - udma_stop(uc); + udma_reset_chan(uc, false); if (udma_is_chan_running(uc)) { dev_err(ud->dev, "chan%d: won't stop!\n", uc->id); ret = -EBUSY; @@ -1906,8 +1907,6 @@ static int udma_alloc_chan_resources(struct dma_chan *chan) udma_reset_rings(uc); - INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work, - udma_check_tx_completion); return 0; err_irq_free: @@ -1919,7 +1918,7 @@ err_psi_free: err_res_free: udma_free_tx_resources(uc); udma_free_rx_resources(uc); - +err_cleanup: udma_reset_uchan(uc); if (uc->use_dma_pool) { @@ -3019,7 +3018,6 @@ static void udma_free_chan_resources(struct dma_chan *chan) } cancel_delayed_work_sync(&uc->tx_drain.work); - destroy_delayed_work_on_stack(&uc->tx_drain.work); if (uc->irq_num_ring > 0) { free_irq(uc->irq_num_ring, uc); @@ -3593,7 +3591,7 @@ static int udma_probe(struct platform_device *pdev) return ret; } - ret = 
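The udma_alloc_chan_resources() rework above replaces scattered early returns with a single err_cleanup unwind path, so every partially acquired resource is released exactly once in reverse order. A generic, runnable sketch of that goto-unwind style (resource names are illustrative; on success the buffers would be handed to the caller):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int setup_channel(int fail_hw)
{
	char *descs, *rings;
	int ret;

	descs = malloc(64);
	if (!descs)
		return -ENOMEM;

	rings = malloc(64);
	if (!rings) {
		ret = -ENOMEM;
		goto err_free_descs;
	}

	if (fail_hw) {			/* pretend a hardware handshake failed */
		ret = -EIO;
		goto err_free_rings;
	}

	return 0;

err_free_rings:
	free(rings);
err_free_descs:
	free(descs);
	return ret;
}

int main(void)
{
	printf("setup_channel() = %d\n", setup_channel(1));	/* -EIO, nothing leaked */
	return 0;
}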
of_property_read_u32(navss_node, "ti,udma-atype", &ud->atype); + ret = of_property_read_u32(dev->of_node, "ti,udma-atype", &ud->atype); if (!ret && ud->atype > 2) { dev_err(dev, "Invalid atype: %u\n", ud->atype); return -EINVAL; @@ -3711,6 +3709,7 @@ static int udma_probe(struct platform_device *pdev) tasklet_init(&uc->vc.task, udma_vchan_complete, (unsigned long)&uc->vc); init_completion(&uc->teardown_completed); + INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion); } ret = dma_async_device_register(&ud->ddev); diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index ef90070a9194..6262f6370c5d 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -269,6 +269,8 @@ static int get_scrub_rate(struct mem_ctl_info *mci) if (pvt->model == 0x60) amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval); + else + amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval); } else { amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval); } diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index e6fc022bc87e..3939699e62fe 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -278,3 +278,14 @@ config EFI_EARLYCON depends on SERIAL_EARLYCON && !ARM && !IA64 select FONT_SUPPORT select ARCH_USE_MEMREMAP_PROT + +config EFI_CUSTOM_SSDT_OVERLAYS + bool "Load custom ACPI SSDT overlay from an EFI variable" + depends on EFI_VARS && ACPI + default ACPI_TABLE_UPGRADE + help + Allow loading of an ACPI SSDT overlay from an EFI variable specified + by a kernel command line option. + + See Documentation/admin-guide/acpi/ssdt-overlays.rst for more + information. diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c index c697e70ca7e7..71c445d20258 100644 --- a/drivers/firmware/efi/arm-init.c +++ b/drivers/firmware/efi/arm-init.c @@ -52,9 +52,11 @@ static phys_addr_t __init efi_to_phys(unsigned long addr) } static __initdata unsigned long screen_info_table = EFI_INVALID_TABLE_ADDR; +static __initdata unsigned long cpu_state_table = EFI_INVALID_TABLE_ADDR; static const efi_config_table_type_t arch_tables[] __initconst = { {LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID, &screen_info_table}, + {LINUX_EFI_ARM_CPU_STATE_TABLE_GUID, &cpu_state_table}, {} }; @@ -62,7 +64,8 @@ static void __init init_screen_info(void) { struct screen_info *si; - if (screen_info_table != EFI_INVALID_TABLE_ADDR) { + if (IS_ENABLED(CONFIG_ARM) && + screen_info_table != EFI_INVALID_TABLE_ADDR) { si = early_memremap_ro(screen_info_table, sizeof(*si)); if (!si) { pr_err("Could not map screen_info config table\n"); @@ -116,7 +119,8 @@ static int __init uefi_init(u64 efi_system_table) goto out; } retval = efi_config_parse_tables(config_tables, systab->nr_tables, - arch_tables); + IS_ENABLED(CONFIG_ARM) ? 
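The new EFI_CUSTOM_SSDT_OVERLAYS option gates the load-SSDT-from-an-EFI-variable code that previously built whenever ACPI was enabled. Assuming the efivar_ssdt= parameter described in ssdt-overlays.rst, a typical setup would look roughly like this (illustrative only, not taken from the patch):

# .config fragment
CONFIG_EFI_VARS=y
CONFIG_ACPI=y
CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y

# kernel command line: name of the EFI variable holding the overlay
efivar_ssdt=CUSTOM_SSDT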
arch_tables + : NULL); early_memunmap(config_tables, table_size); out: @@ -238,9 +242,37 @@ void __init efi_init(void) init_screen_info(); +#ifdef CONFIG_ARM /* ARM does not permit early mappings to persist across paging_init() */ - if (IS_ENABLED(CONFIG_ARM)) - efi_memmap_unmap(); + efi_memmap_unmap(); + + if (cpu_state_table != EFI_INVALID_TABLE_ADDR) { + struct efi_arm_entry_state *state; + bool dump_state = true; + + state = early_memremap_ro(cpu_state_table, + sizeof(struct efi_arm_entry_state)); + if (state == NULL) { + pr_warn("Unable to map CPU entry state table.\n"); + return; + } + + if ((state->sctlr_before_ebs & 1) == 0) + pr_warn(FW_BUG "EFI stub was entered with MMU and Dcache disabled, please fix your firmware!\n"); + else if ((state->sctlr_after_ebs & 1) == 0) + pr_warn(FW_BUG "ExitBootServices() returned with MMU and Dcache disabled, please fix your firmware!\n"); + else + dump_state = false; + + if (dump_state || efi_enabled(EFI_DBG)) { + pr_info("CPSR at EFI stub entry : 0x%08x\n", state->cpsr_before_ebs); + pr_info("SCTLR at EFI stub entry : 0x%08x\n", state->sctlr_before_ebs); + pr_info("CPSR after ExitBootServices() : 0x%08x\n", state->cpsr_after_ebs); + pr_info("SCTLR after ExitBootServices(): 0x%08x\n", state->sctlr_after_ebs); + } + early_memunmap(state, sizeof(struct efi_arm_entry_state)); + } +#endif } static bool efifb_overlaps_pci_range(const struct of_pci_range *range) diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 7f1657b6c30d..5114cae4ec97 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -189,7 +189,7 @@ static void generic_ops_unregister(void) efivars_unregister(&generic_efivars); } -#if IS_ENABLED(CONFIG_ACPI) +#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS #define EFIVAR_SSDT_NAME_MAX 16 static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata; static int __init efivar_ssdt_setup(char *str) @@ -622,7 +622,8 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables, rsv = (void *)(p + prsv % PAGE_SIZE); /* reserve the entry itself */ - memblock_reserve(prsv, EFI_MEMRESERVE_SIZE(rsv->size)); + memblock_reserve(prsv, + struct_size(rsv, entry, rsv->size)); for (i = 0; i < atomic_read(&rsv->count); i++) { memblock_reserve(rsv->entry[i].base, diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c index e3d692696583..d5915272141f 100644 --- a/drivers/firmware/efi/esrt.c +++ b/drivers/firmware/efi/esrt.c @@ -181,7 +181,7 @@ static int esre_create_sysfs_entry(void *esre, int entry_num) rc = kobject_init_and_add(&entry->kobj, &esre1_ktype, NULL, "entry%d", entry_num); if (rc) { - kfree(entry); + kobject_put(&entry->kobj); return rc; } } diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index 75daaf20374e..4cce372edaf4 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -6,7 +6,8 @@ # enabled, even if doing so doesn't break the build. 
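The efi_init() hunk above dumps the CPSR/SCTLR values the stub recorded and warns when the MMU was off at either point; the test is on SCTLR bit 0 (the M bit). A runnable sketch of how those saved values are interpreted, with made-up register values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SCTLR_M		(1u << 0)	/* MMU enable bit */

static bool mmu_on(uint32_t sctlr)
{
	return sctlr & SCTLR_M;
}

int main(void)
{
	uint32_t sctlr_before_ebs = 0x00c5187d;	/* example: M bit set */
	uint32_t sctlr_after_ebs  = 0x00c5187c;	/* example: M bit clear */

	if (!mmu_on(sctlr_before_ebs))
		puts("FW_BUG: EFI stub was entered with MMU and Dcache disabled");
	else if (!mmu_on(sctlr_after_ebs))
		puts("FW_BUG: ExitBootServices() returned with MMU and Dcache disabled");
	else
		puts("firmware entry state looks sane");
	return 0;
}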
# cflags-$(CONFIG_X86_32) := -march=i386 -cflags-$(CONFIG_X86_64) := -mcmodel=small +cflags-$(CONFIG_X86_64) := -mcmodel=small \ + $(call cc-option,-maccumulate-outgoing-args) cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ \ -fPIC -fno-strict-aliasing -mno-red-zone \ -mno-mmx -mno-sse -fshort-wchar \ diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c index 40243f524556..d08e5d55838c 100644 --- a/drivers/firmware/efi/libstub/arm32-stub.c +++ b/drivers/firmware/efi/libstub/arm32-stub.c @@ -7,10 +7,49 @@ #include "efistub.h" +static efi_guid_t cpu_state_guid = LINUX_EFI_ARM_CPU_STATE_TABLE_GUID; + +struct efi_arm_entry_state *efi_entry_state; + +static void get_cpu_state(u32 *cpsr, u32 *sctlr) +{ + asm("mrs %0, cpsr" : "=r"(*cpsr)); + if ((*cpsr & MODE_MASK) == HYP_MODE) + asm("mrc p15, 4, %0, c1, c0, 0" : "=r"(*sctlr)); + else + asm("mrc p15, 0, %0, c1, c0, 0" : "=r"(*sctlr)); +} + efi_status_t check_platform_features(void) { + efi_status_t status; + u32 cpsr, sctlr; int block; + get_cpu_state(&cpsr, &sctlr); + + efi_info("Entering in %s mode with MMU %sabled\n", + ((cpsr & MODE_MASK) == HYP_MODE) ? "HYP" : "SVC", + (sctlr & 1) ? "en" : "dis"); + + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, + sizeof(*efi_entry_state), + (void **)&efi_entry_state); + if (status != EFI_SUCCESS) { + efi_err("allocate_pool() failed\n"); + return status; + } + + efi_entry_state->cpsr_before_ebs = cpsr; + efi_entry_state->sctlr_before_ebs = sctlr; + + status = efi_bs_call(install_configuration_table, &cpu_state_guid, + efi_entry_state); + if (status != EFI_SUCCESS) { + efi_err("install_configuration_table() failed\n"); + goto free_state; + } + /* non-LPAE kernels can run anywhere */ if (!IS_ENABLED(CONFIG_ARM_LPAE)) return EFI_SUCCESS; @@ -19,9 +58,22 @@ efi_status_t check_platform_features(void) block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0); if (block < 5) { efi_err("This LPAE kernel is not supported by your CPU\n"); - return EFI_UNSUPPORTED; + status = EFI_UNSUPPORTED; + goto drop_table; } return EFI_SUCCESS; + +drop_table: + efi_bs_call(install_configuration_table, &cpu_state_guid, NULL); +free_state: + efi_bs_call(free_pool, efi_entry_state); + return status; +} + +void efi_handle_post_ebs_state(void) +{ + get_cpu_state(&efi_entry_state->cpsr_after_ebs, + &efi_entry_state->sctlr_after_ebs); } static efi_guid_t screen_info_guid = LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID; diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c index 7f6a57dec513..e5bfac79e5ac 100644 --- a/drivers/firmware/efi/libstub/arm64-stub.c +++ b/drivers/firmware/efi/libstub/arm64-stub.c @@ -35,13 +35,16 @@ efi_status_t check_platform_features(void) } /* - * Relocatable kernels can fix up the misalignment with respect to - * MIN_KIMG_ALIGN, so they only require a minimum alignment of EFI_KIMG_ALIGN - * (which accounts for the alignment of statically allocated objects such as - * the swapper stack.) + * Although relocatable kernels can fix up the misalignment with respect to + * MIN_KIMG_ALIGN, the resulting virtual text addresses are subtly out of + * sync with those recorded in the vmlinux when kaslr is disabled but the + * image required relocation anyway. Therefore retain 2M alignment unless + * KASLR is in use. */ -static const u64 min_kimg_align = IS_ENABLED(CONFIG_RELOCATABLE) ? EFI_KIMG_ALIGN - : MIN_KIMG_ALIGN; +static u64 min_kimg_align(void) +{ + return efi_nokaslr ? 
MIN_KIMG_ALIGN : EFI_KIMG_ALIGN; +} efi_status_t handle_kernel_image(unsigned long *image_addr, unsigned long *image_size, @@ -74,21 +77,21 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, kernel_size = _edata - _text; kernel_memsize = kernel_size + (_end - _edata); - *reserve_size = kernel_memsize + TEXT_OFFSET % min_kimg_align; + *reserve_size = kernel_memsize + TEXT_OFFSET % min_kimg_align(); if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) { /* * If KASLR is enabled, and we have some randomness available, * locate the kernel at a randomized offset in physical memory. */ - status = efi_random_alloc(*reserve_size, min_kimg_align, + status = efi_random_alloc(*reserve_size, min_kimg_align(), reserve_addr, phys_seed); } else { status = EFI_OUT_OF_RESOURCES; } if (status != EFI_SUCCESS) { - if (IS_ALIGNED((u64)_text - TEXT_OFFSET, min_kimg_align)) { + if (IS_ALIGNED((u64)_text - TEXT_OFFSET, min_kimg_align())) { /* * Just execute from wherever we were loaded by the * UEFI PE/COFF loader if the alignment is suitable. @@ -99,7 +102,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, } status = efi_allocate_pages_aligned(*reserve_size, reserve_addr, - ULONG_MAX, min_kimg_align); + ULONG_MAX, min_kimg_align()); if (status != EFI_SUCCESS) { efi_err("Failed to relocate kernel\n"); @@ -108,7 +111,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, } } - *image_addr = *reserve_addr + TEXT_OFFSET % min_kimg_align; + *image_addr = *reserve_addr + TEXT_OFFSET % min_kimg_align(); memcpy((void *)*image_addr, _text, kernel_size); return EFI_SUCCESS; diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c index 89f075275300..6bca70bbb43d 100644 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c @@ -19,7 +19,7 @@ #include "efistub.h" bool efi_nochunk; -bool efi_nokaslr; +bool efi_nokaslr = !IS_ENABLED(CONFIG_RANDOMIZE_BASE); bool efi_noinitrd; int efi_loglevel = CONSOLE_LOGLEVEL_DEFAULT; bool efi_novamap; @@ -32,6 +32,10 @@ bool __pure __efi_soft_reserve_enabled(void) return !efi_nosoftreserve; } +/** + * efi_char16_puts() - Write a UCS-2 encoded string to the console + * @str: UCS-2 encoded string + */ void efi_char16_puts(efi_char16_t *str) { efi_call_proto(efi_table_attr(efi_system_table, con_out), @@ -83,6 +87,10 @@ u32 utf8_to_utf32(const u8 **s8) return c32; } +/** + * efi_puts() - Write a UTF-8 encoded string to the console + * @str: UTF-8 encoded string + */ void efi_puts(const char *str) { efi_char16_t buf[128]; @@ -113,6 +121,16 @@ void efi_puts(const char *str) } } +/** + * efi_printk() - Print a kernel message + * @fmt: format string + * + * The first letter of the format string is used to determine the logging level + * of the message. If the level is less then the current EFI logging level, the + * message is suppressed. The message will be truncated to 255 bytes. + * + * Return: number of printed characters + */ int efi_printk(const char *fmt, ...) { char printf_buf[256]; @@ -154,13 +172,18 @@ int efi_printk(const char *fmt, ...) return printed; } -/* - * Parse the ASCII string 'cmdline' for EFI options, denoted by the efi= +/** + * efi_parse_options() - Parse EFI command line options + * @cmdline: kernel command line + * + * Parse the ASCII string @cmdline for EFI options, denoted by the efi= * option, e.g. efi=nochunk. 
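With KASLR disabled, min_kimg_align() above falls back to MIN_KIMG_ALIGN (2 MiB on arm64), and both the reservation size and the copy offset carry TEXT_OFFSET % align so the image lands TEXT_OFFSET bytes above an aligned base. A small arithmetic sketch (TEXT_OFFSET 0x80000 is the historical default, used here purely as an example):

#include <inttypes.h>
#include <stdio.h>

#define SZ_2M		0x200000ULL
#define TEXT_OFFSET	0x80000ULL

int main(void)
{
	uint64_t reserve_addr = 0x80000000ULL;	/* assumed 2 MiB aligned allocation */
	uint64_t image_addr = reserve_addr + TEXT_OFFSET % SZ_2M;

	printf("image_addr = %#" PRIx64 "\n", image_addr);
	printf("base below TEXT_OFFSET is 2M aligned: %d\n",
	       (image_addr - TEXT_OFFSET) % SZ_2M == 0);
	return 0;
}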
* * It should be noted that efi= is parsed in two very different * environments, first in the early boot environment of the EFI boot * stub, and subsequently during the kernel boot. + * + * Return: status code */ efi_status_t efi_parse_options(char const *cmdline) { @@ -286,13 +309,21 @@ char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len) return (char *)cmdline_addr; } -/* +/** + * efi_exit_boot_services() - Exit boot services + * @handle: handle of the exiting image + * @map: pointer to receive the memory map + * @priv: argument to be passed to @priv_func + * @priv_func: function to process the memory map before exiting boot services + * * Handle calling ExitBootServices according to the requirements set out by the * spec. Obtains the current memory map, and returns that info after calling * ExitBootServices. The client must specify a function to perform any * processing of the memory map data prior to ExitBootServices. A client * specific structure may be passed to the function via priv. The client * function may be called multiple times. + * + * Return: status code */ efi_status_t efi_exit_boot_services(void *handle, struct efi_boot_memmap *map, @@ -361,6 +392,11 @@ fail: return status; } +/** + * get_efi_config_table() - retrieve UEFI configuration table + * @guid: GUID of the configuration table to be retrieved + * Return: pointer to the configuration table or NULL + */ void *get_efi_config_table(efi_guid_t guid) { unsigned long tables = efi_table_attr(efi_system_table, tables); @@ -408,17 +444,18 @@ static const struct { }; /** - * efi_load_initrd_dev_path - load the initrd from the Linux initrd device path + * efi_load_initrd_dev_path() - load the initrd from the Linux initrd device path * @load_addr: pointer to store the address where the initrd was loaded * @load_size: pointer to store the size of the loaded initrd * @max: upper limit for the initrd memory allocation - * @return: %EFI_SUCCESS if the initrd was loaded successfully, in which - * case @load_addr and @load_size are assigned accordingly - * %EFI_NOT_FOUND if no LoadFile2 protocol exists on the initrd - * device path - * %EFI_INVALID_PARAMETER if load_addr == NULL or load_size == NULL - * %EFI_OUT_OF_RESOURCES if memory allocation failed - * %EFI_LOAD_ERROR in all other cases + * + * Return: + * * %EFI_SUCCESS if the initrd was loaded successfully, in which + * case @load_addr and @load_size are assigned accordingly + * * %EFI_NOT_FOUND if no LoadFile2 protocol exists on the initrd device path + * * %EFI_INVALID_PARAMETER if load_addr == NULL or load_size == NULL + * * %EFI_OUT_OF_RESOURCES if memory allocation failed + * * %EFI_LOAD_ERROR in all other cases */ static efi_status_t efi_load_initrd_dev_path(unsigned long *load_addr, @@ -481,6 +518,16 @@ efi_status_t efi_load_initrd_cmdline(efi_loaded_image_t *image, load_addr, load_size); } +/** + * efi_load_initrd() - Load initial RAM disk + * @image: EFI loaded image protocol + * @load_addr: pointer to loaded initrd + * @load_size: size of loaded initrd + * @soft_limit: preferred size of allocated memory for loading the initrd + * @hard_limit: minimum size of allocated memory + * + * Return: status code + */ efi_status_t efi_load_initrd(efi_loaded_image_t *image, unsigned long *load_addr, unsigned long *load_size, @@ -505,6 +552,15 @@ efi_status_t efi_load_initrd(efi_loaded_image_t *image, return status; } +/** + * efi_wait_for_key() - Wait for key stroke + * @usec: number of microseconds to wait for key stroke + * @key: key entered + * + * Wait for 
up to @usec microseconds for a key stroke. + * + * Return: status code, EFI_SUCCESS if key received + */ efi_status_t efi_wait_for_key(unsigned long usec, efi_input_key_t *key) { efi_event_t events[2], timer; diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c index e97370bdfdb0..3318ec3f8e5b 100644 --- a/drivers/firmware/efi/libstub/efi-stub.c +++ b/drivers/firmware/efi/libstub/efi-stub.c @@ -329,6 +329,9 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, if (status != EFI_SUCCESS) goto fail_free_initrd; + if (IS_ENABLED(CONFIG_ARM)) + efi_handle_post_ebs_state(); + efi_enter_kernel(image_addr, fdt_addr, fdt_totalsize((void *)fdt_addr)); /* not reached */ diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index bcd8c0a785f0..2c9d42264c29 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -157,8 +157,14 @@ typedef void (__efiapi *efi_event_notify_t)(efi_event_t, void *); #define EFI_EVT_NOTIFY_WAIT 0x00000100U #define EFI_EVT_NOTIFY_SIGNAL 0x00000200U -/* - * boottime->wait_for_event takes an array of events as input. +/** + * efi_set_event_at() - add event to events array + * + * @events: array of UEFI events + * @ids: index where to put the event in the array + * @event: event to add to the aray + * + * boottime->wait_for_event() takes an array of events as input. * Provide a helper to set it up correctly for mixed mode. */ static inline @@ -771,4 +777,6 @@ efi_status_t efi_load_initrd(efi_loaded_image_t *image, unsigned long soft_limit, unsigned long hard_limit); +void efi_handle_post_ebs_state(void); + #endif diff --git a/drivers/firmware/efi/libstub/file.c b/drivers/firmware/efi/libstub/file.c index 2005e33b33d5..630caa6b1f4c 100644 --- a/drivers/firmware/efi/libstub/file.c +++ b/drivers/firmware/efi/libstub/file.c @@ -102,12 +102,20 @@ static int find_file_option(const efi_char16_t *cmdline, int cmdline_len, if (!found) return 0; + /* Skip any leading slashes */ + while (cmdline[i] == L'/' || cmdline[i] == L'\\') + i++; + while (--result_len > 0 && i < cmdline_len) { - if (cmdline[i] == L'\0' || - cmdline[i] == L'\n' || - cmdline[i] == L' ') + efi_char16_t c = cmdline[i++]; + + if (c == L'\0' || c == L'\n' || c == L' ') break; - *result++ = cmdline[i++]; + else if (c == L'/') + /* Replace UNIX dir separators with EFI standard ones */ + *result++ = L'\\'; + else + *result++ = c; } *result = L'\0'; return i; diff --git a/drivers/firmware/efi/libstub/skip_spaces.c b/drivers/firmware/efi/libstub/skip_spaces.c index a700b3c7f7d0..159fb4e456c6 100644 --- a/drivers/firmware/efi/libstub/skip_spaces.c +++ b/drivers/firmware/efi/libstub/skip_spaces.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/ctype.h> +#include <linux/string.h> #include <linux/types.h> char *skip_spaces(const char *str) diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c index 873841af8d57..3d6ba425dbb9 100644 --- a/drivers/firmware/psci/psci_checker.c +++ b/drivers/firmware/psci/psci_checker.c @@ -157,8 +157,10 @@ static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups) cpu_groups = kcalloc(nb_available_cpus, sizeof(cpu_groups), GFP_KERNEL); - if (!cpu_groups) + if (!cpu_groups) { + free_cpumask_var(tmp); return -ENOMEM; + } cpumask_copy(tmp, cpu_online_mask); @@ -167,6 +169,7 @@ static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups) topology_core_cpumask(cpumask_any(tmp)); if 
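The find_file_option() change above skips leading slashes and rewrites UNIX '/' separators to the EFI-standard '\' while copying the file name out of the command line. A runnable sketch of the same copy loop, using plain char instead of efi_char16_t:

#include <stdio.h>

static int copy_file_path(const char *cmdline, char *result, int result_len)
{
	int i = 0;

	while (cmdline[i] == '/' || cmdline[i] == '\\')
		i++;					/* skip leading slashes */

	while (--result_len > 0 && cmdline[i]) {
		char c = cmdline[i++];

		if (c == '\n' || c == ' ')
			break;
		*result++ = (c == '/') ? '\\' : c;	/* normalise separators */
	}
	*result = '\0';
	return i;					/* characters consumed */
}

int main(void)
{
	char out[64];

	copy_file_path("/boot/initrd.img quiet", out, sizeof(out));
	printf("%s\n", out);	/* prints boot\initrd.img */
	return 0;
}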
(!alloc_cpumask_var(&cpu_groups[num_groups], GFP_KERNEL)) { + free_cpumask_var(tmp); free_cpu_groups(num_groups, &cpu_groups); return -ENOMEM; } @@ -196,13 +199,12 @@ static int hotplug_tests(void) if (!page_buf) goto out_free_cpu_groups; - err = 0; /* * Of course the last CPU cannot be powered down and cpu_down() should * refuse doing that. */ pr_info("Trying to turn off and on again all CPUs\n"); - err += down_and_up_cpus(cpu_online_mask, offlined_cpus); + err = down_and_up_cpus(cpu_online_mask, offlined_cpus); /* * Take down CPUs by cpu group this time. When the last CPU is turned diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c index ef8098856a47..625c8fdceabf 100644 --- a/drivers/firmware/raspberrypi.c +++ b/drivers/firmware/raspberrypi.c @@ -181,6 +181,7 @@ EXPORT_SYMBOL_GPL(rpi_firmware_property); static void rpi_firmware_print_firmware_revision(struct rpi_firmware *fw) { + time64_t date_and_time; u32 packet; int ret = rpi_firmware_property(fw, RPI_FIRMWARE_GET_FIRMWARE_REVISION, @@ -189,7 +190,9 @@ rpi_firmware_print_firmware_revision(struct rpi_firmware *fw) if (ret) return; - dev_info(fw->cl.dev, "Attached to firmware from %ptT\n", &packet); + /* This is not compatible with y2038 */ + date_and_time = packet; + dev_info(fw->cl.dev, "Attached to firmware from %ptT\n", &date_and_time); } static void diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig index b2408a710662..7cd5a29fc437 100644 --- a/drivers/fpga/Kconfig +++ b/drivers/fpga/Kconfig @@ -208,7 +208,7 @@ config FPGA_DFL_PCI config FPGA_MGR_ZYNQMP_FPGA tristate "Xilinx ZynqMP FPGA" - depends on ARCH_ZYNQMP || COMPILE_TEST + depends on ZYNQMP_FIRMWARE || (!ZYNQMP_FIRMWARE && COMPILE_TEST) help FPGA manager driver support for Xilinx ZynqMP FPGAs. This driver uses the processor configuration port(PCAP) diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c index 5640efe5e750..5bda38e0780f 100644 --- a/drivers/gpio/gpio-arizona.c +++ b/drivers/gpio/gpio-arizona.c @@ -64,6 +64,7 @@ static int arizona_gpio_get(struct gpio_chip *chip, unsigned offset) ret = pm_runtime_get_sync(chip->parent); if (ret < 0) { dev_err(chip->parent, "Failed to resume: %d\n", ret); + pm_runtime_put_autosuspend(chip->parent); return ret; } @@ -72,12 +73,15 @@ static int arizona_gpio_get(struct gpio_chip *chip, unsigned offset) if (ret < 0) { dev_err(chip->parent, "Failed to drop cache: %d\n", ret); + pm_runtime_put_autosuspend(chip->parent); return ret; } ret = regmap_read(arizona->regmap, reg, &val); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_autosuspend(chip->parent); return ret; + } pm_runtime_mark_last_busy(chip->parent); pm_runtime_put_autosuspend(chip->parent); @@ -106,6 +110,7 @@ static int arizona_gpio_direction_out(struct gpio_chip *chip, ret = pm_runtime_get_sync(chip->parent); if (ret < 0) { dev_err(chip->parent, "Failed to resume: %d\n", ret); + pm_runtime_put(chip->parent); return ret; } } diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 1fca8dd7824f..a3b9bdedbe44 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -107,6 +107,84 @@ static const struct i2c_device_id pca953x_id[] = { }; MODULE_DEVICE_TABLE(i2c, pca953x_id); +#ifdef CONFIG_GPIO_PCA953X_IRQ + +#include <linux/dmi.h> +#include <linux/gpio.h> +#include <linux/list.h> + +static const struct dmi_system_id pca953x_dmi_acpi_irq_info[] = { + { + /* + * On Intel Galileo Gen 2 board the IRQ pin of one of + * the I²C GPIO expanders, which has GpioInt() resource, + * is provided as 
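The Raspberry Pi firmware hunk above copies the 32-bit build timestamp into a time64_t before handing it to the %ptT formatter, which wants a 64-bit time value. A tiny userspace sketch of the widening (the timestamp value is made up):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	uint32_t packet = 1593500000u;		/* 32-bit seconds from the mailbox */
	int64_t date_and_time = packet;		/* widen before use, as in the patch */
	time_t t = (time_t)date_and_time;
	char buf[64];

	strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S UTC", gmtime(&t));
	printf("Attached to firmware from %s\n", buf);
	return 0;
}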
an absolute number instead of being + * relative. Since first controller (gpio-sch.c) and + * second (gpio-dwapb.c) are at the fixed bases, we may + * safely refer to the number in the global space to get + * an IRQ out of it. + */ + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"), + }, + }, + {} +}; + +#ifdef CONFIG_ACPI +static int pca953x_acpi_get_pin(struct acpi_resource *ares, void *data) +{ + struct acpi_resource_gpio *agpio; + int *pin = data; + + if (acpi_gpio_get_irq_resource(ares, &agpio)) + *pin = agpio->pin_table[0]; + return 1; +} + +static int pca953x_acpi_find_pin(struct device *dev) +{ + struct acpi_device *adev = ACPI_COMPANION(dev); + int pin = -ENOENT, ret; + LIST_HEAD(r); + + ret = acpi_dev_get_resources(adev, &r, pca953x_acpi_get_pin, &pin); + acpi_dev_free_resource_list(&r); + if (ret < 0) + return ret; + + return pin; +} +#else +static inline int pca953x_acpi_find_pin(struct device *dev) { return -ENXIO; } +#endif + +static int pca953x_acpi_get_irq(struct device *dev) +{ + int pin, ret; + + pin = pca953x_acpi_find_pin(dev); + if (pin < 0) + return pin; + + dev_info(dev, "Applying ACPI interrupt quirk (GPIO %d)\n", pin); + + if (!gpio_is_valid(pin)) + return -EINVAL; + + ret = gpio_request(pin, "pca953x interrupt"); + if (ret) + return ret; + + ret = gpio_to_irq(pin); + + /* When pin is used as an IRQ, no need to keep it requested */ + gpio_free(pin); + + return ret; +} +#endif + static const struct acpi_device_id pca953x_acpi_ids[] = { { "INT3491", 16 | PCA953X_TYPE | PCA_LATCH_INT, }, { } @@ -322,6 +400,7 @@ static const struct regmap_config pca953x_ai_i2c_regmap = { .writeable_reg = pca953x_writeable_register, .volatile_reg = pca953x_volatile_register, + .disable_locking = true, .cache_type = REGCACHE_RBTREE, .max_register = 0x7f, }; @@ -623,8 +702,6 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d) DECLARE_BITMAP(reg_direction, MAX_LINE); int level; - pca953x_read_regs(chip, chip->regs->direction, reg_direction); - if (chip->driver_data & PCA_PCAL) { /* Enable latch on interrupt-enabled inputs */ pca953x_write_regs(chip, PCAL953X_IN_LATCH, chip->irq_mask); @@ -635,7 +712,11 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d) pca953x_write_regs(chip, PCAL953X_INT_MASK, irq_mask); } + /* Switch direction to input if needed */ + pca953x_read_regs(chip, chip->regs->direction, reg_direction); + bitmap_or(irq_mask, chip->irq_trig_fall, chip->irq_trig_raise, gc->ngpio); + bitmap_complement(reg_direction, reg_direction, gc->ngpio); bitmap_and(irq_mask, irq_mask, reg_direction, gc->ngpio); /* Look for any newly setup interrupt */ @@ -734,14 +815,16 @@ static irqreturn_t pca953x_irq_handler(int irq, void *devid) struct gpio_chip *gc = &chip->gpio_chip; DECLARE_BITMAP(pending, MAX_LINE); int level; + bool ret; - if (!pca953x_irq_pending(chip, pending)) - return IRQ_NONE; + mutex_lock(&chip->i2c_lock); + ret = pca953x_irq_pending(chip, pending); + mutex_unlock(&chip->i2c_lock); for_each_set_bit(level, pending, gc->ngpio) handle_nested_irq(irq_find_mapping(gc->irq.domain, level)); - return IRQ_HANDLED; + return IRQ_RETVAL(ret); } static int pca953x_irq_setup(struct pca953x_chip *chip, int irq_base) @@ -752,6 +835,12 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, int irq_base) DECLARE_BITMAP(irq_stat, MAX_LINE); int ret; + if (dmi_first_match(pca953x_dmi_acpi_irq_info)) { + ret = pca953x_acpi_get_irq(&client->dev); + if (ret > 0) + client->irq = ret; + } + if (!client->irq) return 0; diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index aa80cf799e42..ad3e149007fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -1988,7 +1988,7 @@ static int psp_suspend(void *handle) ret = psp_tmr_terminate(psp); if (ret) { - DRM_ERROR("Failed to terminate tmr\n"); + DRM_ERROR("Falied to terminate tmr\n"); return ret; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index d8224156460e..f9cbbfa7acf4 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1378,7 +1378,7 @@ static int dm_late_init(void *handle) struct dmcu *dmcu = NULL; bool ret; - if (!adev->dm.fw_dmcu) + if (!adev->dm.fw_dmcu && !adev->dm.dmub_fw) return detect_mst_link_for_all_connectors(adev->ddev); dmcu = adev->dm.dc->res_pool->dmcu; diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c index 5e7ea0459d01..903f4f304647 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c +++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c @@ -173,8 +173,6 @@ static int aspeed_gfx_load(struct drm_device *drm) drm_mode_config_reset(drm); - drm_fbdev_generic_setup(drm, 32); - return 0; } @@ -225,6 +223,7 @@ static int aspeed_gfx_probe(struct platform_device *pdev) if (ret) goto err_unload; + drm_fbdev_generic_setup(&priv->drm, 32); return 0; err_unload: diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index da0d96a69570..88146f7245c5 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -227,18 +227,9 @@ int drm_fb_helper_debug_leave(struct fb_info *info) } EXPORT_SYMBOL(drm_fb_helper_debug_leave); -/** - * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration - * @fb_helper: driver-allocated fbdev helper, can be NULL - * - * This should be called from driver's drm &drm_driver.lastclose callback - * when implementing an fbcon on top of kms using this helper. This ensures that - * the user isn't greeted with a black screen when e.g. X dies. - * - * RETURNS: - * Zero if everything went ok, negative error code otherwise. - */ -int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper) +static int +__drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper, + bool force) { bool do_delayed; int ret; @@ -250,7 +241,16 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper) return 0; mutex_lock(&fb_helper->lock); - ret = drm_client_modeset_commit(&fb_helper->client); + if (force) { + /* + * Yes this is the _locked version which expects the master lock + * to be held. But for forced restores we're intentionally + * racing here, see drm_fb_helper_set_par(). + */ + ret = drm_client_modeset_commit_locked(&fb_helper->client); + } else { + ret = drm_client_modeset_commit(&fb_helper->client); + } do_delayed = fb_helper->delayed_hotplug; if (do_delayed) @@ -262,6 +262,22 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper) return ret; } + +/** + * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration + * @fb_helper: driver-allocated fbdev helper, can be NULL + * + * This should be called from driver's drm &drm_driver.lastclose callback + * when implementing an fbcon on top of kms using this helper. This ensures that + * the user isn't greeted with a black screen when e.g. X dies. 
+ * + * RETURNS: + * Zero if everything went ok, negative error code otherwise. + */ +int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper) +{ + return __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, false); +} EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked); #ifdef CONFIG_MAGIC_SYSRQ @@ -1318,6 +1334,7 @@ int drm_fb_helper_set_par(struct fb_info *info) { struct drm_fb_helper *fb_helper = info->par; struct fb_var_screeninfo *var = &info->var; + bool force; if (oops_in_progress) return -EBUSY; @@ -1327,7 +1344,25 @@ int drm_fb_helper_set_par(struct fb_info *info) return -EINVAL; } - drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper); + /* + * Normally we want to make sure that a kms master takes precedence over + * fbdev, to avoid fbdev flickering and occasionally stealing the + * display status. But Xorg first sets the vt back to text mode using + * the KDSET IOCTL with KD_TEXT, and only after that drops the master + * status when exiting. + * + * In the past this was caught by drm_fb_helper_lastclose(), but on + * modern systems where logind always keeps a drm fd open to orchestrate + * the vt switching, this doesn't work. + * + * To not break the userspace ABI we have this special case here, which + * is only used for the above case. Everything else uses the normal + * commit function, which ensures that we never steal the display from + * an active drm master. + */ + force = var->activate & FB_ACTIVATE_KD_TEXT; + + __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, force); return 0; } diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index ffd95bfeaa94..d00ea384dcbf 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -30,12 +30,6 @@ struct drm_dmi_panel_orientation_data { int orientation; }; -static const struct drm_dmi_panel_orientation_data acer_s1003 = { - .width = 800, - .height = 1280, - .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, -}; - static const struct drm_dmi_panel_orientation_data asus_t100ha = { .width = 800, .height = 1280, @@ -114,13 +108,19 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"), }, - .driver_data = (void *)&acer_s1003, + .driver_data = (void *)&lcd800x1280_rightside_up, }, { /* Asus T100HA */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100HAN"), }, .driver_data = (void *)&asus_t100ha, + }, { /* Asus T101HA */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T101HA"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, }, { /* GPD MicroPC (generic strings, also match on bios date) */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"), diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c index 619f81435c1b..58b89ec11b0e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dma.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c @@ -61,7 +61,7 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev, struct device *subdrv_dev, void **dma_priv) { struct exynos_drm_private *priv = drm_dev->dev_private; - int ret; + int ret = 0; if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) { DRM_DEV_ERROR(subdrv_dev, "Device %s lacks support for IOMMU\n", @@ -92,7 +92,7 @@ static int drm_iommu_attach_device(struct 
drm_device *drm_dev, if (ret) clear_dma_max_seg_size(subdrv_dev); - return 0; + return ret; } /* diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index fcee33a43aca..03be31427181 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -1498,7 +1498,6 @@ static int g2d_probe(struct platform_device *pdev) g2d->irq = platform_get_irq(pdev, 0); if (g2d->irq < 0) { - dev_err(dev, "failed to get irq\n"); ret = g2d->irq; goto err_put_clk; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c index a86abc173605..3821ea76a703 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_mic.c +++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c @@ -269,8 +269,10 @@ static void mic_pre_enable(struct drm_bridge *bridge) goto unlock; ret = pm_runtime_get_sync(mic->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(mic->dev); goto unlock; + } mic_set_path(mic, 1); diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c index 249c298e0987..eea13e60187b 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c @@ -308,8 +308,6 @@ static int hibmc_load(struct drm_device *dev) /* reset all the states of crtc/plane/encoder/connector */ drm_mode_config_reset(dev); - drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth); - return 0; err: @@ -356,6 +354,9 @@ static int hibmc_pci_probe(struct pci_dev *pdev, ret); goto err_unload; } + + drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth); + return 0; err_unload: diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 69a0682ddb6a..a2de57ede276 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -721,6 +721,25 @@ static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv) fbc->compressed_fb.size * fbc->threshold; } +static u16 intel_fbc_gen9_wa_cfb_stride(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc_state_cache *cache = &fbc->state_cache; + + if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) && + cache->fb.modifier != I915_FORMAT_MOD_X_TILED) + return DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8; + else + return 0; +} + +static bool intel_fbc_gen9_wa_cfb_stride_changed(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + return fbc->params.gen9_wa_cfb_stride != intel_fbc_gen9_wa_cfb_stride(dev_priv); +} + static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv) { struct intel_fbc *fbc = &dev_priv->fbc; @@ -881,6 +900,7 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc, params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane; params->fb.format = cache->fb.format; + params->fb.modifier = cache->fb.modifier; params->fb.stride = cache->fb.stride; params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache); @@ -910,6 +930,9 @@ static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state) if (params->fb.format != cache->fb.format) return false; + if (params->fb.modifier != cache->fb.modifier) + return false; + if (params->fb.stride != cache->fb.stride) return false; @@ -1197,7 +1220,8 @@ void intel_fbc_enable(struct intel_atomic_state *state, if (fbc->crtc) { if (fbc->crtc != crtc || - !intel_fbc_cfb_size_changed(dev_priv)) + 
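intel_fbc_gen9_wa_cfb_stride() above centralises the gen9 workaround stride calculation so it can also be compared against the value last programmed. A quick arithmetic sketch of the formula with made-up plane parameters:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int src_w = 1920, threshold = 1;	/* example values */

	/* non-X-tiled gen9 case: ceil(src_w / (32 * threshold)) * 8 */
	printf("gen9 wa cfb stride = %u\n", DIV_ROUND_UP(src_w, 32 * threshold) * 8);
	/* 1920 / 32 = 60 -> 480 */
	return 0;
}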
(!intel_fbc_cfb_size_changed(dev_priv) && + !intel_fbc_gen9_wa_cfb_stride_changed(dev_priv))) goto out; __intel_fbc_disable(dev_priv); @@ -1219,12 +1243,7 @@ void intel_fbc_enable(struct intel_atomic_state *state, goto out; } - if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) && - plane_state->hw.fb->modifier != I915_FORMAT_MOD_X_TILED) - cache->gen9_wa_cfb_stride = - DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8; - else - cache->gen9_wa_cfb_stride = 0; + cache->gen9_wa_cfb_stride = intel_fbc_gen9_wa_cfb_stride(dev_priv); drm_dbg_kms(&dev_priv->drm, "Enabling FBC on pipe %c\n", pipe_name(crtc->pipe)); diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 864a1642e81c..ae4ae800e908 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -2880,19 +2880,13 @@ intel_hdmi_connector_register(struct drm_connector *connector) return ret; } -static void intel_hdmi_destroy(struct drm_connector *connector) +static void intel_hdmi_connector_unregister(struct drm_connector *connector) { struct cec_notifier *n = intel_attached_hdmi(to_intel_connector(connector))->cec_notifier; cec_notifier_conn_unregister(n); - intel_connector_destroy(connector); -} - -static void intel_hdmi_connector_unregister(struct drm_connector *connector) -{ intel_hdmi_remove_i2c_symlink(connector); - intel_connector_unregister(connector); } @@ -2904,7 +2898,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = { .atomic_set_property = intel_digital_connector_atomic_set_property, .late_register = intel_hdmi_connector_register, .early_unregister = intel_hdmi_connector_unregister, - .destroy = intel_hdmi_destroy, + .destroy = intel_connector_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = intel_digital_connector_duplicate_state, }; diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index e4aece20bc80..52db2bde44a3 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -204,25 +204,25 @@ static int __ring_active(struct intel_ring *ring) { int err; - err = i915_active_acquire(&ring->vma->active); + err = intel_ring_pin(ring); if (err) return err; - err = intel_ring_pin(ring); + err = i915_active_acquire(&ring->vma->active); if (err) - goto err_active; + goto err_pin; return 0; -err_active: - i915_active_release(&ring->vma->active); +err_pin: + intel_ring_unpin(ring); return err; } static void __ring_retire(struct intel_ring *ring) { - intel_ring_unpin(ring); i915_active_release(&ring->vma->active); + intel_ring_unpin(ring); } __i915_active_call diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index e866b8d721ed..9158ead4afba 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -5403,13 +5403,8 @@ static void virtual_engine_initial_hint(struct virtual_engine *ve) * typically be the first we inspect for submission. 
*/ swp = prandom_u32_max(ve->num_siblings); - if (!swp) - return; - - swap(ve->siblings[swp], ve->siblings[0]); - if (!intel_engine_has_relative_mmio(ve->siblings[0])) - virtual_update_register_offsets(ve->context.lrc_reg_state, - ve->siblings[0]); + if (swp) + swap(ve->siblings[swp], ve->siblings[0]); } static int virtual_context_alloc(struct intel_context *ce) @@ -5422,15 +5417,9 @@ static int virtual_context_alloc(struct intel_context *ce) static int virtual_context_pin(struct intel_context *ce) { struct virtual_engine *ve = container_of(ce, typeof(*ve), context); - int err; /* Note: we must use a real engine class for setting up reg state */ - err = __execlists_context_pin(ce, ve->siblings[0]); - if (err) - return err; - - virtual_engine_initial_hint(ve); - return 0; + return __execlists_context_pin(ce, ve->siblings[0]); } static void virtual_context_enter(struct intel_context *ce) @@ -5695,6 +5684,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings, intel_engine_init_active(&ve->base, ENGINE_VIRTUAL); intel_engine_init_breadcrumbs(&ve->base); intel_engine_init_execlists(&ve->base); + ve->base.breadcrumbs.irq_armed = true; /* fake HW, used for irq_work */ ve->base.cops = &virtual_context_ops; ve->base.request_alloc = execlists_request_alloc; @@ -5776,6 +5766,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings, ve->base.flags |= I915_ENGINE_IS_VIRTUAL; + virtual_engine_initial_hint(ve); return &ve->context; err_put: diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index bb753f0c12eb..8624f5d2a1f3 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -29,9 +29,9 @@ static int cmp_u64(const void *A, const void *B) { const u64 *a = A, *b = B; - if (a < b) + if (*a < *b) return -1; - else if (a > b) + else if (*a > *b) return 1; else return 0; @@ -41,9 +41,9 @@ static int cmp_u32(const void *A, const void *B) { const u32 *a = A, *b = B; - if (a < b) + if (*a < *b) return -1; - else if (a > b) + else if (*a > *b) return 1; else return 0; diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c index ec47d4114554..62e6a14ad58e 100644 --- a/drivers/gpu/drm/i915/gvt/debugfs.c +++ b/drivers/gpu/drm/i915/gvt/debugfs.c @@ -66,7 +66,7 @@ static inline int mmio_diff_handler(struct intel_gvt *gvt, vreg = vgpu_vreg(param->vgpu, offset); if (preg != vreg) { - node = kmalloc(sizeof(*node), GFP_KERNEL); + node = kmalloc(sizeof(*node), GFP_ATOMIC); if (!node) return -ENOMEM; diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 26cae4846c82..78ba2857144e 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -1729,14 +1729,14 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2); write_vreg(vgpu, offset, p_data, bytes); - if (data & _MASKED_BIT_ENABLE(1)) { + if (IS_MASKED_BITS_ENABLED(data, 1)) { enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST); return 0; } if ((IS_COFFEELAKE(vgpu->gvt->gt->i915) || IS_COMETLAKE(vgpu->gvt->gt->i915)) && - data & _MASKED_BIT_ENABLE(2)) { + IS_MASKED_BITS_ENABLED(data, 2)) { enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST); return 0; } @@ -1745,14 +1745,14 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, * pvinfo, if not, we will treat this guest as non-gvtg-aware * guest, and stop emulating its cfg space, mmio, gtt, etc. 
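The selftest_rps fix above is the classic sort-comparator mistake: the comparator receives pointers to the elements, so the values must be dereferenced before comparing (comparing the pointers only reflects array order). A runnable userspace equivalent using qsort():

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

static int cmp_u64(const void *A, const void *B)
{
	const uint64_t *a = A, *b = B;

	if (*a < *b)		/* dereference, then three-way compare */
		return -1;
	else if (*a > *b)
		return 1;
	return 0;
}

int main(void)
{
	uint64_t v[] = { 30, 10, 20 };

	qsort(v, 3, sizeof(v[0]), cmp_u64);
	printf("%" PRIu64 " %" PRIu64 " %" PRIu64 "\n", v[0], v[1], v[2]);
	return 0;
}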
*/ - if (((data & _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)) || - (data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))) - && !vgpu->pv_notified) { + if ((IS_MASKED_BITS_ENABLED(data, GFX_PPGTT_ENABLE) || + IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE)) && + !vgpu->pv_notified) { enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST); return 0; } - if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)) - || (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) { + if (IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE) || + IS_MASKED_BITS_DISABLED(data, GFX_RUN_LIST_ENABLE)) { enable_execlist = !!(data & GFX_RUN_LIST_ENABLE); gvt_dbg_core("EXECLIST %s on ring %s\n", @@ -1813,7 +1813,7 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu, write_vreg(vgpu, offset, p_data, bytes); data = vgpu_vreg(vgpu, offset); - if (data & _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET)) + if (IS_MASKED_BITS_ENABLED(data, RESET_CTL_REQUEST_RESET)) data |= RESET_CTL_READY_TO_RESET; else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET)) data &= ~RESET_CTL_READY_TO_RESET; @@ -1831,7 +1831,8 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu, (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18); write_vreg(vgpu, offset, p_data, bytes); - if (data & _MASKED_BIT_ENABLE(0x10) || data & _MASKED_BIT_ENABLE(0x8)) + if (IS_MASKED_BITS_ENABLED(data, 0x10) || + IS_MASKED_BITS_ENABLED(data, 0x8)) enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST); return 0; @@ -3059,6 +3060,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_D(_MMIO(0x72380), D_SKL_PLUS); MMIO_D(_MMIO(0x7239c), D_SKL_PLUS); MMIO_D(_MMIO(_PLANE_SURF_3_A), D_SKL_PLUS); + MMIO_D(_MMIO(_PLANE_SURF_3_B), D_SKL_PLUS); MMIO_D(CSR_SSP_BASE, D_SKL_PLUS); MMIO_D(CSR_HTP_SKL, D_SKL_PLUS); @@ -3135,8 +3137,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); - MMIO_D(GAMT_CHKN_BIT_REG, D_KBL); - MMIO_D(GEN9_CTX_PREEMPT_REG, D_KBL | D_SKL); + MMIO_D(GAMT_CHKN_BIT_REG, D_KBL | D_CFL); + MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS); return 0; } diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h index 970704b18f23..3b25e7fe32f6 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.h +++ b/drivers/gpu/drm/i915/gvt/mmio_context.h @@ -54,8 +54,8 @@ bool is_inhibit_context(struct intel_context *ce); int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu, struct i915_request *req); -#define IS_RESTORE_INHIBIT(a) \ - (_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) == \ - ((a) & _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT))) + +#define IS_RESTORE_INHIBIT(a) \ + IS_MASKED_BITS_ENABLED(a, CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) #endif diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h index 5b66e14c5b7b..b88e033cbed4 100644 --- a/drivers/gpu/drm/i915/gvt/reg.h +++ b/drivers/gpu/drm/i915/gvt/reg.h @@ -94,6 +94,11 @@ #define GFX_MODE_BIT_SET_IN_MASK(val, bit) \ ((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16)))) +#define IS_MASKED_BITS_ENABLED(_val, _b) \ + (((_val) & _MASKED_BIT_ENABLE(_b)) == _MASKED_BIT_ENABLE(_b)) +#define IS_MASKED_BITS_DISABLED(_val, _b) \ + ((_val) & _MASKED_BIT_DISABLE(_b)) + #define FORCEWAKE_RENDER_GEN9_REG 0xa278 #define FORCEWAKE_ACK_RENDER_GEN9_REG 0x0D84 #define FORCEWAKE_BLITTER_GEN9_REG 0xa188 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 2c2e88d49f3e..1241b00fd254 100644 --- a/drivers/gpu/drm/i915/i915_drv.h 
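The IS_MASKED_BITS_ENABLED()/_DISABLED() helpers added to reg.h above encode the masked-register convention: bits [31:16] of the written value are a per-bit write-enable mask for bits [15:0], so "enabled" needs both the mask bit and the value bit set, while the "disabled" helper only tests the mask bit. A standalone sketch of what the two helpers check (macro bodies mirror the diff; the test value is made up):

#include <stdint.h>
#include <stdio.h>

#define MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
#define MASKED_BIT_DISABLE(a)	((a) << 16)

#define IS_MASKED_BITS_ENABLED(val, b) \
	(((val) & MASKED_BIT_ENABLE(b)) == MASKED_BIT_ENABLE(b))
#define IS_MASKED_BITS_DISABLED(val, b) \
	((val) & MASKED_BIT_DISABLE(b))

int main(void)
{
	uint32_t data = MASKED_BIT_ENABLE(0x2) | MASKED_BIT_DISABLE(0x8);

	printf("bit 0x2 enabled:  %d\n", !!IS_MASKED_BITS_ENABLED(data, 0x2));  /* 1 */
	printf("bit 0x8 enabled:  %d\n", !!IS_MASKED_BITS_ENABLED(data, 0x8));  /* 0 */
	printf("bit 0x8 disabled: %d\n", !!IS_MASKED_BITS_DISABLED(data, 0x8)); /* 1 */
	return 0;
}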
+++ b/drivers/gpu/drm/i915/i915_drv.h @@ -442,6 +442,7 @@ struct intel_fbc { struct { const struct drm_format_info *format; unsigned int stride; + u64 modifier; } fb; int cfb_size; diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 25329b7600c9..014f34c047d5 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1592,6 +1592,7 @@ static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs, u32 d; cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM; + cmd |= MI_SRM_LRM_GLOBAL_GTT; if (INTEL_GEN(stream->perf->i915) >= 8) cmd++; diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 7fe1f317cd2b..8dde2415a0ef 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -104,6 +104,7 @@ vma_create(struct drm_i915_gem_object *obj, struct i915_address_space *vm, const struct i915_ggtt_view *view) { + struct i915_vma *pos = ERR_PTR(-E2BIG); struct i915_vma *vma; struct rb_node *rb, **p; @@ -184,7 +185,6 @@ vma_create(struct drm_i915_gem_object *obj, rb = NULL; p = &obj->vma.tree.rb_node; while (*p) { - struct i915_vma *pos; long cmp; rb = *p; @@ -205,8 +205,10 @@ vma_create(struct drm_i915_gem_object *obj, if (cmp < 0) p = &rb->rb_right; - else + else if (cmp > 0) p = &rb->rb_left; + else + goto err_unlock; } rb_link_node(&vma->obj_node, rb, p); rb_insert_color(&vma->obj_node, &obj->vma.tree); @@ -229,8 +231,9 @@ vma_create(struct drm_i915_gem_object *obj, err_unlock: spin_unlock(&obj->vma.lock); err_vma: + i915_vm_put(vm); i915_vma_free(vma); - return ERR_PTR(-E2BIG); + return pos; } static struct i915_vma * diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c index 04e1d38d41f7..08802e5177f6 100644 --- a/drivers/gpu/drm/mcde/mcde_display.c +++ b/drivers/gpu/drm/mcde/mcde_display.c @@ -812,7 +812,7 @@ static void mcde_display_enable(struct drm_simple_display_pipe *pipe, struct drm_crtc *crtc = &pipe->crtc; struct drm_plane *plane = &pipe->plane; struct drm_device *drm = crtc->dev; - struct mcde *mcde = drm->dev_private; + struct mcde *mcde = to_mcde(drm); const struct drm_display_mode *mode = &cstate->mode; struct drm_framebuffer *fb = plane->state->fb; u32 format = fb->format->format; diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c index d300be5ee463..82137ab76cfc 100644 --- a/drivers/gpu/drm/mcde/mcde_drv.c +++ b/drivers/gpu/drm/mcde/mcde_drv.c @@ -209,7 +209,6 @@ static int mcde_modeset_init(struct drm_device *drm) drm_mode_config_reset(drm); drm_kms_helper_poll_init(drm); - drm_fbdev_generic_setup(drm, 32); return 0; } @@ -254,6 +253,8 @@ static int mcde_drm_bind(struct device *dev) if (ret < 0) goto unbind; + drm_fbdev_generic_setup(drm, 32); + return 0; unbind: diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig index c420f5a3d33b..aa74aac3cbcc 100644 --- a/drivers/gpu/drm/mediatek/Kconfig +++ b/drivers/gpu/drm/mediatek/Kconfig @@ -6,12 +6,12 @@ config DRM_MEDIATEK depends on COMMON_CLK depends on HAVE_ARM_SMCCC depends on OF + depends on MTK_MMSYS select DRM_GEM_CMA_HELPER select DRM_KMS_HELPER select DRM_MIPI_DSI select DRM_PANEL select MEMORY - select MTK_MMSYS select MTK_SMI select VIDEOMODE_HELPERS help diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index f6adc5b147c9..040834bdee9e 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -189,7 +189,6 @@ 
static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc) int ret; int i; - DRM_DEBUG_DRIVER("%s\n", __func__); for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { ret = clk_prepare_enable(mtk_crtc->ddp_comp[i]->clk); if (ret) { @@ -209,7 +208,6 @@ static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc) { int i; - DRM_DEBUG_DRIVER("%s\n", __func__); for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) clk_disable_unprepare(mtk_crtc->ddp_comp[i]->clk); } @@ -254,7 +252,6 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) int ret; int i; - DRM_DEBUG_DRIVER("%s\n", __func__); if (WARN_ON(!crtc->state)) return -EINVAL; @@ -295,7 +292,6 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) goto err_mutex_unprepare; } - DRM_DEBUG_DRIVER("mediatek_ddp_ddp_path_setup\n"); for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) { mtk_mmsys_ddp_connect(mtk_crtc->mmsys_dev, mtk_crtc->ddp_comp[i]->id, @@ -345,7 +341,6 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc) struct drm_crtc *crtc = &mtk_crtc->base; int i; - DRM_DEBUG_DRIVER("%s\n", __func__); for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]); if (i == 1) @@ -827,7 +822,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, #if IS_REACHABLE(CONFIG_MTK_CMDQ) mtk_crtc->cmdq_client = - cmdq_mbox_create(dev, drm_crtc_index(&mtk_crtc->base), + cmdq_mbox_create(mtk_crtc->mmsys_dev, + drm_crtc_index(&mtk_crtc->base), 2000); if (IS_ERR(mtk_crtc->cmdq_client)) { dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n", diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 6bd369434d9d..040a8f393fe2 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -444,7 +444,6 @@ static int mtk_drm_probe(struct platform_device *pdev) if (!private) return -ENOMEM; - private->data = of_device_get_match_data(dev); private->mmsys_dev = dev->parent; if (!private->mmsys_dev) { dev_err(dev, "Failed to get MMSYS device\n"); @@ -514,7 +513,8 @@ static int mtk_drm_probe(struct platform_device *pdev) goto err_node; } - ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL); + ret = mtk_ddp_comp_init(dev->parent, node, comp, + comp_id, NULL); if (ret) { of_node_put(node); goto err_node; @@ -571,7 +571,6 @@ static int mtk_drm_sys_suspend(struct device *dev) int ret; ret = drm_mode_config_helper_suspend(drm); - DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n"); return ret; } @@ -583,7 +582,6 @@ static int mtk_drm_sys_resume(struct device *dev) int ret; ret = drm_mode_config_helper_resume(drm); - DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n"); return ret; } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c index c2bd683a87c8..92141a19681b 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c @@ -164,6 +164,16 @@ static int mtk_plane_atomic_check(struct drm_plane *plane, true, true); } +static void mtk_plane_atomic_disable(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct mtk_plane_state *state = to_mtk_plane_state(plane->state); + + state->pending.enable = false; + wmb(); /* Make sure the above parameter is set before update */ + state->pending.dirty = true; +} + static void mtk_plane_atomic_update(struct drm_plane *plane, struct drm_plane_state *old_state) { @@ -178,6 +188,11 @@ static void mtk_plane_atomic_update(struct drm_plane *plane, if (!crtc || WARN_ON(!fb)) return; + 
if (!plane->state->visible) { + mtk_plane_atomic_disable(plane, old_state); + return; + } + gem = fb->obj[0]; mtk_gem = to_mtk_gem_obj(gem); addr = mtk_gem->dma_addr; @@ -200,16 +215,6 @@ static void mtk_plane_atomic_update(struct drm_plane *plane, state->pending.dirty = true; } -static void mtk_plane_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) -{ - struct mtk_plane_state *state = to_mtk_plane_state(plane->state); - - state->pending.enable = false; - wmb(); /* Make sure the above parameter is set before update */ - state->pending.dirty = true; -} - static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = { .prepare_fb = drm_gem_fb_prepare_fb, .atomic_check = mtk_plane_atomic_check, diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 270bf22c98fe..02ac55c13a80 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -316,10 +316,7 @@ static void mtk_dsi_lane0_ulp_mode_leave(struct mtk_dsi *dsi) static bool mtk_dsi_clk_hs_state(struct mtk_dsi *dsi) { - u32 tmp_reg1; - - tmp_reg1 = readl(dsi->regs + DSI_PHY_LCCON); - return ((tmp_reg1 & LC_HS_TX_EN) == 1) ? true : false; + return readl(dsi->regs + DSI_PHY_LCCON) & LC_HS_TX_EN; } static void mtk_dsi_clk_hs_mode(struct mtk_dsi *dsi, bool enter) diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 55a4d095606f..95591262ac36 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -1630,8 +1630,6 @@ static int mtk_hdmi_audio_startup(struct device *dev, void *data) { struct mtk_hdmi *hdmi = dev_get_drvdata(dev); - dev_dbg(dev, "%s\n", __func__); - mtk_hdmi_audio_enable(hdmi); return 0; @@ -1641,8 +1639,6 @@ static void mtk_hdmi_audio_shutdown(struct device *dev, void *data) { struct mtk_hdmi *hdmi = dev_get_drvdata(dev); - dev_dbg(dev, "%s\n", __func__); - mtk_hdmi_audio_disable(hdmi); } @@ -1651,8 +1647,6 @@ mtk_hdmi_audio_digital_mute(struct device *dev, void *data, bool enable) { struct mtk_hdmi *hdmi = dev_get_drvdata(dev); - dev_dbg(dev, "%s(%d)\n", __func__, enable); - if (enable) mtk_hdmi_hw_aud_mute(hdmi); else @@ -1665,8 +1659,6 @@ static int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, { struct mtk_hdmi *hdmi = dev_get_drvdata(dev); - dev_dbg(dev, "%s\n", __func__); - memcpy(buf, hdmi->conn.eld, min(sizeof(hdmi->conn.eld), len)); return 0; @@ -1766,7 +1758,6 @@ static int mtk_drm_hdmi_probe(struct platform_device *pdev) goto err_bridge_remove; } - dev_dbg(dev, "mediatek hdmi probe success\n"); return 0; err_bridge_remove: @@ -1789,7 +1780,7 @@ static int mtk_hdmi_suspend(struct device *dev) struct mtk_hdmi *hdmi = dev_get_drvdata(dev); mtk_hdmi_clk_disable_audio(hdmi); - dev_dbg(dev, "hdmi suspend success!\n"); + return 0; } @@ -1804,7 +1795,6 @@ static int mtk_hdmi_resume(struct device *dev) return ret; } - dev_dbg(dev, "hdmi resume success!\n"); return 0; } #endif diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c index b55f51675205..827b93786fac 100644 --- a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c +++ b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c @@ -107,60 +107,10 @@ #define RGS_HDMITX_5T1_EDG (0xf << 4) #define RGS_HDMITX_PLUG_TST BIT(0) -static const u8 PREDIV[3][4] = { - {0x0, 0x0, 0x0, 0x0}, /* 27Mhz */ - {0x1, 0x1, 0x1, 0x1}, /* 74Mhz */ - {0x1, 0x1, 0x1, 0x1} /* 148Mhz */ -}; - -static const u8 TXDIV[3][4] = { - {0x3, 0x3, 0x3, 0x2}, /* 27Mhz */ 
- {0x2, 0x1, 0x1, 0x1}, /* 74Mhz */ - {0x1, 0x0, 0x0, 0x0} /* 148Mhz */ -}; - -static const u8 FBKSEL[3][4] = { - {0x1, 0x1, 0x1, 0x1}, /* 27Mhz */ - {0x1, 0x0, 0x1, 0x1}, /* 74Mhz */ - {0x1, 0x0, 0x1, 0x1} /* 148Mhz */ -}; - -static const u8 FBKDIV[3][4] = { - {19, 24, 29, 19}, /* 27Mhz */ - {19, 24, 14, 19}, /* 74Mhz */ - {19, 24, 14, 19} /* 148Mhz */ -}; - -static const u8 DIVEN[3][4] = { - {0x2, 0x1, 0x1, 0x2}, /* 27Mhz */ - {0x2, 0x2, 0x2, 0x2}, /* 74Mhz */ - {0x2, 0x2, 0x2, 0x2} /* 148Mhz */ -}; - -static const u8 HTPLLBP[3][4] = { - {0xc, 0xc, 0x8, 0xc}, /* 27Mhz */ - {0xc, 0xf, 0xf, 0xc}, /* 74Mhz */ - {0xc, 0xf, 0xf, 0xc} /* 148Mhz */ -}; - -static const u8 HTPLLBC[3][4] = { - {0x2, 0x3, 0x3, 0x2}, /* 27Mhz */ - {0x2, 0x3, 0x3, 0x2}, /* 74Mhz */ - {0x2, 0x3, 0x3, 0x2} /* 148Mhz */ -}; - -static const u8 HTPLLBR[3][4] = { - {0x1, 0x1, 0x0, 0x1}, /* 27Mhz */ - {0x1, 0x2, 0x2, 0x1}, /* 74Mhz */ - {0x1, 0x2, 0x2, 0x1} /* 148Mhz */ -}; - static int mtk_hdmi_pll_prepare(struct clk_hw *hw) { struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); - dev_dbg(hdmi_phy->dev, "%s\n", __func__); - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN); mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV); mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_MHLCK_EN); @@ -178,8 +128,6 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw) { struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); - dev_dbg(hdmi_phy->dev, "%s\n", __func__); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN); mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN); usleep_range(100, 150); diff --git a/drivers/gpu/drm/meson/meson_registers.h b/drivers/gpu/drm/meson/meson_registers.h index 08631fdfe4b9..446e7961da48 100644 --- a/drivers/gpu/drm/meson/meson_registers.h +++ b/drivers/gpu/drm/meson/meson_registers.h @@ -266,6 +266,12 @@ #define VIU_OSD_FIFO_DEPTH_VAL(val) ((val & 0x7f) << 12) #define VIU_OSD_WORDS_PER_BURST(words) (((words & 0x4) >> 1) << 22) #define VIU_OSD_FIFO_LIMITS(size) ((size & 0xf) << 24) +#define VIU_OSD_BURST_LENGTH_24 (0x0 << 31 | 0x0 << 10) +#define VIU_OSD_BURST_LENGTH_32 (0x0 << 31 | 0x1 << 10) +#define VIU_OSD_BURST_LENGTH_48 (0x0 << 31 | 0x2 << 10) +#define VIU_OSD_BURST_LENGTH_64 (0x0 << 31 | 0x3 << 10) +#define VIU_OSD_BURST_LENGTH_96 (0x1 << 31 | 0x0 << 10) +#define VIU_OSD_BURST_LENGTH_128 (0x1 << 31 | 0x1 << 10) #define VD1_IF0_GEN_REG 0x1a50 #define VD1_IF0_CANVAS0 0x1a51 diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c index 304f8ff1339c..aede0c67a57f 100644 --- a/drivers/gpu/drm/meson/meson_viu.c +++ b/drivers/gpu/drm/meson/meson_viu.c @@ -411,13 +411,6 @@ void meson_viu_gxm_disable_osd1_afbc(struct meson_drm *priv) priv->io_base + _REG(VIU_MISC_CTRL1)); } -static inline uint32_t meson_viu_osd_burst_length_reg(uint32_t length) -{ - uint32_t val = (((length & 0x80) % 24) / 12); - - return (((val & 0x3) << 10) | (((val & 0x4) >> 2) << 31)); -} - void meson_viu_init(struct meson_drm *priv) { uint32_t reg; @@ -444,9 +437,9 @@ void meson_viu_init(struct meson_drm *priv) VIU_OSD_FIFO_LIMITS(2); /* fifo_lim: 2*16=32 */ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) - reg |= meson_viu_osd_burst_length_reg(32); + reg |= VIU_OSD_BURST_LENGTH_32; else - reg |= meson_viu_osd_burst_length_reg(64); + reg |= VIU_OSD_BURST_LENGTH_64; writel_relaxed(reg, priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT)); writel_relaxed(reg, priv->io_base + _REG(VIU_OSD2_FIFO_CTRL_STAT)); diff --git 
a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c index 60f6472a3e58..6021f8d9efd1 100644 --- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c @@ -408,7 +408,7 @@ a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev) struct msm_gem_address_space *aspace; aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M, - SZ_16M + 0xfff * SZ_64K); + 0xfff * SZ_64K); if (IS_ERR(aspace) && !IS_ERR(mmu)) mmu->funcs->destroy(mmu); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 096be97ce9f9..21e77d67151f 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -1121,7 +1121,7 @@ static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu) return -ENODEV; mmu = msm_iommu_new(gmu->dev, domain); - gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x7fffffff); + gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000); if (IS_ERR(gmu->aspace)) { iommu_domain_free(domain); return PTR_ERR(gmu->aspace); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 68314dcfce18..aa53f47b7e8b 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -893,8 +893,8 @@ static const struct adreno_gpu_funcs funcs = { #if defined(CONFIG_DRM_MSM_GPU_STATE) .gpu_state_get = a6xx_gpu_state_get, .gpu_state_put = a6xx_gpu_state_put, - .create_address_space = adreno_iommu_create_address_space, #endif + .create_address_space = adreno_iommu_create_address_space, }, .get_timestamp = a6xx_get_timestamp, }; diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 89673c7ed473..5db06b590943 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -194,7 +194,7 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu, struct msm_gem_address_space *aspace; aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M, - 0xfffffff); + 0xffffffff - SZ_16M); if (IS_ERR(aspace) && !IS_ERR(mmu)) mmu->funcs->destroy(mmu); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 797e8fd4c16f..b0b264e30ce5 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -504,7 +504,7 @@ static struct msm_display_topology dpu_encoder_get_topology( struct dpu_kms *dpu_kms, struct drm_display_mode *mode) { - struct msm_display_topology topology; + struct msm_display_topology topology = {0}; int i, intf_count = 0; for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++) @@ -520,7 +520,8 @@ static struct msm_display_topology dpu_encoder_get_topology( * 1 LM, 1 INTF * 2 LM, 1 INTF (stream merge to support high resolution interfaces) * - * Adding color blocks only to primary interface + * Adding color blocks only to primary interface if available in + * sufficient number */ if (intf_count == 2) topology.num_lm = 2; @@ -529,8 +530,11 @@ static struct msm_display_topology dpu_encoder_get_topology( else topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 
2 : 1; - if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI) - topology.num_dspp = topology.num_lm; + if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI) { + if (dpu_kms->catalog->dspp && + (dpu_kms->catalog->dspp_count >= topology.num_lm)) + topology.num_dspp = topology.num_lm; + } topology.num_enc = 0; topology.num_intf = intf_count; @@ -2109,7 +2113,6 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc, dpu_enc = to_dpu_encoder_virt(enc); - mutex_init(&dpu_enc->enc_lock); ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info); if (ret) goto fail; @@ -2124,7 +2127,6 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc, 0); - mutex_init(&dpu_enc->rc_lock); INIT_DELAYED_WORK(&dpu_enc->delayed_off_work, dpu_encoder_off_work); dpu_enc->idle_timeout = IDLE_TIMEOUT; @@ -2156,7 +2158,7 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev, dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL); if (!dpu_enc) - return ERR_PTR(ENOMEM); + return ERR_PTR(-ENOMEM); rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs, drm_enc_mode, NULL); @@ -2169,6 +2171,8 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev, spin_lock_init(&dpu_enc->enc_spinlock); dpu_enc->enabled = false; + mutex_init(&dpu_enc->enc_lock); + mutex_init(&dpu_enc->rc_lock); return &dpu_enc->base; } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index b8615d4fe8a3..680527e28d09 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -780,7 +780,7 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms) mmu = msm_iommu_new(dpu_kms->dev->dev, domain); aspace = msm_gem_address_space_create(mmu, "dpu1", - 0x1000, 0xfffffff); + 0x1000, 0x100000000 - 0x1000); if (IS_ERR(aspace)) { mmu->funcs->destroy(mmu); diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c index 19291d77df40..dbf8d429223e 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c @@ -514,7 +514,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) config->iommu); aspace = msm_gem_address_space_create(mmu, - "mdp4", 0x1000, 0xffffffff); + "mdp4", 0x1000, 0x100000000 - 0x1000); if (IS_ERR(aspace)) { if (!IS_ERR(mmu)) diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c index 19ec48695ffb..e193865ce9a2 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c @@ -633,7 +633,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) mmu = msm_iommu_new(iommu_dev, config->platform.iommu); aspace = msm_gem_address_space_create(mmu, "mdp5", - 0x1000, 0xffffffff); + 0x1000, 0x100000000 - 0x1000); if (IS_ERR(aspace)) { if (!IS_ERR(mmu)) diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c index 001fbf537440..a1d94be7883a 100644 --- a/drivers/gpu/drm/msm/msm_submitqueue.c +++ b/drivers/gpu/drm/msm/msm_submitqueue.c @@ -71,8 +71,10 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx, queue->flags = flags; if (priv->gpu) { - if (prio >= priv->gpu->nr_rings) + if (prio >= priv->gpu->nr_rings) { + kfree(queue); return -EINVAL; + } queue->prio = prio; } diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index cd71b9876c8a..4c2894d8e15b 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ 
b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -586,6 +586,9 @@ nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc) (0x0100 << nv_crtc->index), }; + if (!nv_encoder->audio) + return; + nv_encoder->audio = false; nvif_mthd(&disp->disp->object, 0, &args, sizeof(args)); diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index af870671195a..462ee0493651 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -550,7 +550,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, *dma_addr)) goto out_free_page; - if (drm->dmem->migrate.copy_func(drm, page_size(spage), + if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST, *dma_addr)) goto out_dma_unmap; } else { diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index ba9f9359c30e..6586d9d39874 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -562,6 +562,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm, .end = notifier->notifier.interval_tree.last + 1, .pfn_flags_mask = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE, .hmm_pfns = hmm_pfns, + .dev_private_owner = drm->dev, }; struct mm_struct *mm = notifier->notifier.mm; int ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c index c8ab1b5741a3..db7769cb33eb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c @@ -118,10 +118,10 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry, if (retries) udelay(400); - /* transaction request, wait up to 1ms for it to complete */ + /* transaction request, wait up to 2ms for it to complete */ nvkm_wr32(device, 0x00e4e4 + base, 0x00010000 | ctrl); - timeout = 1000; + timeout = 2000; do { ctrl = nvkm_rd32(device, 0x00e4e4 + base); udelay(1); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c index 7ef60895f43a..edb6148cbca0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c @@ -118,10 +118,10 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry, if (retries) udelay(400); - /* transaction request, wait up to 1ms for it to complete */ + /* transaction request, wait up to 2ms for it to complete */ nvkm_wr32(device, 0x00d954 + base, 0x00010000 | ctrl); - timeout = 1000; + timeout = 2000; do { ctrl = nvkm_rd32(device, 0x00d954 + base); udelay(1); diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 88493538a147..af6ea5480c81 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -2583,6 +2583,7 @@ static const struct panel_desc logicpd_type_28 = { .bus_format = MEDIA_BUS_FMT_RGB888_1X24, .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE, + .connector_type = DRM_MODE_CONNECTOR_DPI, }; static const struct panel_desc mitsubishi_aa070mc01 = { @@ -2746,6 +2747,7 @@ static const struct panel_desc newhaven_nhd_43_480272ef_atxl = { .bus_format = MEDIA_BUS_FMT_RGB888_1X24, .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE, + .connector_type = DRM_MODE_CONNECTOR_DPI, }; static const struct display_timing 
nlt_nl192108ac18_02d_timing = { diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig index 0919f1f159a4..f65d1489dc50 100644 --- a/drivers/gpu/drm/rcar-du/Kconfig +++ b/drivers/gpu/drm/rcar-du/Kconfig @@ -31,6 +31,7 @@ config DRM_RCAR_DW_HDMI config DRM_RCAR_LVDS tristate "R-Car DU LVDS Encoder Support" depends on DRM && DRM_BRIDGE && OF + select DRM_KMS_HELPER select DRM_PANEL select OF_FLATTREE select OF_OVERLAY diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c index ce07ddc3e058..557cbe5ab35f 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c @@ -259,9 +259,8 @@ sun4i_hdmi_connector_detect(struct drm_connector *connector, bool force) struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector); unsigned long reg; - if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_HPD_REG, reg, - reg & SUN4I_HDMI_HPD_HIGH, - 0, 500000)) { + reg = readl(hdmi->base + SUN4I_HDMI_HPD_REG); + if (reg & SUN4I_HDMI_HPD_HIGH) { cec_phys_addr_invalidate(hdmi->cec_adap); return connector_status_disconnected; } diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index 9b308b572eac..da8b9983b7de 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -957,6 +957,7 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm, } drm_plane_helper_add(&plane->base, &tegra_cursor_plane_helper_funcs); + drm_plane_create_zpos_immutable_property(&plane->base, 255); return &plane->base; } diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c index 8183e617bf6b..22a03f7ffdc1 100644 --- a/drivers/gpu/drm/tegra/hub.c +++ b/drivers/gpu/drm/tegra/hub.c @@ -149,7 +149,9 @@ int tegra_display_hub_prepare(struct tegra_display_hub *hub) for (i = 0; i < hub->soc->num_wgrps; i++) { struct tegra_windowgroup *wgrp = &hub->wgrps[i]; - tegra_windowgroup_enable(wgrp); + /* Skip orphaned window group whose parent DC is disabled */ + if (wgrp->parent) + tegra_windowgroup_enable(wgrp); } return 0; @@ -166,7 +168,9 @@ void tegra_display_hub_cleanup(struct tegra_display_hub *hub) for (i = 0; i < hub->soc->num_wgrps; i++) { struct tegra_windowgroup *wgrp = &hub->wgrps[i]; - tegra_windowgroup_disable(wgrp); + /* Skip orphaned window group whose parent DC is disabled */ + if (wgrp->parent) + tegra_windowgroup_disable(wgrp); } } @@ -944,6 +948,15 @@ static int tegra_display_hub_probe(struct platform_device *pdev) dev_err(&pdev->dev, "failed to register host1x client: %d\n", err); + err = devm_of_platform_populate(&pdev->dev); + if (err < 0) + goto unregister; + + return err; + +unregister: + host1x_client_unregister(&hub->client); + pm_runtime_disable(&pdev->dev); return err; } diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 0768a054a916..b03747717ec7 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -880,8 +880,10 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, if (!fence) return 0; - if (no_wait_gpu) + if (no_wait_gpu) { + dma_fence_put(fence); return -EBUSY; + } dma_resv_add_shared_fence(bo->base.resv, fence); diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 82b893d4249f..d7a6537dd6ee 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -300,8 +300,10 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, break; case -EBUSY: case -ERESTARTSYS: + dma_fence_put(moving); return VM_FAULT_NOPAGE; default: + 
dma_fence_put(moving); return VM_FAULT_SIGBUS; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index 9ffa9c75a5da..16b385629688 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -1069,10 +1069,6 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane, if (new_content_type != SAME_AS_DISPLAY) { struct vmw_surface_metadata metadata = {0}; - metadata.base_size.width = hdisplay; - metadata.base_size.height = vdisplay; - metadata.base_size.depth = 1; - /* * If content buffer is a buffer object, then we have to * construct surface info @@ -1104,6 +1100,10 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane, metadata = new_vfbs->surface->metadata; } + metadata.base_size.width = hdisplay; + metadata.base_size.height = vdisplay; + metadata.base_size.depth = 1; + if (vps->surf) { struct drm_vmw_size cur_base_size = vps->surf->metadata.base_size; diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c index 6a995db51d6d..e201f62d62c0 100644 --- a/drivers/gpu/host1x/bus.c +++ b/drivers/gpu/host1x/bus.c @@ -686,8 +686,17 @@ EXPORT_SYMBOL(host1x_driver_register_full); */ void host1x_driver_unregister(struct host1x_driver *driver) { + struct host1x *host1x; + driver_unregister(&driver->driver); + mutex_lock(&devices_lock); + + list_for_each_entry(host1x, &devices, list) + host1x_detach_driver(host1x, driver); + + mutex_unlock(&devices_lock); + mutex_lock(&drivers_lock); list_del_init(&driver->list); mutex_unlock(&drivers_lock); diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index d24344e91922..d0ebb70e2fdd 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -468,11 +468,18 @@ static int host1x_probe(struct platform_device *pdev) err = host1x_register(host); if (err < 0) - goto deinit_intr; + goto deinit_debugfs; + + err = devm_of_platform_populate(&pdev->dev); + if (err < 0) + goto unregister; return 0; -deinit_intr: +unregister: + host1x_unregister(host); +deinit_debugfs: + host1x_debug_deinit(host); host1x_intr_deinit(host); deinit_syncpt: host1x_syncpt_deinit(host); diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c index 6f1fe7248d81..a9c2de95c5e2 100644 --- a/drivers/hid/hid-alps.c +++ b/drivers/hid/hid-alps.c @@ -25,6 +25,7 @@ #define U1_MOUSE_REPORT_ID 0x01 /* Mouse data ReportID */ #define U1_ABSOLUTE_REPORT_ID 0x03 /* Absolute data ReportID */ +#define U1_ABSOLUTE_REPORT_ID_SECD 0x02 /* FW-PTP Absolute data ReportID */ #define U1_FEATURE_REPORT_ID 0x05 /* Feature ReportID */ #define U1_SP_ABSOLUTE_REPORT_ID 0x06 /* Feature ReportID */ @@ -368,6 +369,7 @@ static int u1_raw_event(struct alps_dev *hdata, u8 *data, int size) case U1_FEATURE_REPORT_ID: break; case U1_ABSOLUTE_REPORT_ID: + case U1_ABSOLUTE_REPORT_ID_SECD: for (i = 0; i < hdata->max_fingers; i++) { u8 *contact = &data[i * 5]; diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index 359bdfbe3701..e82f604d33e9 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c @@ -60,6 +60,7 @@ MODULE_PARM_DESC(swap_fn_leftctrl, "Swap the Fn and left Control keys. 
" struct apple_sc { unsigned long quirks; unsigned int fn_on; + unsigned int fn_found; DECLARE_BITMAP(pressed_numlock, KEY_CNT); }; @@ -365,12 +366,15 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { + struct apple_sc *asc = hid_get_drvdata(hdev); + if (usage->hid == (HID_UP_CUSTOM | 0x0003) || usage->hid == (HID_UP_MSVENDOR | 0x0003) || usage->hid == (HID_UP_HPVENDOR2 | 0x0003)) { /* The fn key on Apple USB keyboards */ set_bit(EV_REP, hi->input->evbit); hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN); + asc->fn_found = true; apple_setup_input(hi->input); return 1; } @@ -397,6 +401,19 @@ static int apple_input_mapped(struct hid_device *hdev, struct hid_input *hi, return 0; } +static int apple_input_configured(struct hid_device *hdev, + struct hid_input *hidinput) +{ + struct apple_sc *asc = hid_get_drvdata(hdev); + + if ((asc->quirks & APPLE_HAS_FN) && !asc->fn_found) { + hid_info(hdev, "Fn key not found (Apple Wireless Keyboard clone?), disabling Fn key handling\n"); + asc->quirks = 0; + } + + return 0; +} + static int apple_probe(struct hid_device *hdev, const struct hid_device_id *id) { @@ -611,6 +628,7 @@ static struct hid_driver apple_driver = { .event = apple_event, .input_mapping = apple_input_mapping, .input_mapped = apple_input_mapped, + .input_configured = apple_input_configured, }; module_hid_driver(apple_driver); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 874fc3791f3b..6f370e020feb 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -618,6 +618,7 @@ #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081 0xa081 #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2 0xa0c2 #define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096 0xa096 +#define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A293 0xa293 #define USB_VENDOR_ID_IMATION 0x0718 #define USB_DEVICE_ID_DISC_STAKKA 0xd000 @@ -998,6 +999,8 @@ #define USB_DEVICE_ID_ROCCAT_RYOS_MK_PRO 0x3232 #define USB_DEVICE_ID_ROCCAT_SAVU 0x2d5a +#define USB_VENDOR_ID_SAI 0x17dd + #define USB_VENDOR_ID_SAITEK 0x06a3 #define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17 #define USB_DEVICE_ID_SAITEK_PS1000 0x0621 diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index 48dff5d6b605..a78c13cc9f47 100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c @@ -1153,7 +1153,7 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev) if (!dj_report) return -ENOMEM; dj_report->report_id = REPORT_ID_DJ_SHORT; - dj_report->device_index = 0xFF; + dj_report->device_index = HIDPP_RECEIVER_INDEX; dj_report->report_type = REPORT_TYPE_CMD_GET_PAIRED_DEVICES; retval = logi_dj_recv_send_report(djrcv_dev, dj_report); kfree(dj_report); @@ -1175,7 +1175,7 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev, if (djrcv_dev->type == recvr_type_dj) { dj_report->report_id = REPORT_ID_DJ_SHORT; - dj_report->device_index = 0xFF; + dj_report->device_index = HIDPP_RECEIVER_INDEX; dj_report->report_type = REPORT_TYPE_CMD_SWITCH; dj_report->report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x3F; dj_report->report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = @@ -1204,7 +1204,7 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev, memset(buf, 0, HIDPP_REPORT_SHORT_LENGTH); buf[0] = REPORT_ID_HIDPP_SHORT; - buf[1] = 0xFF; + buf[1] = HIDPP_RECEIVER_INDEX; buf[2] = 0x80; buf[3] = 0x00; buf[4] = 0x00; diff --git a/drivers/hid/hid-logitech-hidpp.c 
b/drivers/hid/hid-logitech-hidpp.c index 1e1cf8eae649..b8b53dc95e86 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -3146,7 +3146,7 @@ static int hi_res_scroll_enable(struct hidpp_device *hidpp) multiplier = 1; hidpp->vertical_wheel_counter.wheel_multiplier = multiplier; - hid_info(hidpp->hid_dev, "multiplier = %d\n", multiplier); + hid_dbg(hidpp->hid_dev, "wheel multiplier = %d\n", multiplier); return 0; } diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index 34138667f8af..abd86903875f 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c @@ -535,6 +535,12 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd __set_bit(MSC_RAW, input->mscbit); } + /* + * hid-input may mark device as using autorepeat, but neither + * the trackpad, nor the mouse actually want it. + */ + __clear_bit(EV_REP, input->evbit); + return 0; } diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index ca8b5c261c7c..934fc0a798d4 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -88,6 +88,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096), HID_QUIRK_NO_INIT_REPORTS }, + { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A293), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL }, @@ -832,6 +833,7 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_PETZL, USB_DEVICE_ID_PETZL_HEADLAMP) }, { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) }, { HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SAI, USB_DEVICE_ID_CYPRESS_HIDCOM) }, #if IS_ENABLED(CONFIG_MOUSE_SYNAPTICS_USB) { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP) }, { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_INT_TP) }, diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c index 6286204d4c56..a3b151b29bd7 100644 --- a/drivers/hid/hid-steam.c +++ b/drivers/hid/hid-steam.c @@ -526,7 +526,8 @@ static int steam_register(struct steam_device *steam) steam_battery_register(steam); mutex_lock(&steam_devices_lock); - list_add(&steam->list, &steam_devices); + if (list_empty(&steam->list)) + list_add(&steam->list, &steam_devices); mutex_unlock(&steam_devices_lock); } @@ -552,7 +553,7 @@ static void steam_unregister(struct steam_device *steam) hid_info(steam->hdev, "Steam Controller '%s' disconnected", steam->serial_no); mutex_lock(&steam_devices_lock); - list_del(&steam->list); + list_del_init(&steam->list); mutex_unlock(&steam_devices_lock); steam->serial_no[0] = 0; } @@ -738,6 +739,7 @@ static int steam_probe(struct hid_device *hdev, mutex_init(&steam->mutex); steam->quirks = id->driver_data; INIT_WORK(&steam->work_connect, steam_work_connect_cb); + INIT_LIST_HEAD(&steam->list); steam->client_hdev = 
steam_create_client_hid(hdev); if (IS_ERR(steam->client_hdev)) { diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c index ec142bc8c1da..35f3bfc3e6f5 100644 --- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c +++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c @@ -374,6 +374,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = { .driver_data = (void *)&sipodev_desc }, { + .ident = "Mediacom FlexBook edge 13", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MEDIACOM"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "FlexBook_edge13-M-FBE13"), + }, + .driver_data = (void *)&sipodev_desc + }, + { .ident = "Odys Winbook 13", .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AXDIA International GmbH"), diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 9147ee9d5f7d..d69f4efa3719 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -1368,7 +1368,7 @@ static void hv_kmsg_dump(struct kmsg_dumper *dumper, * Write dump contents to the page. No need to synchronize; panic should * be single-threaded. */ - kmsg_dump_get_buffer(dumper, true, hv_panic_page, HV_HYP_PAGE_SIZE, + kmsg_dump_get_buffer(dumper, false, hv_panic_page, HV_HYP_PAGE_SIZE, &bytes_written); if (bytes_written) hyperv_report_panic_msg(panic_pa, bytes_written); diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c index 0db8ef4fd6e1..a270b975e90b 100644 --- a/drivers/hwmon/acpi_power_meter.c +++ b/drivers/hwmon/acpi_power_meter.c @@ -883,7 +883,7 @@ static int acpi_power_meter_add(struct acpi_device *device) res = setup_attrs(resource); if (res) - goto exit_free; + goto exit_free_capability; resource->hwmon_dev = hwmon_device_register(&device->dev); if (IS_ERR(resource->hwmon_dev)) { @@ -896,6 +896,8 @@ static int acpi_power_meter_add(struct acpi_device *device) exit_remove: remove_attrs(resource); +exit_free_capability: + free_capabilities(resource); exit_free: kfree(resource); exit: diff --git a/drivers/hwmon/amd_energy.c b/drivers/hwmon/amd_energy.c index e95b7426106e..29603742c858 100644 --- a/drivers/hwmon/amd_energy.c +++ b/drivers/hwmon/amd_energy.c @@ -362,7 +362,7 @@ static struct platform_driver amd_energy_driver = { static struct platform_device *amd_energy_platdev; static const struct x86_cpu_id cpu_ids[] __initconst = { - X86_MATCH_VENDOR_FAM(AMD, 0x17, NULL), + X86_MATCH_VENDOR_FAM_MODEL(AMD, 0x17, 0x31, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, cpu_ids); diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c index 33fb54845bf6..3d8239fd66ed 100644 --- a/drivers/hwmon/aspeed-pwm-tacho.c +++ b/drivers/hwmon/aspeed-pwm-tacho.c @@ -851,6 +851,8 @@ static int aspeed_create_fan(struct device *dev, ret = of_property_read_u32(child, "reg", &pwm_port); if (ret) return ret; + if (pwm_port >= ARRAY_SIZE(pwm_port_params)) + return -EINVAL; aspeed_create_pwm_port(priv, (u8)pwm_port); ret = of_property_count_u8_elems(child, "cooling-levels"); diff --git a/drivers/hwmon/bt1-pvt.c b/drivers/hwmon/bt1-pvt.c index 1a9772fb1f73..94698cae0497 100644 --- a/drivers/hwmon/bt1-pvt.c +++ b/drivers/hwmon/bt1-pvt.c @@ -64,7 +64,7 @@ static const struct pvt_sensor_info pvt_info[] = { * 48380, * where T = [-48380, 147438] mC and N = [0, 1023]. 
*/ -static const struct pvt_poly poly_temp_to_N = { +static const struct pvt_poly __maybe_unused poly_temp_to_N = { .total_divider = 10000, .terms = { {4, 18322, 10000, 10000}, @@ -96,7 +96,7 @@ static const struct pvt_poly poly_N_to_temp = { * N = (18658e-3*V - 11572) / 10, * V = N * 10^5 / 18658 + 11572 * 10^4 / 18658. */ -static const struct pvt_poly poly_volt_to_N = { +static const struct pvt_poly __maybe_unused poly_volt_to_N = { .total_divider = 10, .terms = { {1, 18658, 1000, 1}, @@ -300,12 +300,12 @@ static irqreturn_t pvt_soft_isr(int irq, void *data) return IRQ_HANDLED; } -inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type) +static inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type) { return 0644; } -inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type) +static inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type) { return 0444; } @@ -462,12 +462,12 @@ static irqreturn_t pvt_hard_isr(int irq, void *data) #define pvt_soft_isr NULL -inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type) +static inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type) { return 0; } -inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type) +static inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type) { return 0; } diff --git a/drivers/hwmon/drivetemp.c b/drivers/hwmon/drivetemp.c index 0d4f3d97ffc6..72c760373957 100644 --- a/drivers/hwmon/drivetemp.c +++ b/drivers/hwmon/drivetemp.c @@ -285,6 +285,42 @@ static int drivetemp_get_scttemp(struct drivetemp_data *st, u32 attr, long *val) return err; } +static const char * const sct_avoid_models[] = { +/* + * These drives will have WRITE FPDMA QUEUED command timeouts and sometimes just + * freeze until power-cycled under heavy write loads when their temperature is + * getting polled in SCT mode. The SMART mode seems to be fine, though. + * + * While only the 3 TB model (DT01ACA3) was actually caught exhibiting the + * problem let's play safe here to avoid data corruption and ban the whole + * DT01ACAx family. + + * The models from this array are prefix-matched. + */ + "TOSHIBA DT01ACA", +}; + +static bool drivetemp_sct_avoid(struct drivetemp_data *st) +{ + struct scsi_device *sdev = st->sdev; + unsigned int ctr; + + if (!sdev->model) + return false; + + /* + * The "model" field contains just the raw SCSI INQUIRY response + * "product identification" field, which has a width of 16 bytes. + * This field is space-filled, but is NOT NULL-terminated. 
+ */ + for (ctr = 0; ctr < ARRAY_SIZE(sct_avoid_models); ctr++) + if (!strncmp(sdev->model, sct_avoid_models[ctr], + strlen(sct_avoid_models[ctr]))) + return true; + + return false; +} + static int drivetemp_identify_sata(struct drivetemp_data *st) { struct scsi_device *sdev = st->sdev; @@ -326,6 +362,13 @@ static int drivetemp_identify_sata(struct drivetemp_data *st) /* bail out if this is not a SATA device */ if (!is_ata || !is_sata) return -ENODEV; + + if (have_sct && drivetemp_sct_avoid(st)) { + dev_notice(&sdev->sdev_gendev, + "will avoid using SCT for temperature monitoring\n"); + have_sct = false; + } + if (!have_sct) goto skip_sct; diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c index 491a570e8e50..924c02c1631d 100644 --- a/drivers/hwmon/emc2103.c +++ b/drivers/hwmon/emc2103.c @@ -443,7 +443,7 @@ static ssize_t pwm1_enable_store(struct device *dev, } result = read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg); - if (result) { + if (result < 0) { count = result; goto err; } diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c index 743752a2467a..64122eb38060 100644 --- a/drivers/hwmon/max6697.c +++ b/drivers/hwmon/max6697.c @@ -38,8 +38,9 @@ static const u8 MAX6697_REG_CRIT[] = { * Map device tree / platform data register bit map to chip bit map. * Applies to alert register and over-temperature register. */ -#define MAX6697_MAP_BITS(reg) ((((reg) & 0x7e) >> 1) | \ +#define MAX6697_ALERT_MAP_BITS(reg) ((((reg) & 0x7e) >> 1) | \ (((reg) & 0x01) << 6) | ((reg) & 0x80)) +#define MAX6697_OVERT_MAP_BITS(reg) (((reg) >> 1) | (((reg) & 0x01) << 7)) #define MAX6697_REG_STAT(n) (0x44 + (n)) @@ -562,12 +563,12 @@ static int max6697_init_chip(struct max6697_data *data, return ret; ret = i2c_smbus_write_byte_data(client, MAX6697_REG_ALERT_MASK, - MAX6697_MAP_BITS(pdata->alert_mask)); + MAX6697_ALERT_MAP_BITS(pdata->alert_mask)); if (ret < 0) return ret; ret = i2c_smbus_write_byte_data(client, MAX6697_REG_OVERT_MASK, - MAX6697_MAP_BITS(pdata->over_temperature_mask)); + MAX6697_OVERT_MAP_BITS(pdata->over_temperature_mask)); if (ret < 0) return ret; diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index e7e1ddc1d631..750b08713dee 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -786,13 +786,13 @@ static const char *const nct6798_temp_label[] = { "Agent1 Dimm1", "BYTE_TEMP0", "BYTE_TEMP1", - "", - "", + "PECI Agent 0 Calibration", /* undocumented */ + "PECI Agent 1 Calibration", /* undocumented */ "", "Virtual_TEMP" }; -#define NCT6798_TEMP_MASK 0x8fff0ffe +#define NCT6798_TEMP_MASK 0xbfff0ffe #define NCT6798_VIRT_TEMP_MASK 0x80000c00 /* NCT6102D/NCT6106D specific data */ diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig index a337195b1c39..ea516cec1d35 100644 --- a/drivers/hwmon/pmbus/Kconfig +++ b/drivers/hwmon/pmbus/Kconfig @@ -71,7 +71,7 @@ config SENSORS_IR35221 Infineon IR35221 controller. This driver can also be built as a module. If so, the module will - be called ir35521. + be called ir35221. 
config SENSORS_IR38064 tristate "Infineon IR38064" diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c index e25f541227da..19317575d1c6 100644 --- a/drivers/hwmon/pmbus/adm1275.c +++ b/drivers/hwmon/pmbus/adm1275.c @@ -465,6 +465,7 @@ MODULE_DEVICE_TABLE(i2c, adm1275_id); static int adm1275_probe(struct i2c_client *client, const struct i2c_device_id *id) { + s32 (*config_read_fn)(const struct i2c_client *client, u8 reg); u8 block_buffer[I2C_SMBUS_BLOCK_MAX + 1]; int config, device_config; int ret; @@ -510,11 +511,16 @@ static int adm1275_probe(struct i2c_client *client, "Device mismatch: Configured %s, detected %s\n", id->name, mid->name); - config = i2c_smbus_read_byte_data(client, ADM1275_PMON_CONFIG); + if (mid->driver_data == adm1272 || mid->driver_data == adm1278 || + mid->driver_data == adm1293 || mid->driver_data == adm1294) + config_read_fn = i2c_smbus_read_word_data; + else + config_read_fn = i2c_smbus_read_byte_data; + config = config_read_fn(client, ADM1275_PMON_CONFIG); if (config < 0) return config; - device_config = i2c_smbus_read_byte_data(client, ADM1275_DEVICE_CONFIG); + device_config = config_read_fn(client, ADM1275_DEVICE_CONFIG); if (device_config < 0) return device_config; diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index a420877ba533..2191575a448b 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c @@ -1869,7 +1869,7 @@ static int pmbus_add_fan_ctrl(struct i2c_client *client, struct pmbus_sensor *sensor; sensor = pmbus_add_sensor(data, "fan", "target", index, page, - PMBUS_VIRT_FAN_TARGET_1 + id, 0xff, PSC_FAN, + 0xff, PMBUS_VIRT_FAN_TARGET_1 + id, PSC_FAN, false, false, true); if (!sensor) @@ -1880,14 +1880,14 @@ static int pmbus_add_fan_ctrl(struct i2c_client *client, return 0; sensor = pmbus_add_sensor(data, "pwm", NULL, index, page, - PMBUS_VIRT_PWM_1 + id, 0xff, PSC_PWM, + 0xff, PMBUS_VIRT_PWM_1 + id, PSC_PWM, false, false, true); if (!sensor) return -ENOMEM; sensor = pmbus_add_sensor(data, "pwm", "enable", index, page, - PMBUS_VIRT_PWM_ENABLE_1 + id, 0xff, PSC_PWM, + 0xff, PMBUS_VIRT_PWM_ENABLE_1 + id, PSC_PWM, true, false, false); if (!sensor) @@ -1929,7 +1929,7 @@ static int pmbus_add_fan_attributes(struct i2c_client *client, continue; if (pmbus_add_sensor(data, "fan", "input", index, - page, pmbus_fan_registers[f], 0xff, + page, 0xff, pmbus_fan_registers[f], PSC_FAN, true, true, true) == NULL) return -ENOMEM; diff --git a/drivers/hwmon/scmi-hwmon.c b/drivers/hwmon/scmi-hwmon.c index 286d3cfda7de..d421e691318b 100644 --- a/drivers/hwmon/scmi-hwmon.c +++ b/drivers/hwmon/scmi-hwmon.c @@ -147,7 +147,7 @@ static enum hwmon_sensor_types scmi_types[] = { [ENERGY] = hwmon_energy, }; -static u32 hwmon_attributes[] = { +static u32 hwmon_attributes[hwmon_max] = { [hwmon_chip] = HWMON_C_REGISTER_TZ, [hwmon_temp] = HWMON_T_INPUT | HWMON_T_LABEL, [hwmon_in] = HWMON_I_INPUT | HWMON_I_LABEL, diff --git a/drivers/hwtracing/coresight/coresight-cti.c b/drivers/hwtracing/coresight/coresight-cti.c index 40387d58c8e7..3ccc703dc940 100644 --- a/drivers/hwtracing/coresight/coresight-cti.c +++ b/drivers/hwtracing/coresight/coresight-cti.c @@ -747,17 +747,50 @@ static int cti_dying_cpu(unsigned int cpu) return 0; } +static int cti_pm_setup(struct cti_drvdata *drvdata) +{ + int ret; + + if (drvdata->ctidev.cpu == -1) + return 0; + + if (nr_cti_cpu) + goto done; + + cpus_read_lock(); + ret = cpuhp_setup_state_nocalls_cpuslocked( + CPUHP_AP_ARM_CORESIGHT_CTI_STARTING, + 
"arm/coresight_cti:starting", + cti_starting_cpu, cti_dying_cpu); + if (ret) { + cpus_read_unlock(); + return ret; + } + + ret = cpu_pm_register_notifier(&cti_cpu_pm_nb); + cpus_read_unlock(); + if (ret) { + cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_CTI_STARTING); + return ret; + } + +done: + nr_cti_cpu++; + cti_cpu_drvdata[drvdata->ctidev.cpu] = drvdata; + + return 0; +} + /* release PM registrations */ static void cti_pm_release(struct cti_drvdata *drvdata) { - if (drvdata->ctidev.cpu >= 0) { - if (--nr_cti_cpu == 0) { - cpu_pm_unregister_notifier(&cti_cpu_pm_nb); + if (drvdata->ctidev.cpu == -1) + return; - cpuhp_remove_state_nocalls( - CPUHP_AP_ARM_CORESIGHT_CTI_STARTING); - } - cti_cpu_drvdata[drvdata->ctidev.cpu] = NULL; + cti_cpu_drvdata[drvdata->ctidev.cpu] = NULL; + if (--nr_cti_cpu == 0) { + cpu_pm_unregister_notifier(&cti_cpu_pm_nb); + cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_CTI_STARTING); } } @@ -823,19 +856,14 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id) /* driver data*/ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); - if (!drvdata) { - ret = -ENOMEM; - dev_info(dev, "%s, mem err\n", __func__); - goto err_out; - } + if (!drvdata) + return -ENOMEM; /* Validity for the resource is already checked by the AMBA core */ base = devm_ioremap_resource(dev, res); - if (IS_ERR(base)) { - ret = PTR_ERR(base); - dev_err(dev, "%s, remap err\n", __func__); - goto err_out; - } + if (IS_ERR(base)) + return PTR_ERR(base); + drvdata->base = base; dev_set_drvdata(dev, drvdata); @@ -854,8 +882,7 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id) pdata = coresight_cti_get_platform_data(dev); if (IS_ERR(pdata)) { dev_err(dev, "coresight_cti_get_platform_data err\n"); - ret = PTR_ERR(pdata); - goto err_out; + return PTR_ERR(pdata); } /* default to powered - could change on PM notifications */ @@ -867,35 +894,20 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id) drvdata->ctidev.cpu); else cti_desc.name = coresight_alloc_device_name(&cti_sys_devs, dev); - if (!cti_desc.name) { - ret = -ENOMEM; - goto err_out; - } + if (!cti_desc.name) + return -ENOMEM; /* setup CPU power management handling for CPU bound CTI devices. 
*/ - if (drvdata->ctidev.cpu >= 0) { - cti_cpu_drvdata[drvdata->ctidev.cpu] = drvdata; - if (!nr_cti_cpu++) { - cpus_read_lock(); - ret = cpuhp_setup_state_nocalls_cpuslocked( - CPUHP_AP_ARM_CORESIGHT_CTI_STARTING, - "arm/coresight_cti:starting", - cti_starting_cpu, cti_dying_cpu); - - if (!ret) - ret = cpu_pm_register_notifier(&cti_cpu_pm_nb); - cpus_read_unlock(); - if (ret) - goto err_out; - } - } + ret = cti_pm_setup(drvdata); + if (ret) + return ret; /* create dynamic attributes for connections */ ret = cti_create_cons_sysfs(dev, drvdata); if (ret) { dev_err(dev, "%s: create dynamic sysfs entries failed\n", cti_desc.name); - goto err_out; + goto pm_release; } /* set up coresight component description */ @@ -908,7 +920,7 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id) drvdata->csdev = coresight_register(&cti_desc); if (IS_ERR(drvdata->csdev)) { ret = PTR_ERR(drvdata->csdev); - goto err_out; + goto pm_release; } /* add to list of CTI devices */ @@ -927,7 +939,7 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id) dev_info(&drvdata->csdev->dev, "CTI initialized\n"); return 0; -err_out: +pm_release: cti_pm_release(drvdata); return ret; } diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c index 747afc875f91..0c35cd5e0d1d 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.c +++ b/drivers/hwtracing/coresight/coresight-etm4x.c @@ -1388,18 +1388,57 @@ static struct notifier_block etm4_cpu_pm_nb = { .notifier_call = etm4_cpu_pm_notify, }; -static int etm4_cpu_pm_register(void) +/* Setup PM. Called with cpus locked. Deals with error conditions and counts */ +static int etm4_pm_setup_cpuslocked(void) { - if (IS_ENABLED(CONFIG_CPU_PM)) - return cpu_pm_register_notifier(&etm4_cpu_pm_nb); + int ret; - return 0; + if (etm4_count++) + return 0; + + ret = cpu_pm_register_notifier(&etm4_cpu_pm_nb); + if (ret) + goto reduce_count; + + ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING, + "arm/coresight4:starting", + etm4_starting_cpu, etm4_dying_cpu); + + if (ret) + goto unregister_notifier; + + ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN, + "arm/coresight4:online", + etm4_online_cpu, NULL); + + /* HP dyn state ID returned in ret on success */ + if (ret > 0) { + hp_online = ret; + return 0; + } + + /* failed dyn state - remove others */ + cpuhp_remove_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING); + +unregister_notifier: + cpu_pm_unregister_notifier(&etm4_cpu_pm_nb); + +reduce_count: + --etm4_count; + return ret; } -static void etm4_cpu_pm_unregister(void) +static void etm4_pm_clear(void) { - if (IS_ENABLED(CONFIG_CPU_PM)) - cpu_pm_unregister_notifier(&etm4_cpu_pm_nb); + if (--etm4_count != 0) + return; + + cpu_pm_unregister_notifier(&etm4_cpu_pm_nb); + cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING); + if (hp_online) { + cpuhp_remove_state_nocalls(hp_online); + hp_online = 0; + } } static int etm4_probe(struct amba_device *adev, const struct amba_id *id) @@ -1453,24 +1492,15 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id) etm4_init_arch_data, drvdata, 1)) dev_err(dev, "ETM arch init failed\n"); - if (!etm4_count++) { - cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING, - "arm/coresight4:starting", - etm4_starting_cpu, etm4_dying_cpu); - ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN, - "arm/coresight4:online", - etm4_online_cpu, NULL); - if (ret < 0) - goto 
err_arch_supported; - hp_online = ret; + ret = etm4_pm_setup_cpuslocked(); + cpus_read_unlock(); - ret = etm4_cpu_pm_register(); - if (ret) - goto err_arch_supported; + /* etm4_pm_setup_cpuslocked() does its own cleanup - exit on error */ + if (ret) { + etmdrvdata[drvdata->cpu] = NULL; + return ret; } - cpus_read_unlock(); - if (etm4_arch_supported(drvdata->arch) == false) { ret = -EINVAL; goto err_arch_supported; @@ -1517,13 +1547,7 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id) err_arch_supported: etmdrvdata[drvdata->cpu] = NULL; - if (--etm4_count == 0) { - etm4_cpu_pm_unregister(); - - cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING); - if (hp_online) - cpuhp_remove_state_nocalls(hp_online); - } + etm4_pm_clear(); return ret; } diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c index ca232ec565e8..c9ac3dc65113 100644 --- a/drivers/hwtracing/intel_th/core.c +++ b/drivers/hwtracing/intel_th/core.c @@ -1021,15 +1021,30 @@ int intel_th_set_output(struct intel_th_device *thdev, { struct intel_th_device *hub = to_intel_th_hub(thdev); struct intel_th_driver *hubdrv = to_intel_th_driver(hub->dev.driver); + int ret; /* In host mode, this is up to the external debugger, do nothing. */ if (hub->host_mode) return 0; - if (!hubdrv->set_output) - return -ENOTSUPP; + /* + * hub is instantiated together with the source device that + * calls here, so guaranteed to be present. + */ + hubdrv = to_intel_th_driver(hub->dev.driver); + if (!hubdrv || !try_module_get(hubdrv->driver.owner)) + return -EINVAL; + + if (!hubdrv->set_output) { + ret = -ENOTSUPP; + goto out; + } + + ret = hubdrv->set_output(hub, master); - return hubdrv->set_output(hub, master); +out: + module_put(hubdrv->driver.owner); + return ret; } EXPORT_SYMBOL_GPL(intel_th_set_output); diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index 7ccac74553a6..21fdf0b93516 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c @@ -234,11 +234,21 @@ static const struct pci_device_id intel_th_pci_id_table[] = { .driver_data = (kernel_ulong_t)&intel_th_2x, }, { + /* Tiger Lake PCH-H */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x43a6), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { /* Jasper Lake PCH */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4da6), .driver_data = (kernel_ulong_t)&intel_th_2x, }, { + /* Jasper Lake CPU */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4e29), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { /* Elkhart Lake CPU */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4529), .driver_data = (kernel_ulong_t)&intel_th_2x, @@ -248,6 +258,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4b26), .driver_data = (kernel_ulong_t)&intel_th_2x, }, + { + /* Emmitsburg PCH */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1bcc), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, { 0 }, }; diff --git a/drivers/hwtracing/intel_th/sth.c b/drivers/hwtracing/intel_th/sth.c index 3a1f4e650378..a1529f571491 100644 --- a/drivers/hwtracing/intel_th/sth.c +++ b/drivers/hwtracing/intel_th/sth.c @@ -161,9 +161,7 @@ static int sth_stm_link(struct stm_data *stm_data, unsigned int master, { struct sth_device *sth = container_of(stm_data, struct sth_device, stm); - intel_th_set_output(to_intel_th_device(sth->dev), master); - - return 0; + return intel_th_set_output(to_intel_th_device(sth->dev), master); } static int intel_th_sw_init(struct sth_device *sth) diff --git a/drivers/i2c/Kconfig 
b/drivers/i2c/Kconfig index ef39c83aaf33..bae1dc08ec9a 100644 --- a/drivers/i2c/Kconfig +++ b/drivers/i2c/Kconfig @@ -113,11 +113,18 @@ config I2C_STUB config I2C_SLAVE bool "I2C slave support" + help + This enables Linux to act as an I2C slave device. Note that your I2C + bus master driver also needs to support this functionality. Please + read Documentation/i2c/slave-interface.rst for further details. if I2C_SLAVE config I2C_SLAVE_EEPROM tristate "I2C eeprom slave driver" + help + This backend makes Linux behave like an I2C EEPROM. Please read + Documentation/i2c/slave-eeprom-backend.rst for further details. endif diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c index 7f10312d1b88..388978775be0 100644 --- a/drivers/i2c/algos/i2c-algo-pca.c +++ b/drivers/i2c/algos/i2c-algo-pca.c @@ -314,7 +314,8 @@ static int pca_xfer(struct i2c_adapter *i2c_adap, DEB2("BUS ERROR - SDA Stuck low\n"); pca_reset(adap); goto out; - case 0x90: /* Bus error - SCL stuck low */ + case 0x78: /* Bus error - SCL stuck low (PCA9665) */ + case 0x90: /* Bus error - SCL stuck low (PCA9564) */ DEB2("BUS ERROR - SCL Stuck low\n"); pca_reset(adap); goto out; diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c index e3a8640db7da..3c19aada4b30 100644 --- a/drivers/i2c/busses/i2c-designware-common.c +++ b/drivers/i2c/busses/i2c-designware-common.c @@ -286,10 +286,8 @@ int i2c_dw_acpi_configure(struct device *device) } EXPORT_SYMBOL_GPL(i2c_dw_acpi_configure); -void i2c_dw_acpi_adjust_bus_speed(struct device *device) +static u32 i2c_dw_acpi_round_bus_speed(struct device *device) { - struct dw_i2c_dev *dev = dev_get_drvdata(device); - struct i2c_timings *t = &dev->timings; u32 acpi_speed; int i; @@ -300,9 +298,22 @@ void i2c_dw_acpi_adjust_bus_speed(struct device *device) */ for (i = 0; i < ARRAY_SIZE(supported_speeds); i++) { if (acpi_speed >= supported_speeds[i]) - break; + return supported_speeds[i]; } - acpi_speed = i < ARRAY_SIZE(supported_speeds) ? 
supported_speeds[i] : 0; + + return 0; +} + +#else /* CONFIG_ACPI */ + +static inline u32 i2c_dw_acpi_round_bus_speed(struct device *device) { return 0; } + +#endif /* CONFIG_ACPI */ + +void i2c_dw_adjust_bus_speed(struct dw_i2c_dev *dev) +{ + u32 acpi_speed = i2c_dw_acpi_round_bus_speed(dev->dev); + struct i2c_timings *t = &dev->timings; /* * Find bus speed from the "clock-frequency" device property, ACPI @@ -315,9 +326,7 @@ void i2c_dw_acpi_adjust_bus_speed(struct device *device) else t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ; } -EXPORT_SYMBOL_GPL(i2c_dw_acpi_adjust_bus_speed); - -#endif /* CONFIG_ACPI */ +EXPORT_SYMBOL_GPL(i2c_dw_adjust_bus_speed); u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset) { diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index 556673a1f61b..eb5ef4d0f463 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -361,11 +361,10 @@ static inline int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev) { return 0; #endif int i2c_dw_validate_speed(struct dw_i2c_dev *dev); +void i2c_dw_adjust_bus_speed(struct dw_i2c_dev *dev); #if IS_ENABLED(CONFIG_ACPI) int i2c_dw_acpi_configure(struct device *device); -void i2c_dw_acpi_adjust_bus_speed(struct device *device); #else static inline int i2c_dw_acpi_configure(struct device *device) { return -ENODEV; } -static inline void i2c_dw_acpi_adjust_bus_speed(struct device *device) {} #endif diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c index 947c096f86e3..8522134f9ea9 100644 --- a/drivers/i2c/busses/i2c-designware-pcidrv.c +++ b/drivers/i2c/busses/i2c-designware-pcidrv.c @@ -240,7 +240,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, } } - i2c_dw_acpi_adjust_bus_speed(&pdev->dev); + i2c_dw_adjust_bus_speed(dev); if (has_acpi_companion(&pdev->dev)) i2c_dw_acpi_configure(&pdev->dev); diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 0de4e302fc6a..a71bc58fc03c 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -12,6 +12,7 @@ #include <linux/clk-provider.h> #include <linux/clk.h> #include <linux/delay.h> +#include <linux/dmi.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/i2c.h> @@ -191,6 +192,17 @@ static int dw_i2c_plat_request_regs(struct dw_i2c_dev *dev) return ret; } +static const struct dmi_system_id dw_i2c_hwmon_class_dmi[] = { + { + .ident = "Qtechnology QT5222", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Qtechnology"), + DMI_MATCH(DMI_PRODUCT_NAME, "QT5222"), + }, + }, + { } /* terminate list */ +}; + static int dw_i2c_plat_probe(struct platform_device *pdev) { struct dw_i2c_platform_data *pdata = dev_get_platdata(&pdev->dev); @@ -228,7 +240,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) else i2c_parse_fw_timings(&pdev->dev, t, false); - i2c_dw_acpi_adjust_bus_speed(&pdev->dev); + i2c_dw_adjust_bus_speed(dev); if (pdev->dev.of_node) dw_i2c_of_configure(pdev); @@ -267,7 +279,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) adap = &dev->adapter; adap->owner = THIS_MODULE; - adap->class = I2C_CLASS_DEPRECATED; + adap->class = dmi_check_system(dw_i2c_hwmon_class_dmi) ? 
+ I2C_CLASS_HWMON : I2C_CLASS_DEPRECATED; ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev)); adap->dev.of_node = pdev->dev.of_node; adap->nr = -1; diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c index bb810dee8fb5..73f139690e4e 100644 --- a/drivers/i2c/busses/i2c-eg20t.c +++ b/drivers/i2c/busses/i2c-eg20t.c @@ -180,6 +180,7 @@ static const struct pci_device_id pch_pcidev_id[] = { { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_I2C), 1, }, {0,} }; +MODULE_DEVICE_TABLE(pci, pch_pcidev_id); static irqreturn_t pch_i2c_handler(int irq, void *pData); diff --git a/drivers/i2c/busses/i2c-fsi.c b/drivers/i2c/busses/i2c-fsi.c index e0c256922d4f..977d6f524649 100644 --- a/drivers/i2c/busses/i2c-fsi.c +++ b/drivers/i2c/busses/i2c-fsi.c @@ -98,7 +98,7 @@ #define I2C_STAT_DAT_REQ BIT(25) #define I2C_STAT_CMD_COMP BIT(24) #define I2C_STAT_STOP_ERR BIT(23) -#define I2C_STAT_MAX_PORT GENMASK(19, 16) +#define I2C_STAT_MAX_PORT GENMASK(22, 16) #define I2C_STAT_ANY_INT BIT(15) #define I2C_STAT_SCL_IN BIT(11) #define I2C_STAT_SDA_IN BIT(10) diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c index 2fd717d8dd30..71d7bae2cbca 100644 --- a/drivers/i2c/busses/i2c-mlxcpld.c +++ b/drivers/i2c/busses/i2c-mlxcpld.c @@ -337,9 +337,9 @@ static int mlxcpld_i2c_wait_for_tc(struct mlxcpld_i2c_priv *priv) if (priv->smbus_block && (val & MLXCPLD_I2C_SMBUS_BLK_BIT)) { mlxcpld_i2c_read_comm(priv, MLXCPLD_LPCI2C_NUM_DAT_REG, &datalen, 1); - if (unlikely(datalen > (I2C_SMBUS_BLOCK_MAX + 1))) { + if (unlikely(datalen > I2C_SMBUS_BLOCK_MAX)) { dev_err(priv->dev, "Incorrect smbus block read message len\n"); - return -E2BIG; + return -EPROTO; } } else { datalen = priv->xfer.data_len; diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c index 56bb840142e3..f5c9787992e9 100644 --- a/drivers/i2c/i2c-core-smbus.c +++ b/drivers/i2c/i2c-core-smbus.c @@ -495,6 +495,13 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr, break; case I2C_SMBUS_BLOCK_DATA: case I2C_SMBUS_BLOCK_PROC_CALL: + if (msg[1].buf[0] > I2C_SMBUS_BLOCK_MAX) { + dev_err(&adapter->dev, + "Invalid block size returned: %d\n", + msg[1].buf[0]); + status = -EPROTO; + goto cleanup; + } for (i = 0; i < msg[1].buf[0] + 1; i++) data->block[i] = msg[1].buf[i]; break; diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index 00e100fc845a..813bca7cfc3e 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c @@ -1685,10 +1685,13 @@ static int mma8452_probe(struct i2c_client *client, ret = mma8452_set_freefall_mode(data, false); if (ret < 0) - goto buffer_cleanup; + goto unregister_device; return 0; +unregister_device: + iio_device_unregister(indio_dev); + buffer_cleanup: iio_triggered_buffer_cleanup(indio_dev); diff --git a/drivers/iio/adc/ad7780.c b/drivers/iio/adc/ad7780.c index f47606ebbbbe..b33fe6c3907e 100644 --- a/drivers/iio/adc/ad7780.c +++ b/drivers/iio/adc/ad7780.c @@ -329,7 +329,7 @@ static int ad7780_probe(struct spi_device *spi) ret = ad7780_init_gpios(&spi->dev, st); if (ret) - goto error_cleanup_buffer_and_trigger; + return ret; st->reg = devm_regulator_get(&spi->dev, "avdd"); if (IS_ERR(st->reg)) diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c index c24c8da99eb4..7af8f0510535 100644 --- a/drivers/iio/adc/adi-axi-adc.c +++ b/drivers/iio/adc/adi-axi-adc.c @@ -332,12 +332,12 @@ static struct adi_axi_adc_client *adi_axi_adc_attach_client(struct device *dev) if (cl->dev->of_node != cln) continue; - if 
(!try_module_get(dev->driver->owner)) { + if (!try_module_get(cl->dev->driver->owner)) { mutex_unlock(&registered_clients_lock); return ERR_PTR(-ENODEV); } - get_device(dev); + get_device(cl->dev); cl->info = info; mutex_unlock(&registered_clients_lock); return cl; } diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c index e9f87e42ff4f..a3507624b30f 100644 --- a/drivers/iio/health/afe4403.c +++ b/drivers/iio/health/afe4403.c @@ -65,6 +65,7 @@ static const struct reg_field afe4403_reg_fields[] = { * @regulator: Pointer to the regulator for the IC * @trig: IIO trigger for this device * @irq: ADC_RDY line interrupt number + * @buffer: Used to construct data layout to push into IIO buffer. */ struct afe4403_data { struct device *dev; @@ -74,6 +75,8 @@ struct afe4403_data { struct regulator *regulator; struct iio_trigger *trig; int irq; + /* Ensure suitable alignment for timestamp */ + s32 buffer[8] __aligned(8); }; enum afe4403_chan_id { @@ -309,7 +312,6 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private) struct iio_dev *indio_dev = pf->indio_dev; struct afe4403_data *afe = iio_priv(indio_dev); int ret, bit, i = 0; - s32 buffer[8]; u8 tx[4] = {AFE440X_CONTROL0, 0x0, 0x0, AFE440X_CONTROL0_READ}; u8 rx[3]; @@ -326,7 +328,7 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private) if (ret) goto err; - buffer[i++] = get_unaligned_be24(&rx[0]); + afe->buffer[i++] = get_unaligned_be24(&rx[0]); } /* Disable reading from the device */ @@ -335,7 +337,8 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private) if (ret) goto err; - iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp); + iio_push_to_buffers_with_timestamp(indio_dev, afe->buffer, + pf->timestamp); err: iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c index e728bbb21ca8..cebb1fd4d0b1 100644 --- a/drivers/iio/health/afe4404.c +++ b/drivers/iio/health/afe4404.c @@ -83,6 +83,7 @@ static const struct reg_field afe4404_reg_fields[] = { * @regulator: Pointer to the regulator for the IC * @trig: IIO trigger for this device * @irq: ADC_RDY line interrupt number + * @buffer: Used to construct a scan to push to the iio buffer.
*/ struct afe4404_data { struct device *dev; @@ -91,6 +92,7 @@ struct afe4404_data { struct regulator *regulator; struct iio_trigger *trig; int irq; + s32 buffer[10] __aligned(8); }; enum afe4404_chan_id { @@ -328,17 +330,17 @@ static irqreturn_t afe4404_trigger_handler(int irq, void *private) struct iio_dev *indio_dev = pf->indio_dev; struct afe4404_data *afe = iio_priv(indio_dev); int ret, bit, i = 0; - s32 buffer[10]; for_each_set_bit(bit, indio_dev->active_scan_mask, indio_dev->masklength) { ret = regmap_read(afe->regmap, afe4404_channel_values[bit], - &buffer[i++]); + &afe->buffer[i++]); if (ret) goto err; } - iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp); + iio_push_to_buffers_with_timestamp(indio_dev, afe->buffer, + pf->timestamp); err: iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c index 7ecd2ffa3132..665eb7e38293 100644 --- a/drivers/iio/humidity/hdc100x.c +++ b/drivers/iio/humidity/hdc100x.c @@ -38,6 +38,11 @@ struct hdc100x_data { /* integration time of the sensor */ int adc_int_us[2]; + /* Ensure natural alignment of timestamp */ + struct { + __be16 channels[2]; + s64 ts __aligned(8); + } scan; }; /* integration time in us */ @@ -322,7 +327,6 @@ static irqreturn_t hdc100x_trigger_handler(int irq, void *p) struct i2c_client *client = data->client; int delay = data->adc_int_us[0] + data->adc_int_us[1]; int ret; - s16 buf[8]; /* 2x s16 + padding + 8 byte timestamp */ /* dual read starts at temp register */ mutex_lock(&data->lock); @@ -333,13 +337,13 @@ static irqreturn_t hdc100x_trigger_handler(int irq, void *p) } usleep_range(delay, delay + 1000); - ret = i2c_master_recv(client, (u8 *)buf, 4); + ret = i2c_master_recv(client, (u8 *)data->scan.channels, 4); if (ret < 0) { dev_err(&client->dev, "cannot read sensor data\n"); goto err; } - iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, iio_get_time_ns(indio_dev)); err: mutex_unlock(&data->lock); diff --git a/drivers/iio/humidity/hts221.h b/drivers/iio/humidity/hts221.h index 7d6771f7cf47..b2eb5abeaccd 100644 --- a/drivers/iio/humidity/hts221.h +++ b/drivers/iio/humidity/hts221.h @@ -14,8 +14,6 @@ #include <linux/iio/iio.h> -#define HTS221_DATA_SIZE 2 - enum hts221_sensor_type { HTS221_SENSOR_H, HTS221_SENSOR_T, @@ -39,6 +37,11 @@ struct hts221_hw { bool enabled; u8 odr; + /* Ensure natural alignment of timestamp */ + struct { + __le16 channels[2]; + s64 ts __aligned(8); + } scan; }; extern const struct dev_pm_ops hts221_pm_ops; diff --git a/drivers/iio/humidity/hts221_buffer.c b/drivers/iio/humidity/hts221_buffer.c index 9fb3f33614d4..ba7d413d75ba 100644 --- a/drivers/iio/humidity/hts221_buffer.c +++ b/drivers/iio/humidity/hts221_buffer.c @@ -160,7 +160,6 @@ static const struct iio_buffer_setup_ops hts221_buffer_ops = { static irqreturn_t hts221_buffer_handler_thread(int irq, void *p) { - u8 buffer[ALIGN(2 * HTS221_DATA_SIZE, sizeof(s64)) + sizeof(s64)]; struct iio_poll_func *pf = p; struct iio_dev *iio_dev = pf->indio_dev; struct hts221_hw *hw = iio_priv(iio_dev); @@ -170,18 +169,20 @@ static irqreturn_t hts221_buffer_handler_thread(int irq, void *p) /* humidity data */ ch = &iio_dev->channels[HTS221_SENSOR_H]; err = regmap_bulk_read(hw->regmap, ch->address, - buffer, HTS221_DATA_SIZE); + &hw->scan.channels[0], + sizeof(hw->scan.channels[0])); if (err < 0) goto out; /* temperature data */ ch = &iio_dev->channels[HTS221_SENSOR_T]; err = regmap_bulk_read(hw->regmap, ch->address, 
- buffer + HTS221_DATA_SIZE, HTS221_DATA_SIZE); + &hw->scan.channels[1], + sizeof(hw->scan.channels[1])); if (err < 0) goto out; - iio_push_to_buffers_with_timestamp(iio_dev, buffer, + iio_push_to_buffers_with_timestamp(iio_dev, &hw->scan, iio_get_time_ns(iio_dev)); out: diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 1527f01a44f1..352533342702 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -130,6 +130,8 @@ static const char * const iio_modifier_names[] = { [IIO_MOD_PM2P5] = "pm2p5", [IIO_MOD_PM4] = "pm4", [IIO_MOD_PM10] = "pm10", + [IIO_MOD_ETHANOL] = "ethanol", + [IIO_MOD_H2] = "h2", }; /* relies on pairs of these shared then separate */ diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c index 810fdfd37c88..91c39352fba2 100644 --- a/drivers/iio/magnetometer/ak8974.c +++ b/drivers/iio/magnetometer/ak8974.c @@ -192,6 +192,11 @@ struct ak8974 { bool drdy_irq; struct completion drdy_complete; bool drdy_active_low; + /* Ensure timestamp is naturally aligned */ + struct { + __le16 channels[3]; + s64 ts __aligned(8); + } scan; }; static const char ak8974_reg_avdd[] = "avdd"; @@ -657,7 +662,6 @@ static void ak8974_fill_buffer(struct iio_dev *indio_dev) { struct ak8974 *ak8974 = iio_priv(indio_dev); int ret; - __le16 hw_values[8]; /* Three axes + 64bit padding */ pm_runtime_get_sync(&ak8974->i2c->dev); mutex_lock(&ak8974->lock); @@ -667,13 +671,13 @@ static void ak8974_fill_buffer(struct iio_dev *indio_dev) dev_err(&ak8974->i2c->dev, "error triggering measure\n"); goto out_unlock; } - ret = ak8974_getresult(ak8974, hw_values); + ret = ak8974_getresult(ak8974, ak8974->scan.channels); if (ret) { dev_err(&ak8974->i2c->dev, "error getting measures\n"); goto out_unlock; } - iio_push_to_buffers_with_timestamp(indio_dev, hw_values, + iio_push_to_buffers_with_timestamp(indio_dev, &ak8974->scan, iio_get_time_ns(indio_dev)); out_unlock: @@ -862,19 +866,21 @@ static int ak8974_probe(struct i2c_client *i2c, ak8974->map = devm_regmap_init_i2c(i2c, &ak8974_regmap_config); if (IS_ERR(ak8974->map)) { dev_err(&i2c->dev, "failed to allocate register map\n"); + pm_runtime_put_noidle(&i2c->dev); + pm_runtime_disable(&i2c->dev); return PTR_ERR(ak8974->map); } ret = ak8974_set_power(ak8974, AK8974_PWR_ON); if (ret) { dev_err(&i2c->dev, "could not power on\n"); - goto power_off; + goto disable_pm; } ret = ak8974_detect(ak8974); if (ret) { dev_err(&i2c->dev, "neither AK8974 nor AMI30x found\n"); - goto power_off; + goto disable_pm; } ret = ak8974_selftest(ak8974); @@ -884,14 +890,9 @@ static int ak8974_probe(struct i2c_client *i2c, ret = ak8974_reset(ak8974); if (ret) { dev_err(&i2c->dev, "AK8974 reset failed\n"); - goto power_off; + goto disable_pm; } - pm_runtime_set_autosuspend_delay(&i2c->dev, - AK8974_AUTOSUSPEND_DELAY); - pm_runtime_use_autosuspend(&i2c->dev); - pm_runtime_put(&i2c->dev); - indio_dev->dev.parent = &i2c->dev; switch (ak8974->variant) { case AK8974_WHOAMI_VALUE_AMI306: @@ -957,6 +958,11 @@ no_irq: goto cleanup_buffer; } + pm_runtime_set_autosuspend_delay(&i2c->dev, + AK8974_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(&i2c->dev); + pm_runtime_put(&i2c->dev); + return 0; cleanup_buffer: @@ -965,7 +971,6 @@ disable_pm: pm_runtime_put_noidle(&i2c->dev); pm_runtime_disable(&i2c->dev); ak8974_set_power(ak8974, AK8974_PWR_OFF); -power_off: regulator_bulk_disable(ARRAY_SIZE(ak8974->regs), ak8974->regs); return ret; diff --git a/drivers/iio/pressure/ms5611_core.c 
b/drivers/iio/pressure/ms5611_core.c index 2f598ad91621..f5db9fa086f3 100644 --- a/drivers/iio/pressure/ms5611_core.c +++ b/drivers/iio/pressure/ms5611_core.c @@ -212,16 +212,21 @@ static irqreturn_t ms5611_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct ms5611_state *st = iio_priv(indio_dev); - s32 buf[4]; /* s32 (pressure) + s32 (temp) + 2 * s32 (timestamp) */ + /* Ensure buffer elements are naturally aligned */ + struct { + s32 channels[2]; + s64 ts __aligned(8); + } scan; int ret; mutex_lock(&st->lock); - ret = ms5611_read_temp_and_pressure(indio_dev, &buf[1], &buf[0]); + ret = ms5611_read_temp_and_pressure(indio_dev, &scan.channels[1], + &scan.channels[0]); mutex_unlock(&st->lock); if (ret < 0) goto err; - iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_push_to_buffers_with_timestamp(indio_dev, &scan, iio_get_time_ns(indio_dev)); err: diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c index 37fe851f89af..799a8dc3e248 100644 --- a/drivers/iio/pressure/zpa2326.c +++ b/drivers/iio/pressure/zpa2326.c @@ -665,8 +665,10 @@ static int zpa2326_resume(const struct iio_dev *indio_dev) int err; err = pm_runtime_get_sync(indio_dev->dev.parent); - if (err < 0) + if (err < 0) { + pm_runtime_put(indio_dev->dev.parent); return err; + } if (err > 0) { /* diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 9ce787e37e22..0d1377232933 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -918,6 +918,7 @@ static void cm_free_work(struct cm_work *work) static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv, struct cm_work *work) + __releases(&cm_id_priv->lock) { bool immediate; diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 3d7cc9f0f3d4..c30cf5307ce3 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -1624,6 +1624,8 @@ static struct rdma_id_private *cma_find_listener( { struct rdma_id_private *id_priv, *id_priv_dev; + lockdep_assert_held(&lock); + if (!bind_list) return ERR_PTR(-EINVAL); @@ -1670,6 +1672,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id, } } + mutex_lock(&lock); /* * Net namespace might be getting deleted while route lookup, * cm_id lookup is in progress. 
Therefore, perform netdevice @@ -1711,6 +1714,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id, id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev); err: rcu_read_unlock(); + mutex_unlock(&lock); if (IS_ERR(id_priv) && *net_dev) { dev_put(*net_dev); *net_dev = NULL; @@ -2492,6 +2496,8 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv, struct net *net = id_priv->id.route.addr.dev_addr.net; int ret; + lockdep_assert_held(&lock); + if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1)) return; @@ -3342,6 +3348,8 @@ static void cma_bind_port(struct rdma_bind_list *bind_list, u64 sid, mask; __be16 port; + lockdep_assert_held(&lock); + addr = cma_src_addr(id_priv); port = htons(bind_list->port); @@ -3370,6 +3378,8 @@ static int cma_alloc_port(enum rdma_ucm_port_space ps, struct rdma_bind_list *bind_list; int ret; + lockdep_assert_held(&lock); + bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL); if (!bind_list) return -ENOMEM; @@ -3396,6 +3406,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list, struct sockaddr *saddr = cma_src_addr(id_priv); __be16 dport = cma_port(daddr); + lockdep_assert_held(&lock); + hlist_for_each_entry(cur_id, &bind_list->owners, node) { struct sockaddr *cur_daddr = cma_dst_addr(cur_id); struct sockaddr *cur_saddr = cma_src_addr(cur_id); @@ -3435,6 +3447,8 @@ static int cma_alloc_any_port(enum rdma_ucm_port_space ps, unsigned int rover; struct net *net = id_priv->id.route.addr.dev_addr.net; + lockdep_assert_held(&lock); + inet_get_local_port_range(net, &low, &high); remaining = (high - low) + 1; rover = prandom_u32() % remaining + low; @@ -3482,6 +3496,8 @@ static int cma_check_port(struct rdma_bind_list *bind_list, struct rdma_id_private *cur_id; struct sockaddr *addr, *cur_addr; + lockdep_assert_held(&lock); + addr = cma_src_addr(id_priv); hlist_for_each_entry(cur_id, &bind_list->owners, node) { if (id_priv == cur_id) @@ -3512,6 +3528,8 @@ static int cma_use_port(enum rdma_ucm_port_space ps, unsigned short snum; int ret; + lockdep_assert_held(&lock); + snum = ntohs(cma_port(cma_src_addr(id_priv))); if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) return -EACCES; diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c index 2257d7f7810f..738d1faf4bba 100644 --- a/drivers/infiniband/core/counters.c +++ b/drivers/infiniband/core/counters.c @@ -202,7 +202,7 @@ static int __rdma_counter_unbind_qp(struct ib_qp *qp) return ret; } -static void counter_history_stat_update(const struct rdma_counter *counter) +static void counter_history_stat_update(struct rdma_counter *counter) { struct ib_device *dev = counter->device; struct rdma_port_counter *port_counter; @@ -212,6 +212,8 @@ static void counter_history_stat_update(const struct rdma_counter *counter) if (!port_counter->hstats) return; + rdma_counter_query_stats(counter); + for (i = 0; i < counter->stats->num_counters; i++) port_counter->hstats->value[i] += counter->stats->value[i]; } diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 186e0d652e8b..a09f8e3c7f3f 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -509,10 +509,10 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv) xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid); flush_workqueue(port_priv->wq); - ib_cancel_rmpp_recvs(mad_agent_priv); deref_mad_agent(mad_agent_priv); wait_for_completion(&mad_agent_priv->comp); + ib_cancel_rmpp_recvs(mad_agent_priv); 
ib_mad_agent_security_cleanup(&mad_agent_priv->agent); @@ -2718,6 +2718,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, DMA_FROM_DEVICE); if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, sg_list.addr))) { + kfree(mad_priv); ret = -ENOMEM; break; } diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c index 38de4942c682..3027cd2fb247 100644 --- a/drivers/infiniband/core/rdma_core.c +++ b/drivers/infiniband/core/rdma_core.c @@ -470,40 +470,46 @@ static struct ib_uobject * alloc_begin_fd_uobject(const struct uverbs_api_object *obj, struct uverbs_attr_bundle *attrs) { - const struct uverbs_obj_fd_type *fd_type = - container_of(obj->type_attrs, struct uverbs_obj_fd_type, type); + const struct uverbs_obj_fd_type *fd_type; int new_fd; - struct ib_uobject *uobj; + struct ib_uobject *uobj, *ret; struct file *filp; + uobj = alloc_uobj(attrs, obj); + if (IS_ERR(uobj)) + return uobj; + + fd_type = + container_of(obj->type_attrs, struct uverbs_obj_fd_type, type); if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release && - fd_type->fops->release != &uverbs_async_event_release)) - return ERR_PTR(-EINVAL); + fd_type->fops->release != &uverbs_async_event_release)) { + ret = ERR_PTR(-EINVAL); + goto err_fd; + } new_fd = get_unused_fd_flags(O_CLOEXEC); - if (new_fd < 0) - return ERR_PTR(new_fd); - - uobj = alloc_uobj(attrs, obj); - if (IS_ERR(uobj)) + if (new_fd < 0) { + ret = ERR_PTR(new_fd); goto err_fd; + } /* Note that uverbs_uobject_fd_release() is called during abort */ filp = anon_inode_getfile(fd_type->name, fd_type->fops, NULL, fd_type->flags); if (IS_ERR(filp)) { - uverbs_uobject_put(uobj); - uobj = ERR_CAST(filp); - goto err_fd; + ret = ERR_CAST(filp); + goto err_getfile; } uobj->object = filp; uobj->id = new_fd; return uobj; -err_fd: +err_getfile: put_unused_fd(new_fd); - return uobj; +err_fd: + uverbs_uobject_put(uobj); + return ret; } struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj, diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index a2ed09a3c714..8c930bf1df89 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -829,13 +829,20 @@ static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask) return len; } -static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask) +static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask) { struct sk_buff *skb = NULL; struct nlmsghdr *nlh; void *data; struct ib_sa_mad *mad; int len; + unsigned long flags; + unsigned long delay; + gfp_t gfp_flag; + int ret; + + INIT_LIST_HEAD(&query->list); + query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq); mad = query->mad_buf->mad; len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask); @@ -860,36 +867,25 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask) /* Repair the nlmsg header length */ nlmsg_end(skb, nlh); - return rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_mask); -} + gfp_flag = ((gfp_mask & GFP_ATOMIC) == GFP_ATOMIC) ? 
GFP_ATOMIC : + GFP_NOWAIT; -static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask) -{ - unsigned long flags; - unsigned long delay; - int ret; + spin_lock_irqsave(&ib_nl_request_lock, flags); + ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_flag); - INIT_LIST_HEAD(&query->list); - query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq); + if (ret) + goto out; - /* Put the request on the list first.*/ - spin_lock_irqsave(&ib_nl_request_lock, flags); + /* Put the request on the list.*/ delay = msecs_to_jiffies(sa_local_svc_timeout_ms); query->timeout = delay + jiffies; list_add_tail(&query->list, &ib_nl_request_list); /* Start the timeout if this is the only request */ if (ib_nl_request_list.next == &query->list) queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay); - spin_unlock_irqrestore(&ib_nl_request_lock, flags); - ret = ib_nl_send_msg(query, gfp_mask); - if (ret) { - ret = -EIO; - /* Remove the request */ - spin_lock_irqsave(&ib_nl_request_lock, flags); - list_del(&query->list); - spin_unlock_irqrestore(&ib_nl_request_lock, flags); - } +out: + spin_unlock_irqrestore(&ib_nl_request_lock, flags); return ret; } diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c index 08313f7c73bc..7dd082441333 100644 --- a/drivers/infiniband/hw/efa/efa_verbs.c +++ b/drivers/infiniband/hw/efa/efa_verbs.c @@ -212,6 +212,7 @@ int efa_query_device(struct ib_device *ibdev, props->max_send_sge = dev_attr->max_sq_sge; props->max_recv_sge = dev_attr->max_rq_sge; props->max_sge_rd = dev_attr->max_wr_rdma_sge; + props->max_pkeys = 1; if (udata && udata->outlen) { resp.max_sq_sge = dev_attr->max_sq_sge; diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c index 4633a0ce1a8c..2ced236e1553 100644 --- a/drivers/infiniband/hw/hfi1/debugfs.c +++ b/drivers/infiniband/hw/hfi1/debugfs.c @@ -985,15 +985,10 @@ static ssize_t qsfp2_debugfs_read(struct file *file, char __user *buf, static int __i2c_debugfs_open(struct inode *in, struct file *fp, u32 target) { struct hfi1_pportdata *ppd; - int ret; ppd = private2ppd(fp); - ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0); - if (ret) /* failed - release the module */ - module_put(THIS_MODULE); - - return ret; + return acquire_chip_resource(ppd->dd, i2c_target(target), 0); } static int i2c1_debugfs_open(struct inode *in, struct file *fp) @@ -1013,7 +1008,6 @@ static int __i2c_debugfs_release(struct inode *in, struct file *fp, u32 target) ppd = private2ppd(fp); release_chip_resource(ppd->dd, i2c_target(target)); - module_put(THIS_MODULE); return 0; } @@ -1031,18 +1025,10 @@ static int i2c2_debugfs_release(struct inode *in, struct file *fp) static int __qsfp_debugfs_open(struct inode *in, struct file *fp, u32 target) { struct hfi1_pportdata *ppd; - int ret; - - if (!try_module_get(THIS_MODULE)) - return -ENODEV; ppd = private2ppd(fp); - ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0); - if (ret) /* failed - release the module */ - module_put(THIS_MODULE); - - return ret; + return acquire_chip_resource(ppd->dd, i2c_target(target), 0); } static int qsfp1_debugfs_open(struct inode *in, struct file *fp) @@ -1062,7 +1048,6 @@ static int __qsfp_debugfs_release(struct inode *in, struct file *fp, u32 target) ppd = private2ppd(fp); release_chip_resource(ppd->dd, i2c_target(target)); - module_put(THIS_MODULE); return 0; } diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index 5eed4360695f..cb7ad1288821 100644 --- 
a/drivers/infiniband/hw/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c @@ -831,6 +831,29 @@ wq_error: } /** + * destroy_workqueues - destroy per port workqueues + * @dd: the hfi1_ib device + */ +static void destroy_workqueues(struct hfi1_devdata *dd) +{ + int pidx; + struct hfi1_pportdata *ppd; + + for (pidx = 0; pidx < dd->num_pports; ++pidx) { + ppd = dd->pport + pidx; + + if (ppd->hfi1_wq) { + destroy_workqueue(ppd->hfi1_wq); + ppd->hfi1_wq = NULL; + } + if (ppd->link_wq) { + destroy_workqueue(ppd->link_wq); + ppd->link_wq = NULL; + } + } +} + +/** * enable_general_intr() - Enable the IRQs that will be handled by the * general interrupt handler. * @dd: valid devdata @@ -1103,15 +1126,10 @@ static void shutdown_device(struct hfi1_devdata *dd) * We can't count on interrupts since we are stopping. */ hfi1_quiet_serdes(ppd); - - if (ppd->hfi1_wq) { - destroy_workqueue(ppd->hfi1_wq); - ppd->hfi1_wq = NULL; - } - if (ppd->link_wq) { - destroy_workqueue(ppd->link_wq); - ppd->link_wq = NULL; - } + if (ppd->hfi1_wq) + flush_workqueue(ppd->hfi1_wq); + if (ppd->link_wq) + flush_workqueue(ppd->link_wq); } sdma_exit(dd); } @@ -1756,6 +1774,7 @@ static void remove_one(struct pci_dev *pdev) * clear dma engines, etc. */ shutdown_device(dd); + destroy_workqueues(dd); stop_timers(dd); diff --git a/drivers/infiniband/hw/hfi1/iowait.h b/drivers/infiniband/hw/hfi1/iowait.h index 07847cb72169..d580aa17ae37 100644 --- a/drivers/infiniband/hw/hfi1/iowait.h +++ b/drivers/infiniband/hw/hfi1/iowait.h @@ -399,7 +399,7 @@ static inline void iowait_get_priority(struct iowait *w) * @wait_head: the wait queue * * This function is called to insert an iowait struct into a - * wait queue after a resource (eg, sdma decriptor or pio + * wait queue after a resource (eg, sdma descriptor or pio * buffer) is run out. 
*/ static inline void iowait_queue(bool pkts_sent, struct iowait *w, diff --git a/drivers/infiniband/hw/hfi1/ipoib.h b/drivers/infiniband/hw/hfi1/ipoib.h index 185c9b02c974..b8c9d0a003fb 100644 --- a/drivers/infiniband/hw/hfi1/ipoib.h +++ b/drivers/infiniband/hw/hfi1/ipoib.h @@ -67,6 +67,9 @@ struct hfi1_ipoib_circ_buf { * @sde: sdma engine * @tx_list: tx request list * @sent_txreqs: count of txreqs posted to sdma + * @stops: count of stops of queue + * @ring_full: ring has been filled + * @no_desc: descriptor shortage seen * @flow: tracks when list needs to be flushed for a flow change * @q_idx: ipoib Tx queue index * @pkts_sent: indicator packets have been sent from this queue @@ -80,6 +83,9 @@ struct hfi1_ipoib_txq { struct sdma_engine *sde; struct list_head tx_list; u64 sent_txreqs; + atomic_t stops; + atomic_t ring_full; + atomic_t no_desc; union hfi1_ipoib_flow flow; u8 q_idx; bool pkts_sent; diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c index 883cb9d48022..9df292b51a05 100644 --- a/drivers/infiniband/hw/hfi1/ipoib_tx.c +++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c @@ -55,23 +55,48 @@ static u64 hfi1_ipoib_txreqs(const u64 sent, const u64 completed) return sent - completed; } -static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq) +static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq) { - if (unlikely(hfi1_ipoib_txreqs(++txq->sent_txreqs, - atomic64_read(&txq->complete_txreqs)) >= - min_t(unsigned int, txq->priv->netdev->tx_queue_len, - txq->tx_ring.max_items - 1))) + return hfi1_ipoib_txreqs(txq->sent_txreqs, + atomic64_read(&txq->complete_txreqs)); +} + +static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq) +{ + if (atomic_inc_return(&txq->stops) == 1) netif_stop_subqueue(txq->priv->netdev, txq->q_idx); } +static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq) +{ + if (atomic_dec_and_test(&txq->stops)) + netif_wake_subqueue(txq->priv->netdev, txq->q_idx); +} + +static uint hfi1_ipoib_ring_hwat(struct hfi1_ipoib_txq *txq) +{ + return min_t(uint, txq->priv->netdev->tx_queue_len, + txq->tx_ring.max_items - 1); +} + +static uint hfi1_ipoib_ring_lwat(struct hfi1_ipoib_txq *txq) +{ + return min_t(uint, txq->priv->netdev->tx_queue_len, + txq->tx_ring.max_items) >> 1; +} + +static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq) +{ + ++txq->sent_txreqs; + if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq) && + !atomic_xchg(&txq->ring_full, 1)) + hfi1_ipoib_stop_txq(txq); +} + static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq) { struct net_device *dev = txq->priv->netdev; - /* If the queue is already running just return */ - if (likely(!__netif_subqueue_stopped(dev, txq->q_idx))) - return; - /* If shutting down just return as queue state is irrelevant */ if (unlikely(dev->reg_state != NETREG_REGISTERED)) return; @@ -86,11 +111,9 @@ static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq) * Use the minimum of the current tx_queue_len or the rings max txreqs * to protect against ring overflow. 
*/ - if (hfi1_ipoib_txreqs(txq->sent_txreqs, - atomic64_read(&txq->complete_txreqs)) - < min_t(unsigned int, dev->tx_queue_len, - txq->tx_ring.max_items) >> 1) - netif_wake_subqueue(dev, txq->q_idx); + if (hfi1_ipoib_used(txq) < hfi1_ipoib_ring_lwat(txq) && + atomic_xchg(&txq->ring_full, 0)) + hfi1_ipoib_wake_txq(txq); } static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget) @@ -364,11 +387,12 @@ static struct ipoib_txreq *hfi1_ipoib_send_dma_common(struct net_device *dev, if (unlikely(!tx)) return ERR_PTR(-ENOMEM); - /* so that we can test if the sdma decriptors are there */ + /* so that we can test if the sdma descriptors are there */ tx->txreq.num_desc = 0; tx->priv = priv; tx->txq = txp->txq; tx->skb = skb; + INIT_LIST_HEAD(&tx->txreq.list); hfi1_ipoib_build_ib_tx_headers(tx, txp); @@ -469,6 +493,7 @@ static int hfi1_ipoib_send_dma_single(struct net_device *dev, ret = hfi1_ipoib_submit_tx(txq, tx); if (likely(!ret)) { +tx_ok: trace_sdma_output_ibhdr(tx->priv->dd, &tx->sdma_hdr.hdr, ib_is_sc5(txp->flow.sc5)); @@ -478,20 +503,8 @@ static int hfi1_ipoib_send_dma_single(struct net_device *dev, txq->pkts_sent = false; - if (ret == -EBUSY) { - list_add_tail(&tx->txreq.list, &txq->tx_list); - - trace_sdma_output_ibhdr(tx->priv->dd, - &tx->sdma_hdr.hdr, - ib_is_sc5(txp->flow.sc5)); - hfi1_ipoib_check_queue_depth(txq); - return NETDEV_TX_OK; - } - - if (ret == -ECOMM) { - hfi1_ipoib_check_queue_depth(txq); - return NETDEV_TX_OK; - } + if (ret == -EBUSY || ret == -ECOMM) + goto tx_ok; sdma_txclean(priv->dd, &tx->txreq); dev_kfree_skb_any(skb); @@ -509,9 +522,17 @@ static int hfi1_ipoib_send_dma_list(struct net_device *dev, struct ipoib_txreq *tx; /* Has the flow change ? */ - if (txq->flow.as_int != txp->flow.as_int) - (void)hfi1_ipoib_flush_tx_list(dev, txq); - + if (txq->flow.as_int != txp->flow.as_int) { + int ret; + + ret = hfi1_ipoib_flush_tx_list(dev, txq); + if (unlikely(ret)) { + if (ret == -EBUSY) + ++dev->stats.tx_dropped; + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + } tx = hfi1_ipoib_send_dma_common(dev, skb, txp); if (IS_ERR(tx)) { int ret = PTR_ERR(tx); @@ -610,10 +631,14 @@ static int hfi1_ipoib_sdma_sleep(struct sdma_engine *sde, return -EAGAIN; } - netif_stop_subqueue(txq->priv->netdev, txq->q_idx); - - if (list_empty(&txq->wait.list)) + if (list_empty(&txreq->list)) + /* came from non-list submit */ + list_add_tail(&txreq->list, &txq->tx_list); + if (list_empty(&txq->wait.list)) { + if (!atomic_xchg(&txq->no_desc, 1)) + hfi1_ipoib_stop_txq(txq); iowait_queue(pkts_sent, wait->iow, &sde->dmawait); + } write_sequnlock(&sde->waitlock); return -EBUSY; @@ -648,9 +673,9 @@ static void hfi1_ipoib_flush_txq(struct work_struct *work) struct net_device *dev = txq->priv->netdev; if (likely(dev->reg_state == NETREG_REGISTERED) && - likely(__netif_subqueue_stopped(dev, txq->q_idx)) && likely(!hfi1_ipoib_flush_tx_list(dev, txq))) - netif_wake_subqueue(dev, txq->q_idx); + if (atomic_xchg(&txq->no_desc, 0)) + hfi1_ipoib_wake_txq(txq); } int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv) @@ -704,6 +729,9 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv) txq->sde = NULL; INIT_LIST_HEAD(&txq->tx_list); atomic64_set(&txq->complete_txreqs, 0); + atomic_set(&txq->stops, 0); + atomic_set(&txq->ring_full, 0); + atomic_set(&txq->no_desc, 0); txq->q_idx = i; txq->flow.tx_queue = 0xff; txq->flow.sc5 = 0xff; @@ -769,7 +797,7 @@ static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq) atomic64_inc(complete_txreqs); } - if 
(hfi1_ipoib_txreqs(txq->sent_txreqs, atomic64_read(complete_txreqs))) + if (hfi1_ipoib_used(txq)) dd_dev_warn(txq->priv->dd, "txq %d not empty found %llu requests\n", txq->q_idx, diff --git a/drivers/infiniband/hw/hfi1/netdev_rx.c b/drivers/infiniband/hw/hfi1/netdev_rx.c index 63688e85e8da..6d263c9749b3 100644 --- a/drivers/infiniband/hw/hfi1/netdev_rx.c +++ b/drivers/infiniband/hw/hfi1/netdev_rx.c @@ -373,7 +373,7 @@ void hfi1_netdev_free(struct hfi1_devdata *dd) { if (dd->dummy_netdev) { dd_dev_info(dd, "hfi1 netdev freed\n"); - free_netdev(dd->dummy_netdev); + kfree(dd->dummy_netdev); dd->dummy_netdev = NULL; } } diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c index 0c2ae9f7b3e8..be62284e42d9 100644 --- a/drivers/infiniband/hw/hfi1/qp.c +++ b/drivers/infiniband/hw/hfi1/qp.c @@ -195,7 +195,7 @@ static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu) { /* Constraining 10KB packets to 8KB packets */ if (mtu == (enum ib_mtu)OPA_MTU_10240) - mtu = OPA_MTU_8192; + mtu = (enum ib_mtu)OPA_MTU_8192; return opa_mtu_enum_to_int((enum opa_mtu)mtu); } @@ -367,7 +367,10 @@ bool _hfi1_schedule_send(struct rvt_qp *qp) struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); - struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); + struct hfi1_devdata *dd = ppd->dd; + + if (dd->flags & HFI1_SHUTDOWN) + return true; return iowait_schedule(&priv->s_iowait, ppd->hfi1_wq, priv->s_sde ? diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c index 243b4ba0b6f6..facff133139a 100644 --- a/drivers/infiniband/hw/hfi1/tid_rdma.c +++ b/drivers/infiniband/hw/hfi1/tid_rdma.c @@ -5406,7 +5406,10 @@ static bool _hfi1_schedule_tid_send(struct rvt_qp *qp) struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); - struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); + struct hfi1_devdata *dd = ppd->dd; + + if ((dd->flags & HFI1_SHUTDOWN)) + return true; return iowait_tid_schedule(&priv->s_iowait, ppd->hfi1_wq, priv->s_sde ? 
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h index bfa6e081cb56..d2d526c5a756 100644 --- a/drivers/infiniband/hw/hfi1/verbs_txreq.h +++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h @@ -91,7 +91,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev, tx->mr = NULL; tx->sde = priv->s_sde; tx->psc = priv->s_sendcontext; - /* so that we can test if the sdma decriptors are there */ + /* so that we can test if the sdma descriptors are there */ tx->txreq.num_desc = 0; /* Set the header type */ tx->phdr.hdr.hdr_type = priv->hdr_type; diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index a77fa6730b2d..479fa557993e 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -898,13 +898,14 @@ struct hns_roce_hw { int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr); void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port, enum ib_mtu mtu); - int (*write_mtpt)(void *mb_buf, struct hns_roce_mr *mr, - unsigned long mtpt_idx); + int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf, + struct hns_roce_mr *mr, unsigned long mtpt_idx); int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr, int flags, u32 pdn, int mr_access_flags, u64 iova, u64 size, void *mb_buf); - int (*frmr_write_mtpt)(void *mb_buf, struct hns_roce_mr *mr); + int (*frmr_write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf, + struct hns_roce_mr *mr); int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw); void (*write_cqc)(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts, diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index d02207cd30df..cf39f560b800 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -1756,10 +1756,10 @@ static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port, val); } -static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr, +static int hns_roce_v1_write_mtpt(struct hns_roce_dev *hr_dev, void *mb_buf, + struct hns_roce_mr *mr, unsigned long mtpt_idx) { - struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device); u64 pages[HNS_ROCE_MAX_INNER_MTPT_NUM] = { 0 }; struct ib_device *ibdev = &hr_dev->ib_dev; struct hns_roce_v1_mpt_entry *mpt_entry; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index c597d7281629..dd01a51816cc 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -910,7 +910,7 @@ static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev) instance_stage = handle->rinfo.instance_state; reset_stage = handle->rinfo.reset_state; reset_cnt = ops->ae_dev_reset_cnt(handle); - hw_resetting = ops->get_hw_reset_stat(handle); + hw_resetting = ops->get_cmdq_stat(handle); sw_resetting = ops->ae_dev_resetting(handle); if (reset_cnt != hr_dev->reset_cnt) @@ -2529,10 +2529,10 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port, return hns_roce_cmq_send(hr_dev, &desc, 1); } -static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry, +static int set_mtpt_pbl(struct hns_roce_dev *hr_dev, + struct hns_roce_v2_mpt_entry *mpt_entry, struct hns_roce_mr *mr) { - struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device); u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 }; struct ib_device *ibdev = 
&hr_dev->ib_dev; dma_addr_t pbl_ba; @@ -2571,7 +2571,8 @@ static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry, return 0; } -static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr, +static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev, + void *mb_buf, struct hns_roce_mr *mr, unsigned long mtpt_idx) { struct hns_roce_v2_mpt_entry *mpt_entry; @@ -2620,7 +2621,7 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr, if (mr->type == MR_TYPE_DMA) return 0; - ret = set_mtpt_pbl(mpt_entry, mr); + ret = set_mtpt_pbl(hr_dev, mpt_entry, mr); return ret; } @@ -2666,15 +2667,15 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev, mr->iova = iova; mr->size = size; - ret = set_mtpt_pbl(mpt_entry, mr); + ret = set_mtpt_pbl(hr_dev, mpt_entry, mr); } return ret; } -static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr) +static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev, + void *mb_buf, struct hns_roce_mr *mr) { - struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device); struct ib_device *ibdev = &hr_dev->ib_dev; struct hns_roce_v2_mpt_entry *mpt_entry; dma_addr_t pbl_ba = 0; diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 4c0bbb12770d..0e71ebee9e52 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -180,9 +180,10 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev, } if (mr->type != MR_TYPE_FRMR) - ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx); + ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr, + mtpt_idx); else - ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr); + ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr); if (ret) { dev_err(dev, "Write mtpt fail!\n"); goto err_page; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 343a8b8361e7..6f99ed03d88e 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -511,7 +511,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num, mdev_port_num); if (err) goto out; - ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet); + ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability); eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper); props->active_width = IB_WIDTH_4X; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 81bf6b975e0e..e050eade97a1 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -1862,7 +1862,7 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, if (!in) return -ENOMEM; - if (MLX5_CAP_GEN(mdev, ece_support)) + if (MLX5_CAP_GEN(mdev, ece_support) && ucmd) MLX5_SET(create_qp_in, in, ece, ucmd->ece_options); qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); @@ -2341,18 +2341,18 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, unsigned long flags; int err; - if (qp->ibqp.rwq_ind_tbl) { + if (qp->is_rss) { destroy_rss_raw_qp_tir(dev, qp); return; } - base = (qp->ibqp.qp_type == IB_QPT_RAW_PACKET || + base = (qp->type == IB_QPT_RAW_PACKET || qp->flags & IB_QP_CREATE_SOURCE_QPN) ? 
- &qp->raw_packet_qp.rq.base : - &qp->trans_qp.base; + &qp->raw_packet_qp.rq.base : + &qp->trans_qp.base; if (qp->state != IB_QPS_RESET) { - if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET && + if (qp->type != IB_QPT_RAW_PACKET && !(qp->flags & IB_QP_CREATE_SOURCE_QPN)) { err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_2RST_QP, 0, NULL, &base->mqp, NULL); @@ -2368,8 +2368,8 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, base->mqp.qpn); } - get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq, - &send_cq, &recv_cq); + get_cqs(qp->type, qp->ibqp.send_cq, qp->ibqp.recv_cq, &send_cq, + &recv_cq); spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); mlx5_ib_lock_cqs(send_cq, recv_cq); @@ -2391,7 +2391,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, mlx5_ib_unlock_cqs(send_cq, recv_cq); spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); - if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET || + if (qp->type == IB_QPT_RAW_PACKET || qp->flags & IB_QP_CREATE_SOURCE_QPN) { destroy_raw_packet_qp(dev, qp); } else { @@ -2668,6 +2668,13 @@ static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, if (qp_type == IB_QPT_RAW_PACKET && attr->rwq_ind_tbl) return (create_flags) ? -EINVAL : 0; + process_create_flag(dev, &create_flags, IB_QP_CREATE_NETIF_QP, + mlx5_get_flow_namespace(dev->mdev, + MLX5_FLOW_NAMESPACE_BYPASS), + qp); + process_create_flag(dev, &create_flags, + IB_QP_CREATE_INTEGRITY_EN, + MLX5_CAP_GEN(mdev, sho), qp); process_create_flag(dev, &create_flags, IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK, MLX5_CAP_GEN(mdev, block_lb_mc), qp); @@ -2873,7 +2880,6 @@ static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp) static int check_ucmd_data(struct mlx5_ib_dev *dev, struct mlx5_create_qp_params *params) { - struct ib_qp_init_attr *attr = params->attr; struct ib_udata *udata = params->udata; size_t size, last; int ret; @@ -2885,14 +2891,7 @@ static int check_ucmd_data(struct mlx5_ib_dev *dev, */ last = sizeof(struct mlx5_ib_create_qp_rss); else - /* IB_QPT_RAW_PACKET doesn't have ECE data */ - switch (attr->qp_type) { - case IB_QPT_RAW_PACKET: - last = offsetof(struct mlx5_ib_create_qp, ece_options); - break; - default: - last = offsetof(struct mlx5_ib_create_qp, reserved); - } + last = offsetof(struct mlx5_ib_create_qp, reserved); if (udata->inlen <= last) return 0; @@ -2907,7 +2906,7 @@ static int check_ucmd_data(struct mlx5_ib_dev *dev, if (!ret) mlx5_ib_dbg( dev, - "udata is not cleared, inlen = %lu, ucmd = %lu, last = %lu, size = %lu\n", + "udata is not cleared, inlen = %zu, ucmd = %zu, last = %zu, size = %zu\n", udata->inlen, params->ucmd_size, last, size); return ret ? 0 : -EINVAL; } @@ -3002,10 +3001,19 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr, return &qp->ibqp; destroy_qp: - if (qp->type == MLX5_IB_QPT_DCT) + if (qp->type == MLX5_IB_QPT_DCT) { mlx5_ib_destroy_dct(qp); - else + } else { + /* + * These lines below are temp solution till QP allocation + * will be moved to be under IB/core responsiblity. + */ + qp->ibqp.send_cq = attr->send_cq; + qp->ibqp.recv_cq = attr->recv_cq; + qp->ibqp.pd = pd; destroy_qp_common(dev, qp, udata); + } + qp = NULL; free_qp: kfree(qp); @@ -4162,8 +4170,6 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr, if (udata->outlen < min_resp_len) return -EINVAL; - resp.response_length = min_resp_len; - /* * If we don't have enough space for the ECE options, * simply indicate it with resp.response_length. 
@@ -4384,8 +4390,7 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev, MLX5_GET(ads, path, src_addr_index), MLX5_GET(ads, path, hop_limit), MLX5_GET(ads, path, tclass)); - memcpy(ah_attr, MLX5_ADDR_OF(ads, path, rgid_rip), - MLX5_FLD_SZ_BYTES(ads, rgid_rip)); + rdma_ah_set_dgid_raw(ah_attr, MLX5_ADDR_OF(ads, path, rgid_rip)); } } diff --git a/drivers/infiniband/hw/mlx5/qpc.c b/drivers/infiniband/hw/mlx5/qpc.c index c19d91d6dce8..7c3968ef9cd1 100644 --- a/drivers/infiniband/hw/mlx5/qpc.c +++ b/drivers/infiniband/hw/mlx5/qpc.c @@ -346,6 +346,9 @@ static int get_ece_from_mbox(void *out, u16 opcode) int ece = 0; switch (opcode) { + case MLX5_CMD_OP_INIT2INIT_QP: + ece = MLX5_GET(init2init_qp_out, out, ece); + break; case MLX5_CMD_OP_INIT2RTR_QP: ece = MLX5_GET(init2rtr_qp_out, out, ece); break; @@ -355,6 +358,9 @@ static int get_ece_from_mbox(void *out, u16 opcode) case MLX5_CMD_OP_RTS2RTS_QP: ece = MLX5_GET(rts2rts_qp_out, out, ece); break; + case MLX5_CMD_OP_RST2INIT_QP: + ece = MLX5_GET(rst2init_qp_out, out, ece); + break; default: break; } @@ -406,6 +412,7 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn, return -ENOMEM; MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn, opt_param_mask, qpc, uid); + MLX5_SET(rst2init_qp_in, mbox->in, ece, ece); break; case MLX5_CMD_OP_INIT2RTR_QP: if (MBOX_ALLOC(mbox, init2rtr_qp)) @@ -439,6 +446,7 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn, return -ENOMEM; MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn, opt_param_mask, qpc, uid); + MLX5_SET(init2init_qp_in, mbox->in, ece, ece); break; default: return -EINVAL; diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c index 792eecd206b6..97fc7dd353b0 100644 --- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c @@ -150,8 +150,17 @@ qedr_iw_issue_event(void *context, if (params->cm_info) { event.ird = params->cm_info->ird; event.ord = params->cm_info->ord; - event.private_data_len = params->cm_info->private_data_len; - event.private_data = (void *)params->cm_info->private_data; + /* Only connect_request and reply have valid private data + * the rest of the events this may be left overs from + * connection establishment. 
CONNECT_REQUEST is issued via + * qedr_iw_mpa_request + */ + if (event_type == IW_CM_EVENT_CONNECT_REPLY) { + event.private_data_len = + params->cm_info->private_data_len; + event.private_data = + (void *)params->cm_info->private_data; + } } if (ep->cm_id) diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 511b72809e14..7db35dd6ad74 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -1204,7 +1204,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, err = alloc_ud_wq_attr(qp, rdi->dparms.node); if (err) { ret = (ERR_PTR(err)); - goto bail_driver_priv; + goto bail_rq_rvt; } if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE) @@ -1314,9 +1314,11 @@ bail_qpn: rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num); bail_rq_wq: - rvt_free_rq(&qp->r_rq); free_ud_wq_attr(qp); +bail_rq_rvt: + rvt_free_rq(&qp->r_rq); + bail_driver_priv: rdi->driver_f.qp_priv_free(rdi, qp); diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c index a0b8cc643c5c..ed60c9e4643e 100644 --- a/drivers/infiniband/sw/siw/siw_main.c +++ b/drivers/infiniband/sw/siw/siw_main.c @@ -67,12 +67,13 @@ static int siw_device_register(struct siw_device *sdev, const char *name) static int dev_id = 1; int rv; + sdev->vendor_part_id = dev_id++; + rv = ib_register_device(base_dev, name); if (rv) { pr_warn("siw: device registration error %d\n", rv); return rv; } - sdev->vendor_part_id = dev_id++; siw_dbg(base_dev, "HWaddr=%pM\n", sdev->netdev->dev_addr); diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c index 650520244ed0..7271d705f4b0 100644 --- a/drivers/infiniband/sw/siw/siw_qp_rx.c +++ b/drivers/infiniband/sw/siw/siw_qp_rx.c @@ -139,7 +139,8 @@ static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx, break; bytes = min(bytes, len); - if (siw_rx_kva(srx, (void *)buf_addr, bytes) == bytes) { + if (siw_rx_kva(srx, (void *)(uintptr_t)buf_addr, bytes) == + bytes) { copied += bytes; offset += bytes; len -= bytes; diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index 3f9354baac4b..6291fb5fa015 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -951,6 +951,8 @@ static void elan_report_absolute(struct elan_tp_data *data, u8 *packet) u8 hover_info = packet[ETP_HOVER_INFO_OFFSET]; bool contact_valid, hover_event; + pm_wakeup_event(&data->client->dev, 0); + hover_event = hover_info & 0x40; for (i = 0; i < ETP_MAX_FINGERS; i++) { contact_valid = tp_info & (1U << (3 + i)); @@ -974,6 +976,8 @@ static void elan_report_trackpoint(struct elan_tp_data *data, u8 *report) u8 *packet = &report[ETP_REPORT_ID_OFFSET + 1]; int x, y; + pm_wakeup_event(&data->client->dev, 0); + if (!data->tp_input) { dev_warn_once(&data->client->dev, "received a trackpoint report while no trackpoint device has been created. 
Please report upstream.\n"); @@ -998,7 +1002,6 @@ static void elan_report_trackpoint(struct elan_tp_data *data, u8 *report) static irqreturn_t elan_isr(int irq, void *dev_id) { struct elan_tp_data *data = dev_id; - struct device *dev = &data->client->dev; int error; u8 report[ETP_MAX_REPORT_LEN]; @@ -1016,8 +1019,6 @@ static irqreturn_t elan_isr(int irq, void *dev_id) if (error) goto out; - pm_wakeup_event(dev, 0); - switch (report[ETP_REPORT_ID_OFFSET]) { case ETP_REPORT_ID: elan_report_absolute(data, report); @@ -1026,7 +1027,7 @@ static irqreturn_t elan_isr(int irq, void *dev_id) elan_report_trackpoint(data, report); break; default: - dev_err(dev, "invalid report id data (%x)\n", + dev_err(&data->client->dev, "invalid report id data (%x)\n", report[ETP_REPORT_ID_OFFSET]); } diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 758dae8d6500..4b81b2d0fe06 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -179,6 +179,7 @@ static const char * const smbus_pnp_ids[] = { "LEN0093", /* T480 */ "LEN0096", /* X280 */ "LEN0097", /* X280 -> ALPS trackpoint */ + "LEN0099", /* X1 Extreme 1st */ "LEN009b", /* T580 */ "LEN200f", /* T450s */ "LEN2044", /* L470 */ diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 7b08ff8ddf35..7d7f73702726 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -426,6 +426,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { }, }, { + /* Lenovo XiaoXin Air 12 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "80UN"), + }, + }, + { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"), diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c index 233cb1085bbd..5477a5718202 100644 --- a/drivers/input/touchscreen/elants_i2c.c +++ b/drivers/input/touchscreen/elants_i2c.c @@ -1325,7 +1325,6 @@ static int elants_i2c_probe(struct i2c_client *client, 0, MT_TOOL_PALM, 0, 0); input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->x_res); input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->y_res); - input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, 1); touchscreen_parse_properties(ts->input, true, &ts->prop); diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index b510f67dfa49..b0f308cb7f7c 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -211,7 +211,7 @@ config INTEL_IOMMU_DEBUGFS config INTEL_IOMMU_SVM bool "Support for Shared Virtual Memory with Intel IOMMU" - depends on INTEL_IOMMU && X86 + depends on INTEL_IOMMU && X86_64 select PCI_PASID select PCI_PRI select MMU_NOTIFIER @@ -305,6 +305,7 @@ config ROCKCHIP_IOMMU config SUN50I_IOMMU bool "Allwinner H6 IOMMU Support" + depends on HAS_DMA depends on ARCH_SUNXI || COMPILE_TEST select ARM_DMA_USE_IOMMU select IOMMU_API diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h index f892992c8744..57309716fd18 100644 --- a/drivers/iommu/amd/amd_iommu.h +++ b/drivers/iommu/amd/amd_iommu.h @@ -102,7 +102,7 @@ extern int __init add_special_device(u8 type, u8 id, u16 *devid, #ifdef CONFIG_DMI void amd_iommu_apply_ivrs_quirks(void); #else -static void amd_iommu_apply_ivrs_quirks(void) { } +static inline void amd_iommu_apply_ivrs_quirks(void) { } #endif #endif diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 74cca1757172..2f22326ee4df 100644 --- a/drivers/iommu/amd/iommu.c +++ 
b/drivers/iommu/amd/iommu.c @@ -3985,9 +3985,10 @@ int amd_iommu_create_irq_domain(struct amd_iommu *iommu) if (!fn) return -ENOMEM; iommu->ir_domain = irq_domain_create_tree(fn, &amd_ir_domain_ops, iommu); - irq_domain_free_fwnode(fn); - if (!iommu->ir_domain) + if (!iommu->ir_domain) { + irq_domain_free_fwnode(fn); return -ENOMEM; + } iommu->ir_domain->parent = arch_get_ir_parent_domain(); iommu->msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain, diff --git a/drivers/iommu/arm-smmu-qcom.c b/drivers/iommu/arm-smmu-qcom.c index cf01d0215a39..be4318044f96 100644 --- a/drivers/iommu/arm-smmu-qcom.c +++ b/drivers/iommu/arm-smmu-qcom.c @@ -12,7 +12,7 @@ struct qcom_smmu { struct arm_smmu_device smmu; }; -static const struct of_device_id qcom_smmu_client_of_match[] = { +static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = { { .compatible = "qcom,adreno" }, { .compatible = "qcom,mdp4" }, { .compatible = "qcom,mdss" }, diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c index 3c0c67a99c7b..8919c1c70b68 100644 --- a/drivers/iommu/hyperv-iommu.c +++ b/drivers/iommu/hyperv-iommu.c @@ -155,7 +155,10 @@ static int __init hyperv_prepare_irq_remapping(void) 0, IOAPIC_REMAPPING_ENTRY, fn, &hyperv_ir_domain_ops, NULL); - irq_domain_free_fwnode(fn); + if (!ioapic_ir_domain) { + irq_domain_free_fwnode(fn); + return -ENOMEM; + } /* * Hyper-V doesn't provide irq remapping function for diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c index cc46dff98fa0..683b812c5c47 100644 --- a/drivers/iommu/intel/dmar.c +++ b/drivers/iommu/intel/dmar.c @@ -898,7 +898,8 @@ int __init detect_intel_iommu(void) if (!ret) ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl, &validate_drhd_cb); - if (!ret && !no_iommu && !iommu_detected && !dmar_disabled) { + if (!ret && !no_iommu && !iommu_detected && + (!dmar_disabled || dmar_platform_optin())) { iommu_detected = 1; /* Make sure ACS will be enabled */ pci_request_acs(); diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 9129663a7406..d759e7234e98 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -612,6 +612,12 @@ struct intel_iommu *domain_get_iommu(struct dmar_domain *domain) return g_iommus[iommu_id]; } +static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu) +{ + return sm_supported(iommu) ? 
+ ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap); +} + static void domain_update_iommu_coherency(struct dmar_domain *domain) { struct dmar_drhd_unit *drhd; @@ -623,7 +629,7 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain) for_each_domain_iommu(i, domain) { found = true; - if (!ecap_coherent(g_iommus[i]->ecap)) { + if (!iommu_paging_structure_coherency(g_iommus[i])) { domain->iommu_coherency = 0; break; } @@ -634,7 +640,7 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain) /* No hardware attached; use lowest common denominator */ rcu_read_lock(); for_each_active_iommu(iommu, drhd) { - if (!ecap_coherent(iommu->ecap)) { + if (!iommu_paging_structure_coherency(iommu)) { domain->iommu_coherency = 0; break; } @@ -921,7 +927,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE); pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE; if (domain_use_first_level(domain)) - pteval |= DMA_FL_PTE_XD; + pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US; if (cmpxchg64(&pte->val, 0ULL, pteval)) /* Someone else set it while we were thinking; use theirs. */ free_pgtable_page(tmp_page); @@ -1951,7 +1957,6 @@ static inline void context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid) { context->hi |= pasid & ((1 << 20) - 1); - context->hi |= (1 << 20); } /* @@ -2095,7 +2100,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain, context_set_fault_enable(context); context_set_present(context); - domain_flush_cache(domain, context, sizeof(*context)); + if (!ecap_coherent(iommu->ecap)) + clflush_cache_range(context, sizeof(*context)); /* * It's a non-present to present mapping. If hardware doesn't cache @@ -2243,7 +2249,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP); if (domain_use_first_level(domain)) - attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD; + attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD | DMA_FL_PTE_US; if (!sg) { sg_res = nr_pages; @@ -2695,7 +2701,9 @@ static int __init si_domain_init(int hw) end >> agaw_to_width(si_domain->agaw))) continue; - ret = iommu_domain_identity_map(si_domain, start, end); + ret = iommu_domain_identity_map(si_domain, + mm_to_dma_pfn(start >> PAGE_SHIFT), + mm_to_dma_pfn(end >> PAGE_SHIFT)); if (ret) return ret; } @@ -6021,6 +6029,23 @@ intel_iommu_domain_set_attr(struct iommu_domain *domain, return ret; } +/* + * Check that the device does not live on an external facing PCI port that is + * marked as untrusted. Such devices should not be able to apply quirks and + * thus not be able to bypass the IOMMU restrictions. 
+ */ +static bool risky_device(struct pci_dev *pdev) +{ + if (pdev->untrusted) { + pci_info(pdev, + "Skipping IOMMU quirk for dev [%04X:%04X] on untrusted PCI link\n", + pdev->vendor, pdev->device); + pci_info(pdev, "Please check with your BIOS/Platform vendor about this\n"); + return true; + } + return false; +} + const struct iommu_ops intel_iommu_ops = { .capable = intel_iommu_capable, .domain_alloc = intel_iommu_domain_alloc, @@ -6060,6 +6085,9 @@ const struct iommu_ops intel_iommu_ops = { static void quirk_iommu_igfx(struct pci_dev *dev) { + if (risky_device(dev)) + return; + pci_info(dev, "Disabling IOMMU for graphics on this chipset\n"); dmar_map_gfx = 0; } @@ -6101,6 +6129,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx); static void quirk_iommu_rwbf(struct pci_dev *dev) { + if (risky_device(dev)) + return; + /* * Mobile 4 Series Chipset neglects to set RWBF capability, * but needs it. Same seems to hold for the desktop versions. @@ -6131,6 +6162,9 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev) { unsigned short ggc; + if (risky_device(dev)) + return; + if (pci_read_config_word(dev, GGC, &ggc)) return; @@ -6164,6 +6198,12 @@ static void __init check_tylersburg_isoch(void) pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL); if (!pdev) return; + + if (risky_device(pdev)) { + pci_dev_put(pdev); + return; + } + pci_dev_put(pdev); /* System Management Registers. Might be hidden, in which case @@ -6173,6 +6213,11 @@ static void __init check_tylersburg_isoch(void) if (!pdev) return; + if (risky_device(pdev)) { + pci_dev_put(pdev); + return; + } + if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) { pci_dev_put(pdev); return; diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c index 7f8769800815..9564d23d094f 100644 --- a/drivers/iommu/intel/irq_remapping.c +++ b/drivers/iommu/intel/irq_remapping.c @@ -563,8 +563,8 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu) 0, INTR_REMAP_TABLE_ENTRIES, fn, &intel_ir_domain_ops, iommu); - irq_domain_free_fwnode(fn); if (!iommu->ir_domain) { + irq_domain_free_fwnode(fn); pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id); goto out_free_bitmap; } diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index d43120eb1dc5..b6858adc4f17 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -295,10 +295,10 @@ void iommu_release_device(struct device *dev) return; iommu_device_unlink(dev->iommu->iommu_dev, dev); - iommu_group_remove_device(dev); ops->release_device(dev); + iommu_group_remove_device(dev); module_put(ops->owner); dev_iommu_free(dev); } diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c index fce605e96aa2..3b1bf2fb94f5 100644 --- a/drivers/iommu/sun50i-iommu.c +++ b/drivers/iommu/sun50i-iommu.c @@ -313,9 +313,9 @@ static int sun50i_iommu_flush_all_tlb(struct sun50i_iommu *iommu) IOMMU_TLB_FLUSH_MICRO_TLB(1) | IOMMU_TLB_FLUSH_MICRO_TLB(0)); - ret = readl_poll_timeout(iommu->base + IOMMU_TLB_FLUSH_REG, - reg, !reg, - 1, 2000); + ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_FLUSH_REG, + reg, !reg, + 1, 2000); if (ret) dev_warn(iommu->dev, "TLB Flush timed out!\n"); @@ -556,7 +556,6 @@ static size_t sun50i_iommu_unmap(struct iommu_domain *domain, unsigned long iova { struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain); phys_addr_t pt_phys; - dma_addr_t pte_dma; u32 *pte_addr; u32 dte; @@ -566,7 +565,6 @@ static size_t sun50i_iommu_unmap(struct iommu_domain *domain, 
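Several hunks above (amd_iommu_create_irq_domain, hyperv_prepare_irq_remapping, intel_setup_irq_remapping) apply the same fix: the fwnode handle given to the irq-domain constructors is referenced by the resulting domain, so it must not be freed unconditionally right after creation but only on the failure path. A minimal sketch of the corrected pattern, with hypothetical names (example_ir_domain_ops, priv):

    struct fwnode_handle *fn;
    struct irq_domain *domain;

    fn = irq_domain_alloc_named_id_fwnode("EXAMPLE-IR", 0);
    if (!fn)
            return -ENOMEM;

    domain = irq_domain_create_tree(fn, &example_ir_domain_ops, priv);
    if (!domain) {
            irq_domain_free_fwnode(fn);     /* free only when creation failed */
            return -ENOMEM;
    }
    /* on success, keep fn alive until the domain itself is torn down */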
unsigned long iova pt_phys = sun50i_dte_get_pt_address(dte); pte_addr = (u32 *)phys_to_virt(pt_phys) + sun50i_iova_get_pte_index(iova); - pte_dma = pt_phys + sun50i_iova_get_pte_index(iova) * PT_ENTRY_SIZE; if (!sun50i_pte_is_page_valid(*pte_addr)) return 0; diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 29fead208cad..216b3b8392b5 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -563,7 +563,7 @@ config LOONGSON_PCH_PIC Support for the Loongson PCH PIC Controller. config LOONGSON_PCH_MSI - bool "Loongson PCH PIC Controller" + bool "Loongson PCH MSI Controller" depends on MACH_LOONGSON64 || COMPILE_TEST depends on PCI default MACH_LOONGSON64 diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index cd685f521c77..beac4caefad9 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -3797,10 +3797,10 @@ static void its_wait_vpt_parse_complete(void) if (!gic_rdists->has_vpend_valid_dirty) return; - WARN_ON_ONCE(readq_relaxed_poll_timeout(vlpi_base + GICR_VPENDBASER, - val, - !(val & GICR_VPENDBASER_Dirty), - 10, 500)); + WARN_ON_ONCE(readq_relaxed_poll_timeout_atomic(vlpi_base + GICR_VPENDBASER, + val, + !(val & GICR_VPENDBASER_Dirty), + 10, 500)); } static void its_vpe_schedule(struct its_vpe *vpe) @@ -4054,16 +4054,24 @@ static void its_vpe_4_1_deschedule(struct its_vpe *vpe, u64 val; if (info->req_db) { + unsigned long flags; + /* * vPE is going to block: make the vPE non-resident with * PendingLast clear and DB set. The GIC guarantees that if * we read-back PendingLast clear, then a doorbell will be * delivered when an interrupt comes. + * + * Note the locking to deal with the concurrent update of + * pending_last from the doorbell interrupt handler that can + * run concurrently. 
*/ + raw_spin_lock_irqsave(&vpe->vpe_lock, flags); val = its_clear_vpend_valid(vlpi_base, GICR_VPENDBASER_PendingLast, GICR_VPENDBASER_4_1_DB); vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); + raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags); } else { /* * We're not blocking, so just make the vPE non-resident diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 00de05abd3c3..c17fabd6741e 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -329,10 +329,8 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) { - void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); - unsigned int cpu, shift = (gic_irq(d) % 4) * 8; - u32 val, mask, bit; - unsigned long flags; + void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d); + unsigned int cpu; if (!force) cpu = cpumask_any_and(mask_val, cpu_online_mask); @@ -342,13 +340,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) return -EINVAL; - gic_lock_irqsave(flags); - mask = 0xff << shift; - bit = gic_cpu_map[cpu] << shift; - val = readl_relaxed(reg) & ~mask; - writel_relaxed(val | bit, reg); - gic_unlock_irqrestore(flags); - + writeb_relaxed(gic_cpu_map[cpu], reg); irq_data_update_effective_affinity(d, cpumask_of(cpu)); return IRQ_SET_MASK_OK_DONE; diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c index a6f97fa6ff69..8017f6d32d52 100644 --- a/drivers/irqchip/irq-riscv-intc.c +++ b/drivers/irqchip/irq-riscv-intc.c @@ -99,7 +99,7 @@ static int __init riscv_intc_init(struct device_node *node, hartid = riscv_of_parent_hartid(node); if (hartid < 0) { - pr_warn("unable to fine hart id for %pOF\n", node); + pr_warn("unable to find hart id for %pOF\n", node); return 0; } diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index ac83f5002ce5..489935d5f22d 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1471,7 +1471,7 @@ static void retrieve_deps(struct dm_table *table, /* * Check we have enough space. */ - needed = sizeof(*deps) + (sizeof(*deps->dev) * count); + needed = struct_size(deps, dev, count); if (len < needed) { param->flags |= DM_BUFFER_FULL_FLAG; return; diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index f60c02512121..85e0daabad49 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -146,10 +146,6 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig) */ static void rq_completed(struct mapped_device *md) { - /* nudge anyone waiting on suspend queue */ - if (unlikely(wq_has_sleeper(&md->wait))) - wake_up(&md->wait); - /* * dm_put() must be at the end of this function. 
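The dm-ioctl hunk above swaps an open-coded allocation size for struct_size(). A minimal sketch of the idiom, using a hypothetical flexible-array struct rather than the real dm_target_deps layout:

    #include <linux/overflow.h>

    struct example_deps {
            u32 count;
            u64 dev[];              /* flexible array member */
    };

    static size_t example_needed(struct example_deps *deps, unsigned int count)
    {
            /* sizeof(*deps) + count * sizeof(deps->dev[0]), except that the
             * multiplication saturates instead of wrapping on overflow */
            return struct_size(deps, dev, count);
    }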
See the comment above */ diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index 74f3c506f084..5358894bb9fd 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -282,6 +282,8 @@ static int persistent_memory_claim(struct dm_writecache *wc) while (daa-- && i < p) { pages[i++] = pfn_t_to_page(pfn); pfn.val++; + if (!(i & 15)) + cond_resched(); } } while (i < p); wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL); @@ -849,10 +851,14 @@ static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_ if (likely(!e->write_in_progress)) { if (!discarded_something) { - writecache_wait_for_ios(wc, READ); - writecache_wait_for_ios(wc, WRITE); + if (!WC_MODE_PMEM(wc)) { + writecache_wait_for_ios(wc, READ); + writecache_wait_for_ios(wc, WRITE); + } discarded_something = true; } + if (!writecache_entry_is_committed(wc, e)) + wc->uncommitted_blocks--; writecache_free_entry(wc, e); } @@ -2260,6 +2266,12 @@ invalid_optional: } if (WC_MODE_PMEM(wc)) { + if (!dax_synchronous(wc->ssd_dev->dax_dev)) { + r = -EOPNOTSUPP; + ti->error = "Asynchronous persistent memory not supported as pmem cache"; + goto bad; + } + r = persistent_memory_claim(wc); if (r) { ti->error = "Unable to map persistent memory for cache"; diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c index 130b5a6d9f12..b298fefb022e 100644 --- a/drivers/md/dm-zoned-metadata.c +++ b/drivers/md/dm-zoned-metadata.c @@ -1078,7 +1078,8 @@ static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_sb *dsb, nr_meta_zones = (le32_to_cpu(sb->nr_meta_blocks) + zmd->zone_nr_blocks - 1) >> zmd->zone_nr_blocks_shift; if (!nr_meta_zones || - nr_meta_zones >= zmd->nr_rnd_zones) { + (zmd->nr_devs <= 1 && nr_meta_zones >= zmd->nr_rnd_zones) || + (zmd->nr_devs > 1 && nr_meta_zones >= zmd->nr_cache_zones)) { dmz_dev_err(dev, "Invalid number of metadata blocks"); return -ENXIO; } @@ -1949,7 +1950,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd, unsigned int idx, bool idle) { struct dm_zone *dzone = NULL; - struct dm_zone *zone, *last = NULL; + struct dm_zone *zone, *maxw_z = NULL; struct list_head *zone_list; /* If we have cache zones select from the cache zone list */ @@ -1961,18 +1962,37 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd, } else zone_list = &zmd->dev[idx].map_rnd_list; + /* + * Find the buffer zone with the heaviest weight or the first (oldest) + * data zone that can be reclaimed. + */ list_for_each_entry(zone, zone_list, link) { if (dmz_is_buf(zone)) { dzone = zone->bzone; - if (dzone->dev->dev_idx != idx) - continue; - if (!last) { - last = dzone; + if (dmz_is_rnd(dzone) && dzone->dev->dev_idx != idx) continue; - } - if (last->weight < dzone->weight) + if (!maxw_z || maxw_z->weight < dzone->weight) + maxw_z = dzone; + } else { + dzone = zone; + if (dmz_lock_zone_reclaim(dzone)) + return dzone; + } + } + + if (maxw_z && dmz_lock_zone_reclaim(maxw_z)) + return maxw_z; + + /* + * If we come here, none of the zones inspected could be locked for + * reclaim. Try again, being more aggressive, that is, find the + * first zone that can be reclaimed regardless of its weitght. 
+ */ + list_for_each_entry(zone, zone_list, link) { + if (dmz_is_buf(zone)) { + dzone = zone->bzone; + if (dmz_is_rnd(dzone) && dzone->dev->dev_idx != idx) continue; - dzone = last; } else dzone = zone; if (dmz_lock_zone_reclaim(dzone)) @@ -2006,7 +2026,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd, struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd, unsigned int dev_idx, bool idle) { - struct dm_zone *zone; + struct dm_zone *zone = NULL; /* * Search for a zone candidate to reclaim: 2 cases are possible. @@ -2019,7 +2039,7 @@ struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd, dmz_lock_map(zmd); if (list_empty(&zmd->reserved_seq_zones_list)) zone = dmz_get_seq_zone_for_reclaim(zmd, dev_idx); - else + if (!zone) zone = dmz_get_rnd_zone_for_reclaim(zmd, dev_idx, idle); dmz_unlock_map(zmd); @@ -2197,8 +2217,15 @@ struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned int dev_idx, { struct list_head *list; struct dm_zone *zone; - int i = 0; + int i; + + /* Schedule reclaim to ensure free zones are available */ + if (!(flags & DMZ_ALLOC_RECLAIM)) { + for (i = 0; i < zmd->nr_devs; i++) + dmz_schedule_reclaim(zmd->dev[i].reclaim); + } + i = 0; again: if (flags & DMZ_ALLOC_CACHE) list = &zmd->unmap_cache_list; diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c index 2261b4dd60b7..9c0ecc9568a4 100644 --- a/drivers/md/dm-zoned-reclaim.c +++ b/drivers/md/dm-zoned-reclaim.c @@ -377,6 +377,7 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc) dmz_metadata_label(zmd), zrc->dev_idx); return -EBUSY; } + rzone = dzone; start = jiffies; if (dmz_is_cache(dzone) || dmz_is_rnd(dzone)) { @@ -391,8 +392,6 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc) */ ret = dmz_reclaim_rnd_data(zrc, dzone); } - rzone = dzone; - } else { struct dm_zone *bzone = dzone->bzone; sector_t chunk_block = 0; @@ -415,7 +414,6 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc) * be later reclaimed. */ ret = dmz_reclaim_seq_data(zrc, dzone); - rzone = dzone; } } out: @@ -458,6 +456,8 @@ static unsigned int dmz_reclaim_percentage(struct dmz_reclaim *zrc) nr_zones = dmz_nr_rnd_zones(zmd, zrc->dev_idx); nr_unmap = dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx); } + if (nr_unmap <= 1) + return 0; return nr_unmap * 100 / nr_zones; } @@ -503,7 +503,7 @@ static void dmz_reclaim_work(struct work_struct *work) { struct dmz_reclaim *zrc = container_of(work, struct dmz_reclaim, work.work); struct dmz_metadata *zmd = zrc->metadata; - unsigned int p_unmap, nr_unmap_rnd = 0, nr_rnd = 0; + unsigned int p_unmap; int ret; if (dmz_dev_is_dying(zmd)) @@ -529,9 +529,6 @@ static void dmz_reclaim_work(struct work_struct *work) zrc->kc_throttle.throttle = min(75U, 100U - p_unmap / 2); } - nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx); - nr_rnd = dmz_nr_rnd_zones(zmd, zrc->dev_idx); - DMDEBUG("(%s/%u): Reclaim (%u): %s, %u%% free zones (%u/%u cache %u/%u random)", dmz_metadata_label(zmd), zrc->dev_idx, zrc->kc_throttle.throttle, diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index a907a9446c0b..42aa5139df7c 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -400,15 +400,7 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw, dm_per_bio_data(bio, sizeof(struct dmz_bioctx)); struct dmz_metadata *zmd = dmz->metadata; struct dm_zone *zone; - int i, ret; - - /* - * Write may trigger a zone allocation. So make sure the - * allocation can succeed. 
- */ - if (bio_op(bio) == REQ_OP_WRITE) - for (i = 0; i < dmz->nr_ddevs; i++) - dmz_schedule_reclaim(dmz->dev[i].reclaim); + int ret; dmz_lock_metadata(zmd); @@ -890,7 +882,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv) } /* Set target (no write same support) */ - ti->max_io_len = dmz_zone_nr_sectors(dmz->metadata) << 9; + ti->max_io_len = dmz_zone_nr_sectors(dmz->metadata); ti->num_flush_bios = 1; ti->num_discard_bios = 1; ti->num_write_zeroes_bios = 1; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 109e81f33edb..52449afd58eb 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -12,6 +12,7 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/mutex.h> +#include <linux/sched/mm.h> #include <linux/sched/signal.h> #include <linux/blkpg.h> #include <linux/bio.h> @@ -654,28 +655,6 @@ static void free_tio(struct dm_target_io *tio) bio_put(&tio->clone); } -static bool md_in_flight_bios(struct mapped_device *md) -{ - int cpu; - struct hd_struct *part = &dm_disk(md)->part0; - long sum = 0; - - for_each_possible_cpu(cpu) { - sum += part_stat_local_read_cpu(part, in_flight[0], cpu); - sum += part_stat_local_read_cpu(part, in_flight[1], cpu); - } - - return sum != 0; -} - -static bool md_in_flight(struct mapped_device *md) -{ - if (queue_is_mq(md->queue)) - return blk_mq_queue_inflight(md->queue); - else - return md_in_flight_bios(md); -} - u64 dm_start_time_ns_from_clone(struct bio *bio) { struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); @@ -1009,6 +988,7 @@ static void clone_endio(struct bio *bio) struct dm_io *io = tio->io; struct mapped_device *md = tio->io->md; dm_endio_fn endio = tio->ti->type->end_io; + struct bio *orig_bio = io->orig_bio; if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) { if (bio_op(bio) == REQ_OP_DISCARD && @@ -1022,6 +1002,18 @@ static void clone_endio(struct bio *bio) disable_write_zeroes(md); } + /* + * For zone-append bios get offset in zone of the written + * sector and add that to the original bio sector pos. 
+ */ + if (bio_op(orig_bio) == REQ_OP_ZONE_APPEND) { + sector_t written_sector = bio->bi_iter.bi_sector; + struct request_queue *q = orig_bio->bi_disk->queue; + u64 mask = (u64)blk_queue_zone_sectors(q) - 1; + + orig_bio->bi_iter.bi_sector += written_sector & mask; + } + if (endio) { int r = endio(tio->ti, bio, &error); switch (r) { @@ -1452,9 +1444,6 @@ static int __send_empty_flush(struct clone_info *ci) BUG_ON(bio_has_data(ci->bio)); while ((ti = dm_table_get_target(ci->map, target_nr++))) __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); - - bio_disassociate_blkg(ci->bio); - return 0; } @@ -1642,6 +1631,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md, ci.bio = &flush_bio; ci.sector_count = 0; error = __send_empty_flush(&ci); + bio_uninit(ci.bio); /* dec_pending submits any data associated with flush */ } else if (op_is_zone_mgmt(bio_op(bio))) { ci.bio = bio; @@ -1716,6 +1706,7 @@ static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map, ci.bio = &flush_bio; ci.sector_count = 0; error = __send_empty_flush(&ci); + bio_uninit(ci.bio); /* dec_pending submits any data associated with flush */ } else { struct dm_target_io *tio; @@ -2457,15 +2448,29 @@ void dm_put(struct mapped_device *md) } EXPORT_SYMBOL_GPL(dm_put); -static int dm_wait_for_completion(struct mapped_device *md, long task_state) +static bool md_in_flight_bios(struct mapped_device *md) +{ + int cpu; + struct hd_struct *part = &dm_disk(md)->part0; + long sum = 0; + + for_each_possible_cpu(cpu) { + sum += part_stat_local_read_cpu(part, in_flight[0], cpu); + sum += part_stat_local_read_cpu(part, in_flight[1], cpu); + } + + return sum != 0; +} + +static int dm_wait_for_bios_completion(struct mapped_device *md, long task_state) { int r = 0; DEFINE_WAIT(wait); - while (1) { + while (true) { prepare_to_wait(&md->wait, &wait, task_state); - if (!md_in_flight(md)) + if (!md_in_flight_bios(md)) break; if (signal_pending_state(task_state, current)) { @@ -2480,6 +2485,28 @@ static int dm_wait_for_completion(struct mapped_device *md, long task_state) return r; } +static int dm_wait_for_completion(struct mapped_device *md, long task_state) +{ + int r = 0; + + if (!queue_is_mq(md->queue)) + return dm_wait_for_bios_completion(md, task_state); + + while (true) { + if (!blk_mq_queue_inflight(md->queue)) + break; + + if (signal_pending_state(task_state, current)) { + r = -EINTR; + break; + } + + msleep(5); + } + + return r; +} + /* * Process the deferred bios */ @@ -2913,17 +2940,25 @@ EXPORT_SYMBOL_GPL(dm_internal_resume_fast); int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, unsigned cookie) { + int r; + unsigned noio_flag; char udev_cookie[DM_COOKIE_LENGTH]; char *envp[] = { udev_cookie, NULL }; + noio_flag = memalloc_noio_save(); + if (!cookie) - return kobject_uevent(&disk_to_dev(md->disk)->kobj, action); + r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action); else { snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", DM_COOKIE_ENV_VAR_NAME, cookie); - return kobject_uevent_env(&disk_to_dev(md->disk)->kobj, - action, envp); + r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, + action, envp); } + + memalloc_noio_restore(noio_flag); + + return r; } uint32_t dm_next_uevent_seq(struct mapped_device *md) diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c index a4ee6b86663e..b91e472ee764 100644 --- a/drivers/media/platform/omap3isp/isp.c +++ b/drivers/media/platform/omap3isp/isp.c @@ -39,8 +39,6 @@ * Troy Laramy <t-laramy@ti.com> 
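A quick worked example of the zone-append fix-up in the clone_endio() hunk above, assuming the usual power-of-two zone size: with blk_queue_zone_sectors(q) == 0x80000 (512-byte sectors) and the device reporting written_sector == 0x180123,

    u64 mask = (u64)0x80000 - 1;            /* 0x7ffff                      */
    sector_t offset = 0x180123 & mask;      /* 0x123 into the zone          */
    /* orig_bio->bi_iter.bi_sector (the zone start, 0x180000) += offset     */

so the original bio ends up carrying the sector that was actually written.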
*/ -#include <asm/cacheflush.h> - #include <linux/clk.h> #include <linux/clkdev.h> #include <linux/delay.h> diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c index 10c214bd0903..1ac9aef70dff 100644 --- a/drivers/media/platform/omap3isp/ispvideo.c +++ b/drivers/media/platform/omap3isp/ispvideo.c @@ -18,7 +18,6 @@ #include <linux/sched.h> #include <linux/slab.h> #include <linux/vmalloc.h> -#include <asm/cacheflush.h> #include <media/v4l2-dev.h> #include <media/v4l2-ioctl.h> diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 68aea22f2b89..5216487db4fb 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -1324,13 +1324,13 @@ mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init) return 0; /* fw doesn't need any host buffers */ /* spin till we get enough memory */ - while(host_page_buffer_sz > 0) { - - if((ioc->HostPageBuffer = pci_alloc_consistent( - ioc->pcidev, - host_page_buffer_sz, - &ioc->HostPageBuffer_dma)) != NULL) { - + while (host_page_buffer_sz > 0) { + ioc->HostPageBuffer = + dma_alloc_coherent(&ioc->pcidev->dev, + host_page_buffer_sz, + &ioc->HostPageBuffer_dma, + GFP_KERNEL); + if (ioc->HostPageBuffer) { dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "host_page_buffer @ %p, dma @ %x, sz=%d bytes\n", ioc->name, ioc->HostPageBuffer, @@ -2741,8 +2741,8 @@ mpt_adapter_disable(MPT_ADAPTER *ioc) sz = ioc->alloc_sz; dexitprintk(ioc, printk(MYIOC_s_INFO_FMT "free @ %p, sz=%d bytes\n", ioc->name, ioc->alloc, ioc->alloc_sz)); - pci_free_consistent(ioc->pcidev, sz, - ioc->alloc, ioc->alloc_dma); + dma_free_coherent(&ioc->pcidev->dev, sz, ioc->alloc, + ioc->alloc_dma); ioc->reply_frames = NULL; ioc->req_frames = NULL; ioc->alloc = NULL; @@ -2751,8 +2751,8 @@ mpt_adapter_disable(MPT_ADAPTER *ioc) if (ioc->sense_buf_pool != NULL) { sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC); - pci_free_consistent(ioc->pcidev, sz, - ioc->sense_buf_pool, ioc->sense_buf_pool_dma); + dma_free_coherent(&ioc->pcidev->dev, sz, ioc->sense_buf_pool, + ioc->sense_buf_pool_dma); ioc->sense_buf_pool = NULL; ioc->alloc_total -= sz; } @@ -2802,7 +2802,7 @@ mpt_adapter_disable(MPT_ADAPTER *ioc) "HostPageBuffer free @ %p, sz=%d bytes\n", ioc->name, ioc->HostPageBuffer, ioc->HostPageBuffer_sz)); - pci_free_consistent(ioc->pcidev, ioc->HostPageBuffer_sz, + dma_free_coherent(&ioc->pcidev->dev, ioc->HostPageBuffer_sz, ioc->HostPageBuffer, ioc->HostPageBuffer_dma); ioc->HostPageBuffer = NULL; ioc->HostPageBuffer_sz = 0; @@ -4497,7 +4497,8 @@ PrimeIocFifos(MPT_ADAPTER *ioc) ioc->name, sz, sz, num_chain)); total_size += sz; - mem = pci_alloc_consistent(ioc->pcidev, total_size, &alloc_dma); + mem = dma_alloc_coherent(&ioc->pcidev->dev, total_size, + &alloc_dma, GFP_KERNEL); if (mem == NULL) { printk(MYIOC_s_ERR_FMT "Unable to allocate Reply, Request, Chain Buffers!\n", ioc->name); @@ -4574,8 +4575,8 @@ PrimeIocFifos(MPT_ADAPTER *ioc) spin_unlock_irqrestore(&ioc->FreeQlock, flags); sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC); - ioc->sense_buf_pool = - pci_alloc_consistent(ioc->pcidev, sz, &ioc->sense_buf_pool_dma); + ioc->sense_buf_pool = dma_alloc_coherent(&ioc->pcidev->dev, sz, + &ioc->sense_buf_pool_dma, GFP_KERNEL); if (ioc->sense_buf_pool == NULL) { printk(MYIOC_s_ERR_FMT "Unable to allocate Sense Buffers!\n", ioc->name); @@ -4613,18 +4614,16 @@ out_fail: if (ioc->alloc != NULL) { sz = ioc->alloc_sz; - pci_free_consistent(ioc->pcidev, - sz, - ioc->alloc, ioc->alloc_dma); + 
dma_free_coherent(&ioc->pcidev->dev, sz, ioc->alloc, + ioc->alloc_dma); ioc->reply_frames = NULL; ioc->req_frames = NULL; ioc->alloc_total -= sz; } if (ioc->sense_buf_pool != NULL) { sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC); - pci_free_consistent(ioc->pcidev, - sz, - ioc->sense_buf_pool, ioc->sense_buf_pool_dma); + dma_free_coherent(&ioc->pcidev->dev, sz, ioc->sense_buf_pool, + ioc->sense_buf_pool_dma); ioc->sense_buf_pool = NULL; } diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index f0737c57ed5f..1491561d2e5c 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -118,8 +118,6 @@ int mptscsih_suspend(struct pci_dev *pdev, pm_message_t state); int mptscsih_resume(struct pci_dev *pdev); #endif -#define SNS_LEN(scp) SCSI_SENSE_BUFFERSIZE - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -2422,7 +2420,7 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR /* Copy the sense received into the scsi command block. */ req_index = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); sense_data = ((u8 *)ioc->sense_buf_pool + (req_index * MPT_SENSE_BUFFER_ALLOC)); - memcpy(sc->sense_buffer, sense_data, SNS_LEN(sc)); + memcpy(sc->sense_buffer, sense_data, MPT_SENSE_BUFFER_ALLOC); /* Log SMART data (asc = 0x5D, non-IM case only) if required. */ diff --git a/drivers/mfd/ioc3.c b/drivers/mfd/ioc3.c index 02998d4eb74b..74cee7cb0afc 100644 --- a/drivers/mfd/ioc3.c +++ b/drivers/mfd/ioc3.c @@ -142,10 +142,11 @@ static int ioc3_irq_domain_setup(struct ioc3_priv_data *ipd, int irq) goto err; domain = irq_domain_create_linear(fn, 24, &ioc3_irq_domain_ops, ipd); - if (!domain) + if (!domain) { + irq_domain_free_fwnode(fn); goto err; + } - irq_domain_free_fwnode(fn); ipd->domain = domain; irq_set_chained_handler_and_data(irq, ioc3_irq_handler, domain); diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c index ab4144ea1f11..d6cd5537126c 100644 --- a/drivers/misc/atmel-ssc.c +++ b/drivers/misc/atmel-ssc.c @@ -10,7 +10,7 @@ #include <linux/clk.h> #include <linux/err.h> #include <linux/io.h> -#include <linux/spinlock.h> +#include <linux/mutex.h> #include <linux/atmel-ssc.h> #include <linux/slab.h> #include <linux/module.h> @@ -20,7 +20,7 @@ #include "../../sound/soc/atmel/atmel_ssc_dai.h" /* Serialize access to ssc_list and user count */ -static DEFINE_SPINLOCK(user_lock); +static DEFINE_MUTEX(user_lock); static LIST_HEAD(ssc_list); struct ssc_device *ssc_request(unsigned int ssc_num) @@ -28,7 +28,7 @@ struct ssc_device *ssc_request(unsigned int ssc_num) int ssc_valid = 0; struct ssc_device *ssc; - spin_lock(&user_lock); + mutex_lock(&user_lock); list_for_each_entry(ssc, &ssc_list, list) { if (ssc->pdev->dev.of_node) { if (of_alias_get_id(ssc->pdev->dev.of_node, "ssc") @@ -44,18 +44,18 @@ struct ssc_device *ssc_request(unsigned int ssc_num) } if (!ssc_valid) { - spin_unlock(&user_lock); + mutex_unlock(&user_lock); pr_err("ssc: ssc%d platform device is missing\n", ssc_num); return ERR_PTR(-ENODEV); } if (ssc->user) { - spin_unlock(&user_lock); + mutex_unlock(&user_lock); dev_dbg(&ssc->pdev->dev, "module busy\n"); return ERR_PTR(-EBUSY); } ssc->user++; - spin_unlock(&user_lock); + mutex_unlock(&user_lock); clk_prepare(ssc->clk); @@ -67,14 +67,14 @@ void ssc_free(struct ssc_device *ssc) { bool disable_clk = true; - spin_lock(&user_lock); + mutex_lock(&user_lock); if (ssc->user) ssc->user--; else { disable_clk = false; dev_dbg(&ssc->pdev->dev, "device already 
free\n"); } - spin_unlock(&user_lock); + mutex_unlock(&user_lock); if (disable_clk) clk_unprepare(ssc->clk); @@ -237,9 +237,9 @@ static int ssc_probe(struct platform_device *pdev) return -ENXIO; } - spin_lock(&user_lock); + mutex_lock(&user_lock); list_add_tail(&ssc->list, &ssc_list); - spin_unlock(&user_lock); + mutex_unlock(&user_lock); platform_set_drvdata(pdev, ssc); @@ -258,9 +258,9 @@ static int ssc_remove(struct platform_device *pdev) ssc_sound_dai_remove(ssc); - spin_lock(&user_lock); + mutex_lock(&user_lock); list_del(&ssc->list); - spin_unlock(&user_lock); + mutex_unlock(&user_lock); return 0; } diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c index f82974a916c3..b0f62cbbdc87 100644 --- a/drivers/misc/habanalabs/command_submission.c +++ b/drivers/misc/habanalabs/command_submission.c @@ -62,6 +62,12 @@ static void hl_fence_release(struct dma_fence *fence) container_of(fence, struct hl_cs_compl, base_fence); struct hl_device *hdev = hl_cs_cmpl->hdev; + /* EBUSY means the CS was never submitted and hence we don't have + * an attached hw_sob object that we should handle here + */ + if (fence->error == -EBUSY) + goto free; + if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) || (hl_cs_cmpl->type == CS_TYPE_WAIT)) { @@ -92,6 +98,7 @@ static void hl_fence_release(struct dma_fence *fence) kref_put(&hl_cs_cmpl->hw_sob->kref, hl_sob_reset); } +free: kfree_rcu(hl_cs_cmpl, base_fence.rcu); } @@ -328,10 +335,16 @@ static void cs_do_release(struct kref *ref) hl_ctx_put(cs->ctx); + /* We need to mark an error for not submitted because in that case + * the dma fence release flow is different. Mainly, we don't need + * to handle hw_sob for signal/wait + */ if (cs->timedout) dma_fence_set_error(cs->fence, -ETIMEDOUT); else if (cs->aborted) dma_fence_set_error(cs->fence, -EIO); + else if (!cs->submitted) + dma_fence_set_error(cs->fence, -EBUSY); dma_fence_signal(cs->fence); dma_fence_put(cs->fence); diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c index 3c8dcdfba20c..fc4372c18ce2 100644 --- a/drivers/misc/habanalabs/debugfs.c +++ b/drivers/misc/habanalabs/debugfs.c @@ -480,7 +480,7 @@ out: return 0; } -static ssize_t mmu_write(struct file *file, const char __user *buf, +static ssize_t mmu_asid_va_write(struct file *file, const char __user *buf, size_t count, loff_t *f_pos) { struct seq_file *s = file->private_data; @@ -1125,7 +1125,7 @@ static const struct hl_info_list hl_debugfs_list[] = { {"command_submission_jobs", command_submission_jobs_show, NULL}, {"userptr", userptr_show, NULL}, {"vm", vm_show, NULL}, - {"mmu", mmu_show, mmu_write}, + {"mmu", mmu_show, mmu_asid_va_write}, {"engines", engines_show, NULL} }; diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 61f88e9884ce..834470d10b46 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -96,7 +96,7 @@ #define GAUDI_NUM_OF_QM_ARB_ERR_CAUSE 3 -#define GAUDI_ARB_WDT_TIMEOUT 0x400000 +#define GAUDI_ARB_WDT_TIMEOUT 0x1000000 static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = { "gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3", @@ -1893,6 +1893,8 @@ static void gaudi_init_pci_dma_qman(struct hl_device *hdev, int dma_id, WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_0 + q_off, so_base_ws_lo); WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_0 + q_off, so_base_ws_hi); + WREG32(mmDMA0_QM_CP_BARRIER_CFG_0 + q_off, 0x100); + /* The following configuration is needed 
only once per QMAN */ if (qman_id == 0) { /* Configure RAZWI IRQ */ @@ -2725,6 +2727,12 @@ static int gaudi_mmu_init(struct hl_device *hdev) WREG32(mmSTLB_HOP_CONFIGURATION, hdev->mmu_huge_page_opt ? 0x30440 : 0x40440); + /* + * The H/W expects the first PI after init to be 1. After wraparound + * we'll write 0. + */ + gaudi->mmu_cache_inv_pi = 1; + gaudi->hw_cap_initialized |= HW_CAP_MMU; return 0; @@ -3790,6 +3798,25 @@ static int gaudi_validate_dma_pkt_no_mmu(struct hl_device *hdev, src_in_host); } +static int gaudi_validate_load_and_exe_pkt(struct hl_device *hdev, + struct hl_cs_parser *parser, + struct packet_load_and_exe *user_pkt) +{ + u32 cfg; + + cfg = le32_to_cpu(user_pkt->cfg); + + if (cfg & GAUDI_PKT_LOAD_AND_EXE_CFG_DST_MASK) { + dev_err(hdev->dev, + "User not allowed to use Load and Execute\n"); + return -EPERM; + } + + parser->patched_cb_size += sizeof(struct packet_load_and_exe); + + return 0; +} + static int gaudi_validate_cb(struct hl_device *hdev, struct hl_cs_parser *parser, bool is_mmu) { @@ -3838,6 +3865,11 @@ static int gaudi_validate_cb(struct hl_device *hdev, rc = -EPERM; break; + case PACKET_LOAD_AND_EXE: + rc = gaudi_validate_load_and_exe_pkt(hdev, parser, + (struct packet_load_and_exe *) user_pkt); + break; + case PACKET_LIN_DMA: parser->contains_dma_pkt = true; if (is_mmu) @@ -3855,7 +3887,6 @@ static int gaudi_validate_cb(struct hl_device *hdev, case PACKET_FENCE: case PACKET_NOP: case PACKET_ARB_POINT: - case PACKET_LOAD_AND_EXE: parser->patched_cb_size += pkt_size; break; @@ -5994,6 +6025,8 @@ static int gaudi_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, mutex_lock(&hdev->mmu_cache_lock); /* L0 & L1 invalidation */ + WREG32(mmSTLB_INV_PS, 3); + WREG32(mmSTLB_CACHE_INV, gaudi->mmu_cache_inv_pi++); WREG32(mmSTLB_INV_PS, 2); rc = hl_poll_timeout( diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h index a46530d375fa..41a8d9bff6bf 100644 --- a/drivers/misc/habanalabs/gaudi/gaudiP.h +++ b/drivers/misc/habanalabs/gaudi/gaudiP.h @@ -229,6 +229,8 @@ struct gaudi_internal_qman_info { * @multi_msi_mode: whether we are working in multi MSI single MSI mode. * Multi MSI is possible only with IOMMU enabled. * @ext_queue_idx: helper index for external queues initialization. + * @mmu_cache_inv_pi: PI for MMU cache invalidation flow. The H/W expects an + * 8-bit value so use u8. 
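The mmu_cache_inv_pi kernel-doc above boils down to u8 arithmetic; a short sketch of the intended sequence, reusing the names from the gaudi hunks above:

    gaudi->mmu_cache_inv_pi = 1;    /* H/W expects the first PI after init to be 1 */

    /* per invalidation: the u8 counter runs 1, 2, ..., 0xfe, 0xff, 0x00, 0x01, ...
     * so 0 is only ever written after a wraparound, as the comment says */
    WREG32(mmSTLB_INV_PS, 3);
    WREG32(mmSTLB_CACHE_INV, gaudi->mmu_cache_inv_pi++);
    WREG32(mmSTLB_INV_PS, 2);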
*/ struct gaudi_device { int (*armcp_info_get)(struct hl_device *hdev); @@ -248,6 +250,7 @@ struct gaudi_device { u32 hw_cap_initialized; u8 multi_msi_mode; u8 ext_queue_idx; + u8 mmu_cache_inv_pi; }; void gaudi_init_security(struct hl_device *hdev); diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h b/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h index 9a5800b0086b..0f0cd067bb43 100644 --- a/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h +++ b/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h @@ -197,6 +197,9 @@ struct packet_wait { __le32 ctl; }; +#define GAUDI_PKT_LOAD_AND_EXE_CFG_DST_SHIFT 0 +#define GAUDI_PKT_LOAD_AND_EXE_CFG_DST_MASK 0x00000001 + struct packet_load_and_exe { __le32 cfg; __le32 ctl; diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 8d468e0a950a..f476dbc7252b 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -745,9 +745,8 @@ static int mei_cl_device_remove(struct device *dev) mei_cl_bus_module_put(cldev); module_put(THIS_MODULE); - dev->driver = NULL; - return ret; + return ret; } static ssize_t name_show(struct device *dev, struct device_attribute *a, diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index 9392934e3a06..7becfc768bbc 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -94,6 +94,7 @@ #define MEI_DEV_ID_JSP_N 0x4DE0 /* Jasper Lake Point N */ #define MEI_DEV_ID_TGP_LP 0xA0E0 /* Tiger Lake Point LP */ +#define MEI_DEV_ID_TGP_H 0x43E0 /* Tiger Lake Point H */ #define MEI_DEV_ID_MCC 0x4B70 /* Mule Creek Canyon (EHL) */ #define MEI_DEV_ID_MCC_4 0x4B75 /* Mule Creek Canyon 4 (EHL) */ @@ -107,6 +108,8 @@ # define PCI_CFG_HFS_1_D0I3_MSK 0x80000000 #define PCI_CFG_HFS_2 0x48 #define PCI_CFG_HFS_3 0x60 +# define PCI_CFG_HFS_3_FW_SKU_MSK 0x00000070 +# define PCI_CFG_HFS_3_FW_SKU_SPS 0x00000060 #define PCI_CFG_HFS_4 0x64 #define PCI_CFG_HFS_5 0x68 #define PCI_CFG_HFS_6 0x6C diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index f620442addf5..7649710a2ab9 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c @@ -1366,7 +1366,7 @@ static bool mei_me_fw_type_nm(struct pci_dev *pdev) #define MEI_CFG_FW_NM \ .quirk_probe = mei_me_fw_type_nm -static bool mei_me_fw_type_sps(struct pci_dev *pdev) +static bool mei_me_fw_type_sps_4(struct pci_dev *pdev) { u32 reg; unsigned int devfn; @@ -1382,7 +1382,36 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev) return (reg & 0xf0000) == 0xf0000; } -#define MEI_CFG_FW_SPS \ +#define MEI_CFG_FW_SPS_4 \ + .quirk_probe = mei_me_fw_type_sps_4 + +/** + * mei_me_fw_sku_sps() - check for sps sku + * + * Read ME FW Status register to check for SPS Firmware. 
+ * The SPS FW is only signaled in pci function 0 + * + * @pdev: pci device + * + * Return: true in case of SPS firmware + */ +static bool mei_me_fw_type_sps(struct pci_dev *pdev) +{ + u32 reg; + u32 fw_type; + unsigned int devfn; + + devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0); + pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_3, ®); + trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_3", PCI_CFG_HFS_3, reg); + fw_type = (reg & PCI_CFG_HFS_3_FW_SKU_MSK); + + dev_dbg(&pdev->dev, "fw type is %d\n", fw_type); + + return fw_type == PCI_CFG_HFS_3_FW_SKU_SPS; +} + +#define MEI_CFG_FW_SPS \ .quirk_probe = mei_me_fw_type_sps #define MEI_CFG_FW_VER_SUPP \ @@ -1452,10 +1481,17 @@ static const struct mei_cfg mei_me_pch8_cfg = { }; /* PCH8 Lynx Point with quirk for SPS Firmware exclusion */ -static const struct mei_cfg mei_me_pch8_sps_cfg = { +static const struct mei_cfg mei_me_pch8_sps_4_cfg = { MEI_CFG_PCH8_HFS, MEI_CFG_FW_VER_SUPP, - MEI_CFG_FW_SPS, + MEI_CFG_FW_SPS_4, +}; + +/* LBG with quirk for SPS (4.0) Firmware exclusion */ +static const struct mei_cfg mei_me_pch12_sps_4_cfg = { + MEI_CFG_PCH8_HFS, + MEI_CFG_FW_VER_SUPP, + MEI_CFG_FW_SPS_4, }; /* Cannon Lake and newer devices */ @@ -1465,10 +1501,20 @@ static const struct mei_cfg mei_me_pch12_cfg = { MEI_CFG_DMA_128, }; -/* LBG with quirk for SPS Firmware exclusion */ +/* Cannon Lake with quirk for SPS 5.0 and newer Firmware exclusion */ static const struct mei_cfg mei_me_pch12_sps_cfg = { MEI_CFG_PCH8_HFS, MEI_CFG_FW_VER_SUPP, + MEI_CFG_DMA_128, + MEI_CFG_FW_SPS, +}; + +/* Cannon Lake with quirk for SPS 5.0 and newer Firmware exclusion + * w/o DMA support + */ +static const struct mei_cfg mei_me_pch12_nodma_sps_cfg = { + MEI_CFG_PCH8_HFS, + MEI_CFG_FW_VER_SUPP, MEI_CFG_FW_SPS, }; @@ -1480,6 +1526,15 @@ static const struct mei_cfg mei_me_pch15_cfg = { MEI_CFG_TRC, }; +/* Tiger Lake with quirk for SPS 5.0 and newer Firmware exclusion */ +static const struct mei_cfg mei_me_pch15_sps_cfg = { + MEI_CFG_PCH8_HFS, + MEI_CFG_FW_VER_SUPP, + MEI_CFG_DMA_128, + MEI_CFG_TRC, + MEI_CFG_FW_SPS, +}; + /* * mei_cfg_list - A list of platform platform specific configurations. * Note: has to be synchronized with enum mei_cfg_idx. @@ -1492,10 +1547,13 @@ static const struct mei_cfg *const mei_cfg_list[] = { [MEI_ME_PCH7_CFG] = &mei_me_pch7_cfg, [MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg, [MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg, - [MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg, + [MEI_ME_PCH8_SPS_4_CFG] = &mei_me_pch8_sps_4_cfg, [MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg, + [MEI_ME_PCH12_SPS_4_CFG] = &mei_me_pch12_sps_4_cfg, [MEI_ME_PCH12_SPS_CFG] = &mei_me_pch12_sps_cfg, + [MEI_ME_PCH12_SPS_NODMA_CFG] = &mei_me_pch12_nodma_sps_cfg, [MEI_ME_PCH15_CFG] = &mei_me_pch15_cfg, + [MEI_ME_PCH15_SPS_CFG] = &mei_me_pch15_sps_cfg, }; const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx) diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h index b6b94e211464..6a8973649c49 100644 --- a/drivers/misc/mei/hw-me.h +++ b/drivers/misc/mei/hw-me.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (c) 2012-2019, Intel Corporation. All rights reserved. + * Copyright (c) 2012-2020, Intel Corporation. All rights reserved. * Intel Management Engine Interface (Intel MEI) Linux driver */ @@ -76,14 +76,20 @@ struct mei_me_hw { * with quirk for Node Manager exclusion. * @MEI_ME_PCH8_CFG: Platform Controller Hub Gen8 and newer * client platforms. 
- * @MEI_ME_PCH8_SPS_CFG: Platform Controller Hub Gen8 and newer + * @MEI_ME_PCH8_SPS_4_CFG: Platform Controller Hub Gen8 and newer * servers platforms with quirk for * SPS firmware exclusion. * @MEI_ME_PCH12_CFG: Platform Controller Hub Gen12 and newer - * @MEI_ME_PCH12_SPS_CFG: Platform Controller Hub Gen12 and newer + * @MEI_ME_PCH12_SPS_4_CFG:Platform Controller Hub Gen12 up to 4.0 + * servers platforms with quirk for + * SPS firmware exclusion. + * @MEI_ME_PCH12_SPS_CFG: Platform Controller Hub Gen12 5.0 and newer * servers platforms with quirk for * SPS firmware exclusion. * @MEI_ME_PCH15_CFG: Platform Controller Hub Gen15 and newer + * @MEI_ME_PCH15_SPS_CFG: Platform Controller Hub Gen15 and newer + * servers platforms with quirk for + * SPS firmware exclusion. * @MEI_ME_NUM_CFG: Upper Sentinel. */ enum mei_cfg_idx { @@ -94,10 +100,13 @@ enum mei_cfg_idx { MEI_ME_PCH7_CFG, MEI_ME_PCH_CPT_PBG_CFG, MEI_ME_PCH8_CFG, - MEI_ME_PCH8_SPS_CFG, + MEI_ME_PCH8_SPS_4_CFG, MEI_ME_PCH12_CFG, + MEI_ME_PCH12_SPS_4_CFG, MEI_ME_PCH12_SPS_CFG, + MEI_ME_PCH12_SPS_NODMA_CFG, MEI_ME_PCH15_CFG, + MEI_ME_PCH15_SPS_CFG, MEI_ME_NUM_CFG, }; diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 71f795b510ce..2a3f2fd5df50 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -59,18 +59,18 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH7_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH7_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH7_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_4_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_4_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, MEI_ME_PCH8_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, MEI_ME_PCH8_SPS_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, MEI_ME_PCH8_SPS_4_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, MEI_ME_PCH8_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, MEI_ME_PCH8_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_SPT, MEI_ME_PCH8_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_SPS_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_4_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_4_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_SPS_4_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)}, @@ -84,8 +84,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH12_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_3, MEI_ME_PCH8_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_3, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_SPS_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_3, MEI_ME_PCH12_SPS_NODMA_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)}, @@ -96,6 +96,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_H, MEI_ME_PCH15_SPS_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_JSP_N, MEI_ME_PCH15_CFG)}, diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c 
index 7eb38d7482c6..08a3b1c05acb 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -1146,9 +1146,11 @@ static int meson_mmc_probe(struct platform_device *pdev) mmc->caps |= MMC_CAP_CMD23; if (host->dram_access_quirk) { + /* Limit segments to 1 due to low available sram memory */ + mmc->max_segs = 1; /* Limit to the available sram memory */ - mmc->max_segs = SD_EMMC_SRAM_DATA_BUF_LEN / mmc->max_blk_size; - mmc->max_blk_count = mmc->max_segs; + mmc->max_blk_count = SD_EMMC_SRAM_DATA_BUF_LEN / + mmc->max_blk_size; } else { mmc->max_blk_count = CMD_CFG_LENGTH_MASK; mmc->max_segs = SD_EMMC_DESC_BUF_LEN / diff --git a/drivers/mmc/host/owl-mmc.c b/drivers/mmc/host/owl-mmc.c index 5e20c099fe03..df43f42855e2 100644 --- a/drivers/mmc/host/owl-mmc.c +++ b/drivers/mmc/host/owl-mmc.c @@ -689,7 +689,7 @@ MODULE_DEVICE_TABLE(of, owl_mmc_of_match); static struct platform_driver owl_mmc_driver = { .driver = { .name = "owl_mmc", - .of_match_table = of_match_ptr(owl_mmc_of_match), + .of_match_table = owl_mmc_of_match, }, .probe = owl_mmc_probe, .remove = owl_mmc_remove, diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index b277dd7fbdb5..c0d58e9fcc33 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -618,8 +618,9 @@ static int msm_init_cm_dll(struct sdhci_host *host) config &= ~CORE_CLK_PWRSAVE; writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec); - config = msm_host->dll_config; - writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config); + if (msm_host->dll_config) + writel_relaxed(msm_host->dll_config, + host->ioaddr + msm_offset->core_dll_config); if (msm_host->use_14lpp_dll_reset) { config = readl_relaxed(host->ioaddr + diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 76d832a88e0c..7d930569a7df 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -1273,8 +1273,8 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, return -EROFS; if (!len) return 0; - if (!mtd->oops_panic_write) - mtd->oops_panic_write = true; + if (!master->oops_panic_write) + master->oops_panic_write = true; return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len, retlen, buf); diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c index 0a5cb77966cc..f5a53aac3c5f 100644 --- a/drivers/mtd/nand/raw/nandsim.c +++ b/drivers/mtd/nand/raw/nandsim.c @@ -1761,7 +1761,7 @@ static void ns_switch_state(struct nandsim *ns) NS_DBG("switch_state: operation is unknown, try to find it\n"); - if (!ns_find_operation(ns, 0)) + if (ns_find_operation(ns, 0)) return; if ((ns->state & ACTION_MASK) && diff --git a/drivers/mtd/nand/raw/xway_nand.c b/drivers/mtd/nand/raw/xway_nand.c index 94bfba994326..29255476afdb 100644 --- a/drivers/mtd/nand/raw/xway_nand.c +++ b/drivers/mtd/nand/raw/xway_nand.c @@ -224,7 +224,7 @@ static int xway_nand_remove(struct platform_device *pdev) struct nand_chip *chip = &data->chip; int ret; - ret = mtd_device_unregister(mtd); + ret = mtd_device_unregister(nand_to_mtd(chip)); WARN_ON(ret); nand_cleanup(chip); diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c index 5d3c691a1c66..3dd46cd55114 100644 --- a/drivers/net/bareudp.c +++ b/drivers/net/bareudp.c @@ -572,6 +572,9 @@ static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf, if (data[IFLA_BAREUDP_SRCPORT_MIN]) conf->sport_min = nla_get_u16(data[IFLA_BAREUDP_SRCPORT_MIN]); + if (data[IFLA_BAREUDP_MULTIPROTO_MODE]) + conf->multi_proto_mode = true; 
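The owl-mmc hunk above drops of_match_ptr(), and the arm-smmu-qcom hunk earlier marks its table __maybe_unused for a similar reason: of_match_ptr(tbl) expands to NULL when CONFIG_OF is disabled, leaving the table defined but unreferenced, which can trigger unused-variable warnings. A minimal sketch of the unconditional-reference style, with hypothetical driver names:

    static const struct of_device_id example_of_match[] = {
            { .compatible = "vendor,example" },
            { /* sentinel */ }
    };
    MODULE_DEVICE_TABLE(of, example_of_match);

    static struct platform_driver example_driver = {
            .driver = {
                    .name           = "example",
                    .of_match_table = example_of_match,     /* no of_match_ptr() */
            },
    };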
+ return 0; } diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index c7ac63f41918..946e41f020a5 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -1147,6 +1147,8 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) set_bit(0, priv->cfp.used); set_bit(0, priv->cfp.unique); + /* Balance of_node_put() done by of_find_node_by_name() */ + of_node_get(dn); ports = of_find_node_by_name(dn, "ports"); if (ports) { bcm_sf2_identify_ports(priv, ports); diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c index 47d65b77caf7..7c17b0f705ec 100644 --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -1268,6 +1268,9 @@ static int ksz8795_switch_init(struct ksz_device *dev) return -ENOMEM; } + /* set the real number of ports */ + dev->ds->num_ports = dev->port_cnt; + return 0; } diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index 9a51b8a4de5d..8d15c3016024 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -1588,6 +1588,9 @@ static int ksz9477_switch_init(struct ksz_device *dev) return -ENOMEM; } + /* set the real number of ports */ + dev->ds->num_ports = dev->port_cnt; + return 0; } diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c index 7d050fab0889..7951f52d860d 100644 --- a/drivers/net/dsa/microchip/ksz9477_i2c.c +++ b/drivers/net/dsa/microchip/ksz9477_i2c.c @@ -79,6 +79,7 @@ MODULE_DEVICE_TABLE(i2c, ksz9477_i2c_id); static const struct of_device_id ksz9477_dt_ids[] = { { .compatible = "microchip,ksz9477" }, { .compatible = "microchip,ksz9897" }, + { .compatible = "microchip,ksz9893" }, { .compatible = "microchip,ksz9567" }, {}, }; diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c index bdfd6c4e190d..af3565160db6 100644 --- a/drivers/net/dsa/sja1105/sja1105_vl.c +++ b/drivers/net/dsa/sja1105/sja1105_vl.c @@ -7,6 +7,165 @@ #define SJA1105_SIZE_VL_STATUS 8 +/* Insert into the global gate list, sorted by gate action time. */ +static int sja1105_insert_gate_entry(struct sja1105_gating_config *gating_cfg, + struct sja1105_rule *rule, + u8 gate_state, s64 entry_time, + struct netlink_ext_ack *extack) +{ + struct sja1105_gate_entry *e; + int rc; + + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (!e) + return -ENOMEM; + + e->rule = rule; + e->gate_state = gate_state; + e->interval = entry_time; + + if (list_empty(&gating_cfg->entries)) { + list_add(&e->list, &gating_cfg->entries); + } else { + struct sja1105_gate_entry *p; + + list_for_each_entry(p, &gating_cfg->entries, list) { + if (p->interval == e->interval) { + NL_SET_ERR_MSG_MOD(extack, + "Gate conflict"); + rc = -EBUSY; + goto err; + } + + if (e->interval < p->interval) + break; + } + list_add(&e->list, p->list.prev); + } + + gating_cfg->num_entries++; + + return 0; +err: + kfree(e); + return rc; +} + +/* The gate entries contain absolute times in their e->interval field. Convert + * that to proper intervals (i.e. "0, 5, 10, 15" to "5, 5, 5, 5"). 
+ */ +static void +sja1105_gating_cfg_time_to_interval(struct sja1105_gating_config *gating_cfg, + u64 cycle_time) +{ + struct sja1105_gate_entry *last_e; + struct sja1105_gate_entry *e; + struct list_head *prev; + + list_for_each_entry(e, &gating_cfg->entries, list) { + struct sja1105_gate_entry *p; + + prev = e->list.prev; + + if (prev == &gating_cfg->entries) + continue; + + p = list_entry(prev, struct sja1105_gate_entry, list); + p->interval = e->interval - p->interval; + } + last_e = list_last_entry(&gating_cfg->entries, + struct sja1105_gate_entry, list); + last_e->interval = cycle_time - last_e->interval; +} + +static void sja1105_free_gating_config(struct sja1105_gating_config *gating_cfg) +{ + struct sja1105_gate_entry *e, *n; + + list_for_each_entry_safe(e, n, &gating_cfg->entries, list) { + list_del(&e->list); + kfree(e); + } +} + +static int sja1105_compose_gating_subschedule(struct sja1105_private *priv, + struct netlink_ext_ack *extack) +{ + struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg; + struct sja1105_rule *rule; + s64 max_cycle_time = 0; + s64 its_base_time = 0; + int i, rc = 0; + + sja1105_free_gating_config(gating_cfg); + + list_for_each_entry(rule, &priv->flow_block.rules, list) { + if (rule->type != SJA1105_RULE_VL) + continue; + if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED) + continue; + + if (max_cycle_time < rule->vl.cycle_time) { + max_cycle_time = rule->vl.cycle_time; + its_base_time = rule->vl.base_time; + } + } + + if (!max_cycle_time) + return 0; + + dev_dbg(priv->ds->dev, "max_cycle_time %lld its_base_time %lld\n", + max_cycle_time, its_base_time); + + gating_cfg->base_time = its_base_time; + gating_cfg->cycle_time = max_cycle_time; + gating_cfg->num_entries = 0; + + list_for_each_entry(rule, &priv->flow_block.rules, list) { + s64 time; + s64 rbt; + + if (rule->type != SJA1105_RULE_VL) + continue; + if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED) + continue; + + /* Calculate the difference between this gating schedule's + * base time, and the base time of the gating schedule with the + * longest cycle time. We call it the relative base time (rbt). + */ + rbt = future_base_time(rule->vl.base_time, rule->vl.cycle_time, + its_base_time); + rbt -= its_base_time; + + time = rbt; + + for (i = 0; i < rule->vl.num_entries; i++) { + u8 gate_state = rule->vl.entries[i].gate_state; + s64 entry_time = time; + + while (entry_time < max_cycle_time) { + rc = sja1105_insert_gate_entry(gating_cfg, rule, + gate_state, + entry_time, + extack); + if (rc) + goto err; + + entry_time += rule->vl.cycle_time; + } + time += rule->vl.entries[i].interval; + } + } + + sja1105_gating_cfg_time_to_interval(gating_cfg, max_cycle_time); + + return 0; +err: + sja1105_free_gating_config(gating_cfg); + return rc; +} + /* The switch flow classification core implements TTEthernet, which 'thinks' in * terms of Virtual Links (VL), a concept borrowed from ARINC 664 part 7. 
* However it also has one other operating mode (VLLUPFORMAT=0) where it acts @@ -342,7 +501,9 @@ int sja1105_vl_redirect(struct sja1105_private *priv, int port, NL_SET_ERR_MSG_MOD(extack, "Can only redirect based on DMAC"); return -EOPNOTSUPP; - } else if (key->type != SJA1105_KEY_VLAN_AWARE_VL) { + } else if ((priv->vlan_state == SJA1105_VLAN_BEST_EFFORT || + priv->vlan_state == SJA1105_VLAN_FILTERING_FULL) && + key->type != SJA1105_KEY_VLAN_AWARE_VL) { NL_SET_ERR_MSG_MOD(extack, "Can only redirect based on {DMAC, VID, PCP}"); return -EOPNOTSUPP; @@ -388,171 +549,19 @@ int sja1105_vl_delete(struct sja1105_private *priv, int port, kfree(rule); } - rc = sja1105_init_virtual_links(priv, extack); + rc = sja1105_compose_gating_subschedule(priv, extack); if (rc) return rc; - return sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS); -} - -/* Insert into the global gate list, sorted by gate action time. */ -static int sja1105_insert_gate_entry(struct sja1105_gating_config *gating_cfg, - struct sja1105_rule *rule, - u8 gate_state, s64 entry_time, - struct netlink_ext_ack *extack) -{ - struct sja1105_gate_entry *e; - int rc; - - e = kzalloc(sizeof(*e), GFP_KERNEL); - if (!e) - return -ENOMEM; - - e->rule = rule; - e->gate_state = gate_state; - e->interval = entry_time; - - if (list_empty(&gating_cfg->entries)) { - list_add(&e->list, &gating_cfg->entries); - } else { - struct sja1105_gate_entry *p; - - list_for_each_entry(p, &gating_cfg->entries, list) { - if (p->interval == e->interval) { - NL_SET_ERR_MSG_MOD(extack, - "Gate conflict"); - rc = -EBUSY; - goto err; - } - - if (e->interval < p->interval) - break; - } - list_add(&e->list, p->list.prev); - } - - gating_cfg->num_entries++; - - return 0; -err: - kfree(e); - return rc; -} - -/* The gate entries contain absolute times in their e->interval field. Convert - * that to proper intervals (i.e. "0, 5, 10, 15" to "5, 5, 5, 5"). 
- */ -static void -sja1105_gating_cfg_time_to_interval(struct sja1105_gating_config *gating_cfg, - u64 cycle_time) -{ - struct sja1105_gate_entry *last_e; - struct sja1105_gate_entry *e; - struct list_head *prev; - - list_for_each_entry(e, &gating_cfg->entries, list) { - struct sja1105_gate_entry *p; - - prev = e->list.prev; - - if (prev == &gating_cfg->entries) - continue; - - p = list_entry(prev, struct sja1105_gate_entry, list); - p->interval = e->interval - p->interval; - } - last_e = list_last_entry(&gating_cfg->entries, - struct sja1105_gate_entry, list); - if (last_e->list.prev != &gating_cfg->entries) - last_e->interval = cycle_time - last_e->interval; -} - -static void sja1105_free_gating_config(struct sja1105_gating_config *gating_cfg) -{ - struct sja1105_gate_entry *e, *n; - - list_for_each_entry_safe(e, n, &gating_cfg->entries, list) { - list_del(&e->list); - kfree(e); - } -} - -static int sja1105_compose_gating_subschedule(struct sja1105_private *priv, - struct netlink_ext_ack *extack) -{ - struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg; - struct sja1105_rule *rule; - s64 max_cycle_time = 0; - s64 its_base_time = 0; - int i, rc = 0; - - list_for_each_entry(rule, &priv->flow_block.rules, list) { - if (rule->type != SJA1105_RULE_VL) - continue; - if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED) - continue; - - if (max_cycle_time < rule->vl.cycle_time) { - max_cycle_time = rule->vl.cycle_time; - its_base_time = rule->vl.base_time; - } - } - - if (!max_cycle_time) - return 0; - - dev_dbg(priv->ds->dev, "max_cycle_time %lld its_base_time %lld\n", - max_cycle_time, its_base_time); - - sja1105_free_gating_config(gating_cfg); - - gating_cfg->base_time = its_base_time; - gating_cfg->cycle_time = max_cycle_time; - gating_cfg->num_entries = 0; - - list_for_each_entry(rule, &priv->flow_block.rules, list) { - s64 time; - s64 rbt; - - if (rule->type != SJA1105_RULE_VL) - continue; - if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED) - continue; - - /* Calculate the difference between this gating schedule's - * base time, and the base time of the gating schedule with the - * longest cycle time. We call it the relative base time (rbt). 
- */ - rbt = future_base_time(rule->vl.base_time, rule->vl.cycle_time, - its_base_time); - rbt -= its_base_time; - - time = rbt; - - for (i = 0; i < rule->vl.num_entries; i++) { - u8 gate_state = rule->vl.entries[i].gate_state; - s64 entry_time = time; - - while (entry_time < max_cycle_time) { - rc = sja1105_insert_gate_entry(gating_cfg, rule, - gate_state, - entry_time, - extack); - if (rc) - goto err; - - entry_time += rule->vl.cycle_time; - } - time += rule->vl.entries[i].interval; - } - } + rc = sja1105_init_virtual_links(priv, extack); + if (rc) + return rc; - sja1105_gating_cfg_time_to_interval(gating_cfg, max_cycle_time); + rc = sja1105_init_scheduling(priv); + if (rc < 0) + return rc; - return 0; -err: - sja1105_free_gating_config(gating_cfg); - return rc; + return sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS); } int sja1105_vl_gate(struct sja1105_private *priv, int port, @@ -588,14 +597,12 @@ int sja1105_vl_gate(struct sja1105_private *priv, int port, if (priv->vlan_state == SJA1105_VLAN_UNAWARE && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) { - dev_err(priv->ds->dev, "1: vlan state %d key type %d\n", - priv->vlan_state, key->type); NL_SET_ERR_MSG_MOD(extack, "Can only gate based on DMAC"); return -EOPNOTSUPP; - } else if (key->type != SJA1105_KEY_VLAN_AWARE_VL) { - dev_err(priv->ds->dev, "2: vlan state %d key type %d\n", - priv->vlan_state, key->type); + } else if ((priv->vlan_state == SJA1105_VLAN_BEST_EFFORT || + priv->vlan_state == SJA1105_VLAN_FILTERING_FULL) && + key->type != SJA1105_KEY_VLAN_AWARE_VL) { NL_SET_ERR_MSG_MOD(extack, "Can only gate based on {DMAC, VID, PCP}"); return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c index 3c8e8047ea1e..d775b23025c1 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c @@ -1700,7 +1700,7 @@ void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location, for (i = 0; i < 4; ++i) aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_SRCA_ADR(location + i), - ipv6_src[i]); + ipv6_src[3 - i]); } void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location, @@ -1711,7 +1711,7 @@ void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location, for (i = 0; i < 4; ++i) aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_DSTA_ADR(location + i), - ipv6_dest[i]); + ipv6_dest[3 - i]); } u32 hw_atl_sem_ram_get(struct aq_hw_s *self) diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h index 06220792daf1..7430ff025134 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h @@ -1360,7 +1360,7 @@ */ /* Register address for bitfield pif_rpf_l3_da0_i[31:0] */ -#define HW_ATL_RPF_L3_DSTA_ADR(filter) (0x000053B0 + (filter) * 0x4) +#define HW_ATL_RPF_L3_DSTA_ADR(filter) (0x000053D0 + (filter) * 0x4) /* Bitmask for bitfield l3_da0[1F:0] */ #define HW_ATL_RPF_L3_DSTA_MSK 0xFFFFFFFFu /* Inverted bitmask for bitfield l3_da0[1F:0] */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index b93e05f91d77..6a884df44612 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -6292,6 +6292,7 @@ int bnxt_hwrm_set_coal(struct bnxt *bp) static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp) 
{ + struct hwrm_stat_ctx_clr_stats_input req0 = {0}; struct hwrm_stat_ctx_free_input req = {0}; int i; @@ -6301,6 +6302,7 @@ static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp) if (BNXT_CHIP_TYPE_NITRO_A0(bp)) return; + bnxt_hwrm_cmd_hdr_init(bp, &req0, HWRM_STAT_CTX_CLR_STATS, -1, -1); bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1); mutex_lock(&bp->hwrm_cmd_lock); @@ -6310,7 +6312,11 @@ static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp) if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) { req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); - + if (BNXT_FW_MAJ(bp) <= 20) { + req0.stat_ctx_id = req.stat_ctx_id; + _hwrm_send_message(bp, &req0, sizeof(req0), + HWRM_CMD_TIMEOUT); + } _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); @@ -6976,7 +6982,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD; bp->tx_push_thresh = 0; - if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) + if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) && + BNXT_FW_MAJ(bp) > 217) bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); @@ -7240,8 +7247,9 @@ static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent) static int bnxt_hwrm_ver_get(struct bnxt *bp) { struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; + u16 fw_maj, fw_min, fw_bld, fw_rsv; u32 dev_caps_cfg, hwrm_ver; - int rc; + int rc, len; bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; mutex_lock(&bp->hwrm_cmd_lock); @@ -7273,9 +7281,22 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, resp->hwrm_intf_upd_8b); - snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d", - resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b, - resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b); + fw_maj = le16_to_cpu(resp->hwrm_fw_major); + if (bp->hwrm_spec_code > 0x10803 && fw_maj) { + fw_min = le16_to_cpu(resp->hwrm_fw_minor); + fw_bld = le16_to_cpu(resp->hwrm_fw_build); + fw_rsv = le16_to_cpu(resp->hwrm_fw_patch); + len = FW_VER_STR_LEN; + } else { + fw_maj = resp->hwrm_fw_maj_8b; + fw_min = resp->hwrm_fw_min_8b; + fw_bld = resp->hwrm_fw_bld_8b; + fw_rsv = resp->hwrm_fw_rsvd_8b; + len = BC_HWRM_STR_LEN; + } + bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv); + snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld, + fw_rsv); if (strlen(resp->active_pkg_name)) { int fw_ver_len = strlen(bp->fw_ver_str); @@ -11892,7 +11913,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->ethtool_ops = &bnxt_ethtool_ops; pci_set_drvdata(pdev, dev); - bnxt_vpd_read_info(bp); + if (BNXT_PF(bp)) + bnxt_vpd_read_info(bp); rc = bnxt_alloc_hwrm_resources(bp); if (rc) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 9e173d74b72a..78e2fd63ac3d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1746,6 +1746,11 @@ struct bnxt { #define PHY_VER_STR_LEN (FW_VER_STR_LEN - BC_HWRM_STR_LEN) char fw_ver_str[FW_VER_STR_LEN]; char hwrm_ver_supp[FW_VER_STR_LEN]; + u64 fw_ver_code; +#define BNXT_FW_VER_CODE(maj, min, bld, rsv) \ + ((u64)(maj) << 48 | (u64)(min) << 32 | (u64)(bld) << 16 | (rsv)) +#define BNXT_FW_MAJ(bp) ((bp)->fw_ver_code >> 48) + __be16 vxlan_port; u8 vxlan_port_cnt; __le16 vxlan_fw_dst_port_id; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 3a9a51f7063a..392e32c7122a 100644 --- 
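The new BNXT_FW_VER_CODE()/BNXT_FW_MAJ() helpers in bnxt.h pack the four firmware version components into a single u64 so that version gates such as "BNXT_FW_MAJ(bp) <= 20" (legacy HWRM_STAT_CTX_CLR_STATS path) and "> 217" (TX push) reduce to one shift. A self-contained sketch follows; the renamed FW_VER_CODE/FW_MAJ macros mirror the driver's, and the 20.6.143.0 version is made up.

#include <stdio.h>
#include <stdint.h>

#define FW_VER_CODE(maj, min, bld, rsv) \
	((uint64_t)(maj) << 48 | (uint64_t)(min) << 32 | \
	 (uint64_t)(bld) << 16 | (uint64_t)(rsv))
#define FW_MAJ(code)	((code) >> 48)

int main(void)
{
	uint64_t code = FW_VER_CODE(20, 6, 143, 0);	/* hypothetical 20.6.143.0 */

	printf("fw_ver_code=0x%016llx major=%llu\n",
	       (unsigned long long)code, (unsigned long long)FW_MAJ(code));
	/* major == 20 here, so the driver would also send STAT_CTX_CLR_STATS
	 * before STAT_CTX_FREE, as in the bnxt_hwrm_stat_ctx_free() hunk above.
	 */
	return 0;
}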
a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -396,6 +396,7 @@ static void bnxt_free_vf_resources(struct bnxt *bp) } } + bp->pf.active_vfs = 0; kfree(bp->pf.vf); bp->pf.vf = NULL; } @@ -835,7 +836,6 @@ void bnxt_sriov_disable(struct bnxt *bp) bnxt_free_vf_resources(bp); - bp->pf.active_vfs = 0; /* Reclaim all resources for the PF. */ rtnl_lock(); bnxt_restore_pf_fw_resources(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 0eef4f5e4a46..4a11c1e7cc02 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -1889,7 +1889,8 @@ static void bnxt_tc_setup_indr_rel(void *cb_priv) } static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp, - struct flow_block_offload *f) + struct flow_block_offload *f, void *data, + void (*cleanup)(struct flow_block_cb *block_cb)) { struct bnxt_flower_indr_block_cb_priv *cb_priv; struct flow_block_cb *block_cb; @@ -1907,9 +1908,10 @@ static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp, cb_priv->bp = bp; list_add(&cb_priv->list, &bp->tc_indr_block_list); - block_cb = flow_block_cb_alloc(bnxt_tc_setup_indr_block_cb, - cb_priv, cb_priv, - bnxt_tc_setup_indr_rel); + block_cb = flow_indr_block_cb_alloc(bnxt_tc_setup_indr_block_cb, + cb_priv, cb_priv, + bnxt_tc_setup_indr_rel, f, + netdev, data, bp, cleanup); if (IS_ERR(block_cb)) { list_del(&cb_priv->list); kfree(cb_priv); @@ -1930,7 +1932,7 @@ static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp, if (!block_cb) return -ENOENT; - flow_block_cb_remove(block_cb, f); + flow_indr_block_cb_remove(block_cb, f); list_del(&block_cb->driver_list); break; default: @@ -1945,14 +1947,17 @@ static bool bnxt_is_netdev_indr_offload(struct net_device *netdev) } static int bnxt_tc_setup_indr_cb(struct net_device *netdev, void *cb_priv, - enum tc_setup_type type, void *type_data) + enum tc_setup_type type, void *type_data, + void *data, + void (*cleanup)(struct flow_block_cb *block_cb)) { if (!bnxt_is_netdev_indr_offload(netdev)) return -EOPNOTSUPP; switch (type) { case TC_SETUP_BLOCK: - return bnxt_tc_setup_indr_block(netdev, cb_priv, type_data); + return bnxt_tc_setup_indr_block(netdev, cb_priv, type_data, data, + cleanup); default: break; } @@ -2074,7 +2079,7 @@ void bnxt_shutdown_tc(struct bnxt *bp) return; flow_indr_dev_unregister(bnxt_tc_setup_indr_cb, bp, - bnxt_tc_setup_indr_block_cb); + bnxt_tc_setup_indr_rel); rhashtable_destroy(&tc_info->flow_table); rhashtable_destroy(&tc_info->l2_table); rhashtable_destroy(&tc_info->decap_l2_table); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index ff31da0ed846..af924a8b885f 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -459,17 +459,6 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv, genet_dma_ring_regs[r]); } -static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv, - u32 f_index) -{ - u32 offset; - u32 reg; - - offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32); - reg = bcmgenet_hfb_reg_readl(priv, offset); - return !!(reg & (1 << (f_index % 32))); -} - static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index) { u32 offset; @@ -533,19 +522,6 @@ static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv, 
bcmgenet_hfb_reg_writel(priv, reg, offset); } -static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv) -{ - u32 f_index; - - /* First MAX_NUM_OF_FS_RULES are reserved for Rx NFC filters */ - for (f_index = MAX_NUM_OF_FS_RULES; - f_index < priv->hw_params->hfb_filter_cnt; f_index++) - if (!bcmgenet_hfb_is_filter_enabled(priv, f_index)) - return f_index; - - return -ENOMEM; -} - static int bcmgenet_hfb_validate_mask(void *mask, size_t size) { while (size) { @@ -634,8 +610,9 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, { struct ethtool_rx_flow_spec *fs = &rule->fs; int err = 0, offset = 0, f_length = 0; - u16 val_16, mask_16; u8 val_8, mask_8; + __be16 val_16; + u16 mask_16; size_t size; u32 *f_data; @@ -744,59 +721,6 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, return err; } -/* bcmgenet_hfb_add_filter - * - * Add new filter to Hardware Filter Block to match and direct Rx traffic to - * desired Rx queue. - * - * f_data is an array of unsigned 32-bit integers where each 32-bit integer - * provides filter data for 2 bytes (4 nibbles) of Rx frame: - * - * bits 31:20 - unused - * bit 19 - nibble 0 match enable - * bit 18 - nibble 1 match enable - * bit 17 - nibble 2 match enable - * bit 16 - nibble 3 match enable - * bits 15:12 - nibble 0 data - * bits 11:8 - nibble 1 data - * bits 7:4 - nibble 2 data - * bits 3:0 - nibble 3 data - * - * Example: - * In order to match: - * - Ethernet frame type = 0x0800 (IP) - * - IP version field = 4 - * - IP protocol field = 0x11 (UDP) - * - * The following filter is needed: - * u32 hfb_filter_ipv4_udp[] = { - * Rx frame offset 0x00: 0x00000000, 0x00000000, 0x00000000, 0x00000000, - * Rx frame offset 0x08: 0x00000000, 0x00000000, 0x000F0800, 0x00084000, - * Rx frame offset 0x10: 0x00000000, 0x00000000, 0x00000000, 0x00030011, - * }; - * - * To add the filter to HFB and direct the traffic to Rx queue 0, call: - * bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp, - * ARRAY_SIZE(hfb_filter_ipv4_udp), 0); - */ -int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data, - u32 f_length, u32 rx_queue) -{ - int f_index; - - f_index = bcmgenet_hfb_find_unused_filter(priv); - if (f_index < 0) - return -ENOMEM; - - if (f_length > priv->hw_params->hfb_filter_size) - return -EINVAL; - - bcmgenet_hfb_set_filter(priv, f_data, f_length, rx_queue, f_index); - bcmgenet_hfb_enable_filter(priv, f_index); - - return 0; -} - /* bcmgenet_hfb_clear * * Clear Hardware Filter Block and disable all filtering. 
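The documentation block removed along with bcmgenet_hfb_add_filter() above is where the HFB word format was spelled out: bits 19:16 enable matching for nibbles 0-3 (bit 19 = nibble 0) and bits 15:0 carry the four data nibbles. A small illustrative packer, reproducing the 0x000F0800 EtherType word from that comment; hfb_pack is a hypothetical helper, not part of the driver.

#include <stdio.h>
#include <stdint.h>

/* Pack two bytes of match data plus a 4-bit nibble-enable mask (bit 3 of
 * the mask = nibble 0, i.e. filter word bit 19) into one HFB filter word.
 */
static uint32_t hfb_pack(uint16_t data, uint8_t nibble_en)
{
	return ((uint32_t)(nibble_en & 0xf) << 16) | data;
}

int main(void)
{
	/* Match EtherType 0x0800 (IPv4) with all four nibbles enabled. */
	printf("0x%08X\n", hfb_pack(0x0800, 0xf));	/* prints 0x000F0800 */
	return 0;
}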
@@ -2118,11 +2042,6 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) goto out; } - if (skb_padto(skb, ETH_ZLEN)) { - ret = NETDEV_TX_OK; - goto out; - } - /* Retain how many bytes will be sent on the wire, without TSB inserted * by transmit checksum offload */ @@ -2169,6 +2088,9 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) len_stat = (size << DMA_BUFLENGTH_SHIFT) | (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT); + /* Note: if we ever change from DMA_TX_APPEND_CRC below we + * will need to restore software padding of "runt" packets + */ if (!i) { len_stat |= DMA_TX_APPEND_CRC | DMA_SOP; if (skb->ip_summed == CHECKSUM_PARTIAL) diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 7a3b22b35238..ebff1fc0d8ce 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -18168,8 +18168,8 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, rtnl_lock(); - /* We probably don't have netdev yet */ - if (!netdev || !netif_running(netdev)) + /* Could be second call or maybe we don't have netdev yet */ + if (!netdev || tp->pcierr_recovery || !netif_running(netdev)) goto done; /* We needn't recover from permanent error */ diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 67933079aeea..f1f0976e7669 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -2558,7 +2558,7 @@ static int macb_open(struct net_device *dev) err = macb_phylink_connect(bp); if (err) - goto napi_exit; + goto reset_hw; netif_tx_start_all_queues(dev); @@ -2567,9 +2567,11 @@ static int macb_open(struct net_device *dev) return 0; -napi_exit: +reset_hw: + macb_reset_hw(bp); for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) napi_disable(&queue->napi); + macb_free_consistent(bp); pm_exit: pm_runtime_put_sync(&bp->pdev->dev); return err; @@ -2819,11 +2821,13 @@ static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct macb *bp = netdev_priv(netdev); - wol->supported = 0; - wol->wolopts = 0; - - if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) + if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) { phylink_ethtool_get_wol(bp->phylink, wol); + wol->supported |= WAKE_MAGIC; + + if (bp->wol & MACB_WOL_ENABLED) + wol->wolopts |= WAKE_MAGIC; + } } static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) @@ -2831,9 +2835,13 @@ static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) struct macb *bp = netdev_priv(netdev); int ret; + /* Pass the order to phylink layer */ ret = phylink_ethtool_set_wol(bp->phylink, wol); - if (!ret) - return 0; + /* Don't manage WoL on MAC if handled by the PHY + * or if there's a failure in talking to the PHY + */ + if (!ret || ret != -EOPNOTSUPP) + return ret; if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) || (wol->wolopts & ~WAKE_MAGIC)) @@ -3760,15 +3768,9 @@ static int macb_init(struct platform_device *pdev) static struct sifive_fu540_macb_mgmt *mgmt; -/* Initialize and start the Receiver and Transmit subsystems */ -static int at91ether_start(struct net_device *dev) +static int at91ether_alloc_coherent(struct macb *lp) { - struct macb *lp = netdev_priv(dev); struct macb_queue *q = &lp->queues[0]; - struct macb_dma_desc *desc; - dma_addr_t addr; - u32 ctl; - int i; q->rx_ring = dma_alloc_coherent(&lp->pdev->dev, (AT91ETHER_MAX_RX_DESCR * @@ -3790,6 +3792,43 @@ static int 
at91ether_start(struct net_device *dev) return -ENOMEM; } + return 0; +} + +static void at91ether_free_coherent(struct macb *lp) +{ + struct macb_queue *q = &lp->queues[0]; + + if (q->rx_ring) { + dma_free_coherent(&lp->pdev->dev, + AT91ETHER_MAX_RX_DESCR * + macb_dma_desc_get_size(lp), + q->rx_ring, q->rx_ring_dma); + q->rx_ring = NULL; + } + + if (q->rx_buffers) { + dma_free_coherent(&lp->pdev->dev, + AT91ETHER_MAX_RX_DESCR * + AT91ETHER_MAX_RBUFF_SZ, + q->rx_buffers, q->rx_buffers_dma); + q->rx_buffers = NULL; + } +} + +/* Initialize and start the Receiver and Transmit subsystems */ +static int at91ether_start(struct macb *lp) +{ + struct macb_queue *q = &lp->queues[0]; + struct macb_dma_desc *desc; + dma_addr_t addr; + u32 ctl; + int i, ret; + + ret = at91ether_alloc_coherent(lp); + if (ret) + return ret; + addr = q->rx_buffers_dma; for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) { desc = macb_rx_desc(q, i); @@ -3811,9 +3850,39 @@ static int at91ether_start(struct net_device *dev) ctl = macb_readl(lp, NCR); macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE)); + /* Enable MAC interrupts */ + macb_writel(lp, IER, MACB_BIT(RCOMP) | + MACB_BIT(RXUBR) | + MACB_BIT(ISR_TUND) | + MACB_BIT(ISR_RLE) | + MACB_BIT(TCOMP) | + MACB_BIT(ISR_ROVR) | + MACB_BIT(HRESP)); + return 0; } +static void at91ether_stop(struct macb *lp) +{ + u32 ctl; + + /* Disable MAC interrupts */ + macb_writel(lp, IDR, MACB_BIT(RCOMP) | + MACB_BIT(RXUBR) | + MACB_BIT(ISR_TUND) | + MACB_BIT(ISR_RLE) | + MACB_BIT(TCOMP) | + MACB_BIT(ISR_ROVR) | + MACB_BIT(HRESP)); + + /* Disable Receiver and Transmitter */ + ctl = macb_readl(lp, NCR); + macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE))); + + /* Free resources. */ + at91ether_free_coherent(lp); +} + /* Open the ethernet interface */ static int at91ether_open(struct net_device *dev) { @@ -3833,63 +3902,36 @@ static int at91ether_open(struct net_device *dev) macb_set_hwaddr(lp); - ret = at91ether_start(dev); + ret = at91ether_start(lp); if (ret) - return ret; - - /* Enable MAC interrupts */ - macb_writel(lp, IER, MACB_BIT(RCOMP) | - MACB_BIT(RXUBR) | - MACB_BIT(ISR_TUND) | - MACB_BIT(ISR_RLE) | - MACB_BIT(TCOMP) | - MACB_BIT(ISR_ROVR) | - MACB_BIT(HRESP)); + goto pm_exit; ret = macb_phylink_connect(lp); if (ret) - return ret; + goto stop; netif_start_queue(dev); return 0; + +stop: + at91ether_stop(lp); +pm_exit: + pm_runtime_put_sync(&lp->pdev->dev); + return ret; } /* Close the interface */ static int at91ether_close(struct net_device *dev) { struct macb *lp = netdev_priv(dev); - struct macb_queue *q = &lp->queues[0]; - u32 ctl; - - /* Disable Receiver and Transmitter */ - ctl = macb_readl(lp, NCR); - macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE))); - - /* Disable MAC interrupts */ - macb_writel(lp, IDR, MACB_BIT(RCOMP) | - MACB_BIT(RXUBR) | - MACB_BIT(ISR_TUND) | - MACB_BIT(ISR_RLE) | - MACB_BIT(TCOMP) | - MACB_BIT(ISR_ROVR) | - MACB_BIT(HRESP)); netif_stop_queue(dev); phylink_stop(lp->phylink); phylink_disconnect_phy(lp->phylink); - dma_free_coherent(&lp->pdev->dev, - AT91ETHER_MAX_RX_DESCR * - macb_dma_desc_get_size(lp), - q->rx_ring, q->rx_ring_dma); - q->rx_ring = NULL; - - dma_free_coherent(&lp->pdev->dev, - AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ, - q->rx_buffers, q->rx_buffers_dma); - q->rx_buffers = NULL; + at91ether_stop(lp); return pm_runtime_put(&lp->pdev->dev); } @@ -4386,7 +4428,7 @@ static int macb_probe(struct platform_device *pdev) bp->wol = 0; if (of_get_property(np, "magic-packet", NULL)) bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; - 
device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); + device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); spin_lock_init(&bp->lock); @@ -4562,10 +4604,10 @@ static int __maybe_unused macb_suspend(struct device *dev) bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT); } - netif_carrier_off(netdev); if (bp->ptp_info) bp->ptp_info->ptp_remove(netdev); - pm_runtime_force_suspend(dev); + if (!device_may_wakeup(dev)) + pm_runtime_force_suspend(dev); return 0; } @@ -4580,7 +4622,8 @@ static int __maybe_unused macb_resume(struct device *dev) if (!netif_running(netdev)) return 0; - pm_runtime_force_resume(dev); + if (!device_may_wakeup(dev)) + pm_runtime_force_resume(dev); if (bp->wol & MACB_WOL_ENABLED) { macb_writel(bp, IDR, MACB_BIT(WOL)); @@ -4618,7 +4661,7 @@ static int __maybe_unused macb_runtime_suspend(struct device *dev) struct net_device *netdev = dev_get_drvdata(dev); struct macb *bp = netdev_priv(netdev); - if (!(device_may_wakeup(&bp->dev->dev))) { + if (!(device_may_wakeup(dev))) { clk_disable_unprepare(bp->tx_clk); clk_disable_unprepare(bp->hclk); clk_disable_unprepare(bp->pclk); @@ -4634,7 +4677,7 @@ static int __maybe_unused macb_runtime_resume(struct device *dev) struct net_device *netdev = dev_get_drvdata(dev); struct macb *bp = netdev_priv(netdev); - if (!(device_may_wakeup(&bp->dev->dev))) { + if (!(device_may_wakeup(dev))) { clk_prepare_enable(bp->pclk); clk_prepare_enable(bp->hclk); clk_prepare_enable(bp->tx_clk); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index 7b9cd69f9844..d8ab8e366818 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -1975,7 +1975,6 @@ int cudbg_collect_dump_context(struct cudbg_init *pdbg_init, u8 mem_type[CTXT_INGRESS + 1] = { 0 }; struct cudbg_buffer temp_buff = { 0 }; struct cudbg_ch_cntxt *buff; - u64 *dst_off, *src_off; u8 *ctx_buf; u8 i, k; int rc; @@ -2044,8 +2043,11 @@ int cudbg_collect_dump_context(struct cudbg_init *pdbg_init, } for (j = 0; j < max_ctx_qid; j++) { + __be64 *dst_off; + u64 *src_off; + src_off = (u64 *)(ctx_buf + j * SGE_CTXT_SIZE); - dst_off = (u64 *)buff->data; + dst_off = (__be64 *)buff->data; /* The data is stored in 64-bit cpu order. Convert it * to big endian before parsing. diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h index d3c654b9989b..80c6627fe981 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h @@ -136,6 +136,9 @@ static inline __u8 bitswap_1(unsigned char val) ((val & 0x02) << 5) | ((val & 0x01) << 7); } + +extern const char * const dcb_ver_array[]; + #define CXGB4_DCB_ENABLED true #else /* !CONFIG_CHELSIO_T4_DCB */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 828499256004..b477b8842905 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -2379,7 +2379,6 @@ static const struct file_operations rss_vf_config_debugfs_fops = { }; #ifdef CONFIG_CHELSIO_T4_DCB -extern char *dcb_ver_array[]; /* Data Center Briging information for each port. 
*/ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 9fd496732b2c..f27be1132d37 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -588,7 +588,7 @@ static void fw_caps_to_lmm(enum fw_port_type port_type, /** * lmm_to_fw_caps - translate ethtool Link Mode Mask to Firmware * capabilities - * @et_lmm: ethtool Link Mode Mask + * @link_mode_mask: ethtool Link Mode Mask * * Translate ethtool Link Mode Mask into a Firmware Port capabilities * value. diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index 796555255207..d02d346629b3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -165,6 +165,9 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f, unsigned int tid, bool dip, bool sip, bool dp, bool sp) { + u8 *nat_lp = (u8 *)&f->fs.nat_lport; + u8 *nat_fp = (u8 *)&f->fs.nat_fport; + if (dip) { if (f->fs.type) { set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W, @@ -236,8 +239,9 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f, } set_tcb_field(adap, f, tid, TCB_PDU_HDR_LEN_W, WORD_MASK, - (dp ? f->fs.nat_lport : 0) | - (sp ? f->fs.nat_fport << 16 : 0), 1); + (dp ? (nat_lp[1] | nat_lp[0] << 8) : 0) | + (sp ? (nat_fp[1] << 16 | nat_fp[0] << 24) : 0), + 1); } /* Validate filter spec against configuration done on the card. */ @@ -909,6 +913,9 @@ int set_filter_wr(struct adapter *adapter, int fidx) fwr->fpm = htons(f->fs.mask.fport); if (adapter->params.filter2_wr_support) { + u8 *nat_lp = (u8 *)&f->fs.nat_lport; + u8 *nat_fp = (u8 *)&f->fs.nat_fport; + fwr->natmode_to_ulp_type = FW_FILTER2_WR_ULP_TYPE_V(f->fs.nat_mode ? ULP_MODE_TCPDDP : @@ -916,8 +923,8 @@ int set_filter_wr(struct adapter *adapter, int fidx) FW_FILTER2_WR_NATMODE_V(f->fs.nat_mode); memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip)); memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip)); - fwr->newlport = htons(f->fs.nat_lport); - fwr->newfport = htons(f->fs.nat_fport); + fwr->newlport = htons(nat_lp[1] | nat_lp[0] << 8); + fwr->newfport = htons(nat_fp[1] | nat_fp[0] << 8); } /* Mark the filter as "pending" and ship off the Filter Work Request. 
@@ -1105,16 +1112,16 @@ static bool is_addr_all_mask(u8 *ipmask, int family) struct in_addr *addr; addr = (struct in_addr *)ipmask; - if (addr->s_addr == 0xffffffff) + if (addr->s_addr == htonl(0xffffffff)) return true; } else if (family == AF_INET6) { struct in6_addr *addr6; addr6 = (struct in6_addr *)ipmask; - if (addr6->s6_addr32[0] == 0xffffffff && - addr6->s6_addr32[1] == 0xffffffff && - addr6->s6_addr32[2] == 0xffffffff && - addr6->s6_addr32[3] == 0xffffffff) + if (addr6->s6_addr32[0] == htonl(0xffffffff) && + addr6->s6_addr32[1] == htonl(0xffffffff) && + addr6->s6_addr32[2] == htonl(0xffffffff) && + addr6->s6_addr32[3] == htonl(0xffffffff)) return true; } return false; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 854b1717a70d..0329a6b52087 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -449,7 +449,7 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok) * or -1 * @addr: the new MAC address value * @persist: whether a new MAC allocation should be persistent - * @add_smt: if true also add the address to the HW SMT + * @smt_idx: the destination to store the new SMT index. * * Modifies an MPS filter and sets it to the new MAC address if * @tcam_idx >= 0, or adds the MAC address to a new filter if @@ -1615,6 +1615,7 @@ static int tid_init(struct tid_info *t) * @stid: the server TID * @sip: local IP address to bind server to * @sport: the server's TCP port + * @vlan: the VLAN header information * @queue: queue to direct messages from this server to * * Create an IP server for the given port and address. @@ -2609,7 +2610,7 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid, /* Clear out filter specifications */ memset(&f->fs, 0, sizeof(struct ch_filter_specification)); - f->fs.val.lport = cpu_to_be16(sport); + f->fs.val.lport = be16_to_cpu(sport); f->fs.mask.lport = ~0; val = (u8 *)&sip; if ((val[0] | val[1] | val[2] | val[3]) != 0) { @@ -5377,10 +5378,10 @@ static inline bool is_x_10g_port(const struct link_config *lc) static int cfg_queues(struct adapter *adap) { u32 avail_qsets, avail_eth_qsets, avail_uld_qsets; - u32 i, n10g = 0, qidx = 0, n1g = 0; u32 ncpus = num_online_cpus(); u32 niqflint, neq, num_ulds; struct sge *s = &adap->sge; + u32 i, n10g = 0, qidx = 0; u32 q10g = 0, q1g; /* Reduce memory usage in kdump environment, disable all offload. 
*/ @@ -5426,7 +5427,6 @@ static int cfg_queues(struct adapter *adap) if (n10g) q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g; - n1g = adap->params.nports - n10g; #ifdef CONFIG_CHELSIO_T4_DCB /* For Data Center Bridging support we need to be able to support up * to 8 Traffic Priorities; each of which will be assigned to its @@ -5444,7 +5444,8 @@ static int cfg_queues(struct adapter *adap) else q10g = max(8U, q10g); - while ((q10g * n10g) > (avail_eth_qsets - n1g * q1g)) + while ((q10g * n10g) > + (avail_eth_qsets - (adap->params.nports - n10g) * q1g)) q10g--; #else /* !CONFIG_CHELSIO_T4_DCB */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c index f5bc996ac77d..70dbee89118e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c @@ -194,6 +194,7 @@ int cxgb4_ptp_redirect_rx_packet(struct adapter *adapter, struct port_info *pi) } /** + * cxgb4_ptp_adjfreq - Adjust frequency of PHC cycle counter * @ptp: ptp clock structure * @ppb: Desired frequency change in parts per billion * @@ -229,7 +230,7 @@ static int cxgb4_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) /** * cxgb4_ptp_fineadjtime - Shift the time of the hardware clock - * @ptp: ptp clock structure + * @adapter: board private structure * @delta: Desired change in nanoseconds * * Adjust the timer by resetting the timecounter structure. diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 4a5fa9eba0b6..59b65d4db086 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -58,10 +58,6 @@ static struct ch_tc_pedit_fields pedits[] = { PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4), PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8), PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12), - PEDIT_FIELDS(TCP_, SPORT, 2, nat_fport, 0), - PEDIT_FIELDS(TCP_, DPORT, 2, nat_lport, 0), - PEDIT_FIELDS(UDP_, SPORT, 2, nat_fport, 0), - PEDIT_FIELDS(UDP_, DPORT, 2, nat_lport, 0), }; static struct ch_tc_flower_entry *allocate_flower_entry(void) @@ -156,14 +152,14 @@ static void cxgb4_process_flow_match(struct net_device *dev, struct flow_match_ports match; flow_rule_match_ports(rule, &match); - fs->val.lport = cpu_to_be16(match.key->dst); - fs->mask.lport = cpu_to_be16(match.mask->dst); - fs->val.fport = cpu_to_be16(match.key->src); - fs->mask.fport = cpu_to_be16(match.mask->src); + fs->val.lport = be16_to_cpu(match.key->dst); + fs->mask.lport = be16_to_cpu(match.mask->dst); + fs->val.fport = be16_to_cpu(match.key->src); + fs->mask.fport = be16_to_cpu(match.mask->src); /* also initialize nat_lport/fport to same values */ - fs->nat_lport = cpu_to_be16(match.key->dst); - fs->nat_fport = cpu_to_be16(match.key->src); + fs->nat_lport = fs->val.lport; + fs->nat_fport = fs->val.fport; } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { @@ -354,12 +350,9 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val, switch (offset) { case PEDIT_TCP_SPORT_DPORT: if (~mask & PEDIT_TCP_UDP_SPORT_MASK) - offload_pedit(fs, cpu_to_be32(val) >> 16, - cpu_to_be32(mask) >> 16, - TCP_SPORT); + fs->nat_fport = val; else - offload_pedit(fs, cpu_to_be32(val), - cpu_to_be32(mask), TCP_DPORT); + fs->nat_lport = val >> 16; } fs->nat_mode = NAT_MODE_ALL; break; @@ -367,12 +360,9 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val, switch (offset) { case 
PEDIT_UDP_SPORT_DPORT: if (~mask & PEDIT_TCP_UDP_SPORT_MASK) - offload_pedit(fs, cpu_to_be32(val) >> 16, - cpu_to_be32(mask) >> 16, - UDP_SPORT); + fs->nat_fport = val; else - offload_pedit(fs, cpu_to_be32(val), - cpu_to_be32(mask), UDP_DPORT); + fs->nat_lport = val >> 16; } fs->nat_mode = NAT_MODE_ALL; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c index 3f3c11e54d97..dede02505ceb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c @@ -48,7 +48,7 @@ static int fill_match_fields(struct adapter *adap, bool next_header) { unsigned int i, j; - u32 val, mask; + __be32 val, mask; int off, err; bool found; @@ -228,7 +228,7 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) const struct cxgb4_next_header *next; bool found = false; unsigned int i, j; - u32 val, mask; + __be32 val, mask; int off; if (t->table[link_uhtid - 1].link_handle) { @@ -242,10 +242,10 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) /* Try to find matches that allow jumps to next header. */ for (i = 0; next[i].jump; i++) { - if (next[i].offoff != cls->knode.sel->offoff || - next[i].shift != cls->knode.sel->offshift || - next[i].mask != cls->knode.sel->offmask || - next[i].offset != cls->knode.sel->off) + if (next[i].sel.offoff != cls->knode.sel->offoff || + next[i].sel.offshift != cls->knode.sel->offshift || + next[i].sel.offmask != cls->knode.sel->offmask || + next[i].sel.off != cls->knode.sel->off) continue; /* Found a possible candidate. Find a key that @@ -257,9 +257,9 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) val = cls->knode.sel->keys[j].val; mask = cls->knode.sel->keys[j].mask; - if (next[i].match_off == off && - next[i].match_val == val && - next[i].match_mask == mask) { + if (next[i].key.off == off && + next[i].key.val == val && + next[i].key.mask == mask) { found = true; break; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h index 125868c6770a..f59dd4b2ae6f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h @@ -38,12 +38,12 @@ struct cxgb4_match_field { int off; /* Offset from the beginning of the header to match */ /* Fill the value/mask pair in the spec if matched */ - int (*val)(struct ch_filter_specification *f, u32 val, u32 mask); + int (*val)(struct ch_filter_specification *f, __be32 val, __be32 mask); }; /* IPv4 match fields */ static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { f->val.tos = (ntohl(val) >> 16) & 0x000000FF; f->mask.tos = (ntohl(mask) >> 16) & 0x000000FF; @@ -52,7 +52,7 @@ static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { u32 mask_val; u8 frag_val; @@ -74,7 +74,7 @@ static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { f->val.proto = (ntohl(val) >> 16) & 0x000000FF; f->mask.proto = (ntohl(mask) >> 16) & 0x000000FF; @@ -83,7 +83,7 @@ static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f, } static inline int 
cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.fip[0], &val, sizeof(u32)); memcpy(&f->mask.fip[0], &mask, sizeof(u32)); @@ -92,7 +92,7 @@ static inline int cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv4_dst_ip(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.lip[0], &val, sizeof(u32)); memcpy(&f->mask.lip[0], &mask, sizeof(u32)); @@ -111,7 +111,7 @@ static const struct cxgb4_match_field cxgb4_ipv4_fields[] = { /* IPv6 match fields */ static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { f->val.tos = (ntohl(val) >> 20) & 0x000000FF; f->mask.tos = (ntohl(mask) >> 20) & 0x000000FF; @@ -120,7 +120,7 @@ static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { f->val.proto = (ntohl(val) >> 8) & 0x000000FF; f->mask.proto = (ntohl(mask) >> 8) & 0x000000FF; @@ -129,7 +129,7 @@ static inline int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.fip[0], &val, sizeof(u32)); memcpy(&f->mask.fip[0], &mask, sizeof(u32)); @@ -138,7 +138,7 @@ static inline int cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.fip[4], &val, sizeof(u32)); memcpy(&f->mask.fip[4], &mask, sizeof(u32)); @@ -147,7 +147,7 @@ static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.fip[8], &val, sizeof(u32)); memcpy(&f->mask.fip[8], &mask, sizeof(u32)); @@ -156,7 +156,7 @@ static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.fip[12], &val, sizeof(u32)); memcpy(&f->mask.fip[12], &mask, sizeof(u32)); @@ -165,7 +165,7 @@ static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.lip[0], &val, sizeof(u32)); memcpy(&f->mask.lip[0], &mask, sizeof(u32)); @@ -174,7 +174,7 @@ static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.lip[4], &val, sizeof(u32)); memcpy(&f->mask.lip[4], &mask, sizeof(u32)); @@ -183,7 +183,7 @@ static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.lip[8], &val, sizeof(u32)); memcpy(&f->mask.lip[8], &mask, sizeof(u32)); @@ -192,7 +192,7 @@ static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_dst_ip3(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, 
__be32 mask) { memcpy(&f->val.lip[12], &val, sizeof(u32)); memcpy(&f->mask.lip[12], &mask, sizeof(u32)); @@ -216,7 +216,7 @@ static const struct cxgb4_match_field cxgb4_ipv6_fields[] = { /* TCP/UDP match */ static inline int cxgb4_fill_l4_ports(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { f->val.fport = ntohl(val) >> 16; f->mask.fport = ntohl(mask) >> 16; @@ -237,19 +237,13 @@ static const struct cxgb4_match_field cxgb4_udp_fields[] = { }; struct cxgb4_next_header { - unsigned int offset; /* Offset to next header */ - /* offset, shift, and mask added to offset above + /* Offset, shift, and mask added to beginning of the header * to get to next header. Useful when using a header * field's value to jump to next header such as IHL field * in IPv4 header. */ - unsigned int offoff; - u32 shift; - u32 mask; - /* match criteria to make this jump */ - unsigned int match_off; - u32 match_val; - u32 match_mask; + struct tc_u32_sel sel; + struct tc_u32_key key; /* location of jump to make */ const struct cxgb4_match_field *jump; }; @@ -258,26 +252,74 @@ struct cxgb4_next_header { * IPv4 header. */ static const struct cxgb4_next_header cxgb4_ipv4_jumps[] = { - { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF, - .match_off = 8, .match_val = 0x600, .match_mask = 0xFF00, - .jump = cxgb4_tcp_fields }, - { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF, - .match_off = 8, .match_val = 0x1100, .match_mask = 0xFF00, - .jump = cxgb4_udp_fields }, - { .jump = NULL } + { + /* TCP Jump */ + .sel = { + .off = 0, + .offoff = 0, + .offshift = 6, + .offmask = cpu_to_be16(0x0f00), + }, + .key = { + .off = 8, + .val = cpu_to_be32(0x00060000), + .mask = cpu_to_be32(0x00ff0000), + }, + .jump = cxgb4_tcp_fields, + }, + { + /* UDP Jump */ + .sel = { + .off = 0, + .offoff = 0, + .offshift = 6, + .offmask = cpu_to_be16(0x0f00), + }, + .key = { + .off = 8, + .val = cpu_to_be32(0x00110000), + .mask = cpu_to_be32(0x00ff0000), + }, + .jump = cxgb4_udp_fields, + }, + { .jump = NULL }, }; /* Accept a rule with a jump directly past the 40 Bytes of IPv6 fixed header * to get to transport layer header. */ static const struct cxgb4_next_header cxgb4_ipv6_jumps[] = { - { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0, - .match_off = 4, .match_val = 0x60000, .match_mask = 0xFF0000, - .jump = cxgb4_tcp_fields }, - { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0, - .match_off = 4, .match_val = 0x110000, .match_mask = 0xFF0000, - .jump = cxgb4_udp_fields }, - { .jump = NULL } + { + /* TCP Jump */ + .sel = { + .off = 40, + .offoff = 0, + .offshift = 0, + .offmask = 0, + }, + .key = { + .off = 4, + .val = cpu_to_be32(0x00000600), + .mask = cpu_to_be32(0x0000ff00), + }, + .jump = cxgb4_tcp_fields, + }, + { + /* UDP Jump */ + .sel = { + .off = 40, + .offoff = 0, + .offshift = 0, + .offmask = 0, + }, + .key = { + .off = 4, + .val = cpu_to_be32(0x00001100), + .mask = cpu_to_be32(0x0000ff00), + }, + .jump = cxgb4_udp_fields, + }, + { .jump = NULL }, }; struct cxgb4_link { diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index 72b37a66c7d8..c4864125fe02 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -503,40 +503,19 @@ u64 cxgb4_select_ntuple(struct net_device *dev, EXPORT_SYMBOL(cxgb4_select_ntuple); /* - * Called when address resolution fails for an L2T entry to handle packets - * on the arpq head. 
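The rewritten cxgb4_ipv4_jumps[]/cxgb4_ipv6_jumps[] tables above describe each jump with a struct tc_u32_sel and tc_u32_key, keeping the constants in network byte order so they compare directly against the __be32/__be16 values tc passes in. Decoding the IPv4 TCP entry: key.off = 8 with mask 0x00ff0000 selects the Protocol byte (header offset 9) and requires it to be 6 (TCP), while sel.offmask = 0x0f00 with offshift = 6 turns the IHL field into the byte offset of the transport header. Below is a small host-order sketch of that arithmetic; be32_at is a hypothetical helper and the sample header bytes are made up.

#include <stdio.h>
#include <stdint.h>

/* Load a 32-bit big-endian field from a raw header. */
static uint32_t be32_at(const uint8_t *p)
{
	return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
	       (uint32_t)p[2] << 8 | p[3];
}

int main(void)
{
	/* Minimal IPv4 header: version 4, IHL 5, TTL 64, protocol 6 (TCP). */
	uint8_t iph[20] = { 0x45, 0, 0, 20, 0, 0, 0, 0, 64, 6, 0, 0 };
	uint32_t w8 = be32_at(&iph[8]);
	uint16_t w0 = (uint16_t)(iph[0] << 8 | iph[1]);

	/* key.off = 8, mask 0x00ff0000, val 0x00060000: protocol == TCP? */
	printf("tcp match: %s\n",
	       (w8 & 0x00ff0000) == 0x00060000 ? "yes" : "no");

	/* sel: (16-bit word at offoff 0 & 0x0f00) >> 6 == IHL * 4 == 20 */
	printf("next header offset: %u\n", (unsigned)((w0 & 0x0f00) >> 6));
	return 0;
}

The UDP entry is identical apart from the protocol value 0x11, and the IPv6 entries use a fixed 40-byte jump while matching the Next Header byte at offset 6.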
If a packet specifies a failure handler it is invoked, - * otherwise the packet is sent to the device. - */ -static void handle_failed_resolution(struct adapter *adap, struct l2t_entry *e) -{ - struct sk_buff *skb; - - while ((skb = __skb_dequeue(&e->arpq)) != NULL) { - const struct l2t_skb_cb *cb = L2T_SKB_CB(skb); - - spin_unlock(&e->lock); - if (cb->arp_err_handler) - cb->arp_err_handler(cb->handle, skb); - else - t4_ofld_send(adap, skb); - spin_lock(&e->lock); - } -} - -/* * Called when the host's neighbor layer makes a change to some entry that is * loaded into the HW L2 table. */ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh) { - struct l2t_entry *e; - struct sk_buff_head *arpq = NULL; - struct l2t_data *d = adap->l2t; unsigned int addr_len = neigh->tbl->key_len; u32 *addr = (u32 *) neigh->primary_key; - int ifidx = neigh->dev->ifindex; - int hash = addr_hash(d, addr, addr_len, ifidx); + int hash, ifidx = neigh->dev->ifindex; + struct sk_buff_head *arpq = NULL; + struct l2t_data *d = adap->l2t; + struct l2t_entry *e; + hash = addr_hash(d, addr, addr_len, ifidx); read_lock_bh(&d->lock); for (e = d->l2tab[hash].first; e; e = e->next) if (!addreq(e, addr) && e->ifindex == ifidx) { @@ -569,8 +548,25 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh) write_l2e(adap, e, 0); } - if (arpq) - handle_failed_resolution(adap, e); + if (arpq) { + struct sk_buff *skb; + + /* Called when address resolution fails for an L2T + * entry to handle packets on the arpq head. If a + * packet specifies a failure handler it is invoked, + * otherwise the packet is sent to the device. + */ + while ((skb = __skb_dequeue(&e->arpq)) != NULL) { + const struct l2t_skb_cb *cb = L2T_SKB_CB(skb); + + spin_unlock(&e->lock); + if (cb->arp_err_handler) + cb->arp_err_handler(cb->handle, skb); + else + t4_ofld_send(adap, skb); + spin_lock(&e->lock); + } + } spin_unlock_bh(&e->lock); } @@ -613,6 +609,7 @@ struct l2t_entry *t4_l2t_alloc_switching(struct adapter *adap, u16 vlan, } /** + * cxgb4_l2t_alloc_switching - Allocates an L2T entry for switch filters * @dev: net_device pointer * @vlan: VLAN Id * @port: Associated port diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c index fde93c50cfec..a1b14468d1ff 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c @@ -598,7 +598,7 @@ struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev, /** * cxgb4_sched_class_free - free a scheduling class * @dev: net_device pointer - * @e: scheduling class + * @classid: scheduling class id to free * * Frees a scheduling class if there are no users. */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 1359158652b7..32a45dc51ed7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -302,7 +302,7 @@ static void deferred_unmap_destructor(struct sk_buff *skb) /** * free_tx_desc - reclaims Tx descriptors and their buffers - * @adapter: the adapter + * @adap: the adapter * @q: the Tx queue to reclaim descriptors from * @n: the number of descriptors to reclaim * @unmap: whether the buffers should be unmapped for DMA @@ -722,6 +722,7 @@ static inline unsigned int flits_to_desc(unsigned int n) /** * is_eth_imm - can an Ethernet packet be sent as immediate data? * @skb: the packet + * @chip_ver: chip version * * Returns whether an Ethernet packet is small enough to fit as * immediate data. 
Return value corresponds to headroom required. @@ -749,6 +750,7 @@ static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver) /** * calc_tx_flits - calculate the number of flits for a packet Tx WR * @skb: the packet + * @chip_ver: chip version * * Returns the number of flits needed for a Tx WR for the given Ethernet * packet, including the needed WR and CPL headers. @@ -804,6 +806,7 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb, /** * calc_tx_descs - calculate the number of Tx descriptors for a packet * @skb: the packet + * @chip_ver: chip version * * Returns the number of Tx descriptors needed for the given Ethernet * packet, including the needed WR and CPL headers. @@ -1425,12 +1428,10 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev) qidx = skb_get_queue_mapping(skb); if (ptp_enabled) { - spin_lock(&adap->ptp_lock); if (!(adap->ptp_tx_skb)) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; adap->ptp_tx_skb = skb_get(skb); } else { - spin_unlock(&adap->ptp_lock); goto out_free; } q = &adap->sge.ptptxq; @@ -1444,11 +1445,8 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev) #ifdef CONFIG_CHELSIO_T4_FCOE ret = cxgb_fcoe_offload(skb, adap, pi, &cntrl); - if (unlikely(ret == -ENOTSUPP)) { - if (ptp_enabled) - spin_unlock(&adap->ptp_lock); + if (unlikely(ret == -EOPNOTSUPP)) goto out_free; - } #endif /* CONFIG_CHELSIO_T4_FCOE */ chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); @@ -1461,8 +1459,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev) dev_err(adap->pdev_dev, "%s: Tx ring %u full while queue awake!\n", dev->name, qidx); - if (ptp_enabled) - spin_unlock(&adap->ptp_lock); return NETDEV_TX_BUSY; } @@ -1481,8 +1477,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev) unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) { memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr)); q->mapping_err++; - if (ptp_enabled) - spin_unlock(&adap->ptp_lock); goto out_free; } @@ -1533,8 +1527,7 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev) if (iph->version == 4) { iph->check = 0; iph->tot_len = 0; - iph->check = (u16)(~ip_fast_csum((u8 *)iph, - iph->ihl)); + iph->check = ~ip_fast_csum((u8 *)iph, iph->ihl); } if (skb->ip_summed == CHECKSUM_PARTIAL) cntrl = hwcsum(adap->params.chip, skb); @@ -1630,8 +1623,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev) txq_advance(&q->q, ndesc); cxgb4_ring_tx_db(adap, &q->q, ndesc); - if (ptp_enabled) - spin_unlock(&adap->ptp_lock); return NETDEV_TX_OK; out_free: @@ -2377,6 +2368,16 @@ netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(qid >= pi->nqsets)) return cxgb4_ethofld_xmit(skb, dev); + if (is_ptp_enabled(skb, dev)) { + struct adapter *adap = netdev2adap(dev); + netdev_tx_t ret; + + spin_lock(&adap->ptp_lock); + ret = cxgb4_eth_xmit(skb, dev); + spin_unlock(&adap->ptp_lock); + return ret; + } + return cxgb4_eth_xmit(skb, dev); } @@ -2410,9 +2411,9 @@ static void eosw_txq_flush_pending_skbs(struct sge_eosw_txq *eosw_txq) /** * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc. - * @dev - netdevice - * @eotid - ETHOFLD tid to bind/unbind - * @tc - traffic class. If set to FW_SCHED_CLS_NONE, then unbinds the @eotid + * @dev: netdevice + * @eotid: ETHOFLD tid to bind/unbind + * @tc: traffic class. 
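t4_systim_to_hwstamp() above switches from casting the buffer to u64 and calling be64_to_cpu() to get_unaligned_be64(), which performs the same big-endian conversion without assuming the data pointer is 8-byte aligned. A portable user-space equivalent of what that helper does; load_be64 and the sample bytes are illustrative only.

#include <stdio.h>
#include <stdint.h>

/* Byte-wise big-endian 64-bit load: correct regardless of the buffer's
 * alignment, which is the property get_unaligned_be64() provides.
 */
static uint64_t load_be64(const uint8_t *p)
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = v << 8 | p[i];
	return v;
}

int main(void)
{
	/* Pretend timestamp bytes starting at an odd (unaligned) offset. */
	uint8_t buf[9] = { 0xff, 0x00, 0x00, 0x00, 0x00, 0x12, 0x34, 0x56, 0x78 };

	printf("ns = %llu\n", (unsigned long long)load_be64(&buf[1]));
	/* prints 305419896 (0x12345678) */
	return 0;
}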
If set to FW_SCHED_CLS_NONE, then unbinds the @eotid * * Send a FLOWC work request to bind an ETHOFLD TID to a traffic class. * If @tc is set to FW_SCHED_CLS_NONE, then the @eotid is unbound from @@ -2691,7 +2692,6 @@ static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb) /** * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion - * @adap: the adapter * @q: the queue to stop * * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting @@ -3286,7 +3286,7 @@ enum { /** * t4_systim_to_hwstamp - read hardware time stamp - * @adap: the adapter + * @adapter: the adapter * @skb: the packet * * Read Time Stamp from MPS packet and insert in skb which @@ -3313,15 +3313,16 @@ static noinline int t4_systim_to_hwstamp(struct adapter *adapter, hwtstamps = skb_hwtstamps(skb); memset(hwtstamps, 0, sizeof(*hwtstamps)); - hwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*((u64 *)data))); + hwtstamps->hwtstamp = ns_to_ktime(get_unaligned_be64(data)); return RX_PTP_PKT_SUC; } /** * t4_rx_hststamp - Recv PTP Event Message - * @adap: the adapter + * @adapter: the adapter * @rsp: the response queue descriptor holding the RX_PKT message + * @rxq: the response queue holding the RX_PKT message * @skb: the packet * * PTP enabled and MPS packet, read HW timestamp @@ -3345,7 +3346,7 @@ static int t4_rx_hststamp(struct adapter *adapter, const __be64 *rsp, /** * t4_tx_hststamp - Loopback PTP Transmit Event Message - * @adap: the adapter + * @adapter: the adapter * @skb: the packet * @dev: the ingress net device * diff --git a/drivers/net/ethernet/chelsio/cxgb4/smt.c b/drivers/net/ethernet/chelsio/cxgb4/smt.c index 01c65d13fc0e..cbe72ed27b1e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/smt.c +++ b/drivers/net/ethernet/chelsio/cxgb4/smt.c @@ -103,6 +103,7 @@ static void t4_smte_free(struct smt_entry *e) } /** + * cxgb4_smt_release - Release SMT entry * @e: smt entry to release * * Releases ref count and frees up an smt entry from SMT table @@ -231,6 +232,7 @@ static struct smt_entry *t4_smt_alloc_switching(struct adapter *adap, u16 pfvf, } /** + * cxgb4_smt_alloc_switching - Allocates an SMT entry for switch filters. 
* @dev: net_device pointer * @smac: MAC address to add to SMT * Returns pointer to the SMT entry created diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 1c8068c02728..ad522f822cc2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3163,7 +3163,7 @@ int t4_get_tp_version(struct adapter *adapter, u32 *vers) /** * t4_get_exprom_version - return the Expansion ROM version (if any) - * @adapter: the adapter + * @adap: the adapter * @vers: where to place the version * * Reads the Expansion ROM header from FLASH and returns the version @@ -3493,7 +3493,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, drv_fw = &fw_info->fw_hdr; /* Read the header of the firmware on the card */ - ret = -t4_read_flash(adap, FLASH_FW_START, + ret = t4_read_flash(adap, FLASH_FW_START, sizeof(*card_fw) / sizeof(uint32_t), (uint32_t *)card_fw, 1); if (ret == 0) { @@ -3522,8 +3522,8 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, should_install_fs_fw(adap, card_fw_usable, be32_to_cpu(fs_fw->fw_ver), be32_to_cpu(card_fw->fw_ver))) { - ret = -t4_fw_upgrade(adap, adap->mbox, fw_data, - fw_size, 0); + ret = t4_fw_upgrade(adap, adap->mbox, fw_data, + fw_size, 0); if (ret != 0) { dev_err(adap->pdev_dev, "failed to install firmware: %d\n", ret); @@ -3554,7 +3554,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k), FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k)); - ret = EINVAL; + ret = -EINVAL; goto bye; } @@ -5310,7 +5310,7 @@ static unsigned int t4_use_ldst(struct adapter *adap) * @cmd: TP fw ldst address space type * @vals: where the indirect register values are stored/written * @nregs: how many indirect registers to read/write - * @start_idx: index of first indirect register to read/write + * @start_index: index of first indirect register to read/write * @rw: Read (1) or Write (0) * @sleep_ok: if true we may sleep while awaiting command completion * @@ -6115,7 +6115,7 @@ void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]) /** * compute_mps_bg_map - compute the MPS Buffer Group Map for a Port - * @adap: the adapter + * @adapter: the adapter * @pidx: the port index * * Computes and returns a bitmap indicating which MPS buffer groups are @@ -6252,7 +6252,7 @@ static unsigned int t4_get_tp_e2c_map(struct adapter *adapter, int pidx) /** * t4_get_tp_ch_map - return TP ingress channels associated with a port - * @adapter: the adapter + * @adap: the adapter * @pidx: the port index * * Returns a bitmap indicating which TP Ingress Channels are associated @@ -6589,7 +6589,7 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, * @phy_addr: the PHY address * @mmd: the PHY MMD to access (0 for clause 22 PHYs) * @reg: the register to write - * @valp: value to write + * @val: value to write * * Issues a FW command through the given mailbox to write a PHY register. 
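The t4_prep_fw() fixes above all restore the same convention: t4_read_flash() and t4_fw_upgrade() already return 0 or a negative errno, so negating their return value (or assigning a bare EINVAL) hands callers a positive number that "ret < 0" checks will not treat as an error. A stripped-down illustration; read_flash_stub is a hypothetical stand-in, not a driver function.

#include <stdio.h>
#include <errno.h>

/* Stand-in for a helper that already follows the 0-or-negative-errno rule. */
static int read_flash_stub(int fail)
{
	return fail ? -EIO : 0;
}

int main(void)
{
	int ret;

	ret = -read_flash_stub(1);	/* wrong: double negation yields +EIO */
	if (ret < 0)
		printf("caller sees an error\n");
	else
		printf("ret=%d looks like success to 'ret < 0' checks\n", ret);

	ret = read_flash_stub(1);	/* right: propagate the negative errno */
	if (ret < 0)
		printf("caller sees an error (%d)\n", ret);
	return 0;
}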
  */
@@ -6615,7 +6615,7 @@ int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
 
 /**
  * t4_sge_decode_idma_state - decode the idma state
- * @adap: the adapter
+ * @adapter: the adapter
  * @state: the state idma is stuck in
  */
 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
@@ -6782,7 +6782,7 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state)
  * t4_sge_ctxt_flush - flush the SGE context cache
  * @adap: the adapter
  * @mbox: mailbox to use for the FW command
- * @ctx_type: Egress or Ingress
+ * @ctxt_type: Egress or Ingress
  *
  * Issues a FW command through the given mailbox to flush the
  * SGE context cache.
@@ -6809,7 +6809,7 @@ int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
 
 /**
  * t4_read_sge_dbqtimers - read SGE Doorbell Queue Timer values
- * @adap - the adapter
+ * @adap: the adapter
  * @ndbqtimers: size of the provided SGE Doorbell Queue Timer table
  * @dbqtimers: SGE Doorbell Queue Timer table
  *
@@ -7092,6 +7092,7 @@ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
 /**
  * t4_fw_restart - restart the firmware by taking the uP out of RESET
  * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
  * @reset: if we want to do a RESET to restart things
  *
  * Restart firmware previously halted by t4_fw_halt(). On successful
@@ -7630,6 +7631,8 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
  * @nmac: number of MAC addresses needed (1 to 5)
  * @mac: the MAC addresses of the VI
  * @rss_size: size of RSS table slice associated with this VI
+ * @vivld: the destination to store the VI Valid value.
+ * @vin: the destination to store the VIN value.
  *
  * Allocates a virtual interface for the given physical port. If @mac is
  * not %NULL it contains the MAC addresses of the VI as assigned by FW.
@@ -7848,7 +7851,7 @@ int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
  * t4_alloc_encap_mac_filt - Adds a mac entry in mps tcam with VNI support
  * @adap: the adapter
  * @viid: the VI id
- * @mac: the MAC address
+ * @addr: the MAC address
  * @mask: the mask
  * @vni: the VNI id for the tunnel protocol
  * @vni_mask: mask for the VNI id
@@ -7897,11 +7900,11 @@ int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
  * t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
  * @adap: the adapter
  * @viid: the VI id
- * @mac: the MAC address
+ * @addr: the MAC address
  * @mask: the mask
  * @idx: index at which to add this entry
- * @port_id: the port index
  * @lookup_type: MAC address for inner (1) or outer (0) header
+ * @port_id: the port index
  * @sleep_ok: call is allowed to sleep
  *
  * Adds the mac entry at the specified index using raw mac interface.
@@ -8126,7 +8129,7 @@ int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
  * @idx: index of existing filter for old value of MAC address, or -1
  * @addr: the new MAC address value
  * @persist: whether a new MAC allocation should be persistent
- * @add_smt: if true also add the address to the HW SMT
+ * @smt_idx: the destination to store the new SMT index.
  *
  * Modifies an exact-match filter and sets it to the new MAC address.
  * Note that in general it is not possible to modify the value of a given
@@ -8448,7 +8451,6 @@ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
 
 /**
  * t4_link_down_rc_str - return a string for a Link Down Reason Code
- * @adap: the adapter
  * @link_down_rc: Link Down Reason Code
  *
  * Returns a string representation of the Link Down Reason Code.
@@ -8472,9 +8474,7 @@ static const char *t4_link_down_rc_str(unsigned char link_down_rc) return reason[link_down_rc]; } -/** - * Return the highest speed set in the port capabilities, in Mb/s. - */ +/* Return the highest speed set in the port capabilities, in Mb/s. */ static unsigned int fwcap_to_speed(fw_port_cap32_t caps) { #define TEST_SPEED_RETURN(__caps_speed, __speed) \ @@ -9110,7 +9110,6 @@ found: /** * t4_prep_adapter - prepare SW and HW for operation * @adapter: the adapter - * @reset: if true perform a HW reset * * Initialize adapter SW state for the various HW modules, set initial * values for some adapter tunables, take PHYs out of reset, and @@ -10395,6 +10394,7 @@ int t4_sched_params(struct adapter *adapter, u8 type, u8 level, u8 mode, /** * t4_i2c_rd - read I2C data from adapter * @adap: the adapter + * @mbox: mailbox to use for the FW command * @port: Port number if per-port device; <0 if not * @devid: per-port device ID or absolute device ID * @offset: byte offset into device I2C space @@ -10450,7 +10450,7 @@ int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port, /** * t4_set_vlan_acl - Set a VLAN id for the specified VF - * @adapter: the adapter + * @adap: the adapter * @mbox: mailbox to use for the FW command * @vf: one of the VFs instantiated by the specified PF * @vlan: The vlanid to be set diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index cec865a97464..a7641be9094f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -260,8 +260,7 @@ static int cxgb4vf_set_addr_hash(struct port_info *pi) * @tcam_idx: TCAM index of existing filter for old value of MAC address, * or -1 * @addr: the new MAC address value - * @persist: whether a new MAC allocation should be persistent - * @add_smt: if true also add the address to the HW SMT + * @persistent: whether a new MAC allocation should be persistent * * Modifies an MPS filter and sets it to the new MAC address if * @tcam_idx >= 0, or adds the MAC address to a new filter if diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index f71c973398ec..8c3d6e11a4bf 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -1692,7 +1692,7 @@ static inline bool is_new_response(const struct rsp_ctrl *rc, * restore_rx_bufs - put back a packet's RX buffers * @gl: the packet gather list * @fl: the SGE Free List - * @nfrags: how many fragments in @si + * @frags: how many fragments in @si * * Called when we find out that the current packet, @si, can't be * processed right away for some reason. This is a very rare event and @@ -2054,7 +2054,7 @@ irq_handler_t t4vf_intr_handler(struct adapter *adapter) /** * sge_rx_timer_cb - perform periodic maintenance of SGE RX queues - * @data: the adapter + * @t: Rx timer * * Runs periodically from a timer to perform maintenance of SGE RX queues. * @@ -2113,7 +2113,7 @@ static void sge_rx_timer_cb(struct timer_list *t) /** * sge_tx_timer_cb - perform periodic maintenance of SGE Tx queues - * @data: the adapter + * @t: Tx timer * * Runs periodically from a timer to perform maintenance of SGE TX queues. 
* @@ -2405,6 +2405,7 @@ err: * t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX Queue * @adapter: the adapter * @txq: pointer to the new txq to be filled in + * @dev: the network device * @devq: the network TX queue associated with the new txq * @iqid: the relative ingress queue ID to which events relating to * the new txq should be directed diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 9d49ff211cc1..a31b87390b50 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -389,9 +389,7 @@ static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec) return cc_fec; } -/** - * Return the highest speed set in the port capabilities, in Mb/s. - */ +/* Return the highest speed set in the port capabilities, in Mb/s. */ static unsigned int fwcap_to_speed(fw_port_cap32_t caps) { #define TEST_SPEED_RETURN(__caps_speed, __speed) \ @@ -1467,6 +1465,7 @@ int t4vf_identify_port(struct adapter *adapter, unsigned int viid, * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it, * -1 no change + * @sleep_ok: call is allowed to sleep * * Sets Rx properties of a virtual interface. */ @@ -1906,7 +1905,7 @@ static const char *t4vf_link_down_rc_str(unsigned char link_down_rc) /** * t4vf_handle_get_port_info - process a FW reply message * @pi: the port info - * @rpl: start of the FW message + * @cmd: start of the FW message * * Processes a GET_PORT_INFO FW reply message. */ @@ -2137,8 +2136,6 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) return 0; } -/** - */ int t4vf_prep_adapter(struct adapter *adapter) { int err; diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 298c55786fd9..22105d09bc89 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -266,7 +266,7 @@ static irqreturn_t enetc_msix(int irq, void *data) /* disable interrupts */ enetc_wr_reg(v->rbier, 0); - for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings) + for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS) enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0); napi_schedule_irqoff(&v->napi); @@ -302,7 +302,7 @@ static int enetc_poll(struct napi_struct *napi, int budget) /* enable interrupts */ enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE); - for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings) + for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS) enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), ENETC_TBIER_TXTIE); @@ -1595,6 +1595,24 @@ static int enetc_set_psfp(struct net_device *ndev, int en) return 0; } +static void enetc_enable_rxvlan(struct net_device *ndev, bool en) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int i; + + for (i = 0; i < priv->num_rx_rings; i++) + enetc_bdr_enable_rxvlan(&priv->si->hw, i, en); +} + +static void enetc_enable_txvlan(struct net_device *ndev, bool en) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int i; + + for (i = 0; i < priv->num_tx_rings; i++) + enetc_bdr_enable_txvlan(&priv->si->hw, i, en); +} + int enetc_set_features(struct net_device *ndev, netdev_features_t features) { @@ -1604,6 +1622,14 @@ int enetc_set_features(struct net_device *ndev, if (changed & NETIF_F_RXHASH) enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH)); + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + enetc_enable_rxvlan(ndev, + !!(features & 
NETIF_F_HW_VLAN_CTAG_RX)); + + if (changed & NETIF_F_HW_VLAN_CTAG_TX) + enetc_enable_txvlan(ndev, + !!(features & NETIF_F_HW_VLAN_CTAG_TX)); + if (changed & NETIF_F_HW_TC) err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC)); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h index 6314051bc6c1..ce0d321c0639 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h +++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h @@ -531,22 +531,22 @@ struct enetc_msg_cmd_header { /* Common H/W utility functions */ -static inline void enetc_enable_rxvlan(struct enetc_hw *hw, int si_idx, - bool en) +static inline void enetc_bdr_enable_rxvlan(struct enetc_hw *hw, int idx, + bool en) { - u32 val = enetc_rxbdr_rd(hw, si_idx, ENETC_RBMR); + u32 val = enetc_rxbdr_rd(hw, idx, ENETC_RBMR); val = (val & ~ENETC_RBMR_VTE) | (en ? ENETC_RBMR_VTE : 0); - enetc_rxbdr_wr(hw, si_idx, ENETC_RBMR, val); + enetc_rxbdr_wr(hw, idx, ENETC_RBMR, val); } -static inline void enetc_enable_txvlan(struct enetc_hw *hw, int si_idx, - bool en) +static inline void enetc_bdr_enable_txvlan(struct enetc_hw *hw, int idx, + bool en) { - u32 val = enetc_txbdr_rd(hw, si_idx, ENETC_TBMR); + u32 val = enetc_txbdr_rd(hw, idx, ENETC_TBMR); val = (val & ~ENETC_TBMR_VIH) | (en ? ENETC_TBMR_VIH : 0); - enetc_txbdr_wr(hw, si_idx, ENETC_TBMR, val); + enetc_txbdr_wr(hw, idx, ENETC_TBMR, val); } static inline void enetc_set_bdr_prio(struct enetc_hw *hw, int bdr_idx, diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index 824d211ec00f..4fac57dbb3c8 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -649,14 +649,6 @@ static int enetc_pf_set_features(struct net_device *ndev, netdev_features_t changed = ndev->features ^ features; struct enetc_ndev_priv *priv = netdev_priv(ndev); - if (changed & NETIF_F_HW_VLAN_CTAG_RX) - enetc_enable_rxvlan(&priv->si->hw, 0, - !!(features & NETIF_F_HW_VLAN_CTAG_RX)); - - if (changed & NETIF_F_HW_VLAN_CTAG_TX) - enetc_enable_txvlan(&priv->si->hw, 0, - !!(features & NETIF_F_HW_VLAN_CTAG_TX)); - if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { struct enetc_pf *pf = enetc_si_priv(priv->si); diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index a6cdd5b61921..d8d76da51c5e 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -525,11 +525,6 @@ struct fec_enet_private { unsigned int total_tx_ring_size; unsigned int total_rx_ring_size; - unsigned long work_tx; - unsigned long work_rx; - unsigned long work_ts; - unsigned long work_mdio; - struct platform_device *pdev; int dev_id; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 2d0d313ee7c5..3982285ed020 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -75,8 +75,6 @@ static void fec_enet_itr_coal_init(struct net_device *ndev); #define DRIVER_NAME "fec" -#define FEC_ENET_GET_QUQUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 
2 : 0)) - /* Pause frame feild and FIFO threshold */ #define FEC_ENET_FCE (1 << 5) #define FEC_ENET_RSEM_V 0x84 @@ -1248,8 +1246,6 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) fep = netdev_priv(ndev); - queue_id = FEC_ENET_GET_QUQUE(queue_id); - txq = fep->tx_queue[queue_id]; /* get next bdp of dirty_tx */ nq = netdev_get_tx_queue(ndev, queue_id); @@ -1340,17 +1336,14 @@ skb_done: writel(0, txq->bd.reg_desc_active); } -static void -fec_enet_tx(struct net_device *ndev) +static void fec_enet_tx(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - u16 queue_id; - /* First process class A queue, then Class B and Best Effort queue */ - for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) { - clear_bit(queue_id, &fep->work_tx); - fec_enet_tx_queue(ndev, queue_id); - } - return; + int i; + + /* Make sure that AVB queues are processed first. */ + for (i = fep->num_tx_queues - 1; i >= 0; i--) + fec_enet_tx_queue(ndev, i); } static int @@ -1426,7 +1419,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) #ifdef CONFIG_M532x flush_cache_all(); #endif - queue_id = FEC_ENET_GET_QUQUE(queue_id); rxq = fep->rx_queue[queue_id]; /* First, grab all of the stats for the incoming packet. @@ -1550,6 +1542,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) htons(ETH_P_8021Q), vlan_tag); + skb_record_rx_queue(skb, queue_id); napi_gro_receive(&fep->napi, skb); if (is_copybreak) { @@ -1595,48 +1588,30 @@ rx_processing_done: return pkt_received; } -static int -fec_enet_rx(struct net_device *ndev, int budget) +static int fec_enet_rx(struct net_device *ndev, int budget) { - int pkt_received = 0; - u16 queue_id; struct fec_enet_private *fep = netdev_priv(ndev); + int i, done = 0; - for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) { - int ret; - - ret = fec_enet_rx_queue(ndev, - budget - pkt_received, queue_id); + /* Make sure that AVB queues are processed first. 
*/ + for (i = fep->num_rx_queues - 1; i >= 0; i--) + done += fec_enet_rx_queue(ndev, budget - done, i); - if (ret < budget - pkt_received) - clear_bit(queue_id, &fep->work_rx); - - pkt_received += ret; - } - return pkt_received; + return done; } -static bool -fec_enet_collect_events(struct fec_enet_private *fep, uint int_events) +static bool fec_enet_collect_events(struct fec_enet_private *fep) { - if (int_events == 0) - return false; + uint int_events; + + int_events = readl(fep->hwp + FEC_IEVENT); - if (int_events & FEC_ENET_RXF_0) - fep->work_rx |= (1 << 2); - if (int_events & FEC_ENET_RXF_1) - fep->work_rx |= (1 << 0); - if (int_events & FEC_ENET_RXF_2) - fep->work_rx |= (1 << 1); + /* Don't clear MDIO events, we poll for those */ + int_events &= ~FEC_ENET_MII; - if (int_events & FEC_ENET_TXF_0) - fep->work_tx |= (1 << 2); - if (int_events & FEC_ENET_TXF_1) - fep->work_tx |= (1 << 0); - if (int_events & FEC_ENET_TXF_2) - fep->work_tx |= (1 << 1); + writel(int_events, fep->hwp + FEC_IEVENT); - return true; + return int_events != 0; } static irqreturn_t @@ -1644,18 +1619,9 @@ fec_enet_interrupt(int irq, void *dev_id) { struct net_device *ndev = dev_id; struct fec_enet_private *fep = netdev_priv(ndev); - uint int_events; irqreturn_t ret = IRQ_NONE; - int_events = readl(fep->hwp + FEC_IEVENT); - - /* Don't clear MDIO events, we poll for those */ - int_events &= ~FEC_ENET_MII; - - writel(int_events, fep->hwp + FEC_IEVENT); - fec_enet_collect_events(fep, int_events); - - if ((fep->work_tx || fep->work_rx) && fep->link) { + if (fec_enet_collect_events(fep) && fep->link) { ret = IRQ_HANDLED; if (napi_schedule_prep(&fep->napi)) { @@ -1672,17 +1638,19 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget) { struct net_device *ndev = napi->dev; struct fec_enet_private *fep = netdev_priv(ndev); - int pkts; + int done = 0; - pkts = fec_enet_rx(ndev, budget); - - fec_enet_tx(ndev); + do { + done += fec_enet_rx(ndev, budget - done); + fec_enet_tx(ndev); + } while ((done < budget) && fec_enet_collect_events(fep)); - if (pkts < budget) { - napi_complete_done(napi, pkts); + if (done < budget) { + napi_complete_done(napi, done); writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); } - return pkts; + + return done; } /* ------------------------------------------------------------------------- */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index c117074c16e3..23f278e46975 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -699,7 +699,7 @@ static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data, struct net_device *ndev = ring_data->napi.dev; skb->protocol = eth_type_trans(skb, ndev); - (void)napi_gro_receive(&ring_data->napi, skb); + napi_gro_receive(&ring_data->napi, skb); } static int hns_desc_unused(struct hnae_ring *ring) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index b14f2abc2425..c38f3bbe7d97 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -4127,9 +4127,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) hns3_put_ring_config(priv); - hns3_dbg_uninit(handle); - out_netdev_free: + hns3_dbg_uninit(handle); free_netdev(netdev); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 6b1545f982aa..2622e04e8eed 100644 --- 
a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -180,18 +180,21 @@ static void hns3_lb_check_skb_data(struct hns3_enet_ring *ring, { struct hns3_enet_tqp_vector *tqp_vector = ring->tqp_vector; unsigned char *packet = skb->data; + u32 len = skb_headlen(skb); u32 i; - for (i = 0; i < skb->len; i++) + len = min_t(u32, len, HNS3_NIC_LB_TEST_PACKET_SIZE); + + for (i = 0; i < len; i++) if (packet[i] != (unsigned char)(i & 0xff)) break; /* The packet is correctly received */ - if (i == skb->len) + if (i == HNS3_NIC_LB_TEST_PACKET_SIZE) tqp_vector->rx_group.total_packets++; else print_hex_dump(KERN_ERR, "selftest:", DUMP_PREFIX_OFFSET, 16, 1, - skb->data, skb->len, true); + skb->data, len, true); dev_kfree_skb_any(skb); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 96bfad52630d..d6bfdc6520df 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -9859,7 +9859,7 @@ retry: set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); hdev->reset_type = HNAE3_FLR_RESET; ret = hclge_reset_prepare(hdev); - if (ret) { + if (ret || hdev->reset_pending) { dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n", ret); if (hdev->reset_pending || diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 1b9578d0bd80..a10b022d1951 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -1793,6 +1793,11 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) if (hdev->reset_type == HNAE3_VF_FUNC_RESET) { hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0); ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to assert VF reset, ret = %d\n", ret); + return ret; + } hdev->rst_stats.vf_func_rst_cnt++; } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index 0245da02efbb..b735bc537508 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -814,6 +814,8 @@ err_aeqs_init: err_init_msix: err_pfhwdev_alloc: hinic_free_hwif(hwif); + if (err > 0) + err = -EIO; return ERR_PTR(err); } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c index c33eb1147055..e0f5a81d8620 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c @@ -370,48 +370,89 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt, MSG_NOT_RESP, timeout); } -/** - * mgmt_recv_msg_handler - handler for message from mgmt cpu - * @pf_to_mgmt: PF to MGMT channel - * @recv_msg: received message details - **/ -static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt, - struct hinic_recv_msg *recv_msg) +static void recv_mgmt_msg_work_handler(struct work_struct *work) { - struct hinic_hwif *hwif = pf_to_mgmt->hwif; - struct pci_dev *pdev = hwif->pdev; - u8 *buf_out = recv_msg->buf_out; + struct hinic_mgmt_msg_handle_work *mgmt_work = + container_of(work, struct hinic_mgmt_msg_handle_work, work); + struct hinic_pf_to_mgmt *pf_to_mgmt = mgmt_work->pf_to_mgmt; + struct pci_dev *pdev = pf_to_mgmt->hwif->pdev; + u8 *buf_out = pf_to_mgmt->mgmt_ack_buf; 
struct hinic_mgmt_cb *mgmt_cb; unsigned long cb_state; u16 out_size = 0; - if (recv_msg->mod >= HINIC_MOD_MAX) { + memset(buf_out, 0, MAX_PF_MGMT_BUF_SIZE); + + if (mgmt_work->mod >= HINIC_MOD_MAX) { dev_err(&pdev->dev, "Unknown MGMT MSG module = %d\n", - recv_msg->mod); + mgmt_work->mod); + kfree(mgmt_work->msg); + kfree(mgmt_work); return; } - mgmt_cb = &pf_to_mgmt->mgmt_cb[recv_msg->mod]; + mgmt_cb = &pf_to_mgmt->mgmt_cb[mgmt_work->mod]; cb_state = cmpxchg(&mgmt_cb->state, HINIC_MGMT_CB_ENABLED, HINIC_MGMT_CB_ENABLED | HINIC_MGMT_CB_RUNNING); if ((cb_state == HINIC_MGMT_CB_ENABLED) && (mgmt_cb->cb)) - mgmt_cb->cb(mgmt_cb->handle, recv_msg->cmd, - recv_msg->msg, recv_msg->msg_len, + mgmt_cb->cb(mgmt_cb->handle, mgmt_work->cmd, + mgmt_work->msg, mgmt_work->msg_len, buf_out, &out_size); else dev_err(&pdev->dev, "No MGMT msg handler, mod: %d, cmd: %d\n", - recv_msg->mod, recv_msg->cmd); + mgmt_work->mod, mgmt_work->cmd); mgmt_cb->state &= ~HINIC_MGMT_CB_RUNNING; - if (!recv_msg->async_mgmt_to_pf) + if (!mgmt_work->async_mgmt_to_pf) /* MGMT sent sync msg, send the response */ - msg_to_mgmt_async(pf_to_mgmt, recv_msg->mod, recv_msg->cmd, + msg_to_mgmt_async(pf_to_mgmt, mgmt_work->mod, mgmt_work->cmd, buf_out, out_size, MGMT_RESP, - recv_msg->msg_id); + mgmt_work->msg_id); + + kfree(mgmt_work->msg); + kfree(mgmt_work); +} + +/** + * mgmt_recv_msg_handler - handler for message from mgmt cpu + * @pf_to_mgmt: PF to MGMT channel + * @recv_msg: received message details + **/ +static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt, + struct hinic_recv_msg *recv_msg) +{ + struct hinic_mgmt_msg_handle_work *mgmt_work = NULL; + struct pci_dev *pdev = pf_to_mgmt->hwif->pdev; + + mgmt_work = kzalloc(sizeof(*mgmt_work), GFP_KERNEL); + if (!mgmt_work) { + dev_err(&pdev->dev, "Allocate mgmt work memory failed\n"); + return; + } + + if (recv_msg->msg_len) { + mgmt_work->msg = kzalloc(recv_msg->msg_len, GFP_KERNEL); + if (!mgmt_work->msg) { + dev_err(&pdev->dev, "Allocate mgmt msg memory failed\n"); + kfree(mgmt_work); + return; + } + } + + mgmt_work->pf_to_mgmt = pf_to_mgmt; + mgmt_work->msg_len = recv_msg->msg_len; + memcpy(mgmt_work->msg, recv_msg->msg, recv_msg->msg_len); + mgmt_work->msg_id = recv_msg->msg_id; + mgmt_work->mod = recv_msg->mod; + mgmt_work->cmd = recv_msg->cmd; + mgmt_work->async_mgmt_to_pf = recv_msg->async_mgmt_to_pf; + + INIT_WORK(&mgmt_work->work, recv_mgmt_msg_work_handler); + queue_work(pf_to_mgmt->workq, &mgmt_work->work); } /** @@ -546,6 +587,12 @@ static int alloc_msg_buf(struct hinic_pf_to_mgmt *pf_to_mgmt) if (!pf_to_mgmt->sync_msg_buf) return -ENOMEM; + pf_to_mgmt->mgmt_ack_buf = devm_kzalloc(&pdev->dev, + MAX_PF_MGMT_BUF_SIZE, + GFP_KERNEL); + if (!pf_to_mgmt->mgmt_ack_buf) + return -ENOMEM; + return 0; } @@ -571,6 +618,11 @@ int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt, return 0; sema_init(&pf_to_mgmt->sync_msg_lock, 1); + pf_to_mgmt->workq = create_singlethread_workqueue("hinic_mgmt"); + if (!pf_to_mgmt->workq) { + dev_err(&pdev->dev, "Failed to initialize MGMT workqueue\n"); + return -ENOMEM; + } pf_to_mgmt->sync_msg_id = 0; err = alloc_msg_buf(pf_to_mgmt); @@ -605,4 +657,5 @@ void hinic_pf_to_mgmt_free(struct hinic_pf_to_mgmt *pf_to_mgmt) hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU); hinic_api_cmd_free(pf_to_mgmt->cmd_chain); + destroy_workqueue(pf_to_mgmt->workq); } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h index c2b142c08b0e..a824fbda59db 100644 
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h @@ -119,6 +119,7 @@ struct hinic_pf_to_mgmt { struct semaphore sync_msg_lock; u16 sync_msg_id; u8 *sync_msg_buf; + void *mgmt_ack_buf; struct hinic_recv_msg recv_resp_msg_from_mgmt; struct hinic_recv_msg recv_msg_from_mgmt; @@ -126,6 +127,21 @@ struct hinic_pf_to_mgmt { struct hinic_api_cmd_chain *cmd_chain[HINIC_API_CMD_MAX]; struct hinic_mgmt_cb mgmt_cb[HINIC_MOD_MAX]; + + struct workqueue_struct *workq; +}; + +struct hinic_mgmt_msg_handle_work { + struct work_struct work; + struct hinic_pf_to_mgmt *pf_to_mgmt; + + void *msg; + u16 msg_len; + + enum hinic_mod_type mod; + u8 cmd; + u16 msg_id; + int async_mgmt_to_pf; }; void hinic_register_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt, diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 96d36ae5049e..c5c732601e35 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1715,7 +1715,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) } netdev->min_mtu = IBMVETH_MIN_MTU; - netdev->max_mtu = ETH_MAX_MTU; + netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH; memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 2baf7b3ff4cb..0fd7eae25fe9 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1971,13 +1971,18 @@ static int do_reset(struct ibmvnic_adapter *adapter, release_sub_crqs(adapter, 1); } else { rc = ibmvnic_reset_crq(adapter); - if (!rc) + if (rc == H_CLOSED || rc == H_SUCCESS) { rc = vio_enable_interrupts(adapter->vdev); + if (rc) + netdev_err(adapter->netdev, + "Reset failed to enable interrupts. rc=%d\n", + rc); + } } if (rc) { netdev_err(adapter->netdev, - "Couldn't initialize crq. rc=%d\n", rc); + "Reset couldn't initialize crq. 
rc=%d\n", rc); goto out; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index aa8026b1eb81..67806b7b2f49 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2072,6 +2072,9 @@ static int i40e_set_ringparam(struct net_device *netdev, err = i40e_setup_rx_descriptors(&rx_rings[i]); if (err) goto rx_unwind; + err = i40e_alloc_rx_bi(&rx_rings[i]); + if (err) + goto rx_unwind; /* now allocate the Rx buffers to make sure the OS * has enough memory, any failure here means abort diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 5d807c8004f8..56ecd6c3f236 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -439,11 +439,15 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev, i40e_get_netdev_stats_struct_tx(ring, stats); if (i40e_enabled_xdp_vsi(vsi)) { - ring++; + ring = READ_ONCE(vsi->xdp_rings[i]); + if (!ring) + continue; i40e_get_netdev_stats_struct_tx(ring, stats); } - ring++; + ring = READ_ONCE(vsi->rx_rings[i]); + if (!ring) + continue; do { start = u64_stats_fetch_begin_irq(&ring->syncp); packets = ring->stats.packets; @@ -787,6 +791,8 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) for (q = 0; q < vsi->num_queue_pairs; q++) { /* locate Tx ring */ p = READ_ONCE(vsi->tx_rings[q]); + if (!p) + continue; do { start = u64_stats_fetch_begin_irq(&p->syncp); @@ -800,8 +806,11 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) tx_linearize += p->tx_stats.tx_linearize; tx_force_wb += p->tx_stats.tx_force_wb; - /* Rx queue is part of the same block as Tx queue */ - p = &p[1]; + /* locate Rx ring */ + p = READ_ONCE(vsi->rx_rings[q]); + if (!p) + continue; + do { start = u64_stats_fetch_begin_irq(&p->syncp); packets = p->stats.packets; @@ -10824,10 +10833,10 @@ static void i40e_vsi_clear_rings(struct i40e_vsi *vsi) if (vsi->tx_rings && vsi->tx_rings[0]) { for (i = 0; i < vsi->alloc_queue_pairs; i++) { kfree_rcu(vsi->tx_rings[i], rcu); - vsi->tx_rings[i] = NULL; - vsi->rx_rings[i] = NULL; + WRITE_ONCE(vsi->tx_rings[i], NULL); + WRITE_ONCE(vsi->rx_rings[i], NULL); if (vsi->xdp_rings) - vsi->xdp_rings[i] = NULL; + WRITE_ONCE(vsi->xdp_rings[i], NULL); } } } @@ -10861,7 +10870,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; ring->itr_setting = pf->tx_itr_default; - vsi->tx_rings[i] = ring++; + WRITE_ONCE(vsi->tx_rings[i], ring++); if (!i40e_enabled_xdp_vsi(vsi)) goto setup_rx; @@ -10879,7 +10888,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; set_ring_xdp(ring); ring->itr_setting = pf->tx_itr_default; - vsi->xdp_rings[i] = ring++; + WRITE_ONCE(vsi->xdp_rings[i], ring++); setup_rx: ring->queue_index = i; @@ -10892,7 +10901,7 @@ setup_rx: ring->size = 0; ring->dcb_tc = 0; ring->itr_setting = pf->rx_itr_default; - vsi->rx_rings[i] = ring; + WRITE_ONCE(vsi->rx_rings[i], ring); } return 0; diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 28b46cc9f5cb..2e3a39cea2c0 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -1194,7 +1194,7 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi) for (i = 0; i < vsi->alloc_txq; i++) { if (vsi->tx_rings[i]) { kfree_rcu(vsi->tx_rings[i], rcu); - 
vsi->tx_rings[i] = NULL; + WRITE_ONCE(vsi->tx_rings[i], NULL); } } } @@ -1202,7 +1202,7 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi) for (i = 0; i < vsi->alloc_rxq; i++) { if (vsi->rx_rings[i]) { kfree_rcu(vsi->rx_rings[i], rcu); - vsi->rx_rings[i] = NULL; + WRITE_ONCE(vsi->rx_rings[i], NULL); } } } @@ -1235,7 +1235,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) ring->vsi = vsi; ring->dev = dev; ring->count = vsi->num_tx_desc; - vsi->tx_rings[i] = ring; + WRITE_ONCE(vsi->tx_rings[i], ring); } /* Allocate Rx rings */ @@ -1254,7 +1254,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) ring->netdev = vsi->netdev; ring->dev = dev; ring->count = vsi->num_rx_desc; - vsi->rx_rings[i] = ring; + WRITE_ONCE(vsi->rx_rings[i], ring); } return 0; diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 082825e3cb39..4cbd49c87568 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -1702,7 +1702,7 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi) xdp_ring->netdev = NULL; xdp_ring->dev = dev; xdp_ring->count = vsi->num_tx_desc; - vsi->xdp_rings[i] = xdp_ring; + WRITE_ONCE(vsi->xdp_rings[i], xdp_ring); if (ice_setup_tx_ring(xdp_ring)) goto free_xdp_rings; ice_set_ring_xdp(xdp_ring); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index fd9f5d41b594..2e35c5706cf1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -921,7 +921,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, ring->queue_index = txr_idx; /* assign ring to adapter */ - adapter->tx_ring[txr_idx] = ring; + WRITE_ONCE(adapter->tx_ring[txr_idx], ring); /* update count and index */ txr_count--; @@ -948,7 +948,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, set_ring_xdp(ring); /* assign ring to adapter */ - adapter->xdp_ring[xdp_idx] = ring; + WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring); /* update count and index */ xdp_count--; @@ -991,7 +991,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, ring->queue_index = rxr_idx; /* assign ring to adapter */ - adapter->rx_ring[rxr_idx] = ring; + WRITE_ONCE(adapter->rx_ring[rxr_idx], ring); /* update count and index */ rxr_count--; @@ -1020,13 +1020,13 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) ixgbe_for_each_ring(ring, q_vector->tx) { if (ring_is_xdp(ring)) - adapter->xdp_ring[ring->queue_index] = NULL; + WRITE_ONCE(adapter->xdp_ring[ring->queue_index], NULL); else - adapter->tx_ring[ring->queue_index] = NULL; + WRITE_ONCE(adapter->tx_ring[ring->queue_index], NULL); } ixgbe_for_each_ring(ring, q_vector->rx) - adapter->rx_ring[ring->queue_index] = NULL; + WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL); adapter->q_vector[v_idx] = NULL; napi_hash_del(&q_vector->napi); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index f162b8b8f345..97a423ecf808 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -7051,7 +7051,10 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) } for (i = 0; i < adapter->num_rx_queues; i++) { - struct ixgbe_ring *rx_ring = adapter->rx_ring[i]; + struct ixgbe_ring *rx_ring = READ_ONCE(adapter->rx_ring[i]); + + if (!rx_ring) + continue; non_eop_descs += rx_ring->rx_stats.non_eop_descs; alloc_rx_page += 
rx_ring->rx_stats.alloc_rx_page; alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; @@ -7072,15 +7075,20 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) packets = 0; /* gather some stats to the adapter struct that are per queue */ for (i = 0; i < adapter->num_tx_queues; i++) { - struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; + struct ixgbe_ring *tx_ring = READ_ONCE(adapter->tx_ring[i]); + + if (!tx_ring) + continue; restart_queue += tx_ring->tx_stats.restart_queue; tx_busy += tx_ring->tx_stats.tx_busy; bytes += tx_ring->stats.bytes; packets += tx_ring->stats.packets; } for (i = 0; i < adapter->num_xdp_queues; i++) { - struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i]; + struct ixgbe_ring *xdp_ring = READ_ONCE(adapter->xdp_ring[i]); + if (!xdp_ring) + continue; restart_queue += xdp_ring->tx_stats.restart_queue; tx_busy += xdp_ring->tx_stats.tx_busy; bytes += xdp_ring->stats.bytes; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 946925bbcb2d..7d5d9d34f4e4 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -106,9 +106,11 @@ #define MVNETA_TX_IN_PRGRS BIT(1) #define MVNETA_TX_FIFO_EMPTY BIT(8) #define MVNETA_RX_MIN_FRAME_SIZE 0x247c +/* Only exists on Armada XP and Armada 370 */ #define MVNETA_SERDES_CFG 0x24A0 #define MVNETA_SGMII_SERDES_PROTO 0x0cc7 #define MVNETA_QSGMII_SERDES_PROTO 0x0667 +#define MVNETA_HSGMII_SERDES_PROTO 0x1107 #define MVNETA_TYPE_PRIO 0x24bc #define MVNETA_FORCE_UNI BIT(21) #define MVNETA_TXQ_CMD_1 0x24e4 @@ -3529,26 +3531,60 @@ static int mvneta_setup_txqs(struct mvneta_port *pp) return 0; } -static int mvneta_comphy_init(struct mvneta_port *pp) +static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface) { int ret; - if (!pp->comphy) - return 0; - - ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, - pp->phy_interface); + ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface); if (ret) return ret; return phy_power_on(pp->comphy); } +static int mvneta_config_interface(struct mvneta_port *pp, + phy_interface_t interface) +{ + int ret = 0; + + if (pp->comphy) { + if (interface == PHY_INTERFACE_MODE_SGMII || + interface == PHY_INTERFACE_MODE_1000BASEX || + interface == PHY_INTERFACE_MODE_2500BASEX) { + ret = mvneta_comphy_init(pp, interface); + } + } else { + switch (interface) { + case PHY_INTERFACE_MODE_QSGMII: + mvreg_write(pp, MVNETA_SERDES_CFG, + MVNETA_QSGMII_SERDES_PROTO); + break; + + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + mvreg_write(pp, MVNETA_SERDES_CFG, + MVNETA_SGMII_SERDES_PROTO); + break; + + case PHY_INTERFACE_MODE_2500BASEX: + mvreg_write(pp, MVNETA_SERDES_CFG, + MVNETA_HSGMII_SERDES_PROTO); + break; + default: + break; + } + } + + pp->phy_interface = interface; + + return ret; +} + static void mvneta_start_dev(struct mvneta_port *pp) { int cpu; - WARN_ON(mvneta_comphy_init(pp)); + WARN_ON(mvneta_config_interface(pp, pp->phy_interface)); mvneta_max_rx_size_set(pp, pp->pkt_size); mvneta_txq_max_tx_size_set(pp, pp->pkt_size); @@ -3923,17 +3959,13 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode, /* When at 2.5G, the link partner can send frames with shortened * preambles. 
*/ - if (state->speed == SPEED_2500) + if (state->interface == PHY_INTERFACE_MODE_2500BASEX) new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE; - if (pp->comphy && pp->phy_interface != state->interface && - (state->interface == PHY_INTERFACE_MODE_SGMII || - state->interface == PHY_INTERFACE_MODE_1000BASEX || - state->interface == PHY_INTERFACE_MODE_2500BASEX)) { - pp->phy_interface = state->interface; - - WARN_ON(phy_power_off(pp->comphy)); - WARN_ON(mvneta_comphy_init(pp)); + if (pp->phy_interface != state->interface) { + if (pp->comphy) + WARN_ON(phy_power_off(pp->comphy)); + WARN_ON(mvneta_config_interface(pp, state->interface)); } if (new_ctrl0 != gmac_ctrl0) @@ -4982,12 +5014,10 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) /* MAC Cause register should be cleared */ mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0); - if (phy_mode == PHY_INTERFACE_MODE_QSGMII) - mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO); - else if (phy_mode == PHY_INTERFACE_MODE_SGMII || - phy_interface_mode_is_8023z(phy_mode)) - mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO); - else if (!phy_interface_mode_is_rgmii(phy_mode)) + if (phy_mode != PHY_INTERFACE_MODE_QSGMII && + phy_mode != PHY_INTERFACE_MODE_SGMII && + !phy_interface_mode_is_8023z(phy_mode) && + !phy_interface_mode_is_rgmii(phy_mode)) return -EINVAL; return 0; @@ -5176,10 +5206,10 @@ static int mvneta_probe(struct platform_device *pdev) if (err < 0) goto err_netdev; - err = mvneta_port_power_up(pp, phy_mode); + err = mvneta_port_power_up(pp, pp->phy_interface); if (err < 0) { dev_err(&pdev->dev, "can't power up port\n"); - goto err_netdev; + return err; } /* Armada3700 network controller does not support per-cpu diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 241f00716979..fe54764caea9 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -203,7 +203,7 @@ io_error: static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg) { - u16 v; + u16 v = 0; __gm_phy_read(hw, port, reg, &v); return v; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h index 7be6b2d36b60..9976de8b9047 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h @@ -29,6 +29,7 @@ struct mlx5e_dcbx { bool manual_buffer; u32 cable_len; u32 xoff; + u16 port_buff_cell_sz; }; #define MLX5E_MAX_DSCP (64) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c index 2a8950b3056f..3cf3e35053f7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c @@ -78,11 +78,26 @@ static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = { [MLX5E_400GAUI_8] = 400000, }; +bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev) +{ + struct mlx5e_port_eth_proto eproto; + int err; + + if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet)) + return true; + + err = mlx5_port_query_eth_proto(mdev, 1, true, &eproto); + if (err) + return false; + + return !!eproto.cap; +} + static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev, const u32 **arr, u32 *size, bool force_legacy) { - bool ext = force_legacy ? false : MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + bool ext = force_legacy ? false : mlx5e_ptys_ext_supported(mdev); *size = ext ? 
ARRAY_SIZE(mlx5e_ext_link_speed) : ARRAY_SIZE(mlx5e_link_speed); @@ -177,7 +192,7 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) bool ext; int err; - ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + ext = mlx5e_ptys_ext_supported(mdev); err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto); if (err) goto out; @@ -205,7 +220,7 @@ int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) int err; int i; - ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + ext = mlx5e_ptys_ext_supported(mdev); err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h index a2ddd446dd59..7a7defe60792 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h @@ -54,7 +54,7 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed, bool force_legacy); - +bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev); int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out); int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in); int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c index ae99fac08b53..673f1c82d381 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c @@ -34,6 +34,7 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv, struct mlx5e_port_buffer *port_buffer) { + u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz; struct mlx5_core_dev *mdev = priv->mdev; int sz = MLX5_ST_SZ_BYTES(pbmc_reg); u32 total_used = 0; @@ -57,11 +58,11 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv, port_buffer->buffer[i].epsb = MLX5_GET(bufferx_reg, buffer, epsb); port_buffer->buffer[i].size = - MLX5_GET(bufferx_reg, buffer, size) << MLX5E_BUFFER_CELL_SHIFT; + MLX5_GET(bufferx_reg, buffer, size) * port_buff_cell_sz; port_buffer->buffer[i].xon = - MLX5_GET(bufferx_reg, buffer, xon_threshold) << MLX5E_BUFFER_CELL_SHIFT; + MLX5_GET(bufferx_reg, buffer, xon_threshold) * port_buff_cell_sz; port_buffer->buffer[i].xoff = - MLX5_GET(bufferx_reg, buffer, xoff_threshold) << MLX5E_BUFFER_CELL_SHIFT; + MLX5_GET(bufferx_reg, buffer, xoff_threshold) * port_buff_cell_sz; total_used += port_buffer->buffer[i].size; mlx5e_dbg(HW, priv, "buffer %d: size=%d, xon=%d, xoff=%d, epsb=%d, lossy=%d\n", i, @@ -73,7 +74,7 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv, } port_buffer->port_buffer_size = - MLX5_GET(pbmc_reg, out, port_buffer_size) << MLX5E_BUFFER_CELL_SHIFT; + MLX5_GET(pbmc_reg, out, port_buffer_size) * port_buff_cell_sz; port_buffer->spare_buffer_size = port_buffer->port_buffer_size - total_used; @@ -88,9 +89,9 @@ out: static int port_set_buffer(struct mlx5e_priv *priv, struct mlx5e_port_buffer *port_buffer) { + u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz; struct mlx5_core_dev *mdev = priv->mdev; int sz = MLX5_ST_SZ_BYTES(pbmc_reg); - void *buffer; void *in; int err; int i; @@ -104,16 +105,18 @@ static int port_set_buffer(struct mlx5e_priv *priv, goto out; for (i = 0; i < MLX5E_MAX_BUFFER; i++) { - buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]); - - MLX5_SET(bufferx_reg, 
buffer, size, - port_buffer->buffer[i].size >> MLX5E_BUFFER_CELL_SHIFT); - MLX5_SET(bufferx_reg, buffer, lossy, - port_buffer->buffer[i].lossy); - MLX5_SET(bufferx_reg, buffer, xoff_threshold, - port_buffer->buffer[i].xoff >> MLX5E_BUFFER_CELL_SHIFT); - MLX5_SET(bufferx_reg, buffer, xon_threshold, - port_buffer->buffer[i].xon >> MLX5E_BUFFER_CELL_SHIFT); + void *buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]); + u64 size = port_buffer->buffer[i].size; + u64 xoff = port_buffer->buffer[i].xoff; + u64 xon = port_buffer->buffer[i].xon; + + do_div(size, port_buff_cell_sz); + do_div(xoff, port_buff_cell_sz); + do_div(xon, port_buff_cell_sz); + MLX5_SET(bufferx_reg, buffer, size, size); + MLX5_SET(bufferx_reg, buffer, lossy, port_buffer->buffer[i].lossy); + MLX5_SET(bufferx_reg, buffer, xoff_threshold, xoff); + MLX5_SET(bufferx_reg, buffer, xon_threshold, xon); } err = mlx5e_port_set_pbmc(mdev, in); @@ -143,7 +146,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) } static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, - u32 xoff, unsigned int max_mtu) + u32 xoff, unsigned int max_mtu, u16 port_buff_cell_sz) { int i; @@ -155,7 +158,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, } if (port_buffer->buffer[i].size < - (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) { + (xoff + max_mtu + port_buff_cell_sz)) { pr_err("buffer_size[%d]=%d is not enough for lossless buffer\n", i, port_buffer->buffer[i].size); return -ENOMEM; @@ -175,6 +178,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, * @pfc_en: <input> current pfc configuration * @buffer: <input> current prio to buffer mapping * @xoff: <input> xoff value + * @port_buff_cell_sz: <input> port buffer cell_size * @port_buffer: <output> port receive buffer configuration * @change: <output> * @@ -189,7 +193,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, * sets change to true if buffer configuration was modified. 
*/ static int update_buffer_lossy(unsigned int max_mtu, - u8 pfc_en, u8 *buffer, u32 xoff, + u8 pfc_en, u8 *buffer, u32 xoff, u16 port_buff_cell_sz, struct mlx5e_port_buffer *port_buffer, bool *change) { @@ -225,7 +229,7 @@ static int update_buffer_lossy(unsigned int max_mtu, } if (changed) { - err = update_xoff_threshold(port_buffer, xoff, max_mtu); + err = update_xoff_threshold(port_buffer, xoff, max_mtu, port_buff_cell_sz); if (err) return err; @@ -262,6 +266,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, u32 *buffer_size, u8 *prio2buffer) { + u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz; struct mlx5e_port_buffer port_buffer; u32 xoff = calculate_xoff(priv, mtu); bool update_prio2buffer = false; @@ -282,7 +287,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, if (change & MLX5E_PORT_BUFFER_CABLE_LEN) { update_buffer = true; - err = update_xoff_threshold(&port_buffer, xoff, max_mtu); + err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz); if (err) return err; } @@ -292,7 +297,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, if (err) return err; - err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff, + err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff, port_buff_cell_sz, &port_buffer, &update_buffer); if (err) return err; @@ -304,7 +309,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, if (err) return err; - err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, + err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, port_buff_cell_sz, xoff, &port_buffer, &update_buffer); if (err) return err; @@ -329,7 +334,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, return -EINVAL; update_buffer = true; - err = update_xoff_threshold(&port_buffer, xoff, max_mtu); + err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz); if (err) return err; } @@ -337,7 +342,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, /* Need to update buffer configuration if xoff value is changed */ if (!update_buffer && xoff != priv->dcbx.xoff) { update_buffer = true; - err = update_xoff_threshold(&port_buffer, xoff, max_mtu); + err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz); if (err) return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h index 34f55b81a0de..80af7a5ac604 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h @@ -36,7 +36,6 @@ #include "port.h" #define MLX5E_MAX_BUFFER 8 -#define MLX5E_BUFFER_CELL_SHIFT 7 #define MLX5E_DEFAULT_CABLE_LEN 7 /* 7 meters */ #define MLX5_BUFFER_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, pcam_reg) && \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c index baa162432e75..c3d167fa944c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c @@ -6,7 +6,6 @@ #include <linux/rculist.h> #include <linux/rtnetlink.h> #include <linux/workqueue.h> -#include <linux/rwlock.h> #include <linux/spinlock.h> #include <linux/notifier.h> #include <net/netevent.h> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c index 80713123de5c..eefeb1cdc2ee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c @@ -407,7 +407,9 @@ static int mlx5e_rep_indr_setup_block(struct net_device *netdev, struct mlx5e_rep_priv *rpriv, struct flow_block_offload *f, - flow_setup_cb_t *setup_cb) + flow_setup_cb_t *setup_cb, + void *data, + void (*cleanup)(struct flow_block_cb *block_cb)) { struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); struct mlx5e_rep_indr_block_priv *indr_priv; @@ -438,8 +440,10 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev, list_add(&indr_priv->list, &rpriv->uplink_priv.tc_indr_block_priv_list); - block_cb = flow_block_cb_alloc(setup_cb, indr_priv, indr_priv, - mlx5e_rep_indr_block_unbind); + block_cb = flow_indr_block_cb_alloc(setup_cb, indr_priv, indr_priv, + mlx5e_rep_indr_block_unbind, + f, netdev, data, rpriv, + cleanup); if (IS_ERR(block_cb)) { list_del(&indr_priv->list); kfree(indr_priv); @@ -458,7 +462,7 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev, if (!block_cb) return -ENOENT; - flow_block_cb_remove(block_cb, f); + flow_indr_block_cb_remove(block_cb, f); list_del(&block_cb->driver_list); return 0; default: @@ -469,15 +473,19 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev, static int mlx5e_rep_indr_setup_cb(struct net_device *netdev, void *cb_priv, - enum tc_setup_type type, void *type_data) + enum tc_setup_type type, void *type_data, + void *data, + void (*cleanup)(struct flow_block_cb *block_cb)) { switch (type) { case TC_SETUP_BLOCK: return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data, - mlx5e_rep_indr_setup_tc_cb); + mlx5e_rep_indr_setup_tc_cb, + data, cleanup); case TC_SETUP_FT: return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data, - mlx5e_rep_indr_setup_ft_cb); + mlx5e_rep_indr_setup_ft_cb, + data, cleanup); default: return -EOPNOTSUPP; } @@ -496,7 +504,7 @@ int mlx5e_rep_tc_netdevice_event_register(struct mlx5e_rep_priv *rpriv) void mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv) { flow_indr_dev_unregister(mlx5e_rep_indr_setup_cb, rpriv, - mlx5e_rep_indr_setup_tc_cb); + mlx5e_rep_indr_block_unbind); } #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 430025550fad..aad1c29b23db 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -1097,6 +1097,7 @@ mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg) struct mlx5_ct_entry *entry = ptr; mlx5_tc_ct_entry_del_rules(ct_priv, entry); + kfree(entry); } static void diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index bc102d094bbd..d20243d6a032 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -1217,6 +1217,24 @@ static int mlx5e_trust_initialize(struct mlx5e_priv *priv) return 0; } +#define MLX5E_BUFFER_CELL_SHIFT 7 + +static u16 mlx5e_query_port_buffers_cell_size(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + u32 out[MLX5_ST_SZ_DW(sbcam_reg)] = {}; + u32 in[MLX5_ST_SZ_DW(sbcam_reg)] = {}; + + if (!MLX5_CAP_GEN(mdev, sbcam_reg)) + return (1 << MLX5E_BUFFER_CELL_SHIFT); + + if (mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), + MLX5_REG_SBCAM, 0, 0)) + return (1 << MLX5E_BUFFER_CELL_SHIFT); + + return MLX5_GET(sbcam_reg, out, cap_cell_size); +} + void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv) { struct mlx5e_dcbx *dcbx = &priv->dcbx; @@ -1234,6 
+1252,7 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv) if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST) priv->dcbx.cap |= DCB_CAP_DCBX_HOST; + priv->dcbx.port_buff_cell_sz = mlx5e_query_port_buffers_cell_size(priv); priv->dcbx.manual_buffer = false; priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index ec5658bbe3c5..c2464c349117 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -200,7 +200,7 @@ static void mlx5e_ethtool_get_speed_arr(struct mlx5_core_dev *mdev, struct ptys2ethtool_config **arr, u32 *size) { - bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + bool ext = mlx5e_ptys_ext_supported(mdev); *arr = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table; *size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) : @@ -883,7 +883,7 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp, struct ethtool_link_ksettings *link_ksettings) { unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising; - bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + bool ext = mlx5e_ptys_ext_supported(mdev); ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext); } @@ -913,7 +913,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, __func__, err); goto err_query_regs; } - ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability); eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_capability); eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, @@ -1066,7 +1066,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, autoneg = link_ksettings->base.autoneg; speed = link_ksettings->base.speed; - ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + ext_supported = mlx5e_ptys_ext_supported(mdev); ext = ext_requested(autoneg, adver, ext_supported); if (!ext_supported && ext) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index a836a02a2116..081f15074cac 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3104,9 +3104,6 @@ int mlx5e_open(struct net_device *netdev) mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP); mutex_unlock(&priv->state_lock); - if (mlx5_vxlan_allowed(priv->mdev->vxlan)) - udp_tunnel_get_rx_info(netdev); - return err; } @@ -5121,6 +5118,10 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) if (err) goto err_destroy_flow_steering; +#ifdef CONFIG_MLX5_EN_ARFS + priv->netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(priv->mdev); +#endif + return 0; err_destroy_flow_steering: @@ -5202,6 +5203,8 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) rtnl_lock(); if (netif_running(netdev)) mlx5e_open(netdev); + if (mlx5_vxlan_allowed(priv->mdev->vxlan)) + udp_tunnel_get_rx_info(netdev); netif_device_attach(netdev); rtnl_unlock(); } @@ -5216,6 +5219,8 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv) rtnl_lock(); if (netif_running(priv->netdev)) mlx5e_close(priv->netdev); + if (mlx5_vxlan_allowed(priv->mdev->vxlan)) + udp_tunnel_drop_rx_info(priv->netdev); netif_device_detach(priv->netdev); rtnl_unlock(); @@ -5288,10 +5293,6 @@ int mlx5e_netdev_init(struct net_device *netdev, /* netdev init */ netif_carrier_off(netdev); 
-#ifdef CONFIG_MLX5_EN_ARFS - netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(mdev); -#endif - return 0; err_free_cpumask: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 7fc84f58e28a..cc8412151ca0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -4670,9 +4670,10 @@ static bool is_flow_rule_duplicate_allowed(struct net_device *dev, struct mlx5e_rep_priv *rpriv) { /* Offloaded flow rule is allowed to duplicate on non-uplink representor - * sharing tc block with other slaves of a lag device. + * sharing tc block with other slaves of a lag device. Rpriv can be NULL if this + * function is called from NIC mode. */ - return netif_is_lag_port(dev) && rpriv->rep->vport != MLX5_VPORT_UPLINK; + return netif_is_lag_port(dev) && rpriv && rpriv->rep->vport != MLX5_VPORT_UPLINK; } int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, @@ -4686,13 +4687,12 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, rcu_read_lock(); flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params); - rcu_read_unlock(); if (flow) { /* Same flow rule offloaded to non-uplink representor sharing tc block, * just return 0. */ if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev) - goto out; + goto rcu_unlock; NL_SET_ERR_MSG_MOD(extack, "flow cookie already exists, ignoring"); @@ -4700,8 +4700,12 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, "flow cookie %lx already exists, ignoring\n", f->cookie); err = -EEXIST; - goto out; + goto rcu_unlock; } +rcu_unlock: + rcu_read_unlock(); + if (flow) + goto out; trace_mlx5e_configure_flower(f); err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c index 5dc335e621c5..b68976b378b8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c @@ -217,7 +217,6 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw, } /* Create ingress allow rule */ - memset(spec, 0, sizeof(*spec)); spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW; vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 9f829e68fc73..e4186e84b3ff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -293,7 +293,40 @@ static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num) return 0; } -static int mlx5_eeprom_page(int offset) +static int mlx5_query_module_id(struct mlx5_core_dev *dev, int module_num, + u8 *module_id) +{ + u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {}; + u32 out[MLX5_ST_SZ_DW(mcia_reg)]; + int err, status; + u8 *ptr; + + MLX5_SET(mcia_reg, in, i2c_device_address, MLX5_I2C_ADDR_LOW); + MLX5_SET(mcia_reg, in, module, module_num); + MLX5_SET(mcia_reg, in, device_address, 0); + MLX5_SET(mcia_reg, in, page_number, 0); + MLX5_SET(mcia_reg, in, size, 1); + MLX5_SET(mcia_reg, in, l, 0); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_MCIA, 0, 0); + if (err) + return err; + + status = MLX5_GET(mcia_reg, out, status); + if (status) { + mlx5_core_err(dev, "query_mcia_reg 
failed: status: 0x%x\n", + status); + return -EIO; + } + ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0); + + *module_id = ptr[0]; + + return 0; +} + +static int mlx5_qsfp_eeprom_page(u16 offset) { if (offset < MLX5_EEPROM_PAGE_LENGTH) /* Addresses between 0-255 - page 00 */ @@ -307,7 +340,7 @@ static int mlx5_eeprom_page(int offset) MLX5_EEPROM_HIGH_PAGE_LENGTH); } -static int mlx5_eeprom_high_page_offset(int page_num) +static int mlx5_qsfp_eeprom_high_page_offset(int page_num) { if (!page_num) /* Page 0 always start from low page */ return 0; @@ -316,35 +349,62 @@ static int mlx5_eeprom_high_page_offset(int page_num) return page_num * MLX5_EEPROM_HIGH_PAGE_LENGTH; } +static void mlx5_qsfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset) +{ + *i2c_addr = MLX5_I2C_ADDR_LOW; + *page_num = mlx5_qsfp_eeprom_page(*offset); + *offset -= mlx5_qsfp_eeprom_high_page_offset(*page_num); +} + +static void mlx5_sfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset) +{ + *i2c_addr = MLX5_I2C_ADDR_LOW; + *page_num = 0; + + if (*offset < MLX5_EEPROM_PAGE_LENGTH) + return; + + *i2c_addr = MLX5_I2C_ADDR_HIGH; + *offset -= MLX5_EEPROM_PAGE_LENGTH; +} + int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, u16 offset, u16 size, u8 *data) { - int module_num, page_num, status, err; + int module_num, status, err, page_num = 0; + u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {}; u32 out[MLX5_ST_SZ_DW(mcia_reg)]; - u32 in[MLX5_ST_SZ_DW(mcia_reg)]; - u16 i2c_addr; - void *ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0); + u16 i2c_addr = 0; + u8 module_id; + void *ptr; err = mlx5_query_module_num(dev, &module_num); if (err) return err; - memset(in, 0, sizeof(in)); - size = min_t(int, size, MLX5_EEPROM_MAX_BYTES); - - /* Get the page number related to the given offset */ - page_num = mlx5_eeprom_page(offset); + err = mlx5_query_module_id(dev, module_num, &module_id); + if (err) + return err; - /* Set the right offset according to the page number, - * For page_num > 0, relative offset is always >= 128 (high page). 
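The page arithmetic behind mlx5_qsfp_eeprom_page() and mlx5_qsfp_eeprom_high_page_offset() above condenses to a few lines of plain C. This sketch assumes the usual QSFP flat layout that the driver's MLX5_EEPROM_PAGE_LENGTH (256) and MLX5_EEPROM_HIGH_PAGE_LENGTH (128) constants describe; it is an illustration, not the driver code:

#include <stdio.h>

#define EEPROM_PAGE_LEN       256	/* flat bytes 0..255: lower page 0 + upper page 0 */
#define EEPROM_HIGH_PAGE_LEN  128	/* each further upper page adds 128 flat bytes */

/* Which EEPROM page does a flat offset land in?
 * e.g. offset 400: 1 + (400 - 256) / 128 = page 2 */
static int qsfp_page(unsigned int offset)
{
	if (offset < EEPROM_PAGE_LEN)
		return 0;
	return 1 + (offset - EEPROM_PAGE_LEN) / EEPROM_HIGH_PAGE_LEN;
}

/* Amount to subtract from a flat offset so that, together with the page
 * number, it becomes the device address the module expects (upper pages
 * are always addressed at bytes 128..255). */
static unsigned int qsfp_high_page_offset(int page)
{
	return page ? (unsigned int)page * EEPROM_HIGH_PAGE_LEN : 0;
}

int main(void)
{
	unsigned int offset = 400;
	int page = qsfp_page(offset);

	/* The difference is the device address used in the MCIA request. */
	printf("flat %u -> page %d, device address %u\n",
	       offset, page, offset - qsfp_high_page_offset(page));
	return 0;
}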
- */ - offset -= mlx5_eeprom_high_page_offset(page_num); + switch (module_id) { + case MLX5_MODULE_ID_SFP: + mlx5_sfp_eeprom_params_set(&i2c_addr, &page_num, &offset); + break; + case MLX5_MODULE_ID_QSFP: + case MLX5_MODULE_ID_QSFP_PLUS: + case MLX5_MODULE_ID_QSFP28: + mlx5_qsfp_eeprom_params_set(&i2c_addr, &page_num, &offset); + break; + default: + mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id); + return -EINVAL; + } if (offset + size > MLX5_EEPROM_PAGE_LENGTH) /* Cross pages read, read until offset 256 in low page */ size -= offset + size - MLX5_EEPROM_PAGE_LENGTH; - i2c_addr = MLX5_I2C_ADDR_LOW; + size = min_t(int, size, MLX5_EEPROM_MAX_BYTES); MLX5_SET(mcia_reg, in, l, 0); MLX5_SET(mcia_reg, in, module, module_num); @@ -365,6 +425,7 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, return -EIO; } + ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0); memcpy(data, ptr, size); return size; diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index fd0e97de44e7..c04ec1a92826 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -1414,23 +1414,12 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, u16 num_pages; int err; - mutex_init(&mlxsw_pci->cmd.lock); - init_waitqueue_head(&mlxsw_pci->cmd.wait); - mlxsw_pci->core = mlxsw_core; mbox = mlxsw_cmd_mbox_alloc(); if (!mbox) return -ENOMEM; - err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); - if (err) - goto mbox_put; - - err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); - if (err) - goto err_out_mbox_alloc; - err = mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id); if (err) goto err_sw_reset; @@ -1537,9 +1526,6 @@ err_query_fw: mlxsw_pci_free_irq_vectors(mlxsw_pci); err_alloc_irq: err_sw_reset: - mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); -err_out_mbox_alloc: - mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); mbox_put: mlxsw_cmd_mbox_free(mbox); return err; @@ -1553,8 +1539,6 @@ static void mlxsw_pci_fini(void *bus_priv) mlxsw_pci_aqs_fini(mlxsw_pci); mlxsw_pci_fw_area_fini(mlxsw_pci); mlxsw_pci_free_irq_vectors(mlxsw_pci); - mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); - mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); } static struct mlxsw_pci_queue * @@ -1776,6 +1760,37 @@ static const struct mlxsw_bus mlxsw_pci_bus = { .features = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET, }; +static int mlxsw_pci_cmd_init(struct mlxsw_pci *mlxsw_pci) +{ + int err; + + mutex_init(&mlxsw_pci->cmd.lock); + init_waitqueue_head(&mlxsw_pci->cmd.wait); + + err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); + if (err) + goto err_in_mbox_alloc; + + err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); + if (err) + goto err_out_mbox_alloc; + + return 0; + +err_out_mbox_alloc: + mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); +err_in_mbox_alloc: + mutex_destroy(&mlxsw_pci->cmd.lock); + return err; +} + +static void mlxsw_pci_cmd_fini(struct mlxsw_pci *mlxsw_pci) +{ + mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); + mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); + mutex_destroy(&mlxsw_pci->cmd.lock); +} + static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { const char *driver_name = pdev->driver->name; @@ -1831,6 +1846,10 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) mlxsw_pci->pdev = pdev; pci_set_drvdata(pdev, mlxsw_pci); + err = 
mlxsw_pci_cmd_init(mlxsw_pci); + if (err) + goto err_pci_cmd_init; + mlxsw_pci->bus_info.device_kind = driver_name; mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev); mlxsw_pci->bus_info.dev = &pdev->dev; @@ -1848,6 +1867,8 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; err_bus_device_register: + mlxsw_pci_cmd_fini(mlxsw_pci); +err_pci_cmd_init: iounmap(mlxsw_pci->hw_addr); err_ioremap: err_pci_resource_len_check: @@ -1865,6 +1886,7 @@ static void mlxsw_pci_remove(struct pci_dev *pdev) struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev); mlxsw_core_bus_device_unregister(mlxsw_pci->core, false); + mlxsw_pci_cmd_fini(mlxsw_pci); iounmap(mlxsw_pci->hw_addr); pci_release_regions(mlxsw_pci->pdev); pci_disable_device(mlxsw_pci->pdev); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 55af877763ed..029ea344ad65 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -978,10 +978,10 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, lossy = !(pfc || pause_en); thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); - mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, &thres_cells); + thres_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, thres_cells); delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc, pause_en); - mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, &delay_cells); + delay_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, delay_cells); total_cells = thres_cells + delay_cells; taken_headroom_cells += total_cells; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 6e87457dd635..3abe3e7d89bc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -374,17 +374,15 @@ mlxsw_sp_port_vlan_find_by_vid(const struct mlxsw_sp_port *mlxsw_sp_port, return NULL; } -static inline void +static inline u32 mlxsw_sp_port_headroom_8x_adjust(const struct mlxsw_sp_port *mlxsw_sp_port, - u16 *p_size) + u32 size_cells) { /* Ports with eight lanes use two headroom buffers between which the * configured headroom size is split. Therefore, multiply the calculated * headroom size by two. */ - if (mlxsw_sp_port->mapping.width != 8) - return; - *p_size *= 2; + return mlxsw_sp_port->mapping.width == 8 ? 
2 * size_cells : size_cells; } enum mlxsw_sp_flood_type { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index f25a8b084b4b..6f84557a5a6f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -312,7 +312,7 @@ static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port) if (i == MLXSW_SP_PB_UNUSED) continue; - mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, &size); + size = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, size); mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size); } mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 770de0222e7b..019ed503aadf 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -6262,7 +6262,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, } fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC); - if (WARN_ON(!fib_work)) + if (!fib_work) return NOTIFY_BAD; fib_work->mlxsw_sp = router->mlxsw_sp; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index f843545d3478..92351a79addc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -782,7 +782,7 @@ mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) speed = 0; buffsize = mlxsw_sp_span_buffsize_get(mlxsw_sp, speed, mtu); - mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, (u16 *) &buffsize); + buffsize = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, buffsize); mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, buffsize); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); } diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h index 628fa9b2f741..373165119850 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.h +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h @@ -297,7 +297,7 @@ struct vxge_hw_fifo_config { * @greedy_return: If Set it forces the device to return absolutely all RxD * that are consumed and still on board when a timer interrupt * triggers. 
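The mlxsw_sp_port_headroom_8x_adjust() conversion above, from a helper that doubled a value through a u16 pointer to one that takes and returns a u32, removes the need for casts such as the (u16 *)&buffsize in spectrum_span.c, which touched only half of a 32-bit value. A minimal user-space sketch of why the return-value form is the safer interface (types and numbers are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Old shape: adjusts in place through a narrow pointer, so a caller holding
 * a 32-bit size has to narrow it first (or cast, which is worse). */
static void adjust_in_place(uint16_t *p_size)
{
	*p_size *= 2;
}

/* New shape: full-width in, full-width out; no narrowing anywhere. */
static uint32_t adjust(uint32_t size_cells, int width)
{
	return width == 8 ? 2 * size_cells : size_cells;
}

int main(void)
{
	uint32_t buffsize = 70000;                 /* does not fit in a u16 */
	uint16_t narrowed = (uint16_t)buffsize;    /* the old interface forces this */

	adjust_in_place(&narrowed);
	printf("old interface: %u (truncated first)\n", (unsigned)narrowed);
	printf("new interface: %u\n", (unsigned)adjust(buffsize, 8));
	return 0;
}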
If Clear, then if the device has already returned - * RxD before current timer interrupt trigerred and after the + * RxD before current timer interrupt triggered and after the * previous timer interrupt triggered, then the device is not * forced to returned the rest of the consumed RxD that it has * on board which account for a byte count less than the one diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index c39327677a7d..bb448c82cdc2 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -861,7 +861,7 @@ static void nfp_flower_clean(struct nfp_app *app) flush_work(&app_priv->cmsg_work); flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app, - nfp_flower_setup_indr_block_cb); + nfp_flower_setup_indr_tc_release); if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM) nfp_flower_qos_cleanup(app); diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 6c3dc3baf387..7f54a620acad 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -459,9 +459,10 @@ int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev, struct tc_cls_matchall_offload *flow); void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb); int nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv, - enum tc_setup_type type, void *type_data); -int nfp_flower_setup_indr_block_cb(enum tc_setup_type type, void *type_data, - void *cb_priv); + enum tc_setup_type type, void *type_data, + void *data, + void (*cleanup)(struct flow_block_cb *block_cb)); +void nfp_flower_setup_indr_tc_release(void *cb_priv); void __nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv); diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 695d24b9dd92..d7340dc09b4c 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -1619,8 +1619,8 @@ nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app, return NULL; } -int nfp_flower_setup_indr_block_cb(enum tc_setup_type type, - void *type_data, void *cb_priv) +static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type, + void *type_data, void *cb_priv) { struct nfp_flower_indr_block_cb_priv *priv = cb_priv; struct flow_cls_offload *flower = type_data; @@ -1637,7 +1637,7 @@ int nfp_flower_setup_indr_block_cb(enum tc_setup_type type, } } -static void nfp_flower_setup_indr_tc_release(void *cb_priv) +void nfp_flower_setup_indr_tc_release(void *cb_priv) { struct nfp_flower_indr_block_cb_priv *priv = cb_priv; @@ -1647,7 +1647,8 @@ static void nfp_flower_setup_indr_tc_release(void *cb_priv) static int nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app, - struct flow_block_offload *f) + struct flow_block_offload *f, void *data, + void (*cleanup)(struct flow_block_cb *block_cb)) { struct nfp_flower_indr_block_cb_priv *cb_priv; struct nfp_flower_priv *priv = app->priv; @@ -1676,9 +1677,10 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app, cb_priv->app = app; list_add(&cb_priv->list, &priv->indr_block_cb_priv); - block_cb = flow_block_cb_alloc(nfp_flower_setup_indr_block_cb, - cb_priv, cb_priv, - nfp_flower_setup_indr_tc_release); + block_cb = 
flow_indr_block_cb_alloc(nfp_flower_setup_indr_block_cb, + cb_priv, cb_priv, + nfp_flower_setup_indr_tc_release, + f, netdev, data, app, cleanup); if (IS_ERR(block_cb)) { list_del(&cb_priv->list); kfree(cb_priv); @@ -1699,7 +1701,7 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app, if (!block_cb) return -ENOENT; - flow_block_cb_remove(block_cb, f); + flow_indr_block_cb_remove(block_cb, f); list_del(&block_cb->driver_list); return 0; default: @@ -1710,7 +1712,9 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app, int nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv, - enum tc_setup_type type, void *type_data) + enum tc_setup_type type, void *type_data, + void *data, + void (*cleanup)(struct flow_block_cb *block_cb)) { if (!nfp_fl_is_netdev_to_offload(netdev)) return -EOPNOTSUPP; @@ -1718,7 +1722,7 @@ nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv, switch (type) { case TC_SETUP_BLOCK: return nfp_flower_setup_indr_tc_block(netdev, cb_priv, - type_data); + type_data, data, cleanup); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h index 32b9d7705404..55cef5b16aa5 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h @@ -147,7 +147,7 @@ struct pch_gbe_regs { #define PCH_GBE_RH_ALM_FULL_8 0x00001000 /* 8 words */ #define PCH_GBE_RH_ALM_FULL_16 0x00002000 /* 16 words */ #define PCH_GBE_RH_ALM_FULL_32 0x00003000 /* 32 words */ -/* RX FIFO Read Triger Threshold */ +/* RX FIFO Read Trigger Threshold */ #define PCH_GBE_RH_RD_TRG_4 0x00000000 /* 4 words */ #define PCH_GBE_RH_RD_TRG_8 0x00000200 /* 8 words */ #define PCH_GBE_RH_RD_TRG_16 0x00000400 /* 16 words */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c index f7e3ce3de04d..e03ea9b18f95 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c @@ -468,12 +468,18 @@ static void ionic_get_ringparam(struct net_device *netdev, ring->rx_pending = lif->nrxq_descs; } +static void ionic_set_ringsize(struct ionic_lif *lif, void *arg) +{ + struct ethtool_ringparam *ring = arg; + + lif->ntxq_descs = ring->tx_pending; + lif->nrxq_descs = ring->rx_pending; +} + static int ionic_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { struct ionic_lif *lif = netdev_priv(netdev); - bool running; - int err; if (ring->rx_mini_pending || ring->rx_jumbo_pending) { netdev_info(netdev, "Changing jumbo or mini descriptors not supported\n"); @@ -491,22 +497,7 @@ static int ionic_set_ringparam(struct net_device *netdev, ring->rx_pending == lif->nrxq_descs) return 0; - err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET); - if (err) - return err; - - running = test_bit(IONIC_LIF_F_UP, lif->state); - if (running) - ionic_stop(netdev); - - lif->ntxq_descs = ring->tx_pending; - lif->nrxq_descs = ring->rx_pending; - - if (running) - ionic_open(netdev); - clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state); - - return 0; + return ionic_reset_queues(lif, ionic_set_ringsize, ring); } static void ionic_get_channels(struct net_device *netdev, @@ -521,12 +512,17 @@ static void ionic_get_channels(struct net_device *netdev, ch->combined_count = lif->nxqs; } +static void ionic_set_queuecount(struct ionic_lif *lif, void *arg) +{ + struct ethtool_channels *ch = arg; + + lif->nxqs = 
ch->combined_count; +} + static int ionic_set_channels(struct net_device *netdev, struct ethtool_channels *ch) { struct ionic_lif *lif = netdev_priv(netdev); - bool running; - int err; if (!ch->combined_count || ch->other_count || ch->rx_count || ch->tx_count) @@ -535,21 +531,7 @@ static int ionic_set_channels(struct net_device *netdev, if (ch->combined_count == lif->nxqs) return 0; - err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET); - if (err) - return err; - - running = test_bit(IONIC_LIF_F_UP, lif->state); - if (running) - ionic_stop(netdev); - - lif->nxqs = ch->combined_count; - - if (running) - ionic_open(netdev); - clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state); - - return 0; + return ionic_reset_queues(lif, ionic_set_queuecount, ch); } static u32 ionic_get_priv_flags(struct net_device *netdev) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 9d8c969f21cb..f49486b6d04d 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -96,7 +96,8 @@ static void ionic_link_status_check(struct ionic_lif *lif) u16 link_status; bool link_up; - if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state)) + if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state) || + test_bit(IONIC_LIF_F_QUEUE_RESET, lif->state)) return; link_status = le16_to_cpu(lif->info->status.link_status); @@ -1245,6 +1246,7 @@ static int ionic_init_nic_features(struct ionic_lif *lif) netdev->hw_features |= netdev->hw_enc_features; netdev->features |= netdev->hw_features; + netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES; netdev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; @@ -1311,7 +1313,7 @@ static int ionic_change_mtu(struct net_device *netdev, int new_mtu) return err; netdev->mtu = new_mtu; - err = ionic_reset_queues(lif); + err = ionic_reset_queues(lif, NULL, NULL); return err; } @@ -1323,7 +1325,7 @@ static void ionic_tx_timeout_work(struct work_struct *ws) netdev_info(lif->netdev, "Tx Timeout recovery\n"); rtnl_lock(); - ionic_reset_queues(lif); + ionic_reset_queues(lif, NULL, NULL); rtnl_unlock(); } @@ -1671,6 +1673,14 @@ int ionic_open(struct net_device *netdev) if (err) goto err_out; + err = netif_set_real_num_tx_queues(netdev, lif->nxqs); + if (err) + goto err_txrx_deinit; + + err = netif_set_real_num_rx_queues(netdev, lif->nxqs); + if (err) + goto err_txrx_deinit; + /* don't start the queues until we have link */ if (netif_carrier_ok(netdev)) { err = ionic_start_queues(lif); @@ -1692,15 +1702,15 @@ static void ionic_stop_queues(struct ionic_lif *lif) if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state)) return; - ionic_txrx_disable(lif); netif_tx_disable(lif->netdev); + ionic_txrx_disable(lif); } int ionic_stop(struct net_device *netdev) { struct ionic_lif *lif = netdev_priv(netdev); - if (!netif_device_present(netdev)) + if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) return 0; ionic_stop_queues(lif); @@ -1978,24 +1988,32 @@ static const struct net_device_ops ionic_netdev_ops = { .ndo_get_vf_stats = ionic_get_vf_stats, }; -int ionic_reset_queues(struct ionic_lif *lif) +int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg) { bool running; int err = 0; - /* Put off the next watchdog timeout */ - netif_trans_update(lif->netdev); - err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET); if (err) return err; running = netif_running(lif->netdev); - if (running) + if (running) { + netif_device_detach(lif->netdev); err = 
ionic_stop(lif->netdev); - if (!err && running) - ionic_open(lif->netdev); + if (err) + goto reset_out; + } + + if (cb) + cb(lif, arg); + + if (running) { + err = ionic_open(lif->netdev); + netif_device_attach(lif->netdev); + } +reset_out: clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state); return err; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h index c3428034a17b..ed126dd74e01 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h @@ -248,6 +248,8 @@ static inline u32 ionic_coal_hw_to_usec(struct ionic *ionic, u32 units) return (units * div) / mult; } +typedef void (*ionic_reset_cb)(struct ionic_lif *lif, void *arg); + void ionic_link_status_check_request(struct ionic_lif *lif); void ionic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *ns); @@ -267,7 +269,7 @@ int ionic_lif_rss_config(struct ionic_lif *lif, u16 types, int ionic_open(struct net_device *netdev); int ionic_stop(struct net_device *netdev); -int ionic_reset_queues(struct ionic_lif *lif); +int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg); static inline void debug_stats_txq_post(struct ionic_qcq *qcq, struct ionic_txq_desc *desc, bool dbell) diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index a49743d56b9c..6c2f9ff4a53e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -876,6 +876,8 @@ struct qed_dev { struct qed_dbg_feature dbg_features[DBG_FEATURE_NUM]; u8 engine_for_debug; bool disable_ilt_dump; + bool dbg_bin_dump; + DECLARE_HASHTABLE(connections, 10); const struct firmware *firmware; diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 7b76667acaba..08ba9d54ab63 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -271,7 +271,7 @@ static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn, vf_tids += segs[NUM_TASK_PF_SEGMENTS].count; } - iids->vf_cids += vf_cids * p_mngr->vf_count; + iids->vf_cids = vf_cids; iids->tids += vf_tids * p_mngr->vf_count; DP_VERBOSE(p_hwfn, QED_MSG_ILT, @@ -465,6 +465,20 @@ static struct qed_ilt_cli_blk *qed_cxt_set_blk(struct qed_ilt_cli_blk *p_blk) return p_blk; } +static void qed_cxt_ilt_blk_reset(struct qed_hwfn *p_hwfn) +{ + struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients; + u32 cli_idx, blk_idx; + + for (cli_idx = 0; cli_idx < MAX_ILT_CLIENTS; cli_idx++) { + for (blk_idx = 0; blk_idx < ILT_CLI_PF_BLOCKS; blk_idx++) + clients[cli_idx].pf_blks[blk_idx].total_size = 0; + + for (blk_idx = 0; blk_idx < ILT_CLI_VF_BLOCKS; blk_idx++) + clients[cli_idx].vf_blks[blk_idx].total_size = 0; + } +} + int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; @@ -484,6 +498,11 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count) p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT); + /* Reset all ILT blocks at the beginning of ILT computing in order + * to prevent memory allocation for irrelevant blocks afterwards. 
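The ionic hunks above route ring-size changes, channel-count changes and the existing MTU/timeout paths through one ionic_reset_queues(lif, cb, arg) helper: stop the queues, let the caller-supplied callback apply its change while everything is quiesced, then restart. A bare-bones user-space sketch of that shape; the real helper also takes the QUEUE_RESET bit and detaches/attaches the netdev, and every name below is made up:

#include <stdbool.h>
#include <stdio.h>

struct lif {
	bool running;
	int ring_size;
	int queue_count;
};

typedef void (*reset_cb)(struct lif *lif, void *arg);

/* Stop, reconfigure via callback, restart. */
static int reset_queues(struct lif *lif, reset_cb cb, void *arg)
{
	bool was_running = lif->running;

	if (was_running)
		lif->running = false;	/* stand-in for ionic_stop() */

	if (cb)
		cb(lif, arg);		/* apply the change while quiesced */

	if (was_running)
		lif->running = true;	/* stand-in for ionic_open() */

	return 0;
}

static void set_ring_size(struct lif *lif, void *arg)
{
	lif->ring_size = *(int *)arg;
}

int main(void)
{
	struct lif lif = { .running = true, .ring_size = 512, .queue_count = 4 };
	int new_size = 1024;

	reset_queues(&lif, set_ring_size, &new_size);
	printf("ring_size=%d running=%d\n", lif.ring_size, lif.running);
	return 0;
}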
+ */ + qed_cxt_ilt_blk_reset(p_hwfn); + DP_VERBOSE(p_hwfn, QED_MSG_ILT, "hwfn [%d] - Set context manager starting line to be 0x%08x\n", p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line); diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 57a0dab88431..3b9bbafafe68 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -5568,7 +5568,8 @@ static const char * const s_status_str[] = { /* DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS */ "The filter/trigger constraint dword offsets are not enabled for recording", - + /* DBG_STATUS_NO_MATCHING_FRAMING_MODE */ + "No matching framing mode", /* DBG_STATUS_VFC_READ_ERROR */ "Error reading from VFC", @@ -7505,6 +7506,12 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn, if (p_hwfn->cdev->print_dbg_data) qed_dbg_print_feature(text_buf, text_size_bytes); + /* Just return the original binary buffer if requested */ + if (p_hwfn->cdev->dbg_bin_dump) { + vfree(text_buf); + return DBG_STATUS_OK; + } + /* Free the old dump_buf and point the dump_buf to the newly allocagted * and formatted text buffer. */ @@ -7732,7 +7739,9 @@ int qed_dbg_mcp_trace_size(struct qed_dev *cdev) #define REGDUMP_HEADER_SIZE_SHIFT 0 #define REGDUMP_HEADER_SIZE_MASK 0xffffff #define REGDUMP_HEADER_FEATURE_SHIFT 24 -#define REGDUMP_HEADER_FEATURE_MASK 0x3f +#define REGDUMP_HEADER_FEATURE_MASK 0x1f +#define REGDUMP_HEADER_BIN_DUMP_SHIFT 29 +#define REGDUMP_HEADER_BIN_DUMP_MASK 0x1 #define REGDUMP_HEADER_OMIT_ENGINE_SHIFT 30 #define REGDUMP_HEADER_OMIT_ENGINE_MASK 0x1 #define REGDUMP_HEADER_ENGINE_SHIFT 31 @@ -7770,6 +7779,7 @@ static u32 qed_calc_regdump_header(struct qed_dev *cdev, feature, feature_size); SET_FIELD(res, REGDUMP_HEADER_FEATURE, feature); + SET_FIELD(res, REGDUMP_HEADER_BIN_DUMP, 1); SET_FIELD(res, REGDUMP_HEADER_OMIT_ENGINE, omit_engine); SET_FIELD(res, REGDUMP_HEADER_ENGINE, engine); @@ -7793,6 +7803,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) omit_engine = 1; mutex_lock(&qed_dbg_lock); + cdev->dbg_bin_dump = true; org_engine = qed_get_debug_engine(cdev); for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) { @@ -7930,6 +7941,10 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) DP_ERR(cdev, "qed_dbg_mcp_trace failed. 
rc = %d\n", rc); } + /* Re-populate nvm attribute info */ + qed_mcp_nvm_info_free(p_hwfn); + qed_mcp_nvm_info_populate(p_hwfn); + /* nvm cfg1 */ rc = qed_dbg_nvm_image(cdev, (u8 *)buffer + offset + @@ -7992,6 +8007,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) QED_NVM_IMAGE_MDUMP, "QED_NVM_IMAGE_MDUMP", rc); } + cdev->dbg_bin_dump = false; mutex_unlock(&qed_dbg_lock); return 0; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 1eebf30fa798..9c26fde663b3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -980,7 +980,7 @@ int qed_llh_add_mac_filter(struct qed_dev *cdev, struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); union qed_llh_filter filter = {}; - u8 filter_idx, abs_ppfid; + u8 filter_idx, abs_ppfid = 0; u32 high, low, ref_cnt; int rc = 0; @@ -1368,6 +1368,8 @@ static void qed_dbg_user_data_free(struct qed_hwfn *p_hwfn) void qed_resc_free(struct qed_dev *cdev) { + struct qed_rdma_info *rdma_info; + struct qed_hwfn *p_hwfn; int i; if (IS_VF(cdev)) { @@ -1385,7 +1387,8 @@ void qed_resc_free(struct qed_dev *cdev) qed_llh_free(cdev); for_each_hwfn(cdev, i) { - struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + p_hwfn = cdev->hwfns + i; + rdma_info = p_hwfn->p_rdma_info; qed_cxt_mngr_free(p_hwfn); qed_qm_info_free(p_hwfn); @@ -1404,8 +1407,10 @@ void qed_resc_free(struct qed_dev *cdev) qed_ooo_free(p_hwfn); } - if (QED_IS_RDMA_PERSONALITY(p_hwfn)) + if (QED_IS_RDMA_PERSONALITY(p_hwfn) && rdma_info) { + qed_spq_unregister_async_cb(p_hwfn, rdma_info->proto); qed_rdma_info_free(p_hwfn); + } qed_iov_free(p_hwfn); qed_l2_free(p_hwfn); @@ -4467,12 +4472,6 @@ static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) return 0; } -static void qed_nvm_info_free(struct qed_hwfn *p_hwfn) -{ - kfree(p_hwfn->nvm_info.image_att); - p_hwfn->nvm_info.image_att = NULL; -} - static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, void __iomem *p_regview, void __iomem *p_doorbells, @@ -4557,7 +4556,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, return rc; err3: if (IS_LEAD_HWFN(p_hwfn)) - qed_nvm_info_free(p_hwfn); + qed_mcp_nvm_info_free(p_hwfn); err2: if (IS_LEAD_HWFN(p_hwfn)) qed_iov_free_hw_info(p_hwfn->cdev); @@ -4618,7 +4617,7 @@ int qed_hw_prepare(struct qed_dev *cdev, if (rc) { if (IS_PF(cdev)) { qed_init_free(p_hwfn); - qed_nvm_info_free(p_hwfn); + qed_mcp_nvm_info_free(p_hwfn); qed_mcp_free(p_hwfn); qed_hw_hwfn_free(p_hwfn); } @@ -4652,7 +4651,7 @@ void qed_hw_remove(struct qed_dev *cdev) qed_iov_free_hw_info(cdev); - qed_nvm_info_free(p_hwfn); + qed_mcp_nvm_info_free(p_hwfn); } static void qed_chain_free_next_ptr(struct qed_dev *cdev, diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index d2fe61a5cf56..5409a2da6106 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -2836,8 +2836,6 @@ int qed_iwarp_stop(struct qed_hwfn *p_hwfn) if (rc) return rc; - qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP); - return qed_iwarp_ll2_stop(p_hwfn); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 9624616806e7..0fd4520d0666 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -3280,6 +3280,13 @@ err0: return rc; } +void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn) +{ + 
kfree(p_hwfn->nvm_info.image_att); + p_hwfn->nvm_info.image_att = NULL; + p_hwfn->nvm_info.valid = false; +} + int qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, enum qed_nvm_images image_id, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 5750b4c5ef63..12a705ed4bac 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -1221,6 +1221,13 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn); /** + * @brief Delete nvm info shadow in the given hardware function + * + * @param p_hwfn + */ +void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn); + +/** * @brief Get the engine affinity configuration. * * @param p_hwfn diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index 4566815f7b87..7271dd7166e5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -113,7 +113,6 @@ void qed_roce_stop(struct qed_hwfn *p_hwfn) break; } } - qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ROCE); } static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid, diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 856051f50eb7..adc2c8f3d48e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -81,12 +81,17 @@ static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status) mutex_unlock(&(p_hwfn->vf_iov_info->mutex)); } +#define QED_VF_CHANNEL_USLEEP_ITERATIONS 90 +#define QED_VF_CHANNEL_USLEEP_DELAY 100 +#define QED_VF_CHANNEL_MSLEEP_ITERATIONS 10 +#define QED_VF_CHANNEL_MSLEEP_DELAY 25 + static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size) { union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request; struct ustorm_trigger_vf_zone trigger; struct ustorm_vf_zone *zone_data; - int rc = 0, time = 100; + int iter, rc = 0; zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B; @@ -126,11 +131,19 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size) REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger)); /* When PF would be done with the response, it would write back to the - * `done' address. Poll until then. + * `done' address from a coherent DMA zone. Poll until then. 
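The qed_send_msg2pf() rework here polls the PF's `done' flag in two stages: roughly 9 ms of tight 100 us waits for the common fast case, then up to about 250 ms of coarse 25 ms sleeps, with dma_rmb() after each check because the flag is written via DMA. A user-space approximation of that staging, using usleep() where the driver busy-waits with udelay() and leaving the barrier out; the constants mirror the QED_VF_CHANNEL_* values added above:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define USLEEP_ITERATIONS 90	/* ~9 ms of tight polling */
#define USLEEP_DELAY_US   100
#define MSLEEP_ITERATIONS 10	/* then up to ~250 ms of relaxed polling */
#define MSLEEP_DELAY_MS   25

/* Two-stage wait on a completion flag set by another agent. */
static bool poll_done(volatile int *done)
{
	int iter;

	for (iter = USLEEP_ITERATIONS; !*done && iter; iter--)
		usleep(USLEEP_DELAY_US);

	for (iter = MSLEEP_ITERATIONS; !*done && iter; iter--)
		usleep(MSLEEP_DELAY_MS * 1000);

	return *done != 0;
}

int main(void)
{
	volatile int done = 1;	/* pretend the PF already responded */

	printf("completed: %d\n", poll_done(&done) ? 1 : 0);
	return 0;
}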
*/ - while ((!*done) && time) { - msleep(25); - time--; + + iter = QED_VF_CHANNEL_USLEEP_ITERATIONS; + while (!*done && iter--) { + udelay(QED_VF_CHANNEL_USLEEP_DELAY); + dma_rmb(); + } + + iter = QED_VF_CHANNEL_MSLEEP_ITERATIONS; + while (!*done && iter--) { + msleep(QED_VF_CHANNEL_MSLEEP_DELAY); + dma_rmb(); } if (!*done) { diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 756c05eb96f3..29e285430f99 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1229,7 +1229,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, /* PTP not supported on VFs */ if (!is_vf) - qede_ptp_enable(edev, (mode == QEDE_PROBE_NORMAL)); + qede_ptp_enable(edev); edev->ops->register_ops(cdev, &qede_ll_ops, edev); @@ -1318,6 +1318,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) if (system_state == SYSTEM_POWER_OFF) return; qed_ops->common->remove(cdev); + edev->cdev = NULL; /* Since this can happen out-of-sync with other flows, * don't release the netdevice until after slowpath stop diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index 4c7f7a7fc151..cd5841a9415e 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c @@ -412,6 +412,7 @@ void qede_ptp_disable(struct qede_dev *edev) if (ptp->tx_skb) { dev_kfree_skb_any(ptp->tx_skb); ptp->tx_skb = NULL; + clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags); } /* Disable PTP in HW */ @@ -423,7 +424,7 @@ void qede_ptp_disable(struct qede_dev *edev) edev->ptp = NULL; } -static int qede_ptp_init(struct qede_dev *edev, bool init_tc) +static int qede_ptp_init(struct qede_dev *edev) { struct qede_ptp *ptp; int rc; @@ -444,25 +445,19 @@ static int qede_ptp_init(struct qede_dev *edev, bool init_tc) /* Init work queue for Tx timestamping */ INIT_WORK(&ptp->work, qede_ptp_task); - /* Init cyclecounter and timecounter. This is done only in the first - * load. If done in every load, PTP application will fail when doing - * unload / load (e.g. MTU change) while it is running. 
- */ - if (init_tc) { - memset(&ptp->cc, 0, sizeof(ptp->cc)); - ptp->cc.read = qede_ptp_read_cc; - ptp->cc.mask = CYCLECOUNTER_MASK(64); - ptp->cc.shift = 0; - ptp->cc.mult = 1; - - timecounter_init(&ptp->tc, &ptp->cc, - ktime_to_ns(ktime_get_real())); - } + /* Init cyclecounter and timecounter */ + memset(&ptp->cc, 0, sizeof(ptp->cc)); + ptp->cc.read = qede_ptp_read_cc; + ptp->cc.mask = CYCLECOUNTER_MASK(64); + ptp->cc.shift = 0; + ptp->cc.mult = 1; - return rc; + timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real())); + + return 0; } -int qede_ptp_enable(struct qede_dev *edev, bool init_tc) +int qede_ptp_enable(struct qede_dev *edev) { struct qede_ptp *ptp; int rc; @@ -483,7 +478,7 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc) edev->ptp = ptp; - rc = qede_ptp_init(edev, init_tc); + rc = qede_ptp_init(edev); if (rc) goto err1; diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.h b/drivers/net/ethernet/qlogic/qede/qede_ptp.h index 691a14c4b2c5..89c7f3cf3ee2 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.h +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.h @@ -41,7 +41,7 @@ void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb); void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb); int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req); void qede_ptp_disable(struct qede_dev *edev); -int qede_ptp_enable(struct qede_dev *edev, bool init_tc); +int qede_ptp_enable(struct qede_dev *edev); int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *ts); static inline void qede_ptp_record_rx_ts(struct qede_dev *edev, diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c index 2d873ae8a234..668ccc9d49f8 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c +++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c @@ -105,6 +105,7 @@ static void qede_rdma_destroy_wq(struct qede_dev *edev) qede_rdma_cleanup_event(edev); destroy_workqueue(edev->rdma_info.rdma_wq); + edev->rdma_info.rdma_wq = NULL; } int qede_rdma_dev_add(struct qede_dev *edev, bool recovery) @@ -325,7 +326,7 @@ static void qede_rdma_add_event(struct qede_dev *edev, if (edev->rdma_info.exp_recovery) return; - if (!edev->rdma_info.qedr_dev) + if (!edev->rdma_info.qedr_dev || !edev->rdma_info.rdma_wq) return; /* We don't want the cleanup flow to start while we're allocating and diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index 40efe60eff8d..fcdecddb2812 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -47,15 +47,23 @@ static int rmnet_unregister_real_device(struct net_device *real_dev) return 0; } -static int rmnet_register_real_device(struct net_device *real_dev) +static int rmnet_register_real_device(struct net_device *real_dev, + struct netlink_ext_ack *extack) { struct rmnet_port *port; int rc, entry; ASSERT_RTNL(); - if (rmnet_is_real_dev_registered(real_dev)) + if (rmnet_is_real_dev_registered(real_dev)) { + port = rmnet_get_port_rtnl(real_dev); + if (port->rmnet_mode != RMNET_EPMODE_VND) { + NL_SET_ERR_MSG_MOD(extack, "bridge device already exists"); + return -EINVAL; + } + return 0; + } port = kzalloc(sizeof(*port), GFP_KERNEL); if (!port) @@ -133,7 +141,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]); - err = rmnet_register_real_device(real_dev); + err = rmnet_register_real_device(real_dev, 
extack); if (err) goto err0; @@ -422,7 +430,7 @@ int rmnet_add_bridge(struct net_device *rmnet_dev, } if (port->rmnet_mode != RMNET_EPMODE_VND) { - NL_SET_ERR_MSG_MOD(extack, "bridge device already exists"); + NL_SET_ERR_MSG_MOD(extack, "more than one bridge dev attached"); return -EINVAL; } @@ -433,7 +441,7 @@ int rmnet_add_bridge(struct net_device *rmnet_dev, return -EBUSY; } - err = rmnet_register_real_device(slave_dev); + err = rmnet_register_real_device(slave_dev, extack); if (err) return -EBUSY; diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index dad84ecf5a77..b660ddbe4025 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -2114,8 +2114,11 @@ static void rtl_release_firmware(struct rtl8169_private *tp) void r8169_apply_firmware(struct rtl8169_private *tp) { /* TODO: release firmware if rtl_fw_write_firmware signals failure. */ - if (tp->rtl_fw) + if (tp->rtl_fw) { rtl_fw_write_firmware(tp, tp->rtl_fw); + /* At least one firmware doesn't reset tp->ocp_base. */ + tp->ocp_base = OCP_STD_PHY_BASE; + } } static void rtl8168_config_eee_mac(struct rtl8169_private *tp) diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 328bc38848bb..0f366cc50b74 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -1044,8 +1044,9 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget) skb->ip_summed = CHECKSUM_UNNECESSARY; next: - if ((skb && napi_gro_receive(&priv->napi, skb) != GRO_DROP) || - xdp_result) { + if (skb) + napi_gro_receive(&priv->napi, skb); + if (skb || xdp_result) { ndev->stats.rx_packets++; ndev->stats.rx_bytes += xdp.data_end - xdp.data; } diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 75266580b586..4661ef865807 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1649,6 +1649,7 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[], geneve->collect_md = metadata; geneve->use_udp6_rx_checksums = use_udp6_rx_checksums; geneve->ttl_inherit = ttl_inherit; + geneve->df = df; geneve_unquiesce(geneve, gs4, gs6); return 0; diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c index 55226b264e3c..ac7e5a04c8ac 100644 --- a/drivers/net/ipa/gsi.c +++ b/drivers/net/ipa/gsi.c @@ -500,6 +500,13 @@ static int gsi_channel_stop_command(struct gsi_channel *channel) int ret; state = gsi_channel_state(channel); + + /* Channel could have entered STOPPED state since last call + * if it timed out. If so, we're done. + */ + if (state == GSI_CHANNEL_STATE_STOPPED) + return 0; + if (state != GSI_CHANNEL_STATE_STARTED && state != GSI_CHANNEL_STATE_STOP_IN_PROC) return -EINVAL; @@ -789,20 +796,11 @@ int gsi_channel_start(struct gsi *gsi, u32 channel_id) int gsi_channel_stop(struct gsi *gsi, u32 channel_id) { struct gsi_channel *channel = &gsi->channel[channel_id]; - enum gsi_channel_state state; u32 retries; int ret; gsi_channel_freeze(channel); - /* Channel could have entered STOPPED state since last call if the - * STOP command timed out. We won't stop a channel if stopping it - * was successful previously (so we still want the freeze above). - */ - state = gsi_channel_state(channel); - if (state == GSI_CHANNEL_STATE_STOPPED) - return 0; - /* RX channels might require a little time to enter STOPPED state */ retries = channel->toward_ipa ? 
0 : GSI_CHANNEL_STOP_RX_RETRIES; diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c index c9ab865e7290..d92dd3f09b73 100644 --- a/drivers/net/ipa/ipa_cmd.c +++ b/drivers/net/ipa/ipa_cmd.c @@ -586,6 +586,21 @@ u32 ipa_cmd_tag_process_count(void) return 4; } +void ipa_cmd_tag_process(struct ipa *ipa) +{ + u32 count = ipa_cmd_tag_process_count(); + struct gsi_trans *trans; + + trans = ipa_cmd_trans_alloc(ipa, count); + if (trans) { + ipa_cmd_tag_process_add(trans); + gsi_trans_commit_wait(trans); + } else { + dev_err(&ipa->pdev->dev, + "error allocating %u entry tag transaction\n", count); + } +} + static struct ipa_cmd_info * ipa_cmd_info_alloc(struct ipa_endpoint *endpoint, u32 tre_count) { diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h index e440aa69c8b5..1a646e0264a0 100644 --- a/drivers/net/ipa/ipa_cmd.h +++ b/drivers/net/ipa/ipa_cmd.h @@ -172,6 +172,14 @@ void ipa_cmd_tag_process_add(struct gsi_trans *trans); u32 ipa_cmd_tag_process_count(void); /** + * ipa_cmd_tag_process() - Perform a tag process + * + * @Return: The number of elements to allocate in a transaction + * to hold tag process commands + */ +void ipa_cmd_tag_process(struct ipa *ipa); + +/** * ipa_cmd_trans_alloc() - Allocate a transaction for the command TX endpoint * @ipa: IPA pointer * @tre_count: Number of elements in the transaction diff --git a/drivers/net/ipa/ipa_data-sdm845.c b/drivers/net/ipa/ipa_data-sdm845.c index 52d4b84e0dac..de2768d71ab5 100644 --- a/drivers/net/ipa/ipa_data-sdm845.c +++ b/drivers/net/ipa/ipa_data-sdm845.c @@ -44,7 +44,6 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .endpoint = { .seq_type = IPA_SEQ_INVALID, .config = { - .checksum = true, .aggregation = true, .status_enable = true, .rx = { diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 9f50d0d11704..9e58e495d373 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -1450,6 +1450,8 @@ void ipa_endpoint_suspend(struct ipa *ipa) if (ipa->modem_netdev) ipa_modem_suspend(ipa->modem_netdev); + ipa_cmd_tag_process(ipa); + ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); } diff --git a/drivers/net/ipa/ipa_gsi.c b/drivers/net/ipa/ipa_gsi.c index dc4a5c2196ae..d323adb03383 100644 --- a/drivers/net/ipa/ipa_gsi.c +++ b/drivers/net/ipa/ipa_gsi.c @@ -6,6 +6,7 @@ #include <linux/types.h> +#include "ipa_gsi.h" #include "gsi_trans.h" #include "ipa.h" #include "ipa_endpoint.h" diff --git a/drivers/net/ipa/ipa_gsi.h b/drivers/net/ipa/ipa_gsi.h index 3cf18600c68e..0a40f3dc55fc 100644 --- a/drivers/net/ipa/ipa_gsi.h +++ b/drivers/net/ipa/ipa_gsi.h @@ -8,7 +8,9 @@ #include <linux/types.h> +struct gsi; struct gsi_trans; +struct ipa_gsi_endpoint_data; /** * ipa_gsi_trans_complete() - GSI transaction completion callback diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c index 03a1d0e55964..73413371e3d3 100644 --- a/drivers/net/ipa/ipa_qmi_msg.c +++ b/drivers/net/ipa/ipa_qmi_msg.c @@ -119,7 +119,7 @@ struct qmi_elem_info ipa_driver_init_complete_rsp_ei[] = { sizeof_field(struct ipa_driver_init_complete_rsp, rsp), .tlv_type = 0x02, - .elem_size = offsetof(struct ipa_driver_init_complete_rsp, + .offset = offsetof(struct ipa_driver_init_complete_rsp, rsp), .ei_array = qmi_response_type_v01_ei, }, @@ -137,7 +137,7 @@ struct qmi_elem_info ipa_init_complete_ind_ei[] = { sizeof_field(struct ipa_init_complete_ind, status), .tlv_type = 0x02, - 
.elem_size = offsetof(struct ipa_init_complete_ind, + .offset = offsetof(struct ipa_init_complete_ind, status), .ei_array = qmi_response_type_v01_ei, }, @@ -218,7 +218,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = { sizeof_field(struct ipa_init_modem_driver_req, platform_type_valid), .tlv_type = 0x10, - .elem_size = offsetof(struct ipa_init_modem_driver_req, + .offset = offsetof(struct ipa_init_modem_driver_req, platform_type_valid), }, { diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index e56547bfdac9..9159846b8b93 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -4052,9 +4052,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev, return err; netdev_lockdep_set_classes(dev); - lockdep_set_class_and_subclass(&dev->addr_list_lock, - &macsec_netdev_addr_lock_key, - dev->lower_level); + lockdep_set_class(&dev->addr_list_lock, + &macsec_netdev_addr_lock_key); err = netdev_upper_dev_link(real_dev, dev, extack); if (err < 0) diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 6a6cc9f75307..4942f6112e51 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -880,9 +880,8 @@ static struct lock_class_key macvlan_netdev_addr_lock_key; static void macvlan_set_lockdep_class(struct net_device *dev) { netdev_lockdep_set_classes(dev); - lockdep_set_class_and_subclass(&dev->addr_list_lock, - &macvlan_netdev_addr_lock_key, - dev->lower_level); + lockdep_set_class(&dev->addr_list_lock, + &macvlan_netdev_addr_lock_key); } static int macvlan_init(struct net_device *dev) diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index f25702386d83..e351d65533aa 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -480,8 +480,7 @@ config MICROCHIP_T1_PHY config MICROSEMI_PHY tristate "Microsemi PHYs" depends on MACSEC || MACSEC=n - select CRYPTO_AES - select CRYPTO_ECB + select CRYPTO_LIB_AES if MACSEC help Currently supports VSC8514, VSC8530, VSC8531, VSC8540 and VSC8541 PHYs diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c index b4d3dc4068e2..d53ca884b5c9 100644 --- a/drivers/net/phy/mscc/mscc_macsec.c +++ b/drivers/net/phy/mscc/mscc_macsec.c @@ -10,7 +10,7 @@ #include <linux/phy.h> #include <dt-bindings/net/mscc-phy-vsc8531.h> -#include <crypto/skcipher.h> +#include <crypto/aes.h> #include <net/macsec.h> @@ -500,39 +500,17 @@ static u32 vsc8584_macsec_flow_context_id(struct macsec_flow *flow) static int vsc8584_macsec_derive_key(const u8 key[MACSEC_KEYID_LEN], u16 key_len, u8 hkey[16]) { - struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0); - struct skcipher_request *req = NULL; - struct scatterlist src, dst; - DECLARE_CRYPTO_WAIT(wait); - u32 input[4] = {0}; + const u8 input[AES_BLOCK_SIZE] = {0}; + struct crypto_aes_ctx ctx; int ret; - if (IS_ERR(tfm)) - return PTR_ERR(tfm); - - req = skcipher_request_alloc(tfm, GFP_KERNEL); - if (!req) { - ret = -ENOMEM; - goto out; - } - - skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | - CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done, - &wait); - ret = crypto_skcipher_setkey(tfm, key, key_len); - if (ret < 0) - goto out; - - sg_init_one(&src, input, 16); - sg_init_one(&dst, hkey, 16); - skcipher_request_set_crypt(req, &src, &dst, 16, NULL); - - ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); + ret = aes_expandkey(&ctx, key, key_len); + if (ret) + return ret; -out: - skcipher_request_free(req); - crypto_free_skcipher(tfm); - return ret; + aes_encrypt(&ctx, hkey, input); + memzero_explicit(&ctx, 
sizeof(ctx)); + return 0; } static int vsc8584_macsec_transformation(struct phy_device *phydev, diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 1de3938628f4..56cfae950472 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -840,7 +840,7 @@ static void phy_error(struct phy_device *phydev) * phy_disable_interrupts - Disable the PHY interrupts from the PHY side * @phydev: target phy_device struct */ -static int phy_disable_interrupts(struct phy_device *phydev) +int phy_disable_interrupts(struct phy_device *phydev) { int err; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 04946de74fa0..b4978c5fb2ca 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -794,8 +794,10 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id, /* Grab the bits from PHYIR2, and put them in the lower half */ phy_reg = mdiobus_read(bus, addr, MII_PHYSID2); - if (phy_reg < 0) - return -EIO; + if (phy_reg < 0) { + /* returning -ENODEV doesn't stop bus scanning */ + return (phy_reg == -EIO || phy_reg == -ENODEV) ? -ENODEV : -EIO; + } *phy_id |= phy_reg; @@ -1090,6 +1092,10 @@ int phy_init_hw(struct phy_device *phydev) if (ret < 0) return ret; + ret = phy_disable_interrupts(phydev); + if (ret) + return ret; + if (phydev->drv->config_init) ret = phydev->drv->config_init(phydev); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 0ab65fb75258..3b7c70e6c5dd 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1463,6 +1463,8 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl, struct ethtool_pauseparam *pause) { struct phylink_link_state *config = &pl->link_config; + bool manual_changed; + int pause_state; ASSERT_RTNL(); @@ -1477,15 +1479,15 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl, !pause->autoneg && pause->rx_pause != pause->tx_pause) return -EINVAL; - mutex_lock(&pl->state_mutex); - config->pause = 0; + pause_state = 0; if (pause->autoneg) - config->pause |= MLO_PAUSE_AN; + pause_state |= MLO_PAUSE_AN; if (pause->rx_pause) - config->pause |= MLO_PAUSE_RX; + pause_state |= MLO_PAUSE_RX; if (pause->tx_pause) - config->pause |= MLO_PAUSE_TX; + pause_state |= MLO_PAUSE_TX; + mutex_lock(&pl->state_mutex); /* * See the comments for linkmode_set_pause(), wrt the deficiencies * with the current implementation. A solution to this issue would @@ -1502,18 +1504,35 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl, linkmode_set_pause(config->advertising, pause->tx_pause, pause->rx_pause); - /* If we have a PHY, phylib will call our link state function if the - * mode has changed, which will trigger a resolve and update the MAC - * configuration. + manual_changed = (config->pause ^ pause_state) & MLO_PAUSE_AN || + (!(pause_state & MLO_PAUSE_AN) && + (config->pause ^ pause_state) & MLO_PAUSE_TXRX_MASK); + + config->pause = pause_state; + + if (!pl->phydev && !test_bit(PHYLINK_DISABLE_STOPPED, + &pl->phylink_disable_state)) + phylink_pcs_config(pl, true, &pl->link_config); + + mutex_unlock(&pl->state_mutex); + + /* If we have a PHY, a change of the pause frame advertisement will + * cause phylib to renegotiate (if AN is enabled) which will in turn + * call our phylink_phy_change() and trigger a resolve. Note that + * we can't hold our state mutex while calling phy_set_asym_pause(). 
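In the phylink_ethtool_set_pauseparam() rework here, whether a resolve must be forced is decided by XOR-ing the old and new pause state: it counts as a manual change if the autoneg bit itself flipped, or if autoneg is now off and the tx/rx bits differ. The test is easy to exercise in isolation; the flag values below are illustrative stand-ins, not phylink's MLO_PAUSE_* constants:

#include <stdbool.h>
#include <stdio.h>

#define PAUSE_AN  0x1
#define PAUSE_RX  0x2
#define PAUSE_TX  0x4
#define PAUSE_TXRX_MASK (PAUSE_RX | PAUSE_TX)

/* True when the manual pause configuration changed. */
static bool manual_pause_changed(unsigned int old_state, unsigned int new_state)
{
	return ((old_state ^ new_state) & PAUSE_AN) ||
	       (!(new_state & PAUSE_AN) &&
		((old_state ^ new_state) & PAUSE_TXRX_MASK));
}

int main(void)
{
	/* autoneg stays on, only rx/tx advertisement changes: no forced resolve */
	printf("%d\n", manual_pause_changed(PAUSE_AN | PAUSE_RX, PAUSE_AN | PAUSE_TX));
	/* autoneg off and tx/rx flipped: resolve needed */
	printf("%d\n", manual_pause_changed(PAUSE_RX, PAUSE_TX));
	return 0;
}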
*/ - if (pl->phydev) { + if (pl->phydev) phy_set_asym_pause(pl->phydev, pause->rx_pause, pause->tx_pause); - } else if (!test_bit(PHYLINK_DISABLE_STOPPED, - &pl->phylink_disable_state)) { - phylink_pcs_config(pl, true, &pl->link_config); + + /* If the manual pause settings changed, make sure we trigger a + * resolve to update their state; we can not guarantee that the + * link will cycle. + */ + if (manual_changed) { + pl->mac_link_dropped = true; + phylink_run_resolve(pl); } - mutex_unlock(&pl->state_mutex); return 0; } diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index 93da7d3d0954..74568ae16125 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -122,10 +122,13 @@ static int lan87xx_read_status(struct phy_device *phydev) if (rc < 0) return rc; - /* Wait max 640 ms to detect energy */ - phy_read_poll_timeout(phydev, MII_LAN83C185_CTRL_STATUS, rc, - rc & MII_LAN83C185_ENERGYON, 10000, - 640000, true); + /* Wait max 640 ms to detect energy and the timeout is not + * an actual error. + */ + read_poll_timeout(phy_read, rc, + rc & MII_LAN83C185_ENERGYON || rc < 0, + 10000, 640000, true, phydev, + MII_LAN83C185_CTRL_STATUS); if (rc < 0) return rc; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 858b012074bd..7adeb91bd368 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -62,6 +62,7 @@ #include <net/rtnetlink.h> #include <net/sock.h> #include <net/xdp.h> +#include <net/ip_tunnels.h> #include <linux/seq_file.h> #include <linux/uio.h> #include <linux/skb_array.h> @@ -1351,6 +1352,7 @@ static void tun_net_init(struct net_device *dev) switch (tun->flags & TUN_TYPE_MASK) { case IFF_TUN: dev->netdev_ops = &tun_netdev_ops; + dev->header_ops = &ip_tunnel_header_ops; /* Point-to-Point TUN Device */ dev->hard_header_len = 0; diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 950711448f39..a38e868e44d4 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1491,10 +1491,10 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb) } if (pkt_cnt == 0) { - /* Skip IP alignment psudo header */ - skb_pull(skb, 2); skb->len = pkt_len; - skb_set_tail_pointer(skb, pkt_len); + /* Skip IP alignment pseudo header */ + skb_pull(skb, 2); + skb_set_tail_pointer(skb, skb->len); skb->truesize = pkt_len + sizeof(struct sk_buff); ax88179_rx_checksum(skb, pkt_hdr); return 1; @@ -1503,8 +1503,9 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb) ax_skb = skb_clone(skb, GFP_ATOMIC); if (ax_skb) { ax_skb->len = pkt_len; - ax_skb->data = skb->data + 2; - skb_set_tail_pointer(ax_skb, pkt_len); + /* Skip IP alignment pseudo header */ + skb_pull(ax_skb, 2); + skb_set_tail_pointer(ax_skb, ax_skb->len); ax_skb->truesize = pkt_len + sizeof(struct sk_buff); ax88179_rx_checksum(ax_skb, pkt_hdr); usbnet_skb_return(dev, ax_skb); diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 31b1d4b959f6..07c42c0719f5 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1370,6 +1370,7 @@ static const struct usb_device_id products[] = { {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */ + {QMI_QUIRK_SET_DTR(0x2c7c, 0x0195, 4)}, /* Quectel EG95 */ {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ {QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */ {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* 
Foxconn T77W968 LTE */ diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 355be77f4241..bb4ccbda031a 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -1287,11 +1287,14 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) /* Init all registers */ ret = smsc95xx_reset(dev); + if (ret) + goto free_pdata; /* detect device revision as different features may be available */ ret = smsc95xx_read_reg(dev, ID_REV, &val); if (ret < 0) - return ret; + goto free_pdata; + val >>= 16; pdata->chip_id = val; pdata->mdix_ctrl = get_mdix_status(dev->net); @@ -1317,6 +1320,10 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY); return 0; + +free_pdata: + kfree(pdata); + return ret; } static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf) @@ -1324,7 +1331,7 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf) struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); if (pdata) { - cancel_delayed_work(&pdata->carrier_check); + cancel_delayed_work_sync(&pdata->carrier_check); netif_dbg(dev, ifdown, dev->net, "free pdata\n"); kfree(pdata); pdata = NULL; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index e8085ab6d484..89d85dcb200e 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1380,6 +1380,8 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, struct vxlan_rdst *rd; if (rcu_access_pointer(f->nh)) { + if (*idx < cb->args[2]) + goto skip_nh; err = vxlan_fdb_info(skb, vxlan, f, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, @@ -1387,6 +1389,8 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, NLM_F_MULTI, NULL); if (err < 0) goto out; +skip_nh: + *idx += 1; continue; } diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index e30d91a38cfb..284832314f31 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -303,7 +303,6 @@ static void lapbeth_setup(struct net_device *dev) dev->netdev_ops = &lapbeth_netdev_ops; dev->needs_free_netdev = true; dev->type = ARPHRD_X25; - dev->hard_header_len = 3; dev->mtu = 1000; dev->addr_len = 0; } @@ -324,6 +323,14 @@ static int lapbeth_new_device(struct net_device *dev) if (!ndev) goto out; + /* When transmitting data: + * first this driver removes a pseudo header of 1 byte, + * then the lapb module prepends an LAPB header of at most 3 bytes, + * then this driver prepends a length field of 2 bytes, + * then the underlying Ethernet device prepends its own header. 
+ */ + ndev->hard_header_len = -1 + 3 + 2 + dev->hard_header_len; + lapbeth = netdev_priv(ndev); lapbeth->axdev = ndev; diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c index 3ac3f8570ca1..c9f65e96ccb0 100644 --- a/drivers/net/wireguard/device.c +++ b/drivers/net/wireguard/device.c @@ -45,17 +45,18 @@ static int wg_open(struct net_device *dev) if (dev_v6) dev_v6->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_NONE; + mutex_lock(&wg->device_update_lock); ret = wg_socket_init(wg, wg->incoming_port); if (ret < 0) - return ret; - mutex_lock(&wg->device_update_lock); + goto out; list_for_each_entry(peer, &wg->peer_list, peer_list) { wg_packet_send_staged_packets(peer); if (peer->persistent_keepalive_interval) wg_packet_send_keepalive(peer); } +out: mutex_unlock(&wg->device_update_lock); - return 0; + return ret; } #ifdef CONFIG_PM_SLEEP @@ -225,6 +226,7 @@ static void wg_destruct(struct net_device *dev) list_del(&wg->device_list); rtnl_unlock(); mutex_lock(&wg->device_update_lock); + rcu_assign_pointer(wg->creating_net, NULL); wg->incoming_port = 0; wg_socket_reinit(wg, NULL, NULL); /* The final references are cleared in the below calls to destroy_workqueue. */ @@ -240,13 +242,11 @@ static void wg_destruct(struct net_device *dev) skb_queue_purge(&wg->incoming_handshakes); free_percpu(dev->tstats); free_percpu(wg->incoming_handshakes_worker); - if (wg->have_creating_net_ref) - put_net(wg->creating_net); kvfree(wg->index_hashtable); kvfree(wg->peer_hashtable); mutex_unlock(&wg->device_update_lock); - pr_debug("%s: Interface deleted\n", dev->name); + pr_debug("%s: Interface destroyed\n", dev->name); free_netdev(dev); } @@ -262,6 +262,7 @@ static void wg_setup(struct net_device *dev) max(sizeof(struct ipv6hdr), sizeof(struct iphdr)); dev->netdev_ops = &netdev_ops; + dev->header_ops = &ip_tunnel_header_ops; dev->hard_header_len = 0; dev->addr_len = 0; dev->needed_headroom = DATA_PACKET_HEAD_ROOM; @@ -292,7 +293,7 @@ static int wg_newlink(struct net *src_net, struct net_device *dev, struct wg_device *wg = netdev_priv(dev); int ret = -ENOMEM; - wg->creating_net = src_net; + rcu_assign_pointer(wg->creating_net, src_net); init_rwsem(&wg->static_identity.lock); mutex_init(&wg->socket_update_lock); mutex_init(&wg->device_update_lock); @@ -393,30 +394,26 @@ static struct rtnl_link_ops link_ops __read_mostly = { .newlink = wg_newlink, }; -static int wg_netdevice_notification(struct notifier_block *nb, - unsigned long action, void *data) +static void wg_netns_pre_exit(struct net *net) { - struct net_device *dev = ((struct netdev_notifier_info *)data)->dev; - struct wg_device *wg = netdev_priv(dev); - - ASSERT_RTNL(); - - if (action != NETDEV_REGISTER || dev->netdev_ops != &netdev_ops) - return 0; + struct wg_device *wg; - if (dev_net(dev) == wg->creating_net && wg->have_creating_net_ref) { - put_net(wg->creating_net); - wg->have_creating_net_ref = false; - } else if (dev_net(dev) != wg->creating_net && - !wg->have_creating_net_ref) { - wg->have_creating_net_ref = true; - get_net(wg->creating_net); + rtnl_lock(); + list_for_each_entry(wg, &device_list, device_list) { + if (rcu_access_pointer(wg->creating_net) == net) { + pr_debug("%s: Creating namespace exiting\n", wg->dev->name); + netif_carrier_off(wg->dev); + mutex_lock(&wg->device_update_lock); + rcu_assign_pointer(wg->creating_net, NULL); + wg_socket_reinit(wg, NULL, NULL); + mutex_unlock(&wg->device_update_lock); + } } - return 0; + rtnl_unlock(); } -static struct notifier_block netdevice_notifier = { - .notifier_call = 
wg_netdevice_notification +static struct pernet_operations pernet_ops = { + .pre_exit = wg_netns_pre_exit }; int __init wg_device_init(void) @@ -429,18 +426,18 @@ int __init wg_device_init(void) return ret; #endif - ret = register_netdevice_notifier(&netdevice_notifier); + ret = register_pernet_device(&pernet_ops); if (ret) goto error_pm; ret = rtnl_link_register(&link_ops); if (ret) - goto error_netdevice; + goto error_pernet; return 0; -error_netdevice: - unregister_netdevice_notifier(&netdevice_notifier); +error_pernet: + unregister_pernet_device(&pernet_ops); error_pm: #ifdef CONFIG_PM_SLEEP unregister_pm_notifier(&pm_notifier); @@ -451,7 +448,7 @@ error_pm: void wg_device_uninit(void) { rtnl_link_unregister(&link_ops); - unregister_netdevice_notifier(&netdevice_notifier); + unregister_pernet_device(&pernet_ops); #ifdef CONFIG_PM_SLEEP unregister_pm_notifier(&pm_notifier); #endif diff --git a/drivers/net/wireguard/device.h b/drivers/net/wireguard/device.h index b15a8be9d816..4d0144e16947 100644 --- a/drivers/net/wireguard/device.h +++ b/drivers/net/wireguard/device.h @@ -40,7 +40,7 @@ struct wg_device { struct net_device *dev; struct crypt_queue encrypt_queue, decrypt_queue; struct sock __rcu *sock4, *sock6; - struct net *creating_net; + struct net __rcu *creating_net; struct noise_static_identity static_identity; struct workqueue_struct *handshake_receive_wq, *handshake_send_wq; struct workqueue_struct *packet_crypt_wq; @@ -56,7 +56,6 @@ struct wg_device { unsigned int num_peers, device_update_gen; u32 fwmark; u16 incoming_port; - bool have_creating_net_ref; }; int wg_device_init(void); diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c index 802099c8828a..20a4f3c0a0a1 100644 --- a/drivers/net/wireguard/netlink.c +++ b/drivers/net/wireguard/netlink.c @@ -511,11 +511,15 @@ static int wg_set_device(struct sk_buff *skb, struct genl_info *info) if (flags & ~__WGDEVICE_F_ALL) goto out; - ret = -EPERM; - if ((info->attrs[WGDEVICE_A_LISTEN_PORT] || - info->attrs[WGDEVICE_A_FWMARK]) && - !ns_capable(wg->creating_net->user_ns, CAP_NET_ADMIN)) - goto out; + if (info->attrs[WGDEVICE_A_LISTEN_PORT] || info->attrs[WGDEVICE_A_FWMARK]) { + struct net *net; + rcu_read_lock(); + net = rcu_dereference(wg->creating_net); + ret = !net || !ns_capable(net->user_ns, CAP_NET_ADMIN) ? 
-EPERM : 0; + rcu_read_unlock(); + if (ret) + goto out; + } ++wg->device_update_gen; diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c index 626433690abb..201a22681945 100644 --- a/drivers/net/wireguard/noise.c +++ b/drivers/net/wireguard/noise.c @@ -617,8 +617,8 @@ wg_noise_handshake_consume_initiation(struct message_handshake_initiation *src, memcpy(handshake->hash, hash, NOISE_HASH_LEN); memcpy(handshake->chaining_key, chaining_key, NOISE_HASH_LEN); handshake->remote_index = src->sender_index; - if ((s64)(handshake->last_initiation_consumption - - (initiation_consumption = ktime_get_coarse_boottime_ns())) < 0) + initiation_consumption = ktime_get_coarse_boottime_ns(); + if ((s64)(handshake->last_initiation_consumption - initiation_consumption) < 0) handshake->last_initiation_consumption = initiation_consumption; handshake->state = HANDSHAKE_CONSUMED_INITIATION; up_write(&handshake->lock); diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h index c58df439dbbe..dfb674e03076 100644 --- a/drivers/net/wireguard/queueing.h +++ b/drivers/net/wireguard/queueing.h @@ -11,6 +11,7 @@ #include <linux/skbuff.h> #include <linux/ip.h> #include <linux/ipv6.h> +#include <net/ip_tunnels.h> struct wg_device; struct wg_peer; @@ -65,25 +66,9 @@ struct packet_cb { #define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb)) #define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer) -/* Returns either the correct skb->protocol value, or 0 if invalid. */ -static inline __be16 wg_examine_packet_protocol(struct sk_buff *skb) -{ - if (skb_network_header(skb) >= skb->head && - (skb_network_header(skb) + sizeof(struct iphdr)) <= - skb_tail_pointer(skb) && - ip_hdr(skb)->version == 4) - return htons(ETH_P_IP); - if (skb_network_header(skb) >= skb->head && - (skb_network_header(skb) + sizeof(struct ipv6hdr)) <= - skb_tail_pointer(skb) && - ipv6_hdr(skb)->version == 6) - return htons(ETH_P_IPV6); - return 0; -} - static inline bool wg_check_packet_protocol(struct sk_buff *skb) { - __be16 real_protocol = wg_examine_packet_protocol(skb); + __be16 real_protocol = ip_tunnel_parse_protocol(skb); return real_protocol && skb->protocol == real_protocol; } diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c index 91438144e4f7..2c9551ea6dc7 100644 --- a/drivers/net/wireguard/receive.c +++ b/drivers/net/wireguard/receive.c @@ -387,7 +387,7 @@ static void wg_packet_consume_data_done(struct wg_peer *peer, */ skb->ip_summed = CHECKSUM_UNNECESSARY; skb->csum_level = ~0; /* All levels */ - skb->protocol = wg_examine_packet_protocol(skb); + skb->protocol = ip_tunnel_parse_protocol(skb); if (skb->protocol == htons(ETH_P_IP)) { len = ntohs(ip_hdr(skb)->tot_len); if (unlikely(len < sizeof(struct iphdr))) @@ -414,14 +414,8 @@ static void wg_packet_consume_data_done(struct wg_peer *peer, if (unlikely(routed_peer != peer)) goto dishonest_packet_peer; - if (unlikely(napi_gro_receive(&peer->napi, skb) == GRO_DROP)) { - ++dev->stats.rx_dropped; - net_dbg_ratelimited("%s: Failed to give packet to userspace from peer %llu (%pISpfsc)\n", - dev->name, peer->internal_id, - &peer->endpoint.addr); - } else { - update_rx_stats(peer, message_data_len(len_before_trim)); - } + napi_gro_receive(&peer->napi, skb); + update_rx_stats(peer, message_data_len(len_before_trim)); return; dishonest_packet_peer: diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c index f9018027fc13..c33e2c81635f 100644 --- a/drivers/net/wireguard/socket.c +++ 
b/drivers/net/wireguard/socket.c @@ -347,6 +347,7 @@ static void set_sock_opts(struct socket *sock) int wg_socket_init(struct wg_device *wg, u16 port) { + struct net *net; int ret; struct udp_tunnel_sock_cfg cfg = { .sk_user_data = wg, @@ -371,37 +372,47 @@ int wg_socket_init(struct wg_device *wg, u16 port) }; #endif + rcu_read_lock(); + net = rcu_dereference(wg->creating_net); + net = net ? maybe_get_net(net) : NULL; + rcu_read_unlock(); + if (unlikely(!net)) + return -ENONET; + #if IS_ENABLED(CONFIG_IPV6) retry: #endif - ret = udp_sock_create(wg->creating_net, &port4, &new4); + ret = udp_sock_create(net, &port4, &new4); if (ret < 0) { pr_err("%s: Could not create IPv4 socket\n", wg->dev->name); - return ret; + goto out; } set_sock_opts(new4); - setup_udp_tunnel_sock(wg->creating_net, new4, &cfg); + setup_udp_tunnel_sock(net, new4, &cfg); #if IS_ENABLED(CONFIG_IPV6) if (ipv6_mod_enabled()) { port6.local_udp_port = inet_sk(new4->sk)->inet_sport; - ret = udp_sock_create(wg->creating_net, &port6, &new6); + ret = udp_sock_create(net, &port6, &new6); if (ret < 0) { udp_tunnel_sock_release(new4); if (ret == -EADDRINUSE && !port && retries++ < 100) goto retry; pr_err("%s: Could not create IPv6 socket\n", wg->dev->name); - return ret; + goto out; } set_sock_opts(new6); - setup_udp_tunnel_sock(wg->creating_net, new6, &cfg); + setup_udp_tunnel_sock(net, new6, &cfg); } #endif wg_socket_reinit(wg, new4->sk, new6 ? new6->sk : NULL); - return 0; + ret = 0; +out: + put_net(net); + return ret; } void wg_socket_reinit(struct wg_device *wg, struct sock *new4, diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index bc8c15fb609d..080e5aa60bea 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -897,7 +897,6 @@ static void wil_rx_handle_eapol(struct wil6210_vif *vif, struct sk_buff *skb) void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid, struct wil_net_stats *stats, bool gro) { - gro_result_t rc = GRO_NORMAL; struct wil6210_vif *vif = ndev_to_vif(ndev); struct wil6210_priv *wil = ndev_to_wil(ndev); struct wireless_dev *wdev = vif_to_wdev(vif); @@ -908,22 +907,16 @@ void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid, */ int mcast = is_multicast_ether_addr(da); struct sk_buff *xmit_skb = NULL; - static const char * const gro_res_str[] = { - [GRO_MERGED] = "GRO_MERGED", - [GRO_MERGED_FREE] = "GRO_MERGED_FREE", - [GRO_HELD] = "GRO_HELD", - [GRO_NORMAL] = "GRO_NORMAL", - [GRO_DROP] = "GRO_DROP", - [GRO_CONSUMED] = "GRO_CONSUMED", - }; if (wdev->iftype == NL80211_IFTYPE_STATION) { sa = wil_skb_get_sa(skb); if (mcast && ether_addr_equal(sa, ndev->dev_addr)) { /* mcast packet looped back to us */ - rc = GRO_DROP; dev_kfree_skb(skb); - goto stats; + ndev->stats.rx_dropped++; + stats->rx_dropped++; + wil_dbg_txrx(wil, "Rx drop %d bytes\n", len); + return; } } else if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) { if (mcast) { @@ -967,26 +960,16 @@ void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid, wil_rx_handle_eapol(vif, skb); if (gro) - rc = napi_gro_receive(&wil->napi_rx, skb); + napi_gro_receive(&wil->napi_rx, skb); else netif_rx_ni(skb); - wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n", - len, gro_res_str[rc]); - } -stats: - /* statistics. 
rc set to GRO_NORMAL for AP bridging */ - if (unlikely(rc == GRO_DROP)) { - ndev->stats.rx_dropped++; - stats->rx_dropped++; - wil_dbg_txrx(wil, "Rx drop %d bytes\n", len); - } else { - ndev->stats.rx_packets++; - stats->rx_packets++; - ndev->stats.rx_bytes += len; - stats->rx_bytes += len; - if (mcast) - ndev->stats.multicast++; } + ndev->stats.rx_packets++; + stats->rx_packets++; + ndev->stats.rx_bytes += len; + stats->rx_bytes += len; + if (mcast) + ndev->stats.multicast++; } void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c index 89b85970912d..4cef69bd3c1b 100644 --- a/drivers/nvdimm/security.c +++ b/drivers/nvdimm/security.c @@ -95,7 +95,7 @@ static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm, struct encrypted_key_payload *epayload; struct device *dev = &nvdimm->dev; - keyref = lookup_user_key(id, 0, 0); + keyref = lookup_user_key(id, 0, KEY_NEED_SEARCH); if (IS_ERR(keyref)) return NULL; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index c2c5bc4fb702..add040168e67 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1116,10 +1116,16 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid, dev_warn(ctrl->device, "Identify Descriptors failed (%d)\n", status); /* - * Don't treat an error as fatal, as we potentially already - * have a NGUID or EUI-64. + * Don't treat non-retryable errors as fatal, as we potentially + * already have a NGUID or EUI-64. If we failed with DNR set, + * we want to silently ignore the error as we can still + * identify the device, but if the status has DNR set, we want + * to propagate the error back specifically for the disk + * revalidation flow to make sure we don't abandon the + * device just because of a temporal retry-able error (such + * as path of transport errors). 
*/ - if (status > 0 && !(status & NVME_SC_DNR)) + if (status > 0 && (status & NVME_SC_DNR)) status = 0; goto free_data; } @@ -1974,7 +1980,7 @@ static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) if (ns->head->disk) { nvme_update_disk_info(ns->head->disk, ns, id); blk_queue_stack_limits(ns->head->disk->queue, ns->queue); - revalidate_disk(ns->head->disk); + nvme_mpath_update_disk_size(ns->head->disk); } #endif return 0; @@ -4174,6 +4180,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, ctrl->dev = dev; ctrl->ops = ops; ctrl->quirks = quirks; + ctrl->numa_node = NUMA_NO_NODE; INIT_WORK(&ctrl->scan_work, nvme_scan_work); INIT_WORK(&ctrl->async_event_work, nvme_async_event_work); INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work); diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index da78e499947a..66509472fe06 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -409,15 +409,14 @@ static void nvme_mpath_set_live(struct nvme_ns *ns) { struct nvme_ns_head *head = ns->head; - lockdep_assert_held(&ns->head->lock); - if (!head->disk) return; - if (!(head->disk->flags & GENHD_FL_UP)) + if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) device_add_disk(&head->subsys->dev, head->disk, nvme_ns_id_attr_groups); + mutex_lock(&head->lock); if (nvme_path_is_optimized(ns)) { int node, srcu_idx; @@ -426,9 +425,10 @@ static void nvme_mpath_set_live(struct nvme_ns *ns) __nvme_find_path(head, node); srcu_read_unlock(&head->srcu, srcu_idx); } + mutex_unlock(&head->lock); - synchronize_srcu(&ns->head->srcu); - kblockd_schedule_work(&ns->head->requeue_work); + synchronize_srcu(&head->srcu); + kblockd_schedule_work(&head->requeue_work); } static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data, @@ -483,14 +483,12 @@ static inline bool nvme_state_is_live(enum nvme_ana_state state) static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc, struct nvme_ns *ns) { - mutex_lock(&ns->head->lock); ns->ana_grpid = le32_to_cpu(desc->grpid); ns->ana_state = desc->state; clear_bit(NVME_NS_ANA_PENDING, &ns->flags); if (nvme_state_is_live(ns->ana_state)) nvme_mpath_set_live(ns); - mutex_unlock(&ns->head->lock); } static int nvme_update_ana_state(struct nvme_ctrl *ctrl, @@ -640,38 +638,45 @@ static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr, } DEVICE_ATTR_RO(ana_state); -static int nvme_set_ns_ana_state(struct nvme_ctrl *ctrl, +static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *desc, void *data) { - struct nvme_ns *ns = data; + struct nvme_ana_group_desc *dst = data; - if (ns->ana_grpid == le32_to_cpu(desc->grpid)) { - nvme_update_ns_ana_state(desc, ns); - return -ENXIO; /* just break out of the loop */ - } + if (desc->grpid != dst->grpid) + return 0; - return 0; + *dst = *desc; + return -ENXIO; /* just break out of the loop */ } void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id) { if (nvme_ctrl_use_ana(ns->ctrl)) { + struct nvme_ana_group_desc desc = { + .grpid = id->anagrpid, + .state = 0, + }; + mutex_lock(&ns->ctrl->ana_lock); ns->ana_grpid = le32_to_cpu(id->anagrpid); - nvme_parse_ana_log(ns->ctrl, ns, nvme_set_ns_ana_state); + nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc); mutex_unlock(&ns->ctrl->ana_lock); + if (desc.state) { + /* found the group desc: update */ + nvme_update_ns_ana_state(&desc, ns); + } } else { - mutex_lock(&ns->head->lock); ns->ana_state = NVME_ANA_OPTIMIZED; 
nvme_mpath_set_live(ns); - mutex_unlock(&ns->head->lock); } if (bdi_cap_stable_pages_required(ns->queue->backing_dev_info)) { - struct backing_dev_info *info = - ns->head->disk->queue->backing_dev_info; + struct gendisk *disk = ns->head->disk; - info->capabilities |= BDI_CAP_STABLE_WRITES; + if (disk) + disk->queue->backing_dev_info->capabilities |= + BDI_CAP_STABLE_WRITES; } } @@ -686,6 +691,14 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head) kblockd_schedule_work(&head->requeue_work); flush_work(&head->requeue_work); blk_cleanup_queue(head->disk->queue); + if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) { + /* + * if device_add_disk wasn't called, prevent + * disk release to put a bogus reference on the + * request queue + */ + head->disk->queue = NULL; + } put_disk(head->disk); } diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index c0f4226d3299..1de3f9b827aa 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -364,6 +364,8 @@ struct nvme_ns_head { spinlock_t requeue_lock; struct work_struct requeue_work; struct mutex lock; + unsigned long flags; +#define NVME_NSHEAD_DISK_LIVE 0 struct nvme_ns __rcu *current_path[]; #endif }; @@ -602,6 +604,16 @@ static inline void nvme_trace_bio_complete(struct request *req, trace_block_bio_complete(ns->head->disk->queue, req->bio); } +static inline void nvme_mpath_update_disk_size(struct gendisk *disk) +{ + struct block_device *bdev = bdget_disk(disk, 0); + + if (bdev) { + bd_set_size(bdev, get_capacity(disk) << SECTOR_SHIFT); + bdput(bdev); + } +} + extern struct device_attribute dev_attr_ana_grpid; extern struct device_attribute dev_attr_ana_state; extern struct device_attribute subsys_attr_iopolicy; @@ -677,6 +689,9 @@ static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys) static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys) { } +static inline void nvme_mpath_update_disk_size(struct gendisk *disk) +{ +} #endif /* CONFIG_NVME_MULTIPATH */ #ifdef CONFIG_NVM diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index e2bacd369a88..b1d18f0633c7 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1593,7 +1593,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev) dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH; dev->admin_tagset.timeout = ADMIN_TIMEOUT; - dev->admin_tagset.numa_node = dev_to_node(dev->dev); + dev->admin_tagset.numa_node = dev->ctrl.numa_node; dev->admin_tagset.cmd_size = sizeof(struct nvme_iod); dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED; dev->admin_tagset.driver_data = dev; @@ -1669,6 +1669,8 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev) if (result) return result; + dev->ctrl.numa_node = dev_to_node(dev->dev); + nvmeq = &dev->queues[0]; aqa = nvmeq->q_depth - 1; aqa |= aqa << 16; @@ -2257,7 +2259,7 @@ static void nvme_dev_add(struct nvme_dev *dev) if (dev->io_queues[HCTX_TYPE_POLL]) dev->tagset.nr_maps++; dev->tagset.timeout = NVME_IO_TIMEOUT; - dev->tagset.numa_node = dev_to_node(dev->dev); + dev->tagset.numa_node = dev->ctrl.numa_node; dev->tagset.queue_depth = min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1; dev->tagset.cmd_size = sizeof(struct nvme_iod); diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index f8f856dc0c67..13506a87a444 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -470,7 +470,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue) * Spread I/O queues completion vectors according their queue index. 
* Admin queues can always go on completion vector 0. */ - comp_vector = idx == 0 ? idx : idx - 1; + comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors; /* Polling queues need direct cq polling context */ if (nvme_rdma_poll_queue(queue)) diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 3345ec7efaff..79ef2b8e2b3c 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -1532,7 +1532,7 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl, set->ops = &nvme_tcp_admin_mq_ops; set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; set->reserved_tags = 2; /* connect + keep-alive */ - set->numa_node = NUMA_NO_NODE; + set->numa_node = nctrl->numa_node; set->flags = BLK_MQ_F_BLOCKING; set->cmd_size = sizeof(struct nvme_tcp_request); set->driver_data = ctrl; @@ -1544,7 +1544,7 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl, set->ops = &nvme_tcp_mq_ops; set->queue_depth = nctrl->sqsize + 1; set->reserved_tags = 1; /* fabric connect */ - set->numa_node = NUMA_NO_NODE; + set->numa_node = nctrl->numa_node; set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING; set->cmd_size = sizeof(struct nvme_tcp_request); set->driver_data = ctrl; diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 0d54e730cbf2..6344e73c9354 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -340,7 +340,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops; ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH; ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */ - ctrl->admin_tag_set.numa_node = NUMA_NO_NODE; + ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node; ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) + NVME_INLINE_SG_CNT * sizeof(struct scatterlist); ctrl->admin_tag_set.driver_data = ctrl; @@ -512,7 +512,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl) ctrl->tag_set.ops = &nvme_loop_mq_ops; ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; ctrl->tag_set.reserved_tags = 1; /* fabric connect */ - ctrl->tag_set.numa_node = NUMA_NO_NODE; + ctrl->tag_set.numa_node = ctrl->ctrl.numa_node; ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) + NVME_INLINE_SG_CNT * sizeof(struct scatterlist); diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index a04afe79529c..ef6f818ce5b3 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -314,10 +314,15 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) child, addr); if (of_mdiobus_child_is_phy(child)) { + /* -ENODEV is the return code that PHYLIB has + * standardized on to indicate that bus + * scanning should continue. 
+ */ rc = of_mdiobus_register_phy(mdio, child, addr); - if (rc && rc != -ENODEV) + if (!rc) + break; + if (rc != -ENODEV) goto unregister; - break; } } } diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 9a5873591a40..314f306140a1 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -902,6 +902,10 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table) return -EINVAL; } + mutex_lock(&opp_table->lock); + opp_table->parsed_static_opps = 1; + mutex_unlock(&opp_table->lock); + val = prop->value; while (nr) { unsigned long freq = be32_to_cpup(val++) * 1000; diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index e386d4eac407..9a64cf90c291 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -546,9 +546,10 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info, x86_vector_domain); - irq_domain_free_fwnode(fn); - if (!vmd->irq_domain) + if (!vmd->irq_domain) { + irq_domain_free_fwnode(fn); return -ENODEV; + } pci_add_resource(&resources, &vmd->resources[0]); pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]); diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c index 1b8e337a29ca..87c4be9dd412 100644 --- a/drivers/perf/arm-cci.c +++ b/drivers/perf/arm-cci.c @@ -1718,6 +1718,7 @@ static struct platform_driver cci_pmu_driver = { .driver = { .name = DRIVER_NAME, .of_match_table = arm_cci_pmu_matches, + .suppress_bind_attrs = true, }, .probe = cci_pmu_probe, .remove = cci_pmu_remove, diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c index d50edef91f59..7b7d23f25713 100644 --- a/drivers/perf/arm-ccn.c +++ b/drivers/perf/arm-ccn.c @@ -1545,6 +1545,7 @@ static struct platform_driver arm_ccn_driver = { .driver = { .name = "arm-ccn", .of_match_table = arm_ccn_match, + .suppress_bind_attrs = true, }, .probe = arm_ccn_probe, .remove = arm_ccn_remove, diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c index 518d0603e24f..96ed93cc78e6 100644 --- a/drivers/perf/arm_dsu_pmu.c +++ b/drivers/perf/arm_dsu_pmu.c @@ -757,6 +757,7 @@ static struct platform_driver dsu_pmu_driver = { .driver = { .name = DRVNAME, .of_match_table = of_match_ptr(dsu_pmu_of_match), + .suppress_bind_attrs = true, }, .probe = dsu_pmu_device_probe, .remove = dsu_pmu_device_remove, diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c index 48e28ef93a70..4cdb35d166ac 100644 --- a/drivers/perf/arm_smmuv3_pmu.c +++ b/drivers/perf/arm_smmuv3_pmu.c @@ -742,6 +742,7 @@ static int smmu_pmu_probe(struct platform_device *pdev) platform_set_drvdata(pdev, smmu_pmu); smmu_pmu->pmu = (struct pmu) { + .module = THIS_MODULE, .task_ctx_nr = perf_invalid_context, .pmu_enable = smmu_pmu_enable, .pmu_disable = smmu_pmu_disable, @@ -859,6 +860,7 @@ static void smmu_pmu_shutdown(struct platform_device *pdev) static struct platform_driver smmu_pmu_driver = { .driver = { .name = "arm-smmu-v3-pmcg", + .suppress_bind_attrs = true, }, .probe = smmu_pmu_probe, .remove = smmu_pmu_remove, diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c index d80f48798bce..e51ddb6d63ed 100644 --- a/drivers/perf/arm_spe_pmu.c +++ b/drivers/perf/arm_spe_pmu.c @@ -1226,6 +1226,7 @@ static struct platform_driver arm_spe_pmu_driver = { .driver = { .name = DRVNAME, .of_match_table = of_match_ptr(arm_spe_pmu_of_match), + .suppress_bind_attrs = true, }, .probe = arm_spe_pmu_device_probe, .remove = arm_spe_pmu_device_remove, diff 
--git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c index 90884d14f95f..397540a4b799 100644 --- a/drivers/perf/fsl_imx8_ddr_perf.c +++ b/drivers/perf/fsl_imx8_ddr_perf.c @@ -512,6 +512,7 @@ static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base, { *pmu = (struct ddr_pmu) { .pmu = (struct pmu) { + .module = THIS_MODULE, .capabilities = PERF_PMU_CAP_NO_EXCLUDE, .task_ctx_nr = perf_invalid_context, .attr_groups = attr_groups, @@ -706,6 +707,7 @@ static struct platform_driver imx_ddr_pmu_driver = { .driver = { .name = "imx-ddr-pmu", .of_match_table = imx_ddr_pmu_dt_ids, + .suppress_bind_attrs = true, }, .probe = ddr_perf_probe, .remove = ddr_perf_remove, diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c index 15713faaa07e..5e3645c96443 100644 --- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c @@ -378,6 +378,7 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev) ddrc_pmu->sccl_id, ddrc_pmu->index_id); ddrc_pmu->pmu = (struct pmu) { .name = name, + .module = THIS_MODULE, .task_ctx_nr = perf_invalid_context, .event_init = hisi_uncore_pmu_event_init, .pmu_enable = hisi_uncore_pmu_enable, @@ -418,6 +419,7 @@ static struct platform_driver hisi_ddrc_pmu_driver = { .driver = { .name = "hisi_ddrc_pmu", .acpi_match_table = ACPI_PTR(hisi_ddrc_pmu_acpi_match), + .suppress_bind_attrs = true, }, .probe = hisi_ddrc_pmu_probe, .remove = hisi_ddrc_pmu_remove, diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c index dcc5600788a9..5eb8168029c0 100644 --- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c @@ -390,6 +390,7 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev) hha_pmu->sccl_id, hha_pmu->index_id); hha_pmu->pmu = (struct pmu) { .name = name, + .module = THIS_MODULE, .task_ctx_nr = perf_invalid_context, .event_init = hisi_uncore_pmu_event_init, .pmu_enable = hisi_uncore_pmu_enable, @@ -430,6 +431,7 @@ static struct platform_driver hisi_hha_pmu_driver = { .driver = { .name = "hisi_hha_pmu", .acpi_match_table = ACPI_PTR(hisi_hha_pmu_acpi_match), + .suppress_bind_attrs = true, }, .probe = hisi_hha_pmu_probe, .remove = hisi_hha_pmu_remove, diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c index 7719ae4e2c56..3e8b5eab5514 100644 --- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c @@ -380,6 +380,7 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev) l3c_pmu->sccl_id, l3c_pmu->index_id); l3c_pmu->pmu = (struct pmu) { .name = name, + .module = THIS_MODULE, .task_ctx_nr = perf_invalid_context, .event_init = hisi_uncore_pmu_event_init, .pmu_enable = hisi_uncore_pmu_enable, @@ -420,6 +421,7 @@ static struct platform_driver hisi_l3c_pmu_driver = { .driver = { .name = "hisi_l3c_pmu", .acpi_match_table = ACPI_PTR(hisi_l3c_pmu_acpi_match), + .suppress_bind_attrs = true, }, .probe = hisi_l3c_pmu_probe, .remove = hisi_l3c_pmu_remove, diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c index 21d6991dbe0b..4da37f650f98 100644 --- a/drivers/perf/qcom_l2_pmu.c +++ b/drivers/perf/qcom_l2_pmu.c @@ -1028,6 +1028,7 @@ static struct platform_driver l2_cache_pmu_driver = { .driver = { .name = "qcom-l2cache-pmu", .acpi_match_table = ACPI_PTR(l2_cache_pmu_acpi_match), + .suppress_bind_attrs = true, }, .probe = l2_cache_pmu_probe, .remove = 
l2_cache_pmu_remove, diff --git a/drivers/perf/qcom_l3_pmu.c b/drivers/perf/qcom_l3_pmu.c index 656e830798d9..9ddb577c542b 100644 --- a/drivers/perf/qcom_l3_pmu.c +++ b/drivers/perf/qcom_l3_pmu.c @@ -814,6 +814,7 @@ static struct platform_driver qcom_l3_cache_pmu_driver = { .driver = { .name = "qcom-l3cache-pmu", .acpi_match_table = ACPI_PTR(qcom_l3_cache_pmu_acpi_match), + .suppress_bind_attrs = true, }, .probe = qcom_l3_cache_pmu_probe, }; diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c index 51b31d6ff2c4..aac9823b0c6b 100644 --- a/drivers/perf/thunderx2_pmu.c +++ b/drivers/perf/thunderx2_pmu.c @@ -1017,6 +1017,7 @@ static struct platform_driver tx2_uncore_driver = { .driver = { .name = "tx2-uncore-pmu", .acpi_match_table = ACPI_PTR(tx2_uncore_acpi_match), + .suppress_bind_attrs = true, }, .probe = tx2_uncore_probe, .remove = tx2_uncore_remove, diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c index 46ee6807d533..edac28cd25dd 100644 --- a/drivers/perf/xgene_pmu.c +++ b/drivers/perf/xgene_pmu.c @@ -1975,6 +1975,7 @@ static struct platform_driver xgene_pmu_driver = { .name = "xgene-pmu", .of_match_table = xgene_pmu_of_match, .acpi_match_table = ACPI_PTR(xgene_pmu_acpi_match), + .suppress_bind_attrs = true, }, }; diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c index 856927382248..e5842e48a5e0 100644 --- a/drivers/phy/allwinner/phy-sun4i-usb.c +++ b/drivers/phy/allwinner/phy-sun4i-usb.c @@ -545,13 +545,14 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work) struct sun4i_usb_phy_data *data = container_of(work, struct sun4i_usb_phy_data, detect.work); struct phy *phy0 = data->phys[0].phy; - struct sun4i_usb_phy *phy = phy_get_drvdata(phy0); + struct sun4i_usb_phy *phy; bool force_session_end, id_notify = false, vbus_notify = false; int id_det, vbus_det; - if (phy0 == NULL) + if (!phy0) return; + phy = phy_get_drvdata(phy0); id_det = sun4i_usb_phy0_get_id_det(data); vbus_det = sun4i_usb_phy0_get_vbus_det(data); diff --git a/drivers/phy/intel/phy-intel-combo.c b/drivers/phy/intel/phy-intel-combo.c index c2a35be4cdfb..360b1eb2ebd6 100644 --- a/drivers/phy/intel/phy-intel-combo.c +++ b/drivers/phy/intel/phy-intel-combo.c @@ -134,7 +134,7 @@ static inline void combo_phy_w32_off_mask(void __iomem *base, unsigned int reg, reg_val = readl(base + reg); reg_val &= ~mask; - reg_val |= FIELD_PREP(mask, val); + reg_val |= val; writel(reg_val, base + reg); } @@ -169,7 +169,7 @@ static int intel_cbphy_pcie_en_pad_refclk(struct intel_cbphy_iphy *iphy) return 0; combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL, - PCIE_PHY_CLK_PAD, 0); + PCIE_PHY_CLK_PAD, FIELD_PREP(PCIE_PHY_CLK_PAD, 0)); /* Delay for stable clock PLL */ usleep_range(50, 100); @@ -192,14 +192,14 @@ static int intel_cbphy_pcie_dis_pad_refclk(struct intel_cbphy_iphy *iphy) return 0; combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL, - PCIE_PHY_CLK_PAD, 1); + PCIE_PHY_CLK_PAD, FIELD_PREP(PCIE_PHY_CLK_PAD, 1)); return 0; } static int intel_cbphy_set_mode(struct intel_combo_phy *cbphy) { - enum intel_combo_mode cb_mode = PHY_PCIE_MODE; + enum intel_combo_mode cb_mode; enum aggregated_mode aggr = cbphy->aggr_mode; struct device *dev = cbphy->dev; enum intel_phy_mode mode; @@ -224,6 +224,8 @@ static int intel_cbphy_set_mode(struct intel_combo_phy *cbphy) cb_mode = SATA0_SATA1_MODE; break; + default: + return -EINVAL; } ret = regmap_write(cbphy->hsiocfg, REG_COMBO_MODE(cbphy->bid), cb_mode); @@ -385,7 +387,7 @@ static int 
intel_cbphy_calibrate(struct phy *phy) /* trigger auto RX adaptation */ combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id), - ADAPT_REQ_MSK, 3); + ADAPT_REQ_MSK, FIELD_PREP(ADAPT_REQ_MSK, 3)); /* Wait RX adaptation to finish */ ret = readl_poll_timeout(cr_base + CR_ADDR(PCS_XF_RX_ADAPT_ACK, id), val, val & RX_ADAPT_ACK_BIT, 10, 5000); @@ -396,7 +398,7 @@ static int intel_cbphy_calibrate(struct phy *phy) /* Stop RX adaptation */ combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id), - ADAPT_REQ_MSK, 0); + ADAPT_REQ_MSK, FIELD_PREP(ADAPT_REQ_MSK, 0)); return ret; } diff --git a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c index a7c6c940a3a8..8af8c6c5cc02 100644 --- a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c +++ b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c @@ -607,8 +607,8 @@ static int inno_dsidphy_probe(struct platform_device *pdev) platform_set_drvdata(pdev, inno); inno->phy_base = devm_platform_ioremap_resource(pdev, 0); - if (!inno->phy_base) - return -ENOMEM; + if (IS_ERR(inno->phy_base)) + return PTR_ERR(inno->phy_base); inno->ref_clk = devm_clk_get(dev, "ref"); if (IS_ERR(inno->ref_clk)) { diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c index 0a166d5a6414..a174b3c3f010 100644 --- a/drivers/phy/ti/phy-am654-serdes.c +++ b/drivers/phy/ti/phy-am654-serdes.c @@ -72,7 +72,7 @@ struct serdes_am654_clk_mux { #define to_serdes_am654_clk_mux(_hw) \ container_of(_hw, struct serdes_am654_clk_mux, hw) -static struct regmap_config serdes_am654_regmap_config = { +static const struct regmap_config serdes_am654_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c index 30ea5b207285..33c4cf0105a4 100644 --- a/drivers/phy/ti/phy-j721e-wiz.c +++ b/drivers/phy/ti/phy-j721e-wiz.c @@ -117,7 +117,7 @@ struct wiz_clk_mux { struct wiz_clk_divider { struct clk_hw hw; struct regmap_field *field; - struct clk_div_table *table; + const struct clk_div_table *table; struct clk_init_data clk_data; }; @@ -131,7 +131,7 @@ struct wiz_clk_mux_sel { struct wiz_clk_div_sel { struct regmap_field *field; - struct clk_div_table *table; + const struct clk_div_table *table; const char *node_name; }; @@ -173,7 +173,7 @@ static struct wiz_clk_mux_sel clk_mux_sel_10g[] = { }, }; -static struct clk_div_table clk_div_table[] = { +static const struct clk_div_table clk_div_table[] = { { .val = 0, .div = 1, }, { .val = 1, .div = 2, }, { .val = 2, .div = 4, }, @@ -559,7 +559,7 @@ static const struct clk_ops wiz_clk_div_ops = { static int wiz_div_clk_register(struct wiz *wiz, struct device_node *node, struct regmap_field *field, - struct clk_div_table *table) + const struct clk_div_table *table) { struct device *dev = wiz->dev; struct wiz_clk_divider *div; @@ -756,7 +756,7 @@ static const struct reset_control_ops wiz_phy_reset_ops = { .deassert = wiz_phy_reset_deassert, }; -static struct regmap_config wiz_regmap_config = { +static const struct regmap_config wiz_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c index 0ff7c55173da..615174a9d1e0 100644 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c @@ -800,6 +800,21 @@ static void byt_gpio_disable_free(struct pinctrl_dev *pctl_dev, pm_runtime_put(vg->dev); } +static void byt_gpio_direct_irq_check(struct 
intel_pinctrl *vg, + unsigned int offset) +{ + void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG); + + /* + * Before making any direction modifications, do a check if gpio is set + * for direct IRQ. On Bay Trail, setting GPIO to output does not make + * sense, so let's at least inform the caller before they shoot + * themselves in the foot. + */ + if (readl(conf_reg) & BYT_DIRECT_IRQ_EN) + dev_info_once(vg->dev, "Potential Error: Setting GPIO with direct_irq_en to output"); +} + static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev, struct pinctrl_gpio_range *range, unsigned int offset, @@ -807,7 +822,6 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev, { struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev); void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG); - void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG); unsigned long flags; u32 value; @@ -817,14 +831,8 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev, value &= ~BYT_DIR_MASK; if (input) value |= BYT_OUTPUT_EN; - else if (readl(conf_reg) & BYT_DIRECT_IRQ_EN) - /* - * Before making any direction modifications, do a check if gpio - * is set for direct IRQ. On baytrail, setting GPIO to output - * does not make sense, so let's at least inform the caller before - * they shoot themselves in the foot. - */ - dev_info_once(vg->dev, "Potential Error: Setting GPIO with direct_irq_en to output"); + else + byt_gpio_direct_irq_check(vg, offset); writel(value, val_reg); @@ -1165,19 +1173,50 @@ static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) static int byt_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) { - return pinctrl_gpio_direction_input(chip->base + offset); + struct intel_pinctrl *vg = gpiochip_get_data(chip); + void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG); + unsigned long flags; + u32 reg; + + raw_spin_lock_irqsave(&byt_lock, flags); + + reg = readl(val_reg); + reg &= ~BYT_DIR_MASK; + reg |= BYT_OUTPUT_EN; + writel(reg, val_reg); + + raw_spin_unlock_irqrestore(&byt_lock, flags); + return 0; } +/* + * Note despite the temptation this MUST NOT be converted into a call to + * pinctrl_gpio_direction_output() + byt_gpio_set() that does not work this + * MUST be done as a single BYT_VAL_REG register write. + * See the commit message of the commit adding this comment for details. 
+ */ static int byt_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value) { - int ret = pinctrl_gpio_direction_output(chip->base + offset); + struct intel_pinctrl *vg = gpiochip_get_data(chip); + void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG); + unsigned long flags; + u32 reg; - if (ret) - return ret; + raw_spin_lock_irqsave(&byt_lock, flags); + + byt_gpio_direct_irq_check(vg, offset); - byt_gpio_set(chip, offset, value); + reg = readl(val_reg); + reg &= ~BYT_DIR_MASK; + if (value) + reg |= BYT_LEVEL; + else + reg &= ~BYT_LEVEL; + writel(reg, val_reg); + + raw_spin_unlock_irqrestore(&byt_lock, flags); return 0; } diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h index 3e5760f1a715..d4a192df5fab 100644 --- a/drivers/pinctrl/pinctrl-amd.h +++ b/drivers/pinctrl/pinctrl-amd.h @@ -252,7 +252,7 @@ static const struct amd_pingroup kerncz_groups[] = { { .name = "uart0", .pins = uart0_pins, - .npins = 9, + .npins = 5, }, { .name = "uart1", diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 877aade19497..8f4acdc06b13 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -441,6 +441,7 @@ static int asus_wmi_battery_add(struct power_supply *battery) * battery is named BATT. */ if (strcmp(battery->desc->name, "BAT0") != 0 && + strcmp(battery->desc->name, "BAT1") != 0 && strcmp(battery->desc->name, "BATT") != 0) return -ENODEV; diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_common.h b/drivers/platform/x86/intel_speed_select_if/isst_if_common.h index 1409a5bb5582..4f6f7f0761fc 100644 --- a/drivers/platform/x86/intel_speed_select_if/isst_if_common.h +++ b/drivers/platform/x86/intel_speed_select_if/isst_if_common.h @@ -13,6 +13,9 @@ #define INTEL_RAPL_PRIO_DEVID_0 0x3451 #define INTEL_CFG_MBOX_DEVID_0 0x3459 +#define INTEL_RAPL_PRIO_DEVID_1 0x3251 +#define INTEL_CFG_MBOX_DEVID_1 0x3259 + /* * Validate maximum commands in a single request. 
* This is enough to handle command to every core in one ioctl, or all diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c index d84e2174cbde..95f01e7a87d5 100644 --- a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c +++ b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c @@ -147,6 +147,7 @@ static long isst_if_mbox_proc_cmd(u8 *cmd_ptr, int *write_only, int resume) static const struct pci_device_id isst_if_mbox_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CFG_MBOX_DEVID_0)}, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CFG_MBOX_DEVID_1)}, { 0 }, }; MODULE_DEVICE_TABLE(pci, isst_if_mbox_ids); diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c b/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c index 3584859fcc42..aa17fd7817f8 100644 --- a/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c +++ b/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c @@ -72,6 +72,7 @@ static long isst_if_mmio_rd_wr(u8 *cmd_ptr, int *write_only, int resume) static const struct pci_device_id isst_if_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_RAPL_PRIO_DEVID_0)}, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_RAPL_PRIO_DEVID_1)}, { 0 }, }; MODULE_DEVICE_TABLE(pci, isst_if_ids); diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index ff7f0a4f2475..0f6fceda5fc0 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -885,11 +885,19 @@ static ssize_t dispatch_proc_write(struct file *file, if (!ibm || !ibm->write) return -EINVAL; + if (count > PAGE_SIZE - 1) + return -EINVAL; + + kernbuf = kmalloc(count + 1, GFP_KERNEL); + if (!kernbuf) + return -ENOMEM; - kernbuf = strndup_user(userbuf, PAGE_SIZE); - if (IS_ERR(kernbuf)) - return PTR_ERR(kernbuf); + if (copy_from_user(kernbuf, userbuf, count)) { + kfree(kernbuf); + return -EFAULT; + } + kernbuf[count] = 0; ret = ibm->write(kernbuf); if (ret == 0) ret = count; diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index 8f677f5d79b4..edb1c4f8b496 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig @@ -684,7 +684,7 @@ config REGULATOR_MT6323 config REGULATOR_MT6358 tristate "MediaTek MT6358 PMIC" - depends on MFD_MT6397 && BROKEN + depends on MFD_MT6397 help Say y here to select this option to enable the power regulator of MediaTek MT6358 PMIC. 
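An aside on the thinkpad_acpi dispatch_proc_write() hunk a few files above: the patch drops strndup_user(), which silently truncates at PAGE_SIZE, in favour of an explicit length check followed by kmalloc() and copy_from_user(), so oversized writes now fail with -EINVAL instead of being clipped. The sketch below shows that bounded-copy pattern in isolation, assuming a hypothetical helper named bounded_user_copy(); it is only an illustration of the shape of the change, not the in-tree code.

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

/*
 * Illustrative only: reject user writes that cannot fit in one page,
 * then copy and NUL-terminate the payload. This mirrors the pattern in
 * the dispatch_proc_write() hunk above; bounded_user_copy() itself is
 * not a kernel API.
 */
static char *bounded_user_copy(const char __user *userbuf, size_t count)
{
	char *kernbuf;

	if (count > PAGE_SIZE - 1)
		return ERR_PTR(-EINVAL);

	kernbuf = kmalloc(count + 1, GFP_KERNEL);
	if (!kernbuf)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(kernbuf, userbuf, count)) {
		kfree(kernbuf);
		return ERR_PTR(-EFAULT);
	}
	kernbuf[count] = '\0';

	return kernbuf;
}

A caller would check the result with IS_ERR()/PTR_ERR() and kfree() the buffer once the write has been consumed; the patched dispatch_proc_write() open-codes the same steps inline rather than using a helper.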
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index e8f163371071..0796e4a47afa 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile @@ -31,7 +31,7 @@ obj-$(CONFIG_REGULATOR_BD70528) += bd70528-regulator.o obj-$(CONFIG_REGULATOR_BD71828) += bd71828-regulator.o obj-$(CONFIG_REGULATOR_BD718XX) += bd718x7-regulator.o obj-$(CONFIG_REGULATOR_BD9571MWV) += bd9571mwv-regulator.o -obj-$(CONFIG_REGULATOR_DA903X) += da903x.o +obj-$(CONFIG_REGULATOR_DA903X) += da903x-regulator.o obj-$(CONFIG_REGULATOR_DA9052) += da9052-regulator.o obj-$(CONFIG_REGULATOR_DA9055) += da9055-regulator.o obj-$(CONFIG_REGULATOR_DA9062) += da9062-regulator.o diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x-regulator.c index 770e694824ac..770e694824ac 100644 --- a/drivers/regulator/da903x.c +++ b/drivers/regulator/da903x-regulator.c diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c index e1d6c8f6d40b..fe65b5acaf28 100644 --- a/drivers/regulator/da9063-regulator.c +++ b/drivers/regulator/da9063-regulator.c @@ -512,7 +512,6 @@ static const struct da9063_regulator_info da9063_regulator_info[] = { }, { DA9063_LDO(DA9063, LDO9, 950, 50, 3600), - .suspend = BFIELD(DA9063_REG_LDO9_CONT, DA9063_VLDO9_SEL), }, { DA9063_LDO(DA9063, LDO11, 900, 50, 3600), diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c index e970e9d2f8be..e4bb09bbd3fa 100644 --- a/drivers/regulator/helpers.c +++ b/drivers/regulator/helpers.c @@ -486,7 +486,7 @@ int regulator_map_voltage_pickable_linear_range(struct regulator_dev *rdev, continue; } - ret = selector + sel; + ret = selector + sel - range->min_sel; voltage = rdev->desc->ops->list_voltage(rdev, ret); diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c index 689537927f6f..4c8e8b472287 100644 --- a/drivers/regulator/pfuze100-regulator.c +++ b/drivers/regulator/pfuze100-regulator.c @@ -209,6 +209,19 @@ static const struct regulator_ops pfuze100_swb_regulator_ops = { }; +static const struct regulator_ops pfuze3000_sw_regulator_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_table, + .map_voltage = regulator_map_voltage_ascend, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_time_sel = regulator_set_voltage_time_sel, + .set_ramp_delay = pfuze100_set_ramp_delay, + +}; + #define PFUZE100_FIXED_REG(_chip, _name, base, voltage) \ [_chip ## _ ## _name] = { \ .desc = { \ @@ -318,23 +331,28 @@ static const struct regulator_ops pfuze100_swb_regulator_ops = { .stby_mask = 0x20, \ } - -#define PFUZE3000_SW2_REG(_chip, _name, base, min, max, step) { \ - .desc = { \ - .name = #_name,\ - .n_voltages = ((max) - (min)) / (step) + 1, \ - .ops = &pfuze100_sw_regulator_ops, \ - .type = REGULATOR_VOLTAGE, \ - .id = _chip ## _ ## _name, \ - .owner = THIS_MODULE, \ - .min_uV = (min), \ - .uV_step = (step), \ - .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \ - .vsel_mask = 0x7, \ - }, \ - .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \ - .stby_mask = 0x7, \ -} +/* No linar case for the some switches of PFUZE3000 */ +#define PFUZE3000_SW_REG(_chip, _name, base, mask, voltages) \ + [_chip ## _ ## _name] = { \ + .desc = { \ + .name = #_name, \ + .n_voltages = ARRAY_SIZE(voltages), \ + .ops = &pfuze3000_sw_regulator_ops, \ + .type = REGULATOR_VOLTAGE, \ + .id = _chip ## _ ## _name, \ + 
.owner = THIS_MODULE, \ + .volt_table = voltages, \ + .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \ + .vsel_mask = (mask), \ + .enable_reg = (base) + PFUZE100_MODE_OFFSET, \ + .enable_mask = 0xf, \ + .enable_val = 0x8, \ + .enable_time = 500, \ + }, \ + .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \ + .stby_mask = (mask), \ + .sw_reg = true, \ + } #define PFUZE3000_SW3_REG(_chip, _name, base, min, max, step) { \ .desc = { \ @@ -391,9 +409,9 @@ static struct pfuze_regulator pfuze200_regulators[] = { }; static struct pfuze_regulator pfuze3000_regulators[] = { - PFUZE100_SWB_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a), + PFUZE3000_SW_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a), PFUZE100_SW_REG(PFUZE3000, SW1B, PFUZE100_SW1CVOL, 700000, 1475000, 25000), - PFUZE100_SWB_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo), + PFUZE3000_SW_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo), PFUZE3000_SW3_REG(PFUZE3000, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000), PFUZE100_SWB_REG(PFUZE3000, SWBST, PFUZE100_SWBSTCON1, 0x3, pfuze100_swbst), PFUZE100_SWB_REG(PFUZE3000, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs), @@ -407,8 +425,8 @@ static struct pfuze_regulator pfuze3000_regulators[] = { }; static struct pfuze_regulator pfuze3001_regulators[] = { - PFUZE100_SWB_REG(PFUZE3001, SW1, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a), - PFUZE100_SWB_REG(PFUZE3001, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo), + PFUZE3000_SW_REG(PFUZE3001, SW1, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a), + PFUZE3000_SW_REG(PFUZE3001, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo), PFUZE3000_SW3_REG(PFUZE3001, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000), PFUZE100_SWB_REG(PFUZE3001, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs), PFUZE100_VGEN_REG(PFUZE3001, VLDO1, PFUZE100_VGEN1VOL, 1800000, 3300000, 100000), diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c index 53a64d856926..7f5c318c8259 100644 --- a/drivers/regulator/qcom_smd-regulator.c +++ b/drivers/regulator/qcom_smd-regulator.c @@ -821,7 +821,7 @@ static const struct rpm_regulator_data rpm_pm8994_regulators[] = { static const struct rpm_regulator_data rpm_pmi8994_regulators[] = { { "s1", QCOM_SMD_RPM_SMPB, 1, &pmi8994_ftsmps, "vdd_s1" }, { "s2", QCOM_SMD_RPM_SMPB, 2, &pmi8994_hfsmps, "vdd_s2" }, - { "s2", QCOM_SMD_RPM_SMPB, 3, &pmi8994_hfsmps, "vdd_s3" }, + { "s3", QCOM_SMD_RPM_SMPB, 3, &pmi8994_hfsmps, "vdd_s3" }, { "boost-bypass", QCOM_SMD_RPM_BBYB, 1, &pmi8994_bby, "vdd_bst_byp" }, {} }; diff --git a/drivers/s390/cio/vfio_ccw_chp.c b/drivers/s390/cio/vfio_ccw_chp.c index a646fc81c872..13b26a1c7988 100644 --- a/drivers/s390/cio/vfio_ccw_chp.c +++ b/drivers/s390/cio/vfio_ccw_chp.c @@ -8,6 +8,7 @@ * Eric Farman <farman@linux.ibm.com> */ +#include <linux/slab.h> #include <linux/vfio.h> #include "vfio_ccw_private.h" diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 18a0fb75a710..88e998de2d03 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -4544,9 +4544,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, int fallback = *(int *)reply->param; QETH_CARD_TEXT(card, 4, "setaccb"); - if (cmd->hdr.return_code) - return -EIO; - qeth_setadpparms_inspect_rc(cmd); access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; QETH_CARD_TEXT_(card, 2, "rc=%d", @@ -4556,7 +4553,7 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, QETH_DBF_MESSAGE(3, 
"ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n", access_ctrl_req->subcmd_code, CARD_DEVID(card), cmd->data.setadapterparms.hdr.return_code); - switch (cmd->data.setadapterparms.hdr.return_code) { + switch (qeth_setadpparms_inspect_rc(cmd)) { case SET_ACCESS_CTRL_RC_SUCCESS: if (card->options.isolation == ISOLATION_MODE_NONE) { dev_info(&card->gdev->dev, @@ -6840,9 +6837,11 @@ netdev_features_t qeth_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) { + struct qeth_card *card = dev->ml_priv; + /* Traffic with local next-hop is not eligible for some offloads: */ - if (skb->ip_summed == CHECKSUM_PARTIAL) { - struct qeth_card *card = dev->ml_priv; + if (skb->ip_summed == CHECKSUM_PARTIAL && + card->options.isolation != ISOLATION_MODE_FWD) { netdev_features_t restricted = 0; if (skb_is_gso(skb) && !netif_needs_gso(skb, features)) diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index db320dab1fee..79f6e8fb03ca 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -577,7 +577,10 @@ static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act) ZFCP_STATUS_ERP_TIMEDOUT)) { req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; zfcp_dbf_rec_run("erscf_1", act); - req->erp_action = NULL; + /* lock-free concurrent access with + * zfcp_erp_timeout_handler() + */ + WRITE_ONCE(req->erp_action, NULL); } if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) zfcp_dbf_rec_run("erscf_2", act); @@ -613,8 +616,14 @@ void zfcp_erp_notify(struct zfcp_erp_action *erp_action, unsigned long set_mask) void zfcp_erp_timeout_handler(struct timer_list *t) { struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer); - struct zfcp_erp_action *act = fsf_req->erp_action; + struct zfcp_erp_action *act; + if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) + return; + /* lock-free concurrent access with zfcp_erp_strategy_check_fsfreq() */ + act = READ_ONCE(fsf_req->erp_action); + if (!act) + return; zfcp_erp_notify(act, ZFCP_STATUS_ERP_TIMEDOUT); } diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 773c45af9387..278d15ff1c5a 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -133,8 +133,10 @@ struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id) lockdep_assert_held(&lport->disc.disc_mutex); rdata = fc_rport_lookup(lport, port_id); - if (rdata) + if (rdata) { + kref_put(&rdata->kref, fc_rport_destroy); return rdata; + } if (lport->rport_priv_size > 0) rport_priv_size = lport->rport_priv_size; @@ -481,10 +483,11 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata, fc_rport_state_enter(rdata, RPORT_ST_DELETE); - kref_get(&rdata->kref); - if (rdata->event == RPORT_EV_NONE && - !queue_work(rport_event_queue, &rdata->event_work)) - kref_put(&rdata->kref, fc_rport_destroy); + if (rdata->event == RPORT_EV_NONE) { + kref_get(&rdata->kref); + if (!queue_work(rport_event_queue, &rdata->event_work)) + kref_put(&rdata->kref, fc_rport_destroy); + } rdata->event = event; } diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index e5a64d4f255c..49c8a1818baf 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -2629,7 +2629,7 @@ struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht, "iscsi_q_%d", shost->host_no); ihost->workq = alloc_workqueue("%s", WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND, - 2, ihost->workq_name); + 1, ihost->workq_name); if (!ihost->workq) goto free_host; } diff --git a/drivers/scsi/lpfc/lpfc_init.c 
b/drivers/scsi/lpfc/lpfc_init.c index 69a5249e007a..6637f84a3d1b 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -11878,7 +11878,8 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba) lpfc_sli4_xri_exchange_busy_wait(phba); /* per-phba callback de-registration for hotplug event */ - lpfc_cpuhp_remove(phba); + if (phba->pport) + lpfc_cpuhp_remove(phba); /* Disable PCI subsystem interrupt */ lpfc_sli4_disable_intr(phba); diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 319f241da4b6..fcf03f733e41 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -3739,10 +3739,8 @@ static irqreturn_t megasas_isr_fusion(int irq, void *devp) if (instance->mask_interrupts) return IRQ_NONE; -#if defined(ENABLE_IRQ_POLL) if (irq_context->irq_poll_scheduled) return IRQ_HANDLED; -#endif if (!instance->msix_vectors) { mfiStatus = instance->instancet->clear_intr(instance); diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index 62e552838565..983e568ff231 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c @@ -3145,19 +3145,18 @@ BRM_status_show(struct device *cdev, struct device_attribute *attr, if (!ioc->is_warpdrive) { ioc_err(ioc, "%s: BRM attribute is only for warpdrive\n", __func__); - goto out; + return 0; } /* pci_access_mutex lock acquired by sysfs show path */ mutex_lock(&ioc->pci_access_mutex); - if (ioc->pci_error_recovery || ioc->remove_host) { - mutex_unlock(&ioc->pci_access_mutex); - return 0; - } + if (ioc->pci_error_recovery || ioc->remove_host) + goto out; /* allocate upto GPIOVal 36 entries */ sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36); io_unit_pg3 = kzalloc(sz, GFP_KERNEL); if (!io_unit_pg3) { + rc = -ENOMEM; ioc_err(ioc, "%s: failed allocating memory for iounit_pg3: (%d) bytes\n", __func__, sz); goto out; @@ -3167,6 +3166,7 @@ BRM_status_show(struct device *cdev, struct device_attribute *attr, 0) { ioc_err(ioc, "%s: failed reading iounit_pg3\n", __func__); + rc = -EINVAL; goto out; } @@ -3174,12 +3174,14 @@ BRM_status_show(struct device *cdev, struct device_attribute *attr, if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { ioc_err(ioc, "%s: iounit_pg3 failed with ioc_status(0x%04x)\n", __func__, ioc_status); + rc = -EINVAL; goto out; } if (io_unit_pg3->GPIOCount < 25) { ioc_err(ioc, "%s: iounit_pg3->GPIOCount less than 25 entries, detected (%d) entries\n", __func__, io_unit_pg3->GPIOCount); + rc = -EINVAL; goto out; } diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 42c3ad27f1cb..df670fba2ab8 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -3496,7 +3496,9 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) qla2x00_clear_loop_id(fcport); fcport->flags |= FCF_FABRIC_DEVICE; } else if (fcport->d_id.b24 != rp->id.b24 || - fcport->scan_needed) { + (fcport->scan_needed && + fcport->port_type != FCT_INITIATOR && + fcport->port_type != FCT_NVME_INITIATOR)) { qlt_schedule_sess_for_deletion(fcport); } fcport->d_id.b24 = rp->id.b24; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 4576d3ae9937..2436a17f5cd9 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -5944,7 +5944,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha) break; } - if (NVME_TARGET(vha->hw, fcport)) { + if (found && NVME_TARGET(vha->hw, fcport)) { if 
(fcport->disc_state == DSC_DELETE_PEND) { qla2x00_set_fcport_disc_state(fcport, DSC_GNL); vha->fcport_count--; diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index d66d47a0f958..fa695a4007f8 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -139,11 +139,12 @@ static void qla_nvme_release_fcp_cmd_kref(struct kref *kref) sp->priv = NULL; if (priv->comp_status == QLA_SUCCESS) { fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len); + fd->status = NVME_SC_SUCCESS; } else { fd->rcv_rsplen = 0; fd->transferred_length = 0; + fd->status = NVME_SC_INTERNAL; } - fd->status = 0; spin_unlock_irqrestore(&priv->cmd_lock, flags); fd->done(fd); diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index eed31021e788..ba84244c1b4f 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -239,6 +239,7 @@ static struct { {"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, + {"FUJITSU", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"SanDisk", "Cruzer Blade", NULL, BLIST_TRY_VPD_PAGES | BLIST_INQUIRY_36}, {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36}, diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c index 42f0550d6b11..6f41e4b5a2b8 100644 --- a/drivers/scsi/scsi_dh.c +++ b/drivers/scsi/scsi_dh.c @@ -63,6 +63,7 @@ static const struct scsi_dh_blist scsi_dh_blist[] = { {"LSI", "INF-01-00", "rdac", }, {"ENGENIO", "INF-01-00", "rdac", }, {"LENOVO", "DE_Series", "rdac", }, + {"FUJITSU", "ETERNUS_AHB", "rdac", }, {NULL, NULL, NULL }, }; diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index f4cc08eb47ba..7ae5024e7824 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -4760,7 +4760,7 @@ static __init int iscsi_transport_init(void) iscsi_eh_timer_workq = alloc_workqueue("%s", WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND, - 2, "iscsi_eh"); + 1, "iscsi_eh"); if (!iscsi_eh_timer_workq) { err = -ENOMEM; goto release_nls; diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c index f8661062ef95..f3d5b1bbd5aa 100644 --- a/drivers/scsi/scsi_transport_spi.c +++ b/drivers/scsi/scsi_transport_spi.c @@ -339,7 +339,7 @@ store_spi_transport_##field(struct device *dev, \ struct spi_transport_attrs *tp \ = (struct spi_transport_attrs *)&starget->starget_data; \ \ - if (i->f->set_##field) \ + if (!i->f->set_##field) \ return -EINVAL; \ val = simple_strtoul(buf, NULL, 0); \ if (val > tp->max_##field) \ diff --git a/drivers/soc/amlogic/meson-gx-socinfo.c b/drivers/soc/amlogic/meson-gx-socinfo.c index 01fc0d20a70d..6f54bd832c8b 100644 --- a/drivers/soc/amlogic/meson-gx-socinfo.c +++ b/drivers/soc/amlogic/meson-gx-socinfo.c @@ -66,10 +66,12 @@ static const struct meson_gx_package_id { { "A113D", 0x25, 0x22, 0xff }, { "S905D2", 0x28, 0x10, 0xf0 }, { "S905X2", 0x28, 0x40, 0xf0 }, - { "S922X", 0x29, 0x40, 0xf0 }, { "A311D", 0x29, 0x10, 0xf0 }, - { "S905X3", 0x2b, 0x5, 0xf }, - { "S905D3", 0x2b, 0xb0, 0xf0 }, + { "S922X", 0x29, 0x40, 0xf0 }, + { "S905D3", 0x2b, 0x4, 0xf5 }, + { "S905X3", 0x2b, 0x5, 0xf5 }, + { "S905X3", 0x2b, 0x10, 0x3f }, + { "S905D3", 0x2b, 0x30, 0x3f }, { "A113L", 0x2c, 0x0, 0xf8 }, }; diff --git a/drivers/soc/imx/soc-imx.c b/drivers/soc/imx/soc-imx.c index fec3d672b606..01bfea1cb64a 100644 --- a/drivers/soc/imx/soc-imx.c +++ b/drivers/soc/imx/soc-imx.c 
@@ -33,6 +33,9 @@ static int __init imx_soc_device_init(void) u32 val; int ret; + if (of_machine_is_compatible("fsl,ls1021a")) + return 0; + soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); if (!soc_dev_attr) return -ENOMEM; diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c index 7b0759adb47d..cc57a384d74d 100644 --- a/drivers/soc/imx/soc-imx8m.c +++ b/drivers/soc/imx/soc-imx8m.c @@ -22,6 +22,8 @@ #define OCOTP_UID_LOW 0x410 #define OCOTP_UID_HIGH 0x420 +#define IMX8MP_OCOTP_UID_OFFSET 0x10 + /* Same as ANADIG_DIGPROG_IMX7D */ #define ANADIG_DIGPROG_IMX8MM 0x800 @@ -87,6 +89,8 @@ static void __init imx8mm_soc_uid(void) { void __iomem *ocotp_base; struct device_node *np; + u32 offset = of_machine_is_compatible("fsl,imx8mp") ? + IMX8MP_OCOTP_UID_OFFSET : 0; np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-ocotp"); if (!np) @@ -95,9 +99,9 @@ static void __init imx8mm_soc_uid(void) ocotp_base = of_iomap(np, 0); WARN_ON(!ocotp_base); - soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH); + soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH + offset); soc_uid <<= 32; - soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW); + soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW + offset); iounmap(ocotp_base); of_node_put(np); @@ -146,7 +150,7 @@ static const struct imx8_soc_data imx8mp_soc_data = { .soc_revision = imx8mm_soc_revision, }; -static const struct of_device_id imx8_soc_match[] = { +static __maybe_unused const struct of_device_id imx8_soc_match[] = { { .compatible = "fsl,imx8mq", .data = &imx8mq_soc_data, }, { .compatible = "fsl,imx8mm", .data = &imx8mm_soc_data, }, { .compatible = "fsl,imx8mn", .data = &imx8mn_soc_data, }, diff --git a/drivers/soc/ti/omap_prm.c b/drivers/soc/ti/omap_prm.c index 96c6f777519c..c9b3f9ebf0bb 100644 --- a/drivers/soc/ti/omap_prm.c +++ b/drivers/soc/ti/omap_prm.c @@ -256,10 +256,10 @@ static int omap_reset_deassert(struct reset_controller_dev *rcdev, goto exit; /* wait for the status to be set */ - ret = readl_relaxed_poll_timeout(reset->prm->base + - reset->prm->data->rstst, - v, v & BIT(st_bit), 1, - OMAP_RESET_MAX_WAIT); + ret = readl_relaxed_poll_timeout_atomic(reset->prm->base + + reset->prm->data->rstst, + v, v & BIT(st_bit), 1, + OMAP_RESET_MAX_WAIT); if (ret) pr_err("%s: timedout waiting for %s:%lu\n", __func__, reset->prm->data->name, id); diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c index 4cfdd074e310..c7422740edd4 100644 --- a/drivers/soundwire/intel.c +++ b/drivers/soundwire/intel.c @@ -930,8 +930,9 @@ static int intel_create_dai(struct sdw_cdns *cdns, /* TODO: Read supported rates/formats from hardware */ for (i = off; i < (off + num); i++) { - dais[i].name = kasprintf(GFP_KERNEL, "SDW%d Pin%d", - cdns->instance, i); + dais[i].name = devm_kasprintf(cdns->dev, GFP_KERNEL, + "SDW%d Pin%d", + cdns->instance, i); if (!dais[i].name) return -ENOMEM; diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index a35faced0456..91c6affe139c 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -588,14 +588,14 @@ static void dspi_release_dma(struct fsl_dspi *dspi) return; if (dma->chan_tx) { - dma_unmap_single(dma->chan_tx->device->dev, dma->tx_dma_phys, - dma_bufsize, DMA_TO_DEVICE); + dma_free_coherent(dma->chan_tx->device->dev, dma_bufsize, + dma->tx_dma_buf, dma->tx_dma_phys); dma_release_channel(dma->chan_tx); } if (dma->chan_rx) { - dma_unmap_single(dma->chan_rx->device->dev, dma->rx_dma_phys, - dma_bufsize, DMA_FROM_DEVICE); + 
dma_free_coherent(dma->chan_rx->device->dev, dma_bufsize, + dma->rx_dma_buf, dma->rx_dma_phys); dma_release_channel(dma->chan_rx); } } @@ -1109,6 +1109,8 @@ static int dspi_suspend(struct device *dev) struct spi_controller *ctlr = dev_get_drvdata(dev); struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr); + if (dspi->irq) + disable_irq(dspi->irq); spi_controller_suspend(ctlr); clk_disable_unprepare(dspi->clk); @@ -1129,6 +1131,8 @@ static int dspi_resume(struct device *dev) if (ret) return ret; spi_controller_resume(ctlr); + if (dspi->irq) + enable_irq(dspi->irq); return 0; } @@ -1385,22 +1389,22 @@ static int dspi_probe(struct platform_device *pdev) goto poll_mode; } - ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, - IRQF_SHARED, pdev->name, dspi); + init_completion(&dspi->xfer_done); + + ret = request_threaded_irq(dspi->irq, dspi_interrupt, NULL, + IRQF_SHARED, pdev->name, dspi); if (ret < 0) { dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n"); goto out_clk_put; } - init_completion(&dspi->xfer_done); - poll_mode: if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) { ret = dspi_request_dma(dspi, res->start); if (ret < 0) { dev_err(&pdev->dev, "can't get dma channels\n"); - goto out_clk_put; + goto out_free_irq; } } @@ -1415,11 +1419,14 @@ poll_mode: ret = spi_register_controller(ctlr); if (ret != 0) { dev_err(&pdev->dev, "Problem registering DSPI ctlr\n"); - goto out_clk_put; + goto out_free_irq; } return ret; +out_free_irq: + if (dspi->irq) + free_irq(dspi->irq, dspi); out_clk_put: clk_disable_unprepare(dspi->clk); out_ctlr_put: @@ -1434,18 +1441,8 @@ static int dspi_remove(struct platform_device *pdev) struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr); /* Disconnect from the SPI framework */ - dspi_release_dma(dspi); - clk_disable_unprepare(dspi->clk); spi_unregister_controller(dspi->ctlr); - return 0; -} - -static void dspi_shutdown(struct platform_device *pdev) -{ - struct spi_controller *ctlr = platform_get_drvdata(pdev); - struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr); - /* Disable RX and TX */ regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF, @@ -1455,8 +1452,16 @@ static void dspi_shutdown(struct platform_device *pdev) regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, SPI_MCR_HALT); dspi_release_dma(dspi); + if (dspi->irq) + free_irq(dspi->irq, dspi); clk_disable_unprepare(dspi->clk); - spi_unregister_controller(dspi->ctlr); + + return 0; +} + +static void dspi_shutdown(struct platform_device *pdev) +{ + dspi_remove(pdev); } static struct platform_driver fsl_dspi_driver = { diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c index 6783e12c40c2..a556795caeef 100644 --- a/drivers/spi/spi-mt65xx.c +++ b/drivers/spi/spi-mt65xx.c @@ -36,7 +36,6 @@ #define SPI_CFG0_SCK_LOW_OFFSET 8 #define SPI_CFG0_CS_HOLD_OFFSET 16 #define SPI_CFG0_CS_SETUP_OFFSET 24 -#define SPI_ADJUST_CFG0_SCK_LOW_OFFSET 16 #define SPI_ADJUST_CFG0_CS_HOLD_OFFSET 0 #define SPI_ADJUST_CFG0_CS_SETUP_OFFSET 16 @@ -48,6 +47,8 @@ #define SPI_CFG1_CS_IDLE_MASK 0xff #define SPI_CFG1_PACKET_LOOP_MASK 0xff00 #define SPI_CFG1_PACKET_LENGTH_MASK 0x3ff0000 +#define SPI_CFG2_SCK_HIGH_OFFSET 0 +#define SPI_CFG2_SCK_LOW_OFFSET 16 #define SPI_CMD_ACT BIT(0) #define SPI_CMD_RESUME BIT(1) @@ -283,7 +284,7 @@ static void mtk_spi_set_cs(struct spi_device *spi, bool enable) static void mtk_spi_prepare_transfer(struct spi_master *master, struct spi_transfer *xfer) { - u32 spi_clk_hz, div, sck_time, cs_time, reg_val = 0; + u32 spi_clk_hz, div, 
sck_time, cs_time, reg_val; struct mtk_spi *mdata = spi_master_get_devdata(master); spi_clk_hz = clk_get_rate(mdata->spi_clk); @@ -296,18 +297,18 @@ static void mtk_spi_prepare_transfer(struct spi_master *master, cs_time = sck_time * 2; if (mdata->dev_comp->enhance_timing) { + reg_val = (((sck_time - 1) & 0xffff) + << SPI_CFG2_SCK_HIGH_OFFSET); reg_val |= (((sck_time - 1) & 0xffff) - << SPI_CFG0_SCK_HIGH_OFFSET); - reg_val |= (((sck_time - 1) & 0xffff) - << SPI_ADJUST_CFG0_SCK_LOW_OFFSET); + << SPI_CFG2_SCK_LOW_OFFSET); writel(reg_val, mdata->base + SPI_CFG2_REG); - reg_val |= (((cs_time - 1) & 0xffff) + reg_val = (((cs_time - 1) & 0xffff) << SPI_ADJUST_CFG0_CS_HOLD_OFFSET); reg_val |= (((cs_time - 1) & 0xffff) << SPI_ADJUST_CFG0_CS_SETUP_OFFSET); writel(reg_val, mdata->base + SPI_CFG0_REG); } else { - reg_val |= (((sck_time - 1) & 0xff) + reg_val = (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_HIGH_OFFSET); reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET); reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET); diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 6721910e5f2a..0040362b7162 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -1485,6 +1485,11 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = { { PCI_VDEVICE(INTEL, 0x4daa), LPSS_CNL_SSP }, { PCI_VDEVICE(INTEL, 0x4dab), LPSS_CNL_SSP }, { PCI_VDEVICE(INTEL, 0x4dfb), LPSS_CNL_SSP }, + /* TGL-H */ + { PCI_VDEVICE(INTEL, 0x43aa), LPSS_CNL_SSP }, + { PCI_VDEVICE(INTEL, 0x43ab), LPSS_CNL_SSP }, + { PCI_VDEVICE(INTEL, 0x43fb), LPSS_CNL_SSP }, + { PCI_VDEVICE(INTEL, 0x43fd), LPSS_CNL_SSP }, /* APL */ { PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP }, { PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP }, diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index 06192c9ea813..cbc2387d450c 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c @@ -179,7 +179,7 @@ struct rspi_data { void __iomem *addr; - u32 max_speed_hz; + u32 speed_hz; struct spi_controller *ctlr; struct platform_device *pdev; wait_queue_head_t wait; @@ -258,8 +258,7 @@ static int rspi_set_config_register(struct rspi_data *rspi, int access_size) rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR); /* Sets transfer bit rate */ - spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), - 2 * rspi->max_speed_hz) - 1; + spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->speed_hz) - 1; rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR); /* Disable dummy transmission, set 16-bit word access, 1 frame */ @@ -299,14 +298,14 @@ static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size) clksrc = clk_get_rate(rspi->clk); while (div < 3) { - if (rspi->max_speed_hz >= clksrc/4) /* 4=(CLK/2)/2 */ + if (rspi->speed_hz >= clksrc/4) /* 4=(CLK/2)/2 */ break; div++; clksrc /= 2; } /* Sets transfer bit rate */ - spbr = DIV_ROUND_UP(clksrc, 2 * rspi->max_speed_hz) - 1; + spbr = DIV_ROUND_UP(clksrc, 2 * rspi->speed_hz) - 1; rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR); rspi->spcmd |= div << 2; @@ -341,7 +340,7 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size) rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR); /* Sets transfer bit rate */ - spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->max_speed_hz); + spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->speed_hz); rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR); /* Disable dummy transmission, set byte access */ @@ -949,9 +948,24 @@ static int rspi_prepare_message(struct spi_controller *ctlr, { struct rspi_data *rspi 
= spi_controller_get_devdata(ctlr); struct spi_device *spi = msg->spi; + const struct spi_transfer *xfer; int ret; - rspi->max_speed_hz = spi->max_speed_hz; + /* + * As the Bit Rate Register must not be changed while the device is + * active, all transfers in a message must use the same bit rate. + * In theory, the sequencer could be enabled, and each Command Register + * could divide the base bit rate by a different value. + * However, most RSPI variants do not have Transfer Data Length + * Multiplier Setting Registers, so each sequence step would be limited + * to a single word, making this feature unsuitable for large + * transfers, which would gain most from it. + */ + rspi->speed_hz = spi->max_speed_hz; + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + if (xfer->speed_hz < rspi->speed_hz) + rspi->speed_hz = xfer->speed_hz; + } rspi->spcmd = SPCMD_SSLKP; if (spi->mode & SPI_CPOL) diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c index 88e6543648cb..bd23c4689b46 100644 --- a/drivers/spi/spi-sprd-adi.c +++ b/drivers/spi/spi-sprd-adi.c @@ -389,9 +389,9 @@ static int sprd_adi_restart_handler(struct notifier_block *this, sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_CTRL, val); /* Load the watchdog timeout value, 50ms is always enough. */ + sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_HIGH, 0); sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_LOW, WDG_LOAD_VAL & WDG_LOAD_MASK); - sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_HIGH, 0); /* Start the watchdog to reset system */ sprd_adi_read(sadi, sadi->slave_pbase + REG_WDG_CTRL, &val); diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c index 3c44bb2fd9b1..a900962b4336 100644 --- a/drivers/spi/spi-stm32-qspi.c +++ b/drivers/spi/spi-stm32-qspi.c @@ -553,20 +553,6 @@ static const struct spi_controller_mem_ops stm32_qspi_mem_ops = { .exec_op = stm32_qspi_exec_op, }; -static void stm32_qspi_release(struct stm32_qspi *qspi) -{ - pm_runtime_get_sync(qspi->dev); - /* disable qspi */ - writel_relaxed(0, qspi->io_base + QSPI_CR); - stm32_qspi_dma_free(qspi); - mutex_destroy(&qspi->lock); - pm_runtime_put_noidle(qspi->dev); - pm_runtime_disable(qspi->dev); - pm_runtime_set_suspended(qspi->dev); - pm_runtime_dont_use_autosuspend(qspi->dev); - clk_disable_unprepare(qspi->clk); -} - static int stm32_qspi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -642,7 +628,7 @@ static int stm32_qspi_probe(struct platform_device *pdev) if (IS_ERR(rstc)) { ret = PTR_ERR(rstc); if (ret == -EPROBE_DEFER) - goto err_qspi_release; + goto err_clk_disable; } else { reset_control_assert(rstc); udelay(2); @@ -653,7 +639,7 @@ static int stm32_qspi_probe(struct platform_device *pdev) platform_set_drvdata(pdev, qspi); ret = stm32_qspi_dma_setup(qspi); if (ret) - goto err_qspi_release; + goto err_dma_free; mutex_init(&qspi->lock); @@ -673,15 +659,26 @@ static int stm32_qspi_probe(struct platform_device *pdev) ret = devm_spi_register_master(dev, ctrl); if (ret) - goto err_qspi_release; + goto err_pm_runtime_free; pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return 0; -err_qspi_release: - stm32_qspi_release(qspi); +err_pm_runtime_free: + pm_runtime_get_sync(qspi->dev); + /* disable qspi */ + writel_relaxed(0, qspi->io_base + QSPI_CR); + mutex_destroy(&qspi->lock); + pm_runtime_put_noidle(qspi->dev); + pm_runtime_disable(qspi->dev); + pm_runtime_set_suspended(qspi->dev); + pm_runtime_dont_use_autosuspend(qspi->dev); +err_dma_free: + 
stm32_qspi_dma_free(qspi); +err_clk_disable: + clk_disable_unprepare(qspi->clk); err_master_put: spi_master_put(qspi->ctrl); @@ -692,7 +689,16 @@ static int stm32_qspi_remove(struct platform_device *pdev) { struct stm32_qspi *qspi = platform_get_drvdata(pdev); - stm32_qspi_release(qspi); + pm_runtime_get_sync(qspi->dev); + /* disable qspi */ + writel_relaxed(0, qspi->io_base + QSPI_CR); + stm32_qspi_dma_free(qspi); + mutex_destroy(&qspi->lock); + pm_runtime_put_noidle(qspi->dev); + pm_runtime_disable(qspi->dev); + pm_runtime_set_suspended(qspi->dev); + pm_runtime_dont_use_autosuspend(qspi->dev); + clk_disable_unprepare(qspi->clk); return 0; } diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c index ecea15534c42..fa11cc0e809b 100644 --- a/drivers/spi/spi-sun6i.c +++ b/drivers/spi/spi-sun6i.c @@ -198,7 +198,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master, struct spi_transfer *tfr) { struct sun6i_spi *sspi = spi_master_get_devdata(master); - unsigned int mclk_rate, div, timeout; + unsigned int mclk_rate, div, div_cdr1, div_cdr2, timeout; unsigned int start, end, tx_time; unsigned int trig_level; unsigned int tx_len = 0; @@ -287,14 +287,12 @@ static int sun6i_spi_transfer_one(struct spi_master *master, * First try CDR2, and if we can't reach the expected * frequency, fall back to CDR1. */ - div = mclk_rate / (2 * tfr->speed_hz); - if (div <= (SUN6I_CLK_CTL_CDR2_MASK + 1)) { - if (div > 0) - div--; - - reg = SUN6I_CLK_CTL_CDR2(div) | SUN6I_CLK_CTL_DRS; + div_cdr1 = DIV_ROUND_UP(mclk_rate, tfr->speed_hz); + div_cdr2 = DIV_ROUND_UP(div_cdr1, 2); + if (div_cdr2 <= (SUN6I_CLK_CTL_CDR2_MASK + 1)) { + reg = SUN6I_CLK_CTL_CDR2(div_cdr2 - 1) | SUN6I_CLK_CTL_DRS; } else { - div = ilog2(mclk_rate) - ilog2(tfr->speed_hz); + div = min(SUN6I_CLK_CTL_CDR1_MASK, order_base_2(div_cdr1)); reg = SUN6I_CLK_CTL_CDR1(div); } diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index d753df700e9e..59e07675ef86 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -609,15 +609,20 @@ err_find_dev: static int spidev_release(struct inode *inode, struct file *filp) { struct spidev_data *spidev; + int dofree; mutex_lock(&device_list_lock); spidev = filp->private_data; filp->private_data = NULL; + spin_lock_irq(&spidev->spi_lock); + /* ... after we unbound from the underlying device? */ + dofree = (spidev->spi == NULL); + spin_unlock_irq(&spidev->spi_lock); + /* last close? */ spidev->users--; if (!spidev->users) { - int dofree; kfree(spidev->tx_buffer); spidev->tx_buffer = NULL; @@ -625,19 +630,14 @@ static int spidev_release(struct inode *inode, struct file *filp) kfree(spidev->rx_buffer); spidev->rx_buffer = NULL; - spin_lock_irq(&spidev->spi_lock); - if (spidev->spi) - spidev->speed_hz = spidev->spi->max_speed_hz; - - /* ... after we unbound from the underlying device? 
*/ - dofree = (spidev->spi == NULL); - spin_unlock_irq(&spidev->spi_lock); - if (dofree) kfree(spidev); + else + spidev->speed_hz = spidev->spi->max_speed_hz; } #ifdef CONFIG_SPI_SLAVE - spi_slave_abort(spidev->spi); + if (!dofree) + spi_slave_abort(spidev->spi); #endif mutex_unlock(&device_list_lock); @@ -787,13 +787,13 @@ static int spidev_remove(struct spi_device *spi) { struct spidev_data *spidev = spi_get_drvdata(spi); + /* prevent new opens */ + mutex_lock(&device_list_lock); /* make sure ops on existing fds can abort cleanly */ spin_lock_irq(&spidev->spi_lock); spidev->spi = NULL; spin_unlock_irq(&spidev->spi_lock); - /* prevent new opens */ - mutex_lock(&device_list_lock); list_del(&spidev->device_entry); device_destroy(spidev_class, spidev->devt); clear_bit(MINOR(spidev->devt), minors); diff --git a/drivers/staging/comedi/drivers/addi_apci_1500.c b/drivers/staging/comedi/drivers/addi_apci_1500.c index 45ad4ba92f94..689acd69a1b9 100644 --- a/drivers/staging/comedi/drivers/addi_apci_1500.c +++ b/drivers/staging/comedi/drivers/addi_apci_1500.c @@ -456,9 +456,9 @@ static int apci1500_di_cfg_trig(struct comedi_device *dev, unsigned int lo_mask = data[5] << shift; unsigned int chan_mask = hi_mask | lo_mask; unsigned int old_mask = (1 << shift) - 1; - unsigned int pm = devpriv->pm[trig] & old_mask; - unsigned int pt = devpriv->pt[trig] & old_mask; - unsigned int pp = devpriv->pp[trig] & old_mask; + unsigned int pm; + unsigned int pt; + unsigned int pp; if (trig > 1) { dev_dbg(dev->class_dev, @@ -471,6 +471,10 @@ static int apci1500_di_cfg_trig(struct comedi_device *dev, return -EINVAL; } + pm = devpriv->pm[trig] & old_mask; + pt = devpriv->pt[trig] & old_mask; + pp = devpriv->pp[trig] & old_mask; + switch (data[2]) { case COMEDI_DIGITAL_TRIG_DISABLE: /* clear trigger configuration */ diff --git a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c index 69bcd172b298..a3ea7ce3e12e 100644 --- a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c +++ b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c @@ -1824,12 +1824,14 @@ int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie, uint var_ie_l pIE = (struct ndis_80211_var_ie *)rtw_get_ie(pvar_ie, _SUPPORTEDRATES_IE_, &ie_len, var_ie_len); if (!pIE) return _FAIL; + if (ie_len > sizeof(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates)) + return _FAIL; memcpy(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates, pIE->data, ie_len); supportRateNum = ie_len; pIE = (struct ndis_80211_var_ie *)rtw_get_ie(pvar_ie, _EXT_SUPPORTEDRATES_IE_, &ie_len, var_ie_len); - if (pIE) + if (pIE && (ie_len <= sizeof(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates) - supportRateNum)) memcpy((pmlmeinfo->FW_sta_info[cam_idx].SupportedRates + supportRateNum), pIE->data, ie_len); return _SUCCESS; diff --git a/drivers/staging/wfx/hif_tx.c b/drivers/staging/wfx/hif_tx.c index 893b67f2f792..5110f9b93762 100644 --- a/drivers/staging/wfx/hif_tx.c +++ b/drivers/staging/wfx/hif_tx.c @@ -240,7 +240,7 @@ int hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, } int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req, - int chan_start_idx, int chan_num) + int chan_start_idx, int chan_num, int *timeout) { int ret, i; struct hif_msg *hif; @@ -289,11 +289,13 @@ int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req, tmo_chan_fg = 512 * USEC_PER_TU + body->probe_delay; tmo_chan_fg *= body->num_of_probe_requests; tmo = chan_num * max(tmo_chan_bg, tmo_chan_fg) + 512 * USEC_PER_TU; + if (timeout) + *timeout = 
usecs_to_jiffies(tmo); wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START_SCAN, buf_len); ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); kfree(hif); - return ret ? ret : usecs_to_jiffies(tmo); + return ret; } int hif_stop_scan(struct wfx_vif *wvif) diff --git a/drivers/staging/wfx/hif_tx.h b/drivers/staging/wfx/hif_tx.h index e9eca9330178..e1da28aef706 100644 --- a/drivers/staging/wfx/hif_tx.h +++ b/drivers/staging/wfx/hif_tx.h @@ -42,7 +42,7 @@ int hif_read_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, int hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *buf, size_t buf_size); int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req80211, - int chan_start, int chan_num); + int chan_start, int chan_num, int *timeout); int hif_stop_scan(struct wfx_vif *wvif); int hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf, struct ieee80211_channel *channel, const u8 *ssid, int ssidlen); diff --git a/drivers/staging/wfx/queue.c b/drivers/staging/wfx/queue.c index 3248ecefda56..93ea2b72febd 100644 --- a/drivers/staging/wfx/queue.c +++ b/drivers/staging/wfx/queue.c @@ -246,7 +246,7 @@ static struct sk_buff *wfx_tx_queues_get_skb(struct wfx_dev *wdev) for (i = 0; i < IEEE80211_NUM_ACS; i++) { sorted_queues[i] = &wdev->tx_queue[i]; for (j = i; j > 0; j--) - if (atomic_read(&sorted_queues[j]->pending_frames) > + if (atomic_read(&sorted_queues[j]->pending_frames) < atomic_read(&sorted_queues[j - 1]->pending_frames)) swap(sorted_queues[j - 1], sorted_queues[j]); } @@ -291,15 +291,12 @@ struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev) if (atomic_read(&wdev->tx_lock)) return NULL; - - for (;;) { - skb = wfx_tx_queues_get_skb(wdev); - if (!skb) - return NULL; - skb_queue_tail(&wdev->tx_pending, skb); - wake_up(&wdev->tx_dequeue); - tx_priv = wfx_skb_tx_priv(skb); - tx_priv->xmit_timestamp = ktime_get(); - return (struct hif_msg *)skb->data; - } + skb = wfx_tx_queues_get_skb(wdev); + if (!skb) + return NULL; + skb_queue_tail(&wdev->tx_pending, skb); + wake_up(&wdev->tx_dequeue); + tx_priv = wfx_skb_tx_priv(skb); + tx_priv->xmit_timestamp = ktime_get(); + return (struct hif_msg *)skb->data; } diff --git a/drivers/staging/wfx/scan.c b/drivers/staging/wfx/scan.c index 57ea9997800b..e9de19784865 100644 --- a/drivers/staging/wfx/scan.c +++ b/drivers/staging/wfx/scan.c @@ -56,10 +56,10 @@ static int send_scan_req(struct wfx_vif *wvif, wfx_tx_lock_flush(wvif->wdev); wvif->scan_abort = false; reinit_completion(&wvif->scan_complete); - timeout = hif_scan(wvif, req, start_idx, i - start_idx); - if (timeout < 0) { + ret = hif_scan(wvif, req, start_idx, i - start_idx, &timeout); + if (ret) { wfx_tx_unlock(wvif->wdev); - return timeout; + return -EIO; } ret = wait_for_completion_timeout(&wvif->scan_complete, timeout); if (req->channels[start_idx]->max_power != wvif->vif->bss_conf.txpower) diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c index 9e124020519f..6c0e1b053126 100644 --- a/drivers/thermal/cpufreq_cooling.c +++ b/drivers/thermal/cpufreq_cooling.c @@ -123,12 +123,12 @@ static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev, { int i; - for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) { - if (power > cpufreq_cdev->em->table[i].power) + for (i = cpufreq_cdev->max_level; i >= 0; i--) { + if (power >= cpufreq_cdev->em->table[i].power) break; } - return cpufreq_cdev->em->table[i + 1].frequency; + return cpufreq_cdev->em->table[i].frequency; } /** diff --git a/drivers/thermal/imx_thermal.c 
b/drivers/thermal/imx_thermal.c index e761c9b42217..1b84ea674edb 100644 --- a/drivers/thermal/imx_thermal.c +++ b/drivers/thermal/imx_thermal.c @@ -649,7 +649,7 @@ MODULE_DEVICE_TABLE(of, of_imx_thermal_match); static int imx_thermal_register_legacy_cooling(struct imx_thermal_data *data) { struct device_node *np; - int ret; + int ret = 0; data->policy = cpufreq_cpu_get(0); if (!data->policy) { @@ -664,11 +664,12 @@ static int imx_thermal_register_legacy_cooling(struct imx_thermal_data *data) if (IS_ERR(data->cdev)) { ret = PTR_ERR(data->cdev); cpufreq_cpu_put(data->policy); - return ret; } } - return 0; + of_node_put(np); + + return ret; } static void imx_thermal_unregister_legacy_cooling(struct imx_thermal_data *data) diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c index 0b3a62655843..12448ccd27f1 100644 --- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c @@ -216,11 +216,16 @@ static int int3400_thermal_run_osc(acpi_handle handle, acpi_status status; int result = 0; struct acpi_osc_context context = { - .uuid_str = int3400_thermal_uuids[uuid], + .uuid_str = NULL, .rev = 1, .cap.length = 8, }; + if (uuid < 0 || uuid >= INT3400_THERMAL_MAXIMUM_UUID) + return -EINVAL; + + context.uuid_str = int3400_thermal_uuids[uuid]; + buf[OSC_QUERY_DWORD] = 0; buf[OSC_SUPPORT_DWORD] = enable; diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c index f86cbb125e2f..ec1d58c4ceaa 100644 --- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c @@ -74,7 +74,7 @@ static void int3403_notify(acpi_handle handle, THERMAL_TRIP_CHANGED); break; default: - dev_err(&priv->pdev->dev, "Unsupported event [0x%x]\n", event); + dev_dbg(&priv->pdev->dev, "Unsupported event [0x%x]\n", event); break; } } diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c index 76e30603d4d5..42c9cd0e5f77 100644 --- a/drivers/thermal/mtk_thermal.c +++ b/drivers/thermal/mtk_thermal.c @@ -211,6 +211,9 @@ enum { /* The total number of temperature sensors in the MT8183 */ #define MT8183_NUM_SENSORS 6 +/* The number of banks in the MT8183 */ +#define MT8183_NUM_ZONES 1 + /* The number of sensing points per bank */ #define MT8183_NUM_SENSORS_PER_ZONE 6 @@ -497,7 +500,7 @@ static const struct mtk_thermal_data mt7622_thermal_data = { */ static const struct mtk_thermal_data mt8183_thermal_data = { .auxadc_channel = MT8183_TEMP_AUXADC_CHANNEL, - .num_banks = MT8183_NUM_SENSORS_PER_ZONE, + .num_banks = MT8183_NUM_ZONES, .num_sensors = MT8183_NUM_SENSORS, .vts_index = mt8183_vts_index, .cali_val = MT8183_CALIBRATION, @@ -591,8 +594,7 @@ static int mtk_thermal_bank_temperature(struct mtk_thermal_bank *bank) u32 raw; for (i = 0; i < conf->bank_data[bank->id].num_sensors; i++) { - raw = readl(mt->thermal_base + - conf->msr[conf->bank_data[bank->id].sensors[i]]); + raw = readl(mt->thermal_base + conf->msr[i]); temp = raw_to_mcelsius(mt, conf->bank_data[bank->id].sensors[i], @@ -733,8 +735,7 @@ static void mtk_thermal_init_bank(struct mtk_thermal *mt, int num, for (i = 0; i < conf->bank_data[num].num_sensors; i++) writel(conf->sensor_mux_values[conf->bank_data[num].sensors[i]], - mt->thermal_base + - conf->adcpnp[conf->bank_data[num].sensors[i]]); + mt->thermal_base + conf->adcpnp[i]); writel((1 << conf->bank_data[num].num_sensors) - 1, controller_base 
+ TEMP_MONCTL0); diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c index 8d3e94d2a9ed..39c4462e38f6 100644 --- a/drivers/thermal/qcom/tsens.c +++ b/drivers/thermal/qcom/tsens.c @@ -382,7 +382,7 @@ static inline u32 masked_irq(u32 hw_id, u32 mask, enum tsens_ver ver) * * Return: IRQ_HANDLED */ -irqreturn_t tsens_critical_irq_thread(int irq, void *data) +static irqreturn_t tsens_critical_irq_thread(int irq, void *data) { struct tsens_priv *priv = data; struct tsens_irq_data d; @@ -452,7 +452,7 @@ irqreturn_t tsens_critical_irq_thread(int irq, void *data) * * Return: IRQ_HANDLED */ -irqreturn_t tsens_irq_thread(int irq, void *data) +static irqreturn_t tsens_irq_thread(int irq, void *data) { struct tsens_priv *priv = data; struct tsens_irq_data d; @@ -520,7 +520,7 @@ irqreturn_t tsens_irq_thread(int irq, void *data) return IRQ_HANDLED; } -int tsens_set_trips(void *_sensor, int low, int high) +static int tsens_set_trips(void *_sensor, int low, int high) { struct tsens_sensor *s = _sensor; struct tsens_priv *priv = s->priv; @@ -557,7 +557,7 @@ int tsens_set_trips(void *_sensor, int low, int high) return 0; } -int tsens_enable_irq(struct tsens_priv *priv) +static int tsens_enable_irq(struct tsens_priv *priv) { int ret; int val = tsens_version(priv) > VER_1_X ? 7 : 1; @@ -570,7 +570,7 @@ int tsens_enable_irq(struct tsens_priv *priv) return ret; } -void tsens_disable_irq(struct tsens_priv *priv) +static void tsens_disable_irq(struct tsens_priv *priv) { regmap_field_write(priv->rf[INT_EN], 0); } diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c index 58fe7c1ef00b..c48c5e9b8f20 100644 --- a/drivers/thermal/rcar_gen3_thermal.c +++ b/drivers/thermal/rcar_gen3_thermal.c @@ -167,7 +167,7 @@ static int rcar_gen3_thermal_get_temp(void *devdata, int *temp) { struct rcar_gen3_thermal_tsc *tsc = devdata; int mcelsius, val; - u32 reg; + int reg; /* Read register and convert to mili Celsius */ reg = rcar_gen3_thermal_read(tsc, REG_GEN3_TEMP) & CTEMP_MASK; diff --git a/drivers/thermal/sprd_thermal.c b/drivers/thermal/sprd_thermal.c index a340374e8c51..4cde70dcf655 100644 --- a/drivers/thermal/sprd_thermal.c +++ b/drivers/thermal/sprd_thermal.c @@ -348,8 +348,8 @@ static int sprd_thm_probe(struct platform_device *pdev) thm->var_data = pdata; thm->base = devm_platform_ioremap_resource(pdev, 0); - if (!thm->base) - return -ENOMEM; + if (IS_ERR(thm->base)) + return PTR_ERR(thm->base); thm->nr_sensors = of_get_child_count(np); if (thm->nr_sensors == 0 || thm->nr_sensors > SPRD_THM_MAX_SENSOR) { diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c index dbe90bcf4ad4..c144ca9b032c 100644 --- a/drivers/thunderbolt/tunnel.c +++ b/drivers/thunderbolt/tunnel.c @@ -913,21 +913,21 @@ struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down) * case. 
*/ path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1, - &tunnel->dst_port, "USB3 Up"); + &tunnel->dst_port, "USB3 Down"); if (!path) { /* Just disable the downstream port */ tb_usb3_port_enable(down, false); goto err_free; } - tunnel->paths[TB_USB3_PATH_UP] = path; - tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]); + tunnel->paths[TB_USB3_PATH_DOWN] = path; + tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]); path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL, - "USB3 Down"); + "USB3 Up"); if (!path) goto err_deactivate; - tunnel->paths[TB_USB3_PATH_DOWN] = path; - tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]); + tunnel->paths[TB_USB3_PATH_UP] = path; + tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]); /* Validate that the tunnel is complete */ if (!tb_port_is_usb3_up(tunnel->dst_port)) { diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c index a04f74d2e854..4df47d02b34b 100644 --- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c +++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c @@ -1215,7 +1215,12 @@ static int cpm_uart_init_port(struct device_node *np, pinfo->gpios[i] = NULL; - gpiod = devm_gpiod_get_index(dev, NULL, i, GPIOD_ASIS); + gpiod = devm_gpiod_get_index_optional(dev, NULL, i, GPIOD_ASIS); + + if (IS_ERR(gpiod)) { + ret = PTR_ERR(gpiod); + goto out_irq; + } if (gpiod) { if (i == GPIO_RTS || i == GPIO_DTR) @@ -1237,6 +1242,8 @@ static int cpm_uart_init_port(struct device_node *np, return cpm_uart_request_port(&pinfo->port); +out_irq: + irq_dispose_mapping(pinfo->port.irq); out_pram: cpm_uart_unmap_pram(pinfo, pram); out_mem: diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c index 5022447afa23..6004c0c1d173 100644 --- a/drivers/tty/serial/kgdb_nmi.c +++ b/drivers/tty/serial/kgdb_nmi.c @@ -50,7 +50,7 @@ static int kgdb_nmi_console_setup(struct console *co, char *options) * I/O utilities that messages sent to the console will automatically * be displayed on the dbg_io. 
*/ - dbg_io_ops->is_console = true; + dbg_io_ops->cons = co; return 0; } diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c index 41396982e9e0..84ffede27f23 100644 --- a/drivers/tty/serial/kgdboc.c +++ b/drivers/tty/serial/kgdboc.c @@ -45,7 +45,6 @@ static struct platform_device *kgdboc_pdev; #if IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) static struct kgdb_io kgdboc_earlycon_io_ops; -static struct console *earlycon; static int (*earlycon_orig_exit)(struct console *con); #endif /* IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) */ @@ -145,7 +144,7 @@ static void kgdboc_unregister_kbd(void) #if IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) static void cleanup_earlycon(void) { - if (earlycon) + if (kgdboc_earlycon_io_ops.cons) kgdb_unregister_io_module(&kgdboc_earlycon_io_ops); } #else /* !IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) */ @@ -178,7 +177,7 @@ static int configure_kgdboc(void) goto noconfig; } - kgdboc_io_ops.is_console = 0; + kgdboc_io_ops.cons = NULL; kgdb_tty_driver = NULL; kgdboc_use_kms = 0; @@ -198,7 +197,7 @@ static int configure_kgdboc(void) int idx; if (cons->device && cons->device(cons, &idx) == p && idx == tty_line) { - kgdboc_io_ops.is_console = 1; + kgdboc_io_ops.cons = cons; break; } } @@ -433,7 +432,8 @@ static int kgdboc_earlycon_get_char(void) { char c; - if (!earlycon->read(earlycon, &c, 1)) + if (!kgdboc_earlycon_io_ops.cons->read(kgdboc_earlycon_io_ops.cons, + &c, 1)) return NO_POLL_CHAR; return c; @@ -441,7 +441,8 @@ static int kgdboc_earlycon_get_char(void) static void kgdboc_earlycon_put_char(u8 chr) { - earlycon->write(earlycon, &chr, 1); + kgdboc_earlycon_io_ops.cons->write(kgdboc_earlycon_io_ops.cons, &chr, + 1); } static void kgdboc_earlycon_pre_exp_handler(void) @@ -461,7 +462,7 @@ static void kgdboc_earlycon_pre_exp_handler(void) * boot if we detect this case. */ for_each_console(con) - if (con == earlycon) + if (con == kgdboc_earlycon_io_ops.cons) return; already_warned = true; @@ -484,25 +485,25 @@ static int kgdboc_earlycon_deferred_exit(struct console *con) static void kgdboc_earlycon_deinit(void) { - if (!earlycon) + if (!kgdboc_earlycon_io_ops.cons) return; - if (earlycon->exit == kgdboc_earlycon_deferred_exit) + if (kgdboc_earlycon_io_ops.cons->exit == kgdboc_earlycon_deferred_exit) /* * kgdboc_earlycon is exiting but original boot console exit * was never called (AKA kgdboc_earlycon_deferred_exit() * didn't ever run). Undo our trap. */ - earlycon->exit = earlycon_orig_exit; - else if (earlycon->exit) + kgdboc_earlycon_io_ops.cons->exit = earlycon_orig_exit; + else if (kgdboc_earlycon_io_ops.cons->exit) /* * We skipped calling the exit() routine so we could try to * keep using the boot console even after it went away. We're * finally done so call the function now. 
*/ - earlycon->exit(earlycon); + kgdboc_earlycon_io_ops.cons->exit(kgdboc_earlycon_io_ops.cons); - earlycon = NULL; + kgdboc_earlycon_io_ops.cons = NULL; } static struct kgdb_io kgdboc_earlycon_io_ops = { @@ -511,7 +512,6 @@ static struct kgdb_io kgdboc_earlycon_io_ops = { .write_char = kgdboc_earlycon_put_char, .pre_exception = kgdboc_earlycon_pre_exp_handler, .deinit = kgdboc_earlycon_deinit, - .is_console = true, }; #define MAX_CONSOLE_NAME_LEN (sizeof((struct console *) 0)->name) @@ -557,10 +557,10 @@ static int __init kgdboc_earlycon_init(char *opt) goto unlock; } - earlycon = con; + kgdboc_earlycon_io_ops.cons = con; pr_info("Going to register kgdb with earlycon '%s'\n", con->name); if (kgdb_register_io_module(&kgdboc_earlycon_io_ops) != 0) { - earlycon = NULL; + kgdboc_earlycon_io_ops.cons = NULL; pr_info("Failed to register kgdb with earlycon\n"); } else { /* Trap exit so we can keep earlycon longer if needed. */ diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c index b4f835e7de23..b784323a6a7b 100644 --- a/drivers/tty/serial/mxs-auart.c +++ b/drivers/tty/serial/mxs-auart.c @@ -1698,21 +1698,21 @@ static int mxs_auart_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { ret = irq; - goto out_disable_clks; + goto out_iounmap; } s->port.irq = irq; ret = devm_request_irq(&pdev->dev, irq, mxs_auart_irq_handle, 0, dev_name(&pdev->dev), s); if (ret) - goto out_disable_clks; + goto out_iounmap; platform_set_drvdata(pdev, s); ret = mxs_auart_init_gpios(s, &pdev->dev); if (ret) { dev_err(&pdev->dev, "Failed to initialize GPIOs.\n"); - goto out_disable_clks; + goto out_iounmap; } /* @@ -1720,7 +1720,7 @@ static int mxs_auart_probe(struct platform_device *pdev) */ ret = mxs_auart_request_gpio_irq(s); if (ret) - goto out_disable_clks; + goto out_iounmap; auart_port[s->port.line] = s; @@ -1746,6 +1746,9 @@ out_free_qpio_irq: mxs_auart_free_gpio_irq(s); auart_port[pdev->id] = NULL; +out_iounmap: + iounmap(s->port.membase); + out_disable_clks: if (is_asm9260_auart(s)) { clk_disable_unprepare(s->clk); @@ -1761,6 +1764,7 @@ static int mxs_auart_remove(struct platform_device *pdev) uart_remove_one_port(&auart_driver, &s->port); auart_port[pdev->id] = NULL; mxs_auart_free_gpio_irq(s); + iounmap(s->port.membase); if (is_asm9260_auart(s)) { clk_disable_unprepare(s->clk); clk_disable_unprepare(s->clk_ahb); diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 57840cf90388..5f3daabdc916 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -41,8 +41,6 @@ static struct lock_class_key port_lock_key; #define HIGH_BITS_OFFSET ((sizeof(long)-sizeof(int))*8) -#define SYSRQ_TIMEOUT (HZ * 5) - static void uart_change_speed(struct tty_struct *tty, struct uart_state *state, struct ktermios *old_termios); static void uart_wait_until_sent(struct tty_struct *tty, int timeout); @@ -1916,6 +1914,12 @@ static inline bool uart_console_enabled(struct uart_port *port) return uart_console(port) && (port->cons->flags & CON_ENABLED); } +static void __uart_port_spin_lock_init(struct uart_port *port) +{ + spin_lock_init(&port->lock); + lockdep_set_class(&port->lock, &port_lock_key); +} + /* * Ensure that the serial console lock is initialised early. * If this port is a console, then the spinlock is already initialised. 
@@ -1925,8 +1929,7 @@ static inline void uart_port_spin_lock_init(struct uart_port *port) if (uart_console(port)) return; - spin_lock_init(&port->lock); - lockdep_set_class(&port->lock, &port_lock_key); + __uart_port_spin_lock_init(port); } #if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL) @@ -2373,6 +2376,13 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state, uart_change_pm(state, UART_PM_STATE_ON); /* + * If this driver supports console, and it hasn't been + * successfully registered yet, initialise spin lock for it. + */ + if (port->cons && !(port->cons->flags & CON_ENABLED)) + __uart_port_spin_lock_init(port); + + /* * Ensure that the modem control lines are de-activated. * keep the DTR setting that is set in uart_set_options() * We probably don't need a spinlock around this, but @@ -3163,7 +3173,7 @@ static DECLARE_WORK(sysrq_enable_work, uart_sysrq_on); * Returns false if @ch is out of enabling sequence and should be * handled some other way, true if @ch was consumed. */ -static bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch) +bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch) { int sysrq_toggle_seq_len = strlen(sysrq_toggle_seq); @@ -3186,99 +3196,9 @@ static bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch) port->sysrq = 0; return true; } -#else -static inline bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch) -{ - return false; -} +EXPORT_SYMBOL_GPL(uart_try_toggle_sysrq); #endif -int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) -{ - if (!IS_ENABLED(CONFIG_MAGIC_SYSRQ_SERIAL)) - return 0; - - if (!port->has_sysrq || !port->sysrq) - return 0; - - if (ch && time_before(jiffies, port->sysrq)) { - if (sysrq_mask()) { - handle_sysrq(ch); - port->sysrq = 0; - return 1; - } - if (uart_try_toggle_sysrq(port, ch)) - return 1; - } - port->sysrq = 0; - - return 0; -} -EXPORT_SYMBOL_GPL(uart_handle_sysrq_char); - -int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch) -{ - if (!IS_ENABLED(CONFIG_MAGIC_SYSRQ_SERIAL)) - return 0; - - if (!port->has_sysrq || !port->sysrq) - return 0; - - if (ch && time_before(jiffies, port->sysrq)) { - if (sysrq_mask()) { - port->sysrq_ch = ch; - port->sysrq = 0; - return 1; - } - if (uart_try_toggle_sysrq(port, ch)) - return 1; - } - port->sysrq = 0; - - return 0; -} -EXPORT_SYMBOL_GPL(uart_prepare_sysrq_char); - -void uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long flags) -__releases(&port->lock) -{ - if (port->has_sysrq) { - int sysrq_ch = port->sysrq_ch; - - port->sysrq_ch = 0; - spin_unlock_irqrestore(&port->lock, flags); - if (sysrq_ch) - handle_sysrq(sysrq_ch); - } else { - spin_unlock_irqrestore(&port->lock, flags); - } -} -EXPORT_SYMBOL_GPL(uart_unlock_and_check_sysrq); - -/* - * We do the SysRQ and SAK checking like this... 
- */ -int uart_handle_break(struct uart_port *port) -{ - struct uart_state *state = port->state; - - if (port->handle_break) - port->handle_break(port); - - if (port->has_sysrq && uart_console(port)) { - if (!port->sysrq) { - port->sysrq = jiffies + SYSRQ_TIMEOUT; - return 1; - } - port->sysrq = 0; - } - - if (port->flags & UPF_SAK) - do_SAK(state->port.tty); - return 0; -} -EXPORT_SYMBOL_GPL(uart_handle_break); - EXPORT_SYMBOL(uart_write_wakeup); EXPORT_SYMBOL(uart_register_driver); EXPORT_SYMBOL(uart_unregister_driver); @@ -3289,8 +3209,7 @@ EXPORT_SYMBOL(uart_remove_one_port); /** * uart_get_rs485_mode() - retrieve rs485 properties for given uart - * @dev: uart device - * @rs485conf: output parameter + * @port: uart device's target port * * This function implements the device tree binding described in * Documentation/devicetree/bindings/serial/rs485.txt. diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index e1179e74a2b8..204bb68ce3ca 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -3301,6 +3301,9 @@ static int sci_probe_single(struct platform_device *dev, sciport->port.flags |= UPF_HARD_FLOW; } + if (sci_uart_driver.cons->index == sciport->port.line) + spin_lock_init(&sciport->port.lock); + ret = uart_add_one_port(&sci_uart_driver, &sciport->port); if (ret) { sci_cleanup_single(sciport); diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index b9d672af8b65..672cfa075e28 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -1465,7 +1465,6 @@ static int cdns_uart_probe(struct platform_device *pdev) cdns_uart_uart_driver.nr = CDNS_UART_NR_PORTS; #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE cdns_uart_uart_driver.cons = &cdns_uart_console; - cdns_uart_console.index = id; #endif rc = uart_register_driver(&cdns_uart_uart_driver); diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c index ae319ef3a832..b60173bc93ce 100644 --- a/drivers/uio/uio_pdrv_genirq.c +++ b/drivers/uio/uio_pdrv_genirq.c @@ -159,9 +159,9 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev) priv->pdev = pdev; if (!uioinfo->irq) { - ret = platform_get_irq(pdev, 0); + ret = platform_get_irq_optional(pdev, 0); uioinfo->irq = ret; - if (ret == -ENXIO && pdev->dev.of_node) + if (ret == -ENXIO) uioinfo->irq = UIO_IRQ_NONE; else if (ret == -EPROBE_DEFER) return ret; diff --git a/drivers/usb/c67x00/c67x00-sched.c b/drivers/usb/c67x00/c67x00-sched.c index 633c52de3bb3..9865750bc31e 100644 --- a/drivers/usb/c67x00/c67x00-sched.c +++ b/drivers/usb/c67x00/c67x00-sched.c @@ -486,7 +486,7 @@ c67x00_giveback_urb(struct c67x00_hcd *c67x00, struct urb *urb, int status) c67x00_release_urb(c67x00, urb); usb_hcd_unlink_urb_from_ep(c67x00_hcd_to_hcd(c67x00), urb); spin_unlock(&c67x00->lock); - usb_hcd_giveback_urb(c67x00_hcd_to_hcd(c67x00), urb, urbp->status); + usb_hcd_giveback_urb(c67x00_hcd_to_hcd(c67x00), urb, status); spin_lock(&c67x00->lock); } diff --git a/drivers/usb/cdns3/ep0.c b/drivers/usb/cdns3/ep0.c index 82645a2a0f52..5aa69980e7ff 100644 --- a/drivers/usb/cdns3/ep0.c +++ b/drivers/usb/cdns3/ep0.c @@ -37,18 +37,18 @@ static void cdns3_ep0_run_transfer(struct cdns3_device *priv_dev, struct cdns3_usb_regs __iomem *regs = priv_dev->regs; struct cdns3_endpoint *priv_ep = priv_dev->eps[0]; - priv_ep->trb_pool[0].buffer = TRB_BUFFER(dma_addr); - priv_ep->trb_pool[0].length = TRB_LEN(length); + priv_ep->trb_pool[0].buffer = cpu_to_le32(TRB_BUFFER(dma_addr)); + 
priv_ep->trb_pool[0].length = cpu_to_le32(TRB_LEN(length)); if (zlp) { - priv_ep->trb_pool[0].control = TRB_CYCLE | TRB_TYPE(TRB_NORMAL); - priv_ep->trb_pool[1].buffer = TRB_BUFFER(dma_addr); - priv_ep->trb_pool[1].length = TRB_LEN(0); - priv_ep->trb_pool[1].control = TRB_CYCLE | TRB_IOC | - TRB_TYPE(TRB_NORMAL); + priv_ep->trb_pool[0].control = cpu_to_le32(TRB_CYCLE | TRB_TYPE(TRB_NORMAL)); + priv_ep->trb_pool[1].buffer = cpu_to_le32(TRB_BUFFER(dma_addr)); + priv_ep->trb_pool[1].length = cpu_to_le32(TRB_LEN(0)); + priv_ep->trb_pool[1].control = cpu_to_le32(TRB_CYCLE | TRB_IOC | + TRB_TYPE(TRB_NORMAL)); } else { - priv_ep->trb_pool[0].control = TRB_CYCLE | TRB_IOC | - TRB_TYPE(TRB_NORMAL); + priv_ep->trb_pool[0].control = cpu_to_le32(TRB_CYCLE | TRB_IOC | + TRB_TYPE(TRB_NORMAL)); priv_ep->trb_pool[1].control = 0; } @@ -264,11 +264,11 @@ static int cdns3_req_ep0_get_status(struct cdns3_device *priv_dev, case USB_RECIP_INTERFACE: return cdns3_ep0_delegate_req(priv_dev, ctrl); case USB_RECIP_ENDPOINT: - index = cdns3_ep_addr_to_index(ctrl->wIndex); + index = cdns3_ep_addr_to_index(le16_to_cpu(ctrl->wIndex)); priv_ep = priv_dev->eps[index]; /* check if endpoint is stalled or stall is pending */ - cdns3_select_ep(priv_dev, ctrl->wIndex); + cdns3_select_ep(priv_dev, le16_to_cpu(ctrl->wIndex)); if (EP_STS_STALL(readl(&priv_dev->regs->ep_sts)) || (priv_ep->flags & EP_STALL_PENDING)) usb_status = BIT(USB_ENDPOINT_HALT); @@ -327,7 +327,8 @@ static int cdns3_ep0_feature_handle_device(struct cdns3_device *priv_dev, if (!set || (tmode & 0xff) != 0) return -EINVAL; - switch (tmode >> 8) { + tmode >>= 8; + switch (tmode) { case TEST_J: case TEST_K: case TEST_SE0_NAK: @@ -380,10 +381,10 @@ static int cdns3_ep0_feature_handle_endpoint(struct cdns3_device *priv_dev, if (!(ctrl->wIndex & ~USB_DIR_IN)) return 0; - index = cdns3_ep_addr_to_index(ctrl->wIndex); + index = cdns3_ep_addr_to_index(le16_to_cpu(ctrl->wIndex)); priv_ep = priv_dev->eps[index]; - cdns3_select_ep(priv_dev, ctrl->wIndex); + cdns3_select_ep(priv_dev, le16_to_cpu(ctrl->wIndex)); if (set) __cdns3_gadget_ep_set_halt(priv_ep); @@ -444,7 +445,7 @@ static int cdns3_req_ep0_set_sel(struct cdns3_device *priv_dev, if (priv_dev->gadget.state < USB_STATE_ADDRESS) return -EINVAL; - if (ctrl_req->wLength != 6) { + if (le16_to_cpu(ctrl_req->wLength) != 6) { dev_err(priv_dev->dev, "Set SEL should be 6 bytes, got %d\n", ctrl_req->wLength); return -EINVAL; @@ -468,7 +469,7 @@ static int cdns3_req_ep0_set_isoch_delay(struct cdns3_device *priv_dev, if (ctrl_req->wIndex || ctrl_req->wLength) return -EINVAL; - priv_dev->isoch_delay = ctrl_req->wValue; + priv_dev->isoch_delay = le16_to_cpu(ctrl_req->wValue); return 0; } @@ -704,15 +705,17 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep, int ret = 0; u8 zlp = 0; + spin_lock_irqsave(&priv_dev->lock, flags); trace_cdns3_ep0_queue(priv_dev, request); /* cancel the request if controller receive new SETUP packet. */ - if (cdns3_check_new_setup(priv_dev)) + if (cdns3_check_new_setup(priv_dev)) { + spin_unlock_irqrestore(&priv_dev->lock, flags); return -ECONNRESET; + } /* send STATUS stage. 
Should be called only for SET_CONFIGURATION */ if (priv_dev->ep0_stage == CDNS3_STATUS_STAGE) { - spin_lock_irqsave(&priv_dev->lock, flags); cdns3_select_ep(priv_dev, 0x00); erdy_sent = !priv_dev->hw_configured_flag; @@ -737,7 +740,6 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep, return 0; } - spin_lock_irqsave(&priv_dev->lock, flags); if (!list_empty(&priv_ep->pending_req_list)) { dev_err(priv_dev->dev, "can't handle multiple requests for ep0\n"); diff --git a/drivers/usb/cdns3/trace.h b/drivers/usb/cdns3/trace.h index 8d121e207fd8..0a2a3269bfac 100644 --- a/drivers/usb/cdns3/trace.h +++ b/drivers/usb/cdns3/trace.h @@ -156,7 +156,7 @@ DECLARE_EVENT_CLASS(cdns3_log_ep0_irq, __dynamic_array(char, str, CDNS3_MSG_MAX) ), TP_fast_assign( - __entry->ep_dir = priv_dev->ep0_data_dir; + __entry->ep_dir = priv_dev->selected_ep; __entry->ep_sts = ep_sts; ), TP_printk("%s", cdns3_decode_ep0_irq(__get_str(str), @@ -404,9 +404,9 @@ DECLARE_EVENT_CLASS(cdns3_log_trb, TP_fast_assign( __assign_str(name, priv_ep->name); __entry->trb = trb; - __entry->buffer = trb->buffer; - __entry->length = trb->length; - __entry->control = trb->control; + __entry->buffer = le32_to_cpu(trb->buffer); + __entry->length = le32_to_cpu(trb->length); + __entry->control = le32_to_cpu(trb->control); __entry->type = usb_endpoint_type(priv_ep->endpoint.desc); __entry->last_stream_id = priv_ep->last_stream_id; ), diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 9a7c53d09ab4..bb133245beed 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -1243,6 +1243,29 @@ static void ci_controller_suspend(struct ci_hdrc *ci) enable_irq(ci->irq); } +/* + * Handle the wakeup interrupt triggered by extcon connector + * We need to call ci_irq again for extcon since the first + * interrupt (wakeup int) only let the controller be out of + * low power mode, but not handle any interrupts. + */ +static void ci_extcon_wakeup_int(struct ci_hdrc *ci) +{ + struct ci_hdrc_cable *cable_id, *cable_vbus; + u32 otgsc = hw_read_otgsc(ci, ~0); + + cable_id = &ci->platdata->id_extcon; + cable_vbus = &ci->platdata->vbus_extcon; + + if (!IS_ERR(cable_id->edev) && ci->is_otg && + (otgsc & OTGSC_IDIE) && (otgsc & OTGSC_IDIS)) + ci_irq(ci->irq, ci); + + if (!IS_ERR(cable_vbus->edev) && ci->is_otg && + (otgsc & OTGSC_BSVIE) && (otgsc & OTGSC_BSVIS)) + ci_irq(ci->irq, ci); +} + static int ci_controller_resume(struct device *dev) { struct ci_hdrc *ci = dev_get_drvdata(dev); @@ -1275,6 +1298,7 @@ static int ci_controller_resume(struct device *dev) enable_irq(ci->irq); if (ci_otg_is_fsm_mode(ci)) ci_otg_fsm_wakeup_by_srp(ci); + ci_extcon_wakeup_int(ci); } return 0; diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index f67088bb8218..d5187b50fc82 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1689,6 +1689,8 @@ static int acm_pre_reset(struct usb_interface *intf) static const struct usb_device_id acm_ids[] = { /* quirky and broken devices */ + { USB_DEVICE(0x0424, 0x274e), /* Microchip Technology, Inc. 
(formerly SMSC) */ + .driver_info = DISABLE_ECHO, }, /* DISABLE ECHO in termios flag */ { USB_DEVICE(0x076d, 0x0006), /* Denso Cradle CU-321 */ .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */ { USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */ diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 3e8efe759c3e..e0b77674869c 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -218,11 +218,12 @@ static const struct usb_device_id usb_quirk_list[] = { /* Logitech HD Webcam C270 */ { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME }, - /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */ + /* Logitech HD Pro Webcams C920, C920-C, C922, C925e and C930e */ { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT }, + { USB_DEVICE(0x046d, 0x085c), .driver_info = USB_QUIRK_DELAY_INIT }, /* Logitech ConferenceCam CC3000e */ { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT }, diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 12b98b466287..7faf5f8c056d 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -4920,12 +4920,6 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg) epnum, 0); } - ret = usb_add_gadget_udc(dev, &hsotg->gadget); - if (ret) { - dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, - hsotg->ctrl_req); - return ret; - } dwc2_hsotg_dump(hsotg); return 0; diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c index e571c8ae65ec..cb8ddbd53718 100644 --- a/drivers/usb/dwc2/platform.c +++ b/drivers/usb/dwc2/platform.c @@ -342,7 +342,8 @@ static void dwc2_driver_shutdown(struct platform_device *dev) { struct dwc2_hsotg *hsotg = platform_get_drvdata(dev); - disable_irq(hsotg->irq); + dwc2_disable_global_interrupts(hsotg); + synchronize_irq(hsotg->irq); } /** @@ -575,6 +576,17 @@ static int dwc2_driver_probe(struct platform_device *dev) if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) dwc2_lowlevel_hw_disable(hsotg); +#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \ + IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) + /* Postponed adding a new gadget to the udc class driver list */ + if (hsotg->gadget_enabled) { + retval = usb_add_gadget_udc(hsotg->dev, &hsotg->gadget); + if (retval) { + dwc2_hsotg_remove(hsotg); + goto error_init; + } + } +#endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */ return 0; error_init: diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c index 48b68b6f0dc8..90bb022737da 100644 --- a/drivers/usb/dwc3/dwc3-exynos.c +++ b/drivers/usb/dwc3/dwc3-exynos.c @@ -162,12 +162,6 @@ static const struct dwc3_exynos_driverdata exynos5250_drvdata = { .suspend_clk_idx = -1, }; -static const struct dwc3_exynos_driverdata exynos5420_drvdata = { - .clk_names = { "usbdrd30", "usbdrd30_susp_clk"}, - .num_clks = 2, - .suspend_clk_idx = 1, -}; - static const struct dwc3_exynos_driverdata exynos5433_drvdata = { .clk_names = { "aclk", "susp_clk", "pipe_pclk", "phyclk" }, .num_clks = 4, @@ -185,9 +179,6 @@ static const struct of_device_id exynos_dwc3_match[] = { .compatible = "samsung,exynos5250-dwusb3", .data = &exynos5250_drvdata, }, { - .compatible = "samsung,exynos5420-dwusb3", - .data = &exynos5420_drvdata, - }, { .compatible = "samsung,exynos5433-dwusb3", .data = &exynos5433_drvdata, }, { diff 
--git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index b67372737dc9..139474c3e77b 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -38,6 +38,8 @@ #define PCI_DEVICE_ID_INTEL_ICLLP 0x34ee #define PCI_DEVICE_ID_INTEL_EHLLP 0x4b7e #define PCI_DEVICE_ID_INTEL_TGPLP 0xa0ee +#define PCI_DEVICE_ID_INTEL_TGPH 0x43ee +#define PCI_DEVICE_ID_INTEL_JSP 0x4dee #define PCI_INTEL_BXT_DSM_GUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511" #define PCI_INTEL_BXT_FUNC_PMU_PWR 4 @@ -206,8 +208,10 @@ static void dwc3_pci_resume_work(struct work_struct *work) int ret; ret = pm_runtime_get_sync(&dwc3->dev); - if (ret) + if (ret) { + pm_runtime_put_sync_autosuspend(&dwc3->dev); return; + } pm_runtime_mark_last_busy(&dwc3->dev); pm_runtime_put_sync_autosuspend(&dwc3->dev); @@ -356,6 +360,12 @@ static const struct pci_device_id dwc3_pci_id_table[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGPLP), (kernel_ulong_t) &dwc3_pci_intel_properties, }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGPH), + (kernel_ulong_t) &dwc3_pci_intel_properties, }, + + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_JSP), + (kernel_ulong_t) &dwc3_pci_intel_properties, }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_NL_USB), (kernel_ulong_t) &dwc3_pci_amd_properties, }, { } /* Terminating Entry */ diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c index ea0d531c63e2..775cf70cfb3e 100644 --- a/drivers/usb/early/ehci-dbgp.c +++ b/drivers/usb/early/ehci-dbgp.c @@ -1058,7 +1058,8 @@ static int __init kgdbdbgp_parse_config(char *str) kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10); } kgdb_register_io_module(&kgdbdbgp_io_ops); - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1; + if (early_dbgp_console.index != -1) + kgdbdbgp_io_ops.cons = &early_dbgp_console; return 0; } diff --git a/drivers/usb/gadget/function/f_uac1_legacy.c b/drivers/usb/gadget/function/f_uac1_legacy.c index 349deae7cabd..e2d7f69128a0 100644 --- a/drivers/usb/gadget/function/f_uac1_legacy.c +++ b/drivers/usb/gadget/function/f_uac1_legacy.c @@ -336,7 +336,9 @@ static int f_audio_out_ep_complete(struct usb_ep *ep, struct usb_request *req) /* Copy buffer is full, add it to the play_queue */ if (audio_buf_size - copy_buf->actual < req->actual) { + spin_lock_irq(&audio->lock); list_add_tail(©_buf->list, &audio->play_queue); + spin_unlock_irq(&audio->lock); schedule_work(&audio->playback_work); copy_buf = f_audio_buffer_alloc(audio_buf_size); if (IS_ERR(copy_buf)) diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index d69f61ff0181..9342a3d24963 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c @@ -676,13 +676,7 @@ static int usba_ep_disable(struct usb_ep *_ep) if (!ep->ep.desc) { spin_unlock_irqrestore(&udc->lock, flags); - /* REVISIT because this driver disables endpoints in - * reset_all_endpoints() before calling disconnect(), - * most gadget drivers would trigger this non-error ... 
- */ - if (udc->gadget.speed != USB_SPEED_UNKNOWN) - DBG(DBG_ERR, "ep_disable: %s not enabled\n", - ep->ep.name); + DBG(DBG_ERR, "ep_disable: %s not enabled\n", ep->ep.name); return -EINVAL; } ep->ep.desc = NULL; @@ -871,7 +865,7 @@ static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) u32 status; DBG(DBG_GADGET | DBG_QUEUE, "ep_dequeue: %s, req %p\n", - ep->ep.name, req); + ep->ep.name, _req); spin_lock_irqsave(&udc->lock, flags); diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c index 7164ad9800f1..7419889ebe9a 100644 --- a/drivers/usb/gadget/udc/gr_udc.c +++ b/drivers/usb/gadget/udc/gr_udc.c @@ -1980,9 +1980,12 @@ static int gr_ep_init(struct gr_udc *dev, int num, int is_in, u32 maxplimit) if (num == 0) { _req = gr_alloc_request(&ep->ep, GFP_ATOMIC); + if (!_req) + return -ENOMEM; + buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_ATOMIC); - if (!_req || !buf) { - /* possible _req freed by gr_probe via gr_remove */ + if (!buf) { + gr_free_request(&ep->ep, _req); return -ENOMEM; } diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c index cafde053788b..80a1b52c656e 100644 --- a/drivers/usb/gadget/udc/mv_udc_core.c +++ b/drivers/usb/gadget/udc/mv_udc_core.c @@ -2313,7 +2313,8 @@ static int mv_udc_probe(struct platform_device *pdev) return 0; err_create_workqueue: - destroy_workqueue(udc->qwork); + if (udc->qwork) + destroy_workqueue(udc->qwork); err_destroy_dma: dma_pool_destroy(udc->dtd_pool); err_free_dma: diff --git a/drivers/usb/gadget/usbstring.c b/drivers/usb/gadget/usbstring.c index 58a4d3325090..119505fac777 100644 --- a/drivers/usb/gadget/usbstring.c +++ b/drivers/usb/gadget/usbstring.c @@ -68,7 +68,7 @@ EXPORT_SYMBOL_GPL(usb_gadget_get_string); /** * usb_validate_langid - validate usb language identifiers - * @lang: usb language identifier + * @langid: usb language identifier * * Returns true for valid language identifier, otherwise false. 
*/ diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c index a4e9abcbdc4f..1a9b7572e17f 100644 --- a/drivers/usb/host/ehci-exynos.c +++ b/drivers/usb/host/ehci-exynos.c @@ -203,9 +203,8 @@ static int exynos_ehci_probe(struct platform_device *pdev) hcd->rsrc_len = resource_size(res); irq = platform_get_irq(pdev, 0); - if (!irq) { - dev_err(&pdev->dev, "Failed to get IRQ\n"); - err = -ENODEV; + if (irq < 0) { + err = irq; goto fail_io; } diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index 3c3820ad9092..af3c1b9b38b2 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -216,6 +216,13 @@ static int ehci_pci_setup(struct usb_hcd *hcd) ehci_info(ehci, "applying MosChip frame-index workaround\n"); ehci->frame_index_bug = 1; break; + case PCI_VENDOR_ID_HUAWEI: + /* Synopsys HC bug */ + if (pdev->device == 0xa239) { + ehci_info(ehci, "applying Synopsys HC workaround\n"); + ehci->has_synopsys_hc_bug = 1; + } + break; } /* optional debug port, normally in the first BAR */ diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c index cff965240327..b91d50da6127 100644 --- a/drivers/usb/host/ohci-sm501.c +++ b/drivers/usb/host/ohci-sm501.c @@ -191,6 +191,7 @@ static int ohci_hcd_sm501_drv_remove(struct platform_device *pdev) struct resource *mem; usb_remove_hcd(hcd); + iounmap(hcd->regs); release_mem_region(hcd->rsrc_start, hcd->rsrc_len); usb_put_hcd(hcd); mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c index bfbdb3ceed29..4311d4c9b68d 100644 --- a/drivers/usb/host/xhci-mtk.c +++ b/drivers/usb/host/xhci-mtk.c @@ -587,6 +587,9 @@ static int xhci_mtk_remove(struct platform_device *dev) struct xhci_hcd *xhci = hcd_to_xhci(hcd); struct usb_hcd *shared_hcd = xhci->shared_hcd; + pm_runtime_put_noidle(&dev->dev); + pm_runtime_disable(&dev->dev); + usb_remove_hcd(shared_hcd); xhci->shared_hcd = NULL; device_init_wakeup(&dev->dev, false); @@ -597,8 +600,6 @@ static int xhci_mtk_remove(struct platform_device *dev) xhci_mtk_sch_exit(mtk); xhci_mtk_clks_disable(mtk); xhci_mtk_ldos_disable(mtk); - pm_runtime_put_sync(&dev->dev); - pm_runtime_disable(&dev->dev); return 0; } diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index bee5deccc83d..ed468eed299c 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -1430,6 +1430,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, xhci->devs[slot_id]->out_ctx, ep_index); ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); + ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);/* must clear */ ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK); ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size)); @@ -4390,6 +4391,9 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, int hird, exit_latency; int ret; + if (xhci->quirks & XHCI_HW_LPM_DISABLE) + return -EPERM; + if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support || !udev->lpm_capable) return -EPERM; @@ -4412,7 +4416,7 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n", enable ? 
"enable" : "disable", port_num + 1); - if (enable && !(xhci->quirks & XHCI_HW_LPM_DISABLE)) { + if (enable) { /* Host supports BESL timeout instead of HIRD */ if (udev->usb2_hw_lpm_besl_capable) { /* if device doesn't have a preferred BESL value use a @@ -4471,6 +4475,9 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, mutex_lock(hcd->bandwidth_mutex); xhci_change_max_exit_latency(xhci, udev, 0); mutex_unlock(hcd->bandwidth_mutex); + readl_poll_timeout(ports[port_num]->addr, pm_val, + (pm_val & PORT_PLS_MASK) == XDEV_U0, + 100, 10000); return 0; } } diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 2c6c4f8d1ee1..c295e8a7f5ae 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -716,7 +716,7 @@ struct xhci_ep_ctx { * 4 - TRB error * 5-7 - reserved */ -#define EP_STATE_MASK (0xf) +#define EP_STATE_MASK (0x7) #define EP_STATE_DISABLED 0 #define EP_STATE_RUNNING 1 #define EP_STATE_HALTED 2 diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index 98ada1a3425c..bae88893ee8e 100644 --- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c @@ -2873,6 +2873,7 @@ static void usbtest_disconnect(struct usb_interface *intf) usb_set_intfdata(intf, NULL); dev_dbg(&intf->dev, "disconnect\n"); + kfree(dev->buf); kfree(dev); } diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c index cffe2aced488..03a333797382 100644 --- a/drivers/usb/phy/phy-tegra-usb.c +++ b/drivers/usb/phy/phy-tegra-usb.c @@ -1199,11 +1199,7 @@ static int tegra_usb_phy_probe(struct platform_device *pdev) platform_set_drvdata(pdev, tegra_phy); - err = usb_add_phy_dev(&tegra_phy->u_phy); - if (err) - return err; - - return 0; + return usb_add_phy_dev(&tegra_phy->u_phy); } static int tegra_usb_phy_remove(struct platform_device *pdev) diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index 01c6a48c41bc..ac9a81ae8216 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c @@ -803,7 +803,8 @@ static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map) return info->dma_map_ctrl(chan->device->dev, pkt, map); } -static void usbhsf_dma_complete(void *arg); +static void usbhsf_dma_complete(void *arg, + const struct dmaengine_result *result); static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt) { struct usbhs_pipe *pipe = pkt->pipe; @@ -813,6 +814,7 @@ static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt) struct dma_chan *chan; struct device *dev = usbhs_priv_to_dev(priv); enum dma_transfer_direction dir; + dma_cookie_t cookie; fifo = usbhs_pipe_to_fifo(pipe); if (!fifo) @@ -827,11 +829,11 @@ static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt) if (!desc) return; - desc->callback = usbhsf_dma_complete; - desc->callback_param = pipe; + desc->callback_result = usbhsf_dma_complete; + desc->callback_param = pkt; - pkt->cookie = dmaengine_submit(desc); - if (pkt->cookie < 0) { + cookie = dmaengine_submit(desc); + if (cookie < 0) { dev_err(dev, "Failed to submit dma descriptor\n"); return; } @@ -1152,12 +1154,10 @@ static size_t usbhs_dma_calc_received_size(struct usbhs_pkt *pkt, struct dma_chan *chan, int dtln) { struct usbhs_pipe *pipe = pkt->pipe; - struct dma_tx_state state; size_t received_size; int maxp = usbhs_pipe_get_maxpacket(pipe); - dmaengine_tx_status(chan, pkt->cookie, &state); - received_size = pkt->length - state.residue; + received_size = pkt->length - pkt->dma_result->residue; if (dtln) { received_size -= USBHS_USB_DMAC_XFER_SIZE; @@ -1363,13 +1363,16 
@@ static int usbhsf_irq_ready(struct usbhs_priv *priv, return 0; } -static void usbhsf_dma_complete(void *arg) +static void usbhsf_dma_complete(void *arg, + const struct dmaengine_result *result) { - struct usbhs_pipe *pipe = arg; + struct usbhs_pkt *pkt = arg; + struct usbhs_pipe *pipe = pkt->pipe; struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); struct device *dev = usbhs_priv_to_dev(priv); int ret; + pkt->dma_result = result; ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_DMA_DONE); if (ret < 0) dev_err(dev, "dma_complete run_error %d : %d\n", diff --git a/drivers/usb/renesas_usbhs/fifo.h b/drivers/usb/renesas_usbhs/fifo.h index 7d3700bf41d9..039a2b993157 100644 --- a/drivers/usb/renesas_usbhs/fifo.h +++ b/drivers/usb/renesas_usbhs/fifo.h @@ -50,7 +50,7 @@ struct usbhs_pkt { struct usbhs_pkt *pkt); struct work_struct work; dma_addr_t dma; - dma_cookie_t cookie; + const struct dmaengine_result *dma_result; void *buf; int length; int trans; diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index 89675ee29645..8fbaef5c9d69 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -77,6 +77,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x4348, 0x5523) }, + { USB_DEVICE(0x1a86, 0x7522) }, { USB_DEVICE(0x1a86, 0x7523) }, { USB_DEVICE(0x1a86, 0x5523) }, { }, diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c index 216edd5826ca..ecda82198798 100644 --- a/drivers/usb/serial/cypress_m8.c +++ b/drivers/usb/serial/cypress_m8.c @@ -59,6 +59,7 @@ static const struct usb_device_id id_table_earthmate[] = { static const struct usb_device_id id_table_cyphidcomrs232[] = { { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, + { USB_DEVICE(VENDOR_ID_SAI, PRODUCT_ID_CYPHIDCOM) }, { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) }, { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) }, { } /* Terminating entry */ @@ -73,6 +74,7 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) }, { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) }, { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, + { USB_DEVICE(VENDOR_ID_SAI, PRODUCT_ID_CYPHIDCOM) }, { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) }, { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) }, { USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) }, diff --git a/drivers/usb/serial/cypress_m8.h b/drivers/usb/serial/cypress_m8.h index 35e223751c0e..16b7410ad057 100644 --- a/drivers/usb/serial/cypress_m8.h +++ b/drivers/usb/serial/cypress_m8.h @@ -25,6 +25,9 @@ #define VENDOR_ID_CYPRESS 0x04b4 #define PRODUCT_ID_CYPHIDCOM 0x5500 +/* Simply Automated HID->COM UPB PIM (using Cypress PID 0x5500) */ +#define VENDOR_ID_SAI 0x17dd + /* FRWD Dongle - a GPS sports watch */ #define VENDOR_ID_FRWD 0x6737 #define PRODUCT_ID_CYPHIDCOM_FRWD 0x0001 diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c index d5bff69b1769..b8dfeb4fb2ed 100644 --- a/drivers/usb/serial/iuu_phoenix.c +++ b/drivers/usb/serial/iuu_phoenix.c @@ -697,14 +697,16 @@ static int iuu_uart_write(struct tty_struct *tty, struct usb_serial_port *port, struct iuu_private *priv = usb_get_serial_port_data(port); unsigned long flags; - if (count > 256) - return -ENOMEM; - spin_lock_irqsave(&priv->lock, flags); + count = min(count, 256 - priv->writelen); + if (count == 0) + goto out; + /* fill the buffer */ memcpy(priv->writebuf + priv->writelen, buf, count); priv->writelen += count; +out: 
spin_unlock_irqrestore(&priv->lock, flags); return count; diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 254a8bbeea67..9b7cee98ea60 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -245,6 +245,7 @@ static void option_instat_callback(struct urb *urb); /* These Quectel products use Quectel's vendor ID */ #define QUECTEL_PRODUCT_EC21 0x0121 #define QUECTEL_PRODUCT_EC25 0x0125 +#define QUECTEL_PRODUCT_EG95 0x0195 #define QUECTEL_PRODUCT_BG96 0x0296 #define QUECTEL_PRODUCT_EP06 0x0306 #define QUECTEL_PRODUCT_EM12 0x0512 @@ -1097,6 +1098,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(4) }, { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25), .driver_info = RSVD(4) }, + { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95), + .driver_info = RSVD(4) }, { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff), @@ -2028,6 +2031,9 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(4) | RSVD(5) }, { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */ .driver_info = RSVD(6) }, + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */ + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */ + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */ { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c index 962bc69a6a59..70ddc9d6d49e 100644 --- a/drivers/usb/typec/mux/intel_pmc_mux.c +++ b/drivers/usb/typec/mux/intel_pmc_mux.c @@ -148,7 +148,8 @@ pmc_usb_mux_dp_hpd(struct pmc_usb_port *port, struct typec_mux_state *state) msg[0] = PMC_USB_DP_HPD; msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT; - msg[1] = PMC_USB_DP_HPD_IRQ; + if (data->status & DP_STATUS_IRQ_HPD) + msg[1] = PMC_USB_DP_HPD_IRQ; if (data->status & DP_STATUS_HPD_STATE) msg[1] |= PMC_USB_DP_HPD_LVL; @@ -161,6 +162,7 @@ pmc_usb_mux_dp(struct pmc_usb_port *port, struct typec_mux_state *state) { struct typec_displayport_data *data = state->data; struct altmode_req req = { }; + int ret; if (data->status & DP_STATUS_IRQ_HPD) return pmc_usb_mux_dp_hpd(port, state); @@ -181,7 +183,14 @@ pmc_usb_mux_dp(struct pmc_usb_port *port, struct typec_mux_state *state) if (data->status & DP_STATUS_HPD_STATE) req.mode_data |= PMC_USB_ALTMODE_HPD_HIGH; - return pmc_usb_command(port, (void *)&req, sizeof(req)); + ret = pmc_usb_command(port, (void *)&req, sizeof(req)); + if (ret) + return ret; + + if (data->status & DP_STATUS_HPD_STATE) + return pmc_usb_mux_dp_hpd(port, state); + + return 0; } static int diff --git a/drivers/usb/typec/tcpm/tcpci_rt1711h.c b/drivers/usb/typec/tcpm/tcpci_rt1711h.c index 017389021b96..b56a0880a044 100644 --- a/drivers/usb/typec/tcpm/tcpci_rt1711h.c +++ b/drivers/usb/typec/tcpm/tcpci_rt1711h.c @@ -179,26 +179,6 @@ out: return tcpci_irq(chip->tcpci); } -static int rt1711h_init_alert(struct rt1711h_chip *chip, - struct i2c_client *client) -{ - int ret; - - /* Disable chip interrupts before requesting irq */ - ret = rt1711h_write16(chip, TCPC_ALERT_MASK, 0); - if (ret < 0) - return ret; - - ret = devm_request_threaded_irq(chip->dev, client->irq, NULL, - rt1711h_irq, - IRQF_ONESHOT | IRQF_TRIGGER_LOW, - dev_name(chip->dev), chip); - if (ret < 0) - return ret; - enable_irq_wake(client->irq); - return 0; 
-} - static int rt1711h_sw_reset(struct rt1711h_chip *chip) { int ret; @@ -260,7 +240,8 @@ static int rt1711h_probe(struct i2c_client *client, if (ret < 0) return ret; - ret = rt1711h_init_alert(chip, client); + /* Disable chip interrupts before requesting irq */ + ret = rt1711h_write16(chip, TCPC_ALERT_MASK, 0); if (ret < 0) return ret; @@ -271,6 +252,14 @@ static int rt1711h_probe(struct i2c_client *client, if (IS_ERR_OR_NULL(chip->tcpci)) return PTR_ERR(chip->tcpci); + ret = devm_request_threaded_irq(chip->dev, client->irq, NULL, + rt1711h_irq, + IRQF_ONESHOT | IRQF_TRIGGER_LOW, + dev_name(chip->dev), chip); + if (ret < 0) + return ret; + enable_irq_wake(client->irq); + return 0; } diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c index ff6562f602e0..de211ef3738c 100644 --- a/drivers/vdpa/vdpa.c +++ b/drivers/vdpa/vdpa.c @@ -63,7 +63,7 @@ static void vdpa_release_dev(struct device *d) * @config: the bus operations that is supported by this device * @size: size of the parent structure that contains private data * - * Drvier should use vdap_alloc_device() wrapper macro instead of + * Driver should use vdpa_alloc_device() wrapper macro instead of * using this directly. * * Returns an error when parent/config/dma_dev is not set or fail to get diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 7c0779018b1b..f634c81998bb 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -521,10 +521,14 @@ static void vfio_pci_release(void *device_data) vfio_pci_vf_token_user_add(vdev, -1); vfio_spapr_pci_eeh_release(vdev->pdev); vfio_pci_disable(vdev); - if (vdev->err_trigger) + if (vdev->err_trigger) { eventfd_ctx_put(vdev->err_trigger); - if (vdev->req_trigger) + vdev->err_trigger = NULL; + } + if (vdev->req_trigger) { eventfd_ctx_put(vdev->req_trigger); + vdev->req_trigger = NULL; + } } mutex_unlock(&vdev->reflck->lock); diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index 8746c943247a..d98843feddce 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -398,9 +398,15 @@ static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write) /* Caller should hold memory_lock semaphore */ bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev) { + struct pci_dev *pdev = vdev->pdev; u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]); - return cmd & PCI_COMMAND_MEMORY; + /* + * SR-IOV VF memory enable is handled by the MSE bit in the + * PF SR-IOV capability, there's therefore no need to trigger + * faults based on the virtual value. + */ + return pdev->is_virtfn || (cmd & PCI_COMMAND_MEMORY); } /* @@ -1728,6 +1734,15 @@ int vfio_config_init(struct vfio_pci_device *vdev) vconfig[PCI_INTERRUPT_PIN]); vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */ + + /* + * VFs do no implement the memory enable bit of the COMMAND + * register therefore we'll not have it set in our initial + * copy of config space after pci_enable_device(). For + * consistency with PFs, set the virtual enable bit here. 
+ */ + *(__le16 *)&vconfig[PCI_COMMAND] |= + cpu_to_le16(PCI_COMMAND_MEMORY); } if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx) diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c index 0466921f4772..a09dedc79f68 100644 --- a/drivers/vhost/test.c +++ b/drivers/vhost/test.c @@ -263,9 +263,62 @@ static int vhost_test_set_features(struct vhost_test *n, u64 features) return 0; } +static long vhost_test_set_backend(struct vhost_test *n, unsigned index, int fd) +{ + static void *backend; + + const bool enable = fd != -1; + struct vhost_virtqueue *vq; + int r; + + mutex_lock(&n->dev.mutex); + r = vhost_dev_check_owner(&n->dev); + if (r) + goto err; + + if (index >= VHOST_TEST_VQ_MAX) { + r = -ENOBUFS; + goto err; + } + vq = &n->vqs[index]; + mutex_lock(&vq->mutex); + + /* Verify that ring has been setup correctly. */ + if (!vhost_vq_access_ok(vq)) { + r = -EFAULT; + goto err_vq; + } + if (!enable) { + vhost_poll_stop(&vq->poll); + backend = vhost_vq_get_backend(vq); + vhost_vq_set_backend(vq, NULL); + } else { + vhost_vq_set_backend(vq, backend); + r = vhost_vq_init_access(vq); + if (r == 0) + r = vhost_poll_start(&vq->poll, vq->kick); + } + + mutex_unlock(&vq->mutex); + + if (enable) { + vhost_test_flush_vq(n, index); + } + + mutex_unlock(&n->dev.mutex); + return 0; + +err_vq: + mutex_unlock(&vq->mutex); +err: + mutex_unlock(&n->dev.mutex); + return r; +} + static long vhost_test_ioctl(struct file *f, unsigned int ioctl, unsigned long arg) { + struct vhost_vring_file backend; struct vhost_test *n = f->private_data; void __user *argp = (void __user *)arg; u64 __user *featurep = argp; @@ -277,6 +330,10 @@ static long vhost_test_ioctl(struct file *f, unsigned int ioctl, if (copy_from_user(&test, argp, sizeof test)) return -EFAULT; return vhost_test_run(n, test); + case VHOST_TEST_SET_BACKEND: + if (copy_from_user(&backend, argp, sizeof backend)) + return -EFAULT; + return vhost_test_set_backend(n, backend.index, backend.fd); case VHOST_GET_FEATURES: features = VHOST_FEATURES; if (copy_to_user(featurep, &features, sizeof features)) diff --git a/drivers/vhost/test.h b/drivers/vhost/test.h index 7dd265bfdf81..822bc4bee03a 100644 --- a/drivers/vhost/test.h +++ b/drivers/vhost/test.h @@ -4,5 +4,6 @@ /* Start a given test on the virtio null device. 0 stops all tests. 
*/ #define VHOST_TEST_RUN _IOW(VHOST_VIRTIO, 0x31, int) +#define VHOST_TEST_SET_BACKEND _IOW(VHOST_VIRTIO, 0x32, int) #endif diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 7580e34f76c1..a54b60d6623f 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -818,7 +818,7 @@ static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma) struct vdpa_device *vdpa = v->vdpa; const struct vdpa_config_ops *ops = vdpa->config; struct vdpa_notification_area notify; - int index = vma->vm_pgoff; + unsigned long index = vma->vm_pgoff; if (vma->vm_end - vma->vm_start != PAGE_SIZE) return -EINVAL; diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 6af2734f2a7b..af9f5ab96f74 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -2402,7 +2402,8 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch) ops->graphics = 1; if (!blank) { - var.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE; + var.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE | + FB_ACTIVATE_KD_TEXT; fb_set_var(info, &var); ops->graphics = 0; ops->var = info->var; diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c index bee29aadc646..def14ac0ebe1 100644 --- a/drivers/video/fbdev/uvesafb.c +++ b/drivers/video/fbdev/uvesafb.c @@ -1836,7 +1836,7 @@ static int uvesafb_setup(char *options) else if (!strcmp(this_opt, "noedid")) noedid = true; else if (!strcmp(this_opt, "noblank")) - blank = true; + blank = false; else if (!strncmp(this_opt, "vtotal:", 7)) vram_total = simple_strtoul(this_opt + 7, NULL, 0); else if (!strncmp(this_opt, "vremap:", 7)) diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c index b690a8a4bf9e..18ebd7a6af98 100644 --- a/drivers/virt/vboxguest/vboxguest_core.c +++ b/drivers/virt/vboxguest/vboxguest_core.c @@ -1444,7 +1444,7 @@ static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev, or_mask = caps->u.in.or_mask; not_mask = caps->u.in.not_mask; - if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK) + if ((or_mask | not_mask) & ~VMMDEV_GUEST_CAPABILITIES_MASK) return -EINVAL; ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask, @@ -1520,7 +1520,8 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data) /* For VMMDEV_REQUEST hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT */ if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) || - req == VBG_IOCTL_VMMDEV_REQUEST_BIG) + req == VBG_IOCTL_VMMDEV_REQUEST_BIG || + req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT) return vbg_ioctl_vmmrequest(gdev, session, data); if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT) @@ -1558,6 +1559,7 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data) case VBG_IOCTL_HGCM_CALL(0): return vbg_ioctl_hgcm_call(gdev, session, f32bit, data); case VBG_IOCTL_LOG(0): + case VBG_IOCTL_LOG_ALT(0): return vbg_ioctl_log(data); } diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h index 4188c12b839f..77c3a9c8255d 100644 --- a/drivers/virt/vboxguest/vboxguest_core.h +++ b/drivers/virt/vboxguest/vboxguest_core.h @@ -15,6 +15,21 @@ #include <linux/vboxguest.h> #include "vmmdev.h" +/* + * The mainline kernel version (this version) of the vboxguest module + * contained a bug where it defined VBGL_IOCTL_VMMDEV_REQUEST_BIG and + * VBGL_IOCTL_LOG using _IOC(_IOC_READ | _IOC_WRITE, 'V', ...) instead + * of _IO(V, ...) as the out of tree VirtualBox upstream version does. 
+ * + * These _ALT definitions keep compatibility with the wrong defines the + * mainline kernel version used for a while. + * Note the VirtualBox userspace bits have always been built against + * VirtualBox upstream's headers, so this is likely not necessary. But + * we must never break our ABI so we keep these around to be 100% sure. + */ +#define VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT _IOC(_IOC_READ | _IOC_WRITE, 'V', 3, 0) +#define VBG_IOCTL_LOG_ALT(s) _IOC(_IOC_READ | _IOC_WRITE, 'V', 9, s) + struct vbg_session; /** VBox guest memory balloon. */ diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c index 6e8c0f1c1056..32c2c52f7e84 100644 --- a/drivers/virt/vboxguest/vboxguest_linux.c +++ b/drivers/virt/vboxguest/vboxguest_linux.c @@ -131,7 +131,8 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req, * the need for a bounce-buffer and another copy later on. */ is_vmmdev_req = (req & ~IOCSIZE_MASK) == VBG_IOCTL_VMMDEV_REQUEST(0) || - req == VBG_IOCTL_VMMDEV_REQUEST_BIG; + req == VBG_IOCTL_VMMDEV_REQUEST_BIG || + req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT; if (is_vmmdev_req) buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT, diff --git a/drivers/virt/vboxguest/vmmdev.h b/drivers/virt/vboxguest/vmmdev.h index 6337b8d75d96..21f408120e3f 100644 --- a/drivers/virt/vboxguest/vmmdev.h +++ b/drivers/virt/vboxguest/vmmdev.h @@ -206,6 +206,8 @@ VMMDEV_ASSERT_SIZE(vmmdev_mask, 24 + 8); * not. */ #define VMMDEV_GUEST_SUPPORTS_GRAPHICS BIT(2) +/* The mask of valid capabilities, for sanity checking. */ +#define VMMDEV_GUEST_CAPABILITIES_MASK 0x00000007U /** struct vmmdev_hypervisorinfo - Hypervisor info structure. */ struct vmmdev_hypervisorinfo { diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c index 50c689f25045..f26f5f64ae82 100644 --- a/drivers/virtio/virtio_mem.c +++ b/drivers/virtio/virtio_mem.c @@ -101,6 +101,11 @@ struct virtio_mem { /* The parent resource for all memory added via this device. */ struct resource *parent_resource; + /* + * Copy of "System RAM (virtio_mem)" to be used for + * add_memory_driver_managed(). + */ + const char *resource_name; /* Summary of all memory block states. */ unsigned long nb_mb_state[VIRTIO_MEM_MB_STATE_COUNT]; @@ -414,8 +419,20 @@ static int virtio_mem_mb_add(struct virtio_mem *vm, unsigned long mb_id) if (nid == NUMA_NO_NODE) nid = memory_add_physaddr_to_nid(addr); + /* + * When force-unloading the driver and we still have memory added to + * Linux, the resource name has to stay. 
+ */ + if (!vm->resource_name) { + vm->resource_name = kstrdup_const("System RAM (virtio_mem)", + GFP_KERNEL); + if (!vm->resource_name) + return -ENOMEM; + } + dev_dbg(&vm->vdev->dev, "adding memory block: %lu\n", mb_id); - return add_memory(nid, addr, memory_block_size_bytes()); + return add_memory_driver_managed(nid, addr, memory_block_size_bytes(), + vm->resource_name); } /* @@ -1192,7 +1209,7 @@ static int virtio_mem_mb_plug_any_sb(struct virtio_mem *vm, unsigned long mb_id, VIRTIO_MEM_MB_STATE_OFFLINE); } - return rc; + return 0; } /* @@ -1890,10 +1907,12 @@ static void virtio_mem_remove(struct virtio_device *vdev) vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL] || vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE] || vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL] || - vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE]) + vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE]) { dev_warn(&vdev->dev, "device still has system memory added\n"); - else + } else { virtio_mem_delete_resource(vm); + kfree_const(vm->resource_name); + } /* remove all tracking data - no locking needed */ vfree(vm->mb_state); diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c index 040d2a43e8e3..786fbb7d8be0 100644 --- a/drivers/xen/xenbus/xenbus_client.c +++ b/drivers/xen/xenbus/xenbus_client.c @@ -69,11 +69,27 @@ struct xenbus_map_node { unsigned int nr_handles; }; +struct map_ring_valloc { + struct xenbus_map_node *node; + + /* Why do we need two arrays? See comment of __xenbus_map_ring */ + union { + unsigned long addrs[XENBUS_MAX_RING_GRANTS]; + pte_t *ptes[XENBUS_MAX_RING_GRANTS]; + }; + phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS]; + + struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS]; + struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS]; + + unsigned int idx; /* HVM only. */ +}; + static DEFINE_SPINLOCK(xenbus_valloc_lock); static LIST_HEAD(xenbus_valloc_pages); struct xenbus_ring_ops { - int (*map)(struct xenbus_device *dev, + int (*map)(struct xenbus_device *dev, struct map_ring_valloc *info, grant_ref_t *gnt_refs, unsigned int nr_grefs, void **vaddr); int (*unmap)(struct xenbus_device *dev, void *vaddr); @@ -440,8 +456,7 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn); * Map @nr_grefs pages of memory into this domain from another * domain's grant table. xenbus_map_ring_valloc allocates @nr_grefs * pages of virtual address space, maps the pages to that address, and - * sets *vaddr to that address. Returns 0 on success, and GNTST_* - * (see xen/include/interface/grant_table.h) or -ENOMEM / -EINVAL on + * sets *vaddr to that address. Returns 0 on success, and -errno on * error. If an error is returned, device will switch to * XenbusStateClosing and the error message will be saved in XenStore. */ @@ -449,12 +464,25 @@ int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs, unsigned int nr_grefs, void **vaddr) { int err; + struct map_ring_valloc *info; + + *vaddr = NULL; + + if (nr_grefs > XENBUS_MAX_RING_GRANTS) + return -EINVAL; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; - err = ring_ops->map(dev, gnt_refs, nr_grefs, vaddr); - /* Some hypervisors are buggy and can return 1. 
*/ - if (err > 0) - err = GNTST_general_error; + info->node = kzalloc(sizeof(*info->node), GFP_KERNEL); + if (!info->node) + err = -ENOMEM; + else + err = ring_ops->map(dev, info, gnt_refs, nr_grefs, vaddr); + kfree(info->node); + kfree(info); return err; } EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc); @@ -466,62 +494,57 @@ static int __xenbus_map_ring(struct xenbus_device *dev, grant_ref_t *gnt_refs, unsigned int nr_grefs, grant_handle_t *handles, - phys_addr_t *addrs, + struct map_ring_valloc *info, unsigned int flags, bool *leaked) { - struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS]; - struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS]; int i, j; - int err = GNTST_okay; if (nr_grefs > XENBUS_MAX_RING_GRANTS) return -EINVAL; for (i = 0; i < nr_grefs; i++) { - memset(&map[i], 0, sizeof(map[i])); - gnttab_set_map_op(&map[i], addrs[i], flags, gnt_refs[i], - dev->otherend_id); + gnttab_set_map_op(&info->map[i], info->phys_addrs[i], flags, + gnt_refs[i], dev->otherend_id); handles[i] = INVALID_GRANT_HANDLE; } - gnttab_batch_map(map, i); + gnttab_batch_map(info->map, i); for (i = 0; i < nr_grefs; i++) { - if (map[i].status != GNTST_okay) { - err = map[i].status; - xenbus_dev_fatal(dev, map[i].status, + if (info->map[i].status != GNTST_okay) { + xenbus_dev_fatal(dev, info->map[i].status, "mapping in shared page %d from domain %d", gnt_refs[i], dev->otherend_id); goto fail; } else - handles[i] = map[i].handle; + handles[i] = info->map[i].handle; } - return GNTST_okay; + return 0; fail: for (i = j = 0; i < nr_grefs; i++) { if (handles[i] != INVALID_GRANT_HANDLE) { - memset(&unmap[j], 0, sizeof(unmap[j])); - gnttab_set_unmap_op(&unmap[j], (phys_addr_t)addrs[i], + gnttab_set_unmap_op(&info->unmap[j], + info->phys_addrs[i], GNTMAP_host_map, handles[i]); j++; } } - if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, j)) + if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, info->unmap, j)) BUG(); *leaked = false; for (i = 0; i < j; i++) { - if (unmap[i].status != GNTST_okay) { + if (info->unmap[i].status != GNTST_okay) { *leaked = true; break; } } - return err; + return -ENOENT; } /** @@ -566,21 +589,12 @@ static int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t *handles, return err; } -struct map_ring_valloc_hvm -{ - unsigned int idx; - - /* Why do we need two arrays? 
See comment of __xenbus_map_ring */ - phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS]; - unsigned long addrs[XENBUS_MAX_RING_GRANTS]; -}; - static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn, unsigned int goffset, unsigned int len, void *data) { - struct map_ring_valloc_hvm *info = data; + struct map_ring_valloc *info = data; unsigned long vaddr = (unsigned long)gfn_to_virt(gfn); info->phys_addrs[info->idx] = vaddr; @@ -589,39 +603,28 @@ static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn, info->idx++; } -static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev, - grant_ref_t *gnt_ref, - unsigned int nr_grefs, - void **vaddr) +static int xenbus_map_ring_hvm(struct xenbus_device *dev, + struct map_ring_valloc *info, + grant_ref_t *gnt_ref, + unsigned int nr_grefs, + void **vaddr) { - struct xenbus_map_node *node; + struct xenbus_map_node *node = info->node; int err; void *addr; bool leaked = false; - struct map_ring_valloc_hvm info = { - .idx = 0, - }; unsigned int nr_pages = XENBUS_PAGES(nr_grefs); - if (nr_grefs > XENBUS_MAX_RING_GRANTS) - return -EINVAL; - - *vaddr = NULL; - - node = kzalloc(sizeof(*node), GFP_KERNEL); - if (!node) - return -ENOMEM; - err = alloc_xenballooned_pages(nr_pages, node->hvm.pages); if (err) goto out_err; gnttab_foreach_grant(node->hvm.pages, nr_grefs, xenbus_map_ring_setup_grant_hvm, - &info); + info); err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles, - info.phys_addrs, GNTMAP_host_map, &leaked); + info, GNTMAP_host_map, &leaked); node->nr_handles = nr_grefs; if (err) @@ -641,11 +644,13 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev, spin_unlock(&xenbus_valloc_lock); *vaddr = addr; + info->node = NULL; + return 0; out_xenbus_unmap_ring: if (!leaked) - xenbus_unmap_ring(dev, node->handles, nr_grefs, info.addrs); + xenbus_unmap_ring(dev, node->handles, nr_grefs, info->addrs); else pr_alert("leaking %p size %u page(s)", addr, nr_pages); @@ -653,7 +658,6 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev, if (!leaked) free_xenballooned_pages(nr_pages, node->hvm.pages); out_err: - kfree(node); return err; } @@ -676,40 +680,28 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr) EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree); #ifdef CONFIG_XEN_PV -static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev, - grant_ref_t *gnt_refs, - unsigned int nr_grefs, - void **vaddr) +static int xenbus_map_ring_pv(struct xenbus_device *dev, + struct map_ring_valloc *info, + grant_ref_t *gnt_refs, + unsigned int nr_grefs, + void **vaddr) { - struct xenbus_map_node *node; + struct xenbus_map_node *node = info->node; struct vm_struct *area; - pte_t *ptes[XENBUS_MAX_RING_GRANTS]; - phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS]; int err = GNTST_okay; int i; bool leaked; - *vaddr = NULL; - - if (nr_grefs > XENBUS_MAX_RING_GRANTS) - return -EINVAL; - - node = kzalloc(sizeof(*node), GFP_KERNEL); - if (!node) + area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, info->ptes); + if (!area) return -ENOMEM; - area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes); - if (!area) { - kfree(node); - return -ENOMEM; - } - for (i = 0; i < nr_grefs; i++) - phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr; + info->phys_addrs[i] = + arbitrary_virt_to_machine(info->ptes[i]).maddr; err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles, - phys_addrs, - GNTMAP_host_map | GNTMAP_contains_pte, + info, GNTMAP_host_map | GNTMAP_contains_pte, &leaked); if (err) goto failed; @@ -722,6 +714,8 @@ static int 
xenbus_map_ring_valloc_pv(struct xenbus_device *dev, spin_unlock(&xenbus_valloc_lock); *vaddr = area->addr; + info->node = NULL; + return 0; failed: @@ -730,11 +724,10 @@ failed: else pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs); - kfree(node); return err; } -static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr) +static int xenbus_unmap_ring_pv(struct xenbus_device *dev, void *vaddr) { struct xenbus_map_node *node; struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS]; @@ -798,12 +791,12 @@ static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr) } static const struct xenbus_ring_ops ring_ops_pv = { - .map = xenbus_map_ring_valloc_pv, - .unmap = xenbus_unmap_ring_vfree_pv, + .map = xenbus_map_ring_pv, + .unmap = xenbus_unmap_ring_pv, }; #endif -struct unmap_ring_vfree_hvm +struct unmap_ring_hvm { unsigned int idx; unsigned long addrs[XENBUS_MAX_RING_GRANTS]; @@ -814,19 +807,19 @@ static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn, unsigned int len, void *data) { - struct unmap_ring_vfree_hvm *info = data; + struct unmap_ring_hvm *info = data; info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn); info->idx++; } -static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr) +static int xenbus_unmap_ring_hvm(struct xenbus_device *dev, void *vaddr) { int rv; struct xenbus_map_node *node; void *addr; - struct unmap_ring_vfree_hvm info = { + struct unmap_ring_hvm info = { .idx = 0, }; unsigned int nr_pages; @@ -887,8 +880,8 @@ enum xenbus_state xenbus_read_driver_state(const char *path) EXPORT_SYMBOL_GPL(xenbus_read_driver_state); static const struct xenbus_ring_ops ring_ops_hvm = { - .map = xenbus_map_ring_valloc_hvm, - .unmap = xenbus_unmap_ring_vfree_hvm, + .map = xenbus_map_ring_hvm, + .unmap = xenbus_unmap_ring_hvm, }; void __init xenbus_ring_ops_init(void)
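
The cdns3 ep0 hunks earlier in this diff convert multi-byte control-request fields with le16_to_cpu() and store TRB words with cpu_to_le32(). The fragment below is an illustrative sketch of that byte-order rule only, not code from the driver; struct example_trb and example_fill_status_trb() are hypothetical names invented for the example.

/*
 * Illustrative sketch (hypothetical names: example_trb,
 * example_fill_status_trb): control-request fields are __le16 on the
 * wire and must be converted with le16_to_cpu() before CPU arithmetic,
 * while words written into DMA-visible descriptors are stored with
 * cpu_to_le32().
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/usb/ch9.h>
#include <asm/byteorder.h>

struct example_trb {			/* hypothetical descriptor layout */
	__le32 buffer;
	__le32 length;
	__le32 control;
};

static void example_fill_status_trb(struct example_trb *trb, dma_addr_t addr,
				    const struct usb_ctrlrequest *ctrl)
{
	u16 len = le16_to_cpu(ctrl->wLength);	/* CPU order for comparisons */

	trb->buffer  = cpu_to_le32(lower_32_bits(addr));
	trb->length  = cpu_to_le32(len);
	trb->control = cpu_to_le32(0x1);	/* e.g. a cycle bit */
}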
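
The xhci hunk above adds a readl_poll_timeout() call to wait for the port to settle after USB2 hardware LPM is disabled. The sketch below shows only the general polling idiom; EXAMPLE_PLS_MASK, EXAMPLE_U0 and example_wait_for_u0() are assumed names for this example, not xhci definitions.

/*
 * Illustrative readl_poll_timeout() idiom: re-read a memory-mapped
 * register until a bit field reaches the expected value or a timeout
 * expires.  All names below are assumptions for the sketch.
 */
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/types.h>

#define EXAMPLE_PLS_MASK	0x1e0	/* assumed link-state bit field */
#define EXAMPLE_U0		0x000	/* assumed encoding of state U0 */

static int example_wait_for_u0(void __iomem *port_reg)
{
	u32 val;

	/* poll every 100 us, time out after 10 ms (-ETIMEDOUT on failure) */
	return readl_poll_timeout(port_reg, val,
				  (val & EXAMPLE_PLS_MASK) == EXAMPLE_U0,
				  100, 10000);
}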
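
The tcpci_rt1711h hunks move the threaded-IRQ request to the end of probe, after the chip's interrupt sources are masked and the port is registered, which suggests the intent is that the handler never runs against a partially initialized device. Below is a minimal ordering sketch under that assumption; example_probe() and example_irq() are hypothetical, and only devm_request_threaded_irq() and enable_irq_wake() are the real kernel APIs the patch uses.

/*
 * Ordering sketch (assumed names): mask device interrupt sources and
 * finish registration first, request the threaded IRQ last.
 */
#include <linux/i2c.h>
#include <linux/interrupt.h>

static irqreturn_t example_irq(int irq, void *dev_id)
{
	/* a real handler would service the already-registered port here */
	return IRQ_HANDLED;
}

static int example_probe(struct i2c_client *client)
{
	int ret;

	/* 1) mask/clear the device's interrupt sources (device-specific) */
	/* 2) register the port or upper-layer object the handler will use */

	/* 3) only now hook up the interrupt line */
	ret = devm_request_threaded_irq(&client->dev, client->irq, NULL,
					example_irq,
					IRQF_ONESHOT | IRQF_TRIGGER_LOW,
					dev_name(&client->dev), client);
	if (ret < 0)
		return ret;

	enable_irq_wake(client->irq);
	return 0;
}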