Diffstat (limited to 'drivers')
677 files changed, 5789 insertions, 3251 deletions
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c index 3f045b5953b2..a0c1a665dfc1 100644 --- a/drivers/acpi/acpica/nsaccess.c +++ b/drivers/acpi/acpica/nsaccess.c @@ -99,13 +99,12 @@ acpi_status acpi_ns_root_initialize(void) * just create and link the new node(s) here. */ new_node = - ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_namespace_node)); + acpi_ns_create_node(*ACPI_CAST_PTR(u32, init_val->name)); if (!new_node) { status = AE_NO_MEMORY; goto unlock_and_exit; } - ACPI_COPY_NAMESEG(new_node->name.ascii, init_val->name); new_node->descriptor_type = ACPI_DESC_TYPE_NAMED; new_node->type = init_val->type; diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index e6a5d997241c..cb8f70842249 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -9,6 +9,8 @@ #ifndef _ACPI_INTERNAL_H_ #define _ACPI_INTERNAL_H_ +#include <linux/idr.h> + #define PREFIX "ACPI: " int early_acpi_osi_init(void); @@ -96,9 +98,11 @@ void acpi_scan_table_handler(u32 event, void *table, void *context); extern struct list_head acpi_bus_id_list; +#define ACPI_MAX_DEVICE_INSTANCES 4096 + struct acpi_device_bus_id { const char *bus_id; - unsigned int instance_no; + struct ida instance_ida; struct list_head node; }; diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index d93e400940a3..768a6b4d2368 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -29,6 +29,7 @@ */ #ifdef CONFIG_X86 #include <asm/apic.h> +#include <asm/cpu.h> #endif #define _COMPONENT ACPI_PROCESSOR_COMPONENT @@ -541,6 +542,12 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index) wait_for_freeze(); } else return -ENODEV; + +#if defined(CONFIG_X86) && defined(CONFIG_HOTPLUG_CPU) + /* If NMI wants to wake up CPU0, start CPU0. 
*/ + if (wakeup_cpu0()) + start_cpu0(); +#endif } /* Never reached */ diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index a184529d8fa4..6efe7edd7b1e 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -479,9 +479,8 @@ static void acpi_device_del(struct acpi_device *device) list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) if (!strcmp(acpi_device_bus_id->bus_id, acpi_device_hid(device))) { - if (acpi_device_bus_id->instance_no > 0) - acpi_device_bus_id->instance_no--; - else { + ida_simple_remove(&acpi_device_bus_id->instance_ida, device->pnp.instance_no); + if (ida_is_empty(&acpi_device_bus_id->instance_ida)) { list_del(&acpi_device_bus_id->node); kfree_const(acpi_device_bus_id->bus_id); kfree(acpi_device_bus_id); @@ -631,6 +630,21 @@ static struct acpi_device_bus_id *acpi_device_bus_id_match(const char *dev_id) return NULL; } +static int acpi_device_set_name(struct acpi_device *device, + struct acpi_device_bus_id *acpi_device_bus_id) +{ + struct ida *instance_ida = &acpi_device_bus_id->instance_ida; + int result; + + result = ida_simple_get(instance_ida, 0, ACPI_MAX_DEVICE_INSTANCES, GFP_KERNEL); + if (result < 0) + return result; + + device->pnp.instance_no = result; + dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, result); + return 0; +} + int acpi_device_add(struct acpi_device *device, void (*release)(struct device *)) { @@ -665,7 +679,9 @@ int acpi_device_add(struct acpi_device *device, acpi_device_bus_id = acpi_device_bus_id_match(acpi_device_hid(device)); if (acpi_device_bus_id) { - acpi_device_bus_id->instance_no++; + result = acpi_device_set_name(device, acpi_device_bus_id); + if (result) + goto err_unlock; } else { acpi_device_bus_id = kzalloc(sizeof(*acpi_device_bus_id), GFP_KERNEL); @@ -681,9 +697,16 @@ int acpi_device_add(struct acpi_device *device, goto err_unlock; } + ida_init(&acpi_device_bus_id->instance_ida); + + result = acpi_device_set_name(device, acpi_device_bus_id); + if (result) { + kfree(acpi_device_bus_id); + goto err_unlock; + } + list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list); } - dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, acpi_device_bus_id->instance_no); if (device->parent) list_add_tail(&device->node, &device->parent->children); @@ -1647,6 +1670,8 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, device_initialize(&device->dev); dev_set_uevent_suppress(&device->dev, true); acpi_init_coherency(device); + /* Assume there are unmet deps to start with. */ + device->dep_unmet = 1; } void acpi_device_add_finalize(struct acpi_device *device) @@ -1910,6 +1935,8 @@ static void acpi_scan_dep_init(struct acpi_device *adev) { struct acpi_dep_data *dep; + adev->dep_unmet = 0; + mutex_lock(&acpi_dep_list_lock); list_for_each_entry(dep, &acpi_dep_list, node) { @@ -1957,7 +1984,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep, return AE_CTRL_DEPTH; acpi_scan_init_hotplug(device); - if (!check_dep) + /* + * If check_dep is true at this point, the device has no dependencies, + * or the creation of the device object would have been postponed above. 
+ */ + if (check_dep) + device->dep_unmet = 0; + else acpi_scan_dep_init(device); out: diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c index e48690a006a4..9d581045acff 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c @@ -780,7 +780,7 @@ acpi_status acpi_os_table_override(struct acpi_table_header *existing_table, } /* - * acpi_table_init() + * acpi_locate_initial_tables() * * find RSDP, find and checksum SDT/XSDT. * checksum all tables, print SDT/XSDT @@ -788,7 +788,7 @@ acpi_status acpi_os_table_override(struct acpi_table_header *existing_table, * result: sdt_entry[] is initialized */ -int __init acpi_table_init(void) +int __init acpi_locate_initial_tables(void) { acpi_status status; @@ -803,9 +803,45 @@ int __init acpi_table_init(void) status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0); if (ACPI_FAILURE(status)) return -EINVAL; - acpi_table_initrd_scan(); + return 0; +} + +void __init acpi_reserve_initial_tables(void) +{ + int i; + + for (i = 0; i < ACPI_MAX_TABLES; i++) { + struct acpi_table_desc *table_desc = &initial_tables[i]; + u64 start = table_desc->address; + u64 size = table_desc->length; + + if (!start || !size) + break; + + pr_info("Reserving %4s table memory at [mem 0x%llx-0x%llx]\n", + table_desc->signature.ascii, start, start + size - 1); + + memblock_reserve(start, size); + } +} + +void __init acpi_table_init_complete(void) +{ + acpi_table_initrd_scan(); check_multiple_madt(); +} + +int __init acpi_table_init(void) +{ + int ret; + + ret = acpi_locate_initial_tables(); + if (ret) + return ret; + + acpi_table_init_complete(); + return 0; } diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 811d298637cb..83cd4c95faf0 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -147,6 +147,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = { }, }, { + .callback = video_detect_force_vendor, .ident = "Sony VPCEH3U1E", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index 316a9947541f..b574cce98dc3 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -2260,7 +2260,8 @@ out: return rc; err_eni_release: - eni_do_release(dev); + dev->phy = NULL; + iounmap(ENI_DEV(dev)->ioaddr); err_unregister: atm_dev_deregister(dev); err_free_consistent: diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c index 9a70bee84125..495fd0a1f040 100644 --- a/drivers/atm/fore200e.c +++ b/drivers/atm/fore200e.c @@ -100,8 +100,6 @@ static LIST_HEAD(fore200e_boards); MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen"); MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION); -MODULE_SUPPORTED_DEVICE("PCA-200E, SBA-200E"); - static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = { { BUFFER_S1_NBR, BUFFER_L1_NBR }, diff --git a/drivers/atm/idt77105.c b/drivers/atm/idt77105.c index 3c081b6171a8..bfca7b8a6f31 100644 --- a/drivers/atm/idt77105.c +++ b/drivers/atm/idt77105.c @@ -262,7 +262,7 @@ static int idt77105_start(struct atm_dev *dev) { unsigned long flags; - if (!(dev->dev_data = kmalloc(sizeof(struct idt77105_priv),GFP_KERNEL))) + if (!(dev->phy_data = kmalloc(sizeof(struct idt77105_priv),GFP_KERNEL))) return -ENOMEM; PRIV(dev)->dev = dev; spin_lock_irqsave(&idt77105_priv_lock, flags); @@ -337,7 +337,7 @@ static int idt77105_stop(struct atm_dev *dev) else idt77105_all = walk->next; dev->phy = NULL; - dev->dev_data = NULL; + dev->phy_data = NULL; 
kfree(walk); break; } diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c index d7277c26e423..32d7aa141d96 100644 --- a/drivers/atm/lanai.c +++ b/drivers/atm/lanai.c @@ -2233,6 +2233,7 @@ static int lanai_dev_open(struct atm_dev *atmdev) conf1_write(lanai); #endif iounmap(lanai->base); + lanai->base = NULL; error_pci: pci_disable_device(lanai->pci); error: @@ -2245,6 +2246,8 @@ static int lanai_dev_open(struct atm_dev *atmdev) static void lanai_dev_close(struct atm_dev *atmdev) { struct lanai_dev *lanai = (struct lanai_dev *) atmdev->dev_data; + if (lanai->base==NULL) + return; printk(KERN_INFO DEV_LABEL "(itf %d): shutting down interface\n", lanai->number); lanai_timed_poll_stop(lanai); @@ -2552,7 +2555,7 @@ static int lanai_init_one(struct pci_dev *pci, struct atm_dev *atmdev; int result; - lanai = kmalloc(sizeof(*lanai), GFP_KERNEL); + lanai = kzalloc(sizeof(*lanai), GFP_KERNEL); if (lanai == NULL) { printk(KERN_ERR DEV_LABEL ": couldn't allocate dev_data structure!\n"); diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c index 7850758b5bb8..239852d85558 100644 --- a/drivers/atm/uPD98402.c +++ b/drivers/atm/uPD98402.c @@ -211,7 +211,7 @@ static void uPD98402_int(struct atm_dev *dev) static int uPD98402_start(struct atm_dev *dev) { DPRINTK("phy_start\n"); - if (!(dev->dev_data = kmalloc(sizeof(struct uPD98402_priv),GFP_KERNEL))) + if (!(dev->phy_data = kmalloc(sizeof(struct uPD98402_priv),GFP_KERNEL))) return -ENOMEM; spin_lock_init(&PRIV(dev)->lock); memset(&PRIV(dev)->sonet_stats,0,sizeof(struct k_sonet_stats)); diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c index f43430e9dcee..24fd6f369ebe 100644 --- a/drivers/auxdisplay/charlcd.c +++ b/drivers/auxdisplay/charlcd.c @@ -470,12 +470,14 @@ static ssize_t charlcd_write(struct file *file, const char __user *buf, char c; for (; count-- > 0; (*ppos)++, tmp++) { - if (!in_interrupt() && (((count + 1) & 0x1f) == 0)) + if (((count + 1) & 0x1f) == 0) { /* - * let's be a little nice with other processes - * that need some CPU + * charlcd_write() is invoked as a VFS->write() callback + * and as such it is always invoked from preemptible + * context and may sleep. 
*/ - schedule(); + cond_resched(); + } if (get_user(c, tmp)) return -EFAULT; @@ -537,12 +539,8 @@ static void charlcd_puts(struct charlcd *lcd, const char *s) int count = strlen(s); for (; count-- > 0; tmp++) { - if (!in_interrupt() && (((count + 1) & 0x1f) == 0)) - /* - * let's be a little nice with other processes - * that need some CPU - */ - schedule(); + if (((count + 1) & 0x1f) == 0) + cond_resched(); charlcd_write_char(lcd, *tmp); } diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index de8587cc119e..c1179edc0f3b 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -21,17 +21,94 @@ #include <linux/sched.h> #include <linux/smp.h> +static DEFINE_PER_CPU(struct scale_freq_data *, sft_data); +static struct cpumask scale_freq_counters_mask; +static bool scale_freq_invariant; + +static bool supports_scale_freq_counters(const struct cpumask *cpus) +{ + return cpumask_subset(cpus, &scale_freq_counters_mask); +} + bool topology_scale_freq_invariant(void) { return cpufreq_supports_freq_invariance() || - arch_freq_counters_available(cpu_online_mask); + supports_scale_freq_counters(cpu_online_mask); } -__weak bool arch_freq_counters_available(const struct cpumask *cpus) +static void update_scale_freq_invariant(bool status) { - return false; + if (scale_freq_invariant == status) + return; + + /* + * Task scheduler behavior depends on frequency invariance support, + * either cpufreq or counter driven. If the support status changes as + * a result of counter initialisation and use, retrigger the build of + * scheduling domains to ensure the information is propagated properly. + */ + if (topology_scale_freq_invariant() == status) { + scale_freq_invariant = status; + rebuild_sched_domains_energy(); + } } -DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE; + +void topology_set_scale_freq_source(struct scale_freq_data *data, + const struct cpumask *cpus) +{ + struct scale_freq_data *sfd; + int cpu; + + /* + * Avoid calling rebuild_sched_domains() unnecessarily if FIE is + * supported by cpufreq. + */ + if (cpumask_empty(&scale_freq_counters_mask)) + scale_freq_invariant = topology_scale_freq_invariant(); + + for_each_cpu(cpu, cpus) { + sfd = per_cpu(sft_data, cpu); + + /* Use ARCH provided counters whenever possible */ + if (!sfd || sfd->source != SCALE_FREQ_SOURCE_ARCH) { + per_cpu(sft_data, cpu) = data; + cpumask_set_cpu(cpu, &scale_freq_counters_mask); + } + } + + update_scale_freq_invariant(true); +} +EXPORT_SYMBOL_GPL(topology_set_scale_freq_source); + +void topology_clear_scale_freq_source(enum scale_freq_source source, + const struct cpumask *cpus) +{ + struct scale_freq_data *sfd; + int cpu; + + for_each_cpu(cpu, cpus) { + sfd = per_cpu(sft_data, cpu); + + if (sfd && sfd->source == source) { + per_cpu(sft_data, cpu) = NULL; + cpumask_clear_cpu(cpu, &scale_freq_counters_mask); + } + } + + update_scale_freq_invariant(false); +} +EXPORT_SYMBOL_GPL(topology_clear_scale_freq_source); + +void topology_scale_freq_tick(void) +{ + struct scale_freq_data *sfd = *this_cpu_ptr(&sft_data); + + if (sfd) + sfd->set_freq_scale(); +} + +DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE; +EXPORT_PER_CPU_SYMBOL_GPL(arch_freq_scale); void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq, unsigned long max_freq) @@ -47,13 +124,13 @@ void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq, * want to update the scale factor with information from CPUFREQ. 
* Instead the scale factor will be updated from arch_scale_freq_tick. */ - if (arch_freq_counters_available(cpus)) + if (supports_scale_freq_counters(cpus)) return; scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq; for_each_cpu(i, cpus) - per_cpu(freq_scale, i) = scale; + per_cpu(arch_freq_scale, i) = scale; } DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE; diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 9179825ff646..e2cf3b29123e 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -97,6 +97,9 @@ static void deferred_probe_work_func(struct work_struct *work) get_device(dev); + kfree(dev->p->deferred_probe_reason); + dev->p->deferred_probe_reason = NULL; + /* * Drop the mutex while probing each device; the probe path may * manipulate the deferred list diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 78c310d3179d..b6a782c31613 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -1088,34 +1088,6 @@ static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock, } /** - * resume_needed - Check whether to resume a device before system suspend. - * @dev: Device to check. - * @genpd: PM domain the device belongs to. - * - * There are two cases in which a device that can wake up the system from sleep - * states should be resumed by genpd_prepare(): (1) if the device is enabled - * to wake up the system and it has to remain active for this purpose while the - * system is in the sleep state and (2) if the device is not enabled to wake up - * the system from sleep states and it generally doesn't generate wakeup signals - * by itself (those signals are generated on its behalf by other parts of the - * system). In the latter case it may be necessary to reconfigure the device's - * wakeup settings during system suspend, because it may have been set up to - * signal remote wakeup from the system's working state as needed by runtime PM. - * Return 'true' in either of the above cases. - */ -static bool resume_needed(struct device *dev, - const struct generic_pm_domain *genpd) -{ - bool active_wakeup; - - if (!device_can_wakeup(dev)) - return false; - - active_wakeup = genpd_is_active_wakeup(genpd); - return device_may_wakeup(dev) ? active_wakeup : !active_wakeup; -} - -/** * genpd_prepare - Start power transition of a device in a PM domain. * @dev: Device to start the transition of. * @@ -1135,14 +1107,6 @@ static int genpd_prepare(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - /* - * If a wakeup request is pending for the device, it should be woken up - * at this point and a system wakeup event should be reported if it's - * set up to wake up the system from sleep states. 
- */ - if (resume_needed(dev, genpd)) - pm_runtime_resume(dev); - genpd_lock(genpd); if (genpd->prepared_count++ == 0) diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index a46a7e30881b..fe1dad68aee4 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -305,7 +305,7 @@ static int rpm_get_suppliers(struct device *dev) return 0; } -static void rpm_put_suppliers(struct device *dev) +static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend) { struct device_link *link; @@ -313,10 +313,30 @@ static void rpm_put_suppliers(struct device *dev) device_links_read_lock_held()) { while (refcount_dec_not_one(&link->rpm_active)) - pm_runtime_put(link->supplier); + pm_runtime_put_noidle(link->supplier); + + if (try_to_suspend) + pm_request_idle(link->supplier); } } +static void rpm_put_suppliers(struct device *dev) +{ + __rpm_put_suppliers(dev, true); +} + +static void rpm_suspend_suppliers(struct device *dev) +{ + struct device_link *link; + int idx = device_links_read_lock(); + + list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, + device_links_read_lock_held()) + pm_request_idle(link->supplier); + + device_links_read_unlock(idx); +} + /** * __rpm_callback - Run a given runtime PM callback for a given device. * @cb: Runtime PM callback to run. @@ -344,8 +364,10 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev) idx = device_links_read_lock(); retval = rpm_get_suppliers(dev); - if (retval) + if (retval) { + rpm_put_suppliers(dev); goto fail; + } device_links_read_unlock(idx); } @@ -368,9 +390,9 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev) || (dev->power.runtime_status == RPM_RESUMING && retval))) { idx = device_links_read_lock(); - fail: - rpm_put_suppliers(dev); + __rpm_put_suppliers(dev, false); +fail: device_links_read_unlock(idx); } @@ -642,8 +664,11 @@ static int rpm_suspend(struct device *dev, int rpmflags) goto out; } + if (dev->power.irq_safe) + goto out; + /* Maybe the parent is now able to suspend. */ - if (parent && !parent->power.ignore_children && !dev->power.irq_safe) { + if (parent && !parent->power.ignore_children) { spin_unlock(&dev->power.lock); spin_lock(&parent->power.lock); @@ -652,6 +677,14 @@ static int rpm_suspend(struct device *dev, int rpmflags) spin_lock(&dev->power.lock); } + /* Maybe the suppliers are now able to suspend. 
*/ + if (dev->power.links_count > 0) { + spin_unlock_irq(&dev->power.lock); + + rpm_suspend_suppliers(dev); + + spin_lock_irq(&dev->power.lock); + } out: trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval); @@ -1657,8 +1690,8 @@ void pm_runtime_get_suppliers(struct device *dev) device_links_read_lock_held()) if (link->flags & DL_FLAG_PM_RUNTIME) { link->supplier_preactivated = true; - refcount_inc(&link->rpm_active); pm_runtime_get_sync(link->supplier); + refcount_inc(&link->rpm_active); } device_links_read_unlock(idx); @@ -1671,6 +1704,8 @@ void pm_runtime_get_suppliers(struct device *dev) void pm_runtime_put_suppliers(struct device *dev) { struct device_link *link; + unsigned long flags; + bool put; int idx; idx = device_links_read_lock(); @@ -1679,7 +1714,11 @@ void pm_runtime_put_suppliers(struct device *dev) device_links_read_lock_held()) if (link->supplier_preactivated) { link->supplier_preactivated = false; - if (refcount_dec_not_one(&link->rpm_active)) + spin_lock_irqsave(&dev->power.lock, flags); + put = pm_runtime_status_suspended(dev) && + refcount_dec_not_one(&link->rpm_active); + spin_unlock_irqrestore(&dev->power.lock, flags); + if (put) pm_runtime_put(link->supplier); } diff --git a/drivers/base/power/wakeup_stats.c b/drivers/base/power/wakeup_stats.c index d638259b829a..924fac493c4f 100644 --- a/drivers/base/power/wakeup_stats.c +++ b/drivers/base/power/wakeup_stats.c @@ -137,7 +137,7 @@ static struct device *wakeup_source_device_create(struct device *parent, struct wakeup_source *ws) { struct device *dev = NULL; - int retval = -ENODEV; + int retval; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { @@ -154,7 +154,7 @@ static struct device *wakeup_source_device_create(struct device *parent, dev_set_drvdata(dev, ws); device_set_pm_not_required(dev); - retval = kobject_set_name(&dev->kobj, "wakeup%d", ws->id); + retval = dev_set_name(dev, "wakeup%d", ws->id); if (retval) goto error; diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c index 37179a8b1ceb..fa3719ef80e4 100644 --- a/drivers/base/swnode.c +++ b/drivers/base/swnode.c @@ -938,6 +938,9 @@ int software_node_register(const struct software_node *node) if (software_node_to_swnode(node)) return -EEXIST; + if (node->parent && !parent) + return -EINVAL; + return PTR_ERR_OR_ZERO(swnode_register(node, parent, 0)); } EXPORT_SYMBOL_GPL(software_node_register); @@ -1002,25 +1005,33 @@ EXPORT_SYMBOL_GPL(fwnode_remove_software_node); /** * device_add_software_node - Assign software node to a device * @dev: The device the software node is meant for. - * @swnode: The software node. + * @node: The software node. * - * This function will register @swnode and make it the secondary firmware node - * pointer of @dev. If @dev has no primary node, then @swnode will become the primary - * node. + * This function will make @node the secondary firmware node pointer of @dev. If + * @dev has no primary node, then @node will become the primary node. The + * function will register @node automatically if it wasn't already registered. */ -int device_add_software_node(struct device *dev, const struct software_node *swnode) +int device_add_software_node(struct device *dev, const struct software_node *node) { + struct swnode *swnode; int ret; /* Only one software node per device. 
*/ if (dev_to_swnode(dev)) return -EBUSY; - ret = software_node_register(swnode); - if (ret) - return ret; + swnode = software_node_to_swnode(node); + if (swnode) { + kobject_get(&swnode->kobj); + } else { + ret = software_node_register(node); + if (ret) + return ret; + + swnode = software_node_to_swnode(node); + } - set_secondary_fwnode(dev, software_node_fwnode(swnode)); + set_secondary_fwnode(dev, &swnode->fwnode); return 0; } diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 7d9cc433b758..5d9181382ce1 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1324,7 +1324,7 @@ struct bm_extent { * A followup commit may allow even bigger BIO sizes, * once we thought that through. */ #define DRBD_MAX_BIO_SIZE (1U << 20) -#if DRBD_MAX_BIO_SIZE > (BIO_MAX_PAGES << PAGE_SHIFT) +#if DRBD_MAX_BIO_SIZE > (BIO_MAX_VECS << PAGE_SHIFT) #error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE #endif #define DRBD_MAX_BIO_SIZE_SAFE (1U << 12) /* Works always = 4k */ diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 0b71292d9d5a..4aa9683ee0c1 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -5091,7 +5091,6 @@ module_param(floppy, charp, 0); module_param(FLOPPY_IRQ, int, 0); module_param(FLOPPY_DMA, int, 0); MODULE_AUTHOR("Alain L. Knaff"); -MODULE_SUPPORTED_DEVICE("fd"); MODULE_LICENSE("GPL"); /* This doesn't actually get used other than for module information */ diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c index d6c821d48090..51bfd7737552 100644 --- a/drivers/block/null_blk/main.c +++ b/drivers/block/null_blk/main.c @@ -1369,10 +1369,13 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector, } if (dev->zoned) - cmd->error = null_process_zoned_cmd(cmd, op, - sector, nr_sectors); + sts = null_process_zoned_cmd(cmd, op, sector, nr_sectors); else - cmd->error = null_process_cmd(cmd, op, sector, nr_sectors); + sts = null_process_cmd(cmd, op, sector, nr_sectors); + + /* Do not overwrite errors (e.g. timeout errors) */ + if (cmd->error == BLK_STS_OK) + cmd->error = sts; out: nullb_complete_cmd(cmd); @@ -1451,8 +1454,20 @@ static bool should_requeue_request(struct request *rq) static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res) { + struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq); + pr_info("rq %p timed out\n", rq); - blk_mq_complete_request(rq); + + /* + * If the device is marked as blocking (i.e. memory backed or zoned + * device), the submission path may be blocked waiting for resources + * and cause real timeouts. For these real timeouts, the submission + * path will complete the request using blk_mq_complete_request(). + * Only fake timeouts need to execute blk_mq_complete_request() here. 
+ */ + cmd->error = BLK_STS_TIMEOUT; + if (cmd->fake_timeout) + blk_mq_complete_request(rq); return BLK_EH_DONE; } @@ -1473,6 +1488,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx, cmd->rq = bd->rq; cmd->error = BLK_STS_OK; cmd->nq = nq; + cmd->fake_timeout = should_timeout_request(bd->rq); blk_mq_start_request(bd->rq); @@ -1489,7 +1505,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx, return BLK_STS_OK; } } - if (should_timeout_request(bd->rq)) + if (cmd->fake_timeout) return BLK_STS_OK; return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq)); diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h index 83504f3cc9d6..4876d5adb12d 100644 --- a/drivers/block/null_blk/null_blk.h +++ b/drivers/block/null_blk/null_blk.h @@ -22,6 +22,7 @@ struct nullb_cmd { blk_status_t error; struct nullb_queue *nq; struct hrtimer timer; + bool fake_timeout; }; struct nullb_queue { diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c index 63f549889f87..227e1be4c6f9 100644 --- a/drivers/block/rsxx/core.c +++ b/drivers/block/rsxx/core.c @@ -165,15 +165,17 @@ static ssize_t rsxx_cram_read(struct file *fp, char __user *ubuf, { struct rsxx_cardinfo *card = file_inode(fp)->i_private; char *buf; - ssize_t st; + int st; buf = kzalloc(cnt, GFP_KERNEL); if (!buf) return -ENOMEM; st = rsxx_creg_read(card, CREG_ADD_CRAM + (u32)*ppos, cnt, buf, 1); - if (!st) - st = copy_to_user(ubuf, buf, cnt); + if (!st) { + if (copy_to_user(ubuf, buf, cnt)) + st = -EFAULT; + } kfree(buf); if (st) return st; @@ -869,6 +871,7 @@ static int rsxx_pci_probe(struct pci_dev *dev, card->event_wq = create_singlethread_workqueue(DRIVER_NAME"_event"); if (!card->event_wq) { dev_err(CARD_TO_DEV(card), "Failed card event setup.\n"); + st = -ENOMEM; goto failed_event_handler; } diff --git a/drivers/block/rsxx/rsxx_priv.h b/drivers/block/rsxx/rsxx_priv.h index 4861669e5786..6147977994ff 100644 --- a/drivers/block/rsxx/rsxx_priv.h +++ b/drivers/block/rsxx/rsxx_priv.h @@ -11,7 +11,6 @@ #ifndef __RSXX_PRIV_H__ #define __RSXX_PRIV_H__ -#include <linux/version.h> #include <linux/semaphore.h> #include <linux/fs.h> diff --git a/drivers/block/umem.c b/drivers/block/umem.c index 982732dbe82e..664280f23bee 100644 --- a/drivers/block/umem.c +++ b/drivers/block/umem.c @@ -877,6 +877,7 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) if (card->mm_pages[0].desc == NULL || card->mm_pages[1].desc == NULL) { dev_printk(KERN_ERR, &card->dev->dev, "alloc failed\n"); + ret = -ENOMEM; goto failed_alloc; } reset_page(&card->mm_pages[0]); @@ -888,8 +889,10 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) spin_lock_init(&card->lock); card->queue = blk_alloc_queue(NUMA_NO_NODE); - if (!card->queue) + if (!card->queue) { + ret = -ENOMEM; goto failed_alloc; + } tasklet_init(&card->tasklet, process_page, (unsigned long)card); diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 1cdf09ff67b6..14e452896d04 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -891,7 +891,7 @@ next: out: for (i = last_map; i < num; i++) { /* Don't zap current batch's valid persistent grants. 
*/ - if(i >= last_map + segs_to_map) + if(i >= map_until) pages[i]->persistent_gnt = NULL; pages[i]->handle = BLKBACK_INVALID_HANDLE; } diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index a711a2e2a794..cf8deecc39ef 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -627,7 +627,7 @@ static ssize_t writeback_store(struct device *dev, struct bio_vec bio_vec; struct page *page; ssize_t ret = len; - int mode; + int mode, err; unsigned long blk_idx = 0; if (sysfs_streq(buf, "idle")) @@ -638,8 +638,8 @@ static ssize_t writeback_store(struct device *dev, if (strncmp(buf, PAGE_WB_SIG, sizeof(PAGE_WB_SIG) - 1)) return -EINVAL; - ret = kstrtol(buf + sizeof(PAGE_WB_SIG) - 1, 10, &index); - if (ret || index >= nr_pages) + if (kstrtol(buf + sizeof(PAGE_WB_SIG) - 1, 10, &index) || + index >= nr_pages) return -EINVAL; nr_pages = 1; @@ -663,7 +663,7 @@ static ssize_t writeback_store(struct device *dev, goto release_init_lock; } - while (nr_pages--) { + for (; nr_pages != 0; index++, nr_pages--) { struct bio_vec bvec; bvec.bv_page = page; @@ -728,12 +728,17 @@ static ssize_t writeback_store(struct device *dev, * XXX: A single page IO would be inefficient for write * but it would be not bad as starter. */ - ret = submit_bio_wait(&bio); - if (ret) { + err = submit_bio_wait(&bio); + if (err) { zram_slot_lock(zram, index); zram_clear_flag(zram, index, ZRAM_UNDER_WB); zram_clear_flag(zram, index, ZRAM_IDLE); zram_slot_unlock(zram, index); + /* + * Return last IO error unless every IO were + * not suceeded. + */ + ret = err; continue; } diff --git a/drivers/bluetooth/btrsi.c b/drivers/bluetooth/btrsi.c index 3951f7b23840..bea1595f6432 100644 --- a/drivers/bluetooth/btrsi.c +++ b/drivers/bluetooth/btrsi.c @@ -194,5 +194,4 @@ module_init(rsi_91x_bt_module_init); module_exit(rsi_91x_bt_module_exit); MODULE_AUTHOR("Redpine Signals Inc"); MODULE_DESCRIPTION("RSI BT driver"); -MODULE_SUPPORTED_DEVICE("RSI-BT"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c index b040447575ad..dcfb32ee5cb6 100644 --- a/drivers/bus/omap_l3_noc.c +++ b/drivers/bus/omap_l3_noc.c @@ -285,7 +285,7 @@ static int omap_l3_probe(struct platform_device *pdev) */ l3->debug_irq = platform_get_irq(pdev, 0); ret = devm_request_irq(l3->dev, l3->debug_irq, l3_interrupt_handler, - 0x0, "l3-dbg-irq", l3); + IRQF_NO_THREAD, "l3-dbg-irq", l3); if (ret) { dev_err(l3->dev, "request_irq failed for %d\n", l3->debug_irq); @@ -294,7 +294,7 @@ static int omap_l3_probe(struct platform_device *pdev) l3->app_irq = platform_get_irq(pdev, 1); ret = devm_request_irq(l3->dev, l3->app_irq, l3_interrupt_handler, - 0x0, "l3-app-irq", l3); + IRQF_NO_THREAD, "l3-app-irq", l3); if (ret) dev_err(l3->dev, "request_irq failed for %d\n", l3->app_irq); diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index a27d751cf219..3d74f237f005 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -3053,7 +3053,9 @@ static int sysc_remove(struct platform_device *pdev) pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); - reset_control_assert(ddata->rsts); + + if (!reset_control_status(ddata->rsts)) + reset_control_assert(ddata->rsts); unprepare: sysc_unprepare(ddata); diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c index 14b2d8034c51..45ac7ab003ce 100644 --- a/drivers/char/applicom.c +++ b/drivers/char/applicom.c @@ -81,9 +81,6 @@ MODULE_DESCRIPTION("Driver for Applicom Profibus card"); MODULE_LICENSE("GPL"); 
MODULE_ALIAS_MISCDEV(AC_MINOR); -MODULE_SUPPORTED_DEVICE("ac"); - - static struct applicom_board { unsigned long PhysIO; void __iomem *RamIO; diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c index 8038a8a9fb58..f4949b689bd5 100644 --- a/drivers/char/hw_random/pseries-rng.c +++ b/drivers/char/hw_random/pseries-rng.c @@ -54,10 +54,9 @@ static int pseries_rng_probe(struct vio_dev *dev, return hwrng_register(&pseries_rng); } -static int pseries_rng_remove(struct vio_dev *dev) +static void pseries_rng_remove(struct vio_dev *dev) { hwrng_unregister(&pseries_rng); - return 0; } static const struct vio_device_id pseries_rng_driver_ids[] = { diff --git a/drivers/char/toshiba.c b/drivers/char/toshiba.c index aff0a8e44fff..776abbfd85d6 100644 --- a/drivers/char/toshiba.c +++ b/drivers/char/toshiba.c @@ -64,7 +64,6 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jonathan Buzzard <jonathan@buzzard.org.uk>"); MODULE_DESCRIPTION("Toshiba laptop SMM driver"); -MODULE_SUPPORTED_DEVICE("toshiba"); static DEFINE_MUTEX(tosh_mutex); static int tosh_fn; diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index 19e23fcc6bc8..ddaeceb7e109 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -278,8 +278,6 @@ static void tpm_devs_release(struct device *dev) { struct tpm_chip *chip = container_of(dev, struct tpm_chip, devs); - dump_stack(); - /* release the master device reference */ put_device(&chip->dev); } diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c index 994385bf37c0..903604769de9 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.c +++ b/drivers/char/tpm/tpm_ibmvtpm.c @@ -343,7 +343,7 @@ static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm) * * Return: Always 0. */ -static int tpm_ibmvtpm_remove(struct vio_dev *vdev) +static void tpm_ibmvtpm_remove(struct vio_dev *vdev) { struct tpm_chip *chip = dev_get_drvdata(&vdev->dev); struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); @@ -372,8 +372,6 @@ static int tpm_ibmvtpm_remove(struct vio_dev *vdev) kfree(ibmvtpm); /* For tpm_ibmvtpm_get_desired_dma */ dev_set_drvdata(&vdev->dev, NULL); - - return 0; } /** diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index 431919d5f48a..a2e0395cbe61 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -707,12 +707,22 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip) const char *desc = "attempting to generate an interrupt"; u32 cap2; cap_t cap; + int ret; + /* TPM 2.0 */ if (chip->flags & TPM_CHIP_FLAG_TPM2) return tpm2_get_tpm_pt(chip, 0x100, &cap2, desc); - else - return tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, - 0); + + /* TPM 1.2 */ + ret = request_locality(chip, 0); + if (ret < 0) + return ret; + + ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0); + + release_locality(chip, 0); + + return ret; } /* Register the IRQ and issue a command that will cause an interrupt. If an @@ -1019,11 +1029,21 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, init_waitqueue_head(&priv->read_queue); init_waitqueue_head(&priv->int_queue); if (irq != -1) { - /* Before doing irq testing issue a command to the TPM in polling mode + /* + * Before doing irq testing issue a command to the TPM in polling mode * to make sure it works. May as well use that command to set the * proper timeouts for the driver. 
*/ - if (tpm_get_timeouts(chip)) { + + rc = request_locality(chip, 0); + if (rc < 0) + goto out_err; + + rc = tpm_get_timeouts(chip); + + release_locality(chip, 0); + + if (rc) { dev_err(dev, "Could not get TPM timeouts and durations\n"); rc = -ENODEV; goto out_err; diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c index f5746f9ea929..32ac6b6b7530 100644 --- a/drivers/clk/mvebu/armada-37xx-periph.c +++ b/drivers/clk/mvebu/armada-37xx-periph.c @@ -84,6 +84,7 @@ struct clk_pm_cpu { void __iomem *reg_div; u8 shift_div; struct regmap *nb_pm_base; + unsigned long l1_expiration; }; #define to_clk_double_div(_hw) container_of(_hw, struct clk_double_div, hw) @@ -440,33 +441,6 @@ static u8 clk_pm_cpu_get_parent(struct clk_hw *hw) return val; } -static int clk_pm_cpu_set_parent(struct clk_hw *hw, u8 index) -{ - struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw); - struct regmap *base = pm_cpu->nb_pm_base; - int load_level; - - /* - * We set the clock parent only if the DVFS is available but - * not enabled. - */ - if (IS_ERR(base) || armada_3700_pm_dvfs_is_enabled(base)) - return -EINVAL; - - /* Set the parent clock for all the load level */ - for (load_level = 0; load_level < LOAD_LEVEL_NR; load_level++) { - unsigned int reg, mask, val, - offset = ARMADA_37XX_NB_TBG_SEL_OFF; - - armada_3700_pm_dvfs_update_regs(load_level, ®, &offset); - - val = index << offset; - mask = ARMADA_37XX_NB_TBG_SEL_MASK << offset; - regmap_update_bits(base, reg, mask, val); - } - return 0; -} - static unsigned long clk_pm_cpu_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { @@ -514,8 +488,10 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate, } /* - * Switching the CPU from the L2 or L3 frequencies (300 and 200 Mhz - * respectively) to L0 frequency (1.2 Ghz) requires a significant + * Workaround when base CPU frequnecy is 1000 or 1200 MHz + * + * Switching the CPU from the L2 or L3 frequencies (250/300 or 200 MHz + * respectively) to L0 frequency (1/1.2 GHz) requires a significant * amount of time to let VDD stabilize to the appropriate * voltage. This amount of time is large enough that it cannot be * covered by the hardware countdown register. Due to this, the CPU @@ -525,26 +501,56 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate, * To work around this problem, we prevent switching directly from the * L2/L3 frequencies to the L0 frequency, and instead switch to the L1 * frequency in-between. The sequence therefore becomes: - * 1. First switch from L2/L3(200/300MHz) to L1(600MHZ) + * 1. First switch from L2/L3 (200/250/300 MHz) to L1 (500/600 MHz) * 2. Sleep 20ms for stabling VDD voltage - * 3. Then switch from L1(600MHZ) to L0(1200Mhz). + * 3. Then switch from L1 (500/600 MHz) to L0 (1000/1200 MHz). */ -static void clk_pm_cpu_set_rate_wa(unsigned long rate, struct regmap *base) +static void clk_pm_cpu_set_rate_wa(struct clk_pm_cpu *pm_cpu, + unsigned int new_level, unsigned long rate, + struct regmap *base) { unsigned int cur_level; - if (rate != 1200 * 1000 * 1000) - return; - regmap_read(base, ARMADA_37XX_NB_CPU_LOAD, &cur_level); cur_level &= ARMADA_37XX_NB_CPU_LOAD_MASK; - if (cur_level <= ARMADA_37XX_DVFS_LOAD_1) + + if (cur_level == new_level) + return; + + /* + * System wants to go to L1 on its own. If we are going from L2/L3, + * remember when 20ms will expire. If from L0, set the value so that + * next switch to L0 won't have to wait. 
+ */ + if (new_level == ARMADA_37XX_DVFS_LOAD_1) { + if (cur_level == ARMADA_37XX_DVFS_LOAD_0) + pm_cpu->l1_expiration = jiffies; + else + pm_cpu->l1_expiration = jiffies + msecs_to_jiffies(20); return; + } + + /* + * If we are setting to L2/L3, just invalidate L1 expiration time, + * sleeping is not needed. + */ + if (rate < 1000*1000*1000) + goto invalidate_l1_exp; + + /* + * We are going to L0 with rate >= 1GHz. Check whether we have been at + * L1 for long enough time. If not, go to L1 for 20ms. + */ + if (pm_cpu->l1_expiration && jiffies >= pm_cpu->l1_expiration) + goto invalidate_l1_exp; regmap_update_bits(base, ARMADA_37XX_NB_CPU_LOAD, ARMADA_37XX_NB_CPU_LOAD_MASK, ARMADA_37XX_DVFS_LOAD_1); msleep(20); + +invalidate_l1_exp: + pm_cpu->l1_expiration = 0; } static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate, @@ -578,7 +584,9 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate, reg = ARMADA_37XX_NB_CPU_LOAD; mask = ARMADA_37XX_NB_CPU_LOAD_MASK; - clk_pm_cpu_set_rate_wa(rate, base); + /* Apply workaround when base CPU frequency is 1000 or 1200 MHz */ + if (parent_rate >= 1000*1000*1000) + clk_pm_cpu_set_rate_wa(pm_cpu, load_level, rate, base); regmap_update_bits(base, reg, mask, load_level); @@ -592,7 +600,6 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate, static const struct clk_ops clk_pm_cpu_ops = { .get_parent = clk_pm_cpu_get_parent, - .set_parent = clk_pm_cpu_set_parent, .round_rate = clk_pm_cpu_round_rate, .set_rate = clk_pm_cpu_set_rate, .recalc_rate = clk_pm_cpu_recalc_rate, diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index 42f13a2d1cc1..05ff3b0d233e 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c @@ -730,7 +730,8 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw, struct clk_rate_request parent_req = { }; struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw); struct clk_hw *xo, *p0, *p1, *p2; - unsigned long request, p0_rate; + unsigned long p0_rate; + u8 mux_div = cgfx->div; int ret; p0 = cgfx->hws[0]; @@ -750,14 +751,15 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw, return 0; } - request = req->rate; - if (cgfx->div > 1) - parent_req.rate = request = request * cgfx->div; + if (mux_div == 0) + mux_div = 1; + + parent_req.rate = req->rate * mux_div; /* This has to be a fixed rate PLL */ p0_rate = clk_hw_get_rate(p0); - if (request == p0_rate) { + if (parent_req.rate == p0_rate) { req->rate = req->best_parent_rate = p0_rate; req->best_parent_hw = p0; return 0; @@ -765,7 +767,7 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw, if (req->best_parent_hw == p0) { /* Are we going back to a previously used rate? 
*/ - if (clk_hw_get_rate(p2) == request) + if (clk_hw_get_rate(p2) == parent_req.rate) req->best_parent_hw = p2; else req->best_parent_hw = p1; @@ -780,8 +782,7 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw, return ret; req->rate = req->best_parent_rate = parent_req.rate; - if (cgfx->div > 1) - req->rate /= cgfx->div; + req->rate /= mux_div; return 0; } diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c index 91dc390a583b..c623ce900406 100644 --- a/drivers/clk/qcom/clk-rpmh.c +++ b/drivers/clk/qcom/clk-rpmh.c @@ -510,9 +510,12 @@ static const struct clk_rpmh_desc clk_rpmh_sm8350 = { .num_clks = ARRAY_SIZE(sm8350_rpmh_clocks), }; +/* Resource name must match resource id present in cmd-db */ +DEFINE_CLK_RPMH_ARC(sc7280, bi_tcxo, bi_tcxo_ao, "xo.lvl", 0x3, 4); + static struct clk_hw *sc7280_rpmh_clocks[] = { - [RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw, - [RPMH_CXO_CLK_A] = &sdm845_bi_tcxo_ao.hw, + [RPMH_CXO_CLK] = &sc7280_bi_tcxo.hw, + [RPMH_CXO_CLK_A] = &sc7280_bi_tcxo_ao.hw, [RPMH_LN_BB_CLK2] = &sdm845_ln_bb_clk2.hw, [RPMH_LN_BB_CLK2_A] = &sdm845_ln_bb_clk2_ao.hw, [RPMH_RF_CLK1] = &sdm845_rf_clk1.hw, diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c index 88e896abb663..da8b627ca156 100644 --- a/drivers/clk/qcom/gcc-sc7180.c +++ b/drivers/clk/qcom/gcc-sc7180.c @@ -620,7 +620,7 @@ static struct clk_rcg2 gcc_sdcc1_apps_clk_src = { .name = "gcc_sdcc1_apps_clk_src", .parent_data = gcc_parent_data_1, .num_parents = 5, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; @@ -642,7 +642,7 @@ static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = { .name = "gcc_sdcc1_ice_core_clk_src", .parent_data = gcc_parent_data_0, .num_parents = 4, - .ops = &clk_rcg2_floor_ops, + .ops = &clk_rcg2_ops, }, }; diff --git a/drivers/counter/stm32-timer-cnt.c b/drivers/counter/stm32-timer-cnt.c index ef2a974a2f10..75bc401fdd18 100644 --- a/drivers/counter/stm32-timer-cnt.c +++ b/drivers/counter/stm32-timer-cnt.c @@ -31,7 +31,7 @@ struct stm32_timer_cnt { struct counter_device counter; struct regmap *regmap; struct clk *clk; - u32 ceiling; + u32 max_arr; bool enabled; struct stm32_timer_regs bak; }; @@ -44,13 +44,14 @@ struct stm32_timer_cnt { * @STM32_COUNT_ENCODER_MODE_3: counts on both TI1FP1 and TI2FP2 edges */ enum stm32_count_function { - STM32_COUNT_SLAVE_MODE_DISABLED = -1, + STM32_COUNT_SLAVE_MODE_DISABLED, STM32_COUNT_ENCODER_MODE_1, STM32_COUNT_ENCODER_MODE_2, STM32_COUNT_ENCODER_MODE_3, }; static enum counter_count_function stm32_count_functions[] = { + [STM32_COUNT_SLAVE_MODE_DISABLED] = COUNTER_COUNT_FUNCTION_INCREASE, [STM32_COUNT_ENCODER_MODE_1] = COUNTER_COUNT_FUNCTION_QUADRATURE_X2_A, [STM32_COUNT_ENCODER_MODE_2] = COUNTER_COUNT_FUNCTION_QUADRATURE_X2_B, [STM32_COUNT_ENCODER_MODE_3] = COUNTER_COUNT_FUNCTION_QUADRATURE_X4, @@ -73,8 +74,10 @@ static int stm32_count_write(struct counter_device *counter, const unsigned long val) { struct stm32_timer_cnt *const priv = counter->priv; + u32 ceiling; - if (val > priv->ceiling) + regmap_read(priv->regmap, TIM_ARR, &ceiling); + if (val > ceiling) return -EINVAL; return regmap_write(priv->regmap, TIM_CNT, val); @@ -90,6 +93,9 @@ static int stm32_count_function_get(struct counter_device *counter, regmap_read(priv->regmap, TIM_SMCR, &smcr); switch (smcr & TIM_SMCR_SMS) { + case 0: + *function = STM32_COUNT_SLAVE_MODE_DISABLED; + return 0; case 1: *function = STM32_COUNT_ENCODER_MODE_1; return 0; @@ -99,9 +105,9 @@ static int stm32_count_function_get(struct counter_device *counter, case 3: *function = 
STM32_COUNT_ENCODER_MODE_3; return 0; + default: + return -EINVAL; } - - return -EINVAL; } static int stm32_count_function_set(struct counter_device *counter, @@ -112,6 +118,9 @@ static int stm32_count_function_set(struct counter_device *counter, u32 cr1, sms; switch (function) { + case STM32_COUNT_SLAVE_MODE_DISABLED: + sms = 0; + break; case STM32_COUNT_ENCODER_MODE_1: sms = 1; break; @@ -122,8 +131,7 @@ static int stm32_count_function_set(struct counter_device *counter, sms = 3; break; default: - sms = 0; - break; + return -EINVAL; } /* Store enable status */ @@ -131,10 +139,6 @@ static int stm32_count_function_set(struct counter_device *counter, regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0); - /* TIMx_ARR register shouldn't be buffered (ARPE=0) */ - regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0); - regmap_write(priv->regmap, TIM_ARR, priv->ceiling); - regmap_update_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS, sms); /* Make sure that registers are updated */ @@ -185,11 +189,13 @@ static ssize_t stm32_count_ceiling_write(struct counter_device *counter, if (ret) return ret; + if (ceiling > priv->max_arr) + return -ERANGE; + /* TIMx_ARR register shouldn't be buffered (ARPE=0) */ regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0); regmap_write(priv->regmap, TIM_ARR, ceiling); - priv->ceiling = ceiling; return len; } @@ -274,31 +280,36 @@ static int stm32_action_get(struct counter_device *counter, size_t function; int err; - /* Default action mode (e.g. STM32_COUNT_SLAVE_MODE_DISABLED) */ - *action = STM32_SYNAPSE_ACTION_NONE; - err = stm32_count_function_get(counter, count, &function); if (err) - return 0; + return err; switch (function) { + case STM32_COUNT_SLAVE_MODE_DISABLED: + /* counts on internal clock when CEN=1 */ + *action = STM32_SYNAPSE_ACTION_NONE; + return 0; case STM32_COUNT_ENCODER_MODE_1: /* counts up/down on TI1FP1 edge depending on TI2FP2 level */ if (synapse->signal->id == count->synapses[0].signal->id) *action = STM32_SYNAPSE_ACTION_BOTH_EDGES; - break; + else + *action = STM32_SYNAPSE_ACTION_NONE; + return 0; case STM32_COUNT_ENCODER_MODE_2: /* counts up/down on TI2FP2 edge depending on TI1FP1 level */ if (synapse->signal->id == count->synapses[1].signal->id) *action = STM32_SYNAPSE_ACTION_BOTH_EDGES; - break; + else + *action = STM32_SYNAPSE_ACTION_NONE; + return 0; case STM32_COUNT_ENCODER_MODE_3: /* counts up/down on both TI1FP1 and TI2FP2 edges */ *action = STM32_SYNAPSE_ACTION_BOTH_EDGES; - break; + return 0; + default: + return -EINVAL; } - - return 0; } static const struct counter_ops stm32_timer_cnt_ops = { @@ -359,7 +370,7 @@ static int stm32_timer_cnt_probe(struct platform_device *pdev) priv->regmap = ddata->regmap; priv->clk = ddata->clk; - priv->ceiling = ddata->max_arr; + priv->max_arr = ddata->max_arr; priv->counter.name = dev_name(dev); priv->counter.parent = dev; diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 85de313ddec2..c3038cdc6865 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -13,7 +13,8 @@ config CPU_FREQ clock speed, you need to either enable a dynamic cpufreq governor (see below) after boot, or use a userspace tool. - For details, take a look at <file:Documentation/cpu-freq>. + For details, take a look at + <file:Documentation/admin-guide/pm/cpufreq.rst>. If in doubt, say N. @@ -140,8 +141,6 @@ config CPU_FREQ_GOV_USERSPACE To compile this driver as a module, choose M here: the module will be called cpufreq_userspace. 
- For details, take a look at <file:Documentation/cpu-freq/>. - If in doubt, say Y. config CPU_FREQ_GOV_ONDEMAND @@ -158,7 +157,8 @@ config CPU_FREQ_GOV_ONDEMAND To compile this driver as a module, choose M here: the module will be called cpufreq_ondemand. - For details, take a look at linux/Documentation/cpu-freq. + For details, take a look at + <file:Documentation/admin-guide/pm/cpufreq.rst>. If in doubt, say N. @@ -182,7 +182,8 @@ config CPU_FREQ_GOV_CONSERVATIVE To compile this driver as a module, choose M here: the module will be called cpufreq_conservative. - For details, take a look at linux/Documentation/cpu-freq. + For details, take a look at + <file:Documentation/admin-guide/pm/cpufreq.rst>. If in doubt, say N. @@ -246,8 +247,6 @@ config IA64_ACPI_CPUFREQ This driver adds a CPUFreq driver which utilizes the ACPI Processor Performance States. - For details, take a look at <file:Documentation/cpu-freq/>. - If in doubt, say N. endif @@ -271,8 +270,6 @@ config LOONGSON2_CPUFREQ Loongson2F and it's successors support this feature. - For details, take a look at <file:Documentation/cpu-freq/>. - If in doubt, say N. config LOONGSON1_CPUFREQ @@ -282,8 +279,6 @@ config LOONGSON1_CPUFREQ This option adds a CPUFreq driver for loongson1 processors which support software configurable cpu frequency. - For details, take a look at <file:Documentation/cpu-freq/>. - If in doubt, say N. endif @@ -293,8 +288,6 @@ config SPARC_US3_CPUFREQ help This adds the CPUFreq driver for UltraSPARC-III processors. - For details, take a look at <file:Documentation/cpu-freq>. - If in doubt, say N. config SPARC_US2E_CPUFREQ @@ -302,8 +295,6 @@ config SPARC_US2E_CPUFREQ help This adds the CPUFreq driver for UltraSPARC-IIe processors. - For details, take a look at <file:Documentation/cpu-freq>. - If in doubt, say N. endif @@ -318,8 +309,6 @@ config SH_CPU_FREQ will also generate a notice in the boot log before disabling itself if the CPU in question is not capable of rate rounding. - For details, take a look at <file:Documentation/cpu-freq>. - If unsure, say N. endif diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index e65e0a43be64..a5c5f70acfc9 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -19,6 +19,16 @@ config ACPI_CPPC_CPUFREQ If in doubt, say N. +config ACPI_CPPC_CPUFREQ_FIE + bool "Frequency Invariance support for CPPC cpufreq driver" + depends on ACPI_CPPC_CPUFREQ && GENERIC_ARCH_TOPOLOGY + default y + help + This extends frequency invariance support in the CPPC cpufreq driver, + by using CPPC delivered and reference performance counters. + + If in doubt, say N. 
+ config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM tristate "Allwinner nvmem based SUN50I CPUFreq driver" depends on ARCH_SUNXI diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c index b4af4094309b..3fc98a3ffd91 100644 --- a/drivers/cpufreq/armada-37xx-cpufreq.c +++ b/drivers/cpufreq/armada-37xx-cpufreq.c @@ -25,6 +25,10 @@ #include "cpufreq-dt.h" +/* Clk register set */ +#define ARMADA_37XX_CLK_TBG_SEL 0 +#define ARMADA_37XX_CLK_TBG_SEL_CPU_OFF 22 + /* Power management in North Bridge register set */ #define ARMADA_37XX_NB_L0L1 0x18 #define ARMADA_37XX_NB_L2L3 0x1C @@ -69,6 +73,8 @@ #define LOAD_LEVEL_NR 4 #define MIN_VOLT_MV 1000 +#define MIN_VOLT_MV_FOR_L1_1000MHZ 1108 +#define MIN_VOLT_MV_FOR_L1_1200MHZ 1155 /* AVS value for the corresponding voltage (in mV) */ static int avs_map[] = { @@ -80,6 +86,8 @@ static int avs_map[] = { }; struct armada37xx_cpufreq_state { + struct platform_device *pdev; + struct device *cpu_dev; struct regmap *regmap; u32 nb_l0l1; u32 nb_l2l3; @@ -120,10 +128,15 @@ static struct armada_37xx_dvfs *armada_37xx_cpu_freq_info_get(u32 freq) * will be configured then the DVFS will be enabled. */ static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base, - struct clk *clk, u8 *divider) + struct regmap *clk_base, u8 *divider) { + u32 cpu_tbg_sel; int load_lvl; - struct clk *parent; + + /* Determine to which TBG clock is CPU connected */ + regmap_read(clk_base, ARMADA_37XX_CLK_TBG_SEL, &cpu_tbg_sel); + cpu_tbg_sel >>= ARMADA_37XX_CLK_TBG_SEL_CPU_OFF; + cpu_tbg_sel &= ARMADA_37XX_NB_TBG_SEL_MASK; for (load_lvl = 0; load_lvl < LOAD_LEVEL_NR; load_lvl++) { unsigned int reg, mask, val, offset = 0; @@ -142,6 +155,11 @@ static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base, mask = (ARMADA_37XX_NB_CLK_SEL_MASK << ARMADA_37XX_NB_CLK_SEL_OFF); + /* Set TBG index, for all levels we use the same TBG */ + val = cpu_tbg_sel << ARMADA_37XX_NB_TBG_SEL_OFF; + mask = (ARMADA_37XX_NB_TBG_SEL_MASK + << ARMADA_37XX_NB_TBG_SEL_OFF); + /* * Set cpu divider based on the pre-computed array in * order to have balanced step. @@ -160,14 +178,6 @@ static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base, regmap_update_bits(base, reg, mask, val); } - - /* - * Set cpu clock source, for all the level we keep the same - * clock source that the one already configured. For this one - * we need to use the clock framework - */ - parent = clk_get_parent(clk); - clk_set_parent(clk, parent); } /* @@ -202,6 +212,8 @@ static u32 armada_37xx_avs_val_match(int target_vm) * - L2 & L3 voltage should be about 150mv smaller than L0 voltage. * This function calculates L1 & L2 & L3 AVS values dynamically based * on L0 voltage and fill all AVS values to the AVS value table. + * When base CPU frequency is 1000 or 1200 MHz then there is additional + * minimal avs value for load L1. */ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base, struct armada_37xx_dvfs *dvfs) @@ -233,6 +245,19 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base, for (load_level = 1; load_level < LOAD_LEVEL_NR; load_level++) dvfs->avs[load_level] = avs_min; + /* + * Set the avs values for load L0 and L1 when base CPU frequency + * is 1000/1200 MHz to its typical initial values according to + * the Armada 3700 Hardware Specifications. 
+ */ + if (dvfs->cpu_freq_max >= 1000*1000*1000) { + if (dvfs->cpu_freq_max >= 1200*1000*1000) + avs_min = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1200MHZ); + else + avs_min = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1000MHZ); + dvfs->avs[0] = dvfs->avs[1] = avs_min; + } + return; } @@ -252,6 +277,26 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base, target_vm = avs_map[l0_vdd_min] - 150; target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV; dvfs->avs[2] = dvfs->avs[3] = armada_37xx_avs_val_match(target_vm); + + /* + * Fix the avs value for load L1 when base CPU frequency is 1000/1200 MHz, + * otherwise the CPU gets stuck when switching from load L1 to load L0. + * Also ensure that avs value for load L1 is not higher than for L0. + */ + if (dvfs->cpu_freq_max >= 1000*1000*1000) { + u32 avs_min_l1; + + if (dvfs->cpu_freq_max >= 1200*1000*1000) + avs_min_l1 = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1200MHZ); + else + avs_min_l1 = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1000MHZ); + + if (avs_min_l1 > dvfs->avs[0]) + avs_min_l1 = dvfs->avs[0]; + + if (dvfs->avs[1] < avs_min_l1) + dvfs->avs[1] = avs_min_l1; + } } static void __init armada37xx_cpufreq_avs_setup(struct regmap *base, @@ -357,12 +402,17 @@ static int __init armada37xx_cpufreq_driver_init(void) struct armada_37xx_dvfs *dvfs; struct platform_device *pdev; unsigned long freq; - unsigned int cur_frequency, base_frequency; - struct regmap *nb_pm_base, *avs_base; + unsigned int base_frequency; + struct regmap *nb_clk_base, *nb_pm_base, *avs_base; struct device *cpu_dev; int load_lvl, ret; struct clk *clk, *parent; + nb_clk_base = + syscon_regmap_lookup_by_compatible("marvell,armada-3700-periph-clock-nb"); + if (IS_ERR(nb_clk_base)) + return -ENODEV; + nb_pm_base = syscon_regmap_lookup_by_compatible("marvell,armada-3700-nb-pm"); @@ -413,15 +463,7 @@ static int __init armada37xx_cpufreq_driver_init(void) return -EINVAL; } - /* Get nominal (current) CPU frequency */ - cur_frequency = clk_get_rate(clk); - if (!cur_frequency) { - dev_err(cpu_dev, "Failed to get clock rate for CPU\n"); - clk_put(clk); - return -EINVAL; - } - - dvfs = armada_37xx_cpu_freq_info_get(cur_frequency); + dvfs = armada_37xx_cpu_freq_info_get(base_frequency); if (!dvfs) { clk_put(clk); return -EINVAL; @@ -439,7 +481,7 @@ static int __init armada37xx_cpufreq_driver_init(void) armada37xx_cpufreq_avs_configure(avs_base, dvfs); armada37xx_cpufreq_avs_setup(avs_base, dvfs); - armada37xx_cpufreq_dvfs_setup(nb_pm_base, clk, dvfs->divider); + armada37xx_cpufreq_dvfs_setup(nb_pm_base, nb_clk_base, dvfs->divider); clk_put(clk); for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR; @@ -466,6 +508,9 @@ static int __init armada37xx_cpufreq_driver_init(void) if (ret) goto disable_dvfs; + armada37xx_cpufreq_state->cpu_dev = cpu_dev; + armada37xx_cpufreq_state->pdev = pdev; + platform_set_drvdata(pdev, dvfs); return 0; disable_dvfs: @@ -473,7 +518,7 @@ disable_dvfs: remove_opp: /* clean-up the already added opp before leaving */ while (load_lvl-- > ARMADA_37XX_DVFS_LOAD_0) { - freq = cur_frequency / dvfs->divider[load_lvl]; + freq = base_frequency / dvfs->divider[load_lvl]; dev_pm_opp_remove(cpu_dev, freq); } @@ -484,6 +529,26 @@ remove_opp: /* late_initcall, to guarantee the driver is loaded after A37xx clock driver */ late_initcall(armada37xx_cpufreq_driver_init); +static void __exit armada37xx_cpufreq_driver_exit(void) +{ + struct platform_device *pdev = armada37xx_cpufreq_state->pdev; + struct armada_37xx_dvfs *dvfs 
= platform_get_drvdata(pdev); + unsigned long freq; + int load_lvl; + + platform_device_unregister(pdev); + + armada37xx_cpufreq_disable_dvfs(armada37xx_cpufreq_state->regmap); + + for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR; load_lvl++) { + freq = dvfs->cpu_freq_max / dvfs->divider[load_lvl]; + dev_pm_opp_remove(armada37xx_cpufreq_state->cpu_dev, freq); + } + + kfree(armada37xx_cpufreq_state); +} +module_exit(armada37xx_cpufreq_driver_exit); + static const struct of_device_id __maybe_unused armada37xx_cpufreq_of_match[] = { { .compatible = "marvell,armada-3700-nb-pm" }, { }, diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index 8a482c434ea6..3848b4c222e1 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -10,14 +10,18 @@ #define pr_fmt(fmt) "CPPC Cpufreq:" fmt +#include <linux/arch_topology.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/cpu.h> #include <linux/cpufreq.h> #include <linux/dmi.h> +#include <linux/irq_work.h> +#include <linux/kthread.h> #include <linux/time.h> #include <linux/vmalloc.h> +#include <uapi/linux/sched/types.h> #include <asm/unaligned.h> @@ -57,6 +61,204 @@ static struct cppc_workaround_oem_info wa_info[] = { } }; +#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE + +/* Frequency invariance support */ +struct cppc_freq_invariance { + int cpu; + struct irq_work irq_work; + struct kthread_work work; + struct cppc_perf_fb_ctrs prev_perf_fb_ctrs; + struct cppc_cpudata *cpu_data; +}; + +static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv); +static struct kthread_worker *kworker_fie; +static bool fie_disabled; + +static struct cpufreq_driver cppc_cpufreq_driver; +static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu); +static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data, + struct cppc_perf_fb_ctrs fb_ctrs_t0, + struct cppc_perf_fb_ctrs fb_ctrs_t1); + +/** + * cppc_scale_freq_workfn - CPPC arch_freq_scale updater for frequency invariance + * @work: The work item. + * + * The CPPC driver register itself with the topology core to provide its own + * implementation (cppc_scale_freq_tick()) of topology_scale_freq_tick() which + * gets called by the scheduler on every tick. + * + * Note that the arch specific counters have higher priority than CPPC counters, + * if available, though the CPPC driver doesn't need to have any special + * handling for that. + * + * On an invocation of cppc_scale_freq_tick(), we schedule an irq work (since we + * reach here from hard-irq context), which then schedules a normal work item + * and cppc_scale_freq_workfn() updates the per_cpu arch_freq_scale variable + * based on the counter updates since the last tick. 
+ */ +static void cppc_scale_freq_workfn(struct kthread_work *work) +{ + struct cppc_freq_invariance *cppc_fi; + struct cppc_perf_fb_ctrs fb_ctrs = {0}; + struct cppc_cpudata *cpu_data; + unsigned long local_freq_scale; + u64 perf; + + cppc_fi = container_of(work, struct cppc_freq_invariance, work); + cpu_data = cppc_fi->cpu_data; + + if (cppc_get_perf_ctrs(cppc_fi->cpu, &fb_ctrs)) { + pr_warn("%s: failed to read perf counters\n", __func__); + return; + } + + cppc_fi->prev_perf_fb_ctrs = fb_ctrs; + perf = cppc_perf_from_fbctrs(cpu_data, cppc_fi->prev_perf_fb_ctrs, + fb_ctrs); + + perf <<= SCHED_CAPACITY_SHIFT; + local_freq_scale = div64_u64(perf, cpu_data->perf_caps.highest_perf); + if (WARN_ON(local_freq_scale > 1024)) + local_freq_scale = 1024; + + per_cpu(arch_freq_scale, cppc_fi->cpu) = local_freq_scale; +} + +static void cppc_irq_work(struct irq_work *irq_work) +{ + struct cppc_freq_invariance *cppc_fi; + + cppc_fi = container_of(irq_work, struct cppc_freq_invariance, irq_work); + kthread_queue_work(kworker_fie, &cppc_fi->work); +} + +static void cppc_scale_freq_tick(void) +{ + struct cppc_freq_invariance *cppc_fi = &per_cpu(cppc_freq_inv, smp_processor_id()); + + /* + * cppc_get_perf_ctrs() can potentially sleep, call that from the right + * context. + */ + irq_work_queue(&cppc_fi->irq_work); +} + +static struct scale_freq_data cppc_sftd = { + .source = SCALE_FREQ_SOURCE_CPPC, + .set_freq_scale = cppc_scale_freq_tick, +}; + +static void cppc_freq_invariance_policy_init(struct cpufreq_policy *policy, + struct cppc_cpudata *cpu_data) +{ + struct cppc_perf_fb_ctrs fb_ctrs = {0}; + struct cppc_freq_invariance *cppc_fi; + int i, ret; + + if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate) + return; + + if (fie_disabled) + return; + + for_each_cpu(i, policy->cpus) { + cppc_fi = &per_cpu(cppc_freq_inv, i); + cppc_fi->cpu = i; + cppc_fi->cpu_data = cpu_data; + kthread_init_work(&cppc_fi->work, cppc_scale_freq_workfn); + init_irq_work(&cppc_fi->irq_work, cppc_irq_work); + + ret = cppc_get_perf_ctrs(i, &fb_ctrs); + if (ret) { + pr_warn("%s: failed to read perf counters: %d\n", + __func__, ret); + fie_disabled = true; + } else { + cppc_fi->prev_perf_fb_ctrs = fb_ctrs; + } + } +} + +static void __init cppc_freq_invariance_init(void) +{ + struct sched_attr attr = { + .size = sizeof(struct sched_attr), + .sched_policy = SCHED_DEADLINE, + .sched_nice = 0, + .sched_priority = 0, + /* + * Fake (unused) bandwidth; workaround to "fix" + * priority inheritance. 
+ */ + .sched_runtime = 1000000, + .sched_deadline = 10000000, + .sched_period = 10000000, + }; + int ret; + + if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate) + return; + + if (fie_disabled) + return; + + kworker_fie = kthread_create_worker(0, "cppc_fie"); + if (IS_ERR(kworker_fie)) + return; + + ret = sched_setattr_nocheck(kworker_fie->task, &attr); + if (ret) { + pr_warn("%s: failed to set SCHED_DEADLINE: %d\n", __func__, + ret); + kthread_destroy_worker(kworker_fie); + return; + } + + /* Register for freq-invariance */ + topology_set_scale_freq_source(&cppc_sftd, cpu_present_mask); +} + +static void cppc_freq_invariance_exit(void) +{ + struct cppc_freq_invariance *cppc_fi; + int i; + + if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate) + return; + + if (fie_disabled) + return; + + topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_CPPC, cpu_present_mask); + + for_each_possible_cpu(i) { + cppc_fi = &per_cpu(cppc_freq_inv, i); + irq_work_sync(&cppc_fi->irq_work); + } + + kthread_destroy_worker(kworker_fie); + kworker_fie = NULL; +} + +#else +static inline void +cppc_freq_invariance_policy_init(struct cpufreq_policy *policy, + struct cppc_cpudata *cpu_data) +{ +} + +static inline void cppc_freq_invariance_init(void) +{ +} + +static inline void cppc_freq_invariance_exit(void) +{ +} +#endif /* CONFIG_ACPI_CPPC_CPUFREQ_FIE */ + /* Callback function used to retrieve the max frequency from DMI */ static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private) { @@ -216,26 +418,16 @@ static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu) { unsigned long implementor = read_cpuid_implementor(); unsigned long part_num = read_cpuid_part_number(); - unsigned int delay_us = 0; switch (implementor) { case ARM_CPU_IMP_QCOM: switch (part_num) { case QCOM_CPU_PART_FALKOR_V1: case QCOM_CPU_PART_FALKOR: - delay_us = 10000; - break; - default: - delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC; - break; + return 10000; } - break; - default: - delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC; - break; } - - return delay_us; + return cppc_get_transition_latency(cpu) / NSEC_PER_USEC; } #else @@ -355,9 +547,12 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) cpu_data->perf_ctrls.desired_perf = caps->highest_perf; ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls); - if (ret) + if (ret) { pr_debug("Err setting perf value:%d on CPU:%d. 
ret:%d\n", caps->highest_perf, cpu, ret); + } else { + cppc_freq_invariance_policy_init(policy, cpu_data); + } return ret; } @@ -370,12 +565,12 @@ static inline u64 get_delta(u64 t1, u64 t0) return (u32)t1 - (u32)t0; } -static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu_data, - struct cppc_perf_fb_ctrs fb_ctrs_t0, - struct cppc_perf_fb_ctrs fb_ctrs_t1) +static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data, + struct cppc_perf_fb_ctrs fb_ctrs_t0, + struct cppc_perf_fb_ctrs fb_ctrs_t1) { u64 delta_reference, delta_delivered; - u64 reference_perf, delivered_perf; + u64 reference_perf; reference_perf = fb_ctrs_t0.reference_perf; @@ -384,12 +579,21 @@ static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu_data, delta_delivered = get_delta(fb_ctrs_t1.delivered, fb_ctrs_t0.delivered); - /* Check to avoid divide-by zero */ - if (delta_reference || delta_delivered) - delivered_perf = (reference_perf * delta_delivered) / - delta_reference; - else - delivered_perf = cpu_data->perf_ctrls.desired_perf; + /* Check to avoid divide-by zero and invalid delivered_perf */ + if (!delta_reference || !delta_delivered) + return cpu_data->perf_ctrls.desired_perf; + + return (reference_perf * delta_delivered) / delta_reference; +} + +static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu_data, + struct cppc_perf_fb_ctrs fb_ctrs_t0, + struct cppc_perf_fb_ctrs fb_ctrs_t1) +{ + u64 delivered_perf; + + delivered_perf = cppc_perf_from_fbctrs(cpu_data, fb_ctrs_t0, + fb_ctrs_t1); return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf); } @@ -514,6 +718,8 @@ static void cppc_check_hisi_workaround(void) static int __init cppc_cpufreq_init(void) { + int ret; + if ((acpi_disabled) || !acpi_cpc_valid()) return -ENODEV; @@ -521,7 +727,11 @@ static int __init cppc_cpufreq_init(void) cppc_check_hisi_workaround(); - return cpufreq_register_driver(&cppc_cpufreq_driver); + ret = cpufreq_register_driver(&cppc_cpufreq_driver); + if (!ret) + cppc_freq_invariance_init(); + + return ret; } static inline void free_cpu_data(void) @@ -538,6 +748,7 @@ static inline void free_cpu_data(void) static void __exit cppc_cpufreq_exit(void) { + cppc_freq_invariance_exit(); cpufreq_unregister_driver(&cppc_cpufreq_driver); free_cpu_data(); diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index 3ba2f716fe97..5e07065ec22f 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -103,6 +103,8 @@ static const struct of_device_id whitelist[] __initconst = { static const struct of_device_id blacklist[] __initconst = { { .compatible = "allwinner,sun50i-h6", }, + { .compatible = "arm,vexpress", }, + { .compatible = "calxeda,highbank", }, { .compatible = "calxeda,ecx-2000", }, diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index b1e1bdc63b01..ece52863ba62 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -255,10 +255,15 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu) * before updating priv->cpus. Otherwise, we will end up creating * duplicate OPPs for the CPUs. * - * OPPs might be populated at runtime, don't check for error here. + * OPPs might be populated at runtime, don't fail for error here unless + * it is -EPROBE_DEFER. 
*/ - if (!dev_pm_opp_of_cpumask_add_table(priv->cpus)) + ret = dev_pm_opp_of_cpumask_add_table(priv->cpus); + if (!ret) { priv->have_static_opps = true; + } else if (ret == -EPROBE_DEFER) { + goto out; + } /* * The OPP table must be initialized, statically or dynamically, by this diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 1d1b563cea4b..802abc925b2a 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -42,9 +42,6 @@ static LIST_HEAD(cpufreq_policy_list); #define for_each_inactive_policy(__policy) \ for_each_suitable_policy(__policy, false) -#define for_each_policy(__policy) \ - list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) - /* Iterate over governors */ static LIST_HEAD(cpufreq_governor_list); #define for_each_governor(__governor) \ diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c index d3f756f7b5a0..67e56cf638ef 100644 --- a/drivers/cpufreq/freq_table.c +++ b/drivers/cpufreq/freq_table.c @@ -267,7 +267,7 @@ struct freq_attr cpufreq_freq_attr_##_name##_freqs = \ __ATTR_RO(_name##_frequencies) /* - * show_scaling_available_frequencies - show available normal frequencies for + * scaling_available_frequencies_show - show available normal frequencies for * the specified CPU */ static ssize_t scaling_available_frequencies_show(struct cpufreq_policy *policy, @@ -279,7 +279,7 @@ cpufreq_attr_available_freq(scaling_available); EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs); /* - * show_available_boost_freqs - show available boost frequencies for + * scaling_boost_frequencies_show - show available boost frequencies for * the specified CPU */ static ssize_t scaling_boost_frequencies_show(struct cpufreq_policy *policy, diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c index 2efe7189ccc4..c6bdc455517f 100644 --- a/drivers/cpufreq/ia64-acpi-cpufreq.c +++ b/drivers/cpufreq/ia64-acpi-cpufreq.c @@ -54,7 +54,7 @@ processor_set_pstate ( retval = ia64_pal_set_pstate((u64)value); if (retval) { - pr_debug("Failed to set freq to 0x%x, with error 0x%lx\n", + pr_debug("Failed to set freq to 0x%x, with error 0x%llx\n", value, retval); return -ENODEV; } @@ -77,7 +77,7 @@ processor_get_pstate ( if (retval) pr_debug("Failed to get current freq with " - "error 0x%lx, idx 0x%x\n", retval, *value); + "error 0x%llx, idx 0x%x\n", retval, *value); return (int)retval; } diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 5175ae3cac44..f0401064d7aa 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -819,19 +819,21 @@ static struct freq_attr *hwp_cpufreq_attrs[] = { NULL, }; -static void intel_pstate_get_hwp_max(struct cpudata *cpu, int *phy_max, - int *current_max) +static void __intel_pstate_get_hwp_cap(struct cpudata *cpu) { u64 cap; rdmsrl_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap); WRITE_ONCE(cpu->hwp_cap_cached, cap); - if (global.no_turbo || global.turbo_disabled) - *current_max = HWP_GUARANTEED_PERF(cap); - else - *current_max = HWP_HIGHEST_PERF(cap); + cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(cap); + cpu->pstate.turbo_pstate = HWP_HIGHEST_PERF(cap); +} - *phy_max = HWP_HIGHEST_PERF(cap); +static void intel_pstate_get_hwp_cap(struct cpudata *cpu) +{ + __intel_pstate_get_hwp_cap(cpu); + cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling; + cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling; } static void intel_pstate_hwp_set(unsigned int cpu) @@ -1195,12 +1197,13 @@ 
static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, static void update_qos_request(enum freq_qos_req_type type) { - int max_state, turbo_max, freq, i, perf_pct; struct freq_qos_request *req; struct cpufreq_policy *policy; + int i; for_each_possible_cpu(i) { struct cpudata *cpu = all_cpu_data[i]; + unsigned int freq, perf_pct; policy = cpufreq_cpu_get(i); if (!policy) @@ -1213,9 +1216,7 @@ static void update_qos_request(enum freq_qos_req_type type) continue; if (hwp_active) - intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state); - else - turbo_max = cpu->pstate.turbo_pstate; + intel_pstate_get_hwp_cap(cpu); if (type == FREQ_QOS_MIN) { perf_pct = global.min_perf_pct; @@ -1224,8 +1225,7 @@ static void update_qos_request(enum freq_qos_req_type type) perf_pct = global.max_perf_pct; } - freq = DIV_ROUND_UP(turbo_max * perf_pct, 100); - freq *= cpu->pstate.scaling; + freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * perf_pct, 100); if (freq_qos_update_request(req, freq) < 0) pr_warn("Failed to update freq constraint: CPU%d\n", i); @@ -1715,21 +1715,17 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) { cpu->pstate.min_pstate = pstate_funcs.get_min(); cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical(); - cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(); cpu->pstate.scaling = pstate_funcs.get_scaling(); if (hwp_active && !hwp_mode_bdw) { - unsigned int phy_max, current_max; - - intel_pstate_get_hwp_max(cpu, &phy_max, ¤t_max); - cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling; - cpu->pstate.turbo_pstate = phy_max; - cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(READ_ONCE(cpu->hwp_cap_cached)); + __intel_pstate_get_hwp_cap(cpu); } else { - cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling; cpu->pstate.max_pstate = pstate_funcs.get_max(); + cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(); } + cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling; + cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling; if (pstate_funcs.get_aperf_mperf_shift) cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift(); @@ -2199,41 +2195,34 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu, unsigned int policy_min, unsigned int policy_max) { + int scaling = cpu->pstate.scaling; int32_t max_policy_perf, min_policy_perf; - int max_state, turbo_max; - int max_freq; /* - * HWP needs some special consideration, because on BDX the - * HWP_REQUEST uses abstract value to represent performance - * rather than pure ratios. + * HWP needs some special consideration, because HWP_REQUEST uses + * abstract values to represent performance rather than pure ratios. */ - if (hwp_active) { - intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state); - } else { - max_state = global.no_turbo || global.turbo_disabled ? 
- cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; - turbo_max = cpu->pstate.turbo_pstate; - } - max_freq = max_state * cpu->pstate.scaling; + if (hwp_active) + intel_pstate_get_hwp_cap(cpu); - max_policy_perf = max_state * policy_max / max_freq; + max_policy_perf = policy_max / scaling; if (policy_max == policy_min) { min_policy_perf = max_policy_perf; } else { - min_policy_perf = max_state * policy_min / max_freq; + min_policy_perf = policy_min / scaling; min_policy_perf = clamp_t(int32_t, min_policy_perf, 0, max_policy_perf); } - pr_debug("cpu:%d max_state %d min_policy_perf:%d max_policy_perf:%d\n", - cpu->cpu, max_state, min_policy_perf, max_policy_perf); + pr_debug("cpu:%d min_policy_perf:%d max_policy_perf:%d\n", + cpu->cpu, min_policy_perf, max_policy_perf); /* Normalize user input to [min_perf, max_perf] */ if (per_cpu_limits) { cpu->min_perf_ratio = min_policy_perf; cpu->max_perf_ratio = max_policy_perf; } else { + int turbo_max = cpu->pstate.turbo_pstate; int32_t global_min, global_max; /* Global limits are in percent of the maximum turbo P-state. */ @@ -2322,10 +2311,9 @@ static void intel_pstate_verify_cpu_policy(struct cpudata *cpu, update_turbo_state(); if (hwp_active) { - int max_state, turbo_max; - - intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state); - max_freq = max_state * cpu->pstate.scaling; + intel_pstate_get_hwp_cap(cpu); + max_freq = global.no_turbo || global.turbo_disabled ? + cpu->pstate.max_freq : cpu->pstate.turbo_freq; } else { max_freq = intel_pstate_get_max_freq(cpu); } @@ -2416,25 +2404,15 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy) cpu->max_perf_ratio = 0xFF; cpu->min_perf_ratio = 0; - policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling; - policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling; - /* cpuinfo and default policy values */ policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; update_turbo_state(); global.turbo_disabled_mf = global.turbo_disabled; policy->cpuinfo.max_freq = global.turbo_disabled ? - cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; - policy->cpuinfo.max_freq *= cpu->pstate.scaling; - - if (hwp_active) { - unsigned int max_freq; - - max_freq = global.turbo_disabled ? 
cpu->pstate.max_freq : cpu->pstate.turbo_freq; - if (max_freq < policy->cpuinfo.max_freq) - policy->cpuinfo.max_freq = max_freq; - } + + policy->min = policy->cpuinfo.min_freq; + policy->max = policy->cpuinfo.max_freq; intel_pstate_init_acpi_perf_limits(policy); @@ -2683,10 +2661,10 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum, static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) { - int max_state, turbo_max, min_freq, max_freq, ret; struct freq_qos_request *req; struct cpudata *cpu; struct device *dev; + int ret, freq; dev = get_cpu_device(policy->cpu); if (!dev) @@ -2711,30 +2689,31 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) if (hwp_active) { u64 value; - intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state); policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP; + + intel_pstate_get_hwp_cap(cpu); + rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value); WRITE_ONCE(cpu->hwp_req_cached, value); + cpu->epp_cached = intel_pstate_get_epp(cpu, value); } else { - turbo_max = cpu->pstate.turbo_pstate; policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY; } - min_freq = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100); - min_freq *= cpu->pstate.scaling; - max_freq = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100); - max_freq *= cpu->pstate.scaling; + freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.min_perf_pct, 100); ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MIN, - min_freq); + freq); if (ret < 0) { dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret); goto free_req; } + freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.max_perf_pct, 100); + ret = freq_qos_add_request(&policy->constraints, req + 1, FREQ_QOS_MAX, - max_freq); + freq); if (ret < 0) { dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret); goto remove_min_req; diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c index d3c23447b892..f86859bf76f1 100644 --- a/drivers/cpufreq/qcom-cpufreq-hw.c +++ b/drivers/cpufreq/qcom-cpufreq-hw.c @@ -317,9 +317,9 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy) } base = ioremap(res->start, resource_size(res)); - if (IS_ERR(base)) { + if (!base) { dev_err(dev, "failed to map resource %pR\n", res); - ret = PTR_ERR(base); + ret = -ENOMEM; goto release_region; } @@ -374,7 +374,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy) error: kfree(data); unmap_base: - iounmap(data->base); + iounmap(base); release_region: release_mem_region(res->start, resource_size(res)); return ret; diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c index 69786e5bbf05..ad7d4f272ddc 100644 --- a/drivers/cpufreq/s5pv210-cpufreq.c +++ b/drivers/cpufreq/s5pv210-cpufreq.c @@ -91,7 +91,7 @@ static DEFINE_MUTEX(set_freq_lock); /* Use 800MHz when entering sleep mode */ #define SLEEP_FREQ (800 * 1000) -/* Tracks if cpu freqency can be updated anymore */ +/* Tracks if CPU frequency can be updated anymore */ static bool no_cpufreq_access; /* @@ -190,7 +190,7 @@ static u32 clkdiv_val[5][11] = { /* * This function set DRAM refresh counter - * accoriding to operating frequency of DRAM + * according to operating frequency of DRAM * ch: DMC port number 0 or 1 * freq: Operating frequency of DRAM(KHz) */ @@ -320,7 +320,7 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index) /* * 3. 
DMC1 refresh count for 133Mhz if (index == L4) is - * true refresh counter is already programed in upper + * true refresh counter is already programmed in upper * code. 0x287@83Mhz */ if (!bus_speed_changing) @@ -378,7 +378,7 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index) /* * 6. Turn on APLL * 6-1. Set PMS values - * 6-2. Wait untile the PLL is locked + * 6-2. Wait until the PLL is locked */ if (index == L0) writel_relaxed(APLL_VAL_1000, S5P_APLL_CON); @@ -390,7 +390,7 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index) } while (!(reg & (0x1 << 29))); /* - * 7. Change souce clock from SCLKMPLL(667Mhz) + * 7. Change source clock from SCLKMPLL(667Mhz) * to SCLKA2M(200Mhz) in MFC_MUX and G3D MUX * (667/4=166)->(200/4=50)Mhz */ @@ -439,8 +439,8 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index) } /* - * L4 level need to change memory bus speed, hence onedram clock divier - * and memory refresh parameter should be changed + * L4 level needs to change memory bus speed, hence ONEDRAM clock + * divider and memory refresh parameter should be changed */ if (bus_speed_changing) { reg = readl_relaxed(S5P_CLK_DIV6); diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm index 0844fadc4be8..334f83e56120 100644 --- a/drivers/cpuidle/Kconfig.arm +++ b/drivers/cpuidle/Kconfig.arm @@ -107,7 +107,7 @@ config ARM_TEGRA_CPUIDLE config ARM_QCOM_SPM_CPUIDLE bool "CPU Idle Driver for Qualcomm Subsystem Power Manager (SPM)" - depends on (ARCH_QCOM || COMPILE_TEST) && !ARM64 + depends on (ARCH_QCOM || COMPILE_TEST) && !ARM64 && MMU select ARM_CPU_SUSPEND select CPU_IDLE_MULTIPLE_DRIVERS select DT_IDLE_STATES diff --git a/drivers/cpuidle/cpuidle-tegra.c b/drivers/cpuidle/cpuidle-tegra.c index 191966dc8d02..508bd9f23792 100644 --- a/drivers/cpuidle/cpuidle-tegra.c +++ b/drivers/cpuidle/cpuidle-tegra.c @@ -48,11 +48,6 @@ enum tegra_state { static atomic_t tegra_idle_barrier; static atomic_t tegra_abort_flag; -static inline bool tegra_cpuidle_using_firmware(void) -{ - return firmware_ops->prepare_idle && firmware_ops->do_idle; -} - static void tegra_cpuidle_report_cpus_state(void) { unsigned long cpu, lcpu, csr; @@ -135,13 +130,9 @@ static int tegra_cpuidle_c7_enter(void) { int err; - if (tegra_cpuidle_using_firmware()) { - err = call_firmware_op(prepare_idle, TF_PM_MODE_LP2_NOFLUSH_L2); - if (err) - return err; - - return call_firmware_op(do_idle, 0); - } + err = call_firmware_op(prepare_idle, TF_PM_MODE_LP2_NOFLUSH_L2); + if (err && err != -ENOSYS) + return err; return cpu_suspend(0, tegra30_pm_secondary_cpu_suspend); } @@ -356,9 +347,7 @@ static int tegra_cpuidle_probe(struct platform_device *pdev) * is disabled. 
*/ if (!IS_ENABLED(CONFIG_PM_SLEEP)) { - if (!tegra_cpuidle_using_firmware()) - tegra_cpuidle_disable_state(TEGRA_C7); - + tegra_cpuidle_disable_state(TEGRA_C7); tegra_cpuidle_disable_state(TEGRA_CC6); } diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c index 4070e573bf43..f70aa17e2a8e 100644 --- a/drivers/cpuidle/driver.c +++ b/drivers/cpuidle/driver.c @@ -181,9 +181,13 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv) */ if (s->target_residency > 0) s->target_residency_ns = s->target_residency * NSEC_PER_USEC; + else if (s->target_residency_ns < 0) + s->target_residency_ns = 0; if (s->exit_latency > 0) s->exit_latency_ns = s->exit_latency * NSEC_PER_USEC; + else if (s->exit_latency_ns < 0) + s->exit_latency_ns = 0; } } diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index b0a7ad566081..c3aa8d6ccee3 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -271,7 +271,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, u64 predicted_ns; u64 interactivity_req; unsigned long nr_iowaiters; - ktime_t delta_next; + ktime_t delta, delta_tick; int i, idx; if (data->needs_update) { @@ -280,7 +280,12 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, } /* determine the expected residency time, round up */ - data->next_timer_ns = tick_nohz_get_sleep_length(&delta_next); + delta = tick_nohz_get_sleep_length(&delta_tick); + if (unlikely(delta < 0)) { + delta = 0; + delta_tick = 0; + } + data->next_timer_ns = delta; nr_iowaiters = nr_iowait_cpu(dev->cpu); data->bucket = which_bucket(data->next_timer_ns, nr_iowaiters); @@ -318,7 +323,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, * state selection. */ if (predicted_ns < TICK_NSEC) - predicted_ns = delta_next; + predicted_ns = data->next_timer_ns; } else { /* * Use the performance multiplier and the user-configurable @@ -377,7 +382,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, * stuck in the shallow one for too long. */ if (drv->states[idx].target_residency_ns < TICK_NSEC && - s->target_residency_ns <= delta_next) + s->target_residency_ns <= delta_tick) idx = i; return idx; @@ -399,7 +404,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, predicted_ns < TICK_NSEC) && !tick_nohz_tick_stopped()) { *stop_tick = false; - if (idx > 0 && drv->states[idx].target_residency_ns > delta_next) { + if (idx > 0 && drv->states[idx].target_residency_ns > delta_tick) { /* * The tick is not going to be stopped and the target * residency of the state to be returned is not within @@ -411,7 +416,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, continue; idx = i; - if (drv->states[i].target_residency_ns <= delta_next) + if (drv->states[i].target_residency_ns <= delta_tick) break; } } diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c index 6deaaf5f05b5..ac4bb27d69b0 100644 --- a/drivers/cpuidle/governors/teo.c +++ b/drivers/cpuidle/governors/teo.c @@ -100,8 +100,8 @@ struct teo_idle_state { * @intervals: Saved idle duration values. 
*/ struct teo_cpu { - u64 time_span_ns; - u64 sleep_length_ns; + s64 time_span_ns; + s64 sleep_length_ns; struct teo_idle_state states[CPUIDLE_STATE_MAX]; int interval_idx; u64 intervals[INTERVALS]; @@ -117,7 +117,8 @@ static DEFINE_PER_CPU(struct teo_cpu, teo_cpus); static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev) { struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu); - int i, idx_hit = -1, idx_timer = -1; + int i, idx_hit = 0, idx_timer = 0; + unsigned int hits, misses; u64 measured_ns; if (cpu_data->time_span_ns >= cpu_data->sleep_length_ns) { @@ -174,25 +175,22 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev) * also increase the "early hits" metric for the state that actually * matches the measured idle duration. */ - if (idx_timer >= 0) { - unsigned int hits = cpu_data->states[idx_timer].hits; - unsigned int misses = cpu_data->states[idx_timer].misses; - - hits -= hits >> DECAY_SHIFT; - misses -= misses >> DECAY_SHIFT; - - if (idx_timer > idx_hit) { - misses += PULSE; - if (idx_hit >= 0) - cpu_data->states[idx_hit].early_hits += PULSE; - } else { - hits += PULSE; - } + hits = cpu_data->states[idx_timer].hits; + hits -= hits >> DECAY_SHIFT; + + misses = cpu_data->states[idx_timer].misses; + misses -= misses >> DECAY_SHIFT; - cpu_data->states[idx_timer].misses = misses; - cpu_data->states[idx_timer].hits = hits; + if (idx_timer == idx_hit) { + hits += PULSE; + } else { + misses += PULSE; + cpu_data->states[idx_hit].early_hits += PULSE; } + cpu_data->states[idx_timer].misses = misses; + cpu_data->states[idx_timer].hits = hits; + /* * Save idle duration values corresponding to non-timer wakeups for * pattern detection. @@ -216,7 +214,7 @@ static bool teo_time_ok(u64 interval_ns) */ static int teo_find_shallower_state(struct cpuidle_driver *drv, struct cpuidle_device *dev, int state_idx, - u64 duration_ns) + s64 duration_ns) { int i; @@ -242,10 +240,10 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, { struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu); s64 latency_req = cpuidle_governor_latency_req(dev->cpu); - u64 duration_ns; + int max_early_idx, prev_max_early_idx, constraint_idx, idx0, idx, i; unsigned int hits, misses, early_hits; - int max_early_idx, prev_max_early_idx, constraint_idx, idx, i; ktime_t delta_tick; + s64 duration_ns; if (dev->last_state_idx >= 0) { teo_update(drv, dev); @@ -264,6 +262,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, prev_max_early_idx = -1; constraint_idx = drv->state_count; idx = -1; + idx0 = idx; for (i = 0; i < drv->state_count; i++) { struct cpuidle_state *s = &drv->states[i]; @@ -324,6 +323,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, idx = i; /* first enabled state */ hits = cpu_data->states[i].hits; misses = cpu_data->states[i].misses; + idx0 = i; } if (s->target_residency_ns > duration_ns) @@ -376,11 +376,16 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, if (idx < 0) { idx = 0; /* No states enabled. Must use 0. */ - } else if (idx > 0) { + } else if (idx > idx0) { unsigned int count = 0; u64 sum = 0; /* + * The target residencies of at least two different enabled idle + * states are less than or equal to the current expected idle + * duration. Try to refine the selection using the most recent + * measured idle duration values. 
+ * * Count and sum the most recent idle duration values less than * the current expected idle duration value. */ @@ -428,7 +433,8 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, * till the closest timer including the tick, try to correct * that. */ - if (idx > 0 && drv->states[idx].target_residency_ns > delta_tick) + if (idx > idx0 && + drv->states[idx].target_residency_ns > delta_tick) idx = teo_find_shallower_state(drv, dev, idx, delta_tick); } diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c index 2de5e3672e42..cc8dd3072b8b 100644 --- a/drivers/crypto/nx/nx-842-pseries.c +++ b/drivers/crypto/nx/nx-842-pseries.c @@ -1042,7 +1042,7 @@ error: return ret; } -static int nx842_remove(struct vio_dev *viodev) +static void nx842_remove(struct vio_dev *viodev) { struct nx842_devdata *old_devdata; unsigned long flags; @@ -1063,8 +1063,6 @@ static int nx842_remove(struct vio_dev *viodev) if (old_devdata) kfree(old_devdata->counters); kfree(old_devdata); - - return 0; } static const struct vio_device_id nx842_vio_driver_ids[] = { diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c index 0d2dc5be7f19..1d0e8a1ba160 100644 --- a/drivers/crypto/nx/nx.c +++ b/drivers/crypto/nx/nx.c @@ -783,7 +783,7 @@ static int nx_probe(struct vio_dev *viodev, const struct vio_device_id *id) return nx_register_algs(); } -static int nx_remove(struct vio_dev *viodev) +static void nx_remove(struct vio_dev *viodev) { dev_dbg(&viodev->dev, "entering nx_remove for UA 0x%x\n", viodev->unit_address); @@ -811,8 +811,6 @@ static int nx_remove(struct vio_dev *viodev) nx_unregister_skcipher(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB); } - - return 0; } diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c index 0a6438cbb3f3..e7a9561a826d 100644 --- a/drivers/extcon/extcon.c +++ b/drivers/extcon/extcon.c @@ -1241,6 +1241,7 @@ int extcon_dev_register(struct extcon_dev *edev) sizeof(*edev->nh), GFP_KERNEL); if (!edev->nh) { ret = -ENOMEM; + device_unregister(&edev->dev); goto err_dev; } diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c index 5fd6a60b6741..88ed971e32c0 100644 --- a/drivers/firewire/nosy.c +++ b/drivers/firewire/nosy.c @@ -346,6 +346,7 @@ nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg) struct client *client = file->private_data; spinlock_t *client_list_lock = &client->lynx->client_list_lock; struct nosy_stats stats; + int ret; switch (cmd) { case NOSY_IOC_GET_STATS: @@ -360,11 +361,15 @@ nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return 0; case NOSY_IOC_START: + ret = -EBUSY; spin_lock_irq(client_list_lock); - list_add_tail(&client->link, &client->lynx->client_list); + if (list_empty(&client->link)) { + list_add_tail(&client->link, &client->lynx->client_list); + ret = 0; + } spin_unlock_irq(client_list_lock); - return 0; + return ret; case NOSY_IOC_STOP: spin_lock_irq(client_list_lock); diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index df3f9bcab581..4b7ee3fa9224 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -927,7 +927,7 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) } /* first try to find a slot in an existing linked list entry */ - for (prsv = efi_memreserve_root->next; prsv; prsv = rsv->next) { + for (prsv = efi_memreserve_root->next; prsv; ) { rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB); index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size); if (index < rsv->size) { @@ -937,6 +937,7 @@ int 
__ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) memunmap(rsv); return efi_mem_reserve_iomem(addr, size); } + prsv = rsv->next; memunmap(rsv); } diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c index b69d63143e0d..7bf0a7acae5e 100644 --- a/drivers/firmware/efi/libstub/arm64-stub.c +++ b/drivers/firmware/efi/libstub/arm64-stub.c @@ -24,7 +24,7 @@ efi_status_t check_platform_features(void) return EFI_SUCCESS; tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf; - if (tg != ID_AA64MMFR0_TGRAN_SUPPORTED) { + if (tg < ID_AA64MMFR0_TGRAN_SUPPORTED_MIN || tg > ID_AA64MMFR0_TGRAN_SUPPORTED_MAX) { if (IS_ENABLED(CONFIG_ARM64_64K_PAGES)) efi_err("This 64 KB granular kernel is not supported by your CPU\n"); else diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c index ec2f3985bef3..26e69788f27a 100644 --- a/drivers/firmware/efi/libstub/efi-stub.c +++ b/drivers/firmware/efi/libstub/efi-stub.c @@ -96,6 +96,18 @@ static void install_memreserve_table(void) efi_err("Failed to install memreserve config table!\n"); } +static u32 get_supported_rt_services(void) +{ + const efi_rt_properties_table_t *rt_prop_table; + u32 supported = EFI_RT_SUPPORTED_ALL; + + rt_prop_table = get_efi_config_table(EFI_RT_PROPERTIES_TABLE_GUID); + if (rt_prop_table) + supported &= rt_prop_table->runtime_services_supported; + + return supported; +} + /* * EFI entry point for the arm/arm64 EFI stubs. This is the entrypoint * that is described in the PE/COFF header. Most of the code is the same @@ -250,6 +262,10 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, (prop_tbl->memory_protection_attribute & EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA); + /* force efi_novamap if SetVirtualAddressMap() is unsupported */ + efi_novamap |= !(get_supported_rt_services() & + EFI_RT_SUPPORTED_SET_VIRTUAL_ADDRESS_MAP); + /* hibernation expects the runtime regions to stay in the same place */ if (!IS_ENABLED(CONFIG_HIBERNATION) && !efi_nokaslr && !flat_va_mapping) { /* diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c index 41c1d00bf933..abdc8a6a3963 100644 --- a/drivers/firmware/efi/vars.c +++ b/drivers/firmware/efi/vars.c @@ -485,6 +485,10 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *), } break; + case EFI_UNSUPPORTED: + err = -EOPNOTSUPP; + status = EFI_NOT_FOUND; + break; case EFI_NOT_FOUND: break; default: diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 5ea09fd01544..c91d05651596 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -113,8 +113,29 @@ MODULE_DEVICE_TABLE(i2c, pca953x_id); #ifdef CONFIG_GPIO_PCA953X_IRQ #include <linux/dmi.h> -#include <linux/gpio.h> -#include <linux/list.h> + +static const struct acpi_gpio_params pca953x_irq_gpios = { 0, 0, true }; + +static const struct acpi_gpio_mapping pca953x_acpi_irq_gpios[] = { + { "irq-gpios", &pca953x_irq_gpios, 1, ACPI_GPIO_QUIRK_ABSOLUTE_NUMBER }, + { } +}; + +static int pca953x_acpi_get_irq(struct device *dev) +{ + int ret; + + ret = devm_acpi_dev_add_driver_gpios(dev, pca953x_acpi_irq_gpios); + if (ret) + dev_warn(dev, "can't add GPIO ACPI mapping\n"); + + ret = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(dev), "irq-gpios", 0); + if (ret < 0) + return ret; + + dev_info(dev, "ACPI interrupt quirk (IRQ %d)\n", ret); + return ret; +} static const struct dmi_system_id pca953x_dmi_acpi_irq_info[] = { { @@ -133,59 +154,6 @@ static const 
struct dmi_system_id pca953x_dmi_acpi_irq_info[] = { }, {} }; - -#ifdef CONFIG_ACPI -static int pca953x_acpi_get_pin(struct acpi_resource *ares, void *data) -{ - struct acpi_resource_gpio *agpio; - int *pin = data; - - if (acpi_gpio_get_irq_resource(ares, &agpio)) - *pin = agpio->pin_table[0]; - return 1; -} - -static int pca953x_acpi_find_pin(struct device *dev) -{ - struct acpi_device *adev = ACPI_COMPANION(dev); - int pin = -ENOENT, ret; - LIST_HEAD(r); - - ret = acpi_dev_get_resources(adev, &r, pca953x_acpi_get_pin, &pin); - acpi_dev_free_resource_list(&r); - if (ret < 0) - return ret; - - return pin; -} -#else -static inline int pca953x_acpi_find_pin(struct device *dev) { return -ENXIO; } -#endif - -static int pca953x_acpi_get_irq(struct device *dev) -{ - int pin, ret; - - pin = pca953x_acpi_find_pin(dev); - if (pin < 0) - return pin; - - dev_info(dev, "Applying ACPI interrupt quirk (GPIO %d)\n", pin); - - if (!gpio_is_valid(pin)) - return -EINVAL; - - ret = gpio_request(pin, "pca953x interrupt"); - if (ret) - return ret; - - ret = gpio_to_irq(pin); - - /* When pin is used as an IRQ, no need to keep it requested */ - gpio_free(pin); - - return ret; -} #endif static const struct acpi_device_id pca953x_acpi_ids[] = { diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index e37a57d0a2f0..1aacd2a5a1fd 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -174,7 +174,7 @@ static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio, int ret, value; ret = request_threaded_irq(event->irq, NULL, event->handler, - event->irqflags, "ACPI:Event", event); + event->irqflags | IRQF_ONESHOT, "ACPI:Event", event); if (ret) { dev_err(acpi_gpio->chip->parent, "Failed to setup interrupt handler for %d\n", @@ -677,6 +677,7 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data) if (!lookup->desc) { const struct acpi_resource_gpio *agpio = &ares->data.gpio; bool gpioint = agpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT; + struct gpio_desc *desc; u16 pin_index; if (lookup->info.quirks & ACPI_GPIO_QUIRK_ONLY_GPIOIO && gpioint) @@ -689,8 +690,12 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data) if (pin_index >= agpio->pin_table_length) return 1; - lookup->desc = acpi_get_gpiod(agpio->resource_source.string_ptr, + if (lookup->info.quirks & ACPI_GPIO_QUIRK_ABSOLUTE_NUMBER) + desc = gpio_to_desc(agpio->pin_table[pin_index]); + else + desc = acpi_get_gpiod(agpio->resource_source.string_ptr, agpio->pin_table[pin_index]); + lookup->desc = desc; lookup->info.pin_config = agpio->pin_config; lookup->info.debounce = agpio->debounce_timeout; lookup->info.gpioint = gpioint; @@ -940,8 +945,9 @@ struct gpio_desc *acpi_node_get_gpiod(struct fwnode_handle *fwnode, } /** - * acpi_dev_gpio_irq_get() - Find GpioInt and translate it to Linux IRQ number + * acpi_dev_gpio_irq_get_by() - Find GpioInt and translate it to Linux IRQ number * @adev: pointer to a ACPI device to get IRQ from + * @name: optional name of GpioInt resource * @index: index of GpioInt resource (starting from %0) * * If the device has one or more GpioInt resources, this function can be @@ -951,9 +957,12 @@ struct gpio_desc *acpi_node_get_gpiod(struct fwnode_handle *fwnode, * The function is idempotent, though each time it runs it will configure GPIO * pin direction according to the flags in GpioInt resource. * + * The function takes optional @name parameter. If the resource has a property + * name, then only those will be taken into account. 
+ * * Return: Linux IRQ number (> %0) on success, negative errno on failure. */ -int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index) +int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name, int index) { int idx, i; unsigned int irq_flags; @@ -963,7 +972,7 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index) struct acpi_gpio_info info; struct gpio_desc *desc; - desc = acpi_get_gpiod_by_index(adev, NULL, i, &info); + desc = acpi_get_gpiod_by_index(adev, name, i, &info); /* Ignore -EPROBE_DEFER, it only matters if idx matches */ if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER) @@ -1008,7 +1017,7 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index) } return -ENOENT; } -EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_get); +EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_get_by); static acpi_status acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address, diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index adf55db080d8..6367646dce83 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -367,22 +367,18 @@ static int gpiochip_set_desc_names(struct gpio_chip *gc) * * Looks for device property "gpio-line-names" and if it exists assigns * GPIO line names for the chip. The memory allocated for the assigned - * names belong to the underlying software node and should not be released + * names belong to the underlying firmware node and should not be released * by the caller. */ static int devprop_gpiochip_set_names(struct gpio_chip *chip) { struct gpio_device *gdev = chip->gpiodev; - struct device *dev = chip->parent; + struct fwnode_handle *fwnode = dev_fwnode(&gdev->dev); const char **names; int ret, i; int count; - /* GPIO chip may not have a parent device whose properties we inspect. */ - if (!dev) - return 0; - - count = device_property_string_array_count(dev, "gpio-line-names"); + count = fwnode_property_string_array_count(fwnode, "gpio-line-names"); if (count < 0) return 0; @@ -396,7 +392,7 @@ static int devprop_gpiochip_set_names(struct gpio_chip *chip) if (!names) return -ENOMEM; - ret = device_property_read_string_array(dev, "gpio-line-names", + ret = fwnode_property_read_string_array(fwnode, "gpio-line-names", names, count); if (ret < 0) { dev_warn(&gdev->dev, "failed to read GPIO line names\n"); @@ -474,9 +470,13 @@ EXPORT_SYMBOL_GPL(gpiochip_line_is_valid); static void gpiodevice_release(struct device *dev) { - struct gpio_device *gdev = dev_get_drvdata(dev); + struct gpio_device *gdev = container_of(dev, struct gpio_device, dev); + unsigned long flags; + spin_lock_irqsave(&gpio_lock, flags); list_del(&gdev->list); + spin_unlock_irqrestore(&gpio_lock, flags); + ida_free(&gpio_ida, gdev->id); kfree_const(gdev->label); kfree(gdev->descs); @@ -571,6 +571,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, struct lock_class_key *lock_key, struct lock_class_key *request_key) { + struct fwnode_handle *fwnode = gc->parent ? dev_fwnode(gc->parent) : NULL; unsigned long flags; int ret = 0; unsigned i; @@ -594,6 +595,12 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, of_gpio_dev_init(gc, gdev); + /* + * Assign fwnode depending on the result of the previous calls, + * if none of them succeed, assign it to the parent's one. 
+ */ + gdev->dev.fwnode = dev_fwnode(&gdev->dev) ?: fwnode; + gdev->id = ida_alloc(&gpio_ida, GFP_KERNEL); if (gdev->id < 0) { ret = gdev->id; @@ -605,7 +612,6 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, goto err_free_ida; device_initialize(&gdev->dev); - dev_set_drvdata(&gdev->dev, gdev); if (gc->parent && gc->parent->driver) gdev->owner = gc->parent->driver->owner; else if (gc->owner) @@ -4257,7 +4263,8 @@ static int __init gpiolib_dev_init(void) return ret; } - if (driver_register(&gpio_stub_drv) < 0) { + ret = driver_register(&gpio_stub_drv); + if (ret < 0) { pr_err("gpiolib: could not register GPIO stub driver\n"); bus_unregister(&gpio_bus_type); return ret; diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index e392a90ca687..85b79a7fee63 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -228,6 +228,7 @@ source "drivers/gpu/drm/arm/Kconfig" config DRM_RADEON tristate "ATI Radeon" depends on DRM && PCI && MMU + depends on AGP || !AGP select FW_LOADER select DRM_KMS_HELPER select DRM_TTM diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index b6879d97c9c9..29885febc0b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -180,6 +180,7 @@ extern uint amdgpu_smu_memory_pool_size; extern uint amdgpu_dc_feature_mask; extern uint amdgpu_dc_debug_mask; extern uint amdgpu_dm_abm_level; +extern int amdgpu_backlight; extern struct amdgpu_mgpu_info mgpu_info; extern int amdgpu_ras_enable; extern uint amdgpu_ras_mask; @@ -1006,13 +1007,9 @@ struct amdgpu_device { /* s3/s4 mask */ bool in_suspend; - bool in_hibernate; - - /* - * The combination flag in_poweroff_reboot_com used to identify the poweroff - * and reboot opt in the s0i3 system-wide suspend. 
- */ - bool in_poweroff_reboot_com; + bool in_s3; + bool in_s4; + bool in_s0ix; atomic_t in_gpu_reset; enum pp_mp1_state mp1_state; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 8155c54392c8..2e9b16fb3fcd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -903,10 +903,11 @@ void amdgpu_acpi_fini(struct amdgpu_device *adev) */ bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev) { +#if defined(CONFIG_AMD_PMC) || defined(CONFIG_AMD_PMC_MODULE) if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) { if (adev->flags & AMD_IS_APU) return true; } - +#endif return false; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 0a25fecf488a..43059ead733b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -357,7 +357,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf, while (size) { uint32_t value; - value = RREG32_PCIE(*pos >> 2); + value = RREG32_PCIE(*pos); r = put_user(value, (uint32_t *)buf); if (r) { pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); @@ -424,7 +424,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user return r; } - WREG32_PCIE(*pos >> 2, value); + WREG32_PCIE(*pos, value); result += 4; buf += 4; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 6447cd6ca5a8..8a5a8ff5d362 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2371,6 +2371,10 @@ static int amdgpu_device_set_cg_state(struct amdgpu_device *adev, i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; if (!adev->ip_blocks[i].status.late_initialized) continue; + /* skip CG for GFX on S0ix */ + if (adev->in_s0ix && + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX) + continue; /* skip CG for VCE/UVD, it's handled specially */ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && @@ -2402,6 +2406,10 @@ static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_power i = state == AMD_PG_STATE_GATE ? 
j : adev->num_ip_blocks - j - 1; if (!adev->ip_blocks[i].status.late_initialized) continue; + /* skip PG for GFX on S0ix */ + if (adev->in_s0ix && + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX) + continue; /* skip CG for VCE/UVD, it's handled specially */ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && @@ -2678,11 +2686,8 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev) { int i, r; - if (adev->in_poweroff_reboot_com || - !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) { - amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); - amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); - } + amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); + amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); for (i = adev->num_ip_blocks - 1; i >= 0; i--) { if (!adev->ip_blocks[i].status.valid) @@ -2722,6 +2727,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) { int i, r; + if (adev->in_s0ix) + amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry); + for (i = adev->num_ip_blocks - 1; i >= 0; i--) { if (!adev->ip_blocks[i].status.valid) continue; @@ -2734,6 +2742,17 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) adev->ip_blocks[i].status.hw = false; continue; } + + /* skip suspend of gfx and psp for S0ix + * gfx is in gfxoff state, so on resume it will exit gfxoff just + * like at runtime. PSP is also part of the always on hardware + * so no need to suspend it. + */ + if (adev->in_s0ix && + (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP || + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)) + continue; + /* XXX handle errors */ r = adev->ip_blocks[i].version->funcs->suspend(adev); /* XXX handle errors */ @@ -3673,14 +3692,9 @@ void amdgpu_device_fini(struct amdgpu_device *adev) */ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) { - struct amdgpu_device *adev; - struct drm_crtc *crtc; - struct drm_connector *connector; - struct drm_connector_list_iter iter; + struct amdgpu_device *adev = drm_to_adev(dev); int r; - adev = drm_to_adev(dev); - if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; @@ -3692,61 +3706,19 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) cancel_delayed_work_sync(&adev->delayed_init_work); - if (!amdgpu_device_has_dc_support(adev)) { - /* turn off display hw */ - drm_modeset_lock_all(dev); - drm_connector_list_iter_begin(dev, &iter); - drm_for_each_connector_iter(connector, &iter) - drm_helper_connector_dpms(connector, - DRM_MODE_DPMS_OFF); - drm_connector_list_iter_end(&iter); - drm_modeset_unlock_all(dev); - /* unpin the front buffers and cursors */ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct drm_framebuffer *fb = crtc->primary->fb; - struct amdgpu_bo *robj; - - if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) { - struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); - r = amdgpu_bo_reserve(aobj, true); - if (r == 0) { - amdgpu_bo_unpin(aobj); - amdgpu_bo_unreserve(aobj); - } - } - - if (fb == NULL || fb->obj[0] == NULL) { - continue; - } - robj = gem_to_amdgpu_bo(fb->obj[0]); - /* don't unpin kernel fb objects */ - if (!amdgpu_fbdev_robj_is_fb(adev, robj)) { - r = amdgpu_bo_reserve(robj, true); - if (r == 0) { - amdgpu_bo_unpin(robj); - amdgpu_bo_unreserve(robj); - } - } - } - } - amdgpu_ras_suspend(adev); r = 
amdgpu_device_ip_suspend_phase1(adev); - amdgpu_amdkfd_suspend(adev, adev->in_runpm); + if (!adev->in_s0ix) + amdgpu_amdkfd_suspend(adev, adev->in_runpm); /* evict vram memory */ amdgpu_bo_evict_vram(adev); amdgpu_fence_driver_suspend(adev); - if (adev->in_poweroff_reboot_com || - !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) - r = amdgpu_device_ip_suspend_phase2(adev); - else - amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry); + r = amdgpu_device_ip_suspend_phase2(adev); /* evict remaining vram memory * This second call to evict vram is to evict the gart page table * using the CPU. @@ -3768,16 +3740,13 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) */ int amdgpu_device_resume(struct drm_device *dev, bool fbcon) { - struct drm_connector *connector; - struct drm_connector_list_iter iter; struct amdgpu_device *adev = drm_to_adev(dev); - struct drm_crtc *crtc; int r = 0; if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; - if (amdgpu_acpi_is_s0ix_supported(adev)) + if (adev->in_s0ix) amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry); /* post card */ @@ -3802,50 +3771,17 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon) queue_delayed_work(system_wq, &adev->delayed_init_work, msecs_to_jiffies(AMDGPU_RESUME_MS)); - if (!amdgpu_device_has_dc_support(adev)) { - /* pin cursors */ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - - if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) { - struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); - r = amdgpu_bo_reserve(aobj, true); - if (r == 0) { - r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM); - if (r != 0) - dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r); - amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj); - amdgpu_bo_unreserve(aobj); - } - } - } + if (!adev->in_s0ix) { + r = amdgpu_amdkfd_resume(adev, adev->in_runpm); + if (r) + return r; } - r = amdgpu_amdkfd_resume(adev, adev->in_runpm); - if (r) - return r; /* Make sure IB tests flushed */ flush_delayed_work(&adev->delayed_init_work); - /* blat the mode back in */ - if (fbcon) { - if (!amdgpu_device_has_dc_support(adev)) { - /* pre DCE11 */ - drm_helper_resume_force_mode(dev); - - /* turn on display hw */ - drm_modeset_lock_all(dev); - - drm_connector_list_iter_begin(dev, &iter); - drm_for_each_connector_iter(connector, &iter) - drm_helper_connector_dpms(connector, - DRM_MODE_DPMS_ON); - drm_connector_list_iter_end(&iter); - - drm_modeset_unlock_all(dev); - } + if (fbcon) amdgpu_fbdev_set_suspend(adev, 0); - } drm_kms_helper_poll_enable(dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 48cb33e5b382..f753e04fee99 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -1310,3 +1310,92 @@ bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc, return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos, stime, etime, mode); } + +int amdgpu_display_suspend_helper(struct amdgpu_device *adev) +{ + struct drm_device *dev = adev_to_drm(adev); + struct drm_crtc *crtc; + struct drm_connector *connector; + struct drm_connector_list_iter iter; + int r; + + /* turn off display hw */ + drm_modeset_lock_all(dev); + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) + drm_helper_connector_dpms(connector, + DRM_MODE_DPMS_OFF); + 
drm_connector_list_iter_end(&iter); + drm_modeset_unlock_all(dev); + /* unpin the front buffers and cursors */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_framebuffer *fb = crtc->primary->fb; + struct amdgpu_bo *robj; + + if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) { + struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); + r = amdgpu_bo_reserve(aobj, true); + if (r == 0) { + amdgpu_bo_unpin(aobj); + amdgpu_bo_unreserve(aobj); + } + } + + if (fb == NULL || fb->obj[0] == NULL) { + continue; + } + robj = gem_to_amdgpu_bo(fb->obj[0]); + /* don't unpin kernel fb objects */ + if (!amdgpu_fbdev_robj_is_fb(adev, robj)) { + r = amdgpu_bo_reserve(robj, true); + if (r == 0) { + amdgpu_bo_unpin(robj); + amdgpu_bo_unreserve(robj); + } + } + } + return r; +} + +int amdgpu_display_resume_helper(struct amdgpu_device *adev) +{ + struct drm_device *dev = adev_to_drm(adev); + struct drm_connector *connector; + struct drm_connector_list_iter iter; + struct drm_crtc *crtc; + int r; + + /* pin cursors */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + + if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) { + struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); + r = amdgpu_bo_reserve(aobj, true); + if (r == 0) { + r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM); + if (r != 0) + dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r); + amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj); + amdgpu_bo_unreserve(aobj); + } + } + } + + drm_helper_resume_force_mode(dev); + + /* turn on display hw */ + drm_modeset_lock_all(dev); + + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) + drm_helper_connector_dpms(connector, + DRM_MODE_DPMS_ON); + drm_connector_list_iter_end(&iter); + + drm_modeset_unlock_all(dev); + + return 0; +} + diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h index dc7b7d116549..7b6d83e2b13c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h @@ -47,4 +47,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev, const struct drm_format_info * amdgpu_lookup_format_info(u32 format, uint64_t modifier); +int amdgpu_display_suspend_helper(struct amdgpu_device *adev); +int amdgpu_display_resume_helper(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 4575192d9b08..e92e7dea71da 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -781,6 +781,10 @@ uint amdgpu_dm_abm_level; MODULE_PARM_DESC(abmlevel, "ABM level (0 = off (default), 1-4 = backlight reduction level) "); module_param_named(abmlevel, amdgpu_dm_abm_level, uint, 0444); +int amdgpu_backlight = -1; +MODULE_PARM_DESC(backlight, "Backlight control (0 = pwm, 1 = aux, -1 auto (default))"); +module_param_named(backlight, amdgpu_backlight, bint, 0444); + /** * DOC: tmz (int) * Trusted Memory Zone (TMZ) is a method to protect data being written @@ -1103,6 +1107,7 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, {0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, {0x1002, 0x73AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, + {0x1002, 0x73AF, 
PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, {0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, /* Van Gogh */ @@ -1270,24 +1275,35 @@ amdgpu_pci_shutdown(struct pci_dev *pdev) */ if (!amdgpu_passthrough(adev)) adev->mp1_state = PP_MP1_STATE_UNLOAD; - adev->in_poweroff_reboot_com = true; amdgpu_device_ip_suspend(adev); - adev->in_poweroff_reboot_com = false; adev->mp1_state = PP_MP1_STATE_NONE; } static int amdgpu_pmops_suspend(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(drm_dev); + int r; - return amdgpu_device_suspend(drm_dev, true); + if (amdgpu_acpi_is_s0ix_supported(adev)) + adev->in_s0ix = true; + adev->in_s3 = true; + r = amdgpu_device_suspend(drm_dev, true); + adev->in_s3 = false; + + return r; } static int amdgpu_pmops_resume(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(drm_dev); + int r; - return amdgpu_device_resume(drm_dev, true); + r = amdgpu_device_resume(drm_dev, true); + if (amdgpu_acpi_is_s0ix_supported(adev)) + adev->in_s0ix = false; + return r; } static int amdgpu_pmops_freeze(struct device *dev) @@ -1296,9 +1312,9 @@ static int amdgpu_pmops_freeze(struct device *dev) struct amdgpu_device *adev = drm_to_adev(drm_dev); int r; - adev->in_hibernate = true; + adev->in_s4 = true; r = amdgpu_device_suspend(drm_dev, true); - adev->in_hibernate = false; + adev->in_s4 = false; if (r) return r; return amdgpu_asic_reset(adev); @@ -1314,13 +1330,8 @@ static int amdgpu_pmops_thaw(struct device *dev) static int amdgpu_pmops_poweroff(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - struct amdgpu_device *adev = drm_to_adev(drm_dev); - int r; - adev->in_poweroff_reboot_com = true; - r = amdgpu_device_suspend(drm_dev, true); - adev->in_poweroff_reboot_com = false; - return r; + return amdgpu_device_suspend(drm_dev, true); } static int amdgpu_pmops_restore(struct device *dev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index 51cd49c6f38f..24010cacf7d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c @@ -146,7 +146,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, size = mode_cmd->pitches[0] * height; aligned_size = ALIGN(size, PAGE_SIZE); ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain, flags, - ttm_bo_type_kernel, NULL, &gobj); + ttm_bo_type_device, NULL, &gobj); if (ret) { pr_err("failed to allocate framebuffer (%d)\n", aligned_size); return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 3c37cf1ae8b7..a4e2cf7cada1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -173,8 +173,6 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags) switch (adev->asic_type) { case CHIP_VEGA20: case CHIP_ARCTURUS: - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: /* enable runpm if runpm=1 */ if (amdgpu_runtime_pm > 0) adev->runpm = true; @@ -780,9 +778,9 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) dev_info->high_va_offset = AMDGPU_GMC_HOLE_END; dev_info->high_va_max = AMDGPU_GMC_HOLE_END | vm_size; } - dev_info->virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE); + dev_info->virtual_address_alignment = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE); dev_info->pte_fragment_size = (1 << 
adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE; - dev_info->gart_page_size = AMDGPU_GPU_PAGE_SIZE; + dev_info->gart_page_size = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE); dev_info->cu_active_number = adev->gfx.cu_info.number; dev_info->cu_ao_mask = adev->gfx.cu_info.ao_cu_mask; dev_info->ce_ram_size = adev->gfx.ce_ram_size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 4b29b8205442..072050429a2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1028,13 +1028,10 @@ int amdgpu_bo_evict_vram(struct amdgpu_device *adev) { struct ttm_resource_manager *man; - /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */ -#ifndef CONFIG_HIBERNATION - if (adev->flags & AMD_IS_APU) { - /* Useless to evict on IGP chips */ + if (adev->in_s3 && (adev->flags & AMD_IS_APU)) { + /* No need to evict vram on APUs for suspend to ram */ return 0; } -#endif man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); return ttm_resource_manager_evict_all(&adev->mman.bdev, man); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index ad91c0c3c423..7d2c8b169827 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2197,8 +2197,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, uint64_t eaddr; /* validate the parameters */ - if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || - size == 0 || size & AMDGPU_GPU_PAGE_MASK) + if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || + size == 0 || size & ~PAGE_MASK) return -EINVAL; /* make sure object fit at this offset */ @@ -2263,8 +2263,8 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, int r; /* validate the parameters */ - if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || - size == 0 || size & AMDGPU_GPU_PAGE_MASK) + if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || + size == 0 || size & ~PAGE_MASK) return -EINVAL; /* make sure object fit at this offset */ @@ -2409,7 +2409,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, after->start = eaddr + 1; after->last = tmp->last; after->offset = tmp->offset; - after->offset += after->start - tmp->start; + after->offset += (after->start - tmp->start) << PAGE_SHIFT; after->flags = tmp->flags; after->bo_va = tmp->bo_va; list_add(&after->list, &tmp->bo_va->invalids); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 7944781e1086..19abb740a169 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -2897,6 +2897,11 @@ static int dce_v10_0_hw_fini(void *handle) static int dce_v10_0_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = amdgpu_display_suspend_helper(adev); + if (r) + return r; adev->mode_info.bl_level = amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); @@ -2921,8 +2926,10 @@ static int dce_v10_0_resume(void *handle) amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, bl_level); } + if (ret) + return ret; - return ret; + return amdgpu_display_resume_helper(adev); } static bool dce_v10_0_is_idle(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 1b6ff0470011..320ec35bfd37 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -3027,6 +3027,11 @@ static int 
dce_v11_0_hw_fini(void *handle) static int dce_v11_0_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = amdgpu_display_suspend_helper(adev); + if (r) + return r; adev->mode_info.bl_level = amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); @@ -3051,8 +3056,10 @@ static int dce_v11_0_resume(void *handle) amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, bl_level); } + if (ret) + return ret; - return ret; + return amdgpu_display_resume_helper(adev); } static bool dce_v11_0_is_idle(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index 83a88385b762..13322000ebd6 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -2770,7 +2770,11 @@ static int dce_v6_0_hw_fini(void *handle) static int dce_v6_0_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + r = amdgpu_display_suspend_helper(adev); + if (r) + return r; adev->mode_info.bl_level = amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); @@ -2794,8 +2798,10 @@ static int dce_v6_0_resume(void *handle) amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, bl_level); } + if (ret) + return ret; - return ret; + return amdgpu_display_resume_helper(adev); } static bool dce_v6_0_is_idle(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 224b30214427..04ebf02e5b8c 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -2796,6 +2796,11 @@ static int dce_v8_0_hw_fini(void *handle) static int dce_v8_0_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = amdgpu_display_suspend_helper(adev); + if (r) + return r; adev->mode_info.bl_level = amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); @@ -2820,8 +2825,10 @@ static int dce_v8_0_resume(void *handle) amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, bl_level); } + if (ret) + return ret; - return ret; + return amdgpu_display_resume_helper(adev); } static bool dce_v8_0_is_idle(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index 9810af712cc0..5c11144da051 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -39,6 +39,7 @@ #include "dce_v11_0.h" #include "dce_virtual.h" #include "ivsrcid/ivsrcid_vislands30.h" +#include "amdgpu_display.h" #define DCE_VIRTUAL_VBLANK_PERIOD 16666666 @@ -491,12 +492,24 @@ static int dce_virtual_hw_fini(void *handle) static int dce_virtual_suspend(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = amdgpu_display_suspend_helper(adev); + if (r) + return r; return dce_virtual_hw_fini(handle); } static int dce_virtual_resume(void *handle) { - return dce_virtual_hw_init(handle); + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = dce_virtual_hw_init(handle); + if (r) + return r; + return amdgpu_display_resume_helper(adev); } static bool dce_virtual_is_idle(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 160fa5f59805..c625c5d8ed89 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -558,7 +558,8 @@ static bool nv_is_headless_sku(struct pci_dev *pdev) { if ((pdev->device == 0x731E && (pdev->revision == 
0xC6 || pdev->revision == 0xC7)) || - (pdev->device == 0x7340 && pdev->revision == 0xC9)) + (pdev->device == 0x7340 && pdev->revision == 0xC9) || + (pdev->device == 0x7360 && pdev->revision == 0xC7)) return true; return false; } @@ -634,7 +635,8 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && !amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); + if (!nv_is_headless_sku(adev->pdev)) + amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); if (!amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); break; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c index b258a3dae767..159add0f5aaa 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c @@ -155,7 +155,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev, /* Wait till CP writes sync code: */ status = amdkfd_fence_wait_timeout( - (unsigned int *) rm_state, + rm_state, QUEUESTATE__ACTIVE, 1500); kfd_gtt_sa_free(dbgdev->dev, mem_obj); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index e686ce2bf3b3..4598a9a58125 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1167,7 +1167,7 @@ static int start_cpsch(struct device_queue_manager *dqm) if (retval) goto fail_allocate_vidmem; - dqm->fence_addr = dqm->fence_mem->cpu_ptr; + dqm->fence_addr = (uint64_t *)dqm->fence_mem->cpu_ptr; dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr; init_interrupts(dqm); @@ -1340,8 +1340,8 @@ out: return retval; } -int amdkfd_fence_wait_timeout(unsigned int *fence_addr, - unsigned int fence_value, +int amdkfd_fence_wait_timeout(uint64_t *fence_addr, + uint64_t fence_value, unsigned int timeout_ms) { unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index 7351dd195274..45f815946554 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -192,7 +192,7 @@ struct device_queue_manager { uint16_t vmid_pasid[VMID_NUM]; uint64_t pipelines_addr; uint64_t fence_gpu_addr; - unsigned int *fence_addr; + uint64_t *fence_addr; struct kfd_mem_obj *fence_mem; bool active_runlist; int sched_policy; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c index 5d541e0cc8ca..f71a7fa6680c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c @@ -347,7 +347,7 @@ fail_create_runlist_ib: } int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address, - uint32_t fence_value) + uint64_t fence_value) { uint32_t *buffer, size; int retval = 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c index dfaf771a42e6..e3ba0cd3b6fa 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c @@ -283,7 +283,7 @@ static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer, } static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer, - uint64_t fence_address, uint32_t fence_value) + uint64_t 
fence_address, uint64_t fence_value) { struct pm4_mes_query_status *packet; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c index a852e0d7d804..08442e7d9944 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c @@ -263,7 +263,7 @@ static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer, } static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer, - uint64_t fence_address, uint32_t fence_value) + uint64_t fence_address, uint64_t fence_value) { struct pm4_mes_query_status *packet; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 09599efa41fc..f304d1f8df5f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -1003,8 +1003,8 @@ int pqm_get_wave_state(struct process_queue_manager *pqm, u32 *ctl_stack_used_size, u32 *save_area_used_size); -int amdkfd_fence_wait_timeout(unsigned int *fence_addr, - unsigned int fence_value, +int amdkfd_fence_wait_timeout(uint64_t *fence_addr, + uint64_t fence_value, unsigned int timeout_ms); /* Packet Manager */ @@ -1040,7 +1040,7 @@ struct packet_manager_funcs { uint32_t filter_param, bool reset, unsigned int sdma_engine); int (*query_status)(struct packet_manager *pm, uint32_t *buffer, - uint64_t fence_address, uint32_t fence_value); + uint64_t fence_address, uint64_t fence_value); int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer); /* Packet sizes */ @@ -1062,7 +1062,7 @@ int pm_send_set_resources(struct packet_manager *pm, struct scheduling_resources *res); int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues); int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address, - uint32_t fence_value); + uint64_t fence_value); int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type, enum kfd_unmap_queues_filter mode, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 3e1fd1e7d09f..573cf17262da 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2267,6 +2267,11 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) caps->ext_caps->bits.hdr_aux_backlight_control == 1) caps->aux_support = true; + if (amdgpu_backlight == 0) + caps->aux_support = false; + else if (amdgpu_backlight == 1) + caps->aux_support = true; + /* From the specification (CTA-861-G), for calculating the maximum * luminance we need to use: * Luminance = 50*2**(CV/32) @@ -3185,19 +3190,6 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm) #endif } -static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness) -{ - bool rc; - - if (!link) - return 1; - - rc = dc_link_set_backlight_level_nits(link, true, brightness, - AUX_BL_DEFAULT_TRANSITION_TIME_MS); - - return rc ? 
0 : 1; -} - static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps, unsigned *min, unsigned *max) { @@ -3260,9 +3252,10 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) brightness = convert_brightness_from_user(&caps, bd->props.brightness); // Change brightness based on AUX property if (caps.aux_support) - return set_backlight_via_aux(link, brightness); - - rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0); + rc = dc_link_set_backlight_level_nits(link, true, brightness, + AUX_BL_DEFAULT_TRANSITION_TIME_MS); + else + rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0); return rc ? 0 : 1; } @@ -3270,11 +3263,27 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd) { struct amdgpu_display_manager *dm = bl_get_data(bd); - int ret = dc_link_get_backlight_level(dm->backlight_link); + struct amdgpu_dm_backlight_caps caps; + + amdgpu_dm_update_backlight_caps(dm); + caps = dm->backlight_caps; + + if (caps.aux_support) { + struct dc_link *link = (struct dc_link *)dm->backlight_link; + u32 avg, peak; + bool rc; - if (ret == DC_ERROR_UNEXPECTED) - return bd->props.brightness; - return convert_brightness_to_user(&dm->backlight_caps, ret); + rc = dc_link_get_backlight_level_nits(link, &avg, &peak); + if (!rc) + return bd->props.brightness; + return convert_brightness_to_user(&caps, avg); + } else { + int ret = dc_link_get_backlight_level(dm->backlight_link); + + if (ret == DC_ERROR_UNEXPECTED) + return bd->props.brightness; + return convert_brightness_to_user(&caps, ret); + } } static const struct backlight_ops amdgpu_dm_backlight_ops = { @@ -4716,6 +4725,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev, dc_plane_state->global_alpha_value = plane_info.global_alpha_value; dc_plane_state->dcc = plane_info.dcc; dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0 + dc_plane_state->flip_int_enabled = true; /* * Always set input transfer function, since plane state is refreshed diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 5159399f8239..5750818db8f6 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -530,7 +530,7 @@ bool dm_helpers_dp_write_dsc_enable( { uint8_t enable_dsc = enable ? 
1 : 0; struct amdgpu_dm_connector *aconnector; - uint8_t ret; + uint8_t ret = 0; if (!stream) return false; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index fa5059f71727..bd0101013ec8 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -2602,7 +2602,6 @@ bool dc_link_set_backlight_level(const struct dc_link *link, if (pipe_ctx->plane_state == NULL) frame_ramp = 0; } else { - ASSERT(false); return false; } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 4eee3a55fa30..18ed0d3f247e 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -887,6 +887,7 @@ struct dc_plane_state { int layer_index; union surface_update_flags update_flags; + bool flip_int_enabled; /* private to DC core */ struct dc_plane_status status; struct dc_context *ctx; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 9e796dfeac20..714c71a5fbde 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -1257,6 +1257,16 @@ void hubp1_soft_reset(struct hubp *hubp, bool reset) REG_UPDATE(DCHUBP_CNTL, HUBP_DISABLE, reset ? 1 : 0); } +void hubp1_set_flip_int(struct hubp *hubp) +{ + struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); + + REG_UPDATE(DCSURF_SURFACE_FLIP_INTERRUPT, + SURFACE_FLIP_INT_MASK, 1); + + return; +} + void hubp1_init(struct hubp *hubp) { //do nothing @@ -1290,6 +1300,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = { .dmdata_load = NULL, .hubp_soft_reset = hubp1_soft_reset, .hubp_in_blank = hubp1_in_blank, + .hubp_set_flip_int = hubp1_set_flip_int, }; /*****************************************/ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index a9a6ed7f4f99..e2f2f6995935 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -74,6 +74,7 @@ SRI(DCSURF_SURFACE_EARLIEST_INUSE_C, HUBPREQ, id),\ SRI(DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C, HUBPREQ, id),\ SRI(DCSURF_SURFACE_CONTROL, HUBPREQ, id),\ + SRI(DCSURF_SURFACE_FLIP_INTERRUPT, HUBPREQ, id),\ SRI(HUBPRET_CONTROL, HUBPRET, id),\ SRI(DCN_EXPANSION_MODE, HUBPREQ, id),\ SRI(DCHUBP_REQ_SIZE_CONFIG, HUBP, id),\ @@ -183,6 +184,7 @@ uint32_t DCSURF_SURFACE_EARLIEST_INUSE_C; \ uint32_t DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C; \ uint32_t DCSURF_SURFACE_CONTROL; \ + uint32_t DCSURF_SURFACE_FLIP_INTERRUPT; \ uint32_t HUBPRET_CONTROL; \ uint32_t DCN_EXPANSION_MODE; \ uint32_t DCHUBP_REQ_SIZE_CONFIG; \ @@ -332,6 +334,7 @@ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_META_SURFACE_TMZ_C, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_EN, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_64B_BLK, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK, mask_sh),\ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CB_B, mask_sh),\ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CR_R, mask_sh),\ @@ -531,6 +534,7 @@ type PRIMARY_SURFACE_DCC_IND_64B_BLK;\ type SECONDARY_SURFACE_DCC_EN;\ type SECONDARY_SURFACE_DCC_IND_64B_BLK;\ + type SURFACE_FLIP_INT_MASK;\ type DET_BUF_PLANE1_BASE_ADDRESS;\ type CROSSBAR_SRC_CB_B;\ type CROSSBAR_SRC_CR_R;\ @@ -777,4 +781,6 
@@ void hubp1_read_state_common(struct hubp *hubp); bool hubp1_in_blank(struct hubp *hubp); void hubp1_soft_reset(struct hubp *hubp, bool reset); +void hubp1_set_flip_int(struct hubp *hubp); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 89912bb5014f..9ba5c624770d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -2196,6 +2196,13 @@ static void dcn10_enable_plane( if (dc->debug.sanity_checks) { hws->funcs.verify_allow_pstate_change_high(dc); } + + if (!pipe_ctx->top_pipe + && pipe_ctx->plane_state + && pipe_ctx->plane_state->flip_int_enabled + && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int) + pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp); + } void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c index 0df0da2e6a4d..bec7059f6d5d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c @@ -1597,6 +1597,7 @@ static struct hubp_funcs dcn20_hubp_funcs = { .validate_dml_output = hubp2_validate_dml_output, .hubp_in_blank = hubp1_in_blank, .hubp_soft_reset = hubp1_soft_reset, + .hubp_set_flip_int = hubp1_set_flip_int, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 0726fb435e2a..aece1103331d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1146,6 +1146,12 @@ void dcn20_enable_plane( pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt); } + if (!pipe_ctx->top_pipe + && pipe_ctx->plane_state + && pipe_ctx->plane_state->flip_int_enabled + && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int) + pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp); + // if (dc->debug.sanity_checks) { // dcn10_verify_allow_pstate_change_high(dc); // } @@ -1501,38 +1507,8 @@ static void dcn20_update_dchubp_dpp( if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed || pipe_ctx->stream->update_flags.bits.gamut_remap || pipe_ctx->stream->update_flags.bits.out_csc) { - struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; - - if (mpc->funcs->set_gamut_remap) { - int i; - int mpcc_id = hubp->inst; - struct mpc_grph_gamut_adjustment adjust; - bool enable_remap_dpp = false; - - memset(&adjust, 0, sizeof(adjust)); - adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; - - /* save the enablement of gamut remap for dpp */ - enable_remap_dpp = pipe_ctx->stream->gamut_remap_matrix.enable_remap; - - /* force bypass gamut remap for dpp/cm */ - pipe_ctx->stream->gamut_remap_matrix.enable_remap = false; - dc->hwss.program_gamut_remap(pipe_ctx); - - /* restore gamut remap flag and use this remap into mpc */ - pipe_ctx->stream->gamut_remap_matrix.enable_remap = enable_remap_dpp; - - /* build remap matrix for top plane if enabled */ - if (enable_remap_dpp && pipe_ctx->top_pipe == NULL) { - adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; - for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++) - adjust.temperature_matrix[i] = - pipe_ctx->stream->gamut_remap_matrix.matrix[i]; - } - mpc->funcs->set_gamut_remap(mpc, mpcc_id, &adjust); - } else - /* dpp/cm gamut 
remap*/ - dc->hwss.program_gamut_remap(pipe_ctx); + /* dpp/cm gamut remap*/ + dc->hwss.program_gamut_remap(pipe_ctx); /*call the dcn2 method which uses mpc csc*/ dc->hwss.program_output_csc(dc, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c index fa013496e26b..2f9bfaeaba8d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c @@ -341,8 +341,7 @@ void enc2_hw_init(struct link_encoder *enc) } else { AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110); - AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c4d); - + AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a); } //AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32; diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c index f9045852728f..b0c9180b808f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c @@ -838,6 +838,7 @@ static struct hubp_funcs dcn21_hubp_funcs = { .hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl, .hubp_init = hubp21_init, .validate_dml_output = hubp21_validate_dml_output, + .hubp_set_flip_int = hubp1_set_flip_int, }; bool hubp21_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 072f8c880924..4a3df13c9e49 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -296,7 +296,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { .num_banks = 8, .num_chans = 4, .vmm_page_size_bytes = 4096, - .dram_clock_change_latency_us = 11.72, + .dram_clock_change_latency_us = 23.84, .return_bus_width_bytes = 64, .dispclk_dppclk_vco_speed_mhz = 3600, .xfc_bus_transport_time_us = 4, @@ -1062,8 +1062,6 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s { int i; - DC_FP_START(); - if (dc->bb_overrides.sr_exit_time_ns) { for (i = 0; i < WM_SET_COUNT; i++) { dc->clk_mgr->bw_params->wm_table.entries[i].sr_exit_time_us = @@ -1088,8 +1086,6 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; } } - - DC_FP_END(); } void dcn21_calculate_wm( @@ -1339,7 +1335,7 @@ static noinline bool dcn21_validate_bandwidth_fp(struct dc *dc, int vlevel = 0; int pipe_split_from[MAX_PIPES]; int pipe_cnt = 0; - display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL); + display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC); DC_LOGGER_INIT(dc->ctx->logger); BW_VAL_TRACE_COUNT(); @@ -1599,6 +1595,11 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param dcn2_1_soc.num_chans = bw_params->num_channels; ASSERT(clk_table->num_entries); + /* Copy dcn2_1_soc.clock_limits to clock_limits to avoid copying over null states later */ + for (i = 0; i < dcn2_1_soc.num_states + 1; i++) { + clock_limits[i] = dcn2_1_soc.clock_limits[i]; + } + for (i = 0; i < clk_table->num_entries; i++) { /* loop backwards*/ for (closest_clk_lvl = 0, j = dcn2_1_soc.num_states - 1; j >= 0; j--) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c index 41a1d0e9b7e2..e0df9b0065f9 100644 --- 
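/*
 * Editor's aside (illustrative sketch, not part of the patch): the flip
 * interrupt guard added to dcn10_enable_plane() and dcn20_enable_plane()
 * above is identical in both hunks; written once as a shared helper
 * (name hypothetical) it reads:
 */
static void dcn_enable_flip_int_if_needed(struct pipe_ctx *pipe_ctx)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;

	/* only the top pipe of a plane that set flip_int_enabled, and only
	 * when the HUBP implementation provides the hook */
	if (!pipe_ctx->top_pipe && pipe_ctx->plane_state &&
	    pipe_ctx->plane_state->flip_int_enabled &&
	    hubp->funcs->hubp_set_flip_int)
		hubp->funcs->hubp_set_flip_int(hubp);
}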
a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c @@ -113,6 +113,7 @@ bool cm3_helper_translate_curve_to_hw_format( struct pwl_result_data *rgb_resulted; struct pwl_result_data *rgb; struct pwl_result_data *rgb_plus_1; + struct pwl_result_data *rgb_minus_1; struct fixed31_32 end_value; int32_t region_start, region_end; @@ -140,7 +141,7 @@ bool cm3_helper_translate_curve_to_hw_format( region_start = -MAX_LOW_POINT; region_end = NUMBER_REGIONS - MAX_LOW_POINT; } else { - /* 10 segments + /* 11 segments * segment is from 2^-10 to 2^0 * There are less than 256 points, for optimization */ @@ -154,9 +155,10 @@ bool cm3_helper_translate_curve_to_hw_format( seg_distr[7] = 4; seg_distr[8] = 4; seg_distr[9] = 4; + seg_distr[10] = 1; region_start = -10; - region_end = 0; + region_end = 1; } for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++) @@ -189,6 +191,10 @@ bool cm3_helper_translate_curve_to_hw_format( rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index]; rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index]; + rgb_resulted[hw_points].red = rgb_resulted[hw_points - 1].red; + rgb_resulted[hw_points].green = rgb_resulted[hw_points - 1].green; + rgb_resulted[hw_points].blue = rgb_resulted[hw_points - 1].blue; + // All 3 color channels have same x corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2), dc_fixpt_from_int(region_start)); @@ -259,15 +265,18 @@ bool cm3_helper_translate_curve_to_hw_format( rgb = rgb_resulted; rgb_plus_1 = rgb_resulted + 1; + rgb_minus_1 = rgb; i = 1; while (i != hw_points + 1) { - if (dc_fixpt_lt(rgb_plus_1->red, rgb->red)) - rgb_plus_1->red = rgb->red; - if (dc_fixpt_lt(rgb_plus_1->green, rgb->green)) - rgb_plus_1->green = rgb->green; - if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue)) - rgb_plus_1->blue = rgb->blue; + if (i >= hw_points - 1) { + if (dc_fixpt_lt(rgb_plus_1->red, rgb->red)) + rgb_plus_1->red = dc_fixpt_add(rgb->red, rgb_minus_1->delta_red); + if (dc_fixpt_lt(rgb_plus_1->green, rgb->green)) + rgb_plus_1->green = dc_fixpt_add(rgb->green, rgb_minus_1->delta_green); + if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue)) + rgb_plus_1->blue = dc_fixpt_add(rgb->blue, rgb_minus_1->delta_blue); + } rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red); rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green); @@ -283,6 +292,7 @@ bool cm3_helper_translate_curve_to_hw_format( } ++rgb_plus_1; + rgb_minus_1 = rgb; ++rgb; ++i; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c index 88ffa9ff1ed1..f24612523248 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c @@ -511,6 +511,7 @@ static struct hubp_funcs dcn30_hubp_funcs = { .hubp_init = hubp3_init, .hubp_in_blank = hubp1_in_blank, .hubp_soft_reset = hubp1_soft_reset, + .hubp_set_flip_int = hubp1_set_flip_int, }; bool hubp3_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index 8d0f663489ac..fb7f1dea3c46 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -2508,6 +2508,19 @@ static const struct resource_funcs dcn30_res_pool_funcs = { .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, }; +#define CTX ctx + +#define REG(reg_name) \ + (DCN_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## 
reg_name) + +static uint32_t read_pipe_fuses(struct dc_context *ctx) +{ + uint32_t value = REG_READ(CC_DC_PIPE_DIS); + /* Support for max 6 pipes */ + value = value & 0x3f; + return value; +} + static bool dcn30_resource_construct( uint8_t num_virtual_links, struct dc *dc, @@ -2517,6 +2530,15 @@ static bool dcn30_resource_construct( struct dc_context *ctx = dc->ctx; struct irq_service_init_data init_data; struct ddc_service_init_data ddc_init_data; + uint32_t pipe_fuses = read_pipe_fuses(ctx); + uint32_t num_pipes = 0; + + if (!(pipe_fuses == 0 || pipe_fuses == 0x3e)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: Unexpected fuse recipe for navi2x !\n"); + /* fault to single pipe */ + pipe_fuses = 0x3e; + } DC_FP_START(); @@ -2650,6 +2672,15 @@ static bool dcn30_resource_construct( /* PP Lib and SMU interfaces */ init_soc_bounding_box(dc, pool); + num_pipes = dcn3_0_ip.max_num_dpp; + + for (i = 0; i < dcn3_0_ip.max_num_dpp; i++) + if (pipe_fuses & 1 << i) + num_pipes--; + + dcn3_0_ip.max_num_dpp = num_pipes; + dcn3_0_ip.max_num_otg = num_pipes; + dml_init_instance(&dc->dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30); /* IRQ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c index 5d4b2c60192e..c494235016e0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c @@ -1619,12 +1619,106 @@ static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30); } +static void calculate_wm_set_for_vlevel( + int vlevel, + struct wm_range_table_entry *table_entry, + struct dcn_watermarks *wm_set, + struct display_mode_lib *dml, + display_e2e_pipe_params_st *pipes, + int pipe_cnt) +{ + double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us; + + ASSERT(vlevel < dml->soc.num_states); + /* only pipe 0 is read for voltage and dcf/soc clocks */ + pipes[0].clks_cfg.voltage = vlevel; + pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz; + pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz; + + dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us; + dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us; + dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us; + + wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000; + wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000; + wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000; + wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000; + wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000; + wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000; + wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000; + wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000; + dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached; + +} + +static void dcn301_calculate_wm_and_dlg( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, + int vlevel_req) +{ + int i, pipe_idx; + int vlevel, vlevel_max; + struct wm_range_table_entry *table_entry; + struct clk_bw_params *bw_params = dc->clk_mgr->bw_params; + + ASSERT(bw_params); + 
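/*
 * Editor's aside (illustrative sketch, not part of the patch): each
 * watermark set computed below picks its voltage level by clamping the
 * requested level into a per-set range: floor 2 for set C, 1 for set B,
 * 0 for set A, and for set D either 0 or the maximum depending on
 * WM_TYPE_RETRAINING. The clamp itself is just (helper name hypothetical):
 */
static inline int dcn301_clamp_vlevel(int vlevel_req, int floor, int vlevel_max)
{
	return min(max(vlevel_req, floor), vlevel_max);
}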
+ vlevel_max = bw_params->clk_table.num_entries - 1; + + /* WM Set D */ + table_entry = &bw_params->wm_table.entries[WM_D]; + if (table_entry->wm_type == WM_TYPE_RETRAINING) + vlevel = 0; + else + vlevel = vlevel_max; + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d, + &context->bw_ctx.dml, pipes, pipe_cnt); + /* WM Set C */ + table_entry = &bw_params->wm_table.entries[WM_C]; + vlevel = min(max(vlevel_req, 2), vlevel_max); + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c, + &context->bw_ctx.dml, pipes, pipe_cnt); + /* WM Set B */ + table_entry = &bw_params->wm_table.entries[WM_B]; + vlevel = min(max(vlevel_req, 1), vlevel_max); + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b, + &context->bw_ctx.dml, pipes, pipe_cnt); + + /* WM Set A */ + table_entry = &bw_params->wm_table.entries[WM_A]; + vlevel = min(vlevel_req, vlevel_max); + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a, + &context->bw_ctx.dml, pipes, pipe_cnt); + + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + + pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt); + pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + + if (dc->config.forced_clocks) { + pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz; + pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz; + } + if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000) + pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0; + if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000) + pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0; + + pipe_idx++; + } + + dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); +} + static struct resource_funcs dcn301_res_pool_funcs = { .destroy = dcn301_destroy_resource_pool, .link_enc_create = dcn301_link_encoder_create, .panel_cntl_create = dcn301_panel_cntl_create, .validate_bandwidth = dcn30_validate_bandwidth, - .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg, + .calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg, .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, .add_stream_to_ctx = dcn30_add_stream_to_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index 22f3f643ed1b..346dcd87dc10 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h @@ -191,6 +191,8 @@ struct hubp_funcs { bool (*hubp_in_blank)(struct hubp *hubp); void (*hubp_soft_reset)(struct hubp *hubp, bool reset); + void (*hubp_set_flip_int)(struct hubp *hubp); + }; #endif diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index c57dc9ae81f2..d0ec83881fc5 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -587,6 +587,48 @@ static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr) tmp, MC_CG_ARB_FREQ_F0); } +static uint16_t smu7_override_pcie_speed(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); + 
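/*
 * Editor's aside (not part of the patch): the smu7 PCIe pinning that starts
 * here mirrors the vega10/vega12/vega20 hunks further down. All of them key
 * off a pcie_dpm_key_disabled flag; vega12 and vega20 now derive theirs from
 * the ppfeaturemask-controlled feature bits as
 *
 *	data->registry_data.pcie_dpm_key_disabled =
 *		!(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
 *
 * and, when the flag is set, pin the link tables to the strongest gen and
 * width advertised in adev->pm.pcie_gen_mask / adev->pm.pcie_mlw_mask while
 * leaving the SMU's link-DPM feature disabled.
 */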
uint16_t pcie_gen = 0; + + if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 && + adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4) + pcie_gen = 3; + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 && + adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3) + pcie_gen = 2; + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 && + adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2) + pcie_gen = 1; + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 && + adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1) + pcie_gen = 0; + + return pcie_gen; +} + +static uint16_t smu7_override_pcie_width(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); + uint16_t pcie_width = 0; + + if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16) + pcie_width = 16; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12) + pcie_width = 12; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8) + pcie_width = 8; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4) + pcie_width = 4; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2) + pcie_width = 2; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1) + pcie_width = 1; + + return pcie_width; +} + static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); @@ -683,6 +725,11 @@ static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr) PP_Min_PCIEGen), get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); + + if (data->pcie_dpm_key_disabled) + phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, + data->dpm_table.pcie_speed_table.count, + smu7_override_pcie_speed(hwmgr), smu7_override_pcie_width(hwmgr)); } return 0; } @@ -1248,6 +1295,13 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr) NULL)), "Failed to enable pcie DPM during DPM Start Function!", return -EINVAL); + } else { + PP_ASSERT_WITH_CODE( + (0 == smum_send_msg_to_smc(hwmgr, + PPSMC_MSG_PCIeDPM_Disable, + NULL)), + "Failed to disble pcie DPM during DPM Start Function!", + return -EINVAL); } if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, @@ -3276,7 +3330,8 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, disable_mclk_switching_for_display = ((1 < hwmgr->display_config->num_display) && !hwmgr->display_config->multi_monitor_in_sync) || - smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time); + (hwmgr->display_config->num_display && + smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time)); disable_mclk_switching = disable_mclk_switching_for_frame_lock || disable_mclk_switching_for_display; @@ -5216,10 +5271,10 @@ static int smu7_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, for (j = 0; j < dep_sclk_table->count; j++) { valid_entry = false; for (k = 0; k < watermarks->num_wm_sets; k++) { - if (dep_sclk_table->entries[i].clk / 10 >= watermarks->wm_clk_ranges[k].wm_min_eng_clk_in_khz && - dep_sclk_table->entries[i].clk / 10 < watermarks->wm_clk_ranges[k].wm_max_eng_clk_in_khz && - dep_mclk_table->entries[i].clk / 10 >= watermarks->wm_clk_ranges[k].wm_min_mem_clk_in_khz && - dep_mclk_table->entries[i].clk / 10 < watermarks->wm_clk_ranges[k].wm_max_mem_clk_in_khz) { + if (dep_sclk_table->entries[i].clk >= watermarks->wm_clk_ranges[k].wm_min_eng_clk_in_khz / 10 && + dep_sclk_table->entries[i].clk < 
watermarks->wm_clk_ranges[k].wm_max_eng_clk_in_khz / 10 && + dep_mclk_table->entries[i].clk >= watermarks->wm_clk_ranges[k].wm_min_mem_clk_in_khz / 10 && + dep_mclk_table->entries[i].clk < watermarks->wm_clk_ranges[k].wm_max_mem_clk_in_khz / 10) { valid_entry = true; table->DisplayWatermark[i][j] = watermarks->wm_clk_ranges[k].wm_set_id; break; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c index 29c99642d22d..599ec9726601 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c @@ -54,6 +54,9 @@ #include "smuio/smuio_9_0_offset.h" #include "smuio/smuio_9_0_sh_mask.h" +#define smnPCIE_LC_SPEED_CNTL 0x11140290 +#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288 + #define HBM_MEMORY_CHANNEL_WIDTH 128 static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2}; @@ -443,8 +446,7 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr) if (PP_CAP(PHM_PlatformCaps_VCEDPM)) data->smu_features[GNLD_DPM_VCE].supported = true; - if (!data->registry_data.pcie_dpm_key_disabled) - data->smu_features[GNLD_DPM_LINK].supported = true; + data->smu_features[GNLD_DPM_LINK].supported = true; if (!data->registry_data.dcefclk_dpm_key_disabled) data->smu_features[GNLD_DPM_DCEFCLK].supported = true; @@ -1505,6 +1507,55 @@ static int vega10_populate_single_lclk_level(struct pp_hwmgr *hwmgr, return 0; } +static int vega10_override_pcie_parameters(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + uint32_t pcie_gen = 0, pcie_width = 0; + PPTable_t *pp_table = &(data->smc_state_table.pp_table); + int i; + + if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4) + pcie_gen = 3; + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) + pcie_gen = 2; + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) + pcie_gen = 1; + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1) + pcie_gen = 0; + + if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16) + pcie_width = 6; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12) + pcie_width = 5; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8) + pcie_width = 4; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4) + pcie_width = 3; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2) + pcie_width = 2; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1) + pcie_width = 1; + + for (i = 0; i < NUM_LINK_LEVELS; i++) { + if (pp_table->PcieGenSpeed[i] > pcie_gen) + pp_table->PcieGenSpeed[i] = pcie_gen; + + if (pp_table->PcieLaneCount[i] > pcie_width) + pp_table->PcieLaneCount[i] = pcie_width; + } + + if (data->registry_data.pcie_dpm_key_disabled) { + for (i = 0; i < NUM_LINK_LEVELS; i++) { + pp_table->PcieGenSpeed[i] = pcie_gen; + pp_table->PcieLaneCount[i] = pcie_width; + } + } + + return 0; +} + static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr) { int result = -1; @@ -2556,6 +2607,11 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr) "Failed to initialize Link Level!", return result); + result = vega10_override_pcie_parameters(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to override pcie parameters!", + return result); + result = vega10_populate_all_graphic_levels(hwmgr); PP_ASSERT_WITH_CODE(!result, "Failed to initialize Graphics 
Level!", @@ -2919,9 +2975,18 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap) } } + if (data->registry_data.pcie_dpm_key_disabled) { + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, + false, data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap), + "Attempt to Disable Link DPM feature Failed!", return -EINVAL); + data->smu_features[GNLD_DPM_LINK].enabled = false; + data->smu_features[GNLD_DPM_LINK].supported = false; + } + return 0; } + static int vega10_enable_disable_PCC_limit_feature(struct pp_hwmgr *hwmgr, bool enable) { struct vega10_hwmgr *data = hwmgr->backend; @@ -4536,6 +4601,24 @@ static int vega10_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe return 0; } +static int vega10_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = hwmgr->adev; + + return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) & + PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) + >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT; +} + +static int vega10_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = hwmgr->adev; + + return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & + PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) + >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; +} + static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf) { @@ -4544,8 +4627,9 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table); struct vega10_single_dpm_table *soc_table = &(data->dpm_table.soc_table); struct vega10_single_dpm_table *dcef_table = &(data->dpm_table.dcef_table); - struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table); struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL; + uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width; + PPTable_t *pptable = &(data->smc_state_table.pp_table); int i, now, size = 0, count = 0; @@ -4602,15 +4686,31 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, "*" : ""); break; case PP_PCIE: - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex, &now); - - for (i = 0; i < pcie_table->count; i++) - size += sprintf(buf + size, "%d: %s %s\n", i, - (pcie_table->pcie_gen[i] == 0) ? "2.5GT/s, x1" : - (pcie_table->pcie_gen[i] == 1) ? "5.0GT/s, x16" : - (pcie_table->pcie_gen[i] == 2) ? "8.0GT/s, x16" : "", - (i == now) ? "*" : ""); + current_gen_speed = + vega10_get_current_pcie_link_speed_level(hwmgr); + current_lane_width = + vega10_get_current_pcie_link_width_level(hwmgr); + for (i = 0; i < NUM_LINK_LEVELS; i++) { + gen_speed = pptable->PcieGenSpeed[i]; + lane_width = pptable->PcieLaneCount[i]; + + size += sprintf(buf + size, "%d: %s %s %s\n", i, + (gen_speed == 0) ? "2.5GT/s," : + (gen_speed == 1) ? "5.0GT/s," : + (gen_speed == 2) ? "8.0GT/s," : + (gen_speed == 3) ? "16.0GT/s," : "", + (lane_width == 1) ? "x1" : + (lane_width == 2) ? "x2" : + (lane_width == 3) ? "x4" : + (lane_width == 4) ? "x8" : + (lane_width == 5) ? "x12" : + (lane_width == 6) ? "x16" : "", + (current_gen_speed == gen_speed) && + (current_lane_width == lane_width) ? 
+ "*" : ""); + } break; + case OD_SCLK: if (hwmgr->od_enabled) { size = sprintf(buf, "%s:\n", "OD_SCLK"); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c index c0753029a8e2..4f6da11e8f10 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c @@ -133,6 +133,7 @@ static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr) data->registry_data.auto_wattman_debug = 0; data->registry_data.auto_wattman_sample_period = 100; data->registry_data.auto_wattman_threshold = 50; + data->registry_data.pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK); } static int vega12_set_features_platform_caps(struct pp_hwmgr *hwmgr) @@ -481,6 +482,90 @@ static void vega12_init_dpm_state(struct vega12_dpm_state *dpm_state) dpm_state->hard_max_level = 0xffff; } +static int vega12_override_pcie_parameters(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); + struct vega12_hwmgr *data = + (struct vega12_hwmgr *)(hwmgr->backend); + uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg, pcie_gen_arg, pcie_width_arg; + PPTable_t *pp_table = &(data->smc_state_table.pp_table); + int i; + int ret; + + if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4) + pcie_gen = 3; + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) + pcie_gen = 2; + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) + pcie_gen = 1; + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1) + pcie_gen = 0; + + if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16) + pcie_width = 6; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12) + pcie_width = 5; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8) + pcie_width = 4; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4) + pcie_width = 3; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2) + pcie_width = 2; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1) + pcie_width = 1; + + /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1 + * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4 + * Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 + */ + for (i = 0; i < NUM_LINK_LEVELS; i++) { + pcie_gen_arg = (pp_table->PcieGenSpeed[i] > pcie_gen) ? pcie_gen : + pp_table->PcieGenSpeed[i]; + pcie_width_arg = (pp_table->PcieLaneCount[i] > pcie_width) ? 
pcie_width : + pp_table->PcieLaneCount[i]; + + if (pcie_gen_arg != pp_table->PcieGenSpeed[i] || pcie_width_arg != + pp_table->PcieLaneCount[i]) { + smu_pcie_arg = (i << 16) | (pcie_gen_arg << 8) | pcie_width_arg; + ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_OverridePcieParameters, smu_pcie_arg, + NULL); + PP_ASSERT_WITH_CODE(!ret, + "[OverridePcieParameters] Attempt to override pcie params failed!", + return ret); + } + + /* update the pptable */ + pp_table->PcieGenSpeed[i] = pcie_gen_arg; + pp_table->PcieLaneCount[i] = pcie_width_arg; + } + + /* override to the highest if it's disabled from ppfeaturmask */ + if (data->registry_data.pcie_dpm_key_disabled) { + for (i = 0; i < NUM_LINK_LEVELS; i++) { + smu_pcie_arg = (i << 16) | (pcie_gen << 8) | pcie_width; + ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_OverridePcieParameters, smu_pcie_arg, + NULL); + PP_ASSERT_WITH_CODE(!ret, + "[OverridePcieParameters] Attempt to override pcie params failed!", + return ret); + + pp_table->PcieGenSpeed[i] = pcie_gen; + pp_table->PcieLaneCount[i] = pcie_width; + } + ret = vega12_enable_smc_features(hwmgr, + false, + data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap); + PP_ASSERT_WITH_CODE(!ret, + "Attempt to Disable DPM LINK Failed!", + return ret); + data->smu_features[GNLD_DPM_LINK].enabled = false; + data->smu_features[GNLD_DPM_LINK].supported = false; + } + return 0; +} + static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr, PPCLK_e clk_id, uint32_t *num_of_levels) { @@ -968,6 +1053,11 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr) "Failed to enable all smu features!", return result); + result = vega12_override_pcie_parameters(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "[EnableDPMTasks] Failed to override pcie parameters!", + return result); + tmp_result = vega12_power_control_set_level(hwmgr); PP_ASSERT_WITH_CODE(!tmp_result, "Failed to power control set level!", diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c index 87811b005b85..b6ee3a285c9d 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c @@ -171,6 +171,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr) data->registry_data.gfxoff_controlled_by_driver = 1; data->gfxoff_allowed = false; data->counter_gfxoff = 0; + data->registry_data.pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK); } static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr) @@ -831,7 +832,9 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr) struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); - uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg; + uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg, pcie_gen_arg, pcie_width_arg; + PPTable_t *pp_table = &(data->smc_state_table.pp_table); + int i; int ret; if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4) @@ -860,17 +863,51 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr) * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4 * Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 */ - smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width; - ret = smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_OverridePcieParameters, smu_pcie_arg, - NULL); - PP_ASSERT_WITH_CODE(!ret, - "[OverridePcieParameters] Attempt to override pcie 
params failed!", - return ret); + for (i = 0; i < NUM_LINK_LEVELS; i++) { + pcie_gen_arg = (pp_table->PcieGenSpeed[i] > pcie_gen) ? pcie_gen : + pp_table->PcieGenSpeed[i]; + pcie_width_arg = (pp_table->PcieLaneCount[i] > pcie_width) ? pcie_width : + pp_table->PcieLaneCount[i]; + + if (pcie_gen_arg != pp_table->PcieGenSpeed[i] || pcie_width_arg != + pp_table->PcieLaneCount[i]) { + smu_pcie_arg = (i << 16) | (pcie_gen_arg << 8) | pcie_width_arg; + ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_OverridePcieParameters, smu_pcie_arg, + NULL); + PP_ASSERT_WITH_CODE(!ret, + "[OverridePcieParameters] Attempt to override pcie params failed!", + return ret); + } + + /* update the pptable */ + pp_table->PcieGenSpeed[i] = pcie_gen_arg; + pp_table->PcieLaneCount[i] = pcie_width_arg; + } + + /* override to the highest if it's disabled from ppfeaturmask */ + if (data->registry_data.pcie_dpm_key_disabled) { + for (i = 0; i < NUM_LINK_LEVELS; i++) { + smu_pcie_arg = (i << 16) | (pcie_gen << 8) | pcie_width; + ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_OverridePcieParameters, smu_pcie_arg, + NULL); + PP_ASSERT_WITH_CODE(!ret, + "[OverridePcieParameters] Attempt to override pcie params failed!", + return ret); - data->pcie_parameters_override = true; - data->pcie_gen_level1 = pcie_gen; - data->pcie_width_level1 = pcie_width; + pp_table->PcieGenSpeed[i] = pcie_gen; + pp_table->PcieLaneCount[i] = pcie_width; + } + ret = vega20_enable_smc_features(hwmgr, + false, + data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap); + PP_ASSERT_WITH_CODE(!ret, + "Attempt to Disable DPM LINK Failed!", + return ret); + data->smu_features[GNLD_DPM_LINK].enabled = false; + data->smu_features[GNLD_DPM_LINK].supported = false; + } return 0; } @@ -3319,9 +3356,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, data->od8_settings.od8_settings_array; OverDriveTable_t *od_table = &(data->smc_state_table.overdrive_table); - struct phm_ppt_v3_information *pptable_information = - (struct phm_ppt_v3_information *)hwmgr->pptable; - PPTable_t *pptable = (PPTable_t *)pptable_information->smc_pptable; + PPTable_t *pptable = &(data->smc_state_table.pp_table); struct pp_clock_levels_with_latency clocks; struct vega20_single_dpm_table *fclk_dpm_table = &(data->dpm_table.fclk_table); @@ -3420,13 +3455,9 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, current_lane_width = vega20_get_current_pcie_link_width_level(hwmgr); for (i = 0; i < NUM_LINK_LEVELS; i++) { - if (i == 1 && data->pcie_parameters_override) { - gen_speed = data->pcie_gen_level1; - lane_width = data->pcie_width_level1; - } else { - gen_speed = pptable->PcieGenSpeed[i]; - lane_width = pptable->PcieLaneCount[i]; - } + gen_speed = pptable->PcieGenSpeed[i]; + lane_width = pptable->PcieLaneCount[i]; + size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i, (gen_speed == 0) ? "2.5GT/s," : (gen_speed == 1) ? 
"5.0GT/s," : diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index d143ef1b460b..cd905e41080e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -1294,7 +1294,7 @@ static int smu_disable_dpms(struct smu_context *smu) bool use_baco = !smu->is_apu && ((amdgpu_in_reset(adev) && (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) || - ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev))); + ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev))); /* * For custom pptable uploading, skip the DPM features @@ -1431,7 +1431,8 @@ static int smu_suspend(void *handle) smu->watermarks_bitmap &= ~(WATERMARKS_LOADED); - if (smu->is_apu) + /* skip CGPG when in S0ix */ + if (smu->is_apu && !adev->in_s0ix) smu_set_gfx_cgpg(&adev->smu, false); return 0; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index 45564a776e9b..9f0d03ae3109 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -1322,7 +1322,7 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu, CMN2ASIC_MAPPING_WORKLOAD, profile_mode); if (workload_type < 0) { - dev_err(smu->adev->dev, "Unsupported power profile mode %d on arcturus\n", profile_mode); + dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on arcturus\n", profile_mode); return -EINVAL; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 90585461a56e..a6211858ead4 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -78,6 +78,9 @@ MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_smc.bin"); #define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000 #define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE +#define mmTHM_BACO_CNTL_ARCT 0xA7 +#define mmTHM_BACO_CNTL_ARCT_BASE_IDX 0 + static int link_width[] = {0, 1, 2, 4, 8, 12, 16}; static int link_speed[] = {25, 50, 80, 160}; @@ -1532,9 +1535,15 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) break; default: if (!ras || !ras->supported) { - data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL); - data |= 0x80000000; - WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data); + if (adev->asic_type == CHIP_ARCTURUS) { + data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT); + data |= 0x80000000; + WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT, data); + } else { + data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL); + data |= 0x80000000; + WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data); + } ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL); } else { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 093b01159408..101eaa20db9b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -384,10 +384,15 @@ static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable) static bool vangogh_is_dpm_running(struct smu_context *smu) { + struct amdgpu_device *adev = smu->adev; int ret = 0; uint32_t feature_mask[2]; uint64_t feature_enabled; + /* we need to re-init after suspend so return false */ + if (adev->in_suspend) + return false; + ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2); if (ret) @@ -810,7 +815,7 @@ static int 
vangogh_set_power_profile_mode(struct smu_context *smu, long *input, CMN2ASIC_MAPPING_WORKLOAD, profile_mode); if (workload_type < 0) { - dev_err_once(smu->adev->dev, "Unsupported power profile mode %d on VANGOGH\n", + dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on VANGOGH\n", profile_mode); return -EINVAL; } @@ -1685,9 +1690,9 @@ static int vangogh_system_features_control(struct smu_context *smu, bool en) uint32_t feature_mask[2]; int ret = 0; - if (adev->pm.fw_version >= 0x43f1700) + if (adev->pm.fw_version >= 0x43f1700 && !en) ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify, - en ? RLC_STATUS_NORMAL : RLC_STATUS_OFF, NULL); + RLC_STATUS_OFF, NULL); bitmap_zero(feature->enabled, feature->feature_num); bitmap_zero(feature->supported, feature->feature_num); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index 5faa509f0dba..5493388fcb10 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -844,7 +844,7 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u * TODO: If some case need switch to powersave/default power mode * then can consider enter WORKLOAD_COMPUTE/WORKLOAD_CUSTOM for power saving. */ - dev_err_once(smu->adev->dev, "Unsupported power profile mode %d on RENOIR\n", profile_mode); + dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on RENOIR\n", profile_mode); return -EINVAL; } diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index b9a616737c0e..f6baa2046124 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -2048,7 +2048,7 @@ static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper) if (shadow) vfree(shadow); - else + else if (fb_helper->buffer) drm_client_buffer_vunmap(fb_helper->buffer); drm_client_framebuffer_delete(fb_helper->buffer); diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 9825c378dfa6..6d625cee7a6a 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -357,13 +357,14 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem, if (--shmem->vmap_use_count > 0) return; - if (obj->import_attach) + if (obj->import_attach) { dma_buf_vunmap(obj->import_attach->dmabuf, map); - else + } else { vunmap(shmem->vaddr); + drm_gem_shmem_put_pages(shmem); + } shmem->vaddr = NULL; - drm_gem_shmem_put_pages(shmem); } /* @@ -525,14 +526,28 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf) struct drm_gem_object *obj = vma->vm_private_data; struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); loff_t num_pages = obj->size >> PAGE_SHIFT; + vm_fault_t ret; struct page *page; + pgoff_t page_offset; - if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages)) - return VM_FAULT_SIGBUS; + /* We don't use vmf->pgoff since that has the fake offset */ + page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; - page = shmem->pages[vmf->pgoff]; + mutex_lock(&shmem->pages_lock); - return vmf_insert_page(vma, vmf->address, page); + if (page_offset >= num_pages || + WARN_ON_ONCE(!shmem->pages) || + shmem->madv < 0) { + ret = VM_FAULT_SIGBUS; + } else { + page = shmem->pages[page_offset]; + + ret = vmf_insert_page(vma, vmf->address, page); + } + + mutex_unlock(&shmem->pages_lock); + + return ret; } static void drm_gem_shmem_vm_open(struct vm_area_struct *vma) @@ -581,9 +596,6 @@ int 
drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) struct drm_gem_shmem_object *shmem; int ret; - /* Remove the fake offset */ - vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node); - if (obj->import_attach) { /* Drop the reference drm_gem_mmap_obj() acquired.*/ drm_gem_object_put(obj); diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c index f86448ab1fe0..dc734d4828a1 100644 --- a/drivers/gpu/drm/drm_ioc32.c +++ b/drivers/gpu/drm/drm_ioc32.c @@ -99,6 +99,8 @@ static int compat_drm_version(struct file *file, unsigned int cmd, if (copy_from_user(&v32, (void __user *)arg, sizeof(v32))) return -EFAULT; + memset(&v, 0, sizeof(v)); + v = (struct drm_version) { .name_len = v32.name_len, .name = compat_ptr(v32.name), @@ -137,6 +139,9 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd, if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32))) return -EFAULT; + + memset(&uq, 0, sizeof(uq)); + uq = (struct drm_unique){ .unique_len = uq32.unique_len, .unique = compat_ptr(uq32.unique), @@ -265,6 +270,8 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd, if (copy_from_user(&c32, argp, sizeof(c32))) return -EFAULT; + memset(&client, 0, sizeof(client)); + client.idx = c32.idx; err = drm_ioctl_kernel(file, drm_getclient, &client, 0); @@ -852,6 +859,8 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd, if (copy_from_user(&req32, argp, sizeof(req32))) return -EFAULT; + memset(&req, 0, sizeof(req)); + req.request.type = req32.request.type; req.request.sequence = req32.request.sequence; req.request.signal = req32.request.signal; @@ -889,6 +898,8 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd, struct drm_mode_fb_cmd2 req64; int err; + memset(&req64, 0, sizeof(req64)); + if (copy_from_user(&req64, argp, offsetof(drm_mode_fb_cmd232_t, modifier))) return -EFAULT; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 6d38c5c17f23..db69f19ab5bc 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -689,7 +689,8 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj) struct page **pages = pvec + pinned; ret = pin_user_pages_fast(ptr, num_pages, - !userptr->ro ? 
FOLL_WRITE : 0, pages); + FOLL_WRITE | FOLL_FORCE | FOLL_LONGTERM, + pages); if (ret < 0) { unpin_user_pages(pvec, pinned); kvfree(pvec); diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 1f79bc2a881e..1510e4e2973c 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -13,7 +13,6 @@ #include <linux/irq.h> #include <linux/mfd/syscon.h> #include <linux/of_device.h> -#include <linux/of_gpio.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index 4683f98f7e54..c3f2962aa1eb 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -317,12 +317,13 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ if (!new_plane_state->hw.crtc && !old_plane_state->hw.crtc) return 0; - new_crtc_state->enabled_planes |= BIT(plane->id); - ret = plane->check_plane(new_crtc_state, new_plane_state); if (ret) return ret; + if (fb) + new_crtc_state->enabled_planes |= BIT(plane->id); + /* FIXME pre-g4x don't work like this */ if (new_plane_state->uapi.visible) new_crtc_state->active_planes |= BIT(plane->id); diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 8c12d5375607..775d89b6c3fc 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -3619,9 +3619,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) { int ret; - intel_dp_lttpr_init(intel_dp); - - if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) + if (intel_dp_init_lttpr_and_dprx_caps(intel_dp) < 0) return false; /* diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c index eaebf123310a..10fe17b7280d 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c @@ -133,6 +133,7 @@ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp, else precharge = 5; + /* Max timeout value on G4x-BDW: 1.6ms */ if (IS_BROADWELL(dev_priv)) timeout = DP_AUX_CH_CTL_TIME_OUT_600us; else @@ -159,6 +160,12 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp, enum phy phy = intel_port_to_phy(i915, dig_port->base.port); u32 ret; + /* + * Max timeout values: + * SKL-GLK: 1.6ms + * CNL: 3.2ms + * ICL+: 4ms + */ ret = DP_AUX_CH_CTL_SEND_BUSY | DP_AUX_CH_CTL_DONE | DP_AUX_CH_CTL_INTERRUPT | diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 892d7db7d94f..be6ac0dd846e 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -34,6 +34,11 @@ intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE]) link_status[3], link_status[4], link_status[5]); } +static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp) +{ + memset(&intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps)); +} + static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp) { intel_dp->lttpr_common_caps[DP_PHY_REPEATER_CNT - @@ -81,19 +86,36 @@ static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp, static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp) { - if (drm_dp_read_lttpr_common_caps(&intel_dp->aux, - 
intel_dp->lttpr_common_caps) < 0) { - memset(intel_dp->lttpr_common_caps, 0, - sizeof(intel_dp->lttpr_common_caps)); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + if (intel_dp_is_edp(intel_dp)) return false; - } + + /* + * Detecting LTTPRs must be avoided on platforms with an AUX timeout + * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1). + */ + if (INTEL_GEN(i915) < 10) + return false; + + if (drm_dp_read_lttpr_common_caps(&intel_dp->aux, + intel_dp->lttpr_common_caps) < 0) + goto reset_caps; drm_dbg_kms(&dp_to_i915(intel_dp)->drm, "LTTPR common capabilities: %*ph\n", (int)sizeof(intel_dp->lttpr_common_caps), intel_dp->lttpr_common_caps); + /* The minimum value of LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV is 1.4 */ + if (intel_dp->lttpr_common_caps[0] < 0x14) + goto reset_caps; + return true; + +reset_caps: + intel_dp_reset_lttpr_common_caps(intel_dp); + return false; } static bool @@ -106,33 +128,49 @@ intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable) } /** - * intel_dp_lttpr_init - detect LTTPRs and init the LTTPR link training mode + * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode * @intel_dp: Intel DP struct * - * Read the LTTPR common capabilities, switch to non-transparent link training - * mode if any is detected and read the PHY capabilities for all detected - * LTTPRs. In case of an LTTPR detection error or if the number of + * Read the LTTPR common and DPRX capabilities and switch to non-transparent + * link training mode if any is detected and read the PHY capabilities for all + * detected LTTPRs. In case of an LTTPR detection error or if the number of * LTTPRs is more than is supported (8), fall back to the no-LTTPR, * transparent mode link training mode. * * Returns: - * >0 if LTTPRs were detected and the non-transparent LT mode was set + * >0 if LTTPRs were detected and the non-transparent LT mode was set. The + * DPRX capabilities are read out. * 0 if no LTTPRs or more than 8 LTTPRs were detected or in case of a - * detection failure and the transparent LT mode was set + * detection failure and the transparent LT mode was set. The DPRX + * capabilities are read out. + * <0 Reading out the DPRX capabilities failed. */ -int intel_dp_lttpr_init(struct intel_dp *intel_dp) +int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp) { int lttpr_count; bool ret; int i; - if (intel_dp_is_edp(intel_dp)) - return 0; - ret = intel_dp_read_lttpr_common_caps(intel_dp); + + /* The DPTX shall read the DPRX caps after LTTPR detection. */ + if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) { + intel_dp_reset_lttpr_common_caps(intel_dp); + return -EIO; + } + if (!ret) return 0; + /* + * The 0xF0000-0xF02FF range is only valid if the DPCD revision is + * at least 1.4. + */ + if (intel_dp->dpcd[DP_DPCD_REV] < 0x14) { + intel_dp_reset_lttpr_common_caps(intel_dp); + return 0; + } + lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps); /* * Prevent setting LTTPR transparent mode explicitly if no LTTPRs are @@ -172,7 +210,7 @@ int intel_dp_lttpr_init(struct intel_dp *intel_dp) return lttpr_count; } -EXPORT_SYMBOL(intel_dp_lttpr_init); +EXPORT_SYMBOL(intel_dp_init_lttpr_and_dprx_caps); static u8 dp_voltage_max(u8 preemph) { @@ -807,7 +845,10 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp, * TODO: Reiniting LTTPRs here won't be needed once proper connector * HW state readout is added. 
*/ - int lttpr_count = intel_dp_lttpr_init(intel_dp); + int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp); + + if (lttpr_count < 0) + return; if (!intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count)) intel_dp_schedule_fallback_link_training(intel_dp, crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h index 6a1f76bd8c75..9cb7c28027f0 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h @@ -11,7 +11,7 @@ struct intel_crtc_state; struct intel_dp; -int intel_dp_lttpr_init(struct intel_dp *intel_dp); +int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp); void intel_dp_get_adjust_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index f58cc5700784..a86c57d117f2 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -1014,20 +1014,14 @@ static i915_reg_t dss_ctl1_reg(const struct intel_crtc_state *crtc_state) { enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; - if (crtc_state->cpu_transcoder == TRANSCODER_EDP) - return DSS_CTL1; - - return ICL_PIPE_DSS_CTL1(pipe); + return is_pipe_dsc(crtc_state) ? ICL_PIPE_DSS_CTL1(pipe) : DSS_CTL1; } static i915_reg_t dss_ctl2_reg(const struct intel_crtc_state *crtc_state) { enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; - if (crtc_state->cpu_transcoder == TRANSCODER_EDP) - return DSS_CTL2; - - return ICL_PIPE_DSS_CTL2(pipe); + return is_pipe_dsc(crtc_state) ? ICL_PIPE_DSS_CTL2(pipe) : DSS_CTL2; } void intel_dsc_enable(struct intel_encoder *encoder, diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index fb1b1d096975..9cf555d6842b 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -713,9 +713,12 @@ static int engine_setup_common(struct intel_engine_cs *engine) goto err_status; } + err = intel_engine_init_cmd_parser(engine); + if (err) + goto err_cmd_parser; + intel_engine_init_active(engine, ENGINE_PHYSICAL); intel_engine_init_execlists(engine); - intel_engine_init_cmd_parser(engine); intel_engine_init__pm(engine); intel_engine_init_retire(engine); @@ -732,6 +735,8 @@ static int engine_setup_common(struct intel_engine_cs *engine) return 0; +err_cmd_parser: + intel_breadcrumbs_free(engine->breadcrumbs); err_status: cleanup_status_page(engine); return err; diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c index a357bb431815..67de2b189598 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c @@ -316,7 +316,18 @@ void i915_vma_revoke_fence(struct i915_vma *vma) WRITE_ONCE(fence->vma, NULL); vma->fence = NULL; - with_intel_runtime_pm_if_in_use(fence_to_uncore(fence)->rpm, wakeref) + /* + * Skip the write to HW if and only if the device is currently + * suspended. + * + * If the driver does not currently hold a wakeref (if_in_use == 0), + * the device may currently be runtime suspended, or it may be woken + * up before the suspend takes place. If the device is not suspended + * (powered down) and we skip clearing the fence register, the HW is + * left in an undefined state where we may end up with multiple + * registers overlapping. 
+ */ + with_intel_runtime_pm_if_active(fence_to_uncore(fence)->rpm, wakeref) fence_write(fence); } diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index ced9a96d7c34..5f86f5b2caf6 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -940,7 +940,7 @@ static void fini_hash_table(struct intel_engine_cs *engine) * struct intel_engine_cs based on whether the platform requires software * command parsing. */ -void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) +int intel_engine_init_cmd_parser(struct intel_engine_cs *engine) { const struct drm_i915_cmd_table *cmd_tables; int cmd_table_count; @@ -948,7 +948,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) if (!IS_GEN(engine->i915, 7) && !(IS_GEN(engine->i915, 9) && engine->class == COPY_ENGINE_CLASS)) - return; + return 0; switch (engine->class) { case RENDER_CLASS: @@ -1013,19 +1013,19 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) break; default: MISSING_CASE(engine->class); - return; + goto out; } if (!validate_cmds_sorted(engine, cmd_tables, cmd_table_count)) { drm_err(&engine->i915->drm, "%s: command descriptions are not sorted\n", engine->name); - return; + goto out; } if (!validate_regs_sorted(engine)) { drm_err(&engine->i915->drm, "%s: registers are not sorted\n", engine->name); - return; + goto out; } ret = init_hash_table(engine, cmd_tables, cmd_table_count); @@ -1033,10 +1033,17 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) drm_err(&engine->i915->drm, "%s: initialised failed!\n", engine->name); fini_hash_table(engine); - return; + goto out; } engine->flags |= I915_ENGINE_USING_CMD_PARSER; + +out: + if (intel_engine_requires_cmd_parser(engine) && + !intel_engine_using_cmd_parser(engine)) + return -EINVAL; + + return 0; } /** diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 26d69d06aa6d..cb62ddba2035 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1952,7 +1952,7 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type); /* i915_cmd_parser.c */ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv); -void intel_engine_init_cmd_parser(struct intel_engine_cs *engine); +int intel_engine_init_cmd_parser(struct intel_engine_cs *engine); void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine); int intel_engine_cmd_parser(struct intel_engine_cs *engine, struct i915_vma *batch, diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 112ba5f2ce90..e62ad69606f6 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -603,7 +603,6 @@ static int append_oa_sample(struct i915_perf_stream *stream, { int report_size = stream->oa_buffer.format_size; struct drm_i915_perf_record_header header; - u32 sample_flags = stream->sample_flags; header.type = DRM_I915_PERF_RECORD_SAMPLE; header.pad = 0; @@ -617,10 +616,8 @@ static int append_oa_sample(struct i915_perf_stream *stream, return -EFAULT; buf += sizeof(header); - if (sample_flags & SAMPLE_OA_REPORT) { - if (copy_to_user(buf, report, report_size)) - return -EFAULT; - } + if (copy_to_user(buf, report, report_size)) + return -EFAULT; (*offset) += header.size; @@ -2682,7 +2679,7 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream) stream->perf->ops.oa_enable(stream); - if (stream->periodic) + if (stream->sample_flags & SAMPLE_OA_REPORT) 
hrtimer_start(&stream->poll_check_timer, ns_to_ktime(stream->poll_oa_period), HRTIMER_MODE_REL_PINNED); @@ -2745,7 +2742,7 @@ static void i915_oa_stream_disable(struct i915_perf_stream *stream) { stream->perf->ops.oa_disable(stream); - if (stream->periodic) + if (stream->sample_flags & SAMPLE_OA_REPORT) hrtimer_cancel(&stream->poll_check_timer); } @@ -3028,7 +3025,7 @@ static ssize_t i915_perf_read(struct file *file, * disabled stream as an error. In particular it might otherwise lead * to a deadlock for blocking file descriptors... */ - if (!stream->enabled) + if (!stream->enabled || !(stream->sample_flags & SAMPLE_OA_REPORT)) return -EIO; if (!(file->f_flags & O_NONBLOCK)) { diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 7146cd0f3256..aaf1f0045b16 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3316,7 +3316,18 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define ILK_DISPLAY_CHICKEN1 _MMIO(0x42000) #define ILK_FBCQ_DIS (1 << 22) -#define ILK_PABSTRETCH_DIS (1 << 21) +#define ILK_PABSTRETCH_DIS REG_BIT(21) +#define ILK_SABSTRETCH_DIS REG_BIT(20) +#define IVB_PRI_STRETCH_MAX_MASK REG_GENMASK(21, 20) +#define IVB_PRI_STRETCH_MAX_X8 REG_FIELD_PREP(IVB_PRI_STRETCH_MAX_MASK, 0) +#define IVB_PRI_STRETCH_MAX_X4 REG_FIELD_PREP(IVB_PRI_STRETCH_MAX_MASK, 1) +#define IVB_PRI_STRETCH_MAX_X2 REG_FIELD_PREP(IVB_PRI_STRETCH_MAX_MASK, 2) +#define IVB_PRI_STRETCH_MAX_X1 REG_FIELD_PREP(IVB_PRI_STRETCH_MAX_MASK, 3) +#define IVB_SPR_STRETCH_MAX_MASK REG_GENMASK(19, 18) +#define IVB_SPR_STRETCH_MAX_X8 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 0) +#define IVB_SPR_STRETCH_MAX_X4 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 1) +#define IVB_SPR_STRETCH_MAX_X2 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 2) +#define IVB_SPR_STRETCH_MAX_X1 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 3) /* @@ -8039,6 +8050,16 @@ enum { #define _CHICKEN_PIPESL_1_A 0x420b0 #define _CHICKEN_PIPESL_1_B 0x420b4 +#define HSW_PRI_STRETCH_MAX_MASK REG_GENMASK(28, 27) +#define HSW_PRI_STRETCH_MAX_X8 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 0) +#define HSW_PRI_STRETCH_MAX_X4 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 1) +#define HSW_PRI_STRETCH_MAX_X2 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 2) +#define HSW_PRI_STRETCH_MAX_X1 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 3) +#define HSW_SPR_STRETCH_MAX_MASK REG_GENMASK(26, 25) +#define HSW_SPR_STRETCH_MAX_X8 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 0) +#define HSW_SPR_STRETCH_MAX_X4 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 1) +#define HSW_SPR_STRETCH_MAX_X2 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 2) +#define HSW_SPR_STRETCH_MAX_X1 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 3) #define HSW_FBCQ_DIS (1 << 22) #define BDW_DPRS_MASK_VBLANK_SRD (1 << 0) #define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 0c3e63f27c29..97b57acc02e2 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -7245,11 +7245,16 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv) intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1, intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD); - /* WaPsrDPRSUnmaskVBlankInSRD:bdw */ for_each_pipe(dev_priv, pipe) { + /* WaPsrDPRSUnmaskVBlankInSRD:bdw */ intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe), intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe)) | BDW_DPRS_MASK_VBLANK_SRD); + + /* Undocumented 
but fixes async flip + VT-d corruption */ + if (intel_vtd_active()) + intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe), + HSW_PRI_STRETCH_MAX_MASK, HSW_PRI_STRETCH_MAX_X1); } /* WaVSRefCountFullforceMissDisable:bdw */ @@ -7285,11 +7290,20 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv) static void hsw_init_clock_gating(struct drm_i915_private *dev_priv) { + enum pipe pipe; + /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A), intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A)) | HSW_FBCQ_DIS); + for_each_pipe(dev_priv, pipe) { + /* Undocumented but fixes async flip + VT-d corruption */ + if (intel_vtd_active()) + intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe), + HSW_PRI_STRETCH_MAX_MASK, HSW_PRI_STRETCH_MAX_X1); + } + /* This is required by WaCatErrorRejectionIssue:hsw */ intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 153ca9e65382..8b725efb2254 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -412,12 +412,20 @@ intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm) } /** - * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use + * __intel_runtime_pm_get_if_active - grab a runtime pm reference if device is active * @rpm: the intel_runtime_pm structure + * @ignore_usecount: get a ref even if dev->power.usage_count is 0 * * This function grabs a device-level runtime pm reference if the device is - * already in use and ensures that it is powered up. It is illegal to try - * and access the HW should intel_runtime_pm_get_if_in_use() report failure. + * already active and ensures that it is powered up. It is illegal to try + * and access the HW should intel_runtime_pm_get_if_active() report failure. + * + * If @ignore_usecount=true, a reference will be acquired even if there is no + * user requiring the device to be powered up (dev->power.usage_count == 0). + * If the function returns false in this case then it's guaranteed that the + * device's runtime suspend hook has been called already or that it will be + * called (and hence it's also guaranteed that the device's runtime resume + * hook will be called eventually). * * Any runtime pm reference obtained by this function must have a symmetric * call to intel_runtime_pm_put() to release the reference again. @@ -425,7 +433,8 @@ intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm) * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates * as True if the wakeref was acquired, or False otherwise. */ -intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm) +static intel_wakeref_t __intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm, + bool ignore_usecount) { if (IS_ENABLED(CONFIG_PM)) { /* @@ -434,7 +443,7 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm) * function, since the power state is undefined. This applies * atm to the late/early system suspend/resume handlers. 
*/ - if (pm_runtime_get_if_in_use(rpm->kdev) <= 0) + if (pm_runtime_get_if_active(rpm->kdev, ignore_usecount) <= 0) return 0; } @@ -443,6 +452,16 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm) return track_intel_runtime_pm_wakeref(rpm); } +intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm) +{ + return __intel_runtime_pm_get_if_active(rpm, false); +} + +intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm) +{ + return __intel_runtime_pm_get_if_active(rpm, true); +} + /** * intel_runtime_pm_get_noresume - grab a runtime pm reference * @rpm: the intel_runtime_pm structure diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h index ae64ff14c642..1e4ddd11c12b 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.h +++ b/drivers/gpu/drm/i915/intel_runtime_pm.h @@ -177,6 +177,7 @@ void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm); intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm); intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm); +intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm); intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm); intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm); @@ -188,6 +189,10 @@ intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm); for ((wf) = intel_runtime_pm_get_if_in_use(rpm); (wf); \ intel_runtime_pm_put((rpm), (wf)), (wf) = 0) +#define with_intel_runtime_pm_if_active(rpm, wf) \ + for ((wf) = intel_runtime_pm_get_if_active(rpm); (wf); \ + intel_runtime_pm_put((rpm), (wf)), (wf) = 0) + void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm); #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref); diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index d1a9841adeed..e6a88c8cbd69 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -215,7 +215,7 @@ static int imx_drm_bind(struct device *dev) ret = drmm_mode_config_init(drm); if (ret) - return ret; + goto err_kms; ret = drm_vblank_init(drm, MAX_CRTC); if (ret) diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index dbfe39e2f7f6..ffdc492c5bc5 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -197,6 +197,11 @@ static void imx_ldb_encoder_enable(struct drm_encoder *encoder) int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder); + if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) { + dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux); + return; + } + drm_panel_prepare(imx_ldb_ch->panel); if (dual) { @@ -255,6 +260,11 @@ imx_ldb_encoder_atomic_mode_set(struct drm_encoder *encoder, int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder); u32 bus_format = imx_ldb_ch->bus_format; + if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) { + dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux); + return; + } + if (mode->clock > 170000) { dev_warn(ldb->dev, "%s: mode exceeds 170 MHz pixel clock\n", __func__); @@ -583,7 +593,7 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) struct imx_ldb_channel *channel = &imx_ldb->channel[i]; if (!channel->ldb) - break; + continue; ret = imx_ldb_register(drm, channel); if (ret) diff --git 
a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 42c5d3246cfc..453d8b4c5763 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -482,6 +482,16 @@ static int meson_probe_remote(struct platform_device *pdev, return count; } +static void meson_drv_shutdown(struct platform_device *pdev) +{ + struct meson_drm *priv = dev_get_drvdata(&pdev->dev); + struct drm_device *drm = priv->drm; + + DRM_DEBUG_DRIVER("\n"); + drm_kms_helper_poll_fini(drm); + drm_atomic_helper_shutdown(drm); +} + static int meson_drv_probe(struct platform_device *pdev) { struct component_match *match = NULL; @@ -553,6 +563,7 @@ static const struct dev_pm_ops meson_drv_pm_ops = { static struct platform_driver meson_drm_platform_driver = { .probe = meson_drv_probe, + .shutdown = meson_drv_shutdown, .driver = { .name = "meson-drm", .of_match_table = dt_match, diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c index 5ccc9da455a1..c35b06b46fcc 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_power.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c @@ -304,7 +304,7 @@ int a5xx_power_init(struct msm_gpu *gpu) /* Set up the limits management */ if (adreno_is_a530(adreno_gpu)) a530_lm_setup(gpu); - else + else if (adreno_is_a540(adreno_gpu)) a540_lm_setup(gpu); /* Set up SP/TP power collpase */ diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 71c917f909af..91cf46f84025 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -339,7 +339,7 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) else bit = a6xx_gmu_oob_bits[state].ack_new; - gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, bit); + gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit); } /* Enable CPU control of SPTP power power collapse */ diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index ba8e9d3cf0fe..690409ca8a18 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -522,28 +522,73 @@ static int a6xx_cp_init(struct msm_gpu *gpu) return a6xx_idle(gpu, ring) ? 0 : -EINVAL; } -static void a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu, +/* + * Check that the microcode version is new enough to include several key + * security fixes. Return true if the ucode is safe. + */ +static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu, struct drm_gem_object *obj) { + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; u32 *buf = msm_gem_get_vaddr(obj); + bool ret = false; if (IS_ERR(buf)) - return; + return false; /* - * If the lowest nibble is 0xa that is an indication that this microcode - * has been patched. The actual version is in dword [3] but we only care - * about the patchlevel which is the lowest nibble of dword [3] - * - * Otherwise check that the firmware is greater than or equal to 1.90 - * which was the first version that had this fix built in + * Targets up to a640 (a618, a630 and a640) need to check for a + * microcode version that is patched to support the whereami opcode or + * one that is new enough to include it by default. 
*/ - if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1) - a6xx_gpu->has_whereami = true; - else if ((buf[0] & 0xfff) > 0x190) - a6xx_gpu->has_whereami = true; + if (adreno_is_a618(adreno_gpu) || adreno_is_a630(adreno_gpu) || + adreno_is_a640(adreno_gpu)) { + /* + * If the lowest nibble is 0xa that is an indication that this + * microcode has been patched. The actual version is in dword + * [3] but we only care about the patchlevel which is the lowest + * nibble of dword [3] + * + * Otherwise check that the firmware is greater than or equal + * to 1.90 which was the first version that had this fix built + * in + */ + if ((((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1) || + (buf[0] & 0xfff) >= 0x190) { + a6xx_gpu->has_whereami = true; + ret = true; + goto out; + } + + DRM_DEV_ERROR(&gpu->pdev->dev, + "a630 SQE ucode is too old. Have version %x need at least %x\n", + buf[0] & 0xfff, 0x190); + } else { + /* + * a650 tier targets don't need whereami but still need to be + * equal to or newer than 1.95 for other security fixes + */ + if (adreno_is_a650(adreno_gpu)) { + if ((buf[0] & 0xfff) >= 0x195) { + ret = true; + goto out; + } + + DRM_DEV_ERROR(&gpu->pdev->dev, + "a650 SQE ucode is too old. Have version %x need at least %x\n", + buf[0] & 0xfff, 0x195); + } + /* + * When a660 is added those targets should return true here + * since those have all the critical security fixes built in + * from the start + */ + } +out: msm_gem_put_vaddr(obj); + return ret; } static int a6xx_ucode_init(struct msm_gpu *gpu) @@ -566,7 +611,13 @@ static int a6xx_ucode_init(struct msm_gpu *gpu) } msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw"); - a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo); + if (!a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo)) { + msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace); + drm_gem_object_put(a6xx_gpu->sqe_bo); + + a6xx_gpu->sqe_bo = NULL; + return -EPERM; + } } gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO, @@ -1350,35 +1401,20 @@ static int a6xx_set_supported_hw(struct device *dev, struct a6xx_gpu *a6xx_gpu, u32 revn) { struct opp_table *opp_table; - struct nvmem_cell *cell; u32 supp_hw = UINT_MAX; - void *buf; - - cell = nvmem_cell_get(dev, "speed_bin"); - /* - * -ENOENT means that the platform doesn't support speedbin which is - * fine - */ - if (PTR_ERR(cell) == -ENOENT) - return 0; - else if (IS_ERR(cell)) { - DRM_DEV_ERROR(dev, - "failed to read speed-bin. Some OPPs may not be supported by hardware"); - goto done; - } + u16 speedbin; + int ret; - buf = nvmem_cell_read(cell, NULL); - if (IS_ERR(buf)) { - nvmem_cell_put(cell); + ret = nvmem_cell_read_u16(dev, "speed_bin", &speedbin); + if (ret) { DRM_DEV_ERROR(dev, - "failed to read speed-bin. Some OPPs may not be supported by hardware"); + "failed to read speed-bin (%d). 
Some OPPs may not be supported by hardware", + ret); goto done; } + speedbin = le16_to_cpu(speedbin); - supp_hw = fuse_to_supp_hw(dev, revn, *((u32 *) buf)); - - kfree(buf); - nvmem_cell_put(cell); + supp_hw = fuse_to_supp_hw(dev, revn, speedbin); done: opp_table = dev_pm_opp_set_supported_hw(dev, &supp_hw, 1); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index 5a8e3e1fc48c..85f2c3564c96 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -43,6 +43,8 @@ #define DPU_DEBUGFS_DIR "msm_dpu" #define DPU_DEBUGFS_HWMASKNAME "hw_log_mask" +#define MIN_IB_BW 400000000ULL /* Min ib vote 400MB */ + static int dpu_kms_hw_init(struct msm_kms *kms); static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms); @@ -931,6 +933,9 @@ static int dpu_kms_hw_init(struct msm_kms *kms) DPU_DEBUG("REG_DMA is not defined"); } + if (of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss")) + dpu_kms_parse_data_bus_icc_path(dpu_kms); + pm_runtime_get_sync(&dpu_kms->pdev->dev); dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0); @@ -1032,9 +1037,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms) dpu_vbif_init_memtypes(dpu_kms); - if (of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss")) - dpu_kms_parse_data_bus_icc_path(dpu_kms); - pm_runtime_put_sync(&dpu_kms->pdev->dev); return 0; @@ -1191,10 +1193,10 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev) ddev = dpu_kms->dev; + WARN_ON(!(dpu_kms->num_paths)); /* Min vote of BW is required before turning on AXI clk */ for (i = 0; i < dpu_kms->num_paths; i++) - icc_set_bw(dpu_kms->path[i], 0, - dpu_kms->catalog->perf.min_dram_ib); + icc_set_bw(dpu_kms->path[i], 0, Bps_to_icc(MIN_IB_BW)); rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true); if (rc) { diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c index 1c6e1d2b947c..7c22bfe0fc7d 100644 --- a/drivers/gpu/drm/msm/dp/dp_aux.c +++ b/drivers/gpu/drm/msm/dp/dp_aux.c @@ -32,6 +32,8 @@ struct dp_aux_private { struct drm_dp_aux dp_aux; }; +#define MAX_AUX_RETRIES 5 + static const char *dp_aux_get_error(u32 aux_error) { switch (aux_error) { @@ -377,6 +379,11 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, ret = dp_aux_cmd_fifo_tx(aux, msg); if (ret < 0) { + if (aux->native) { + aux->retry_cnt++; + if (!(aux->retry_cnt % MAX_AUX_RETRIES)) + dp_catalog_aux_update_cfg(aux->catalog); + } usleep_range(400, 500); /* at least 400us to next try */ goto unlock_exit; } diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c index a45fe95aff49..3dc65877fa10 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c @@ -163,7 +163,7 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev, break; case MSM_DSI_PHY_7NM: case MSM_DSI_PHY_7NM_V4_1: - pll = msm_dsi_pll_7nm_init(pdev, id); + pll = msm_dsi_pll_7nm_init(pdev, type, id); break; default: pll = ERR_PTR(-ENXIO); diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h index 3405982a092c..bbecb1de5678 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h @@ -117,10 +117,12 @@ msm_dsi_pll_10nm_init(struct platform_device *pdev, int id) } #endif #ifdef CONFIG_DRM_MSM_DSI_7NM_PHY -struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id); +struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, + enum msm_dsi_phy_type 
type, int id); #else static inline struct msm_dsi_pll * -msm_dsi_pll_7nm_init(struct platform_device *pdev, int id) +msm_dsi_pll_7nm_init(struct platform_device *pdev, + enum msm_dsi_phy_type type, int id) { return ERR_PTR(-ENODEV); } diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c index 93bf142e4a4e..e29b3bfd63d1 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c @@ -325,7 +325,7 @@ static void dsi_pll_commit(struct dsi_pll_7nm *pll) pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1, reg->frac_div_start_low); pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1, reg->frac_div_start_mid); pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1, reg->frac_div_start_high); - pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40); + pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, reg->pll_lockdet_rate); pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06); pll_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, 0x10); /* TODO: 0x00 for CPHY */ pll_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS, reg->pll_clock_inverters); @@ -509,6 +509,7 @@ static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw, { struct msm_dsi_pll *pll = hw_clk_to_pll(hw); struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll); + struct dsi_pll_config *config = &pll_7nm->pll_configuration; void __iomem *base = pll_7nm->mmio; u64 ref_clk = pll_7nm->vco_ref_clk_rate; u64 vco_rate = 0x0; @@ -529,9 +530,8 @@ static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw, /* * TODO: * 1. Assumes prescaler is disabled - * 2. Multiplier is 2^18. it should be 2^(num_of_frac_bits) */ - multiplier = 1 << 18; + multiplier = 1 << config->frac_bits; pll_freq = dec * (ref_clk * 2); tmp64 = (ref_clk * 2 * frac); pll_freq += div_u64(tmp64, multiplier); @@ -852,7 +852,8 @@ err_base_clk_hw: return ret; } -struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id) +struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, + enum msm_dsi_phy_type type, int id) { struct dsi_pll_7nm *pll_7nm; struct msm_dsi_pll *pll; @@ -885,7 +886,7 @@ struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id) pll = &pll_7nm->base; pll->min_rate = 1000000000UL; pll->max_rate = 3500000000UL; - if (pll->type == MSM_DSI_PHY_7NM_V4_1) { + if (type == MSM_DSI_PHY_7NM_V4_1) { pll->min_rate = 600000000UL; pll->max_rate = (unsigned long)5000000000ULL; /* workaround for max rate overflowing on 32-bit builds: */ diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index 6a326761dc4a..edcaccaa27e6 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -57,10 +57,13 @@ static void vblank_put(struct msm_kms *kms, unsigned crtc_mask) static void lock_crtcs(struct msm_kms *kms, unsigned int crtc_mask) { + int crtc_index; struct drm_crtc *crtc; - for_each_crtc_mask(kms->dev, crtc, crtc_mask) - mutex_lock(&kms->commit_lock[drm_crtc_index(crtc)]); + for_each_crtc_mask(kms->dev, crtc, crtc_mask) { + crtc_index = drm_crtc_index(crtc); + mutex_lock_nested(&kms->commit_lock[crtc_index], crtc_index); + } } static void unlock_crtcs(struct msm_kms *kms, unsigned int crtc_mask) diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 94525ac76d4e..a5c6b8c23336 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -1072,6 +1072,10 @@ static int __maybe_unused msm_pm_resume(struct device 
*dev) static int __maybe_unused msm_pm_prepare(struct device *dev) { struct drm_device *ddev = dev_get_drvdata(dev); + struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL; + + if (!priv || !priv->kms) + return 0; return drm_mode_config_helper_suspend(ddev); } @@ -1079,6 +1083,10 @@ static int __maybe_unused msm_pm_prepare(struct device *dev) static void __maybe_unused msm_pm_complete(struct device *dev) { struct drm_device *ddev = dev_get_drvdata(dev); + struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL; + + if (!priv || !priv->kms) + return; drm_mode_config_helper_resume(ddev); } @@ -1311,6 +1319,10 @@ static int msm_pdev_remove(struct platform_device *pdev) static void msm_pdev_shutdown(struct platform_device *pdev) { struct drm_device *drm = platform_get_drvdata(pdev); + struct msm_drm_private *priv = drm ? drm->dev_private : NULL; + + if (!priv || !priv->kms) + return; drm_atomic_helper_shutdown(drm); } diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c index ad2703698b05..cd59a5918038 100644 --- a/drivers/gpu/drm/msm/msm_fence.c +++ b/drivers/gpu/drm/msm/msm_fence.c @@ -45,7 +45,7 @@ int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence, int ret; if (fence > fctx->last_fence) { - DRM_ERROR("%s: waiting on invalid fence: %u (of %u)\n", + DRM_ERROR_RATELIMITED("%s: waiting on invalid fence: %u (of %u)\n", fctx->name, fence, fctx->last_fence); return -EINVAL; } diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index 4735251a394d..d8151a89e163 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -157,7 +157,6 @@ struct msm_kms { * from the crtc's pending_timer close to end of the frame: */ struct mutex commit_lock[MAX_CRTCS]; - struct lock_class_key commit_lock_keys[MAX_CRTCS]; unsigned pending_crtc_mask; struct msm_pending_timer pending_timers[MAX_CRTCS]; }; @@ -167,11 +166,8 @@ static inline int msm_kms_init(struct msm_kms *kms, { unsigned i, ret; - for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++) { - lockdep_register_key(&kms->commit_lock_keys[i]); - __mutex_init(&kms->commit_lock[i], "&kms->commit_lock[i]", - &kms->commit_lock_keys[i]); - } + for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++) + mutex_init(&kms->commit_lock[i]); kms->funcs = funcs; diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 196612addfd6..1c9c0cdf85db 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -2693,9 +2693,20 @@ nv50_display_create(struct drm_device *dev) else nouveau_display(dev)->format_modifiers = disp50xx_modifiers; - if (disp->disp->object.oclass >= GK104_DISP) { + /* FIXME: 256x256 cursors are supported on Kepler, however unlike Maxwell and later + * generations Kepler requires that we use small pages (4K) for cursor scanout surfaces. The + * proper fix for this is to teach nouveau to migrate fbs being used for the cursor plane to + * small page allocations in prepare_fb(). When this is implemented, we should also force + * large pages (128K) for ovly fbs in order to fix Kepler ovlys. + * But until then, just limit cursors to 128x128 - which is small enough to avoid ever using + * large pages. 
+ */ + if (disp->disp->object.oclass >= GM107_DISP) { dev->mode_config.cursor_width = 256; dev->mode_config.cursor_height = 256; + } else if (disp->disp->object.oclass >= GK104_DISP) { + dev->mode_config.cursor_width = 128; + dev->mode_config.cursor_height = 128; } else { dev->mode_config.cursor_width = 64; dev->mode_config.cursor_height = 64; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 2375711877cf..f2720a006199 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -551,12 +551,17 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo) if (!ttm_dma) return; + if (!ttm_dma->pages) { + NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma); + return; + } /* Don't waste time looping if the object is coherent */ if (nvbo->force_coherent) return; - for (i = 0; i < ttm_dma->num_pages; ++i) { + i = 0; + while (i < ttm_dma->num_pages) { struct page *p = ttm_dma->pages[i]; size_t num_pages = 1; @@ -582,12 +587,17 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo) if (!ttm_dma) return; + if (!ttm_dma->pages) { + NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma); + return; + } /* Don't waste time looping if the object is coherent */ if (nvbo->force_coherent) return; - for (i = 0; i < ttm_dma->num_pages; ++i) { + i = 0; + while (i < ttm_dma->num_pages) { struct page *p = ttm_dma->pages[i]; size_t num_pages = 1; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c index 69da601f1754..e771bd519ee2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c @@ -261,6 +261,9 @@ gk104_fifo_pbdma = { struct nvkm_engine * gk104_fifo_id_engine(struct nvkm_fifo *base, int engi) { + if (engi == GK104_FIFO_ENGN_SW) + return nvkm_device_engine(base->engine.subdev.device, NVKM_ENGINE_SW, 0); + return gk104_fifo(base)->engine[engi].engine; } diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 8e11612f5fe1..b31d750c425a 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -2149,11 +2149,12 @@ static int dsi_vc_send_short(struct dsi_data *dsi, int vc, const struct mipi_dsi_msg *msg) { struct mipi_dsi_packet pkt; + int ret; u32 r; - r = mipi_dsi_create_packet(&pkt, msg); - if (r < 0) - return r; + ret = mipi_dsi_create_packet(&pkt, msg); + if (ret < 0) + return ret; WARN_ON(!dsi_bus_is_locked(dsi)); diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 012bce0cdb65..10738e04c09b 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -328,6 +328,7 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc, head.id = i; head.flags = 0; + head.surface_id = 0; oldcount = qdev->monitors_config->count; if (crtc->state->active) { struct drm_display_mode *mode = &crtc->mode; diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index 0fcfc952d5e9..b372455e2729 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -321,7 +321,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, int type, struct qxl_release **release, struct qxl_bo **rbo) { - struct qxl_bo *bo; + struct qxl_bo *bo, *free_bo = NULL; int idr_ret; int ret = 0; union qxl_release_info *info; @@ -347,7 +347,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, 
mutex_lock(&qdev->release_mutex); if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) { - qxl_bo_unref(&qdev->current_release_bo[cur_idx]); + free_bo = qdev->current_release_bo[cur_idx]; qdev->current_release_bo_offset[cur_idx] = 0; qdev->current_release_bo[cur_idx] = NULL; } @@ -355,6 +355,10 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]); if (ret) { mutex_unlock(&qdev->release_mutex); + if (free_bo) { + qxl_bo_unpin(free_bo); + qxl_bo_unref(&free_bo); + } qxl_release_free(qdev, *release); return ret; } @@ -370,6 +374,10 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, *rbo = bo; mutex_unlock(&qdev->release_mutex); + if (free_bo) { + qxl_bo_unpin(free_bo); + qxl_bo_unref(&free_bo); + } ret = qxl_release_list_add(*release, bo); qxl_bo_unref(&bo); diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index f09989bdce98..3effc8c71494 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -574,6 +574,8 @@ struct radeon_gem { struct list_head objects; }; +extern const struct drm_gem_object_funcs radeon_gem_object_funcs; + int radeon_gem_init(struct radeon_device *rdev); void radeon_gem_fini(struct radeon_device *rdev); int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size, diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 941826923247..db14a82a2e4b 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -43,7 +43,7 @@ struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj); int radeon_gem_prime_pin(struct drm_gem_object *obj); void radeon_gem_prime_unpin(struct drm_gem_object *obj); -static const struct drm_gem_object_funcs radeon_gem_object_funcs; +const struct drm_gem_object_funcs radeon_gem_object_funcs; static void radeon_gem_object_free(struct drm_gem_object *gobj) { @@ -227,7 +227,7 @@ static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r) return r; } -static const struct drm_gem_object_funcs radeon_gem_object_funcs = { +const struct drm_gem_object_funcs radeon_gem_object_funcs = { .free = radeon_gem_object_free, .open = radeon_gem_object_open, .close = radeon_gem_object_close, diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c index ab29eb9e8667..42a87948e28c 100644 --- a/drivers/gpu/drm/radeon/radeon_prime.c +++ b/drivers/gpu/drm/radeon/radeon_prime.c @@ -56,6 +56,8 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev, if (ret) return ERR_PTR(ret); + bo->tbo.base.funcs = &radeon_gem_object_funcs; + mutex_lock(&rdev->gem.mutex); list_add_tail(&bo->list, &rdev->gem.objects); mutex_unlock(&rdev->gem.mutex); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c index ba8c6038cd63..ca3761772211 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c @@ -48,21 +48,12 @@ static unsigned int rcar_du_encoder_count_ports(struct device_node *node) static const struct drm_encoder_funcs rcar_du_encoder_funcs = { }; -static void rcar_du_encoder_release(struct drm_device *dev, void *res) -{ - struct rcar_du_encoder *renc = res; - - drm_encoder_cleanup(&renc->base); - kfree(renc); -} - int rcar_du_encoder_init(struct rcar_du_device *rcdu, enum rcar_du_output output, struct device_node *enc_node) { 
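/*
 * Illustrative sketch, not part of the patch: the rcar-du hunk below replaces
 * manual drm_encoder_init() plus a drmm cleanup action with the DRM-managed
 * encoder allocator. The general shape, assuming a driver struct that embeds
 * struct drm_encoder as "base"; my_encoder, my_encoder_funcs and
 * my_encoder_create() are placeholder names, and the failure convention of
 * the allocator should be checked against the drm_encoder.h in your tree.
 */
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
#include <linux/err.h>

struct my_encoder {
	struct drm_encoder base;
	unsigned int output;
};

static const struct drm_encoder_funcs my_encoder_funcs = {
	/* no .destroy: the managed allocator frees the encoder automatically */
};

static int my_encoder_create(struct drm_device *drm, unsigned int output)
{
	struct my_encoder *enc;

	enc = drmm_encoder_alloc(drm, struct my_encoder, base,
				 &my_encoder_funcs, DRM_MODE_ENCODER_NONE, NULL);
	if (IS_ERR_OR_NULL(enc))	/* defensive: handles ERR_PTR or NULL returns */
		return enc ? PTR_ERR(enc) : -ENOMEM;

	enc->output = output;		/* driver state is filled in after allocation */
	return 0;
}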
struct rcar_du_encoder *renc; struct drm_bridge *bridge; - int ret; /* * Locate the DRM bridge from the DT node. For the DPAD outputs, if the @@ -101,26 +92,16 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu, return -ENOLINK; } - renc = kzalloc(sizeof(*renc), GFP_KERNEL); - if (renc == NULL) - return -ENOMEM; - - renc->output = output; - dev_dbg(rcdu->dev, "initializing encoder %pOF for output %u\n", enc_node, output); - ret = drm_encoder_init(&rcdu->ddev, &renc->base, &rcar_du_encoder_funcs, - DRM_MODE_ENCODER_NONE, NULL); - if (ret < 0) { - kfree(renc); - return ret; - } + renc = drmm_encoder_alloc(&rcdu->ddev, struct rcar_du_encoder, base, + &rcar_du_encoder_funcs, DRM_MODE_ENCODER_NONE, + NULL); + if (!renc) + return -ENOMEM; - ret = drmm_add_action_or_reset(&rcdu->ddev, rcar_du_encoder_release, - renc); - if (ret) - return ret; + renc->output = output; /* * Attach the bridge to the encoder. The bridge will create the diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index 0ae3a025efe9..134986dc2783 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -1688,6 +1688,11 @@ static void tegra_dc_commit_state(struct tegra_dc *dc, dev_err(dc->dev, "failed to set clock rate to %lu Hz\n", state->pclk); + + err = clk_set_rate(dc->clk, state->pclk); + if (err < 0) + dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n", + dc->clk, state->pclk, err); } DRM_DEBUG_KMS("rate: %lu, div: %u\n", clk_get_rate(dc->clk), @@ -1698,11 +1703,6 @@ static void tegra_dc_commit_state(struct tegra_dc *dc, value = SHIFT_CLK_DIVIDER(state->div) | PIXEL_CLK_DIVIDER_PCD1; tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL); } - - err = clk_set_rate(dc->clk, state->pclk); - if (err < 0) - dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n", - dc->clk, state->pclk, err); } static void tegra_dc_stop(struct tegra_dc *dc) @@ -2501,22 +2501,18 @@ static int tegra_dc_couple(struct tegra_dc *dc) * POWER_CONTROL registers during CRTC enabling. */ if (dc->soc->coupled_pm && dc->pipe == 1) { - u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER; - struct device_link *link; - struct device *partner; + struct device *companion; + struct tegra_dc *parent; - partner = driver_find_device(dc->dev->driver, NULL, NULL, - tegra_dc_match_by_pipe); - if (!partner) + companion = driver_find_device(dc->dev->driver, NULL, (const void *)0, + tegra_dc_match_by_pipe); + if (!companion) return -EPROBE_DEFER; - link = device_link_add(dc->dev, partner, flags); - if (!link) { - dev_err(dc->dev, "failed to link controllers\n"); - return -EINVAL; - } + parent = dev_get_drvdata(companion); + dc->client.parent = &parent->client; - dev_dbg(dc->dev, "coupled to %s\n", dev_name(partner)); + dev_dbg(dc->dev, "coupled to %s\n", dev_name(companion)); } return 0; diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index f02a035dda45..7b88261f57bb 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -3115,6 +3115,12 @@ static int tegra_sor_init(struct host1x_client *client) * kernel is possible. 
*/ if (sor->rst) { + err = pm_runtime_resume_and_get(sor->dev); + if (err < 0) { + dev_err(sor->dev, "failed to get runtime PM: %d\n", err); + return err; + } + err = reset_control_acquire(sor->rst); if (err < 0) { dev_err(sor->dev, "failed to acquire SOR reset: %d\n", @@ -3148,6 +3154,7 @@ static int tegra_sor_init(struct host1x_client *client) } reset_control_release(sor->rst); + pm_runtime_put(sor->dev); } err = clk_prepare_enable(sor->clk_safe); diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c index 33f65f4626e5..23866a54e3f9 100644 --- a/drivers/gpu/drm/tiny/gm12u320.c +++ b/drivers/gpu/drm/tiny/gm12u320.c @@ -83,6 +83,7 @@ MODULE_PARM_DESC(eco_mode, "Turn on Eco mode (less bright, more silent)"); struct gm12u320_device { struct drm_device dev; + struct device *dmadev; struct drm_simple_display_pipe pipe; struct drm_connector conn; unsigned char *cmd_buf; @@ -601,6 +602,22 @@ static const uint64_t gm12u320_pipe_modifiers[] = { DRM_FORMAT_MOD_INVALID }; +/* + * FIXME: Dma-buf sharing requires DMA support by the importing device. + * This function is a workaround to make USB devices work as well. + * See todo.rst for how to fix the issue in the dma-buf framework. + */ +static struct drm_gem_object *gm12u320_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf) +{ + struct gm12u320_device *gm12u320 = to_gm12u320(dev); + + if (!gm12u320->dmadev) + return ERR_PTR(-ENODEV); + + return drm_gem_prime_import_dev(dev, dma_buf, gm12u320->dmadev); +} + DEFINE_DRM_GEM_FOPS(gm12u320_fops); static const struct drm_driver gm12u320_drm_driver = { @@ -614,6 +631,7 @@ static const struct drm_driver gm12u320_drm_driver = { .fops = &gm12u320_fops, DRM_GEM_SHMEM_DRIVER_OPS, + .gem_prime_import = gm12u320_gem_prime_import, }; static const struct drm_mode_config_funcs gm12u320_mode_config_funcs = { @@ -640,15 +658,18 @@ static int gm12u320_usb_probe(struct usb_interface *interface, struct gm12u320_device, dev); if (IS_ERR(gm12u320)) return PTR_ERR(gm12u320); + dev = &gm12u320->dev; + + gm12u320->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev)); + if (!gm12u320->dmadev) + drm_warn(dev, "buffer sharing not supported"); /* not an error */ INIT_DELAYED_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work); mutex_init(&gm12u320->fb_update.lock); - dev = &gm12u320->dev; - ret = drmm_mode_config_init(dev); if (ret) - return ret; + goto err_put_device; dev->mode_config.min_width = GM12U320_USER_WIDTH; dev->mode_config.max_width = GM12U320_USER_WIDTH; @@ -658,15 +679,15 @@ static int gm12u320_usb_probe(struct usb_interface *interface, ret = gm12u320_usb_alloc(gm12u320); if (ret) - return ret; + goto err_put_device; ret = gm12u320_set_ecomode(gm12u320); if (ret) - return ret; + goto err_put_device; ret = gm12u320_conn_init(gm12u320); if (ret) - return ret; + goto err_put_device; ret = drm_simple_display_pipe_init(&gm12u320->dev, &gm12u320->pipe, @@ -676,24 +697,31 @@ static int gm12u320_usb_probe(struct usb_interface *interface, gm12u320_pipe_modifiers, &gm12u320->conn); if (ret) - return ret; + goto err_put_device; drm_mode_config_reset(dev); usb_set_intfdata(interface, dev); ret = drm_dev_register(dev, 0); if (ret) - return ret; + goto err_put_device; drm_fbdev_generic_setup(dev, 0); return 0; + +err_put_device: + put_device(gm12u320->dmadev); + return ret; } static void gm12u320_usb_disconnect(struct usb_interface *interface) { struct drm_device *dev = usb_get_intfdata(interface); + struct gm12u320_device *gm12u320 = to_gm12u320(dev); + 
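/*
 * Illustrative sketch, not part of the patch: the gm12u320 changes above and
 * the udl changes further below both route PRIME imports through the USB
 * interface's DMA-capable parent device (looked up at probe time with
 * usb_intf_get_dma_device()). The common shape looks roughly like this;
 * my_device, to_my_device() and my_gem_prime_import() are placeholders for
 * the driver's own types and callbacks.
 */
#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <linux/err.h>
#include <linux/kernel.h>

struct my_device {
	struct drm_device drm;
	struct device *dmadev;		/* DMA-capable device, may be NULL */
};

#define to_my_device(d) container_of(d, struct my_device, drm)

static struct drm_gem_object *my_gem_prime_import(struct drm_device *dev,
						  struct dma_buf *dma_buf)
{
	struct my_device *mydev = to_my_device(dev);

	if (!mydev->dmadev)
		return ERR_PTR(-ENODEV);	/* no DMA-capable device was found at probe */

	/* import on behalf of the DMA-capable parent rather than the USB interface */
	return drm_gem_prime_import_dev(dev, dma_buf, mydev->dmadev);
}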
put_device(gm12u320->dmadev); + gm12u320->dmadev = NULL; drm_dev_unplug(dev); drm_atomic_helper_shutdown(dev); } diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 20a25660b35b..101a68dc615b 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -136,7 +136,8 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, struct ttm_bo_device *bdev = bo->bdev; struct ttm_resource_manager *man; - dma_resv_assert_held(bo->base.resv); + if (!bo->deleted) + dma_resv_assert_held(bo->base.resv); if (bo->pin_count) { ttm_bo_del_from_lru(bo); @@ -508,8 +509,11 @@ static void ttm_bo_release(struct kref *kref) * Make pinned bos immediately available to * shrinkers, now that they are queued for * destruction. + * + * FIXME: QXL is triggering this. Can be removed when the + * driver is fixed. */ - if (WARN_ON(bo->pin_count)) { + if (WARN_ON_ONCE(bo->pin_count)) { bo->pin_count = 0; ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL); } diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c index 6e27cb1bf48b..4eb6efb8b8c0 100644 --- a/drivers/gpu/drm/ttm/ttm_pool.c +++ b/drivers/gpu/drm/ttm/ttm_pool.c @@ -268,13 +268,13 @@ static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool, /* Remove a pool_type from the global shrinker list and free all pages */ static void ttm_pool_type_fini(struct ttm_pool_type *pt) { - struct page *p, *tmp; + struct page *p; mutex_lock(&shrinker_lock); list_del(&pt->shrinker_list); mutex_unlock(&shrinker_lock); - list_for_each_entry_safe(p, tmp, &pt->pages, lru) + while ((p = ttm_pool_type_take(pt))) ttm_pool_free_page(pt->pool, pt->caching, pt->order, p); } diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index 9269092697d8..5703277c6f52 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -32,6 +32,22 @@ static int udl_usb_resume(struct usb_interface *interface) return drm_mode_config_helper_resume(dev); } +/* + * FIXME: Dma-buf sharing requires DMA support by the importing device. + * This function is a workaround to make USB devices work as well. + * See todo.rst for how to fix the issue in the dma-buf framework. 
+ */ +static struct drm_gem_object *udl_driver_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf) +{ + struct udl_device *udl = to_udl(dev); + + if (!udl->dmadev) + return ERR_PTR(-ENODEV); + + return drm_gem_prime_import_dev(dev, dma_buf, udl->dmadev); +} + DEFINE_DRM_GEM_FOPS(udl_driver_fops); static const struct drm_driver driver = { @@ -40,6 +56,7 @@ static const struct drm_driver driver = { /* GEM hooks */ .fops = &udl_driver_fops, DRM_GEM_SHMEM_DRIVER_OPS, + .gem_prime_import = udl_driver_gem_prime_import, .name = DRIVER_NAME, .desc = DRIVER_DESC, diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h index 875e73551ae9..cc16a13316e4 100644 --- a/drivers/gpu/drm/udl/udl_drv.h +++ b/drivers/gpu/drm/udl/udl_drv.h @@ -50,6 +50,7 @@ struct urb_list { struct udl_device { struct drm_device drm; struct device *dev; + struct device *dmadev; struct drm_simple_display_pipe display_pipe; diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c index 0e2a376cb075..853f147036f6 100644 --- a/drivers/gpu/drm/udl/udl_main.c +++ b/drivers/gpu/drm/udl/udl_main.c @@ -315,6 +315,10 @@ int udl_init(struct udl_device *udl) DRM_DEBUG("\n"); + udl->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev)); + if (!udl->dmadev) + drm_warn(dev, "buffer sharing not supported"); /* not an error */ + mutex_init(&udl->gem_lock); if (!udl_parse_vendor_descriptor(udl)) { @@ -343,12 +347,18 @@ int udl_init(struct udl_device *udl) err: if (udl->urbs.count) udl_free_urb_list(dev); + put_device(udl->dmadev); DRM_ERROR("%d\n", ret); return ret; } int udl_drop_usb(struct drm_device *dev) { + struct udl_device *udl = to_udl(dev); + udl_free_urb_list(dev); + put_device(udl->dmadev); + udl->dmadev = NULL; + return 0; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c index 0a900afc66ff..45c9c6a7f1d6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c @@ -500,8 +500,6 @@ vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf, vm_fault_t ret; pgoff_t fault_page_size; bool write = vmf->flags & FAULT_FLAG_WRITE; - bool is_cow_mapping = - (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; switch (pe_size) { case PE_SIZE_PMD: @@ -518,7 +516,7 @@ vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf, } /* Always do write dirty-tracking and COW on PTE level. 
*/ - if (write && (READ_ONCE(vbo->dirty) || is_cow_mapping)) + if (write && (READ_ONCE(vbo->dirty) || is_cow_mapping(vma->vm_flags))) return VM_FAULT_FALLBACK; ret = ttm_bo_vm_reserve(bo, vmf); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c index 3c03b1746661..cb9975889e2f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c @@ -49,7 +49,7 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma) vma->vm_ops = &vmw_vm_ops; /* Use VM_PFNMAP rather than VM_MIXEDMAP if not a COW mapping */ - if ((vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) != VM_MAYWRITE) + if (!is_cow_mapping(vma->vm_flags)) vma->vm_flags = (vma->vm_flags & ~VM_MIXEDMAP) | VM_PFNMAP; return 0; diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c index 347fb962b6c9..68a766ff0e9d 100644 --- a/drivers/gpu/host1x/bus.c +++ b/drivers/gpu/host1x/bus.c @@ -705,8 +705,9 @@ void host1x_driver_unregister(struct host1x_driver *driver) EXPORT_SYMBOL(host1x_driver_unregister); /** - * host1x_client_register() - register a host1x client + * __host1x_client_register() - register a host1x client * @client: host1x client + * @key: lock class key for the client-specific mutex * * Registers a host1x client with each host1x controller instance. Note that * each client will only match their parent host1x controller and will only be @@ -715,13 +716,14 @@ EXPORT_SYMBOL(host1x_driver_unregister); * device and call host1x_device_init(), which will in turn call each client's * &host1x_client_ops.init implementation. */ -int host1x_client_register(struct host1x_client *client) +int __host1x_client_register(struct host1x_client *client, + struct lock_class_key *key) { struct host1x *host1x; int err; INIT_LIST_HEAD(&client->list); - mutex_init(&client->lock); + __mutex_init(&client->lock, "host1x client lock", key); client->usecount = 0; mutex_lock(&devices_lock); @@ -742,7 +744,7 @@ int host1x_client_register(struct host1x_client *client) return 0; } -EXPORT_SYMBOL(host1x_client_register); +EXPORT_SYMBOL(__host1x_client_register); /** * host1x_client_unregister() - unregister a host1x client diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 3273360f30f7..ec1b9d306ba6 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -744,8 +744,8 @@ static struct cpuidle_state icx_cstates[] __initdata = { .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 128, - .target_residency = 384, + .exit_latency = 170, + .target_residency = 600, .enter = &intel_idle, .enter_s2idle = intel_idle_s2idle, }, { @@ -1156,6 +1156,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &idle_cpu_skl), X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &idle_cpu_skx), X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &idle_cpu_icx), + X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &idle_cpu_icx), X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &idle_cpu_knl), X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &idle_cpu_knl), X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &idle_cpu_bxt), diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index bf7d22fa4be2..e0667c4b3c08 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig @@ -266,6 +266,8 @@ config ADI_AXI_ADC select IIO_BUFFER select IIO_BUFFER_HW_CONSUMER select IIO_BUFFER_DMAENGINE + depends on HAS_IOMEM + depends on OF help Say yes here to build support for Analog Devices Generic AXI ADC IP core. 
The IP core is used for interfacing with @@ -923,6 +925,7 @@ config STM32_ADC_CORE depends on ARCH_STM32 || COMPILE_TEST depends on OF depends on REGULATOR + depends on HAS_IOMEM select IIO_BUFFER select MFD_STM32_TIMERS select IIO_STM32_TIMER_TRIGGER diff --git a/drivers/iio/adc/ab8500-gpadc.c b/drivers/iio/adc/ab8500-gpadc.c index 6f9a3e2d5533..7b5212ba5501 100644 --- a/drivers/iio/adc/ab8500-gpadc.c +++ b/drivers/iio/adc/ab8500-gpadc.c @@ -918,7 +918,7 @@ static int ab8500_gpadc_read_raw(struct iio_dev *indio_dev, return processed; /* Return millivolt or milliamps or millicentigrades */ - *val = processed * 1000; + *val = processed; return IIO_VAL_INT; } diff --git a/drivers/iio/adc/ad7949.c b/drivers/iio/adc/ad7949.c index 5d597e5050f6..1b4b3203e428 100644 --- a/drivers/iio/adc/ad7949.c +++ b/drivers/iio/adc/ad7949.c @@ -91,7 +91,7 @@ static int ad7949_spi_read_channel(struct ad7949_adc_chip *ad7949_adc, int *val, int ret; int i; int bits_per_word = ad7949_adc->resolution; - int mask = GENMASK(ad7949_adc->resolution, 0); + int mask = GENMASK(ad7949_adc->resolution - 1, 0); struct spi_message msg; struct spi_transfer tx[] = { { diff --git a/drivers/iio/adc/qcom-spmi-vadc.c b/drivers/iio/adc/qcom-spmi-vadc.c index 05ff948372b3..07b1a99381d9 100644 --- a/drivers/iio/adc/qcom-spmi-vadc.c +++ b/drivers/iio/adc/qcom-spmi-vadc.c @@ -597,7 +597,7 @@ static const struct vadc_channels vadc_chans[] = { VADC_CHAN_NO_SCALE(P_MUX16_1_3, 1) VADC_CHAN_NO_SCALE(LR_MUX1_BAT_THERM, 0) - VADC_CHAN_NO_SCALE(LR_MUX2_BAT_ID, 0) + VADC_CHAN_VOLT(LR_MUX2_BAT_ID, 0, SCALE_DEFAULT) VADC_CHAN_NO_SCALE(LR_MUX3_XO_THERM, 0) VADC_CHAN_NO_SCALE(LR_MUX4_AMUX_THM1, 0) VADC_CHAN_NO_SCALE(LR_MUX5_AMUX_THM2, 0) diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c index dfa31a23500f..ac90be03332a 100644 --- a/drivers/iio/gyro/mpu3050-core.c +++ b/drivers/iio/gyro/mpu3050-core.c @@ -551,6 +551,8 @@ static irqreturn_t mpu3050_trigger_handler(int irq, void *p) MPU3050_FIFO_R, &fifo_values[offset], toread); + if (ret) + goto out_trigger_unlock; dev_dbg(mpu3050->dev, "%04x %04x %04x %04x %04x\n", diff --git a/drivers/iio/humidity/hid-sensor-humidity.c b/drivers/iio/humidity/hid-sensor-humidity.c index 52f605114ef7..d62705448ae2 100644 --- a/drivers/iio/humidity/hid-sensor-humidity.c +++ b/drivers/iio/humidity/hid-sensor-humidity.c @@ -15,7 +15,10 @@ struct hid_humidity_state { struct hid_sensor_common common_attributes; struct hid_sensor_hub_attribute_info humidity_attr; - s32 humidity_data; + struct { + s32 humidity_data; + u64 timestamp __aligned(8); + } scan; int scale_pre_decml; int scale_post_decml; int scale_precision; @@ -125,9 +128,8 @@ static int humidity_proc_event(struct hid_sensor_hub_device *hsdev, struct hid_humidity_state *humid_st = iio_priv(indio_dev); if (atomic_read(&humid_st->common_attributes.data_ready)) - iio_push_to_buffers_with_timestamp(indio_dev, - &humid_st->humidity_data, - iio_get_time_ns(indio_dev)); + iio_push_to_buffers_with_timestamp(indio_dev, &humid_st->scan, + iio_get_time_ns(indio_dev)); return 0; } @@ -142,7 +144,7 @@ static int humidity_capture_sample(struct hid_sensor_hub_device *hsdev, switch (usage_id) { case HID_USAGE_SENSOR_ATMOSPHERIC_HUMIDITY: - humid_st->humidity_data = *(s32 *)raw_data; + humid_st->scan.humidity_data = *(s32 *)raw_data; return 0; default: diff --git a/drivers/iio/imu/adis16400.c b/drivers/iio/imu/adis16400.c index 54af2ed664f6..785a4ce606d8 100644 --- a/drivers/iio/imu/adis16400.c +++ b/drivers/iio/imu/adis16400.c @@ -462,8 +462,7 @@ 
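/*
 * Illustrative sketch, not part of the patch: the hid-sensor-humidity change
 * above (and the matching hid-sensor-temperature change below) wrap the
 * sample in a struct whose naturally aligned u64 timestamp sits where
 * iio_push_to_buffers_with_timestamp() expects it at the end of the scan.
 * The general shape; my_state and my_push_sample() are placeholder names.
 */
#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
#include <linux/types.h>

struct my_state {
	struct {
		s32 value;
		u64 timestamp __aligned(8);	/* 8-byte aligned slot for the core */
	} scan;
};

static void my_push_sample(struct iio_dev *indio_dev, s32 raw)
{
	struct my_state *st = iio_priv(indio_dev);

	st->scan.value = raw;
	/* pushes the whole scan struct; the timestamp lands in the aligned slot */
	iio_push_to_buffers_with_timestamp(indio_dev, &st->scan,
					   iio_get_time_ns(indio_dev));
}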
static int adis16400_initial_setup(struct iio_dev *indio_dev) if (ret) goto err_ret; - ret = sscanf(indio_dev->name, "adis%u\n", &device_id); - if (ret != 1) { + if (sscanf(indio_dev->name, "adis%u\n", &device_id) != 1) { ret = -EINVAL; goto err_ret; } diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c index 330cf359e0b8..e9e00ce0c6d4 100644 --- a/drivers/iio/light/hid-sensor-prox.c +++ b/drivers/iio/light/hid-sensor-prox.c @@ -23,6 +23,9 @@ struct prox_state { struct hid_sensor_common common_attributes; struct hid_sensor_hub_attribute_info prox_attr; u32 human_presence; + int scale_pre_decml; + int scale_post_decml; + int scale_precision; }; /* Channel definitions */ @@ -93,8 +96,9 @@ static int prox_read_raw(struct iio_dev *indio_dev, ret_type = IIO_VAL_INT; break; case IIO_CHAN_INFO_SCALE: - *val = prox_state->prox_attr.units; - ret_type = IIO_VAL_INT; + *val = prox_state->scale_pre_decml; + *val2 = prox_state->scale_post_decml; + ret_type = prox_state->scale_precision; break; case IIO_CHAN_INFO_OFFSET: *val = hid_sensor_convert_exponent( @@ -234,6 +238,11 @@ static int prox_parse_report(struct platform_device *pdev, HID_USAGE_SENSOR_HUMAN_PRESENCE, &st->common_attributes.sensitivity); + st->scale_precision = hid_sensor_format_scale( + hsdev->usage, + &st->prox_attr, + &st->scale_pre_decml, &st->scale_post_decml); + return ret; } diff --git a/drivers/iio/temperature/hid-sensor-temperature.c b/drivers/iio/temperature/hid-sensor-temperature.c index 81688f1b932f..da9a247097fa 100644 --- a/drivers/iio/temperature/hid-sensor-temperature.c +++ b/drivers/iio/temperature/hid-sensor-temperature.c @@ -15,7 +15,10 @@ struct temperature_state { struct hid_sensor_common common_attributes; struct hid_sensor_hub_attribute_info temperature_attr; - s32 temperature_data; + struct { + s32 temperature_data; + u64 timestamp __aligned(8); + } scan; int scale_pre_decml; int scale_post_decml; int scale_precision; @@ -32,7 +35,7 @@ static const struct iio_chan_spec temperature_channels[] = { BIT(IIO_CHAN_INFO_SAMP_FREQ) | BIT(IIO_CHAN_INFO_HYSTERESIS), }, - IIO_CHAN_SOFT_TIMESTAMP(3), + IIO_CHAN_SOFT_TIMESTAMP(1), }; /* Adjust channel real bits based on report descriptor */ @@ -123,9 +126,8 @@ static int temperature_proc_event(struct hid_sensor_hub_device *hsdev, struct temperature_state *temp_st = iio_priv(indio_dev); if (atomic_read(&temp_st->common_attributes.data_ready)) - iio_push_to_buffers_with_timestamp(indio_dev, - &temp_st->temperature_data, - iio_get_time_ns(indio_dev)); + iio_push_to_buffers_with_timestamp(indio_dev, &temp_st->scan, + iio_get_time_ns(indio_dev)); return 0; } @@ -140,7 +142,7 @@ static int temperature_capture_sample(struct hid_sensor_hub_device *hsdev, switch (usage_id) { case HID_USAGE_SENSOR_DATA_ENVIRONMENTAL_TEMPERATURE: - temp_st->temperature_data = *(s32 *)raw_data; + temp_st->scan.temperature_data = *(s32 *)raw_data; return 0; default: return -EINVAL; diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index be996dba040c..3d194bb60840 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -3651,6 +3651,7 @@ static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv, struct ib_cm_sidr_rep_param *param) { struct ib_mad_send_buf *msg; + unsigned long flags; int ret; lockdep_assert_held(&cm_id_priv->lock); @@ -3676,12 +3677,12 @@ static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv, return ret; } cm_id_priv->id.state = IB_CM_IDLE; - spin_lock_irq(&cm.lock); + 
spin_lock_irqsave(&cm.lock, flags); if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) { rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); RB_CLEAR_NODE(&cm_id_priv->sidr_id_node); } - spin_unlock_irq(&cm.lock); + spin_unlock_irqrestore(&cm.lock, flags); return 0; } diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c index e47c5949013f..ff047eb024ab 100644 --- a/drivers/infiniband/core/uverbs_ioctl.c +++ b/drivers/infiniband/core/uverbs_ioctl.c @@ -91,7 +91,7 @@ void uapi_compute_bundle_size(struct uverbs_api_ioctl_method *method_elm, } /** - * uverbs_alloc() - Quickly allocate memory for use with a bundle + * _uverbs_alloc() - Quickly allocate memory for use with a bundle * @bundle: The bundle * @size: Number of bytes to allocate * @flags: Allocator flags diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 8769e7aa097f..81903749d241 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -3610,13 +3610,13 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id) ep->com.local_addr.ss_family == AF_INET) { err = cxgb4_remove_server_filter( ep->com.dev->rdev.lldi.ports[0], ep->stid, - ep->com.dev->rdev.lldi.rxq_ids[0], 0); + ep->com.dev->rdev.lldi.rxq_ids[0], false); } else { struct sockaddr_in6 *sin6; c4iw_init_wr_wait(ep->com.wr_waitp); err = cxgb4_remove_server( ep->com.dev->rdev.lldi.ports[0], ep->stid, - ep->com.dev->rdev.lldi.rxq_ids[0], 0); + ep->com.dev->rdev.lldi.rxq_ids[0], true); if (err) goto done; err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp, diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index c3934abeb260..ce26f97b2ca2 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -1194,8 +1194,10 @@ static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type) upper_32_bits(dma)); roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG, (u32)ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S); - roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0); + + /* Make sure to write tail first and then head */ roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0); + roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0); } else { roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma); roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG, diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index ebc2a4355fa5..07b8350929cd 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -1116,7 +1116,7 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din, case MLX5_CMD_OP_CREATE_MKEY: MLX5_SET(destroy_mkey_in, din, opcode, MLX5_CMD_OP_DESTROY_MKEY); - MLX5_SET(destroy_mkey_in, in, mkey_index, *obj_id); + MLX5_SET(destroy_mkey_in, din, mkey_index, *obj_id); break; case MLX5_CMD_OP_CREATE_CQ: MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ); @@ -2073,8 +2073,10 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)( num_alloc_xa_entries++; event_sub = kzalloc(sizeof(*event_sub), GFP_KERNEL); - if (!event_sub) + if (!event_sub) { + err = -ENOMEM; goto err; + } list_add_tail(&event_sub->event_list, &sub_list); uverbs_uobject_get(&ev_file->uobj); diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 374698186662..b103555b1f5d 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -1082,7 +1082,7 @@ end: return ret ? 
ret : npages; } -/** +/* * Parse a series of data segments for page fault handling. * * @dev: Pointer to mlx5 IB device diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index ec4b3f6a8222..f5a52a6fae43 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -1078,7 +1078,7 @@ static int _create_kernel_qp(struct mlx5_ib_dev *dev, qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc); MLX5_SET(qpc, qpc, uar_page, uar_index); - MLX5_SET(qpc, qpc, ts_format, MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT); + MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(dev->mdev)); MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); /* Set "fast registration enabled" for all kernel QPs */ @@ -1188,7 +1188,8 @@ static int get_rq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq) } return MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING; } - return MLX5_RQC_TIMESTAMP_FORMAT_DEFAULT; + return fr_supported ? MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING : + MLX5_RQC_TIMESTAMP_FORMAT_DEFAULT; } static int get_sq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq) @@ -1206,7 +1207,8 @@ static int get_sq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq) } return MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING; } - return MLX5_SQC_TIMESTAMP_FORMAT_DEFAULT; + return fr_supported ? MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING : + MLX5_SQC_TIMESTAMP_FORMAT_DEFAULT; } static int get_qp_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq, @@ -1217,7 +1219,8 @@ static int get_qp_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq, MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING || MLX5_CAP_ROCE(dev->mdev, qp_ts_format) == MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME; - int ts_format = MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT; + int ts_format = fr_supported ? 
MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING : + MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT; if (recv_cq && recv_cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION) @@ -1930,6 +1933,7 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, if (qp->flags & IB_QP_CREATE_MANAGED_RECV) MLX5_SET(qpc, qpc, cd_slave_receive, 1); + MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(dev->mdev)); MLX5_SET(qpc, qpc, rq_type, MLX5_SRQ_RQ); MLX5_SET(qpc, qpc, no_sq, 1); MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn); @@ -4873,6 +4877,7 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd, struct mlx5_ib_dev *dev; int has_net_offloads; __be64 *rq_pas0; + int ts_format; void *in; void *rqc; void *wq; @@ -4881,6 +4886,10 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd, dev = to_mdev(pd->device); + ts_format = get_rq_ts_format(dev, to_mcq(init_attr->cq)); + if (ts_format < 0) + return ts_format; + inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas; in = kvzalloc(inlen, GFP_KERNEL); if (!in) @@ -4890,6 +4899,7 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd, rqc = MLX5_ADDR_OF(create_rq_in, in, ctx); MLX5_SET(rqc, rqc, mem_rq_type, MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE); + MLX5_SET(rqc, rqc, ts_format, ts_format); MLX5_SET(rqc, rqc, user_index, rwq->user_index); MLX5_SET(rqc, rqc, cqn, to_mcq(init_attr->cq)->mcq.cqn); MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST); diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig index 452149066792..06b8dc5093f7 100644 --- a/drivers/infiniband/sw/rxe/Kconfig +++ b/drivers/infiniband/sw/rxe/Kconfig @@ -4,6 +4,7 @@ config RDMA_RXE depends on INET && PCI && INFINIBAND depends on INFINIBAND_VIRT_DMA select NET_UDP_TUNNEL + select CRYPTO select CRYPTO_CRC32 help This driver implements the InfiniBand RDMA transport over diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c index a8ac791a1bb9..17a361b8dbb1 100644 --- a/drivers/infiniband/sw/rxe/rxe_comp.c +++ b/drivers/infiniband/sw/rxe/rxe_comp.c @@ -547,6 +547,7 @@ int rxe_completer(void *arg) struct sk_buff *skb = NULL; struct rxe_pkt_info *pkt = NULL; enum comp_state state; + int ret = 0; rxe_add_ref(qp); @@ -554,7 +555,8 @@ int rxe_completer(void *arg) qp->req.state == QP_STATE_RESET) { rxe_drain_resp_pkts(qp, qp->valid && qp->req.state == QP_STATE_ERROR); - goto exit; + ret = -EAGAIN; + goto done; } if (qp->comp.timeout) { @@ -564,8 +566,10 @@ int rxe_completer(void *arg) qp->comp.timeout_retry = 0; } - if (qp->req.need_retry) - goto exit; + if (qp->req.need_retry) { + ret = -EAGAIN; + goto done; + } state = COMPST_GET_ACK; @@ -636,8 +640,6 @@ int rxe_completer(void *arg) break; case COMPST_DONE: - if (pkt) - free_pkt(pkt); goto done; case COMPST_EXIT: @@ -660,7 +662,8 @@ int rxe_completer(void *arg) qp->qp_timeout_jiffies) mod_timer(&qp->retrans_timer, jiffies + qp->qp_timeout_jiffies); - goto exit; + ret = -EAGAIN; + goto done; case COMPST_ERROR_RETRY: /* we come here if the retry timer fired and we did @@ -672,18 +675,18 @@ int rxe_completer(void *arg) */ /* there is nothing to retry in this case */ - if (!wqe || (wqe->state == wqe_state_posted)) - goto exit; + if (!wqe || (wqe->state == wqe_state_posted)) { + pr_warn("Retry attempted without a valid wqe\n"); + ret = -EAGAIN; + goto done; + } /* if we've started a retry, don't start another * retry sequence, unless this is a timeout. 
*/ if (qp->comp.started_retry && - !qp->comp.timeout_retry) { - if (pkt) - free_pkt(pkt); + !qp->comp.timeout_retry) goto done; - } if (qp->comp.retry_cnt > 0) { if (qp->comp.retry_cnt != 7) @@ -704,8 +707,6 @@ int rxe_completer(void *arg) qp->comp.started_retry = 1; rxe_run_task(&qp->req.task, 0); } - if (pkt) - free_pkt(pkt); goto done; } else { @@ -726,8 +727,8 @@ int rxe_completer(void *arg) mod_timer(&qp->rnr_nak_timer, jiffies + rnrnak_jiffies(aeth_syn(pkt) & ~AETH_TYPE_MASK)); - free_pkt(pkt); - goto exit; + ret = -EAGAIN; + goto done; } else { rxe_counter_inc(rxe, RXE_CNT_RNR_RETRY_EXCEEDED); @@ -740,25 +741,15 @@ int rxe_completer(void *arg) WARN_ON_ONCE(wqe->status == IB_WC_SUCCESS); do_complete(qp, wqe); rxe_qp_error(qp); - if (pkt) - free_pkt(pkt); - goto exit; + ret = -EAGAIN; + goto done; } } -exit: - /* we come here if we are done with processing and want the task to - * exit from the loop calling us - */ - WARN_ON_ONCE(skb); - rxe_drop_ref(qp); - return -EAGAIN; - done: - /* we come here if we have processed a packet we want the task to call - * us again to see if there is anything else to do - */ - WARN_ON_ONCE(skb); + if (pkt) + free_pkt(pkt); rxe_drop_ref(qp); - return 0; + + return ret; } diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index 0701bd1ffd1a..01662727dca0 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c @@ -407,14 +407,22 @@ int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb) return 0; } +/* fix up a send packet to match the packets + * received from UDP before looping them back + */ void rxe_loopback(struct sk_buff *skb) { + struct rxe_pkt_info *pkt = SKB_TO_PKT(skb); + if (skb->protocol == htons(ETH_P_IP)) skb_pull(skb, sizeof(struct iphdr)); else skb_pull(skb, sizeof(struct ipv6hdr)); - rxe_rcv(skb); + if (WARN_ON(!ib_device_try_get(&pkt->rxe->ib_dev))) + kfree_skb(skb); + else + rxe_rcv(skb); } struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av, diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c index 45d2f711bce2..7a49e27da23a 100644 --- a/drivers/infiniband/sw/rxe/rxe_recv.c +++ b/drivers/infiniband/sw/rxe/rxe_recv.c @@ -237,8 +237,6 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb) struct rxe_mc_elem *mce; struct rxe_qp *qp; union ib_gid dgid; - struct sk_buff *per_qp_skb; - struct rxe_pkt_info *per_qp_pkt; int err; if (skb->protocol == htons(ETH_P_IP)) @@ -250,10 +248,15 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb) /* lookup mcast group corresponding to mgid, takes a ref */ mcg = rxe_pool_get_key(&rxe->mc_grp_pool, &dgid); if (!mcg) - goto err1; /* mcast group not registered */ + goto drop; /* mcast group not registered */ spin_lock_bh(&mcg->mcg_lock); + /* this is unreliable datagram service so we let + * failures to deliver a multicast packet to a + * single QP happen and just move on and try + * the rest of them on the list + */ list_for_each_entry(mce, &mcg->qp_list, qp_list) { qp = mce->qp; @@ -266,39 +269,47 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb) if (err) continue; - /* for all but the last qp create a new clone of the - * skb and pass to the qp. If an error occurs in the - * checks for the last qp in the list we need to - * free the skb since it hasn't been passed on to - * rxe_rcv_pkt() which would free it later. + /* for all but the last QP create a new clone of the + * skb and pass to the QP. 
Pass the original skb to + * the last QP in the list. */ if (mce->qp_list.next != &mcg->qp_list) { - per_qp_skb = skb_clone(skb, GFP_ATOMIC); - if (WARN_ON(!ib_device_try_get(&rxe->ib_dev))) { - kfree_skb(per_qp_skb); + struct sk_buff *cskb; + struct rxe_pkt_info *cpkt; + + cskb = skb_clone(skb, GFP_ATOMIC); + if (unlikely(!cskb)) continue; + + if (WARN_ON(!ib_device_try_get(&rxe->ib_dev))) { + kfree_skb(cskb); + break; } + + cpkt = SKB_TO_PKT(cskb); + cpkt->qp = qp; + rxe_add_ref(qp); + rxe_rcv_pkt(cpkt, cskb); } else { - per_qp_skb = skb; - /* show we have consumed the skb */ - skb = NULL; + pkt->qp = qp; + rxe_add_ref(qp); + rxe_rcv_pkt(pkt, skb); + skb = NULL; /* mark consumed */ } - - if (unlikely(!per_qp_skb)) - continue; - - per_qp_pkt = SKB_TO_PKT(per_qp_skb); - per_qp_pkt->qp = qp; - rxe_add_ref(qp); - rxe_rcv_pkt(per_qp_pkt, per_qp_skb); } spin_unlock_bh(&mcg->mcg_lock); rxe_drop_ref(mcg); /* drop ref from rxe_pool_get_key. */ -err1: - /* free skb if not consumed */ + if (likely(!skb)) + return; + + /* This only occurs if one of the checks fails on the last + * QP in the list above + */ + +drop: kfree_skb(skb); ib_device_put(&rxe->ib_dev); } diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c index 430dc6975004..da8963a9f044 100644 --- a/drivers/input/joydev.c +++ b/drivers/input/joydev.c @@ -26,7 +26,6 @@ MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); MODULE_DESCRIPTION("Joystick device interfaces"); -MODULE_SUPPORTED_DEVICE("input/js"); MODULE_LICENSE("GPL"); #define JOYDEV_MINOR_BASE 0 diff --git a/drivers/interconnect/bulk.c b/drivers/interconnect/bulk.c index 73e2c8d0a412..448cc536aa79 100644 --- a/drivers/interconnect/bulk.c +++ b/drivers/interconnect/bulk.c @@ -53,7 +53,7 @@ void icc_bulk_put(int num_paths, struct icc_bulk_data *paths) EXPORT_SYMBOL_GPL(icc_bulk_put); /** - * icc_bulk_set() - set bandwidth to a set of paths + * icc_bulk_set_bw() - set bandwidth to a set of paths * @num_paths: the number of icc_bulk_data * @paths: the icc_bulk_data table containing the paths and bandwidth * diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c index 5ad519c9f239..8a1e70e00876 100644 --- a/drivers/interconnect/core.c +++ b/drivers/interconnect/core.c @@ -942,6 +942,8 @@ int icc_link_destroy(struct icc_node *src, struct icc_node *dst) GFP_KERNEL); if (new) src->links = new; + else + ret = -ENOMEM; out: mutex_unlock(&icc_lock); diff --git a/drivers/interconnect/qcom/msm8939.c b/drivers/interconnect/qcom/msm8939.c index dfbec30ed149..20f31a1b4192 100644 --- a/drivers/interconnect/qcom/msm8939.c +++ b/drivers/interconnect/qcom/msm8939.c @@ -131,7 +131,7 @@ DEFINE_QNODE(mas_pcnoc_sdcc_1, MSM8939_MASTER_SDCC_1, 8, -1, -1, MSM8939_PNOC_IN DEFINE_QNODE(mas_pcnoc_sdcc_2, MSM8939_MASTER_SDCC_2, 8, -1, -1, MSM8939_PNOC_INT_1); DEFINE_QNODE(mas_qdss_bam, MSM8939_MASTER_QDSS_BAM, 8, -1, -1, MSM8939_SNOC_QDSS_INT); DEFINE_QNODE(mas_qdss_etr, MSM8939_MASTER_QDSS_ETR, 8, -1, -1, MSM8939_SNOC_QDSS_INT); -DEFINE_QNODE(mas_snoc_cfg, MSM8939_MASTER_SNOC_CFG, 4, 20, -1, MSM8939_SLAVE_SRVC_SNOC); +DEFINE_QNODE(mas_snoc_cfg, MSM8939_MASTER_SNOC_CFG, 4, -1, -1, MSM8939_SLAVE_SRVC_SNOC); DEFINE_QNODE(mas_spdm, MSM8939_MASTER_SPDM, 4, -1, -1, MSM8939_PNOC_MAS_0); DEFINE_QNODE(mas_tcu0, MSM8939_MASTER_TCU0, 16, -1, -1, MSM8939_SLAVE_EBI_CH0, MSM8939_BIMC_SNOC_MAS, MSM8939_SLAVE_AMPSS_L2); DEFINE_QNODE(mas_usb_hs1, MSM8939_MASTER_USB_HS1, 4, -1, -1, MSM8939_PNOC_MAS_1); @@ -156,14 +156,14 @@ DEFINE_QNODE(pcnoc_snoc_mas, MSM8939_PNOC_SNOC_MAS, 8, 29, -1, MSM8939_PNOC_SNOC 
DEFINE_QNODE(pcnoc_snoc_slv, MSM8939_PNOC_SNOC_SLV, 8, -1, 45, MSM8939_SNOC_INT_0, MSM8939_SNOC_INT_BIMC, MSM8939_SNOC_INT_1); DEFINE_QNODE(qdss_int, MSM8939_SNOC_QDSS_INT, 8, -1, -1, MSM8939_SNOC_INT_0, MSM8939_SNOC_INT_BIMC); DEFINE_QNODE(slv_apps_l2, MSM8939_SLAVE_AMPSS_L2, 16, -1, -1, 0); -DEFINE_QNODE(slv_apss, MSM8939_SLAVE_APSS, 4, -1, 20, 0); +DEFINE_QNODE(slv_apss, MSM8939_SLAVE_APSS, 4, -1, -1, 0); DEFINE_QNODE(slv_audio, MSM8939_SLAVE_LPASS, 4, -1, -1, 0); DEFINE_QNODE(slv_bimc_cfg, MSM8939_SLAVE_BIMC_CFG, 4, -1, -1, 0); DEFINE_QNODE(slv_blsp_1, MSM8939_SLAVE_BLSP_1, 4, -1, -1, 0); DEFINE_QNODE(slv_boot_rom, MSM8939_SLAVE_BOOT_ROM, 4, -1, -1, 0); DEFINE_QNODE(slv_camera_cfg, MSM8939_SLAVE_CAMERA_CFG, 4, -1, -1, 0); -DEFINE_QNODE(slv_cats_0, MSM8939_SLAVE_CATS_128, 16, -1, 106, 0); -DEFINE_QNODE(slv_cats_1, MSM8939_SLAVE_OCMEM_64, 8, -1, 107, 0); +DEFINE_QNODE(slv_cats_0, MSM8939_SLAVE_CATS_128, 16, -1, -1, 0); +DEFINE_QNODE(slv_cats_1, MSM8939_SLAVE_OCMEM_64, 8, -1, -1, 0); DEFINE_QNODE(slv_clk_ctl, MSM8939_SLAVE_CLK_CTL, 4, -1, -1, 0); DEFINE_QNODE(slv_crypto_0_cfg, MSM8939_SLAVE_CRYPTO_0_CFG, 4, -1, -1, 0); DEFINE_QNODE(slv_dehr_cfg, MSM8939_SLAVE_DEHR_CFG, 4, -1, -1, 0); @@ -187,20 +187,20 @@ DEFINE_QNODE(slv_sdcc_2, MSM8939_SLAVE_SDCC_2, 4, -1, -1, 0); DEFINE_QNODE(slv_security, MSM8939_SLAVE_SECURITY, 4, -1, -1, 0); DEFINE_QNODE(slv_snoc_cfg, MSM8939_SLAVE_SNOC_CFG, 4, -1, -1, 0); DEFINE_QNODE(slv_spdm, MSM8939_SLAVE_SPDM, 4, -1, -1, 0); -DEFINE_QNODE(slv_srvc_snoc, MSM8939_SLAVE_SRVC_SNOC, 8, -1, 29, 0); +DEFINE_QNODE(slv_srvc_snoc, MSM8939_SLAVE_SRVC_SNOC, 8, -1, -1, 0); DEFINE_QNODE(slv_tcsr, MSM8939_SLAVE_TCSR, 4, -1, -1, 0); DEFINE_QNODE(slv_tlmm, MSM8939_SLAVE_TLMM, 4, -1, -1, 0); DEFINE_QNODE(slv_usb_hs1, MSM8939_SLAVE_USB_HS1, 4, -1, -1, 0); DEFINE_QNODE(slv_usb_hs2, MSM8939_SLAVE_USB_HS2, 4, -1, -1, 0); DEFINE_QNODE(slv_venus_cfg, MSM8939_SLAVE_VENUS_CFG, 4, -1, -1, 0); -DEFINE_QNODE(snoc_bimc_0_mas, MSM8939_SNOC_BIMC_0_MAS, 16, 3, -1, MSM8939_SNOC_BIMC_0_SLV); -DEFINE_QNODE(snoc_bimc_0_slv, MSM8939_SNOC_BIMC_0_SLV, 16, -1, 24, MSM8939_SLAVE_EBI_CH0); +DEFINE_QNODE(snoc_bimc_0_mas, MSM8939_SNOC_BIMC_0_MAS, 16, -1, -1, MSM8939_SNOC_BIMC_0_SLV); +DEFINE_QNODE(snoc_bimc_0_slv, MSM8939_SNOC_BIMC_0_SLV, 16, -1, -1, MSM8939_SLAVE_EBI_CH0); DEFINE_QNODE(snoc_bimc_1_mas, MSM8939_SNOC_BIMC_1_MAS, 16, 76, -1, MSM8939_SNOC_BIMC_1_SLV); DEFINE_QNODE(snoc_bimc_1_slv, MSM8939_SNOC_BIMC_1_SLV, 16, -1, 104, MSM8939_SLAVE_EBI_CH0); DEFINE_QNODE(snoc_bimc_2_mas, MSM8939_SNOC_BIMC_2_MAS, 16, -1, -1, MSM8939_SNOC_BIMC_2_SLV); DEFINE_QNODE(snoc_bimc_2_slv, MSM8939_SNOC_BIMC_2_SLV, 16, -1, -1, MSM8939_SLAVE_EBI_CH0); DEFINE_QNODE(snoc_int_0, MSM8939_SNOC_INT_0, 8, 99, 130, MSM8939_SLAVE_QDSS_STM, MSM8939_SLAVE_IMEM, MSM8939_SNOC_PNOC_MAS); -DEFINE_QNODE(snoc_int_1, MSM8939_SNOC_INT_1, 8, 100, 131, MSM8939_SLAVE_APSS, MSM8939_SLAVE_CATS_128, MSM8939_SLAVE_OCMEM_64); +DEFINE_QNODE(snoc_int_1, MSM8939_SNOC_INT_1, 8, -1, -1, MSM8939_SLAVE_APSS, MSM8939_SLAVE_CATS_128, MSM8939_SLAVE_OCMEM_64); DEFINE_QNODE(snoc_int_bimc, MSM8939_SNOC_INT_BIMC, 8, 101, 132, MSM8939_SNOC_BIMC_1_MAS); DEFINE_QNODE(snoc_pcnoc_mas, MSM8939_SNOC_PNOC_MAS, 8, -1, -1, MSM8939_SNOC_PNOC_SLV); DEFINE_QNODE(snoc_pcnoc_slv, MSM8939_SNOC_PNOC_SLV, 8, -1, -1, MSM8939_PNOC_INT_0); diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index 9126efcbaf2c..321f5906e6ed 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -2714,7 +2714,6 @@ static int __init early_amd_iommu_init(void) struct 
acpi_table_header *ivrs_base; int i, remap_cache_sz, ret; acpi_status status; - u32 pci_id; if (!amd_iommu_detected) return -ENODEV; @@ -2804,16 +2803,6 @@ static int __init early_amd_iommu_init(void) if (ret) goto out; - /* Disable IOMMU if there's Stoney Ridge graphics */ - for (i = 0; i < 32; i++) { - pci_id = read_pci_config(0, i, 0, 0); - if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) { - pr_info("Disable IOMMU on Stoney Ridge\n"); - amd_iommu_disabled = true; - break; - } - } - /* Disable any previously enabled IOMMUs */ if (!is_kdump_kernel() || amd_iommu_disabled) disable_iommus(); @@ -2880,6 +2869,7 @@ static bool detect_ivrs(void) { struct acpi_table_header *ivrs_base; acpi_status status; + int i; status = acpi_get_table("IVRS", 0, &ivrs_base); if (status == AE_NOT_FOUND) @@ -2892,6 +2882,17 @@ static bool detect_ivrs(void) acpi_put_table(ivrs_base); + /* Don't use IOMMU if there is Stoney Ridge graphics */ + for (i = 0; i < 32; i++) { + u32 pci_id; + + pci_id = read_pci_config(0, i, 0, 0); + if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) { + pr_info("Disable IOMMU on Stoney Ridge\n"); + return false; + } + } + /* Make sure ACS will be enabled during PCI probe */ pci_request_acs(); @@ -2918,12 +2919,12 @@ static int __init state_next(void) } break; case IOMMU_IVRS_DETECTED: - ret = early_amd_iommu_init(); - init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED; - if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) { - pr_info("AMD IOMMU disabled\n"); + if (amd_iommu_disabled) { init_state = IOMMU_CMDLINE_DISABLED; ret = -EINVAL; + } else { + ret = early_amd_iommu_init(); + init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED; } break; case IOMMU_ACPI_FINISHED: @@ -3001,8 +3002,11 @@ int __init amd_iommu_prepare(void) amd_iommu_irq_remap = true; ret = iommu_go_to_state(IOMMU_ACPI_FINISHED); - if (ret) + if (ret) { + amd_iommu_irq_remap = false; return ret; + } + return amd_iommu_irq_remap ? 
0 : -ENODEV; } diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c index 1c4961e05c12..bb0ee5c9fde7 100644 --- a/drivers/iommu/amd/io_pgtable.c +++ b/drivers/iommu/amd/io_pgtable.c @@ -182,6 +182,10 @@ static bool increase_address_space(struct protection_domain *domain, bool ret = true; u64 *pte; + pte = (void *)get_zeroed_page(gfp); + if (!pte) + return false; + spin_lock_irqsave(&domain->lock, flags); if (address <= PM_LEVEL_SIZE(domain->iop.mode)) @@ -191,10 +195,6 @@ static bool increase_address_space(struct protection_domain *domain, if (WARN_ON_ONCE(domain->iop.mode == PAGE_MODE_6_LEVEL)) goto out; - pte = (void *)get_zeroed_page(gfp); - if (!pte) - goto out; - *pte = PM_LEVEL_PDE(domain->iop.mode, iommu_virt_to_phys(domain->iop.root)); domain->iop.root = pte; @@ -208,10 +208,12 @@ static bool increase_address_space(struct protection_domain *domain, */ amd_iommu_domain_set_pgtable(domain, pte, domain->iop.mode); + pte = NULL; ret = true; out: spin_unlock_irqrestore(&domain->lock, flags); + free_page((unsigned long)pte); return ret; } diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 9ab6ee22c110..af765c813cc8 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -311,6 +311,11 @@ static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad) domain->ops->flush_iotlb_all(domain); } +static bool dev_is_untrusted(struct device *dev) +{ + return dev_is_pci(dev) && to_pci_dev(dev)->untrusted; +} + /** * iommu_dma_init_domain - Initialise a DMA mapping domain * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() @@ -365,8 +370,9 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, init_iova_domain(iovad, 1UL << order, base_pfn); - if (!cookie->fq_domain && !iommu_domain_get_attr(domain, - DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) { + if (!cookie->fq_domain && (!dev || !dev_is_untrusted(dev)) && + !iommu_domain_get_attr(domain, DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && + attr) { if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, iommu_dma_entry_dtor)) pr_warn("iova flush queue initialization failed\n"); @@ -508,11 +514,6 @@ static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr, iova_align(iovad, size), dir, attrs); } -static bool dev_is_untrusted(struct device *dev) -{ - return dev_is_pci(dev) && to_pci_dev(dev)->untrusted; -} - static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, size_t size, int prot, u64 dma_mask) { diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h index 97dfcffbf495..444c0bec221a 100644 --- a/drivers/iommu/intel/pasid.h +++ b/drivers/iommu/intel/pasid.h @@ -30,8 +30,8 @@ #define VCMD_VRSP_IP 0x1 #define VCMD_VRSP_SC(e) (((e) >> 1) & 0x3) #define VCMD_VRSP_SC_SUCCESS 0 -#define VCMD_VRSP_SC_NO_PASID_AVAIL 1 -#define VCMD_VRSP_SC_INVALID_PASID 1 +#define VCMD_VRSP_SC_NO_PASID_AVAIL 2 +#define VCMD_VRSP_SC_INVALID_PASID 2 #define VCMD_VRSP_RESULT_PASID(e) (((e) >> 8) & 0xfffff) #define VCMD_CMD_OPERAND(e) ((e) << 8) /* diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 4a3f095a1c26..602aab98c079 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -798,10 +798,69 @@ static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain, return SMMU_PFN_PHYS(pfn) + SMMU_OFFSET_IN_PAGE(iova); } +static struct tegra_smmu *tegra_smmu_find(struct device_node *np) +{ + struct platform_device *pdev; + struct tegra_mc *mc; + + pdev = 
of_find_device_by_node(np); + if (!pdev) + return NULL; + + mc = platform_get_drvdata(pdev); + if (!mc) + return NULL; + + return mc->smmu; +} + +static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev, + struct of_phandle_args *args) +{ + const struct iommu_ops *ops = smmu->iommu.ops; + int err; + + err = iommu_fwspec_init(dev, &dev->of_node->fwnode, ops); + if (err < 0) { + dev_err(dev, "failed to initialize fwspec: %d\n", err); + return err; + } + + err = ops->of_xlate(dev, args); + if (err < 0) { + dev_err(dev, "failed to parse SW group ID: %d\n", err); + iommu_fwspec_free(dev); + return err; + } + + return 0; +} + static struct iommu_device *tegra_smmu_probe_device(struct device *dev) { - struct tegra_smmu *smmu = dev_iommu_priv_get(dev); + struct device_node *np = dev->of_node; + struct tegra_smmu *smmu = NULL; + struct of_phandle_args args; + unsigned int index = 0; + int err; + + while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index, + &args) == 0) { + smmu = tegra_smmu_find(args.np); + if (smmu) { + err = tegra_smmu_configure(smmu, dev, &args); + if (err < 0) { + of_node_put(args.np); + return ERR_PTR(err); + } + } + + of_node_put(args.np); + index++; + } + + smmu = dev_iommu_priv_get(dev); if (!smmu) return ERR_PTR(-ENODEV); @@ -1028,6 +1087,16 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev, if (!smmu) return ERR_PTR(-ENOMEM); + /* + * This is a bit of a hack. Ideally we'd want to simply return this + * value. However the IOMMU registration process will attempt to add + * all devices to the IOMMU when bus_set_iommu() is called. In order + * not to rely on global variables to track the IOMMU instance, we + * set it here so that it can be looked up from the .probe_device() + * callback via the IOMMU device's .drvdata field. 
+ */ + mc->smmu = smmu; + size = BITS_TO_LONGS(soc->num_asids) * sizeof(long); smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL); diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index e74fa206240a..15536e321df5 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -8,7 +8,6 @@ config IRQCHIP config ARM_GIC bool select IRQ_DOMAIN_HIERARCHY - select GENERIC_IRQ_MULTI_HANDLER select GENERIC_IRQ_EFFECTIVE_AFF_MASK config ARM_GIC_PM @@ -33,7 +32,6 @@ config GIC_NON_BANKED config ARM_GIC_V3 bool - select GENERIC_IRQ_MULTI_HANDLER select IRQ_DOMAIN_HIERARCHY select PARTITION_PERCPU select GENERIC_IRQ_EFFECTIVE_AFF_MASK @@ -64,7 +62,6 @@ config ARM_NVIC config ARM_VIC bool select IRQ_DOMAIN - select GENERIC_IRQ_MULTI_HANDLER config ARM_VIC_NR int @@ -99,14 +96,12 @@ config ATMEL_AIC_IRQ bool select GENERIC_IRQ_CHIP select IRQ_DOMAIN - select GENERIC_IRQ_MULTI_HANDLER select SPARSE_IRQ config ATMEL_AIC5_IRQ bool select GENERIC_IRQ_CHIP select IRQ_DOMAIN - select GENERIC_IRQ_MULTI_HANDLER select SPARSE_IRQ config I8259 @@ -153,7 +148,6 @@ config DW_APB_ICTL config FARADAY_FTINTC010 bool select IRQ_DOMAIN - select GENERIC_IRQ_MULTI_HANDLER select SPARSE_IRQ config HISILICON_IRQ_MBIGEN @@ -169,7 +163,6 @@ config IMGPDC_IRQ config IXP4XX_IRQ bool select IRQ_DOMAIN - select GENERIC_IRQ_MULTI_HANDLER select SPARSE_IRQ config MADERA_IRQ @@ -186,7 +179,6 @@ config CLPS711X_IRQCHIP bool depends on ARCH_CLPS711X select IRQ_DOMAIN - select GENERIC_IRQ_MULTI_HANDLER select SPARSE_IRQ default y @@ -205,7 +197,6 @@ config OMAP_IRQCHIP config ORION_IRQCHIP bool select IRQ_DOMAIN - select GENERIC_IRQ_MULTI_HANDLER config PIC32_EVIC bool diff --git a/drivers/irqchip/irq-ingenic-tcu.c b/drivers/irqchip/irq-ingenic-tcu.c index 7a7222d4c19c..b938d1d04d96 100644 --- a/drivers/irqchip/irq-ingenic-tcu.c +++ b/drivers/irqchip/irq-ingenic-tcu.c @@ -179,5 +179,6 @@ err_free_tcu: } IRQCHIP_DECLARE(jz4740_tcu_irq, "ingenic,jz4740-tcu", ingenic_tcu_irq_init); IRQCHIP_DECLARE(jz4725b_tcu_irq, "ingenic,jz4725b-tcu", ingenic_tcu_irq_init); +IRQCHIP_DECLARE(jz4760_tcu_irq, "ingenic,jz4760-tcu", ingenic_tcu_irq_init); IRQCHIP_DECLARE(jz4770_tcu_irq, "ingenic,jz4770-tcu", ingenic_tcu_irq_init); IRQCHIP_DECLARE(x1000_tcu_irq, "ingenic,x1000-tcu", ingenic_tcu_irq_init); diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c index b61a8901ef72..ea36bb00be80 100644 --- a/drivers/irqchip/irq-ingenic.c +++ b/drivers/irqchip/irq-ingenic.c @@ -155,6 +155,7 @@ static int __init intc_2chip_of_init(struct device_node *node, { return ingenic_intc_of_init(node, 2); } +IRQCHIP_DECLARE(jz4760_intc, "ingenic,jz4760-intc", intc_2chip_of_init); IRQCHIP_DECLARE(jz4770_intc, "ingenic,jz4770-intc", intc_2chip_of_init); IRQCHIP_DECLARE(jz4775_intc, "ingenic,jz4775-intc", intc_2chip_of_init); IRQCHIP_DECLARE(jz4780_intc, "ingenic,jz4780-intc", intc_2chip_of_init); diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c index 7168778fbbe1..cb0afe897162 100644 --- a/drivers/isdn/capi/kcapi.c +++ b/drivers/isdn/capi/kcapi.c @@ -721,7 +721,7 @@ u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb) * Return value: CAPI result code */ -u16 capi20_get_manufacturer(u32 contr, u8 *buf) +u16 capi20_get_manufacturer(u32 contr, u8 buf[CAPI_MANUFACTURER_LEN]) { struct capi_ctr *ctr; u16 ret; @@ -787,7 +787,7 @@ u16 capi20_get_version(u32 contr, struct capi_version *verp) * Return value: CAPI result code */ -u16 capi20_get_serial(u32 contr, u8 *serial) +u16 capi20_get_serial(u32 contr, u8 
serial[CAPI_SERIAL_LEN]) { struct capi_ctr *ctr; u16 ret; diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c index ec475087fbf9..39f841b42488 100644 --- a/drivers/isdn/hardware/mISDN/mISDNipac.c +++ b/drivers/isdn/hardware/mISDN/mISDNipac.c @@ -694,7 +694,7 @@ isac_release(struct isac_hw *isac) { if (isac->type & IPAC_TYPE_ISACX) WriteISAC(isac, ISACX_MASK, 0xff); - else + else if (isac->type != 0) WriteISAC(isac, ISAC_MASK, 0xff); if (isac->dch.timer.function != NULL) { del_timer(&isac->dch.timer); diff --git a/drivers/leds/trigger/ledtrig-tty.c b/drivers/leds/trigger/ledtrig-tty.c index d2ab6ab080ac..f62db7e520b5 100644 --- a/drivers/leds/trigger/ledtrig-tty.c +++ b/drivers/leds/trigger/ledtrig-tty.c @@ -51,10 +51,8 @@ static ssize_t ttyname_store(struct device *dev, if (size) { ttyname = kmemdup_nul(buf, size, GFP_KERNEL); - if (!ttyname) { - ret = -ENOMEM; - goto out_unlock; - } + if (!ttyname) + return -ENOMEM; } else { ttyname = NULL; } @@ -69,7 +67,6 @@ static ssize_t ttyname_store(struct device *dev, trigger_data->ttyname = ttyname; -out_unlock: mutex_unlock(&trigger_data->mutex); if (ttyname && !running) @@ -125,12 +122,12 @@ static void ledtrig_tty_work(struct work_struct *work) if (icount.rx != trigger_data->rx || icount.tx != trigger_data->tx) { - led_set_brightness(trigger_data->led_cdev, LED_ON); + led_set_brightness_sync(trigger_data->led_cdev, LED_ON); trigger_data->rx = icount.rx; trigger_data->tx = icount.tx; } else { - led_set_brightness(trigger_data->led_cdev, LED_OFF); + led_set_brightness_sync(trigger_data->led_cdev, LED_OFF); } out: diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 71691f32959b..03e1fe4de53d 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -965,7 +965,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size, q->limits.max_hw_sectors = UINT_MAX; q->limits.max_sectors = UINT_MAX; q->limits.max_segment_size = UINT_MAX; - q->limits.max_segments = BIO_MAX_PAGES; + q->limits.max_segments = BIO_MAX_VECS; blk_queue_max_discard_sectors(q, UINT_MAX); q->limits.discard_granularity = 512; q->limits.io_min = block_size; diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index fce4cbf9529d..50f3e673729c 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -1526,6 +1526,10 @@ EXPORT_SYMBOL_GPL(dm_bufio_get_block_size); sector_t dm_bufio_get_device_size(struct dm_bufio_client *c) { sector_t s = i_size_read(c->bdev->bd_inode) >> SECTOR_SHIFT; + if (s >= c->start) + s -= c->start; + else + s = 0; if (likely(c->sectors_per_block_bits >= 0)) s >>= c->sectors_per_block_bits; else diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 11c105ecd165..b0ab080f2567 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -229,7 +229,7 @@ static DEFINE_SPINLOCK(dm_crypt_clients_lock); static unsigned dm_crypt_clients_n = 0; static volatile unsigned long dm_crypt_pages_per_client; #define DM_CRYPT_MEMORY_PERCENT 2 -#define DM_CRYPT_MIN_PAGES_PER_CLIENT (BIO_MAX_PAGES * 16) +#define DM_CRYPT_MIN_PAGES_PER_CLIENT (BIO_MAX_VECS * 16) static void clone_init(struct dm_crypt_io *, struct bio *); static void kcryptd_queue_crypt(struct dm_crypt_io *io); @@ -3246,7 +3246,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size, ARCH_KMALLOC_MINALIGN); - ret = mempool_init(&cc->page_pool, BIO_MAX_PAGES, crypt_page_alloc, crypt_page_free, 
cc); + ret = mempool_init(&cc->page_pool, BIO_MAX_VECS, crypt_page_alloc, crypt_page_free, cc); if (ret) { ti->error = "Cannot allocate page mempool"; goto bad; @@ -3373,9 +3373,9 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) /* * Check if bio is too large, split as needed. */ - if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_PAGES << PAGE_SHIFT)) && + if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_VECS << PAGE_SHIFT)) && (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size)) - dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT)); + dm_accept_partial_bio(bio, ((BIO_MAX_VECS << PAGE_SHIFT) >> SECTOR_SHIFT)); /* * Ensure that bio is a multiple of internal sector encryption size diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 5e306bba4375..1ca65b434f1f 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -529,7 +529,7 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_ * Grab our output buffer. */ nl = orig_nl = get_result_buffer(param, param_size, &len); - if (len < needed) { + if (len < needed || len < sizeof(nl->dev)) { param->flags |= DM_BUFFER_FULL_FLAG; goto out; } diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 95391f78b8d5..e5f0f1703c5d 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1594,6 +1594,13 @@ static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev, return blk_queue_zoned_model(q) != *zoned_model; } +/* + * Check the device zoned model based on the target feature flag. If the target + * has the DM_TARGET_ZONED_HM feature flag set, host-managed zoned devices are + * also accepted but all devices must have the same zoned model. If the target + * has the DM_TARGET_MIXED_ZONED_MODEL feature set, the devices can have any + * zoned model with all zoned devices having the same zone size. + */ static bool dm_table_supports_zoned_model(struct dm_table *t, enum blk_zoned_model zoned_model) { @@ -1603,13 +1610,15 @@ static bool dm_table_supports_zoned_model(struct dm_table *t, for (i = 0; i < dm_table_get_num_targets(t); i++) { ti = dm_table_get_target(t, i); - if (zoned_model == BLK_ZONED_HM && - !dm_target_supports_zoned_hm(ti->type)) - return false; - - if (!ti->type->iterate_devices || - ti->type->iterate_devices(ti, device_not_zoned_model, &zoned_model)) - return false; + if (dm_target_supports_zoned_hm(ti->type)) { + if (!ti->type->iterate_devices || + ti->type->iterate_devices(ti, device_not_zoned_model, + &zoned_model)) + return false; + } else if (!dm_target_supports_mixed_zoned_model(ti->type)) { + if (zoned_model == BLK_ZONED_HM) + return false; + } } return true; @@ -1621,9 +1630,17 @@ static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev * struct request_queue *q = bdev_get_queue(dev->bdev); unsigned int *zone_sectors = data; + if (!blk_queue_is_zoned(q)) + return 0; + return blk_queue_zone_sectors(q) != *zone_sectors; } +/* + * Check consistency of zoned model and zone sectors across all targets. For + * zone sectors, if the destination device is a zoned block device, it shall + * have the specified zone_sectors. 
+ */ static int validate_hardware_zoned_model(struct dm_table *table, enum blk_zoned_model zoned_model, unsigned int zone_sectors) @@ -1642,7 +1659,7 @@ static int validate_hardware_zoned_model(struct dm_table *table, return -EINVAL; if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) { - DMERR("%s: zone sectors is not consistent across all devices", + DMERR("%s: zone sectors is not consistent across all zoned devices", dm_device_name(table->md)); return -EINVAL; } diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c index fb41b4f23c48..66f4c6398f67 100644 --- a/drivers/md/dm-verity-fec.c +++ b/drivers/md/dm-verity-fec.c @@ -61,19 +61,18 @@ static int fec_decode_rs8(struct dm_verity *v, struct dm_verity_fec_io *fio, static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index, unsigned *offset, struct dm_buffer **buf) { - u64 position, block; + u64 position, block, rem; u8 *res; position = (index + rsb) * v->fec->roots; - block = position >> v->data_dev_block_bits; - *offset = (unsigned)(position - (block << v->data_dev_block_bits)); + block = div64_u64_rem(position, v->fec->roots << SECTOR_SHIFT, &rem); + *offset = (unsigned)rem; - res = dm_bufio_read(v->fec->bufio, v->fec->start + block, buf); + res = dm_bufio_read(v->fec->bufio, block, buf); if (IS_ERR(res)) { DMERR("%s: FEC %llu: parity read failed (block %llu): %ld", v->data_dev->name, (unsigned long long)rsb, - (unsigned long long)(v->fec->start + block), - PTR_ERR(res)); + (unsigned long long)block, PTR_ERR(res)); *buf = NULL; } @@ -155,7 +154,7 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio, /* read the next block when we run out of parity bytes */ offset += v->fec->roots; - if (offset >= 1 << v->data_dev_block_bits) { + if (offset >= v->fec->roots << SECTOR_SHIFT) { dm_bufio_release(buf); par = fec_read_parity(v, rsb, block_offset, &offset, &buf); @@ -674,7 +673,7 @@ int verity_fec_ctr(struct dm_verity *v) { struct dm_verity_fec *f = v->fec; struct dm_target *ti = v->ti; - u64 hash_blocks; + u64 hash_blocks, fec_blocks; int ret; if (!verity_fec_is_enabled(v)) { @@ -744,15 +743,17 @@ int verity_fec_ctr(struct dm_verity *v) } f->bufio = dm_bufio_client_create(f->dev->bdev, - 1 << v->data_dev_block_bits, + f->roots << SECTOR_SHIFT, 1, 0, NULL, NULL); if (IS_ERR(f->bufio)) { ti->error = "Cannot initialize FEC bufio client"; return PTR_ERR(f->bufio); } - if (dm_bufio_get_device_size(f->bufio) < - ((f->start + f->rounds * f->roots) >> v->data_dev_block_bits)) { + dm_bufio_set_sector_offset(f->bufio, f->start << (v->data_dev_block_bits - SECTOR_SHIFT)); + + fec_blocks = div64_u64(f->rounds * f->roots, v->fec->roots << SECTOR_SHIFT); + if (dm_bufio_get_device_size(f->bufio) < fec_blocks) { ti->error = "FEC device is too small"; return -E2BIG; } diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 6b8e5bdd8526..808a98ef624c 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -34,7 +34,7 @@ #define DM_VERITY_OPT_IGN_ZEROES "ignore_zero_blocks" #define DM_VERITY_OPT_AT_MOST_ONCE "check_at_most_once" -#define DM_VERITY_OPTS_MAX (2 + DM_VERITY_OPTS_FEC + \ +#define DM_VERITY_OPTS_MAX (3 + DM_VERITY_OPTS_FEC + \ DM_VERITY_ROOT_HASH_VERIFICATION_OPTS) static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE; diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index 844c4be11768..4f72b6f66c3a 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ 
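The dm-verity-fec hunks above move parity reads onto bufio blocks of roots << SECTOR_SHIFT bytes: a parity byte position is split into a block number and an in-block offset with div64_u64_rem(), and the FEC area start is applied once via dm_bufio_set_sector_offset() rather than being added to every block number. A minimal host-side sketch of that split, assuming 8 Reed-Solomon roots and substituting plain 64-bit division for the kernel's div64_u64_rem():

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9
#define ROOTS 8			/* assumed number of RS parity bytes per codeword */

int main(void)
{
	uint64_t block_size = (uint64_t)ROOTS << SECTOR_SHIFT;	/* bytes per parity block */
	uint64_t rsb = 123456, index = 7;

	/* byte position of the parity bytes for this (rsb, index) pair */
	uint64_t position = (index + rsb) * ROOTS;

	/* div64_u64_rem() returns the quotient and stores the remainder */
	uint64_t block = position / block_size;
	unsigned offset = (unsigned)(position % block_size);

	printf("parity block %llu, offset %u (block size %llu bytes)\n",
	       (unsigned long long)block, offset,
	       (unsigned long long)block_size);
	return 0;
}

The same roots << SECTOR_SHIFT unit is used for the fec_blocks device-size check, which keeps both computations consistent.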
-1892,10 +1892,10 @@ restart: list_add(&g->lru, &wbl.list); wbl.size++; g->write_in_progress = true; - g->wc_list_contiguous = BIO_MAX_PAGES; + g->wc_list_contiguous = BIO_MAX_VECS; f = g; e->wc_list_contiguous++; - if (unlikely(e->wc_list_contiguous == BIO_MAX_PAGES)) { + if (unlikely(e->wc_list_contiguous == BIO_MAX_VECS)) { if (unlikely(wc->writeback_all)) { next_node = rb_next(&f->rb_node); if (likely(next_node)) diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index 697f9de37355..7e88df64d197 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -1143,7 +1143,7 @@ static int dmz_message(struct dm_target *ti, unsigned int argc, char **argv, static struct target_type dmz_type = { .name = "zoned", .version = {2, 0, 0}, - .features = DM_TARGET_SINGLETON | DM_TARGET_ZONED_HM, + .features = DM_TARGET_SINGLETON | DM_TARGET_MIXED_ZONED_MODEL, .module = THIS_MODULE, .ctr = dmz_ctr, .dtr = dmz_dtr, diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 50b693d776d6..3f3be9408afa 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2036,7 +2036,10 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, if (size != dm_get_size(md)) memset(&md->geometry, 0, sizeof(md->geometry)); - set_capacity_and_notify(md->disk, size); + if (!get_capacity(md->disk)) + set_capacity(md->disk, size); + else + set_capacity_and_notify(md->disk, size); dm_table_event_callback(t, event_callback, md); diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 4337ae0e6af2..0b5dcaabbc15 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -735,7 +735,7 @@ static void r5l_submit_current_io(struct r5l_log *log) static struct bio *r5l_bio_alloc(struct r5l_log *log) { - struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, &log->bs); + struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_VECS, &log->bs); bio_set_op_attrs(bio, REQ_OP_WRITE, 0); bio_set_dev(bio, log->rdev->bdev); @@ -1634,7 +1634,7 @@ static int r5l_recovery_allocate_ra_pool(struct r5l_log *log, { struct page *page; - ctx->ra_bio = bio_alloc_bioset(GFP_KERNEL, BIO_MAX_PAGES, &log->bs); + ctx->ra_bio = bio_alloc_bioset(GFP_KERNEL, BIO_MAX_VECS, &log->bs); if (!ctx->ra_bio) return -ENOMEM; diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c index e8c118e05dfd..3ddc2aa0b530 100644 --- a/drivers/md/raid5-ppl.c +++ b/drivers/md/raid5-ppl.c @@ -496,7 +496,7 @@ static void ppl_submit_iounit(struct ppl_io_unit *io) if (!bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0)) { struct bio *prev = bio; - bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, + bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_VECS, &ppl_conf->bs); bio->bi_opf = prev->bi_opf; bio->bi_write_hint = prev->bi_write_hint; diff --git a/drivers/media/firewire/firedtv-fw.c b/drivers/media/firewire/firedtv-fw.c index 8a8585261bb8..5f6e97a8d1c0 100644 --- a/drivers/media/firewire/firedtv-fw.c +++ b/drivers/media/firewire/firedtv-fw.c @@ -430,4 +430,3 @@ MODULE_AUTHOR("Andreas Monitzer <andy@monitzer.com>"); MODULE_AUTHOR("Ben Backx <ben@bbackx.com>"); MODULE_DESCRIPTION("FireDTV DVB Driver"); MODULE_LICENSE("GPL"); -MODULE_SUPPORTED_DEVICE("FireDTV DVB"); diff --git a/drivers/media/pci/cx18/cx18-alsa-main.c b/drivers/media/pci/cx18/cx18-alsa-main.c index 692b95a685d1..9a82e68303b6 100644 --- a/drivers/media/pci/cx18/cx18-alsa-main.c +++ b/drivers/media/pci/cx18/cx18-alsa-main.c @@ -41,7 +41,6 @@ MODULE_PARM_DESC(debug, MODULE_AUTHOR("Andy Walls"); MODULE_DESCRIPTION("CX23418 ALSA Interface"); 
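Several hunks above (dm-crypt, dm-writecache, bcache, raid5-cache, raid5-ppl) track the BIO_MAX_PAGES to BIO_MAX_VECS rename. A small host-side sketch of the dm-crypt split threshold that the renamed constant feeds into, with BIO_MAX_VECS assumed to be 256 and a 4 KiB page size assumed for illustration:

#include <stdio.h>

#define BIO_MAX_VECS	256U	/* assumed value of the renamed constant */
#define PAGE_SHIFT	12	/* assumed 4 KiB pages */
#define SECTOR_SHIFT	9

int main(void)
{
	unsigned long max_bytes = (unsigned long)BIO_MAX_VECS << PAGE_SHIFT;
	unsigned long max_sectors = max_bytes >> SECTOR_SHIFT;

	/* crypt_map() accepts at most this much of an oversized write bio */
	printf("split threshold: %lu bytes (%lu sectors)\n", max_bytes, max_sectors);
	return 0;
}

Under these assumptions an oversized write is accepted 1 MiB (2048 sectors) at a time, matching the dm_accept_partial_bio() call in crypt_map().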
-MODULE_SUPPORTED_DEVICE("CX23418 MPEG2 encoder"); MODULE_LICENSE("GPL"); MODULE_VERSION(CX18_VERSION); diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c index 95aed00f353b..f2440eb38820 100644 --- a/drivers/media/pci/cx18/cx18-driver.c +++ b/drivers/media/pci/cx18/cx18-driver.c @@ -232,7 +232,6 @@ MODULE_PARM_DESC(cx18_first_minor, MODULE_AUTHOR("Hans Verkuil"); MODULE_DESCRIPTION("CX23418 driver"); -MODULE_SUPPORTED_DEVICE("CX23418 MPEG2 encoder"); MODULE_LICENSE("GPL"); MODULE_VERSION(CX18_VERSION); diff --git a/drivers/media/pci/cx25821/cx25821-alsa.c b/drivers/media/pci/cx25821/cx25821-alsa.c index 608fbaf0f659..8797d85a6b0a 100644 --- a/drivers/media/pci/cx25821/cx25821-alsa.c +++ b/drivers/media/pci/cx25821/cx25821-alsa.c @@ -104,7 +104,6 @@ MODULE_PARM_DESC(index, "Index value for cx25821 capture interface(s)."); MODULE_DESCRIPTION("ALSA driver module for cx25821 based capture cards"); MODULE_AUTHOR("Hiep Huynh"); MODULE_LICENSE("GPL"); -MODULE_SUPPORTED_DEVICE("{{Conexant,25821}"); /* "{{Conexant,23881}," */ static unsigned int debug; module_param(debug, int, 0644); diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c index 95e0cbb1277d..c83814c052d3 100644 --- a/drivers/media/pci/cx88/cx88-alsa.c +++ b/drivers/media/pci/cx88/cx88-alsa.c @@ -98,7 +98,6 @@ MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>"); MODULE_LICENSE("GPL v2"); MODULE_VERSION(CX88_VERSION); -MODULE_SUPPORTED_DEVICE("{{Conexant,23881},{{Conexant,23882},{{Conexant,23883}"); static unsigned int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "enable debug messages"); diff --git a/drivers/media/pci/ivtv/ivtv-alsa-main.c b/drivers/media/pci/ivtv/ivtv-alsa-main.c index 39029b8e12c9..4cefdb2e4d40 100644 --- a/drivers/media/pci/ivtv/ivtv-alsa-main.c +++ b/drivers/media/pci/ivtv/ivtv-alsa-main.c @@ -38,7 +38,6 @@ MODULE_PARM_DESC(index, MODULE_AUTHOR("Andy Walls"); MODULE_DESCRIPTION("CX23415/CX23416 ALSA Interface"); -MODULE_SUPPORTED_DEVICE("CX23415/CX23416 MPEG2 encoder"); MODULE_LICENSE("GPL"); MODULE_VERSION(IVTV_VERSION); diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c index 6e448cb3b51c..942b8c266f50 100644 --- a/drivers/media/pci/ivtv/ivtv-driver.c +++ b/drivers/media/pci/ivtv/ivtv-driver.c @@ -275,9 +275,6 @@ MODULE_PARM_DESC(ivtv_first_minor, "Set device node number assigned to first car MODULE_AUTHOR("Kevin Thayer, Chris Kennedy, Hans Verkuil"); MODULE_DESCRIPTION("CX23415/CX23416 driver"); -MODULE_SUPPORTED_DEVICE - ("CX23415/CX23416 MPEG2 encoder (WinTV PVR-150/250/350/500,\n" - "\t\t\tYuan MPG series and similar)"); MODULE_LICENSE("GPL"); MODULE_VERSION(IVTV_VERSION); diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c index 336df65c8af1..524912f20d9f 100644 --- a/drivers/media/pci/sta2x11/sta2x11_vip.c +++ b/drivers/media/pci/sta2x11/sta2x11_vip.c @@ -1269,6 +1269,5 @@ late_initcall_sync(sta2x11_vip_init_module); MODULE_DESCRIPTION("STA2X11 Video Input Port driver"); MODULE_AUTHOR("Wind River"); MODULE_LICENSE("GPL v2"); -MODULE_SUPPORTED_DEVICE("sta2x11 video input"); MODULE_VERSION(DRV_VERSION); MODULE_DEVICE_TABLE(pci, sta2x11_vip_pci_tbl); diff --git a/drivers/media/platform/atmel/atmel-isi.c b/drivers/media/platform/atmel/atmel-isi.c index 0514be6153df..e392b3efe363 100644 --- a/drivers/media/platform/atmel/atmel-isi.c +++ b/drivers/media/platform/atmel/atmel-isi.c @@ -1363,4 +1363,3 @@ 
module_platform_driver(atmel_isi_driver); MODULE_AUTHOR("Josh Wu <josh.wu@atmel.com>"); MODULE_DESCRIPTION("The V4L2 driver for Atmel Linux"); MODULE_LICENSE("GPL"); -MODULE_SUPPORTED_DEVICE("video"); diff --git a/drivers/media/platform/atmel/atmel-sama5d2-isc.c b/drivers/media/platform/atmel/atmel-sama5d2-isc.c index 0b78fecfd2a8..61d9885765f4 100644 --- a/drivers/media/platform/atmel/atmel-sama5d2-isc.c +++ b/drivers/media/platform/atmel/atmel-sama5d2-isc.c @@ -330,4 +330,3 @@ module_platform_driver(atmel_isc_driver); MODULE_AUTHOR("Songjun Wu"); MODULE_DESCRIPTION("The V4L2 driver for Atmel-ISC"); MODULE_LICENSE("GPL v2"); -MODULE_SUPPORTED_DEVICE("video"); diff --git a/drivers/media/platform/marvell-ccic/cafe-driver.c b/drivers/media/platform/marvell-ccic/cafe-driver.c index 9c94a8b58b7c..baac86f3d153 100644 --- a/drivers/media/platform/marvell-ccic/cafe-driver.c +++ b/drivers/media/platform/marvell-ccic/cafe-driver.c @@ -44,10 +44,6 @@ MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>"); MODULE_DESCRIPTION("Marvell 88ALP01 CMOS Camera Controller driver"); MODULE_LICENSE("GPL"); -MODULE_SUPPORTED_DEVICE("Video"); - - - struct cafe_camera { int registered; /* Fully initialized? */ diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c index aa5f45749543..a60c302ef267 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c @@ -1288,7 +1288,6 @@ static void rkisp1_params_config_parameter(struct rkisp1_params *params) memset(hst.hist_weight, 0x01, sizeof(hst.hist_weight)); rkisp1_hst_config(params, &hst); rkisp1_param_set_bits(params, RKISP1_CIF_ISP_HIST_PROP, - ~RKISP1_CIF_ISP_HIST_PROP_MODE_MASK | rkisp1_hst_params_default_config.mode); /* set the range */ diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c index bbcc2254fa2e..d9b4ad0abf0c 100644 --- a/drivers/media/platform/stm32/stm32-dcmi.c +++ b/drivers/media/platform/stm32/stm32-dcmi.c @@ -2149,4 +2149,3 @@ MODULE_AUTHOR("Yannick Fertre <yannick.fertre@st.com>"); MODULE_AUTHOR("Hugues Fruchet <hugues.fruchet@st.com>"); MODULE_DESCRIPTION("STMicroelectronics STM32 Digital Camera Memory Interface driver"); MODULE_LICENSE("GPL"); -MODULE_SUPPORTED_DEVICE("video"); diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c index 86d5e3f4b1ff..06f74d410973 100644 --- a/drivers/media/platform/vsp1/vsp1_drm.c +++ b/drivers/media/platform/vsp1/vsp1_drm.c @@ -245,7 +245,7 @@ static int vsp1_du_pipeline_setup_brx(struct vsp1_device *vsp1, brx = &vsp1->bru->entity; else if (pipe->brx && !drm_pipe->force_brx_release) brx = pipe->brx; - else if (!vsp1->bru->entity.pipe) + else if (vsp1_feature(vsp1, VSP1_HAS_BRU) && !vsp1->bru->entity.pipe) brx = &vsp1->bru->entity; else brx = &vsp1->brs->entity; @@ -462,9 +462,9 @@ static int vsp1_du_pipeline_setup_inputs(struct vsp1_device *vsp1, * make sure it is present in the pipeline's list of entities if it * wasn't already. 
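The rkisp1-params.c hunk above drops the stray '~RKISP1_CIF_ISP_HIST_PROP_MODE_MASK |' term from the set-bits call; assuming the helper ORs its argument into the register, the complement of the field mask would have set every bit outside the mode field as well as the mode itself. A host-side sketch with a hypothetical 3-bit mode mask:

#include <stdio.h>

#define HIST_PROP_MODE_MASK 0x7U	/* hypothetical 3-bit mode field */

int main(void)
{
	unsigned int reg = 0, mode = 0x5;

	/* old argument: ~mask | mode also sets every bit outside the field */
	unsigned int old_bits = ~HIST_PROP_MODE_MASK | mode;
	/* new argument: only the requested mode bits */
	unsigned int new_bits = mode;

	printf("old: 0x%08x  new: 0x%08x\n", reg | old_bits, reg | new_bits);
	return 0;
}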
*/ - if (!use_uif) { + if (drm_pipe->uif && !use_uif) { drm_pipe->uif->pipe = NULL; - } else if (!drm_pipe->uif->pipe) { + } else if (drm_pipe->uif && !drm_pipe->uif->pipe) { drm_pipe->uif->pipe = pipe; list_add_tail(&drm_pipe->uif->list_pipe, &pipe->entities); } diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile index 5bb2932ab119..ff6a8fc4c38e 100644 --- a/drivers/media/rc/Makefile +++ b/drivers/media/rc/Makefile @@ -5,6 +5,7 @@ obj-y += keymaps/ obj-$(CONFIG_RC_CORE) += rc-core.o rc-core-y := rc-main.o rc-ir-raw.o rc-core-$(CONFIG_LIRC) += lirc_dev.o +rc-core-$(CONFIG_MEDIA_CEC_RC) += keymaps/rc-cec.o rc-core-$(CONFIG_BPF_LIRC_MODE2) += bpf-lirc.o obj-$(CONFIG_IR_NEC_DECODER) += ir-nec-decoder.o obj-$(CONFIG_IR_RC5_DECODER) += ir-rc5-decoder.o diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile index b252a1d2ebd6..cc6662e1903f 100644 --- a/drivers/media/rc/keymaps/Makefile +++ b/drivers/media/rc/keymaps/Makefile @@ -21,7 +21,6 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \ rc-behold.o \ rc-behold-columbus.o \ rc-budget-ci-old.o \ - rc-cec.o \ rc-cinergy-1400.o \ rc-cinergy.o \ rc-d680-dmb.o \ diff --git a/drivers/media/rc/keymaps/rc-cec.c b/drivers/media/rc/keymaps/rc-cec.c index 3e3bd11092b4..068e22aeac8c 100644 --- a/drivers/media/rc/keymaps/rc-cec.c +++ b/drivers/media/rc/keymaps/rc-cec.c @@ -1,6 +1,16 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* Keytable for the CEC remote control * + * This keymap is unusual in that it can't be built as a module, + * instead it is registered directly in rc-main.c if CONFIG_MEDIA_CEC_RC + * is set. This is because it can be called from drm_dp_cec_set_edid() via + * cec_register_adapter() in an asynchronous context, and it is not + * allowed to use request_module() to load rc-cec.ko in that case. + * + * Since this keymap is only used if CONFIG_MEDIA_CEC_RC is set, we + * just compile this keymap into the rc-core module and never as a + * separate module. 
+ * * Copyright (c) 2015 by Kamil Debski */ @@ -152,7 +162,7 @@ static struct rc_map_table cec[] = { /* 0x77-0xff: Reserved */ }; -static struct rc_map_list cec_map = { +struct rc_map_list cec_map = { .map = { .scan = cec, .size = ARRAY_SIZE(cec), @@ -160,19 +170,3 @@ static struct rc_map_list cec_map = { .name = RC_MAP_CEC, } }; - -static int __init init_rc_map_cec(void) -{ - return rc_map_register(&cec_map); -} - -static void __exit exit_rc_map_cec(void) -{ - rc_map_unregister(&cec_map); -} - -module_init(init_rc_map_cec); -module_exit(exit_rc_map_cec); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Kamil Debski"); diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index 1fd62c1dac76..8e88dc8ea6c5 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c @@ -2069,6 +2069,9 @@ static int __init rc_core_init(void) led_trigger_register_simple("rc-feedback", &led_feedback); rc_map_register(&empty_map); +#ifdef CONFIG_MEDIA_CEC_RC + rc_map_register(&cec_map); +#endif return 0; } @@ -2078,6 +2081,9 @@ static void __exit rc_core_exit(void) lirc_dev_exit(); class_unregister(&rc_class); led_trigger_unregister_simple(led_feedback); +#ifdef CONFIG_MEDIA_CEC_RC + rc_map_unregister(&cec_map); +#endif rc_map_unregister(&empty_map); } diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c index e488e7870f42..69d5c628a797 100644 --- a/drivers/media/usb/cpia2/cpia2_v4l.c +++ b/drivers/media/usb/cpia2/cpia2_v4l.c @@ -56,7 +56,6 @@ MODULE_PARM_DESC(flicker_mode, "Flicker frequency (0 (disabled), " __stringify(5 MODULE_AUTHOR("Steve Miller (STMicroelectronics) <steve.miller@st.com>"); MODULE_DESCRIPTION("V4L-driver for STMicroelectronics CPiA2 based cameras"); -MODULE_SUPPORTED_DEVICE("video"); MODULE_LICENSE("GPL"); MODULE_VERSION(CPIA_VERSION); diff --git a/drivers/media/usb/tm6000/tm6000-alsa.c b/drivers/media/usb/tm6000/tm6000-alsa.c index 3a2df36ef1db..a19a46770c2b 100644 --- a/drivers/media/usb/tm6000/tm6000-alsa.c +++ b/drivers/media/usb/tm6000/tm6000-alsa.c @@ -51,7 +51,6 @@ MODULE_PARM_DESC(index, "Index value for tm6000x capture interface(s)."); MODULE_DESCRIPTION("ALSA driver module for tm5600/tm6000/tm6010 based TV cards"); MODULE_AUTHOR("Mauro Carvalho Chehab"); MODULE_LICENSE("GPL v2"); -MODULE_SUPPORTED_DEVICE("{{Trident,tm5600},{{Trident,tm6000},{{Trident,tm6010}"); static unsigned int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "enable debug messages"); diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c index 293a460f4616..4990fa886d7a 100644 --- a/drivers/media/usb/tm6000/tm6000-dvb.c +++ b/drivers/media/usb/tm6000/tm6000-dvb.c @@ -23,8 +23,6 @@ MODULE_DESCRIPTION("DVB driver extension module for tm5600/6000/6010 based TV ca MODULE_AUTHOR("Mauro Carvalho Chehab"); MODULE_LICENSE("GPL"); -MODULE_SUPPORTED_DEVICE("{{Trident, tm5600},{{Trident, tm6000},{{Trident, tm6010}"); - static int debug; module_param(debug, int, 0644); diff --git a/drivers/media/usb/usbtv/usbtv-audio.c b/drivers/media/usb/usbtv/usbtv-audio.c index b57e94fb1977..333bd305a4f9 100644 --- a/drivers/media/usb/usbtv/usbtv-audio.c +++ b/drivers/media/usb/usbtv/usbtv-audio.c @@ -371,7 +371,7 @@ void usbtv_audio_free(struct usbtv *usbtv) cancel_work_sync(&usbtv->snd_trigger); if (usbtv->snd && usbtv->udev) { - snd_card_free(usbtv->snd); + snd_card_free_when_closed(usbtv->snd); usbtv->snd = NULL; } } diff --git a/drivers/mfd/intel_quark_i2c_gpio.c b/drivers/mfd/intel_quark_i2c_gpio.c index 
fe8ca945f367..b67cb0a3ab05 100644 --- a/drivers/mfd/intel_quark_i2c_gpio.c +++ b/drivers/mfd/intel_quark_i2c_gpio.c @@ -72,7 +72,8 @@ static const struct dmi_system_id dmi_platform_info[] = { {} }; -static const struct resource intel_quark_i2c_res[] = { +/* This is used as a place holder and will be modified at run-time */ +static struct resource intel_quark_i2c_res[] = { [INTEL_QUARK_IORES_MEM] = { .flags = IORESOURCE_MEM, }, @@ -85,7 +86,8 @@ static struct mfd_cell_acpi_match intel_quark_acpi_match_i2c = { .adr = MFD_ACPI_MATCH_I2C, }; -static const struct resource intel_quark_gpio_res[] = { +/* This is used as a place holder and will be modified at run-time */ +static struct resource intel_quark_gpio_res[] = { [INTEL_QUARK_IORES_MEM] = { .flags = IORESOURCE_MEM, }, diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index f12e909034ac..beda610e6b30 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -950,6 +950,11 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel, if (!fl->cctx->rpdev) return -EPIPE; + if (handle == FASTRPC_INIT_HANDLE && !kernel) { + dev_warn_ratelimited(fl->sctx->dev, "user app trying to send a kernel RPC message (%d)\n", handle); + return -EPERM; + } + ctx = fastrpc_context_alloc(fl, kernel, sc, args); if (IS_ERR(ctx)) return PTR_ERR(ctx); diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c index df847a6d19f4..9f19bee7b592 100644 --- a/drivers/misc/habanalabs/common/debugfs.c +++ b/drivers/misc/habanalabs/common/debugfs.c @@ -992,7 +992,6 @@ void hl_debugfs_add_device(struct hl_device *hdev) struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs; int count = ARRAY_SIZE(hl_debugfs_list); struct hl_debugfs_entry *entry; - struct dentry *ent; int i; dev_entry->hdev = hdev; @@ -1105,13 +1104,11 @@ void hl_debugfs_add_device(struct hl_device *hdev) &hl_security_violations_fops); for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) { - - ent = debugfs_create_file(hl_debugfs_list[i].name, + debugfs_create_file(hl_debugfs_list[i].name, 0444, dev_entry->root, entry, &hl_debugfs_fops); - entry->dent = ent; entry->info_ent = &hl_debugfs_list[i]; entry->dev_entry = dev_entry; } diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c index 15fcb5c31c4b..334009e83823 100644 --- a/drivers/misc/habanalabs/common/device.c +++ b/drivers/misc/habanalabs/common/device.c @@ -93,12 +93,19 @@ void hl_hpriv_put(struct hl_fpriv *hpriv) static int hl_device_release(struct inode *inode, struct file *filp) { struct hl_fpriv *hpriv = filp->private_data; + struct hl_device *hdev = hpriv->hdev; + + filp->private_data = NULL; + + if (!hdev) { + pr_crit("Closing FD after device was removed. 
Memory leak will occur and it is advised to reboot.\n"); + put_pid(hpriv->taskpid); + return 0; + } hl_cb_mgr_fini(hpriv->hdev, &hpriv->cb_mgr); hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr); - filp->private_data = NULL; - hl_hpriv_put(hpriv); return 0; @@ -107,15 +114,20 @@ static int hl_device_release(struct inode *inode, struct file *filp) static int hl_device_release_ctrl(struct inode *inode, struct file *filp) { struct hl_fpriv *hpriv = filp->private_data; - struct hl_device *hdev; + struct hl_device *hdev = hpriv->hdev; filp->private_data = NULL; - hdev = hpriv->hdev; + if (!hdev) { + pr_err("Closing FD after device was removed\n"); + goto out; + } mutex_lock(&hdev->fpriv_list_lock); list_del(&hpriv->dev_node); mutex_unlock(&hdev->fpriv_list_lock); +out: + put_pid(hpriv->taskpid); kfree(hpriv); @@ -134,8 +146,14 @@ static int hl_device_release_ctrl(struct inode *inode, struct file *filp) static int hl_mmap(struct file *filp, struct vm_area_struct *vma) { struct hl_fpriv *hpriv = filp->private_data; + struct hl_device *hdev = hpriv->hdev; unsigned long vm_pgoff; + if (!hdev) { + pr_err_ratelimited("Trying to mmap after device was removed! Please close FD\n"); + return -ENODEV; + } + vm_pgoff = vma->vm_pgoff; vma->vm_pgoff = HL_MMAP_OFFSET_VALUE_GET(vm_pgoff); @@ -883,6 +901,16 @@ wait_for_processes: return -EBUSY; } +static void device_disable_open_processes(struct hl_device *hdev) +{ + struct hl_fpriv *hpriv; + + mutex_lock(&hdev->fpriv_list_lock); + list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) + hpriv->hdev = NULL; + mutex_unlock(&hdev->fpriv_list_lock); +} + /* * hl_device_reset - reset the device * @@ -1556,8 +1584,10 @@ void hl_device_fini(struct hl_device *hdev) HL_PENDING_RESET_LONG_SEC); rc = device_kill_open_processes(hdev, HL_PENDING_RESET_LONG_SEC); - if (rc) + if (rc) { dev_crit(hdev->dev, "Failed to kill all open processes\n"); + device_disable_open_processes(hdev); + } hl_cb_pool_fini(hdev); diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index d933878b24d1..4b321e4f8059 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -1465,12 +1465,10 @@ struct hl_info_list { /** * struct hl_debugfs_entry - debugfs dentry wrapper. - * @dent: base debugfs entry structure. * @info_ent: dentry realted ops. * @dev_entry: ASIC specific debugfs manager. */ struct hl_debugfs_entry { - struct dentry *dent; const struct hl_info_list *info_ent; struct hl_dbg_device_entry *dev_entry; }; diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c index 03af61cecd37..083a30969c5f 100644 --- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c +++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c @@ -5,6 +5,8 @@ * All Rights Reserved. */ +#define pr_fmt(fmt) "habanalabs: " fmt + #include <uapi/misc/habanalabs.h> #include "habanalabs.h" @@ -682,6 +684,11 @@ long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) const struct hl_ioctl_desc *ioctl = NULL; unsigned int nr = _IOC_NR(cmd); + if (!hdev) { + pr_err_ratelimited("Sending ioctl after device was removed! 
Please close FD\n"); + return -ENODEV; + } + if ((nr >= HL_COMMAND_START) && (nr < HL_COMMAND_END)) { ioctl = &hl_ioctls[nr]; } else { @@ -700,6 +707,11 @@ long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg) const struct hl_ioctl_desc *ioctl = NULL; unsigned int nr = _IOC_NR(cmd); + if (!hdev) { + pr_err_ratelimited("Sending ioctl after device was removed! Please close FD\n"); + return -ENODEV; + } + if (nr == _IOC_NR(HL_IOCTL_INFO)) { ioctl = &hl_ioctls_control[nr]; } else { diff --git a/drivers/misc/habanalabs/common/irq.c b/drivers/misc/habanalabs/common/irq.c index de53fb5f978a..44a0522b59b9 100644 --- a/drivers/misc/habanalabs/common/irq.c +++ b/drivers/misc/habanalabs/common/irq.c @@ -47,7 +47,7 @@ inline u32 hl_cq_inc_ptr(u32 ptr) * Increment ptr by 1. If it reaches the number of event queue * entries, set it to 0 */ -inline u32 hl_eq_inc_ptr(u32 ptr) +static inline u32 hl_eq_inc_ptr(u32 ptr) { ptr++; if (unlikely(ptr == HL_EQ_LENGTH)) diff --git a/drivers/misc/habanalabs/common/mmu/mmu.c b/drivers/misc/habanalabs/common/mmu/mmu.c index 71703a32350f..93c9e5f587e1 100644 --- a/drivers/misc/habanalabs/common/mmu/mmu.c +++ b/drivers/misc/habanalabs/common/mmu/mmu.c @@ -499,18 +499,32 @@ static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr, else /* HL_VA_RANGE_TYPE_DRAM */ p = &prop->dmmu; - /* - * find the correct hop shift field in hl_mmu_properties structure - * in order to determine the right maks for the page offset. - */ - hop0_shift_off = offsetof(struct hl_mmu_properties, hop0_shift); - p = (char *)p + hop0_shift_off; - p = (char *)p + ((hops->used_hops - 1) * sizeof(u64)); - hop_shift = *(u64 *)p; - offset_mask = (1ull << hop_shift) - 1; - addr_mask = ~(offset_mask); - *phys_addr = (tmp_phys_addr & addr_mask) | - (virt_addr & offset_mask); + if ((hops->range_type == HL_VA_RANGE_TYPE_DRAM) && + !is_power_of_2(prop->dram_page_size)) { + u32 bit; + u64 page_offset_mask; + u64 phys_addr_mask; + + bit = __ffs64((u64)prop->dram_page_size); + page_offset_mask = ((1ull << bit) - 1); + phys_addr_mask = ~page_offset_mask; + *phys_addr = (tmp_phys_addr & phys_addr_mask) | + (virt_addr & page_offset_mask); + } else { + /* + * find the correct hop shift field in hl_mmu_properties + * structure in order to determine the right masks + * for the page offset. 
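In the hl_mmu_pa_page_with_offset() hunk above, a DRAM range whose page size is not a power of two can no longer derive its masks from a hop shift; instead the lowest set bit of the page size decides how many low virtual-address bits are passed through into the physical address. A host-side sketch of that derivation, using a hypothetical 48 MB page size and __builtin_ctzll() in place of the kernel's __ffs64():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dram_page_size = 48ULL << 20;	/* hypothetical non-power-of-2 page size */
	uint64_t virt = 0x1000123456ULL;	/* hypothetical addresses */
	uint64_t phys_base = 0x8000000000ULL;

	/* lowest set bit of the page size bounds the pass-through offset bits */
	unsigned bit = (unsigned)__builtin_ctzll(dram_page_size);
	uint64_t page_offset_mask = (1ULL << bit) - 1;
	uint64_t phys_addr_mask = ~page_offset_mask;

	uint64_t phys = (phys_base & phys_addr_mask) | (virt & page_offset_mask);
	printf("offset bits: %u, phys: 0x%llx\n", bit, (unsigned long long)phys);
	return 0;
}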
+ */ + hop0_shift_off = offsetof(struct hl_mmu_properties, hop0_shift); + p = (char *)p + hop0_shift_off; + p = (char *)p + ((hops->used_hops - 1) * sizeof(u64)); + hop_shift = *(u64 *)p; + offset_mask = (1ull << hop_shift) - 1; + addr_mask = ~(offset_mask); + *phys_addr = (tmp_phys_addr & addr_mask) | + (virt_addr & offset_mask); + } } int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr) diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c index 2d778d0f011e..c0fe3295c330 100644 --- a/drivers/misc/ibmvmc.c +++ b/drivers/misc/ibmvmc.c @@ -2288,15 +2288,13 @@ crq_failed: return -EPERM; } -static int ibmvmc_remove(struct vio_dev *vdev) +static void ibmvmc_remove(struct vio_dev *vdev) { struct crq_server_adapter *adapter = dev_get_drvdata(&vdev->dev); dev_info(adapter->dev, "Entering remove for UA 0x%x\n", vdev->unit_address); ibmvmc_release_crq_queue(adapter); - - return 0; } static struct vio_device_id ibmvmc_device_table[] = { diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 4378a9b25848..2cc370adb238 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -2286,8 +2286,8 @@ int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp, if (buffer_id == 0) return -EINVAL; - if (!mei_cl_is_connected(cl)) - return -ENODEV; + if (mei_cl_is_connected(cl)) + return -EPROTO; if (cl->dma_mapped) return -EPROTO; @@ -2327,9 +2327,7 @@ int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp, mutex_unlock(&dev->device_lock); wait_event_timeout(cl->wait, - cl->dma_mapped || - cl->status || - !mei_cl_is_connected(cl), + cl->dma_mapped || cl->status, mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); mutex_lock(&dev->device_lock); @@ -2376,8 +2374,9 @@ int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp) return -EOPNOTSUPP; } - if (!mei_cl_is_connected(cl)) - return -ENODEV; + /* do not allow unmap for connected client */ + if (mei_cl_is_connected(cl)) + return -EPROTO; if (!cl->dma_mapped) return -EPROTO; @@ -2405,9 +2404,7 @@ int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp) mutex_unlock(&dev->device_lock); wait_event_timeout(cl->wait, - !cl->dma_mapped || - cl->status || - !mei_cl_is_connected(cl), + !cl->dma_mapped || cl->status, mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); mutex_lock(&dev->device_lock); diff --git a/drivers/misc/pvpanic.c b/drivers/misc/pvpanic.c index 9f350e05ef68..f1655f5ca016 100644 --- a/drivers/misc/pvpanic.c +++ b/drivers/misc/pvpanic.c @@ -140,6 +140,7 @@ static const struct of_device_id pvpanic_mmio_match[] = { { .compatible = "qemu,pvpanic-mmio", }, {} }; +MODULE_DEVICE_TABLE(of, pvpanic_mmio_match); static const struct acpi_device_id pvpanic_device_ids[] = { { "QEMU0001", 0 }, diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c index c2e70b757dd1..4383c262b3f5 100644 --- a/drivers/mmc/core/bus.c +++ b/drivers/mmc/core/bus.c @@ -399,11 +399,6 @@ void mmc_remove_card(struct mmc_card *card) mmc_remove_card_debugfs(card); #endif - if (host->cqe_enabled) { - host->cqe_ops->cqe_disable(host); - host->cqe_enabled = false; - } - if (mmc_card_present(card)) { if (mmc_host_is_spi(card->host)) { pr_info("%s: SPI card removed\n", @@ -416,6 +411,10 @@ void mmc_remove_card(struct mmc_card *card) of_node_put(card->dev.of_node); } + if (host->cqe_enabled) { + host->cqe_ops->cqe_disable(host); + host->cqe_enabled = false; + } + put_device(&card->dev); } - diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 0d80b72ddde8..8741271d3971 100644 --- 
a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -423,10 +423,6 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd) /* EXT_CSD value is in units of 10ms, but we store in ms */ card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME]; - /* Some eMMC set the value too low so set a minimum */ - if (card->ext_csd.part_time && - card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME) - card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME; /* Sleep / awake timeout in 100ns units */ if (sa_shift > 0 && sa_shift <= 0x17) @@ -616,6 +612,17 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd) card->ext_csd.data_sector_size = 512; } + /* + * GENERIC_CMD6_TIME is to be used "unless a specific timeout is defined + * when accessing a specific field", so use it here if there is no + * PARTITION_SWITCH_TIME. + */ + if (!card->ext_csd.part_time) + card->ext_csd.part_time = card->ext_csd.generic_cmd6_time; + /* Some eMMC set the value too low so set a minimum */ + if (card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME) + card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME; + /* eMMC v5 or later */ if (card->ext_csd.rev >= 7) { memcpy(card->ext_csd.fwrev, &ext_csd[EXT_CSD_FIRMWARE_VERSION], diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 17dbc81c221e..984d35055156 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -1242,7 +1242,11 @@ mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c) if (!cmd->busy_timeout) cmd->busy_timeout = 10 * MSEC_PER_SEC; - clks = (unsigned long long)cmd->busy_timeout * host->cclk; + if (cmd->busy_timeout > host->mmc->max_busy_timeout) + clks = (unsigned long long)host->mmc->max_busy_timeout * host->cclk; + else + clks = (unsigned long long)cmd->busy_timeout * host->cclk; + do_div(clks, MSEC_PER_SEC); writel_relaxed(clks, host->base + MMCIDATATIMER); } @@ -2151,6 +2155,10 @@ static int mmci_probe(struct amba_device *dev, mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY; } + /* Variants with mandatory busy timeout in HW needs R1B responses. */ + if (variant->busy_timeout) + mmc->caps |= MMC_CAP_NEED_RSP_BUSY; + /* Prepare a CMD12 - needed to clear the DPSM on some variants. */ host->stop_abort.opcode = MMC_STOP_TRANSMISSION; host->stop_abort.arg = 0; diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c index eb7258293256..f9cfb084c029 100644 --- a/drivers/mtd/maps/sun_uflash.c +++ b/drivers/mtd/maps/sun_uflash.c @@ -32,7 +32,6 @@ MODULE_AUTHOR("Eric Brower <ebrower@usa.net>"); MODULE_DESCRIPTION("User-programmable flash device on Sun Microsystems boardsets"); -MODULE_SUPPORTED_DEVICE(DRIVER_NAME); MODULE_LICENSE("GPL"); MODULE_VERSION("2.1"); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index b09bed554f26..bcd31f458d1a 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -94,7 +94,7 @@ config WIREGUARD select CRYPTO_BLAKE2S_ARM if ARM select CRYPTO_CURVE25519_NEON if ARM && KERNEL_MODE_NEON select CRYPTO_CHACHA_MIPS if CPU_MIPS32_R2 - select CRYPTO_POLY1305_MIPS if CPU_MIPS32 || (CPU_MIPS64 && 64BIT) + select CRYPTO_POLY1305_MIPS if MIPS help WireGuard is a secure, fast, and easy to use replacement for IPSec that uses modern cryptography and clever networking tricks. 
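The mmc_decode_ext_csd() hunk above reorders the PARTITION_SWITCH_TIME handling: convert the EXT_CSD field from 10 ms units, fall back to GENERIC_CMD6_TIME when the field is zero, then enforce the driver minimum. A host-side sketch of the resulting selection, with the 300 ms minimum assumed for illustration:

#include <stdio.h>

#define MMC_MIN_PART_SWITCH_TIME 300	/* ms, assumed value of the minimum */

static unsigned int part_switch_time(unsigned int ext_csd_field,
				     unsigned int generic_cmd6_time_ms)
{
	unsigned int part_time = 10 * ext_csd_field;	/* EXT_CSD value is in 10 ms units */

	if (!part_time)
		part_time = generic_cmd6_time_ms;
	if (part_time < MMC_MIN_PART_SWITCH_TIME)
		part_time = MMC_MIN_PART_SWITCH_TIME;
	return part_time;
}

int main(void)
{
	printf("%u %u %u\n",
	       part_switch_time(0, 250),	/* missing field: generic time, then minimum */
	       part_switch_time(5, 250),	/* 50 ms field: clamped to the minimum */
	       part_switch_time(100, 250));	/* 1000 ms field: used as-is */
	return 0;
}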
It's diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index 8bdc44b7e09a..3c8f665c1558 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -127,6 +127,8 @@ static int com20020pci_probe(struct pci_dev *pdev, int i, ioaddr, ret; struct resource *r; + ret = 0; + if (pci_enable_device(pdev)) return -EIO; @@ -139,6 +141,8 @@ static int com20020pci_probe(struct pci_dev *pdev, priv->ci = ci; mm = &ci->misc_map; + pci_set_drvdata(pdev, priv); + INIT_LIST_HEAD(&priv->list_dev); if (mm->size) { @@ -161,7 +165,7 @@ static int com20020pci_probe(struct pci_dev *pdev, dev = alloc_arcdev(device); if (!dev) { ret = -ENOMEM; - goto out_port; + break; } dev->dev_port = i; @@ -178,7 +182,7 @@ static int com20020pci_probe(struct pci_dev *pdev, pr_err("IO region %xh-%xh already allocated\n", ioaddr, ioaddr + cm->size - 1); ret = -EBUSY; - goto out_port; + goto err_free_arcdev; } /* Dummy access after Reset @@ -216,18 +220,18 @@ static int com20020pci_probe(struct pci_dev *pdev, if (arcnet_inb(ioaddr, COM20020_REG_R_STATUS) == 0xFF) { pr_err("IO address %Xh is empty!\n", ioaddr); ret = -EIO; - goto out_port; + goto err_free_arcdev; } if (com20020_check(dev)) { ret = -EIO; - goto out_port; + goto err_free_arcdev; } card = devm_kzalloc(&pdev->dev, sizeof(struct com20020_dev), GFP_KERNEL); if (!card) { ret = -ENOMEM; - goto out_port; + goto err_free_arcdev; } card->index = i; @@ -253,29 +257,29 @@ static int com20020pci_probe(struct pci_dev *pdev, ret = devm_led_classdev_register(&pdev->dev, &card->tx_led); if (ret) - goto out_port; + goto err_free_arcdev; ret = devm_led_classdev_register(&pdev->dev, &card->recon_led); if (ret) - goto out_port; + goto err_free_arcdev; dev_set_drvdata(&dev->dev, card); ret = com20020_found(dev, IRQF_SHARED); if (ret) - goto out_port; + goto err_free_arcdev; devm_arcnet_led_init(dev, dev->dev_id, i); list_add(&card->list, &priv->list_dev); - } + continue; - pci_set_drvdata(pdev, priv); - - return 0; - -out_port: - com20020pci_remove(pdev); +err_free_arcdev: + free_arcdev(dev); + break; + } + if (ret) + com20020pci_remove(pdev); return ret; } diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index ef474bae47a1..6958830cb983 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -212,18 +212,6 @@ static const struct can_bittiming_const c_can_bittiming_const = { .brp_inc = 1, }; -static inline void c_can_pm_runtime_enable(const struct c_can_priv *priv) -{ - if (priv->device) - pm_runtime_enable(priv->device); -} - -static inline void c_can_pm_runtime_disable(const struct c_can_priv *priv) -{ - if (priv->device) - pm_runtime_disable(priv->device); -} - static inline void c_can_pm_runtime_get_sync(const struct c_can_priv *priv) { if (priv->device) @@ -1335,7 +1323,6 @@ static const struct net_device_ops c_can_netdev_ops = { int register_c_can_dev(struct net_device *dev) { - struct c_can_priv *priv = netdev_priv(dev); int err; /* Deactivate pins to prevent DRA7 DCAN IP from being @@ -1345,28 +1332,19 @@ int register_c_can_dev(struct net_device *dev) */ pinctrl_pm_select_sleep_state(dev->dev.parent); - c_can_pm_runtime_enable(priv); - dev->flags |= IFF_ECHO; /* we support local echo */ dev->netdev_ops = &c_can_netdev_ops; err = register_candev(dev); - if (err) - c_can_pm_runtime_disable(priv); - else + if (!err) devm_can_led_init(dev); - return err; } EXPORT_SYMBOL_GPL(register_c_can_dev); void unregister_c_can_dev(struct net_device *dev) { - struct c_can_priv *priv = 
netdev_priv(dev); - unregister_candev(dev); - - c_can_pm_runtime_disable(priv); } EXPORT_SYMBOL_GPL(unregister_c_can_dev); diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c index 406b4847e5dc..7efb60b50876 100644 --- a/drivers/net/can/c_can/c_can_pci.c +++ b/drivers/net/can/c_can/c_can_pci.c @@ -239,12 +239,13 @@ static void c_can_pci_remove(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct c_can_priv *priv = netdev_priv(dev); + void __iomem *addr = priv->base; unregister_c_can_dev(dev); free_c_can_dev(dev); - pci_iounmap(pdev, priv->base); + pci_iounmap(pdev, addr); pci_disable_msi(pdev); pci_clear_master(pdev); pci_release_regions(pdev); diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index 05f425ceb53a..47b251b1607c 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c @@ -29,6 +29,7 @@ #include <linux/list.h> #include <linux/io.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/clk.h> #include <linux/of.h> #include <linux/of_device.h> @@ -386,6 +387,7 @@ static int c_can_plat_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); + pm_runtime_enable(priv->device); ret = register_c_can_dev(dev); if (ret) { dev_err(&pdev->dev, "registering %s failed (err=%d)\n", @@ -398,6 +400,7 @@ static int c_can_plat_probe(struct platform_device *pdev) return 0; exit_free_device: + pm_runtime_disable(priv->device); free_c_can_dev(dev); exit: dev_err(&pdev->dev, "probe failed\n"); @@ -408,9 +411,10 @@ exit: static int c_can_plat_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); + struct c_can_priv *priv = netdev_priv(dev); unregister_c_can_dev(dev); - + pm_runtime_disable(priv->device); free_c_can_dev(dev); return 0; diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c index 867f6be31230..f5d79e6e5483 100644 --- a/drivers/net/can/dev/netlink.c +++ b/drivers/net/can/dev/netlink.c @@ -355,6 +355,7 @@ static void can_dellink(struct net_device *dev, struct list_head *head) struct rtnl_link_ops can_link_ops __read_mostly = { .kind = "can", + .netns_refund = true, .maxtype = IFLA_CAN_MAX, .policy = can_policy, .setup = can_setup, diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 971ada36e37f..57f3635ad8d7 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -697,11 +697,17 @@ static int flexcan_chip_disable(struct flexcan_priv *priv) static int flexcan_chip_freeze(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; - unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate; + unsigned int timeout; + u32 bitrate = priv->can.bittiming.bitrate; u32 reg; + if (bitrate) + timeout = 1000 * 1000 * 10 / bitrate; + else + timeout = FLEXCAN_TIMEOUT_US / 10; + reg = priv->read(®s->mcr); - reg |= FLEXCAN_MCR_HALT; + reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT; priv->write(reg, ®s->mcr); while (timeout-- && !(priv->read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK)) @@ -1480,10 +1486,13 @@ static int flexcan_chip_start(struct net_device *dev) flexcan_set_bittiming(dev); + /* set freeze, halt */ + err = flexcan_chip_freeze(priv); + if (err) + goto out_chip_disable; + /* MCR * - * enable freeze - * halt now * only supervisor access * enable warning int * enable individual RX masking @@ -1492,9 +1501,8 @@ static int flexcan_chip_start(struct net_device *dev) */ reg_mcr 
= priv->read(®s->mcr); reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff); - reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV | - FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_IRMQ | FLEXCAN_MCR_IDAM_C | - FLEXCAN_MCR_MAXMB(priv->tx_mb_idx); + reg_mcr |= FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_IRMQ | + FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_MAXMB(priv->tx_mb_idx); /* MCR * @@ -1865,10 +1873,14 @@ static int register_flexcandev(struct net_device *dev) if (err) goto out_chip_disable; - /* set freeze, halt and activate FIFO, restrict register access */ + /* set freeze, halt */ + err = flexcan_chip_freeze(priv); + if (err) + goto out_chip_disable; + + /* activate FIFO, restrict register access */ reg = priv->read(®s->mcr); - reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | - FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV; + reg |= FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV; priv->write(reg, ®s->mcr); /* Currently we only support newer versions of this core diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c index 37e05010ca91..74d9899fc904 100644 --- a/drivers/net/can/kvaser_pciefd.c +++ b/drivers/net/can/kvaser_pciefd.c @@ -57,6 +57,7 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices"); #define KVASER_PCIEFD_KCAN_STAT_REG 0x418 #define KVASER_PCIEFD_KCAN_MODE_REG 0x41c #define KVASER_PCIEFD_KCAN_BTRN_REG 0x420 +#define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424 #define KVASER_PCIEFD_KCAN_BTRD_REG 0x428 #define KVASER_PCIEFD_KCAN_PWM_REG 0x430 /* Loopback control register */ @@ -949,6 +950,9 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie) timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer, 0); + /* Disable Bus load reporting */ + iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG); + tx_npackets = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG); if (((tx_npackets >> KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT) & diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 3752520a7d4b..0c8d36bc668c 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -501,9 +501,6 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota) } while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) { - if (rxfs & RXFS_RFL) - netdev_warn(dev, "Rx FIFO 0 Message Lost\n"); - m_can_read_fifo(dev, rxfs); quota--; @@ -876,7 +873,7 @@ static int m_can_rx_peripheral(struct net_device *dev) { struct m_can_classdev *cdev = netdev_priv(dev); - m_can_rx_handler(dev, 1); + m_can_rx_handler(dev, M_CAN_NAPI_WEIGHT); m_can_enable_all_interrupts(cdev); diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c index b7caec769ddb..4147cecfbbd6 100644 --- a/drivers/net/can/m_can/tcan4x5x-core.c +++ b/drivers/net/can/m_can/tcan4x5x-core.c @@ -237,14 +237,14 @@ static int tcan4x5x_init(struct m_can_classdev *cdev) if (ret) return ret; + /* Zero out the MCAN buffers */ + m_can_init_ram(cdev); + ret = regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG, TCAN4X5X_MODE_SEL_MASK, TCAN4X5X_MODE_NORMAL); if (ret) return ret; - /* Zero out the MCAN buffers */ - m_can_init_ram(cdev); - return ret; } diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c index 0df1cdfa6835..1df3c4b54f03 100644 --- a/drivers/net/can/peak_canfd/peak_pciefd_main.c +++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c @@ -21,7 +21,6 @@ MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>"); MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe/M.2 FD family cards"); 
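The flexcan_chip_freeze() hunk above now sets FLEXCAN_MCR_FRZ together with FLEXCAN_MCR_HALT and guards the poll-budget calculation against a zero bitrate, since register_flexcandev() (also above) calls it before the bit timing has been configured. A host-side sketch of the timeout selection, with FLEXCAN_TIMEOUT_US assumed to be 50 as in the driver's other poll loops:

#include <stdio.h>

#define FLEXCAN_TIMEOUT_US 50	/* assumed value, matching the driver's other timeouts */

static unsigned int freeze_timeout(unsigned int bitrate)
{
	/* derive the poll budget from the configured bitrate when it is known */
	if (bitrate)
		return 1000 * 1000 * 10 / bitrate;
	/* otherwise fall back to a fixed budget */
	return FLEXCAN_TIMEOUT_US / 10;
}

int main(void)
{
	printf("500 kbit/s -> %u iterations, unconfigured -> %u iterations\n",
	       freeze_timeout(500000), freeze_timeout(0));
	return 0;
}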
-MODULE_SUPPORTED_DEVICE("PEAK PCAN PCIe/M.2 FD CAN cards"); MODULE_LICENSE("GPL v2"); #define PCIEFD_DRV_NAME "peak_pciefd" diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c index 6f88c9932920..4ab91759a5c6 100644 --- a/drivers/net/can/sja1000/ems_pci.c +++ b/drivers/net/can/sja1000/ems_pci.c @@ -21,7 +21,6 @@ MODULE_AUTHOR("Sebastian Haas <haas@ems-wuenche.com>"); MODULE_DESCRIPTION("Socket-CAN driver for EMS CPC-PCI/PCIe/104P CAN cards"); -MODULE_SUPPORTED_DEVICE("EMS CPC-PCI/PCIe/104P CAN card"); MODULE_LICENSE("GPL v2"); #define EMS_PCI_V1_MAX_CHAN 2 diff --git a/drivers/net/can/sja1000/ems_pcmcia.c b/drivers/net/can/sja1000/ems_pcmcia.c index 770304eaef95..e21b169c14c0 100644 --- a/drivers/net/can/sja1000/ems_pcmcia.c +++ b/drivers/net/can/sja1000/ems_pcmcia.c @@ -21,7 +21,6 @@ MODULE_AUTHOR("Markus Plessing <plessing@ems-wuensche.com>"); MODULE_DESCRIPTION("Socket-CAN driver for EMS CPC-CARD cards"); -MODULE_SUPPORTED_DEVICE("EMS CPC-CARD CAN card"); MODULE_LICENSE("GPL v2"); #define EMS_PCMCIA_MAX_CHAN 2 diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c index 0ea6b711c07b..95fe9ee1ce32 100644 --- a/drivers/net/can/sja1000/kvaser_pci.c +++ b/drivers/net/can/sja1000/kvaser_pci.c @@ -33,7 +33,6 @@ MODULE_AUTHOR("Per Dalen <per.dalen@cnw.se>"); MODULE_DESCRIPTION("Socket-CAN driver for KVASER PCAN PCI cards"); -MODULE_SUPPORTED_DEVICE("KVASER PCAN PCI CAN card"); MODULE_LICENSE("GPL v2"); #define MAX_NO_OF_CHANNELS 4 /* max no of channels on a single card */ diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index 4713921bd511..84eac8cb8686 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c @@ -24,8 +24,6 @@ MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>"); MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCI family cards"); -MODULE_SUPPORTED_DEVICE("PEAK PCAN PCI/PCIe/PCIeC miniPCI CAN cards"); -MODULE_SUPPORTED_DEVICE("PEAK PCAN miniPCIe/cPCI PC/104+ PCI/104e CAN Cards"); MODULE_LICENSE("GPL v2"); #define DRV_NAME "peak_pci" diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c index cf951a783078..131a084c3535 100644 --- a/drivers/net/can/sja1000/peak_pcmcia.c +++ b/drivers/net/can/sja1000/peak_pcmcia.c @@ -22,7 +22,6 @@ MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>"); MODULE_DESCRIPTION("CAN driver for PEAK-System PCAN-PC Cards"); MODULE_LICENSE("GPL v2"); -MODULE_SUPPORTED_DEVICE("PEAK PCAN-PC Card"); /* PEAK-System PCMCIA driver name */ #define PCC_NAME "peak_pcmcia" diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c index 85679588ef73..5de1ebb0c6f0 100644 --- a/drivers/net/can/sja1000/plx_pci.c +++ b/drivers/net/can/sja1000/plx_pci.c @@ -25,18 +25,6 @@ MODULE_AUTHOR("Pavel Cheblakov <P.B.Cheblakov@inp.nsk.su>"); MODULE_DESCRIPTION("Socket-CAN driver for PLX90xx PCI-bridge cards with " "the SJA1000 chips"); -MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, " - "Adlink PCI-7841/cPCI-7841 SE, " - "Marathon CAN-bus-PCI, " - "Marathon CAN-bus-PCIe, " - "TEWS TECHNOLOGIES TPMC810, " - "esd CAN-PCI/CPCI/PCI104/200, " - "esd CAN-PCI/PMC/266, " - "esd CAN-PCIe/2000, " - "Connect Tech Inc. 
CANpro/104-Plus Opto (CRG001), " - "IXXAT PC-I 04/PCI, " - "ELCUS CAN-200-PCI, " - "ASEM DUAL CAN-RAW") MODULE_LICENSE("GPL v2"); #define PLX_PCI_MAX_CHAN 2 diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c index 3c5b92911d46..799e9d5d3481 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c @@ -335,8 +335,6 @@ static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv) u8 len; int i, j; - netdev_reset_queue(priv->ndev); - /* TEF */ tef_ring = priv->tef; tef_ring->head = 0; @@ -1249,8 +1247,7 @@ mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq) static int mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv, - const struct mcp251xfd_hw_tef_obj *hw_tef_obj, - unsigned int *frame_len_ptr) + const struct mcp251xfd_hw_tef_obj *hw_tef_obj) { struct net_device_stats *stats = &priv->ndev->stats; u32 seq, seq_masked, tef_tail_masked; @@ -1272,8 +1269,7 @@ mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv, stats->tx_bytes += can_rx_offload_get_echo_skb(&priv->offload, mcp251xfd_get_tef_tail(priv), - hw_tef_obj->ts, - frame_len_ptr); + hw_tef_obj->ts, NULL); stats->tx_packets++; priv->tef->tail++; @@ -1331,7 +1327,6 @@ mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv, static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv) { struct mcp251xfd_hw_tef_obj hw_tef_obj[MCP251XFD_TX_OBJ_NUM_MAX]; - unsigned int total_frame_len = 0; u8 tef_tail, len, l; int err, i; @@ -1353,9 +1348,7 @@ static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv) } for (i = 0; i < len; i++) { - unsigned int frame_len; - - err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i], &frame_len); + err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i]); /* -EAGAIN means the Sequence Number in the TEF * doesn't match our tef_tail. This can happen if we * read the TEF objects too early. 
Leave loop let the @@ -1365,8 +1358,6 @@ static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv) goto out_netif_wake_queue; if (err) return err; - - total_frame_len += frame_len; } out_netif_wake_queue: @@ -1397,7 +1388,6 @@ static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv) return err; tx_ring->tail += len; - netdev_completed_queue(priv->ndev, len, total_frame_len); err = mcp251xfd_check_tef_tail(priv); if (err) @@ -2443,7 +2433,6 @@ static netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb, struct mcp251xfd_priv *priv = netdev_priv(ndev); struct mcp251xfd_tx_ring *tx_ring = priv->tx; struct mcp251xfd_tx_obj *tx_obj; - unsigned int frame_len; u8 tx_head; int err; @@ -2462,9 +2451,7 @@ static netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb, if (mcp251xfd_get_tx_free(tx_ring) == 0) netif_stop_queue(ndev); - frame_len = can_skb_get_frame_len(skb); - can_put_echo_skb(skb, ndev, tx_head, frame_len); - netdev_sent_queue(priv->ndev, frame_len); + can_put_echo_skb(skb, ndev, tx_head, 0); err = mcp251xfd_tx_obj_write(priv, tx_obj); if (err) diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig index c1e5d5b570b6..538f4d9adb91 100644 --- a/drivers/net/can/usb/Kconfig +++ b/drivers/net/can/usb/Kconfig @@ -73,6 +73,7 @@ config CAN_KVASER_USB - Kvaser Memorator Pro 5xHS - Kvaser USBcan Light 4xHS - Kvaser USBcan Pro 2xHS v2 + - Kvaser USBcan Pro 4xHS - Kvaser USBcan Pro 5xHS - Kvaser U100 - Kvaser U100P diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c index 2b7efd296758..4e97da8434ab 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c @@ -86,8 +86,9 @@ #define USB_U100_PRODUCT_ID 273 #define USB_U100P_PRODUCT_ID 274 #define USB_U100S_PRODUCT_ID 275 +#define USB_USBCAN_PRO_4HS_PRODUCT_ID 276 #define USB_HYDRA_PRODUCT_ID_END \ - USB_U100S_PRODUCT_ID + USB_USBCAN_PRO_4HS_PRODUCT_ID static inline bool kvaser_is_leaf(const struct usb_device_id *id) { @@ -193,6 +194,7 @@ static const struct usb_device_id kvaser_usb_table[] = { { USB_DEVICE(KVASER_VENDOR_ID, USB_U100_PRODUCT_ID) }, { USB_DEVICE(KVASER_VENDOR_ID, USB_U100P_PRODUCT_ID) }, { USB_DEVICE(KVASER_VENDOR_ID, USB_U100S_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_4HS_PRODUCT_ID) }, { } }; MODULE_DEVICE_TABLE(usb, kvaser_usb_table); diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index e6c1e5d33924..e393e8457d77 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c @@ -18,8 +18,6 @@ #include "pcan_usb_core.h" -MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB adapter"); - /* PCAN-USB Endpoints */ #define PCAN_USB_EP_CMDOUT 1 #define PCAN_USB_EP_CMDIN (PCAN_USB_EP_CMDOUT | USB_DIR_IN) diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c index f347ecc79aef..bae078579c0d 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c @@ -16,9 +16,6 @@ #include "pcan_usb_core.h" #include "pcan_usb_pro.h" -MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB FD adapter"); -MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB Pro FD adapter"); - #define PCAN_USBPROFD_CHANNEL_COUNT 2 #define PCAN_USBFD_CHANNEL_COUNT 1 diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index 275087c39602..18fa180ecc81 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ 
b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c @@ -17,8 +17,6 @@ #include "pcan_usb_core.h" #include "pcan_usb_pro.h" -MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB Pro adapter"); - #define PCAN_USBPRO_CHANNEL_COUNT 2 /* PCAN-USB Pro adapter internal clock (MHz) */ diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index a162499bcafc..eb443721c58e 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1105,13 +1105,6 @@ static int b53_setup(struct dsa_switch *ds) b53_disable_port(ds, port); } - /* Let DSA handle the case were multiple bridges span the same switch - * device and different VLAN awareness settings are requested, which - * would be breaking filtering semantics for any of the other bridge - * devices. (not hardware supported) - */ - ds->vlan_filtering_is_global = true; - return b53_setup_devlink_resources(ds); } @@ -2664,6 +2657,13 @@ struct b53_device *b53_switch_alloc(struct device *base, ds->ops = &b53_switch_ops; ds->untag_bridge_pvid = true; dev->vlan_enabled = true; + /* Let DSA handle the case were multiple bridges span the same switch + * device and different VLAN awareness settings are requested, which + * would be breaking filtering semantics for any of the other bridge + * devices. (not hardware supported) + */ + ds->vlan_filtering_is_global = true; + mutex_init(&dev->reg_mutex); mutex_init(&dev->stats_mutex); diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 5ee8103b8e9c..ba5d546d06aa 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -114,7 +114,10 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) /* Force link status for IMP port */ reg = core_readl(priv, offset); reg |= (MII_SW_OR | LINK_STS); - reg &= ~GMII_SPEED_UP_2G; + if (priv->type == BCM4908_DEVICE_ID) + reg |= GMII_SPEED_UP_2G; + else + reg &= ~GMII_SPEED_UP_2G; core_writel(priv, reg, offset); /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */ @@ -406,7 +409,7 @@ static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv) /* The watchdog reset does not work on 7278, we need to hit the * "external" reset line through the reset controller. */ - if (priv->type == BCM7278_DEVICE_ID && !IS_ERR(priv->rcdev)) { + if (priv->type == BCM7278_DEVICE_ID) { ret = reset_control_assert(priv->rcdev); if (ret) return ret; @@ -585,8 +588,10 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port) * in bits 15:8 and the patch level in bits 7:0 which is exactly what * the REG_PHY_REVISION register layout is. 
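/* Illustrative sketch (invented names and sample values, not the driver's real
 * API): the surrounding bcm_sf2_sw_get_phy_flags() hunk returns the packed GPHY
 * revision -- version number in bits 15:8, patch level in bits 7:0 -- only for
 * ports whose bit is set in the internal-PHY mask, and 0 for external PHYs.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t demo_phy_flags(uint32_t gphy_rev, uint32_t int_phy_mask, int port)
{
	if (int_phy_mask & (1u << port))
		return gphy_rev;	/* internal GPHY: report packed revision */
	return 0;			/* external PHY: nothing meaningful to report */
}

int main(void)
{
	uint32_t rev = demo_phy_flags(0x0a02, 0x0f, 1);	/* port 1 is internal in this sample */

	printf("version 0x%02x, patch 0x%02x\n",
	       (unsigned)((rev >> 8) & 0xff), (unsigned)(rev & 0xff));
	printf("external port -> %#x\n", (unsigned)demo_phy_flags(0x0a02, 0x0f, 7));
	return 0;
}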
*/ - - return priv->hw_params.gphy_rev; + if (priv->int_phy_mask & BIT(port)) + return priv->hw_params.gphy_rev; + else + return 0; } static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port, @@ -1265,7 +1270,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) priv->rcdev = devm_reset_control_get_optional_exclusive(&pdev->dev, "switch"); - if (PTR_ERR(priv->rcdev) == -EPROBE_DEFER) + if (IS_ERR(priv->rcdev)) return PTR_ERR(priv->rcdev); /* Auto-detection using standard registers will not work, so @@ -1426,7 +1431,7 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev) bcm_sf2_mdio_unregister(priv); clk_disable_unprepare(priv->clk_mdiv); clk_disable_unprepare(priv->clk); - if (priv->type == BCM7278_DEVICE_ID && !IS_ERR(priv->rcdev)) + if (priv->type == BCM7278_DEVICE_ID) reset_control_assert(priv->rcdev); return 0; diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index c17de2bcf2fe..9871d7cff93a 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -436,34 +436,32 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface) TD_DM_DRVP(8) | TD_DM_DRVN(8)); /* Setup core clock for MT7530 */ - if (!trgint) { - /* Disable MT7530 core clock */ - core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); - - /* Disable PLL, since phy_device has not yet been created - * provided for phy_[read,write]_mmd_indirect is called, we - * provide our own core_write_mmd_indirect to complete this - * function. - */ - core_write_mmd_indirect(priv, - CORE_GSWPLL_GRP1, - MDIO_MMD_VEND2, - 0); - - /* Set core clock into 500Mhz */ - core_write(priv, CORE_GSWPLL_GRP2, - RG_GSWPLL_POSDIV_500M(1) | - RG_GSWPLL_FBKDIV_500M(25)); + /* Disable MT7530 core clock */ + core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); - /* Enable PLL */ - core_write(priv, CORE_GSWPLL_GRP1, - RG_GSWPLL_EN_PRE | - RG_GSWPLL_POSDIV_200M(2) | - RG_GSWPLL_FBKDIV_200M(32)); - - /* Enable MT7530 core clock */ - core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); - } + /* Disable PLL, since phy_device has not yet been created + * provided for phy_[read,write]_mmd_indirect is called, we + * provide our own core_write_mmd_indirect to complete this + * function. 
+ */ + core_write_mmd_indirect(priv, + CORE_GSWPLL_GRP1, + MDIO_MMD_VEND2, + 0); + + /* Set core clock into 500Mhz */ + core_write(priv, CORE_GSWPLL_GRP2, + RG_GSWPLL_POSDIV_500M(1) | + RG_GSWPLL_FBKDIV_500M(25)); + + /* Enable PLL */ + core_write(priv, CORE_GSWPLL_GRP1, + RG_GSWPLL_EN_PRE | + RG_GSWPLL_POSDIV_200M(2) | + RG_GSWPLL_FBKDIV_200M(32)); + + /* Enable MT7530 core clock */ + core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); /* Setup the MT7530 TRGMII Tx Clock */ core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); @@ -1624,6 +1622,7 @@ mtk_get_tag_protocol(struct dsa_switch *ds, int port, } } +#ifdef CONFIG_GPIOLIB static inline u32 mt7530_gpio_to_bit(unsigned int offset) { @@ -1726,6 +1725,7 @@ mt7530_setup_gpio(struct mt7530_priv *priv) return devm_gpiochip_add_data(dev, gc, priv); } +#endif /* CONFIG_GPIOLIB */ static int mt7530_setup(struct dsa_switch *ds) @@ -1868,11 +1868,13 @@ mt7530_setup(struct dsa_switch *ds) } } +#ifdef CONFIG_GPIOLIB if (of_property_read_bool(priv->dev->of_node, "gpio-controller")) { ret = mt7530_setup_gpio(priv); if (ret) return ret; } +#endif /* CONFIG_GPIOLIB */ mt7530_setup_port5(ds, interface); diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index 7692338730df..51ea104c63bb 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -1922,7 +1922,7 @@ out_unlock_ptp: speed = SPEED_1000; else if (bmcr & BMCR_SPEED100) speed = SPEED_100; - else if (bmcr & BMCR_SPEED10) + else speed = SPEED_10; sja1105_sgmii_pcs_force_speed(priv, speed); @@ -3369,14 +3369,14 @@ static int sja1105_port_ucast_bcast_flood(struct sja1105_private *priv, int to, if (flags.val & BR_FLOOD) priv->ucast_egress_floods |= BIT(to); else - priv->ucast_egress_floods |= BIT(to); + priv->ucast_egress_floods &= ~BIT(to); } if (flags.mask & BR_BCAST_FLOOD) { if (flags.val & BR_BCAST_FLOOD) priv->bcast_egress_floods |= BIT(to); else - priv->bcast_egress_floods |= BIT(to); + priv->bcast_egress_floods &= ~BIT(to); } return sja1105_manage_flood_domains(priv); diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c index f025f968f96d..fde6e99274b6 100644 --- a/drivers/net/dsa/xrs700x/xrs700x.c +++ b/drivers/net/dsa/xrs700x/xrs700x.c @@ -528,7 +528,10 @@ static int xrs700x_hsr_join(struct dsa_switch *ds, int port, return -EOPNOTSUPP; dsa_hsr_foreach_port(dp, ds, hsr) { - partner = dp; + if (dp->index != port) { + partner = dp; + break; + } } /* We can't enable redundancy on the switch until both @@ -582,7 +585,10 @@ static int xrs700x_hsr_leave(struct dsa_switch *ds, int port, unsigned int val; dsa_hsr_foreach_port(dp, ds, hsr) { - partner = dp; + if (dp->index != port) { + partner = dp; + break; + } } if (!partner) diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 9b7f1af5f574..9e02f8864593 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1894,13 +1894,16 @@ static int alx_resume(struct device *dev) if (!netif_running(alx->dev)) return 0; - netif_device_attach(alx->dev); rtnl_lock(); err = __alx_open(alx, true); rtnl_unlock(); + if (err) + return err; - return err; + netif_device_attach(alx->dev); + + return 0; } static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume); diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index f8a168b73307..cb88ffb8f12f 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ 
b/drivers/net/ethernet/broadcom/Kconfig @@ -54,7 +54,7 @@ config B44_PCI config BCM4908_ENET tristate "Broadcom BCM4908 internal mac support" depends on ARCH_BCM4908 || COMPILE_TEST - default y + default y if ARCH_BCM4908 help This driver supports Ethernet controller integrated into Broadcom BCM4908 family SoCs. diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c index 0b70e9e0ddad..98cf82dea3e4 100644 --- a/drivers/net/ethernet/broadcom/bcm4908_enet.c +++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c @@ -592,6 +592,9 @@ static int bcm4908_enet_poll(struct napi_struct *napi, int weight) bcm4908_enet_intrs_on(enet); } + /* Hardware could disable ring if it run out of descriptors */ + bcm4908_enet_dma_rx_ring_enable(enet, &enet->rx_ring); + return handled; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index a680fd9c68ea..b53a0d87371a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -8556,10 +8556,18 @@ static void bnxt_setup_inta(struct bnxt *bp) bp->irq_tbl[0].handler = bnxt_inta; } +static int bnxt_init_int_mode(struct bnxt *bp); + static int bnxt_setup_int_mode(struct bnxt *bp) { int rc; + if (!bp->irq_tbl) { + rc = bnxt_init_int_mode(bp); + if (rc || !bp->irq_tbl) + return rc ?: -ENODEV; + } + if (bp->flags & BNXT_FLAG_USING_MSIX) bnxt_setup_msix(bp); else @@ -8744,7 +8752,7 @@ static int bnxt_init_inta(struct bnxt *bp) static int bnxt_init_int_mode(struct bnxt *bp) { - int rc = 0; + int rc = -ENODEV; if (bp->flags & BNXT_FLAG_MSIX_CAP) rc = bnxt_init_msix(bp); @@ -9514,7 +9522,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) { struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr; struct hwrm_func_drv_if_change_input req = {0}; - bool resc_reinit = false, fw_reset = false; + bool fw_reset = !bp->irq_tbl; + bool resc_reinit = false; int rc, retry = 0; u32 flags = 0; @@ -9557,6 +9566,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) { netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n"); + set_bit(BNXT_STATE_ABORT_ERR, &bp->state); return -ENODEV; } if (resc_reinit || fw_reset) { @@ -9890,6 +9900,9 @@ static int bnxt_reinit_after_abort(struct bnxt *bp) if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) return -EBUSY; + if (bp->dev->reg_state == NETREG_UNREGISTERED) + return -ENODEV; + rc = bnxt_fw_init_one(bp); if (!rc) { bnxt_clear_int_mode(bp); diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 472bf8f220bc..15362d016a87 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -3954,6 +3954,13 @@ static int macb_init(struct platform_device *pdev) return 0; } +static const struct macb_usrio_config macb_default_usrio = { + .mii = MACB_BIT(MII), + .rmii = MACB_BIT(RMII), + .rgmii = GEM_BIT(RGMII), + .refclk = MACB_BIT(CLKEN), +}; + #if defined(CONFIG_OF) /* 1518 rounded up */ #define AT91ETHER_MAX_RBUFF_SZ 0x600 @@ -4439,13 +4446,6 @@ static int fu540_c000_init(struct platform_device *pdev) return macb_init(pdev); } -static const struct macb_usrio_config macb_default_usrio = { - .mii = MACB_BIT(MII), - .rmii = MACB_BIT(RMII), - .rgmii = GEM_BIT(RGMII), - .refclk = MACB_BIT(CLKEN), -}; - static const struct macb_usrio_config sama7g5_usrio = { .mii = 0, .rmii = 1, @@ -4594,6 +4594,7 @@ static const struct macb_config 
default_gem_config = { .dma_burst_length = 16, .clk_init = macb_clk_init, .init = macb_init, + .usrio = &macb_default_usrio, .jumbo_max_len = 10240, }; diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c index 46a809f2aeca..1115b8f9ea4e 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c @@ -672,7 +672,7 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, if (tx_info->pending_close) { spin_unlock(&tx_info->lock); if (!status) { - /* it's a late success, tcb status is establised, + /* it's a late success, tcb status is established, * mark it close. */ chcr_ktls_mark_tcb_close(tx_info); @@ -722,7 +722,7 @@ static int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input) kvfree(tx_info); return 0; } - tx_info->open_state = false; + tx_info->open_state = CH_KTLS_OPEN_SUCCESS; spin_unlock(&tx_info->lock); complete(&tx_info->completion); @@ -930,7 +930,7 @@ chcr_ktls_get_tx_flits(u32 nr_frags, unsigned int key_ctx_len) } /* - * chcr_ktls_check_tcp_options: To check if there is any TCP option availbale + * chcr_ktls_check_tcp_options: To check if there is any TCP option available * other than timestamp. * @skb - skb contains partial record.. * return: 1 / 0 @@ -1115,7 +1115,7 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb, } if (unlikely(credits < ETHTXQ_STOP_THRES)) { - /* Credits are below the threshold vaues, stop the queue after + /* Credits are below the threshold values, stop the queue after * injecting the Work Request for this packet. */ chcr_eth_txq_stop(q); @@ -2006,7 +2006,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) /* TCP segments can be in received either complete or partial. * chcr_end_part_handler will handle cases if complete record or end - * part of the record is received. Incase of partial end part of record, + * part of the record is received. In case of partial end part of record, * we will send the complete record again. 
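/* Illustrative sketch of the macb_main.c hunks above: default_gem_config now
 * carries a .usrio pointer, presumably so code that dereferences cfg->usrio
 * never sees a NULL pointer for generic GEM configurations.  Structure and
 * field names below are simplified stand-ins, not the real macb definitions.
 */
#include <stdio.h>

struct demo_usrio { unsigned int mii, rmii, rgmii; };

static const struct demo_usrio demo_default_usrio = { .mii = 1, .rmii = 2, .rgmii = 4 };

struct demo_config {
	const struct demo_usrio *usrio;
	unsigned int jumbo_max_len;
};

static const struct demo_config demo_default_config = {
	.usrio = &demo_default_usrio,	/* without this, users of cfg->usrio would crash */
	.jumbo_max_len = 10240,
};

int main(void)
{
	const struct demo_config *cfg = &demo_default_config;

	printf("mii bit: %u\n", cfg->usrio->mii);
	return 0;
}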
*/ diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 3fdc70dab5c1..252adfa5d837 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -133,6 +133,8 @@ struct board_info { u32 wake_state; int ip_summed; + + struct regulator *power_supply; }; /* debug code */ @@ -1449,7 +1451,7 @@ dm9000_probe(struct platform_device *pdev) if (ret) { dev_err(dev, "failed to request reset gpio %d: %d\n", reset_gpios, ret); - return -ENODEV; + goto out_regulator_disable; } /* According to manual PWRST# Low Period Min 1ms */ @@ -1461,8 +1463,10 @@ dm9000_probe(struct platform_device *pdev) if (!pdata) { pdata = dm9000_parse_dt(&pdev->dev); - if (IS_ERR(pdata)) - return PTR_ERR(pdata); + if (IS_ERR(pdata)) { + ret = PTR_ERR(pdata); + goto out_regulator_disable; + } } /* Init network device */ @@ -1479,6 +1483,8 @@ dm9000_probe(struct platform_device *pdev) db->dev = &pdev->dev; db->ndev = ndev; + if (!IS_ERR(power)) + db->power_supply = power; spin_lock_init(&db->lock); mutex_init(&db->addr_lock); @@ -1501,7 +1507,7 @@ dm9000_probe(struct platform_device *pdev) goto out; } - db->irq_wake = platform_get_irq(pdev, 1); + db->irq_wake = platform_get_irq_optional(pdev, 1); if (db->irq_wake >= 0) { dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake); @@ -1703,6 +1709,10 @@ out: dm9000_release_board(pdev, db); free_netdev(ndev); +out_regulator_disable: + if (!IS_ERR(power)) + regulator_disable(power); + return ret; } @@ -1760,10 +1770,13 @@ static int dm9000_drv_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); + struct board_info *dm = to_dm9000_board(ndev); unregister_netdev(ndev); - dm9000_release_board(pdev, netdev_priv(ndev)); + dm9000_release_board(pdev, dm); free_netdev(ndev); /* free device structure */ + if (dm->power_supply) + regulator_disable(dm->power_supply); dev_dbg(&pdev->dev, "released and freed device\n"); return 0; diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 88bfe2107938..04421aec2dfd 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -1337,6 +1337,7 @@ static int ftgmac100_poll(struct napi_struct *napi, int budget) */ if (unlikely(priv->need_mac_restart)) { ftgmac100_start_hw(priv); + priv->need_mac_restart = false; /* Re-enable "bad" interrupts */ iowrite32(FTGMAC100_INT_BAD, diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index c78d12229730..09471329f3a3 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -281,6 +281,8 @@ static int enetc_poll(struct napi_struct *napi, int budget) int work_done; int i; + enetc_lock_mdio(); + for (i = 0; i < v->count_tx_rings; i++) if (!enetc_clean_tx_ring(&v->tx_ring[i], budget)) complete = false; @@ -291,8 +293,10 @@ static int enetc_poll(struct napi_struct *napi, int budget) if (work_done) v->rx_napi_work = true; - if (!complete) + if (!complete) { + enetc_unlock_mdio(); return budget; + } napi_complete_done(napi, work_done); @@ -301,8 +305,6 @@ static int enetc_poll(struct napi_struct *napi, int budget) v->rx_napi_work = false; - enetc_lock_mdio(); - /* enable interrupts */ enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE); @@ -327,8 +329,8 @@ static void enetc_get_tx_tstamp(struct enetc_hw *hw, union enetc_tx_bd *txbd, { u32 lo, hi, tstamp_lo; - lo = enetc_rd(hw, ENETC_SICTR0); - hi = enetc_rd(hw, 
ENETC_SICTR1); + lo = enetc_rd_hot(hw, ENETC_SICTR0); + hi = enetc_rd_hot(hw, ENETC_SICTR1); tstamp_lo = le32_to_cpu(txbd->wb.tstamp); if (lo <= tstamp_lo) hi -= 1; @@ -342,6 +344,12 @@ static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp) if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) { memset(&shhwtstamps, 0, sizeof(shhwtstamps)); shhwtstamps.hwtstamp = ns_to_ktime(tstamp); + /* Ensure skb_mstamp_ns, which might have been populated with + * the txtime, is not mistaken for a software timestamp, + * because this will prevent the dispatch of our hardware + * timestamp to the socket. + */ + skb->tstamp = ktime_set(0, 0); skb_tstamp_tx(skb, &shhwtstamps); } } @@ -358,9 +366,7 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget) i = tx_ring->next_to_clean; tx_swbd = &tx_ring->tx_swbd[i]; - enetc_lock_mdio(); bds_to_clean = enetc_bd_ready_count(tx_ring, i); - enetc_unlock_mdio(); do_tstamp = false; @@ -403,8 +409,6 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget) tx_swbd = tx_ring->tx_swbd; } - enetc_lock_mdio(); - /* BD iteration loop end */ if (is_eof) { tx_frm_cnt++; @@ -415,8 +419,6 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget) if (unlikely(!bds_to_clean)) bds_to_clean = enetc_bd_ready_count(tx_ring, i); - - enetc_unlock_mdio(); } tx_ring->next_to_clean = i; @@ -527,9 +529,8 @@ static void enetc_get_rx_tstamp(struct net_device *ndev, static void enetc_get_offloads(struct enetc_bdr *rx_ring, union enetc_rx_bd *rxbd, struct sk_buff *skb) { -#ifdef CONFIG_FSL_ENETC_PTP_CLOCK struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev); -#endif + /* TODO: hashing */ if (rx_ring->ndev->features & NETIF_F_RXCSUM) { u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum); @@ -538,12 +539,31 @@ static void enetc_get_offloads(struct enetc_bdr *rx_ring, skb->ip_summed = CHECKSUM_COMPLETE; } - /* copy VLAN to skb, if one is extracted, for now we assume it's a - * standard TPID, but HW also supports custom values - */ - if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), - le16_to_cpu(rxbd->r.vlan_opt)); + if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN) { + __be16 tpid = 0; + + switch (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TPID) { + case 0: + tpid = htons(ETH_P_8021Q); + break; + case 1: + tpid = htons(ETH_P_8021AD); + break; + case 2: + tpid = htons(enetc_port_rd(&priv->si->hw, + ENETC_PCVLANR1)); + break; + case 3: + tpid = htons(enetc_port_rd(&priv->si->hw, + ENETC_PCVLANR2)); + break; + default: + break; + } + + __vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt)); + } + #ifdef CONFIG_FSL_ENETC_PTP_CLOCK if (priv->active_offloads & ENETC_F_RX_TSTAMP) enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb); @@ -660,8 +680,6 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, u32 bd_status; u16 size; - enetc_lock_mdio(); - if (cleaned_cnt >= ENETC_RXBD_BUNDLE) { int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt); @@ -672,19 +690,15 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, rxbd = enetc_rxbd(rx_ring, i); bd_status = le32_to_cpu(rxbd->r.lstatus); - if (!bd_status) { - enetc_unlock_mdio(); + if (!bd_status) break; - } enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index)); dma_rmb(); /* for reading other rxbd fields */ size = le16_to_cpu(rxbd->r.buf_len); skb = enetc_map_rx_buff_to_skb(rx_ring, i, size); - if (!skb) { - enetc_unlock_mdio(); + if (!skb) break; - } enetc_get_offloads(rx_ring, rxbd, skb); @@ -696,7 +710,6 
@@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, if (unlikely(bd_status & ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) { - enetc_unlock_mdio(); dev_kfree_skb(skb); while (!(bd_status & ENETC_RXBD_LSTATUS_F)) { dma_rmb(); @@ -736,8 +749,6 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, enetc_process_skb(rx_ring, skb); - enetc_unlock_mdio(); - napi_gro_receive(napi, skb); rx_frm_cnt++; @@ -984,7 +995,7 @@ static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv) enetc_free_tx_ring(priv->tx_ring[i]); } -static int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr) +int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr) { int size = cbdr->bd_count * sizeof(struct enetc_cbd); @@ -1005,7 +1016,7 @@ static int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr) return 0; } -static void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr) +void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr) { int size = cbdr->bd_count * sizeof(struct enetc_cbd); @@ -1013,7 +1024,7 @@ static void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr) cbdr->bd_base = NULL; } -static void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr) +void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr) { /* set CBDR cache attributes */ enetc_wr(hw, ENETC_SICAR2, @@ -1033,7 +1044,7 @@ static void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr) cbdr->cir = hw->reg + ENETC_SICBDRCIR; } -static void enetc_clear_cbdr(struct enetc_hw *hw) +void enetc_clear_cbdr(struct enetc_hw *hw) { enetc_wr(hw, ENETC_SICBDRMR, 0); } @@ -1058,13 +1069,12 @@ static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups) return 0; } -static int enetc_configure_si(struct enetc_ndev_priv *priv) +int enetc_configure_si(struct enetc_ndev_priv *priv) { struct enetc_si *si = priv->si; struct enetc_hw *hw = &si->hw; int err; - enetc_setup_cbdr(hw, &si->cbd_ring); /* set SI cache attributes */ enetc_wr(hw, ENETC_SICAR0, ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); @@ -1112,6 +1122,8 @@ int enetc_alloc_si_resources(struct enetc_ndev_priv *priv) if (err) return err; + enetc_setup_cbdr(&si->hw, &si->cbd_ring); + priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules), GFP_KERNEL); if (!priv->cls_rules) { @@ -1119,14 +1131,8 @@ int enetc_alloc_si_resources(struct enetc_ndev_priv *priv) goto err_alloc_cls; } - err = enetc_configure_si(priv); - if (err) - goto err_config_si; - return 0; -err_config_si: - kfree(priv->cls_rules); err_alloc_cls: enetc_clear_cbdr(&si->hw); enetc_free_cbdr(priv->dev, &si->cbd_ring); @@ -1212,7 +1218,8 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) rx_ring->idr = hw->reg + ENETC_SIRXIDR; enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring)); - enetc_wr(hw, ENETC_SIRXIDR, rx_ring->next_to_use); + /* update ENETC's consumer index */ + enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, rx_ring->next_to_use); /* enable ring */ enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr); diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h index 8532d23b54f5..8b380fc13314 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -292,6 +292,7 @@ void enetc_get_si_caps(struct enetc_si *si); void enetc_init_si_rings_params(struct enetc_ndev_priv *priv); int enetc_alloc_si_resources(struct enetc_ndev_priv *priv); void enetc_free_si_resources(struct enetc_ndev_priv *priv); 
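/* Illustrative sketch of the enetc.c hunks above: the per-helper
 * enetc_lock_mdio()/enetc_unlock_mdio() calls are removed from the ring
 * cleaning functions and the lock is taken once around the whole NAPI poll.
 * This is only a userspace pthread analogy of that lock-scope coarsening,
 * not the driver's actual MDIO locking code.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static int demo_work_done;

static void demo_clean_ring(int budget)
{
	/* callers now run with demo_lock already held */
	for (int i = 0; i < budget; i++)
		demo_work_done++;
}

static void demo_poll(int budget)
{
	pthread_mutex_lock(&demo_lock);	/* one lock/unlock per poll ... */
	demo_clean_ring(budget);	/* ... instead of one per helper call */
	demo_clean_ring(budget);
	pthread_mutex_unlock(&demo_lock);
}

int main(void)
{
	demo_poll(16);
	printf("processed %d descriptors\n", demo_work_done);
	return 0;
}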
+int enetc_configure_si(struct enetc_ndev_priv *priv); int enetc_open(struct net_device *ndev); int enetc_close(struct net_device *ndev); @@ -309,6 +310,10 @@ int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type, void enetc_set_ethtool_ops(struct net_device *ndev); /* control buffer descriptor ring (CBDR) */ +int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr); +void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr); +void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr); +void enetc_clear_cbdr(struct enetc_hw *hw); int enetc_set_mac_flt_entry(struct enetc_si *si, int index, char *mac_addr, int si_map); int enetc_clear_mac_flt_entry(struct enetc_si *si, int index); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h index c71fe8d751d5..00938f7960a4 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h +++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h @@ -172,6 +172,8 @@ enum enetc_bdr_type {TX, RX}; #define ENETC_PSIPMAR0(n) (0x0100 + (n) * 0x8) /* n = SI index */ #define ENETC_PSIPMAR1(n) (0x0104 + (n) * 0x8) #define ENETC_PVCLCTR 0x0208 +#define ENETC_PCVLANR1 0x0210 +#define ENETC_PCVLANR2 0x0214 #define ENETC_VLAN_TYPE_C BIT(0) #define ENETC_VLAN_TYPE_S BIT(1) #define ENETC_PVCLCTR_OVTPIDL(bmp) ((bmp) & 0xff) /* VLAN_TYPE */ @@ -232,14 +234,23 @@ enum enetc_bdr_type {TX, RX}; #define ENETC_PM0_MAXFRM 0x8014 #define ENETC_SET_TX_MTU(val) ((val) << 16) #define ENETC_SET_MAXFRM(val) ((val) & 0xffff) +#define ENETC_PM0_RX_FIFO 0x801c +#define ENETC_PM0_RX_FIFO_VAL 1 #define ENETC_PM_IMDIO_BASE 0x8030 #define ENETC_PM0_IF_MODE 0x8300 -#define ENETC_PMO_IFM_RG BIT(2) +#define ENETC_PM0_IFM_RG BIT(2) #define ENETC_PM0_IFM_RLP (BIT(5) | BIT(11)) -#define ENETC_PM0_IFM_RGAUTO (BIT(15) | ENETC_PMO_IFM_RG | BIT(1)) -#define ENETC_PM0_IFM_XGMII BIT(12) +#define ENETC_PM0_IFM_EN_AUTO BIT(15) +#define ENETC_PM0_IFM_SSP_MASK GENMASK(14, 13) +#define ENETC_PM0_IFM_SSP_1000 (2 << 13) +#define ENETC_PM0_IFM_SSP_100 (0 << 13) +#define ENETC_PM0_IFM_SSP_10 (1 << 13) +#define ENETC_PM0_IFM_FULL_DPX BIT(12) +#define ENETC_PM0_IFM_IFMODE_MASK GENMASK(1, 0) +#define ENETC_PM0_IFM_IFMODE_XGMII 0 +#define ENETC_PM0_IFM_IFMODE_GMII 2 #define ENETC_PSIDCAPR 0x1b08 #define ENETC_PSIDCAPR_MSK GENMASK(15, 0) #define ENETC_PSFCAPR 0x1b18 @@ -453,6 +464,8 @@ static inline u64 _enetc_rd_reg64_wa(void __iomem *reg) #define enetc_wr_reg(reg, val) _enetc_wr_reg_wa((reg), (val)) #define enetc_rd(hw, off) enetc_rd_reg((hw)->reg + (off)) #define enetc_wr(hw, off, val) enetc_wr_reg((hw)->reg + (off), val) +#define enetc_rd_hot(hw, off) enetc_rd_reg_hot((hw)->reg + (off)) +#define enetc_wr_hot(hw, off, val) enetc_wr_reg_hot((hw)->reg + (off), val) #define enetc_rd64(hw, off) _enetc_rd_reg64_wa((hw)->reg + (off)) /* port register accessors - PF only */ #define enetc_port_rd(hw, off) enetc_rd_reg((hw)->port + (off)) @@ -568,6 +581,7 @@ union enetc_rx_bd { #define ENETC_RXBD_LSTATUS(flags) ((flags) << 16) #define ENETC_RXBD_FLAG_VLAN BIT(9) #define ENETC_RXBD_FLAG_TSTMP BIT(10) +#define ENETC_RXBD_FLAG_TPID GENMASK(1, 0) #define ENETC_MAC_ADDR_FILT_CNT 8 /* # of supported entries per port */ #define EMETC_MAC_ADDR_FILT_RES 3 /* # of reserved entries at the beginning */ diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index 515c5b29d7aa..224fc37a6757 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ 
b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -190,7 +190,6 @@ static void enetc_pf_set_rx_mode(struct net_device *ndev) { struct enetc_ndev_priv *priv = netdev_priv(ndev); struct enetc_pf *pf = enetc_si_priv(priv->si); - char vlan_promisc_simap = pf->vlan_promisc_simap; struct enetc_hw *hw = &priv->si->hw; bool uprom = false, mprom = false; struct enetc_mac_filter *filter; @@ -203,16 +202,12 @@ static void enetc_pf_set_rx_mode(struct net_device *ndev) psipmr = ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0); uprom = true; mprom = true; - /* Enable VLAN promiscuous mode for SI0 (PF) */ - vlan_promisc_simap |= BIT(0); } else if (ndev->flags & IFF_ALLMULTI) { /* enable multi cast promisc mode for SI0 (PF) */ psipmr = ENETC_PSIPMR_SET_MP(0); mprom = true; } - enetc_set_vlan_promisc(&pf->si->hw, vlan_promisc_simap); - /* first 2 filter entries belong to PF */ if (!uprom) { /* Update unicast filters */ @@ -320,7 +315,7 @@ static void enetc_set_loopback(struct net_device *ndev, bool en) u32 reg; reg = enetc_port_rd(hw, ENETC_PM0_IF_MODE); - if (reg & ENETC_PMO_IFM_RG) { + if (reg & ENETC_PM0_IFM_RG) { /* RGMII mode */ reg = (reg & ~ENETC_PM0_IFM_RLP) | (en ? ENETC_PM0_IFM_RLP : 0); @@ -495,17 +490,30 @@ static void enetc_configure_port_mac(struct enetc_hw *hw) enetc_port_wr(hw, ENETC_PM1_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN | ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC); + + /* On LS1028A, the MAC RX FIFO defaults to 2, which is too high + * and may lead to RX lock-up under traffic. Set it to 1 instead, + * as recommended by the hardware team. + */ + enetc_port_wr(hw, ENETC_PM0_RX_FIFO, ENETC_PM0_RX_FIFO_VAL); } static void enetc_mac_config(struct enetc_hw *hw, phy_interface_t phy_mode) { - /* set auto-speed for RGMII */ - if (enetc_port_rd(hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG || - phy_interface_mode_is_rgmii(phy_mode)) - enetc_port_wr(hw, ENETC_PM0_IF_MODE, ENETC_PM0_IFM_RGAUTO); + u32 val; + + if (phy_interface_mode_is_rgmii(phy_mode)) { + val = enetc_port_rd(hw, ENETC_PM0_IF_MODE); + val &= ~ENETC_PM0_IFM_EN_AUTO; + val &= ENETC_PM0_IFM_IFMODE_MASK; + val |= ENETC_PM0_IFM_IFMODE_GMII | ENETC_PM0_IFM_RG; + enetc_port_wr(hw, ENETC_PM0_IF_MODE, val); + } - if (phy_mode == PHY_INTERFACE_MODE_USXGMII) - enetc_port_wr(hw, ENETC_PM0_IF_MODE, ENETC_PM0_IFM_XGMII); + if (phy_mode == PHY_INTERFACE_MODE_USXGMII) { + val = ENETC_PM0_IFM_FULL_DPX | ENETC_PM0_IFM_IFMODE_XGMII; + enetc_port_wr(hw, ENETC_PM0_IF_MODE, val); + } } static void enetc_mac_enable(struct enetc_hw *hw, bool en) @@ -937,6 +945,34 @@ static void enetc_pl_mac_config(struct phylink_config *config, phylink_set_pcs(priv->phylink, &pf->pcs->pcs); } +static void enetc_force_rgmii_mac(struct enetc_hw *hw, int speed, int duplex) +{ + u32 old_val, val; + + old_val = val = enetc_port_rd(hw, ENETC_PM0_IF_MODE); + + if (speed == SPEED_1000) { + val &= ~ENETC_PM0_IFM_SSP_MASK; + val |= ENETC_PM0_IFM_SSP_1000; + } else if (speed == SPEED_100) { + val &= ~ENETC_PM0_IFM_SSP_MASK; + val |= ENETC_PM0_IFM_SSP_100; + } else if (speed == SPEED_10) { + val &= ~ENETC_PM0_IFM_SSP_MASK; + val |= ENETC_PM0_IFM_SSP_10; + } + + if (duplex == DUPLEX_FULL) + val |= ENETC_PM0_IFM_FULL_DPX; + else + val &= ~ENETC_PM0_IFM_FULL_DPX; + + if (val == old_val) + return; + + enetc_port_wr(hw, ENETC_PM0_IF_MODE, val); +} + static void enetc_pl_mac_link_up(struct phylink_config *config, struct phy_device *phy, unsigned int mode, phy_interface_t interface, int speed, @@ -949,6 +985,10 @@ static void enetc_pl_mac_link_up(struct phylink_config *config, if 
(priv->active_offloads & ENETC_F_QBV) enetc_sched_speed_set(priv, speed); + if (!phylink_autoneg_inband(mode) && + phy_interface_mode_is_rgmii(interface)) + enetc_force_rgmii_mac(&pf->si->hw, speed, duplex); + enetc_mac_enable(&pf->si->hw, true); } @@ -1041,6 +1081,26 @@ static int enetc_init_port_rss_memory(struct enetc_si *si) return err; } +static void enetc_init_unused_port(struct enetc_si *si) +{ + struct device *dev = &si->pdev->dev; + struct enetc_hw *hw = &si->hw; + int err; + + si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE; + err = enetc_alloc_cbdr(dev, &si->cbd_ring); + if (err) + return; + + enetc_setup_cbdr(hw, &si->cbd_ring); + + enetc_init_port_rfs_memory(si); + enetc_init_port_rss_memory(si); + + enetc_clear_cbdr(hw); + enetc_free_cbdr(dev, &si->cbd_ring); +} + static int enetc_pf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -1051,11 +1111,6 @@ static int enetc_pf_probe(struct pci_dev *pdev, struct enetc_pf *pf; int err; - if (node && !of_device_is_available(node)) { - dev_info(&pdev->dev, "device is disabled, skipping\n"); - return -ENODEV; - } - err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf)); if (err) { dev_err(&pdev->dev, "PCI probing failed\n"); @@ -1069,6 +1124,13 @@ static int enetc_pf_probe(struct pci_dev *pdev, goto err_map_pf_space; } + if (node && !of_device_is_available(node)) { + enetc_init_unused_port(si); + dev_info(&pdev->dev, "device is disabled, skipping\n"); + err = -ENODEV; + goto err_device_disabled; + } + pf = enetc_si_priv(si); pf->si = si; pf->total_vfs = pci_sriov_get_totalvfs(pdev); @@ -1108,6 +1170,12 @@ static int enetc_pf_probe(struct pci_dev *pdev, goto err_init_port_rss; } + err = enetc_configure_si(priv); + if (err) { + dev_err(&pdev->dev, "Failed to configure SI\n"); + goto err_config_si; + } + err = enetc_alloc_msix(priv); if (err) { dev_err(&pdev->dev, "MSIX alloc failed\n"); @@ -1136,6 +1204,7 @@ err_phylink_create: enetc_mdiobus_destroy(pf); err_mdiobus_create: enetc_free_msix(priv); +err_config_si: err_init_port_rss: err_init_port_rfs: err_alloc_msix: @@ -1144,6 +1213,7 @@ err_alloc_si_res: si->ndev = NULL; free_netdev(ndev); err_alloc_netdev: +err_device_disabled: err_map_pf_space: enetc_pci_remove(pdev); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c index 39c1a09e69a9..9b755a84c2d6 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c @@ -171,6 +171,12 @@ static int enetc_vf_probe(struct pci_dev *pdev, goto err_alloc_si_res; } + err = enetc_configure_si(priv); + if (err) { + dev_err(&pdev->dev, "Failed to configure SI\n"); + goto err_config_si; + } + err = enetc_alloc_msix(priv); if (err) { dev_err(&pdev->dev, "MSIX alloc failed\n"); @@ -187,6 +193,7 @@ static int enetc_vf_probe(struct pci_dev *pdev, err_reg_netdev: enetc_free_msix(priv); +err_config_si: err_alloc_msix: enetc_free_si_resources(priv); err_alloc_si_res: diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 2e344aada4c6..1753807cbf97 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -377,9 +377,16 @@ static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) u64 ns; unsigned long flags; + mutex_lock(&adapter->ptp_clk_mutex); + /* Check the ptp clock */ + if (!adapter->ptp_clk_on) { + mutex_unlock(&adapter->ptp_clk_mutex); + return -EINVAL; + } spin_lock_irqsave(&adapter->tmreg_lock, flags); ns = 
timecounter_read(&adapter->tc); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + mutex_unlock(&adapter->ptp_clk_mutex); *ts = ns_to_timespec64(ns); diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 541de32ea662..1cf8ef717453 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -2390,6 +2390,10 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus, if (lstatus & BD_LFLAG(RXBD_LAST)) size -= skb->len; + WARN(size < 0, "gianfar: rx fragment size underflow"); + if (size < 0) + return false; + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, rxb->page_offset + RXBUF_ALIGNMENT, size, GFAR_RXB_TRUESIZE); @@ -2552,6 +2556,17 @@ static int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, if (lstatus & BD_LFLAG(RXBD_EMPTY)) break; + /* lost RXBD_LAST descriptor due to overrun */ + if (skb && + (lstatus & BD_LFLAG(RXBD_FIRST))) { + /* discard faulty buffer */ + dev_kfree_skb(skb); + skb = NULL; + rx_queue->stats.rx_dropped++; + + /* can continue normally */ + } + /* order rx buffer descriptor reads */ rmb(); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 5d7824d2b4d4..c66a7a51198e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -1663,8 +1663,10 @@ static int hns_nic_clear_all_rx_fetch(struct net_device *ndev) for (j = 0; j < fetch_num; j++) { /* alloc one skb and init */ skb = hns_assemble_skb(ndev); - if (!skb) + if (!skb) { + ret = -ENOMEM; goto out; + } rd = &tx_ring_data(priv, skb->queue_mapping); hns_nic_net_xmit_hw(ndev, skb, rd); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index ff52a65b4cff..057dda735492 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -1053,16 +1053,16 @@ struct hclge_fd_tcam_config_3_cmd { #define HCLGE_FD_AD_DROP_B 0 #define HCLGE_FD_AD_DIRECT_QID_B 1 #define HCLGE_FD_AD_QID_S 2 -#define HCLGE_FD_AD_QID_M GENMASK(12, 2) +#define HCLGE_FD_AD_QID_M GENMASK(11, 2) #define HCLGE_FD_AD_USE_COUNTER_B 12 #define HCLGE_FD_AD_COUNTER_NUM_S 13 #define HCLGE_FD_AD_COUNTER_NUM_M GENMASK(20, 13) #define HCLGE_FD_AD_NXT_STEP_B 20 #define HCLGE_FD_AD_NXT_KEY_S 21 -#define HCLGE_FD_AD_NXT_KEY_M GENMASK(26, 21) +#define HCLGE_FD_AD_NXT_KEY_M GENMASK(25, 21) #define HCLGE_FD_AD_WR_RULE_ID_B 0 #define HCLGE_FD_AD_RULE_ID_S 1 -#define HCLGE_FD_AD_RULE_ID_M GENMASK(13, 1) +#define HCLGE_FD_AD_RULE_ID_M GENMASK(12, 1) #define HCLGE_FD_AD_TC_OVRD_B 16 #define HCLGE_FD_AD_TC_SIZE_S 17 #define HCLGE_FD_AD_TC_SIZE_M GENMASK(20, 17) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 34b744df6709..e3f81c7e0ce7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -5245,9 +5245,9 @@ static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y, case BIT(INNER_SRC_MAC): for (i = 0; i < ETH_ALEN; i++) { calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i], - rule->tuples.src_mac[i]); + rule->tuples_mask.src_mac[i]); calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i], - rule->tuples.src_mac[i]); + rule->tuples_mask.src_mac[i]); } return true; @@ -6330,8 +6330,7 @@ static void hclge_fd_get_ext_info(struct 
ethtool_rx_flow_spec *fs, fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1); fs->m_ext.vlan_tci = rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ? - cpu_to_be16(VLAN_VID_MASK) : - cpu_to_be16(rule->tuples_mask.vlan_tag1); + 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1); } if (fs->flow_type & FLOW_MAC_EXT) { diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index c3ec9ceed833..7fea9ae60f13 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1758,7 +1758,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) return 0; } -static int ibmveth_remove(struct vio_dev *dev) +static void ibmveth_remove(struct vio_dev *dev) { struct net_device *netdev = dev_get_drvdata(&dev->dev); struct ibmveth_adapter *adapter = netdev_priv(netdev); @@ -1771,8 +1771,6 @@ static int ibmveth_remove(struct vio_dev *dev) free_netdev(netdev); dev_set_drvdata(&dev->dev, NULL); - - return 0; } static struct attribute veth_active_attr; diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 118a4bd3f877..9c6438d3b3a5 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -78,7 +78,6 @@ MODULE_LICENSE("GPL"); MODULE_VERSION(IBMVNIC_DRIVER_VERSION); static int ibmvnic_version = IBMVNIC_INITIAL_VERSION; -static int ibmvnic_remove(struct vio_dev *); static void release_sub_crqs(struct ibmvnic_adapter *, bool); static int ibmvnic_reset_crq(struct ibmvnic_adapter *); static int ibmvnic_send_crq_init(struct ibmvnic_adapter *); @@ -1906,10 +1905,9 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - if (adapter->state != VNIC_PROBED) { - ether_addr_copy(adapter->mac_addr, addr->sa_data); + ether_addr_copy(adapter->mac_addr, addr->sa_data); + if (adapter->state != VNIC_PROBED) rc = __ibmvnic_set_mac(netdev, addr->sa_data); - } return rc; } @@ -5219,16 +5217,14 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset) { struct device *dev = &adapter->vdev->dev; unsigned long timeout = msecs_to_jiffies(20000); - u64 old_num_rx_queues, old_num_tx_queues; + u64 old_num_rx_queues = adapter->req_rx_queues; + u64 old_num_tx_queues = adapter->req_tx_queues; int rc; adapter->from_passive_init = false; - if (reset) { - old_num_rx_queues = adapter->req_rx_queues; - old_num_tx_queues = adapter->req_tx_queues; + if (reset) reinit_completion(&adapter->init_done); - } adapter->init_done_rc = 0; rc = ibmvnic_send_crq_init(adapter); @@ -5396,7 +5392,7 @@ ibmvnic_init_fail: return rc; } -static int ibmvnic_remove(struct vio_dev *dev) +static void ibmvnic_remove(struct vio_dev *dev) { struct net_device *netdev = dev_get_drvdata(&dev->dev); struct ibmvnic_adapter *adapter = netdev_priv(netdev); @@ -5411,9 +5407,9 @@ static int ibmvnic_remove(struct vio_dev *dev) * after setting state, so __ibmvnic_reset() which is called * from the flush_work() below, can make progress. 
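/* Illustrative sketch of the ibmvnic_reset_init() hunk above: the old queue
 * counts are now captured at declaration instead of only inside "if (reset)",
 * so later code can compare against them regardless of which path was taken.
 * Names below are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_adapter {
	uint64_t req_rx_queues;
	uint64_t req_tx_queues;
};

static int demo_reset_init(struct demo_adapter *a, int reset)
{
	/* snapshot unconditionally; always safe to compare against later */
	uint64_t old_rx = a->req_rx_queues;
	uint64_t old_tx = a->req_tx_queues;

	if (reset) {
		/* ...re-negotiate queue counts with firmware here... */
	}

	return old_rx != a->req_rx_queues || old_tx != a->req_tx_queues;
}

int main(void)
{
	struct demo_adapter a = { .req_rx_queues = 4, .req_tx_queues = 4 };

	printf("queues changed: %d\n", demo_reset_init(&a, 0));
	return 0;
}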
*/ - spin_lock_irqsave(&adapter->rwi_lock, flags); + spin_lock(&adapter->rwi_lock); adapter->state = VNIC_REMOVING; - spin_unlock_irqrestore(&adapter->rwi_lock, flags); + spin_unlock(&adapter->rwi_lock); spin_unlock_irqrestore(&adapter->state_lock, flags); @@ -5437,8 +5433,6 @@ static int ibmvnic_remove(struct vio_dev *dev) device_remove_file(&dev->dev, &dev_attr_failover); free_netdev(netdev); dev_set_drvdata(&dev->dev, NULL); - - return 0; } static ssize_t failover_store(struct device *dev, struct device_attribute *attr, diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index 88faf05e23ba..0b1e890dd583 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -899,6 +899,8 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active) } else { data &= ~IGP02E1000_PM_D0_LPLU; ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); + if (ret_val) + return ret_val; /* LPLU and SmartSpeed are mutually exclusive. LPLU is used * during Dx states where the power conservation is most * important. During driver activity we should enable diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index 69a2329ea463..db79c4e6413e 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -1,8 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright(c) 1999 - 2018 Intel Corporation. */ -#ifndef _E1000_HW_H_ -#define _E1000_HW_H_ +#ifndef _E1000E_HW_H_ +#define _E1000E_HW_H_ #include "regs.h" #include "defines.h" @@ -714,4 +714,4 @@ struct e1000_hw { #include "80003es2lan.h" #include "ich8lan.h" -#endif +#endif /* _E1000E_HW_H_ */ diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index e9b82c209c2d..a0948002ddf8 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -5974,15 +5974,19 @@ static void e1000_reset_task(struct work_struct *work) struct e1000_adapter *adapter; adapter = container_of(work, struct e1000_adapter, reset_task); + rtnl_lock(); /* don't run the task if already down */ - if (test_bit(__E1000_DOWN, &adapter->state)) + if (test_bit(__E1000_DOWN, &adapter->state)) { + rtnl_unlock(); return; + } if (!(adapter->flags & FLAG_RESTART_NOW)) { e1000e_dump(adapter); e_err("Reset adapter unexpectedly\n"); } e1000e_reinit_locked(adapter); + rtnl_unlock(); } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 353deae139f9..17f3b800640e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -3259,6 +3259,17 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) } /** + * i40e_rx_offset - Return expected offset into page to access data + * @rx_ring: Ring we are requesting offset of + * + * Returns the offset value for ring into the data buffer. + */ +static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring) +{ + return ring_uses_build_skb(rx_ring) ? 
I40E_SKB_PAD : 0; +} + +/** * i40e_configure_rx_ring - Configure a receive ring context * @ring: The Rx ring to configure * @@ -3369,6 +3380,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) else set_ring_build_skb_enabled(ring); + ring->rx_offset = i40e_rx_offset(ring); + /* cache tail for quicker writes, and clear the reg before use */ ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); writel(0, ring->tail); diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 627794b31e33..5747a99122fb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1570,17 +1570,6 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring) } /** - * i40e_rx_offset - Return expected offset into page to access data - * @rx_ring: Ring we are requesting offset of - * - * Returns the offset value for ring into the data buffer. - */ -static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring) -{ - return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0; -} - -/** * i40e_setup_rx_descriptors - Allocate Rx descriptors * @rx_ring: Rx descriptor ring (for a specific queue) to setup * @@ -1608,7 +1597,6 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; - rx_ring->rx_offset = i40e_rx_offset(rx_ring); /* XDP RX-queue info only needed for RX rings exposed to XDP */ if (rx_ring->vsi->type == I40E_VSI_MAIN) { diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 0a867d64d467..dc5b3c06d1e0 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -1776,7 +1776,8 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter) goto err_alloc; } - if (iavf_process_config(adapter)) + err = iavf_process_config(adapter); + if (err) goto err_alloc; adapter->current_op = VIRTCHNL_OP_UNKNOWN; diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 3124a3bf519a..1148d768f8ed 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -275,6 +275,22 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) } /** + * ice_rx_offset - Return expected offset into page to access data + * @rx_ring: Ring we are requesting offset of + * + * Returns the offset value for ring into the data buffer. 
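/* Illustrative sketch of the i40e_rx_offset()/ice_rx_offset() changes around
 * here: the Rx data offset is now computed at ring-configuration time, after
 * the build_skb/XDP decisions are known, and cached in ring->rx_offset.
 * DEMO_SKB_PAD and DEMO_XDP_HEADROOM are invented stand-ins for the drivers'
 * constants.
 */
#include <stdio.h>

#define DEMO_SKB_PAD      64
#define DEMO_XDP_HEADROOM 256

struct demo_ring {
	int uses_build_skb;
	int xdp_enabled;
	unsigned int rx_offset;
};

static unsigned int demo_rx_offset(const struct demo_ring *r)
{
	if (r->uses_build_skb)
		return DEMO_SKB_PAD;
	if (r->xdp_enabled)
		return DEMO_XDP_HEADROOM;
	return 0;
}

int main(void)
{
	struct demo_ring r = { .uses_build_skb = 0, .xdp_enabled = 1 };

	/* decided once, when the ring is configured */
	r.rx_offset = demo_rx_offset(&r);
	printf("rx_offset = %u\n", r.rx_offset);
	return 0;
}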
+ */ +static unsigned int ice_rx_offset(struct ice_ring *rx_ring) +{ + if (ice_ring_uses_build_skb(rx_ring)) + return ICE_SKB_PAD; + else if (ice_is_xdp_ena_vsi(rx_ring->vsi)) + return XDP_PACKET_HEADROOM; + + return 0; +} + +/** * ice_setup_rx_ctx - Configure a receive ring context * @ring: The Rx ring to configure * @@ -413,11 +429,15 @@ int ice_setup_rx_ctx(struct ice_ring *ring) else ice_set_ring_build_skb_ena(ring); + ring->rx_offset = ice_rx_offset(ring); + /* init queue specific tail register */ ring->tail = hw->hw_addr + QRX_TAIL(pf_q); writel(0, ring->tail); if (ring->xsk_pool) { + bool ok; + if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) { dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n", num_bufs, ring->q_index); @@ -426,8 +446,8 @@ int ice_setup_rx_ctx(struct ice_ring *ring) return 0; } - err = ice_alloc_rx_bufs_zc(ring, num_bufs); - if (err) + ok = ice_alloc_rx_bufs_zc(ring, num_bufs); + if (!ok) dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n", ring->q_index, pf_q); return 0; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index b7dc25da1202..b91dcfd12727 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -444,22 +444,6 @@ void ice_free_rx_ring(struct ice_ring *rx_ring) } /** - * ice_rx_offset - Return expected offset into page to access data - * @rx_ring: Ring we are requesting offset of - * - * Returns the offset value for ring into the data buffer. - */ -static unsigned int ice_rx_offset(struct ice_ring *rx_ring) -{ - if (ice_ring_uses_build_skb(rx_ring)) - return ICE_SKB_PAD; - else if (ice_is_xdp_ena_vsi(rx_ring->vsi)) - return XDP_PACKET_HEADROOM; - - return 0; -} - -/** * ice_setup_rx_ring - Allocate the Rx descriptors * @rx_ring: the Rx ring to set up * @@ -493,7 +477,6 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring) rx_ring->next_to_use = 0; rx_ring->next_to_clean = 0; - rx_ring->rx_offset = ice_rx_offset(rx_ring); if (ice_is_xdp_ena_vsi(rx_ring->vsi)) WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog); diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 83f3c9574ed1..9f94d9159acd 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -358,18 +358,18 @@ xsk_pool_if_up: * This function allocates a number of Rx buffers from the fill ring * or the internal recycle mechanism and places them on the Rx ring. * - * Returns false if all allocations were successful, true if any fail. + * Returns true if all allocations were successful, false if any fail. 
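/* Illustrative sketch of the surrounding ice_alloc_rx_bufs_zc() change: the
 * function now returns true when every buffer was allocated and false on any
 * failure, and callers test "!ok".  The allocator below is invented purely to
 * show the convention.
 */
#include <stdbool.h>
#include <stdio.h>

/* returns true if all 'count' buffers were allocated, false if any failed */
static bool demo_alloc_bufs(int count, int available)
{
	return count <= available;
}

int main(void)
{
	bool ok = demo_alloc_bufs(64, 32);

	if (!ok)
		printf("failed to allocate some buffers\n");
	return 0;
}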
*/ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count) { union ice_32b_rx_flex_desc *rx_desc; u16 ntu = rx_ring->next_to_use; struct ice_rx_buf *rx_buf; - bool ret = false; + bool ok = true; dma_addr_t dma; if (!count) - return false; + return true; rx_desc = ICE_RX_DESC(rx_ring, ntu); rx_buf = &rx_ring->rx_buf[ntu]; @@ -377,7 +377,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count) do { rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool); if (!rx_buf->xdp) { - ret = true; + ok = false; break; } @@ -402,7 +402,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count) ice_release_rx_desc(rx_ring, ntu); } - return ret; + return ok; } /** diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h index 5d87957b2627..44111f65afc7 100644 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -1,8 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright(c) 2007 - 2018 Intel Corporation. */ -#ifndef _E1000_HW_H_ -#define _E1000_HW_H_ +#ifndef _E1000_IGB_HW_H_ +#define _E1000_IGB_HW_H_ #include <linux/types.h> #include <linux/delay.h> @@ -551,4 +551,4 @@ s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); -#endif /* _E1000_HW_H_ */ +#endif /* _E1000_IGB_HW_H_ */ diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index aaa954aae574..7bda8c5edea5 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -748,8 +748,8 @@ void igb_ptp_suspend(struct igb_adapter *adapter); void igb_ptp_rx_hang(struct igb_adapter *adapter); void igb_ptp_tx_hang(struct igb_adapter *adapter); void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb); -void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, - struct sk_buff *skb); +int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, + struct sk_buff *skb); int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); void igb_set_flag_queue_pairs(struct igb_adapter *, const u32); diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 878b31d534ec..a45cd2b416c8 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -8214,7 +8214,8 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring, new_buff->pagecnt_bias = old_buff->pagecnt_bias; } -static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer) +static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, + int rx_buf_pgcnt) { unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; struct page *page = rx_buffer->page; @@ -8225,7 +8226,7 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer) #if (PAGE_SIZE < 8192) /* if we are only owner of page we can reuse it */ - if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) + if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1)) return false; #else #define IGB_LAST_OFFSET \ @@ -8301,9 +8302,10 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring, return NULL; if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) { - igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb); - xdp->data += IGB_TS_HDR_LEN; - size -= IGB_TS_HDR_LEN; + if 
(!igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb)) { + xdp->data += IGB_TS_HDR_LEN; + size -= IGB_TS_HDR_LEN; + } } /* Determine available headroom for copy */ @@ -8364,8 +8366,8 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring, /* pull timestamp out of packet data */ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { - igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb); - __skb_pull(skb, IGB_TS_HDR_LEN); + if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb)) + __skb_pull(skb, IGB_TS_HDR_LEN); } /* update buffer offset */ @@ -8614,11 +8616,17 @@ static unsigned int igb_rx_offset(struct igb_ring *rx_ring) } static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring, - const unsigned int size) + const unsigned int size, int *rx_buf_pgcnt) { struct igb_rx_buffer *rx_buffer; rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + *rx_buf_pgcnt = +#if (PAGE_SIZE < 8192) + page_count(rx_buffer->page); +#else + 0; +#endif prefetchw(rx_buffer->page); /* we are reusing so sync this buffer for CPU use */ @@ -8634,9 +8642,9 @@ static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring, } static void igb_put_rx_buffer(struct igb_ring *rx_ring, - struct igb_rx_buffer *rx_buffer) + struct igb_rx_buffer *rx_buffer, int rx_buf_pgcnt) { - if (igb_can_reuse_rx_page(rx_buffer)) { + if (igb_can_reuse_rx_page(rx_buffer, rx_buf_pgcnt)) { /* hand second half of page back to the ring */ igb_reuse_rx_page(rx_ring, rx_buffer); } else { @@ -8664,6 +8672,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) unsigned int xdp_xmit = 0; struct xdp_buff xdp; u32 frame_sz = 0; + int rx_buf_pgcnt; /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */ #if (PAGE_SIZE < 8192) @@ -8693,7 +8702,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) */ dma_rmb(); - rx_buffer = igb_get_rx_buffer(rx_ring, size); + rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt); /* retrieve a buffer from the ring */ if (!skb) { @@ -8736,7 +8745,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) break; } - igb_put_rx_buffer(rx_ring, rx_buffer); + igb_put_rx_buffer(rx_ring, rx_buffer, rx_buf_pgcnt); cleaned_count++; /* fetch next buffer in frame if non-eop */ diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 7cc5428c3b3d..86a576201f5f 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -856,6 +856,9 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) dev_kfree_skb_any(skb); } +#define IGB_RET_PTP_DISABLED 1 +#define IGB_RET_PTP_INVALID 2 + /** * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp * @q_vector: Pointer to interrupt specific structure @@ -864,19 +867,29 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) * * This function is meant to retrieve a timestamp from the first buffer of an * incoming frame. The value is stored in little endian format starting on - * byte 8. 
+ * byte 8 + * + * Returns: 0 if success, nonzero if failure **/ -void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, - struct sk_buff *skb) +int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, + struct sk_buff *skb) { - __le64 *regval = (__le64 *)va; struct igb_adapter *adapter = q_vector->adapter; + __le64 *regval = (__le64 *)va; int adjust = 0; + if (!(adapter->ptp_flags & IGB_PTP_ENABLED)) + return IGB_RET_PTP_DISABLED; + /* The timestamp is recorded in little endian format. * DWORD: 0 1 2 3 * Field: Reserved Reserved SYSTIML SYSTIMH */ + + /* check reserved dwords are zero, be/le doesn't matter for zero */ + if (regval[0]) + return IGB_RET_PTP_INVALID; + igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), le64_to_cpu(regval[1])); @@ -896,6 +909,8 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, } skb_hwtstamps(skb)->hwtstamp = ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust); + + return 0; } /** @@ -906,13 +921,15 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, * This function is meant to retrieve a timestamp from the internal registers * of the adapter and store it in the skb. **/ -void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, - struct sk_buff *skb) +void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb) { struct igb_adapter *adapter = q_vector->adapter; struct e1000_hw *hw = &adapter->hw; - u64 regval; int adjust = 0; + u64 regval; + + if (!(adapter->ptp_flags & IGB_PTP_ENABLED)) + return; /* If this bit is set, then the RX registers contain the time stamp. No * other packet will be time stamped until we read these registers, so diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 5d2809dfd06a..1b08a7dc7bc4 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -547,7 +547,7 @@ void igc_ptp_init(struct igc_adapter *adapter); void igc_ptp_reset(struct igc_adapter *adapter); void igc_ptp_suspend(struct igc_adapter *adapter); void igc_ptp_stop(struct igc_adapter *adapter); -void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va, +void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va, struct sk_buff *skb); int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index 824a6c454bca..8722294ab90c 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -1711,6 +1711,9 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev, Autoneg); } + /* Set pause flow control settings */ + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + switch (hw->fc.requested_mode) { case igc_fc_full: ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); @@ -1725,9 +1728,7 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev, Asym_Pause); break; default: - ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); - ethtool_link_ksettings_add_link_mode(cmd, advertising, - Asym_Pause); + break; } status = pm_runtime_suspended(&adapter->pdev->dev) ? 
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 7ac9597ddb84..4d989ebc9713 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -3831,10 +3831,19 @@ static void igc_reset_task(struct work_struct *work) adapter = container_of(work, struct igc_adapter, reset_task); + rtnl_lock(); + /* If we're already down or resetting, just bail */ + if (test_bit(__IGC_DOWN, &adapter->state) || + test_bit(__IGC_RESETTING, &adapter->state)) { + rtnl_unlock(); + return; + } + igc_rings_dump(adapter); igc_regs_dump(adapter); netdev_err(adapter->netdev, "Reset adapter\n"); igc_reinit_locked(adapter); + rtnl_unlock(); } /** diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index ac0b9c85da7c..545f4d0e67cf 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -152,46 +152,54 @@ static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter, } /** - * igc_ptp_rx_pktstamp - retrieve Rx per packet timestamp + * igc_ptp_rx_pktstamp - Retrieve timestamp from Rx packet buffer * @q_vector: Pointer to interrupt specific structure * @va: Pointer to address containing Rx buffer * @skb: Buffer containing timestamp and packet * - * This function is meant to retrieve the first timestamp from the - * first buffer of an incoming frame. The value is stored in little - * endian format starting on byte 0. There's a second timestamp - * starting on byte 8. - **/ -void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va, + * This function retrieves the timestamp saved in the beginning of packet + * buffer. While two timestamps are available, one in timer0 reference and the + * other in timer1 reference, this function considers only the timestamp in + * timer0 reference. + */ +void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va, struct sk_buff *skb) { struct igc_adapter *adapter = q_vector->adapter; - __le64 *regval = (__le64 *)va; - int adjust = 0; - - /* The timestamp is recorded in little endian format. - * DWORD: | 0 | 1 | 2 | 3 - * Field: | Timer0 Low | Timer0 High | Timer1 Low | Timer1 High + u64 regval; + int adjust; + + /* Timestamps are saved in little endian at the beginning of the packet + * buffer following the layout: + * + * DWORD: | 0 | 1 | 2 | 3 | + * Field: | Timer1 SYSTIML | Timer1 SYSTIMH | Timer0 SYSTIML | Timer0 SYSTIMH | + * + * SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds + * part of the timestamp. 
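[Illustration, not part of the patch] The DWORD layout described in the comment above can be exercised standalone. A minimal userspace sketch, assuming only that ordering (Timer1 low/high, then Timer0 low/high, little endian); the helper name and test values are made up:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Rebuild the raw 64-bit Timer0 value (SYSTIMH:SYSTIML) from the four
 * little-endian DWORDs at the start of the packet buffer. */
static uint64_t timer0_raw(const uint32_t *va)
{
	uint64_t lo = le32toh(va[2]);	/* Timer0 SYSTIML (nanoseconds part) */
	uint64_t hi = le32toh(va[3]);	/* Timer0 SYSTIMH (seconds part) */

	return (hi << 32) | lo;
}

int main(void)
{
	uint32_t buf[4] = { 0, 0, htole32(0x1234), htole32(0x5) };

	printf("0x%llx\n", (unsigned long long)timer0_raw(buf));	/* 0x500001234 */
	return 0;
}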
*/ - igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), - le64_to_cpu(regval[0])); - - /* adjust timestamp for the RX latency based on link speed */ - if (adapter->hw.mac.type == igc_i225) { - switch (adapter->link_speed) { - case SPEED_10: - adjust = IGC_I225_RX_LATENCY_10; - break; - case SPEED_100: - adjust = IGC_I225_RX_LATENCY_100; - break; - case SPEED_1000: - adjust = IGC_I225_RX_LATENCY_1000; - break; - case SPEED_2500: - adjust = IGC_I225_RX_LATENCY_2500; - break; - } + regval = le32_to_cpu(va[2]); + regval |= (u64)le32_to_cpu(va[3]) << 32; + igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); + + /* Adjust timestamp for the RX latency based on link speed */ + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGC_I225_RX_LATENCY_10; + break; + case SPEED_100: + adjust = IGC_I225_RX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGC_I225_RX_LATENCY_1000; + break; + case SPEED_2500: + adjust = IGC_I225_RX_LATENCY_2500; + break; + default: + adjust = 0; + netdev_warn_once(adapter->netdev, "Imprecise timestamp\n"); + break; } skb_hwtstamps(skb)->hwtstamp = ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index eca73526ac86..54d47265a7ac 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -575,6 +575,11 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs) return -EINVAL; } + if (xs->props.mode != XFRM_MODE_TRANSPORT) { + netdev_err(dev, "Unsupported mode for ipsec offload\n"); + return -EINVAL; + } + if (ixgbe_ipsec_check_mgmt_ip(xs)) { netdev_err(dev, "IPsec IP addr clash with mgmt filters\n"); return -EINVAL; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index fae84202d870..03d9aad516d4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -4118,6 +4118,8 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, #endif } + ring->rx_offset = ixgbe_rx_offset(ring); + if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) { u32 xsk_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool); @@ -6578,7 +6580,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; - rx_ring->rx_offset = ixgbe_rx_offset(rx_ring); /* XDP RX-queue info */ if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev, @@ -9565,8 +9566,10 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask); err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter, input->sw_idx, queue); - if (!err) - ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); + if (err) + goto err_out_w_lock; + + ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); spin_unlock(&adapter->fdir_perfect_lock); if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c index 5170dd9d8705..caaea2c920a6 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c +++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c @@ -272,6 +272,11 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs) return -EINVAL; } + if (xs->props.mode != XFRM_MODE_TRANSPORT) { + netdev_err(dev, "Unsupported mode for ipsec offload\n"); + return -EINVAL; + } + if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { struct 
rx_sa rsa; diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index 7fe15a3286f4..fe0989c0fc25 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -6,7 +6,7 @@ config NET_VENDOR_MARVELL bool "Marvell devices" default y - depends on PCI || CPU_PXA168 || MV64X60 || PPC32 || PLAT_ORION || INET || COMPILE_TEST + depends on PCI || CPU_PXA168 || PPC32 || PLAT_ORION || INET || COMPILE_TEST help If you have a network (Ethernet) card belonging to this class, say Y. @@ -19,7 +19,7 @@ if NET_VENDOR_MARVELL config MV643XX_ETH tristate "Marvell Discovery (643XX) and Orion ethernet support" - depends on MV64X60 || PPC32 || PLAT_ORION || COMPILE_TEST + depends on PPC32 || PLAT_ORION || COMPILE_TEST depends on INET select PHYLIB select MVMDIO diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 90e6111ce534..3bfb659b5c99 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2684,7 +2684,7 @@ static const struct of_device_id mv643xx_eth_shared_ids[] = { MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids); #endif -#if defined(CONFIG_OF_IRQ) && !defined(CONFIG_MV64X60) +#ifdef CONFIG_OF_IRQ #define mv643xx_eth_property(_np, _name, _v) \ do { \ u32 tmp; \ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index 9caa375d01b1..68deae529bc9 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -56,7 +56,9 @@ static bool is_dev_rpm(void *cgxd) bool is_lmac_valid(struct cgx *cgx, int lmac_id) { - return cgx && test_bit(lmac_id, &cgx->lmac_bmap); + if (!cgx || lmac_id < 0 || lmac_id >= MAX_LMAC_PER_CGX) + return false; + return test_bit(lmac_id, &cgx->lmac_bmap); } struct mac_ops *get_mac_ops(void *cgxd) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h index b192692b4fc4..5c372d2c24a1 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h @@ -13499,8 +13499,6 @@ static struct npc_mcam_kex npc_mkex_default = { [NPC_LT_LC_IP] = { /* SIP+DIP: 8 bytes, KW2[63:0] */ KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10), - /* TOS: 1 byte, KW1[63:56] */ - KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0xf), }, /* Layer C: IPv6 */ [NPC_LT_LC_IP6] = { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index d9a1a71c7ccc..ab24a5e8ee8a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -2462,8 +2462,10 @@ static void rvu_unregister_interrupts(struct rvu *rvu) INTR_MASK(rvu->hw->total_pfs) & ~1ULL); for (irq = 0; irq < rvu->num_vec; irq++) { - if (rvu->irq_allocated[irq]) + if (rvu->irq_allocated[irq]) { free_irq(pci_irq_vector(rvu->pdev, irq), rvu); + rvu->irq_allocated[irq] = false; + } } pci_free_irq_vectors(rvu->pdev); @@ -2975,8 +2977,8 @@ static void rvu_remove(struct pci_dev *pdev) struct rvu *rvu = pci_get_drvdata(pdev); rvu_dbg_exit(rvu); - rvu_unregister_interrupts(rvu); rvu_unregister_dl(rvu); + rvu_unregister_interrupts(rvu); rvu_flr_wq_destroy(rvu); rvu_cgx_exit(rvu); rvu_fwdata_exit(rvu); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index fa6e46e36ae4..76f399229ddb 100644 --- 
a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -678,6 +678,7 @@ void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, u8 *intf, u8 *ena); bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature); u32 rvu_cgx_get_fifolen(struct rvu *rvu); +void *rvu_first_cgx_pdata(struct rvu *rvu); /* CPT APIs */ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index e668e482383a..6e2bf4fcd29c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -89,6 +89,21 @@ void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu) return rvu->cgx_idmap[cgx_id]; } +/* Return first enabled CGX instance if none are enabled then return NULL */ +void *rvu_first_cgx_pdata(struct rvu *rvu) +{ + int first_enabled_cgx = 0; + void *cgxd = NULL; + + for (; first_enabled_cgx < rvu->cgx_cnt_max; first_enabled_cgx++) { + cgxd = rvu_cgx_pdata(first_enabled_cgx, rvu); + if (cgxd) + break; + } + + return cgxd; +} + /* Based on P2X connectivity find mapped NIX block for a PF */ static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf, int cgx_id, int lmac_id) @@ -711,10 +726,9 @@ int rvu_mbox_handler_cgx_features_get(struct rvu *rvu, u32 rvu_cgx_get_fifolen(struct rvu *rvu) { struct mac_ops *mac_ops; - int rvu_def_cgx_id = 0; u32 fifo_len; - mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu)); + mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu)); fifo_len = mac_ops ? mac_ops->fifo_len : 0; return fifo_len; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index aa2ca8780b9c..de3968d2e5ce 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -234,12 +234,14 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { - int index, off = 0, flag = 0, go_back = 0, off_prev; + int index, off = 0, flag = 0, go_back = 0, len = 0; struct rvu *rvu = filp->private_data; int lf, pf, vf, pcifunc; struct rvu_block block; int bytes_not_copied; + int lf_str_size = 12; int buf_size = 2048; + char *lfs; char *buf; /* don't allow partial reads */ @@ -249,12 +251,20 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp, buf = kzalloc(buf_size, GFP_KERNEL); if (!buf) return -ENOSPC; - off += scnprintf(&buf[off], buf_size - 1 - off, "\npcifunc\t\t"); + + lfs = kzalloc(lf_str_size, GFP_KERNEL); + if (!lfs) { + kfree(buf); + return -ENOMEM; + } + off += scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size, + "pcifunc"); for (index = 0; index < BLK_COUNT; index++) - if (strlen(rvu->hw->block[index].name)) - off += scnprintf(&buf[off], buf_size - 1 - off, - "%*s\t", (index - 1) * 2, - rvu->hw->block[index].name); + if (strlen(rvu->hw->block[index].name)) { + off += scnprintf(&buf[off], buf_size - 1 - off, + "%-*s", lf_str_size, + rvu->hw->block[index].name); + } off += scnprintf(&buf[off], buf_size - 1 - off, "\n"); for (pf = 0; pf < rvu->hw->total_pfs; pf++) { for (vf = 0; vf <= rvu->hw->total_vfs; vf++) { @@ -263,14 +273,15 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp, continue; if (vf) { + sprintf(lfs, "PF%d:VF%d", pf, vf - 1); go_back = scnprintf(&buf[off], buf_size - 1 - off, - "PF%d:VF%d\t\t", pf, - vf - 1); + "%-*s", 
lf_str_size, lfs); } else { + sprintf(lfs, "PF%d", pf); go_back = scnprintf(&buf[off], buf_size - 1 - off, - "PF%d\t\t", pf); + "%-*s", lf_str_size, lfs); } off += go_back; @@ -278,20 +289,22 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp, block = rvu->hw->block[index]; if (!strlen(block.name)) continue; - off_prev = off; + len = 0; + lfs[len] = '\0'; for (lf = 0; lf < block.lf.max; lf++) { if (block.fn_map[lf] != pcifunc) continue; flag = 1; - off += scnprintf(&buf[off], buf_size - 1 - - off, "%3d,", lf); + len += sprintf(&lfs[len], "%d,", lf); } - if (flag && off_prev != off) - off--; - else - go_back++; + + if (flag) + len--; + lfs[len] = '\0'; off += scnprintf(&buf[off], buf_size - 1 - off, - "\t"); + "%-*s", lf_str_size, lfs); + if (!strlen(lfs)) + go_back += lf_str_size; } if (!flag) off -= go_back; @@ -303,6 +316,7 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp, } bytes_not_copied = copy_to_user(buffer, buf, off); + kfree(lfs); kfree(buf); if (bytes_not_copied) @@ -319,7 +333,6 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused) struct rvu *rvu = filp->private; struct pci_dev *pdev = NULL; struct mac_ops *mac_ops; - int rvu_def_cgx_id = 0; char cgx[10], lmac[10]; struct rvu_pfvf *pfvf; int pf, domain, blkid; @@ -327,7 +340,10 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused) u16 pcifunc; domain = 2; - mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu)); + mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu)); + /* There can be no CGX devices at all */ + if (!mac_ops) + return 0; seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n", mac_ops->name); for (pf = 0; pf < rvu->hw->total_pfs; pf++) { @@ -1818,7 +1834,6 @@ static void rvu_dbg_cgx_init(struct rvu *rvu) { struct mac_ops *mac_ops; unsigned long lmac_bmap; - int rvu_def_cgx_id = 0; int i, lmac_id; char dname[20]; void *cgx; @@ -1826,7 +1841,7 @@ static void rvu_dbg_cgx_init(struct rvu *rvu) if (!cgx_get_cgxcnt_max()) return; - mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu)); + mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu)); if (!mac_ops) return; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index d3000194e2d3..3d068b7d46bd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -2629,7 +2629,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) struct nix_rx_flowkey_alg *field; struct nix_rx_flowkey_alg tmp; u32 key_type, valid_key; - int l4_key_offset; + int l4_key_offset = 0; if (!alg) return -EINVAL; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 04bb0803a5c5..0bd49c7080a6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -2490,10 +2490,10 @@ int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu, index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry); if (index >= mcam->bmap_entries) break; + entry = index + 1; if (mcam->entry2cntr_map[index] != req->cntr) continue; - entry = index + 1; npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, index, req->cntr); } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c index 0dbbf38e0597..dc1778420978 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c 
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c @@ -257,17 +257,19 @@ int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc, int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc, u32 *rule_locs) { + u32 rule_cnt = nfc->rule_cnt; u32 location = 0; int idx = 0; int err = 0; nfc->data = pfvf->flow_cfg->ntuple_max_flows; - while ((!err || err == -ENOENT) && idx < nfc->rule_cnt) { + while ((!err || err == -ENOENT) && idx < rule_cnt) { err = otx2_get_flow(pfvf, nfc, location); if (!err) rule_locs[idx++] = location; location++; } + nfc->rule_cnt = rule_cnt; return err; } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 53ab1814d74b..2fd3d235d292 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -1672,6 +1672,7 @@ int otx2_stop(struct net_device *netdev) struct otx2_nic *pf = netdev_priv(netdev); struct otx2_cq_poll *cq_poll = NULL; struct otx2_qset *qset = &pf->qset; + struct otx2_rss_info *rss; int qidx, vec, wrk; netif_carrier_off(netdev); @@ -1684,6 +1685,10 @@ int otx2_stop(struct net_device *netdev) /* First stop packet Rx/Tx */ otx2_rxtx_enable(pf, false); + /* Clear RSS enable flag */ + rss = &pf->hw.rss_info; + rss->enable = false; + /* Cleanup Queue IRQ */ vec = pci_irq_vector(pf->pdev, pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START); diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index d1e4d42e497d..3712e1786091 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1544,8 +1544,8 @@ static int pxa168_eth_remove(struct platform_device *pdev) clk_disable_unprepare(pep->clk); mdiobus_unregister(pep->smi_bus); mdiobus_free(pep->smi_bus); - unregister_netdev(dev); cancel_work_sync(&pep->tx_timeout_task); + unregister_netdev(dev); free_netdev(dev); return 0; } diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c index a8641a407c06..96d2891f1675 100644 --- a/drivers/net/ethernet/mediatek/mtk_star_emac.c +++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c @@ -1225,8 +1225,6 @@ static int mtk_star_receive_packet(struct mtk_star_priv *priv) goto push_new_skb; } - desc_data.dma_addr = new_dma_addr; - /* We can't fail anymore at this point: it's safe to unmap the skb. 
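[Illustration, not part of the patch] The mtk_star change here defers the dma_addr update until after the old buffer has been unmapped and delivered, since the unmap path consumes desc_data.dma_addr. A sketch of the pattern with hypothetical types and callbacks, not the driver's API:

struct rx_desc {
	void *dma_addr;
};

/* Unmap whatever the descriptor currently points at, hand the old
 * buffer up the stack, and only then repoint the descriptor at the new
 * mapping; writing the new address first would unmap the wrong buffer. */
static void refill_desc(struct rx_desc *d, void *new_dma_addr,
			void (*unmap)(void *), void (*deliver)(void))
{
	unmap(d->dma_addr);
	deliver();
	d->dma_addr = new_dma_addr;
}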
*/ mtk_star_dma_unmap_rx(priv, &desc_data); @@ -1236,6 +1234,9 @@ static int mtk_star_receive_packet(struct mtk_star_priv *priv) desc_data.skb->dev = ndev; netif_receive_skb(desc_data.skb); + /* update dma_addr for new skb */ + desc_data.dma_addr = new_dma_addr; + push_new_skb: desc_data.len = skb_tailroom(new_skb); desc_data.skb = new_skb; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 23849f2b9c25..1434df66fcf2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -47,7 +47,7 @@ #define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff) #define EN_ETHTOOL_WORD_MASK cpu_to_be32(0xffffffff) -static int mlx4_en_moderation_update(struct mlx4_en_priv *priv) +int mlx4_en_moderation_update(struct mlx4_en_priv *priv) { int i, t; int err = 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 51b9700fce83..5d0c9c62382d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -3554,6 +3554,8 @@ int mlx4_en_reset_config(struct net_device *dev, en_err(priv, "Failed starting port\n"); } + if (!err) + err = mlx4_en_moderation_update(priv); out: mutex_unlock(&mdev->state_lock); kfree(tmp); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index e8ed23190de0..f3d1a20201ef 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -775,6 +775,7 @@ void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev); #define DEV_FEATURE_CHANGED(dev, new_features, feature) \ ((dev->features & feature) ^ (new_features & feature)) +int mlx4_en_moderation_update(struct mlx4_en_priv *priv); int mlx4_en_reset_config(struct net_device *dev, struct hwtstamp_config ts_config, netdev_features_t new_features); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 7435fe6829b6..304b296fe8b9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -92,14 +92,15 @@ struct page_pool; MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0) #define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER) -#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2) +#define MLX5_ALIGN_MTTS(mtts) (ALIGN(mtts, 8)) +#define MLX5_ALIGNED_MTTS_OCTW(mtts) ((mtts) / 2) +#define MLX5_MTT_OCTW(mtts) (MLX5_ALIGNED_MTTS_OCTW(MLX5_ALIGN_MTTS(mtts))) /* Add another page to MLX5E_REQUIRED_WQE_MTTS as a buffer between * WQEs, This page will absorb write overflow by the hardware, when * receiving packets larger than MTU. These oversize packets are * dropped by the driver at a later stage. 
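[Illustration, not part of the patch] The new MLX5_MTT_OCTW() chain above reduces to simple integer arithmetic; a plain C re-derivation (stand-in macros, not the driver's definitions) showing MTT counts padded to a multiple of 8 with two MTTs per octword:

#include <stdio.h>

#define ALIGN_UP(x, a)		(((x) + (a) - 1) / (a) * (a))
#define ALIGNED_MTTS_OCTW(m)	((m) / 2)
#define MTT_OCTW(m)		ALIGNED_MTTS_OCTW(ALIGN_UP((m), 8))

int main(void)
{
	/* 8 pages per WQE plus the overflow page -> 9 MTTs -> 16 after
	 * alignment -> 8 octwords. */
	printf("%d\n", MTT_OCTW(9));
	return 0;
}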
*/ -#define MLX5E_REQUIRED_WQE_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE + 1, 8)) -#define MLX5E_LOG_ALIGNED_MPWQE_PPW (ilog2(MLX5E_REQUIRED_WQE_MTTS)) +#define MLX5E_REQUIRED_WQE_MTTS (MLX5_ALIGN_MTTS(MLX5_MPWRQ_PAGES_PER_WQE + 1)) #define MLX5E_REQUIRED_MTTS(wqes) (wqes * MLX5E_REQUIRED_WQE_MTTS) #define MLX5E_MAX_RQ_NUM_MTTS \ ((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index f3f6eb081948..b2cd29847a37 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -1181,7 +1181,8 @@ int mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec) mlx5e_tc_match_to_reg_get_match(spec, CTSTATE_TO_REG, &ctstate, &ctstate_mask); - if (ctstate_mask) + + if ((ctstate & ctstate_mask) == MLX5_CT_STATE_TRK_BIT) return -EOPNOTSUPP; ctstate_mask |= MLX5_CT_STATE_TRK_BIT; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index f8075a604605..172e0474f2e6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -685,14 +685,14 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv, u16 vport_num; int err = 0; - if (flow_attr->ip_version == 4) { + if (flow_attr->tun_ip_version == 4) { /* Addresses are swapped for decap */ attr.fl.fl4.saddr = esw_attr->rx_tun_attr->dst_ip.v4; attr.fl.fl4.daddr = esw_attr->rx_tun_attr->src_ip.v4; err = mlx5e_route_lookup_ipv4_get(priv, priv->netdev, &attr); } #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) - else if (flow_attr->ip_version == 6) { + else if (flow_attr->tun_ip_version == 6) { /* Addresses are swapped for decap */ attr.fl.fl6.saddr = esw_attr->rx_tun_attr->dst_ip.v6; attr.fl.fl6.daddr = esw_attr->rx_tun_attr->src_ip.v6; @@ -718,10 +718,10 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv, esw_attr->rx_tun_attr->decap_vport = vport_num; out: - if (flow_attr->ip_version == 4) + if (flow_attr->tun_ip_version == 4) mlx5e_route_lookup_ipv4_put(&attr); #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) - else if (flow_attr->ip_version == 6) + else if (flow_attr->tun_ip_version == 6) mlx5e_route_lookup_ipv6_put(&attr); #endif return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c index 6a116335bb21..7f7b0f6dcdf9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c @@ -89,6 +89,7 @@ int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow, * required to establish routing. 
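[Illustration, not part of the patch] The relaxed tc_ct.c check above now refuses to add the implicit "-trk" match only when the rule's masked ctstate is exactly the tracked bit. A standalone restatement of that test in plain C; the bit value is a stand-in, not the real MLX5_CT_STATE_TRK_BIT:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CT_STATE_TRK_BIT	(1u << 0)	/* stand-in value */

/* Mirrors the new condition: reject only when the masked ctstate equals
 * the tracked bit, i.e. the rule already matches "+trk" and nothing else. */
static bool conflicts_with_no_trk(uint32_t state, uint32_t mask)
{
	return (state & mask) == CT_STATE_TRK_BIT;
}

int main(void)
{
	printf("%d\n", conflicts_with_no_trk(CT_STATE_TRK_BIT, CT_STATE_TRK_BIT));	/* 1 */
	printf("%d\n", conflicts_with_no_trk(0, CT_STATE_TRK_BIT));			/* 0 */
	return 0;
}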
*/ flow_flag_set(flow, TUN_RX); + flow->attr->tun_ip_version = ip_version; return 0; } @@ -1091,7 +1092,7 @@ int mlx5e_attach_decap_route(struct mlx5e_priv *priv, if (err || !esw_attr->rx_tun_attr->decap_vport) goto out; - key.ip_version = attr->ip_version; + key.ip_version = attr->tun_ip_version; if (key.ip_version == 4) key.endpoint_ip.v4 = esw_attr->rx_tun_attr->dst_ip.v4; else diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c index e472ed0eacfb..7ed3f9f79f11 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c @@ -227,6 +227,10 @@ static int mlx5e_tc_tun_parse_geneve_options(struct mlx5e_priv *priv, option_key = (struct geneve_opt *)&enc_opts.key->data[0]; option_mask = (struct geneve_opt *)&enc_opts.mask->data[0]; + if (option_mask->opt_class == 0 && option_mask->type == 0 && + !memchr_inv(option_mask->opt_data, 0, option_mask->length * 4)) + return 0; + if (option_key->length > max_tlv_option_data_len) { NL_SET_ERR_MSG_MOD(extack, "Matching on GENEVE options: unsupported option len"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index abdf721bb264..f5f2a8fd0046 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1887,6 +1887,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev, { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; + int err; if (!MLX5_CAP_GEN(mdev, cqe_compression)) return -EOPNOTSUPP; @@ -1896,7 +1897,10 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev, return -EINVAL; } - mlx5e_modify_rx_cqe_compression_locked(priv, enable); + err = mlx5e_modify_rx_cqe_compression_locked(priv, enable); + if (err) + return err; + priv->channels.params.rx_cqe_compress_def = enable; return 0; @@ -2014,8 +2018,13 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable) */ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { + struct mlx5e_params old_params; + + old_params = priv->channels.params; priv->channels.params = new_channels.params; err = mlx5e_num_channels_changed(priv); + if (err) + priv->channels.params = old_params; goto out; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index ec2fcb2a2977..158f947a8503 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -334,9 +334,9 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq rq->wqe_overflow.addr); } -static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix) +static u64 mlx5e_get_mpwqe_offset(u16 wqe_ix) { - return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT; + return MLX5E_REQUIRED_MTTS(wqe_ix) << PAGE_SHIFT; } static void mlx5e_init_frags_partition(struct mlx5e_rq *rq) @@ -577,7 +577,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i); u32 byte_count = rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz; - u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i); + u64 dma_offset = mlx5e_get_mpwqe_offset(i); wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom); wqe->data[0].byte_count = cpu_to_be32(byte_count); @@ -2368,8 +2368,9 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params, 
{ switch (params->rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - return order_base_2(MLX5E_UMR_WQEBBS) + - mlx5e_get_rq_log_wq_sz(rqp->rqc); + return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, + order_base_2(MLX5E_UMR_WQEBBS) + + mlx5e_get_rq_log_wq_sz(rqp->rqc)); default: /* MLX5_WQ_TYPE_CYCLIC */ return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; } @@ -2502,8 +2503,10 @@ void mlx5e_close_channels(struct mlx5e_channels *chs) { int i; - if (chs->port_ptp) + if (chs->port_ptp) { mlx5e_port_ptp_close(chs->port_ptp); + chs->port_ptp = NULL; + } for (i = 0; i < chs->num; i++) mlx5e_close_channel(chs->c[i]); @@ -3815,6 +3818,15 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s) s->tx_dropped += sq_stats->dropped; } } + if (priv->port_ptp_opened) { + for (i = 0; i < priv->max_opened_tc; i++) { + struct mlx5e_sq_stats *sq_stats = &priv->port_ptp_stats.sq[i]; + + s->tx_packets += sq_stats->packets; + s->tx_bytes += sq_stats->bytes; + s->tx_dropped += sq_stats->dropped; + } + } } void @@ -3834,10 +3846,17 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) } if (mlx5e_is_uplink_rep(priv)) { + struct mlx5e_vport_stats *vstats = &priv->stats.vport; + stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok); stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok); stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok); stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok); + + /* vport multicast also counts packets that are dropped due to steering + * or rx out of buffer + */ + stats->multicast = VPORT_COUNTER_GET(vstats, received_eth_multicast.packets); } else { mlx5e_fold_sw_stats64(priv, stats); } @@ -4683,8 +4702,10 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) struct mlx5e_channel *c = priv->channels.c[i]; mlx5e_rq_replace_xdp_prog(&c->rq, prog); - if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) + if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) { + bpf_prog_inc(prog); mlx5e_rq_replace_xdp_prog(&c->xskrq, prog); + } } unlock: @@ -4958,6 +4979,11 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 priv->max_nch); params->num_tc = 1; + /* Set an initial non-zero value, so that mlx5e_select_queue won't + * divide by zero if called before first activating channels. + */ + priv->num_tc_x_num_ch = params->num_channels * params->num_tc; + /* SQ */ params->log_sq_size = is_kdump_kernel() ? 
MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE : @@ -5474,8 +5500,6 @@ int mlx5e_priv_init(struct mlx5e_priv *priv, struct net_device *netdev, struct mlx5_core_dev *mdev) { - memset(priv, 0, sizeof(*priv)); - /* priv init */ priv->mdev = mdev; priv->netdev = netdev; @@ -5508,12 +5532,18 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv) { int i; + /* bail if change profile failed and also rollback failed */ + if (!priv->mdev) + return; + destroy_workqueue(priv->wq); free_cpumask_var(priv->scratchpad.cpumask); for (i = 0; i < priv->htb.max_qos_sqs; i++) kfree(priv->htb.qos_sq_stats[i]); kvfree(priv->htb.qos_sq_stats); + + memset(priv, 0, sizeof(*priv)); } struct net_device * @@ -5630,11 +5660,10 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv) } static int -mlx5e_netdev_attach_profile(struct mlx5e_priv *priv, +mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mdev, const struct mlx5e_profile *new_profile, void *new_ppriv) { - struct net_device *netdev = priv->netdev; - struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_priv *priv = netdev_priv(netdev); int err; err = mlx5e_priv_init(priv, netdev, mdev); @@ -5647,10 +5676,16 @@ mlx5e_netdev_attach_profile(struct mlx5e_priv *priv, priv->ppriv = new_ppriv; err = new_profile->init(priv->mdev, priv->netdev); if (err) - return err; + goto priv_cleanup; err = mlx5e_attach_netdev(priv); if (err) - new_profile->cleanup(priv); + goto profile_cleanup; + return err; + +profile_cleanup: + new_profile->cleanup(priv); +priv_cleanup: + mlx5e_priv_cleanup(priv); return err; } @@ -5659,13 +5694,14 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv, { unsigned int new_max_nch = mlx5e_calc_max_nch(priv, new_profile); const struct mlx5e_profile *orig_profile = priv->profile; + struct net_device *netdev = priv->netdev; + struct mlx5_core_dev *mdev = priv->mdev; void *orig_ppriv = priv->ppriv; int err, rollback_err; /* sanity */ if (new_max_nch != priv->max_nch) { - netdev_warn(priv->netdev, - "%s: Replacing profile with different max channels\n", + netdev_warn(netdev, "%s: Replacing profile with different max channels\n", __func__); return -EINVAL; } @@ -5675,22 +5711,19 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv, priv->profile->cleanup(priv); mlx5e_priv_cleanup(priv); - err = mlx5e_netdev_attach_profile(priv, new_profile, new_ppriv); + err = mlx5e_netdev_attach_profile(netdev, mdev, new_profile, new_ppriv); if (err) { /* roll back to original profile */ - netdev_warn(priv->netdev, "%s: new profile init failed, %d\n", - __func__, err); + netdev_warn(netdev, "%s: new profile init failed, %d\n", __func__, err); goto rollback; } return 0; rollback: - rollback_err = mlx5e_netdev_attach_profile(priv, orig_profile, orig_ppriv); - if (rollback_err) { - netdev_err(priv->netdev, - "%s: failed to rollback to orig profile, %d\n", + rollback_err = mlx5e_netdev_attach_profile(netdev, mdev, orig_profile, orig_ppriv); + if (rollback_err) + netdev_err(netdev, "%s: failed to rollback to orig profile, %d\n", __func__, rollback_err); - } return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 1b6ad94ebb10..249d8905e644 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -500,7 +500,6 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) struct mlx5e_icosq *sq = rq->icosq; struct mlx5_wq_cyc *wq = &sq->wq; struct mlx5e_umr_wqe *umr_wqe; - u16 xlt_offset = ix << 
(MLX5E_LOG_ALIGNED_MPWQE_PPW - 1); u16 pi; int err; int i; @@ -531,7 +530,8 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) umr_wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_UMR); - umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset); + umr_wqe->uctrl.xlt_offset = + cpu_to_be16(MLX5_ALIGNED_MTTS_OCTW(MLX5E_REQUIRED_MTTS(ix))); sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) { .wqe_type = MLX5E_ICOSQ_WQE_UMR_RX, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 0da69b98f38f..df2a0af854bb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -2296,6 +2296,16 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, *match_level = MLX5_MATCH_L4; } + /* Currenlty supported only for MPLS over UDP */ + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) && + !netif_is_bareudp(filter_dev)) { + NL_SET_ERR_MSG_MOD(extack, + "Matching on MPLS is supported only for MPLS over UDP"); + netdev_err(priv->netdev, + "Matching on MPLS is supported only for MPLS over UDP\n"); + return -EOPNOTSUPP; + } + return 0; } @@ -2899,6 +2909,37 @@ static int is_action_keys_supported(const struct flow_action_entry *act, return 0; } +static bool modify_tuple_supported(bool modify_tuple, bool ct_clear, + bool ct_flow, struct netlink_ext_ack *extack, + struct mlx5e_priv *priv, + struct mlx5_flow_spec *spec) +{ + if (!modify_tuple || ct_clear) + return true; + + if (ct_flow) { + NL_SET_ERR_MSG_MOD(extack, + "can't offload tuple modification with non-clear ct()"); + netdev_info(priv->netdev, + "can't offload tuple modification with non-clear ct()"); + return false; + } + + /* Add ct_state=-trk match so it will be offloaded for non ct flows + * (or after clear action), as otherwise, since the tuple is changed, + * we can't restore ct state + */ + if (mlx5_tc_ct_add_no_trk_match(spec)) { + NL_SET_ERR_MSG_MOD(extack, + "can't offload tuple modification with ct matches and no ct(clear) action"); + netdev_info(priv->netdev, + "can't offload tuple modification with ct matches and no ct(clear) action"); + return false; + } + + return true; +} + static bool modify_header_match_supported(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, struct flow_action *flow_action, @@ -2937,18 +2978,9 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv, return err; } - /* Add ct_state=-trk match so it will be offloaded for non ct flows - * (or after clear action), as otherwise, since the tuple is changed, - * we can't restore ct state - */ - if (!ct_clear && modify_tuple && - mlx5_tc_ct_add_no_trk_match(spec)) { - NL_SET_ERR_MSG_MOD(extack, - "can't offload tuple modify header with ct matches"); - netdev_info(priv->netdev, - "can't offload tuple modify header with ct matches"); + if (!modify_tuple_supported(modify_tuple, ct_clear, ct_flow, extack, + priv, spec)) return false; - } ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol); if (modify_ip_header && ip_proto != IPPROTO_TCP && @@ -4445,7 +4477,8 @@ static int apply_police_params(struct mlx5e_priv *priv, u64 rate, */ if (rate) { rate = (rate * BITS_PER_BYTE) + 500000; - rate_mbps = max_t(u64, do_div(rate, 1000000), 1); + do_div(rate, 1000000); + rate_mbps = max_t(u32, rate, 1); } err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index 
89003ae7775a..25c091795bcd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -79,6 +79,7 @@ struct mlx5_flow_attr { u8 inner_match_level; u8 outer_match_level; u8 ip_version; + u8 tun_ip_version; u32 flags; union { struct mlx5_esw_flow_attr esw_attr[0]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 94cb0217b4f3..8694b83968b4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -551,7 +551,8 @@ esw_setup_dests(struct mlx5_flow_destination *dest, if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) && MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve) && - mlx5_eswitch_vport_match_metadata_enabled(esw)) + mlx5_eswitch_vport_match_metadata_enabled(esw) && + MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level)) attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE; if (attr->dest_ft) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c index 80da50e12915..bd66ab2af5b5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c @@ -575,6 +575,7 @@ static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn, MLX5_SET(qpc, qpc, log_sq_size, ilog2(conn->qp.sq.size)); MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn); MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn); + MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(mdev)); MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma); if (MLX5_CAP_GEN(mdev, cqe_version) == 1) MLX5_SET(qpc, qpc, user_index, 0xFFFFFF); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 1eeca45cfcdf..6f7cef47e04c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -233,6 +233,7 @@ int mlx5i_create_underlay_qp(struct mlx5e_priv *priv) } qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); + MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(priv->mdev)); MLX5_SET(qpc, qpc, st, MLX5_QP_ST_UD); MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, @@ -694,6 +695,7 @@ static int mlx5i_check_required_hca_cap(struct mlx5_core_dev *mdev) static void mlx5_rdma_netdev_free(struct net_device *netdev) { struct mlx5e_priv *priv = mlx5i_epriv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; struct mlx5i_priv *ipriv = priv->ppriv; const struct mlx5e_profile *profile = priv->profile; @@ -702,7 +704,7 @@ static void mlx5_rdma_netdev_free(struct net_device *netdev) if (!ipriv->sub_interface) { mlx5i_pkey_qpn_ht_cleanup(netdev); - mlx5e_destroy_mdev_resources(priv->mdev); + mlx5e_destroy_mdev_resources(mdev); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index b0e129d0f6d8..1e7f26b240de 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -495,15 +495,15 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp, return -EINVAL; field_select = MLX5_MTPPS_FS_ENABLE; + pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index); + if (pin < 0) + return -EBUSY; + if (on) { bool rt_mode = mlx5_real_time_mode(mdev); u32 nsec; s64 sec; - pin = ptp_find_pin(clock->ptp, 
PTP_PF_PEROUT, rq->perout.index); - if (pin < 0) - return -EBUSY; - pin_mode = MLX5_PIN_MODE_OUT; pattern = MLX5_OUT_PATTERN_PERIODIC; ts.tv_sec = rq->perout.period.sec; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c index b265f27b2166..90b524c59f3c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c @@ -181,15 +181,13 @@ static int mlx5_sf_dev_vhca_arm_all(struct mlx5_sf_dev_table *table) u16 max_functions; u16 function_id; int err = 0; - bool ecpu; int i; max_functions = mlx5_sf_max_functions(dev); function_id = MLX5_CAP_GEN(dev, sf_base_id); - ecpu = mlx5_read_embedded_cpu(dev); /* Arm the vhca context as the vhca event notifier */ for (i = 0; i < max_functions; i++) { - err = mlx5_vhca_event_arm(dev, function_id, ecpu); + err = mlx5_vhca_event_arm(dev, function_id); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c index 58b6be0b03d7..a5a0f60bef66 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c @@ -6,7 +6,7 @@ #include "sf.h" #include "mlx5_ifc_vhca_event.h" #include "vhca_event.h" -#include "ecpf.h" +#include "mlx5_core.h" struct mlx5_sf_hw { u32 usr_sfnum; @@ -18,7 +18,6 @@ struct mlx5_sf_hw_table { struct mlx5_core_dev *dev; struct mlx5_sf_hw *sfs; int max_local_functions; - u8 ecpu: 1; struct mutex table_lock; /* Serializes sf deletion and vhca state change handler. */ struct notifier_block vhca_nb; }; @@ -64,7 +63,7 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum) } if (sw_id == -ENOSPC) { err = -ENOSPC; - goto err; + goto exist_err; } hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, sw_id); @@ -72,7 +71,7 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum) if (err) goto err; - err = mlx5_modify_vhca_sw_id(dev, hw_fn_id, table->ecpu, usr_sfnum); + err = mlx5_modify_vhca_sw_id(dev, hw_fn_id, usr_sfnum); if (err) goto vhca_err; @@ -118,7 +117,7 @@ void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id) hw_fn_id = mlx5_sf_sw_to_hw_id(dev, id); mutex_lock(&table->table_lock); - err = mlx5_cmd_query_vhca_state(dev, hw_fn_id, table->ecpu, out, sizeof(out)); + err = mlx5_cmd_query_vhca_state(dev, hw_fn_id, out, sizeof(out)); if (err) goto err; state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state); @@ -164,7 +163,6 @@ int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev) table->dev = dev; table->sfs = sfs; table->max_local_functions = max_functions; - table->ecpu = mlx5_read_embedded_cpu(dev); dev->priv.sf_hw_table = table; mlx5_core_dbg(dev, "SF HW table: max sfs = %d\n", max_functions); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/mlx5_ifc_vhca_event.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/mlx5_ifc_vhca_event.h index 1daf5a122ba3..4fc870140d71 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/mlx5_ifc_vhca_event.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/mlx5_ifc_vhca_event.h @@ -20,7 +20,7 @@ struct mlx5_ifc_vhca_state_context_bits { u8 sw_function_id[0x20]; - u8 reserved_at_40[0x80]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_query_vhca_state_out_bits { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c index af2f2dd9db25..28b14b05086f 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c @@ -19,52 +19,51 @@ struct mlx5_vhca_event_work { struct mlx5_vhca_state_event event; }; -int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id, - bool ecpu, u32 *out, u32 outlen) +int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id, u32 *out, u32 outlen) { u32 in[MLX5_ST_SZ_DW(query_vhca_state_in)] = {}; MLX5_SET(query_vhca_state_in, in, opcode, MLX5_CMD_OP_QUERY_VHCA_STATE); MLX5_SET(query_vhca_state_in, in, function_id, function_id); - MLX5_SET(query_vhca_state_in, in, embedded_cpu_function, ecpu); + MLX5_SET(query_vhca_state_in, in, embedded_cpu_function, 0); return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); } static int mlx5_cmd_modify_vhca_state(struct mlx5_core_dev *dev, u16 function_id, - bool ecpu, u32 *in, u32 inlen) + u32 *in, u32 inlen) { u32 out[MLX5_ST_SZ_DW(modify_vhca_state_out)] = {}; MLX5_SET(modify_vhca_state_in, in, opcode, MLX5_CMD_OP_MODIFY_VHCA_STATE); MLX5_SET(modify_vhca_state_in, in, function_id, function_id); - MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, ecpu); + MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, 0); return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); } -int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, bool ecpu, u32 sw_fn_id) +int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, u32 sw_fn_id) { u32 out[MLX5_ST_SZ_DW(modify_vhca_state_out)] = {}; u32 in[MLX5_ST_SZ_DW(modify_vhca_state_in)] = {}; MLX5_SET(modify_vhca_state_in, in, opcode, MLX5_CMD_OP_MODIFY_VHCA_STATE); MLX5_SET(modify_vhca_state_in, in, function_id, function_id); - MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, ecpu); + MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, 0); MLX5_SET(modify_vhca_state_in, in, vhca_state_field_select.sw_function_id, 1); MLX5_SET(modify_vhca_state_in, in, vhca_state_context.sw_function_id, sw_fn_id); return mlx5_cmd_exec_inout(dev, modify_vhca_state, in, out); } -int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id, bool ecpu) +int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id) { u32 in[MLX5_ST_SZ_DW(modify_vhca_state_in)] = {}; MLX5_SET(modify_vhca_state_in, in, vhca_state_context.arm_change_event, 1); MLX5_SET(modify_vhca_state_in, in, vhca_state_field_select.arm_change_event, 1); - return mlx5_cmd_modify_vhca_state(dev, function_id, ecpu, in, sizeof(in)); + return mlx5_cmd_modify_vhca_state(dev, function_id, in, sizeof(in)); } static void @@ -73,7 +72,7 @@ mlx5_vhca_event_notify(struct mlx5_core_dev *dev, struct mlx5_vhca_state_event * u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {}; int err; - err = mlx5_cmd_query_vhca_state(dev, event->function_id, event->ecpu, out, sizeof(out)); + err = mlx5_cmd_query_vhca_state(dev, event->function_id, out, sizeof(out)); if (err) return; @@ -82,7 +81,7 @@ mlx5_vhca_event_notify(struct mlx5_core_dev *dev, struct mlx5_vhca_state_event * event->new_vhca_state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state); - mlx5_vhca_event_arm(dev, event->function_id, event->ecpu); + mlx5_vhca_event_arm(dev, event->function_id); blocking_notifier_call_chain(&dev->priv.vhca_state_notifier->n_head, 0, event); } @@ -94,6 +93,7 @@ static void mlx5_vhca_state_work_handler(struct work_struct *_work) struct mlx5_core_dev *dev = notifier->dev; mlx5_vhca_event_notify(dev, &work->event); + kfree(work); } static int @@ -110,7 +110,6 @@ 
mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, v INIT_WORK(&work->work, &mlx5_vhca_state_work_handler); work->notifier = notifier; work->event.function_id = be16_to_cpu(eqe->data.vhca_state.function_id); - work->event.ecpu = be16_to_cpu(eqe->data.vhca_state.ec_function); mlx5_events_work_enqueue(notifier->dev, &work->work); return NOTIFY_OK; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h index 1fe1ec6f4d4b..013cdfe90616 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h @@ -10,7 +10,6 @@ struct mlx5_vhca_state_event { u16 function_id; u16 sw_function_id; u8 new_vhca_state; - bool ecpu; }; static inline bool mlx5_vhca_event_supported(const struct mlx5_core_dev *dev) @@ -25,10 +24,10 @@ void mlx5_vhca_event_start(struct mlx5_core_dev *dev); void mlx5_vhca_event_stop(struct mlx5_core_dev *dev); int mlx5_vhca_event_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb); void mlx5_vhca_event_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb); -int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, bool ecpu, u32 sw_fn_id); -int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id, bool ecpu); +int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, u32 sw_fn_id); +int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id); int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id, - bool ecpu, u32 *out, u32 outlen); + u32 *out, u32 outlen); #else static inline void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c index 83c4c877d558..8a6a56f9dc4e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c @@ -169,6 +169,7 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev, MLX5_SET(qpc, qpc, log_rq_size, ilog2(dr_qp->rq.wqe_cnt)); MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ); MLX5_SET(qpc, qpc, log_sq_size, ilog2(dr_qp->sq.wqe_cnt)); + MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(mdev)); MLX5_SET64(qpc, qpc, dbr_addr, dr_qp->wq_ctrl.db.dma); if (MLX5_CAP_GEN(mdev, cqe_version) == 1) MLX5_SET(qpc, qpc, user_index, 0xFFFFFF); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c index 4088d6e51508..9143ec326ebf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c @@ -264,8 +264,8 @@ static void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr) static u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p) { u64 index = - (MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) | - MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32) << 26); + ((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) | + ((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32)) << 26); return index << 6; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 16e2df6ef2f4..c4adc7f740d3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -4430,6 +4430,7 @@ MLXSW_ITEM32(reg, ptys, 
ext_eth_proto_cap, 0x08, 0, 32); #define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 BIT(20) #define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 BIT(21) #define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 BIT(22) +#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4 BIT(23) #define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR BIT(27) #define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR BIT(28) #define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR BIT(29) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c index bd7f873f6290..0bd64169bf81 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c @@ -1169,6 +1169,11 @@ static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = { .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, .speed = SPEED_100000, }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4, + .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, + .speed = SPEED_100000, + }, }; #define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 9ce90841f92d..eda99d82766a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -5951,6 +5951,10 @@ mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp, if (mlxsw_sp->router->aborted) return 0; + if (fen_info->fi->nh && + !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, fen_info->fi->nh->id)) + return 0; + fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id, &fen_info->dst, sizeof(fen_info->dst), fen_info->dst_len, @@ -6601,6 +6605,9 @@ static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp, if (mlxsw_sp_fib6_rt_should_ignore(rt)) return 0; + if (rt->nh && !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, rt->nh->id)) + return 0; + fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id, &rt->fib6_dst.addr, sizeof(rt->fib6_dst.addr), diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index 40e2e79d4517..131b2a53d261 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -613,7 +613,8 @@ static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = { { .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 | MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | - MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4, + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4, .speed = 100000, }, }; diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index dbdfabff3b00..1c3e204d727c 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -2040,7 +2040,7 @@ lan743x_rx_trim_skb(struct sk_buff *skb, int frame_length) dev_kfree_skb_irq(skb); return NULL; } - frame_length = max_t(int, 0, frame_length - RX_HEAD_PADDING - 2); + frame_length = max_t(int, 0, frame_length - RX_HEAD_PADDING - 4); if (skb->len > frame_length) { skb->tail -= skb->len - frame_length; skb->len = frame_length; diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig index c0ede0ca7115..05cb040c2677 100644 --- a/drivers/net/ethernet/mscc/Kconfig +++ b/drivers/net/ethernet/mscc/Kconfig @@ -13,6 +13,7 @@ if NET_VENDOR_MICROSEMI # Users should depend on 
NET_SWITCHDEV, HAS_IOMEM config MSCC_OCELOT_SWITCH_LIB + select NET_DEVLINK select REGMAP_MMIO select PACKING select PHYLIB diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index c3ac026f6aea..a41b458b1b3e 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -540,13 +540,14 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress, return -EOPNOTSUPP; } + flow_rule_match_ipv4_addrs(rule, &match); + if (filter->block_id == VCAP_IS1 && *(u32 *)&match.mask->dst) { NL_SET_ERR_MSG_MOD(extack, "Key type S1_NORMAL cannot match on destination IP"); return -EOPNOTSUPP; } - flow_rule_match_ipv4_addrs(rule, &match); tmp = &filter->key.ipv4.sip.value.addr[0]; memcpy(tmp, &match.key->src, 4); diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index 5defd31d481c..aa06fcb38f8b 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -327,8 +327,14 @@ int nfp_compile_flow_metadata(struct nfp_app *app, goto err_free_ctx_entry; } + /* Do net allocate a mask-id for pre_tun_rules. These flows are used to + * configure the pre_tun table and are never actually send to the + * firmware as an add-flow message. This causes the mask-id allocation + * on the firmware to get out of sync if allocated here. + */ new_mask_id = 0; - if (!nfp_check_mask_add(app, nfp_flow->mask_data, + if (!nfp_flow->pre_tun_rule.dev && + !nfp_check_mask_add(app, nfp_flow->mask_data, nfp_flow->meta.mask_len, &nfp_flow->meta.flags, &new_mask_id)) { NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate a new mask id"); @@ -359,7 +365,8 @@ int nfp_compile_flow_metadata(struct nfp_app *app, goto err_remove_mask; } - if (!nfp_check_mask_remove(app, nfp_flow->mask_data, + if (!nfp_flow->pre_tun_rule.dev && + !nfp_check_mask_remove(app, nfp_flow->mask_data, nfp_flow->meta.mask_len, NULL, &new_mask_id)) { NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release mask id"); @@ -374,8 +381,10 @@ int nfp_compile_flow_metadata(struct nfp_app *app, return 0; err_remove_mask: - nfp_check_mask_remove(app, nfp_flow->mask_data, nfp_flow->meta.mask_len, - NULL, &new_mask_id); + if (!nfp_flow->pre_tun_rule.dev) + nfp_check_mask_remove(app, nfp_flow->mask_data, + nfp_flow->meta.mask_len, + NULL, &new_mask_id); err_remove_rhash: WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table, &ctx_entry->ht_node, @@ -406,9 +415,10 @@ int nfp_modify_flow_metadata(struct nfp_app *app, __nfp_modify_flow_metadata(priv, nfp_flow); - nfp_check_mask_remove(app, nfp_flow->mask_data, - nfp_flow->meta.mask_len, &nfp_flow->meta.flags, - &new_mask_id); + if (!nfp_flow->pre_tun_rule.dev) + nfp_check_mask_remove(app, nfp_flow->mask_data, + nfp_flow->meta.mask_len, &nfp_flow->meta.flags, + &new_mask_id); /* Update flow payload with mask ids. 
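[Illustration, not part of the patch] The metadata.c comment above explains why pre_tun rules must skip the mask-id bookkeeping on both the add and the remove paths; a toy userspace restatement (hypothetical names) of why the same predicate has to guard both sides so the firmware-side count stays balanced:

#include <stdbool.h>
#include <stdio.h>

static int fw_mask_refcnt;	/* stands in for the firmware-side mask-id refcount */

static void add_flow(bool pre_tun_rule)
{
	if (!pre_tun_rule)
		fw_mask_refcnt++;	/* mask id allocated/referenced */
}

static void del_flow(bool pre_tun_rule)
{
	if (!pre_tun_rule)
		fw_mask_refcnt--;	/* mask id released */
}

int main(void)
{
	add_flow(true);		/* pre_tun rule: never sent as an add-flow */
	del_flow(true);
	add_flow(false);
	del_flow(false);
	printf("refcnt=%d\n", fw_mask_refcnt);	/* 0: stays balanced */
	return 0;
}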
*/ nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id; diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 1c59aff2163c..d72225d64a75 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -1142,6 +1142,12 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app, return -EOPNOTSUPP; } + if (!(key_layer & NFP_FLOWER_LAYER_IPV4) && + !(key_layer & NFP_FLOWER_LAYER_IPV6)) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on ipv4/ipv6 eth_type must be present"); + return -EOPNOTSUPP; + } + /* Skip fields known to exist. */ mask += sizeof(struct nfp_flower_meta_tci); ext += sizeof(struct nfp_flower_meta_tci); @@ -1152,6 +1158,13 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app, mask += sizeof(struct nfp_flower_in_port); ext += sizeof(struct nfp_flower_in_port); + /* Ensure destination MAC address matches pre_tun_dev. */ + mac = (struct nfp_flower_mac_mpls *)ext; + if (memcmp(&mac->mac_dst[0], flow->pre_tun_rule.dev->dev_addr, 6)) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC must match output dev MAC"); + return -EOPNOTSUPP; + } + /* Ensure destination MAC address is fully matched. */ mac = (struct nfp_flower_mac_mpls *)mask; if (!is_broadcast_ether_addr(&mac->mac_dst[0])) { @@ -1159,6 +1172,11 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app, return -EOPNOTSUPP; } + if (mac->mpls_lse) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MPLS not supported"); + return -EOPNOTSUPP; + } + mask += sizeof(struct nfp_flower_mac_mpls); ext += sizeof(struct nfp_flower_mac_mpls); if (key_layer & NFP_FLOWER_LAYER_IPV4 || diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index 7248d248f604..d19c02e99114 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -16,8 +16,9 @@ #define NFP_FL_MAX_ROUTES 32 #define NFP_TUN_PRE_TUN_RULE_LIMIT 32 -#define NFP_TUN_PRE_TUN_RULE_DEL 0x1 -#define NFP_TUN_PRE_TUN_IDX_BIT 0x8 +#define NFP_TUN_PRE_TUN_RULE_DEL BIT(0) +#define NFP_TUN_PRE_TUN_IDX_BIT BIT(3) +#define NFP_TUN_PRE_TUN_IPV6_BIT BIT(7) /** * struct nfp_tun_pre_run_rule - rule matched before decap @@ -1268,6 +1269,7 @@ int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app, { struct nfp_flower_priv *app_priv = app->priv; struct nfp_tun_offloaded_mac *mac_entry; + struct nfp_flower_meta_tci *key_meta; struct nfp_tun_pre_tun_rule payload; struct net_device *internal_dev; int err; @@ -1290,6 +1292,15 @@ int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app, if (!mac_entry) return -ENOENT; + /* Set/clear IPV6 bit. cpu_to_be16() swap will lead to MSB being + * set/clear for port_idx. + */ + key_meta = (struct nfp_flower_meta_tci *)flow->unmasked_data; + if (key_meta->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV6) + mac_entry->index |= NFP_TUN_PRE_TUN_IPV6_BIT; + else + mac_entry->index &= ~NFP_TUN_PRE_TUN_IPV6_BIT; + payload.port_idx = cpu_to_be16(mac_entry->index); /* Copy mac id and vlan to flow - dev may not exist at delete time. 
*/ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index 162a1ff1e9d2..4087311f7082 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -1079,15 +1079,17 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb) { int sg_elems = q->lif->qtype_info[IONIC_QTYPE_TXQ].max_sg_elems; struct ionic_tx_stats *stats = q_to_tx_stats(q); + int ndescs; int err; - /* If TSO, need roundup(skb->len/mss) descs */ + /* Each desc is mss long max, so a descriptor for each gso_seg */ if (skb_is_gso(skb)) - return (skb->len / skb_shinfo(skb)->gso_size) + 1; + ndescs = skb_shinfo(skb)->gso_segs; + else + ndescs = 1; - /* If non-TSO, just need 1 desc and nr_frags sg elems */ if (skb_shinfo(skb)->nr_frags <= sg_elems) - return 1; + return ndescs; /* Too many frags, so linearize */ err = skb_linearize(skb); @@ -1096,8 +1098,7 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb) stats->linearize++; - /* Need 1 desc and zero sg elems */ - return 1; + return ndescs; } static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c index 7760a3394e93..7ecb3dfe30bd 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c @@ -1425,6 +1425,7 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter) if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) { vfree(fw_dump->tmpl_hdr); + fw_dump->tmpl_hdr = NULL; if (qlcnic_83xx_md_check_extended_dump_capability(adapter)) extended = !qlcnic_83xx_extend_md_capab(adapter); @@ -1443,6 +1444,8 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter) struct qlcnic_83xx_dump_template_hdr *hdr; hdr = fw_dump->tmpl_hdr; + if (!hdr) + return; hdr->drv_cap_mask = 0x1f; fw_dump->cap_mask = 0x1f; dev_info(&pdev->dev, diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index f704da3f214c..581a92fc3292 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -767,7 +767,7 @@ static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int typ if (type == ERIAR_OOB && (tp->mac_version == RTL_GIGA_MAC_VER_52 || tp->mac_version == RTL_GIGA_MAC_VER_53)) - *cmd |= 0x7f0 << 18; + *cmd |= 0xf70 << 18; } DECLARE_RTL_COND(rtl_eriar_cond) @@ -4646,6 +4646,9 @@ static void rtl8169_down(struct rtl8169_private *tp) rtl8169_update_counters(tp); + pci_clear_master(tp->pci_dev); + rtl_pci_commit(tp); + rtl8169_cleanup(tp, true); rtl_prepare_power_down(tp); @@ -4653,6 +4656,7 @@ static void rtl8169_down(struct rtl8169_private *tp) static void rtl8169_up(struct rtl8169_private *tp) { + pci_set_master(tp->pci_dev); phy_resume(tp->phydev); rtl8169_init_phy(tp); napi_enable(&tp->napi); @@ -5307,8 +5311,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rtl_hw_reset(tp); - pci_set_master(pdev); - rc = rtl_alloc_irq(tp); if (rc < 0) { dev_err(&pdev->dev, "Can't allocate interrupt\n"); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 590b088bc4c7..f029c7c03804 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -560,6 +560,8 @@ static struct sh_eth_cpu_data r7s72100_data = { 
EESR_TDE, .fdr_value = 0x0000070f, + .trscer_err_mask = DESC_I_RINT8 | DESC_I_RINT5, + .no_psr = 1, .apr = 1, .mpr = 1, @@ -780,6 +782,8 @@ static struct sh_eth_cpu_data r7s9210_data = { .fdr_value = 0x0000070f, + .trscer_err_mask = DESC_I_RINT8 | DESC_I_RINT5, + .apr = 1, .mpr = 1, .tpauser = 1, @@ -1089,6 +1093,9 @@ static struct sh_eth_cpu_data sh771x_data = { EESIPR_CEEFIP | EESIPR_CELFIP | EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP, + + .trscer_err_mask = DESC_I_RINT8, + .tsu = 1, .dual_port = 1, }; diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 3c53051bdacf..200785e703c8 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -1715,14 +1715,17 @@ static int netsec_netdev_init(struct net_device *ndev) goto err1; /* set phy power down */ - data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR) | - BMCR_PDOWN; - netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data); + data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR); + netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, + data | BMCR_PDOWN); ret = netsec_reset_hardware(priv, true); if (ret) goto err2; + /* Restore phy power state */ + netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data); + spin_lock_init(&priv->desc_ring[NETSEC_RING_TX].lock); spin_lock_init(&priv->desc_ring[NETSEC_RING_RX].lock); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c index 751dfdeec41c..0b64f7710d17 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c @@ -233,6 +233,7 @@ static void common_default_data(struct plat_stmmacenet_data *plat) static int intel_mgbe_common_data(struct pci_dev *pdev, struct plat_stmmacenet_data *plat) { + char clk_name[20]; int ret; int i; @@ -301,8 +302,10 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, plat->eee_usecs_rate = plat->clk_ptp_rate; /* Set system clock */ + sprintf(clk_name, "%s-%s", "stmmac", pci_name(pdev)); + plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev, - "stmmac-clk", NULL, 0, + clk_name, NULL, 0, plat->clk_ptp_rate); if (IS_ERR(plat->stmmac_clk)) { @@ -446,8 +449,8 @@ static int tgl_common_data(struct pci_dev *pdev, return intel_mgbe_common_data(pdev, plat); } -static int tgl_sgmii_data(struct pci_dev *pdev, - struct plat_stmmacenet_data *plat) +static int tgl_sgmii_phy0_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) { plat->bus_id = 1; plat->phy_interface = PHY_INTERFACE_MODE_SGMII; @@ -456,12 +459,26 @@ static int tgl_sgmii_data(struct pci_dev *pdev, return tgl_common_data(pdev, plat); } -static struct stmmac_pci_info tgl_sgmii1g_info = { - .setup = tgl_sgmii_data, +static struct stmmac_pci_info tgl_sgmii1g_phy0_info = { + .setup = tgl_sgmii_phy0_data, }; -static int adls_sgmii_data(struct pci_dev *pdev, - struct plat_stmmacenet_data *plat) +static int tgl_sgmii_phy1_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + plat->bus_id = 2; + plat->phy_interface = PHY_INTERFACE_MODE_SGMII; + plat->serdes_powerup = intel_serdes_powerup; + plat->serdes_powerdown = intel_serdes_powerdown; + return tgl_common_data(pdev, plat); +} + +static struct stmmac_pci_info tgl_sgmii1g_phy1_info = { + .setup = tgl_sgmii_phy1_data, +}; + +static int adls_sgmii_phy0_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) { plat->bus_id = 1; plat->phy_interface = 
PHY_INTERFACE_MODE_SGMII; @@ -471,10 +488,24 @@ static int adls_sgmii_data(struct pci_dev *pdev, return tgl_common_data(pdev, plat); } -static struct stmmac_pci_info adls_sgmii1g_info = { - .setup = adls_sgmii_data, +static struct stmmac_pci_info adls_sgmii1g_phy0_info = { + .setup = adls_sgmii_phy0_data, }; +static int adls_sgmii_phy1_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + plat->bus_id = 2; + plat->phy_interface = PHY_INTERFACE_MODE_SGMII; + + /* SerDes power up and power down are done in BIOS for ADL */ + + return tgl_common_data(pdev, plat); +} + +static struct stmmac_pci_info adls_sgmii1g_phy1_info = { + .setup = adls_sgmii_phy1_data, +}; static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = { { .func = 6, @@ -756,11 +787,11 @@ static const struct pci_device_id intel_eth_pci_id_table[] = { { PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G_ID, &ehl_pse1_rgmii1g_info) }, { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G_ID, &ehl_pse1_sgmii1g_info) }, { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5_ID, &ehl_pse1_sgmii1g_info) }, - { PCI_DEVICE_DATA(INTEL, TGL_SGMII1G_ID, &tgl_sgmii1g_info) }, - { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0_ID, &tgl_sgmii1g_info) }, - { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1_ID, &tgl_sgmii1g_info) }, - { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0_ID, &adls_sgmii1g_info) }, - { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1_ID, &adls_sgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, TGL_SGMII1G_ID, &tgl_sgmii1g_phy0_info) }, + { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0_ID, &tgl_sgmii1g_phy0_info) }, + { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1_ID, &tgl_sgmii1g_phy1_info) }, + { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0_ID, &adls_sgmii1g_phy0_info) }, + { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1_ID, &adls_sgmii1g_phy1_info) }, {} }; MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 6b75cf2603ff..e62efd166ec8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -1214,6 +1214,8 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) plat_dat->init = sun8i_dwmac_init; plat_dat->exit = sun8i_dwmac_exit; plat_dat->setup = sun8i_dwmac_setup; + plat_dat->tx_fifo_size = 4096; + plat_dat->rx_fifo_size = 16384; ret = sun8i_dwmac_set_syscon(&pdev->dev, plat_dat); if (ret) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index c6540b003b43..cbf4429fb1d2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -402,19 +402,53 @@ static void dwmac4_rd_set_tx_ic(struct dma_desc *p) p->des2 |= cpu_to_le32(TDES2_INTERRUPT_ON_COMPLETION); } -static void dwmac4_display_ring(void *head, unsigned int size, bool rx) +static void dwmac4_display_ring(void *head, unsigned int size, bool rx, + dma_addr_t dma_rx_phy, unsigned int desc_size) { - struct dma_desc *p = (struct dma_desc *)head; + dma_addr_t dma_addr; int i; pr_info("%s descriptor ring:\n", rx ? 
"RX" : "TX"); - for (i = 0; i < size; i++) { - pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", - i, (unsigned int)virt_to_phys(p), - le32_to_cpu(p->des0), le32_to_cpu(p->des1), - le32_to_cpu(p->des2), le32_to_cpu(p->des3)); - p++; + if (desc_size == sizeof(struct dma_desc)) { + struct dma_desc *p = (struct dma_desc *)head; + + for (i = 0; i < size; i++) { + dma_addr = dma_rx_phy + i * sizeof(*p); + pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", + i, &dma_addr, + le32_to_cpu(p->des0), le32_to_cpu(p->des1), + le32_to_cpu(p->des2), le32_to_cpu(p->des3)); + p++; + } + } else if (desc_size == sizeof(struct dma_extended_desc)) { + struct dma_extended_desc *extp = (struct dma_extended_desc *)head; + + for (i = 0; i < size; i++) { + dma_addr = dma_rx_phy + i * sizeof(*extp); + pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", + i, &dma_addr, + le32_to_cpu(extp->basic.des0), le32_to_cpu(extp->basic.des1), + le32_to_cpu(extp->basic.des2), le32_to_cpu(extp->basic.des3), + le32_to_cpu(extp->des4), le32_to_cpu(extp->des5), + le32_to_cpu(extp->des6), le32_to_cpu(extp->des7)); + extp++; + } + } else if (desc_size == sizeof(struct dma_edesc)) { + struct dma_edesc *ep = (struct dma_edesc *)head; + + for (i = 0; i < size; i++) { + dma_addr = dma_rx_phy + i * sizeof(*ep); + pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", + i, &dma_addr, + le32_to_cpu(ep->des4), le32_to_cpu(ep->des5), + le32_to_cpu(ep->des6), le32_to_cpu(ep->des7), + le32_to_cpu(ep->basic.des0), le32_to_cpu(ep->basic.des1), + le32_to_cpu(ep->basic.des2), le32_to_cpu(ep->basic.des3)); + ep++; + } + } else { + pr_err("unsupported descriptor!"); } } @@ -499,10 +533,15 @@ static void dwmac4_get_rx_header_len(struct dma_desc *p, unsigned int *len) *len = le32_to_cpu(p->des2) & RDES2_HL; } -static void dwmac4_set_sec_addr(struct dma_desc *p, dma_addr_t addr) +static void dwmac4_set_sec_addr(struct dma_desc *p, dma_addr_t addr, bool buf2_valid) { p->des2 = cpu_to_le32(lower_32_bits(addr)); - p->des3 = cpu_to_le32(upper_32_bits(addr) | RDES3_BUFFER2_VALID_ADDR); + p->des3 = cpu_to_le32(upper_32_bits(addr)); + + if (buf2_valid) + p->des3 |= cpu_to_le32(RDES3_BUFFER2_VALID_ADDR); + else + p->des3 &= cpu_to_le32(~RDES3_BUFFER2_VALID_ADDR); } static void dwmac4_set_tbs(struct dma_edesc *p, u32 sec, u32 nsec) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index bb29bfcd62c3..62aa0e95beb7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -124,6 +124,23 @@ static void dwmac4_dma_init_channel(void __iomem *ioaddr, ioaddr + DMA_CHAN_INTR_ENA(chan)); } +static void dwmac410_dma_init_channel(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, u32 chan) +{ + u32 value; + + /* common channel control register config */ + value = readl(ioaddr + DMA_CHAN_CONTROL(chan)); + if (dma_cfg->pblx8) + value = value | DMA_BUS_MODE_PBL; + + writel(value, ioaddr + DMA_CHAN_CONTROL(chan)); + + /* Mask interrupts by writing to CSR7 */ + writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10, + ioaddr + DMA_CHAN_INTR_ENA(chan)); +} + static void dwmac4_dma_init(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg, int atds) { @@ -523,7 +540,7 @@ const struct stmmac_dma_ops dwmac4_dma_ops = { const struct stmmac_dma_ops dwmac410_dma_ops = { .reset = dwmac4_dma_reset, .init = dwmac4_dma_init, - .init_chan = dwmac4_dma_init_channel, + .init_chan = dwmac410_dma_init_channel, .init_rx_chan = dwmac4_dma_init_rx_chan, .init_tx_chan = 
dwmac4_dma_init_tx_chan, .axi = dwmac4_dma_axi, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c index 0b4ee2dbb691..71e50751ef2d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c @@ -53,10 +53,6 @@ void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan) value &= ~DMA_CONTROL_ST; writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan)); - - value = readl(ioaddr + GMAC_CONFIG); - value &= ~GMAC_CONFIG_TE; - writel(value, ioaddr + GMAC_CONFIG); } void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c index 0aaf19ab5672..ccfb0102dde4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c @@ -292,7 +292,7 @@ static void dwxgmac2_get_rx_header_len(struct dma_desc *p, unsigned int *len) *len = le32_to_cpu(p->des2) & XGMAC_RDES2_HL; } -static void dwxgmac2_set_sec_addr(struct dma_desc *p, dma_addr_t addr) +static void dwxgmac2_set_sec_addr(struct dma_desc *p, dma_addr_t addr, bool is_valid) { p->des2 = cpu_to_le32(lower_32_bits(addr)); p->des3 = cpu_to_le32(upper_32_bits(addr)); diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index d02cec296f51..6650edfab5bc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -417,19 +417,22 @@ static int enh_desc_get_rx_timestamp_status(void *desc, void *next_desc, } } -static void enh_desc_display_ring(void *head, unsigned int size, bool rx) +static void enh_desc_display_ring(void *head, unsigned int size, bool rx, + dma_addr_t dma_rx_phy, unsigned int desc_size) { struct dma_extended_desc *ep = (struct dma_extended_desc *)head; + dma_addr_t dma_addr; int i; pr_info("Extended %s descriptor ring:\n", rx ? 
"RX" : "TX"); for (i = 0; i < size; i++) { u64 x; + dma_addr = dma_rx_phy + i * sizeof(*ep); x = *(u64 *)ep; - pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", - i, (unsigned int)virt_to_phys(ep), + pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", + i, &dma_addr, (unsigned int)x, (unsigned int)(x >> 32), ep->basic.des2, ep->basic.des3); ep++; diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index b40b2e0667bb..979ac9fca23c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -78,7 +78,8 @@ struct stmmac_desc_ops { /* get rx timestamp status */ int (*get_rx_timestamp_status)(void *desc, void *next_desc, u32 ats); /* Display ring */ - void (*display_ring)(void *head, unsigned int size, bool rx); + void (*display_ring)(void *head, unsigned int size, bool rx, + dma_addr_t dma_rx_phy, unsigned int desc_size); /* set MSS via context descriptor */ void (*set_mss)(struct dma_desc *p, unsigned int mss); /* get descriptor skbuff address */ @@ -91,7 +92,7 @@ struct stmmac_desc_ops { int (*get_rx_hash)(struct dma_desc *p, u32 *hash, enum pkt_hash_types *type); void (*get_rx_header_len)(struct dma_desc *p, unsigned int *len); - void (*set_sec_addr)(struct dma_desc *p, dma_addr_t addr); + void (*set_sec_addr)(struct dma_desc *p, dma_addr_t addr, bool buf2_valid); void (*set_sarc)(struct dma_desc *p, u32 sarc_type); void (*set_vlan_tag)(struct dma_desc *p, u16 tag, u16 inner_tag, u32 inner_type); diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index f083360e4ba6..98ef43f35802 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -269,19 +269,22 @@ static int ndesc_get_rx_timestamp_status(void *desc, void *next_desc, u32 ats) return 1; } -static void ndesc_display_ring(void *head, unsigned int size, bool rx) +static void ndesc_display_ring(void *head, unsigned int size, bool rx, + dma_addr_t dma_rx_phy, unsigned int desc_size) { struct dma_desc *p = (struct dma_desc *)head; + dma_addr_t dma_addr; int i; pr_info("%s descriptor ring:\n", rx ? 
"RX" : "TX"); for (i = 0; i < size; i++) { u64 x; + dma_addr = dma_rx_phy + i * sizeof(*p); x = *(u64 *)p; - pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x", - i, (unsigned int)virt_to_phys(p), + pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x", + i, &dma_addr, (unsigned int)x, (unsigned int)(x >> 32), p->des2, p->des3); p++; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 26b971cd4da5..208cae344ffa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1133,6 +1133,7 @@ static int stmmac_phy_setup(struct stmmac_priv *priv) static void stmmac_display_rx_rings(struct stmmac_priv *priv) { u32 rx_cnt = priv->plat->rx_queues_to_use; + unsigned int desc_size; void *head_rx; u32 queue; @@ -1142,19 +1143,24 @@ static void stmmac_display_rx_rings(struct stmmac_priv *priv) pr_info("\tRX Queue %u rings\n", queue); - if (priv->extend_desc) + if (priv->extend_desc) { head_rx = (void *)rx_q->dma_erx; - else + desc_size = sizeof(struct dma_extended_desc); + } else { head_rx = (void *)rx_q->dma_rx; + desc_size = sizeof(struct dma_desc); + } /* Display RX ring */ - stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true); + stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true, + rx_q->dma_rx_phy, desc_size); } } static void stmmac_display_tx_rings(struct stmmac_priv *priv) { u32 tx_cnt = priv->plat->tx_queues_to_use; + unsigned int desc_size; void *head_tx; u32 queue; @@ -1164,14 +1170,19 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv) pr_info("\tTX Queue %d rings\n", queue); - if (priv->extend_desc) + if (priv->extend_desc) { head_tx = (void *)tx_q->dma_etx; - else if (tx_q->tbs & STMMAC_TBS_AVAIL) + desc_size = sizeof(struct dma_extended_desc); + } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { head_tx = (void *)tx_q->dma_entx; - else + desc_size = sizeof(struct dma_edesc); + } else { head_tx = (void *)tx_q->dma_tx; + desc_size = sizeof(struct dma_desc); + } - stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false); + stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false, + tx_q->dma_tx_phy, desc_size); } } @@ -1303,9 +1314,10 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, return -ENOMEM; buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); - stmmac_set_desc_sec_addr(priv, p, buf->sec_addr); + stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); } else { buf->sec_page = NULL; + stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); } buf->addr = page_pool_get_dma_addr(buf->page); @@ -1368,6 +1380,88 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i) } /** + * stmmac_reinit_rx_buffers - reinit the RX descriptor buffer. + * @priv: driver private structure + * Description: this function is called to re-allocate a receive buffer, perform + * the DMA mapping and init the descriptor. 
+ */ +static void stmmac_reinit_rx_buffers(struct stmmac_priv *priv) +{ + u32 rx_count = priv->plat->rx_queues_to_use; + u32 queue; + int i; + + for (queue = 0; queue < rx_count; queue++) { + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + + for (i = 0; i < priv->dma_rx_size; i++) { + struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; + + if (buf->page) { + page_pool_recycle_direct(rx_q->page_pool, buf->page); + buf->page = NULL; + } + + if (priv->sph && buf->sec_page) { + page_pool_recycle_direct(rx_q->page_pool, buf->sec_page); + buf->sec_page = NULL; + } + } + } + + for (queue = 0; queue < rx_count; queue++) { + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + + for (i = 0; i < priv->dma_rx_size; i++) { + struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; + struct dma_desc *p; + + if (priv->extend_desc) + p = &((rx_q->dma_erx + i)->basic); + else + p = rx_q->dma_rx + i; + + if (!buf->page) { + buf->page = page_pool_dev_alloc_pages(rx_q->page_pool); + if (!buf->page) + goto err_reinit_rx_buffers; + + buf->addr = page_pool_get_dma_addr(buf->page); + } + + if (priv->sph && !buf->sec_page) { + buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool); + if (!buf->sec_page) + goto err_reinit_rx_buffers; + + buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); + } + + stmmac_set_desc_addr(priv, p, buf->addr); + if (priv->sph) + stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); + else + stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); + if (priv->dma_buf_sz == BUF_SIZE_16KiB) + stmmac_init_desc3(priv, p); + } + } + + return; + +err_reinit_rx_buffers: + do { + while (--i >= 0) + stmmac_free_rx_buffer(priv, queue, i); + + if (queue == 0) + break; + + i = priv->dma_rx_size; + } while (queue-- > 0); +} + +/** * init_dma_rx_desc_rings - init the RX descriptor rings * @dev: net device structure * @flags: gfp flag. 
@@ -3648,7 +3742,10 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) DMA_FROM_DEVICE); stmmac_set_desc_addr(priv, p, buf->addr); - stmmac_set_desc_sec_addr(priv, p, buf->sec_addr); + if (priv->sph) + stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); + else + stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); stmmac_refill_desc3(priv, rx_q, p); rx_q->rx_count_frames++; @@ -3736,18 +3833,23 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) unsigned int count = 0, error = 0, len = 0; int status = 0, coe = priv->hw->rx_csum; unsigned int next_entry = rx_q->cur_rx; + unsigned int desc_size; struct sk_buff *skb = NULL; if (netif_msg_rx_status(priv)) { void *rx_head; netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); - if (priv->extend_desc) + if (priv->extend_desc) { rx_head = (void *)rx_q->dma_erx; - else + desc_size = sizeof(struct dma_extended_desc); + } else { rx_head = (void *)rx_q->dma_rx; + desc_size = sizeof(struct dma_desc); + } - stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true); + stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true, + rx_q->dma_rx_phy, desc_size); } while (count < limit) { unsigned int buf1_len = 0, buf2_len = 0; @@ -4315,24 +4417,27 @@ static int stmmac_set_mac_address(struct net_device *ndev, void *addr) static struct dentry *stmmac_fs_dir; static void sysfs_display_ring(void *head, int size, int extend_desc, - struct seq_file *seq) + struct seq_file *seq, dma_addr_t dma_phy_addr) { int i; struct dma_extended_desc *ep = (struct dma_extended_desc *)head; struct dma_desc *p = (struct dma_desc *)head; + dma_addr_t dma_addr; for (i = 0; i < size; i++) { if (extend_desc) { - seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", - i, (unsigned int)virt_to_phys(ep), + dma_addr = dma_phy_addr + i * sizeof(*ep); + seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", + i, &dma_addr, le32_to_cpu(ep->basic.des0), le32_to_cpu(ep->basic.des1), le32_to_cpu(ep->basic.des2), le32_to_cpu(ep->basic.des3)); ep++; } else { - seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", - i, (unsigned int)virt_to_phys(p), + dma_addr = dma_phy_addr + i * sizeof(*p); + seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", + i, &dma_addr, le32_to_cpu(p->des0), le32_to_cpu(p->des1), le32_to_cpu(p->des2), le32_to_cpu(p->des3)); p++; @@ -4360,11 +4465,11 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v) if (priv->extend_desc) { seq_printf(seq, "Extended descriptor ring:\n"); sysfs_display_ring((void *)rx_q->dma_erx, - priv->dma_rx_size, 1, seq); + priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy); } else { seq_printf(seq, "Descriptor ring:\n"); sysfs_display_ring((void *)rx_q->dma_rx, - priv->dma_rx_size, 0, seq); + priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy); } } @@ -4376,11 +4481,11 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v) if (priv->extend_desc) { seq_printf(seq, "Extended descriptor ring:\n"); sysfs_display_ring((void *)tx_q->dma_etx, - priv->dma_tx_size, 1, seq); + priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy); } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) { seq_printf(seq, "Descriptor ring:\n"); sysfs_display_ring((void *)tx_q->dma_tx, - priv->dma_tx_size, 0, seq); + priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy); } } @@ -5144,13 +5249,16 @@ int stmmac_dvr_remove(struct device *dev) netdev_info(priv->dev, "%s: removing driver", __func__); stmmac_stop_all_dma(priv); + stmmac_mac_set(priv, priv->ioaddr, false); + netif_carrier_off(ndev); + unregister_netdev(ndev); + /* Serdes power 
down needs to happen after VLAN filter + * is deleted that is triggered by unregister_netdev(). + */ if (priv->plat->serdes_powerdown) priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv); - stmmac_mac_set(priv, priv->ioaddr, false); - netif_carrier_off(ndev); - unregister_netdev(ndev); #ifdef CONFIG_DEBUG_FS stmmac_exit_fs(ndev); #endif @@ -5257,6 +5365,8 @@ static void stmmac_reset_queues_param(struct stmmac_priv *priv) tx_q->cur_tx = 0; tx_q->dirty_tx = 0; tx_q->mss = 0; + + netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); } } @@ -5318,7 +5428,7 @@ int stmmac_resume(struct device *dev) mutex_lock(&priv->lock); stmmac_reset_queues_param(priv); - + stmmac_reinit_rx_buffers(priv); stmmac_free_tx_skbufs(priv); stmmac_clear_descriptors(priv); diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 68695d4afacd..707ccdd03b19 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -3931,8 +3931,6 @@ static void niu_xmac_interrupt(struct niu *np) mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT; if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP) mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; - if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP) - mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP) mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT; if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP) diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index b8f4f419173f..d054c6e83b1c 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -2044,6 +2044,7 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /*bdx_hw_reset(priv); */ if (bdx_read_mac(priv)) { pr_err("load MAC address failed\n"); + err = -EFAULT; goto err_out_iomap; } SET_NETDEV_DEV(ndev, &pdev->dev); diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 3a8775e0ca55..5d677db0aee5 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1880,7 +1880,7 @@ static int axienet_probe(struct platform_device *pdev) if (IS_ERR(lp->regs)) { dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n"); ret = PTR_ERR(lp->regs); - goto free_netdev; + goto cleanup_clk; } lp->regs_start = ethres->start; @@ -1958,18 +1958,18 @@ static int axienet_probe(struct platform_device *pdev) break; default: ret = -EINVAL; - goto free_netdev; + goto cleanup_clk; } } else { ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode); if (ret) - goto free_netdev; + goto cleanup_clk; } if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII && lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) { dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n"); ret = -EINVAL; - goto free_netdev; + goto cleanup_clk; } /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ @@ -1982,7 +1982,7 @@ static int axienet_probe(struct platform_device *pdev) dev_err(&pdev->dev, "unable to get DMA resource\n"); of_node_put(np); - goto free_netdev; + goto cleanup_clk; } lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares); @@ -2002,12 +2002,12 @@ static int axienet_probe(struct platform_device *pdev) if (IS_ERR(lp->dma_regs)) { dev_err(&pdev->dev, "could not map DMA regs\n"); ret = PTR_ERR(lp->dma_regs); - goto free_netdev; + goto cleanup_clk; } if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) { dev_err(&pdev->dev, "could not determine irqs\n"); ret = -ENOMEM; - goto 
free_netdev; + goto cleanup_clk; } /* Autodetect the need for 64-bit DMA pointers. @@ -2037,7 +2037,7 @@ static int axienet_probe(struct platform_device *pdev) ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width)); if (ret) { dev_err(&pdev->dev, "No suitable DMA available\n"); - goto free_netdev; + goto cleanup_clk; } /* Check for Ethernet core IRQ (optional) */ @@ -2068,12 +2068,12 @@ static int axienet_probe(struct platform_device *pdev) if (!lp->phy_node) { dev_err(&pdev->dev, "phy-handle required for 1000BaseX/SGMII\n"); ret = -EINVAL; - goto free_netdev; + goto cleanup_mdio; } lp->pcs_phy = of_mdio_find_device(lp->phy_node); if (!lp->pcs_phy) { ret = -EPROBE_DEFER; - goto free_netdev; + goto cleanup_mdio; } lp->phylink_config.pcs_poll = true; } @@ -2087,17 +2087,30 @@ static int axienet_probe(struct platform_device *pdev) if (IS_ERR(lp->phylink)) { ret = PTR_ERR(lp->phylink); dev_err(&pdev->dev, "phylink_create error (%i)\n", ret); - goto free_netdev; + goto cleanup_mdio; } ret = register_netdev(lp->ndev); if (ret) { dev_err(lp->dev, "register_netdev() error (%i)\n", ret); - goto free_netdev; + goto cleanup_phylink; } return 0; +cleanup_phylink: + phylink_destroy(lp->phylink); + +cleanup_mdio: + if (lp->pcs_phy) + put_device(&lp->pcs_phy->dev); + if (lp->mii_bus) + axienet_mdio_teardown(lp); + of_node_put(lp->phy_node); + +cleanup_clk: + clk_disable_unprepare(lp->clk); + free_netdev: free_netdev(ndev); diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 71d6629e65c9..9f5b5614a150 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -171,11 +171,6 @@ static void sp_encaps(struct sixpack *sp, unsigned char *icp, int len) goto out_drop; } - if (len > sp->mtu) { /* sp->mtu = AX25_MTU = max. 
PACLEN = 256 */ - msg = "oversized transmit packet!"; - goto out_drop; - } - if (p[0] > 5) { msg = "invalid KISS command"; goto out_drop; diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c index 36eeb80406f2..4690c6a59054 100644 --- a/drivers/net/hamradio/scc.c +++ b/drivers/net/hamradio/scc.c @@ -2167,7 +2167,6 @@ static void __exit scc_cleanup_driver(void) MODULE_AUTHOR("Joerg Reuter <jreuter@yaina.de>"); MODULE_DESCRIPTION("AX.25 Device Driver for Z8530 based HDLC cards"); -MODULE_SUPPORTED_DEVICE("Z8530 based SCC cards for Amateur Radio"); MODULE_LICENSE("GPL"); module_init(scc_init_driver); module_exit(scc_cleanup_driver); diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index e1a497d3c9ba..59ac04a610ad 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -229,7 +229,7 @@ int netvsc_send(struct net_device *net, bool xdp_tx); void netvsc_linkstatus_callback(struct net_device *net, struct rndis_message *resp, - void *data); + void *data, u32 data_buflen); int netvsc_recv_callback(struct net_device *net, struct netvsc_device *nvdev, struct netvsc_channel *nvchan); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 8176fa0c8b16..15f262b70489 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -744,7 +744,7 @@ static netdev_tx_t netvsc_start_xmit(struct sk_buff *skb, */ void netvsc_linkstatus_callback(struct net_device *net, struct rndis_message *resp, - void *data) + void *data, u32 data_buflen) { struct rndis_indicate_status *indicate = &resp->msg.indicate_status; struct net_device_context *ndev_ctx = netdev_priv(net); @@ -765,11 +765,16 @@ void netvsc_linkstatus_callback(struct net_device *net, if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) { u32 speed; - /* Validate status_buf_offset */ + /* Validate status_buf_offset and status_buflen. + * + * Certain (pre-Fe) implementations of Hyper-V's vSwitch didn't account + * for the status buffer field in resp->msg_len; perform the validation + * using data_buflen (>= resp->msg_len). + */ if (indicate->status_buflen < sizeof(speed) || indicate->status_buf_offset < sizeof(*indicate) || - resp->msg_len - RNDIS_HEADER_SIZE < indicate->status_buf_offset || - resp->msg_len - RNDIS_HEADER_SIZE - indicate->status_buf_offset + data_buflen - RNDIS_HEADER_SIZE < indicate->status_buf_offset || + data_buflen - RNDIS_HEADER_SIZE - indicate->status_buf_offset < indicate->status_buflen) { netdev_err(net, "invalid rndis_indicate_status packet\n"); return; diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 123cc9d25f5e..c0e89e107d57 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -620,7 +620,7 @@ int rndis_filter_receive(struct net_device *ndev, case RNDIS_MSG_INDICATE: /* notification msgs */ - netvsc_linkstatus_callback(ndev, rndis_msg, data); + netvsc_linkstatus_callback(ndev, rndis_msg, data, buflen); break; default: netdev_err(ndev, diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c index 35e35852c25c..d73b03a80ef8 100644 --- a/drivers/net/ipa/ipa_cmd.c +++ b/drivers/net/ipa/ipa_cmd.c @@ -175,21 +175,23 @@ bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem, : field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK); if (mem->offset > offset_max || ipa->mem_offset > offset_max - mem->offset) { - dev_err(dev, "IPv%c %s%s table region offset too large " - "(0x%04x + 0x%04x > 0x%04x)\n", - ipv6 ? 
'6' : '4', hashed ? "hashed " : "", - route ? "route" : "filter", - ipa->mem_offset, mem->offset, offset_max); + dev_err(dev, "IPv%c %s%s table region offset too large\n", + ipv6 ? '6' : '4', hashed ? "hashed " : "", + route ? "route" : "filter"); + dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n", + ipa->mem_offset, mem->offset, offset_max); + return false; } if (mem->offset > ipa->mem_size || mem->size > ipa->mem_size - mem->offset) { - dev_err(dev, "IPv%c %s%s table region out of range " - "(0x%04x + 0x%04x > 0x%04x)\n", - ipv6 ? '6' : '4', hashed ? "hashed " : "", - route ? "route" : "filter", - mem->offset, mem->size, ipa->mem_size); + dev_err(dev, "IPv%c %s%s table region out of range\n", + ipv6 ? '6' : '4', hashed ? "hashed " : "", + route ? "route" : "filter"); + dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n", + mem->offset, mem->size, ipa->mem_size); + return false; } @@ -205,22 +207,36 @@ static bool ipa_cmd_header_valid(struct ipa *ipa) u32 size_max; u32 size; + /* In ipa_cmd_hdr_init_local_add() we record the offset and size + * of the header table memory area. Make sure the offset and size + * fit in the fields that need to hold them, and that the entire + * range is within the overall IPA memory range. + */ offset_max = field_max(HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK); if (mem->offset > offset_max || ipa->mem_offset > offset_max - mem->offset) { - dev_err(dev, "header table region offset too large " - "(0x%04x + 0x%04x > 0x%04x)\n", - ipa->mem_offset + mem->offset, offset_max); + dev_err(dev, "header table region offset too large\n"); + dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n", + ipa->mem_offset, mem->offset, offset_max); + return false; } size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK); size = ipa->mem[IPA_MEM_MODEM_HEADER].size; size += ipa->mem[IPA_MEM_AP_HEADER].size; - if (mem->offset > ipa->mem_size || size > ipa->mem_size - mem->offset) { - dev_err(dev, "header table region out of range " - "(0x%04x + 0x%04x > 0x%04x)\n", - mem->offset, size, ipa->mem_size); + + if (size > size_max) { + dev_err(dev, "header table region size too large\n"); + dev_err(dev, " (0x%04x > 0x%08x)\n", size, size_max); + + return false; + } + if (size > ipa->mem_size || mem->offset > ipa->mem_size - size) { + dev_err(dev, "header table region out of range\n"); + dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n", + mem->offset, size, ipa->mem_size); + return false; } diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c index 2fc64483f275..e594bf3b600f 100644 --- a/drivers/net/ipa/ipa_qmi.c +++ b/drivers/net/ipa/ipa_qmi.c @@ -249,6 +249,7 @@ static const struct qmi_msg_handler ipa_server_msg_handlers[] = { .decoded_size = IPA_QMI_DRIVER_INIT_COMPLETE_REQ_SZ, .fn = ipa_server_driver_init_complete, }, + { }, }; /* Handle an INIT_DRIVER response message from the modem. 
*/ @@ -269,6 +270,7 @@ static const struct qmi_msg_handler ipa_client_msg_handlers[] = { .decoded_size = IPA_QMI_INIT_DRIVER_RSP_SZ, .fn = ipa_client_init_driver, }, + { }, }; /* Return a pointer to an init modem driver request structure, which contains diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index aec92440eef1..659d3dceb687 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -294,6 +294,7 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port) dev_net_set(dev, nsim_dev_net(nsim_dev)); ns = netdev_priv(dev); ns->netdev = dev; + u64_stats_init(&ns->syncp); ns->nsim_dev = nsim_dev; ns->nsim_dev_port = nsim_dev_port; ns->nsim_bus_dev = nsim_dev->nsim_bus_dev; diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index fa0be591ae79..82fe5f43f0e9 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -342,6 +342,10 @@ static int bcm54xx_config_init(struct phy_device *phydev) bcm54xx_adjust_rxrefclk(phydev); switch (BRCM_PHY_MODEL(phydev)) { + case PHY_ID_BCM50610: + case PHY_ID_BCM50610M: + err = bcm54xx_config_clock_delay(phydev); + break; case PHY_ID_BCM54210E: err = bcm54210e_config_init(phydev); break; @@ -399,6 +403,11 @@ static int bcm54xx_resume(struct phy_device *phydev) if (ret < 0) return ret; + /* Upon exiting power down, the PHY remains in an internal reset state + * for 40us + */ + fsleep(40); + return bcm54xx_config_init(phydev); } diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c index be1224b4447b..f7a2ec150e54 100644 --- a/drivers/net/phy/dp83822.c +++ b/drivers/net/phy/dp83822.c @@ -290,6 +290,7 @@ static int dp83822_config_intr(struct phy_device *phydev) static irqreturn_t dp83822_handle_interrupt(struct phy_device *phydev) { + bool trigger_machine = false; int irq_status; /* The MISR1 and MISR2 registers are holding the interrupt status in @@ -305,7 +306,7 @@ static irqreturn_t dp83822_handle_interrupt(struct phy_device *phydev) return IRQ_NONE; } if (irq_status & ((irq_status & GENMASK(7, 0)) << 8)) - goto trigger_machine; + trigger_machine = true; irq_status = phy_read(phydev, MII_DP83822_MISR2); if (irq_status < 0) { @@ -313,11 +314,11 @@ static irqreturn_t dp83822_handle_interrupt(struct phy_device *phydev) return IRQ_NONE; } if (irq_status & ((irq_status & GENMASK(7, 0)) << 8)) - goto trigger_machine; + trigger_machine = true; - return IRQ_NONE; + if (!trigger_machine) + return IRQ_NONE; -trigger_machine: phy_trigger_machine(phydev); return IRQ_HANDLED; diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c index 688fadffb249..7ea32fb77190 100644 --- a/drivers/net/phy/dp83tc811.c +++ b/drivers/net/phy/dp83tc811.c @@ -264,6 +264,7 @@ static int dp83811_config_intr(struct phy_device *phydev) static irqreturn_t dp83811_handle_interrupt(struct phy_device *phydev) { + bool trigger_machine = false; int irq_status; /* The INT_STAT registers 1, 2 and 3 are holding the interrupt status @@ -279,7 +280,7 @@ static irqreturn_t dp83811_handle_interrupt(struct phy_device *phydev) return IRQ_NONE; } if (irq_status & ((irq_status & GENMASK(7, 0)) << 8)) - goto trigger_machine; + trigger_machine = true; irq_status = phy_read(phydev, MII_DP83811_INT_STAT2); if (irq_status < 0) { @@ -287,7 +288,7 @@ static irqreturn_t dp83811_handle_interrupt(struct phy_device *phydev) return IRQ_NONE; } if (irq_status & ((irq_status & GENMASK(7, 0)) << 8)) - goto trigger_machine; + trigger_machine = true; irq_status = phy_read(phydev, 
MII_DP83811_INT_STAT3); if (irq_status < 0) { @@ -295,11 +296,11 @@ static irqreturn_t dp83811_handle_interrupt(struct phy_device *phydev) return IRQ_NONE; } if (irq_status & ((irq_status & GENMASK(7, 0)) << 8)) - goto trigger_machine; + trigger_machine = true; - return IRQ_NONE; + if (!trigger_machine) + return IRQ_NONE; -trigger_machine: phy_trigger_machine(phydev); return IRQ_HANDLED; diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 1be07e45d314..fc2e7cb5b2e5 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -276,14 +276,16 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev, phydev->autoneg = autoneg; - phydev->speed = speed; + if (autoneg == AUTONEG_DISABLE) { + phydev->speed = speed; + phydev->duplex = duplex; + } linkmode_copy(phydev->advertising, advertising); linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->advertising, autoneg == AUTONEG_ENABLE); - phydev->duplex = duplex; phydev->master_slave_set = cmd->base.master_slave_cfg; phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index ce495473cd5d..cc38e326405a 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -230,7 +230,6 @@ static struct phy_driver genphy_driver; static LIST_HEAD(phy_fixup_list); static DEFINE_MUTEX(phy_fixup_lock); -#ifdef CONFIG_PM static bool mdio_bus_phy_may_suspend(struct phy_device *phydev) { struct device_driver *drv = phydev->mdio.dev.driver; @@ -270,7 +269,7 @@ out: return !phydev->suspended; } -static int mdio_bus_phy_suspend(struct device *dev) +static __maybe_unused int mdio_bus_phy_suspend(struct device *dev) { struct phy_device *phydev = to_phy_device(dev); @@ -290,7 +289,7 @@ static int mdio_bus_phy_suspend(struct device *dev) return phy_suspend(phydev); } -static int mdio_bus_phy_resume(struct device *dev) +static __maybe_unused int mdio_bus_phy_resume(struct device *dev) { struct phy_device *phydev = to_phy_device(dev); int ret; @@ -316,7 +315,6 @@ no_resume: static SIMPLE_DEV_PM_OPS(mdio_bus_phy_pm_ops, mdio_bus_phy_suspend, mdio_bus_phy_resume); -#endif /* CONFIG_PM */ /** * phy_register_fixup - creates a new phy_fixup and adds it to the list diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 053c92e02cd8..dc2800beacc3 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -476,7 +476,7 @@ static void phylink_major_config(struct phylink *pl, bool restart, err = pl->mac_ops->mac_finish(pl->config, pl->cur_link_an_mode, state->interface); if (err < 0) - phylink_err(pl, "mac_prepare failed: %pe\n", + phylink_err(pl, "mac_finish failed: %pe\n", ERR_PTR(err)); } } diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index 02e6bbb17b15..8d1f69dad603 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -387,6 +387,8 @@ static int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *i err = register_netdev(dev); if (err) { + /* Set disconnected flag so that disconnect() returns early. 
*/ + pnd->disconnected = 1; usb_driver_release_interface(&usbpn_driver, data_intf); goto out; } diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 4087c9e33781..8acf30115428 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -851,17 +851,17 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_ /* check if we got everything */ if (!ctx->data) { - dev_dbg(&intf->dev, "CDC Union missing and no IAD found\n"); + dev_err(&intf->dev, "CDC Union missing and no IAD found\n"); goto error; } if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) { if (!ctx->mbim_desc) { - dev_dbg(&intf->dev, "MBIM functional descriptor missing\n"); + dev_err(&intf->dev, "MBIM functional descriptor missing\n"); goto error; } } else { if (!ctx->ether_desc || !ctx->func_desc) { - dev_dbg(&intf->dev, "NCM or ECM functional descriptors missing\n"); + dev_err(&intf->dev, "NCM or ECM functional descriptors missing\n"); goto error; } } @@ -870,7 +870,7 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_ if (ctx->data != ctx->control) { temp = usb_driver_claim_interface(driver, ctx->data, dev); if (temp) { - dev_dbg(&intf->dev, "failed to claim data intf\n"); + dev_err(&intf->dev, "failed to claim data intf\n"); goto error; } } @@ -926,7 +926,7 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_ if (ctx->ether_desc) { temp = usbnet_get_ethernet_addr(dev, ctx->ether_desc->iMACAddress); if (temp) { - dev_dbg(&intf->dev, "failed to get mac address\n"); + dev_err(&intf->dev, "failed to get mac address\n"); goto error2; } dev_info(&intf->dev, "MAC-Address: %pM\n", dev->net->dev_addr); diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 17a050521b86..6700f1970b24 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -429,13 +429,6 @@ static ssize_t add_mux_store(struct device *d, struct device_attribute *attr, c goto err; } - /* we don't want to modify a running netdev */ - if (netif_running(dev->net)) { - netdev_err(dev->net, "Cannot change a running device\n"); - ret = -EBUSY; - goto err; - } - ret = qmimux_register_device(dev->net, mux_id); if (!ret) { info->flags |= QMI_WWAN_FLAG_MUX; @@ -465,13 +458,6 @@ static ssize_t del_mux_store(struct device *d, struct device_attribute *attr, c if (!rtnl_trylock()) return restart_syscall(); - /* we don't want to modify a running netdev */ - if (netif_running(dev->net)) { - netdev_err(dev->net, "Cannot change a running device\n"); - ret = -EBUSY; - goto err; - } - del_dev = qmimux_find_dev(dev, mux_id); if (!del_dev) { netdev_err(dev->net, "mux_id not present\n"); diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index b246817f3405..20fb5638ac65 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -3021,29 +3021,6 @@ static void __rtl_set_wol(struct r8152 *tp, u32 wolopts) device_set_wakeup_enable(&tp->udev->dev, false); } -static void r8153_mac_clk_spd(struct r8152 *tp, bool enable) -{ - /* MAC clock speed down */ - if (enable) { - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, - ALDPS_SPDWN_RATIO); - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, - EEE_SPDWN_RATIO); - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, - PKT_AVAIL_SPDWN_EN | SUSPEND_SPDWN_EN | - U1U2_SPDWN_EN | L1_SPDWN_EN); - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, - PWRSAVE_SPDWN_EN | RXDV_SPDWN_EN | TX10MIDLE_EN | - TP100_SPDWN_EN | TP500_SPDWN_EN | EEE_SPDWN_EN | - TP1000_SPDWN_EN); - } 
else { - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0); - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0); - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0); - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0); - } -} - static void r8153_u1u2en(struct r8152 *tp, bool enable) { u8 u1u2[8]; @@ -3338,11 +3315,9 @@ static void rtl8153_runtime_enable(struct r8152 *tp, bool enable) if (enable) { r8153_u1u2en(tp, false); r8153_u2p3en(tp, false); - r8153_mac_clk_spd(tp, true); rtl_runtime_suspend_enable(tp, true); } else { rtl_runtime_suspend_enable(tp, false); - r8153_mac_clk_spd(tp, false); switch (tp->version) { case RTL_VER_03: @@ -4718,7 +4693,6 @@ static void r8153_first_init(struct r8152 *tp) { u32 ocp_data; - r8153_mac_clk_spd(tp, false); rxdy_gated_en(tp, true); r8153_teredo_off(tp); @@ -4769,8 +4743,6 @@ static void r8153_enter_oob(struct r8152 *tp) { u32 ocp_data; - r8153_mac_clk_spd(tp, true); - ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data &= ~NOW_IS_OOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); @@ -5496,10 +5468,15 @@ static void r8153_init(struct r8152 *tp) ocp_write_word(tp, MCU_TYPE_USB, USB_CONNECT_TIMER, 0x0001); + /* MAC clock speed down */ + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0); + r8153_power_cut_en(tp, false); rtl_runtime_suspend_enable(tp, false); r8153_u1u2en(tp, true); - r8153_mac_clk_spd(tp, false); usb_enable_lpm(tp->udev); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6); @@ -6576,7 +6553,10 @@ static int rtl_ops_init(struct r8152 *tp) ops->in_nway = rtl8153_in_nway; ops->hw_phy_cfg = r8153_hw_phy_cfg; ops->autosuspend_en = rtl8153_runtime_enable; - tp->rx_buf_sz = 32 * 1024; + if (tp->udev->speed < USB_SPEED_SUPER) + tp->rx_buf_sz = 16 * 1024; + else + tp->rx_buf_sz = 32 * 1024; tp->eee_en = true; tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; break; diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index b4c8080e6f87..f4f37ecfed58 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -887,7 +887,7 @@ int usbnet_open (struct net_device *net) // insist peer be connected if (info->check_connect && (retval = info->check_connect (dev)) < 0) { - netif_dbg(dev, ifup, dev->net, "can't open; %d\n", retval); + netif_err(dev, ifup, dev->net, "can't open; %d\n", retval); goto done; } diff --git a/drivers/net/veth.c b/drivers/net/veth.c index aa1a66ad2ce5..34e49c75db42 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -302,8 +302,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) if (rxq < rcv->real_num_rx_queues) { rq = &rcv_priv->rq[rxq]; rcv_xdp = rcu_access_pointer(rq->xdp_prog); - if (rcv_xdp) - skb_record_rx_queue(skb, rxq); + skb_record_rx_queue(skb, rxq); } skb_tx_timestamp(skb); diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index dca97cd7c4e7..7eac6a3e1cde 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -204,14 +204,18 @@ static int uhdlc_init(struct ucc_hdlc_private *priv) priv->rx_skbuff = kcalloc(priv->rx_ring_size, sizeof(*priv->rx_skbuff), GFP_KERNEL); - if (!priv->rx_skbuff) + if (!priv->rx_skbuff) { + ret = -ENOMEM; goto free_ucc_pram; + } priv->tx_skbuff = kcalloc(priv->tx_ring_size, sizeof(*priv->tx_skbuff), GFP_KERNEL); - if (!priv->tx_skbuff) + if (!priv->tx_skbuff) { 
+ ret = -ENOMEM; goto free_rx_skbuff; + } priv->skb_curtx = 0; priv->skb_dirtytx = 0; diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c index 4aaa6388b9ee..5a6a945f6c81 100644 --- a/drivers/net/wan/hdlc_x25.c +++ b/drivers/net/wan/hdlc_x25.c @@ -23,6 +23,8 @@ struct x25_state { x25_hdlc_proto settings; + bool up; + spinlock_t up_lock; /* Protects "up" */ }; static int x25_ioctl(struct net_device *dev, struct ifreq *ifr); @@ -104,6 +106,8 @@ static void x25_data_transmit(struct net_device *dev, struct sk_buff *skb) static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev) { + hdlc_device *hdlc = dev_to_hdlc(dev); + struct x25_state *x25st = state(hdlc); int result; /* There should be a pseudo header of 1 byte added by upper layers. @@ -114,11 +118,19 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } + spin_lock_bh(&x25st->up_lock); + if (!x25st->up) { + spin_unlock_bh(&x25st->up_lock); + kfree_skb(skb); + return NETDEV_TX_OK; + } + switch (skb->data[0]) { case X25_IFACE_DATA: /* Data to be transmitted */ skb_pull(skb, 1); if ((result = lapb_data_request(dev, skb)) != LAPB_OK) dev_kfree_skb(skb); + spin_unlock_bh(&x25st->up_lock); return NETDEV_TX_OK; case X25_IFACE_CONNECT: @@ -147,6 +159,7 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev) break; } + spin_unlock_bh(&x25st->up_lock); dev_kfree_skb(skb); return NETDEV_TX_OK; } @@ -164,6 +177,7 @@ static int x25_open(struct net_device *dev) .data_transmit = x25_data_transmit, }; hdlc_device *hdlc = dev_to_hdlc(dev); + struct x25_state *x25st = state(hdlc); struct lapb_parms_struct params; int result; @@ -190,6 +204,10 @@ static int x25_open(struct net_device *dev) if (result != LAPB_OK) return -EINVAL; + spin_lock_bh(&x25st->up_lock); + x25st->up = true; + spin_unlock_bh(&x25st->up_lock); + return 0; } @@ -197,6 +215,13 @@ static int x25_open(struct net_device *dev) static void x25_close(struct net_device *dev) { + hdlc_device *hdlc = dev_to_hdlc(dev); + struct x25_state *x25st = state(hdlc); + + spin_lock_bh(&x25st->up_lock); + x25st->up = false; + spin_unlock_bh(&x25st->up_lock); + lapb_unregister(dev); } @@ -205,15 +230,28 @@ static void x25_close(struct net_device *dev) static int x25_rx(struct sk_buff *skb) { struct net_device *dev = skb->dev; + hdlc_device *hdlc = dev_to_hdlc(dev); + struct x25_state *x25st = state(hdlc); if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { dev->stats.rx_dropped++; return NET_RX_DROP; } - if (lapb_data_received(dev, skb) == LAPB_OK) + spin_lock_bh(&x25st->up_lock); + if (!x25st->up) { + spin_unlock_bh(&x25st->up_lock); + kfree_skb(skb); + dev->stats.rx_dropped++; + return NET_RX_DROP; + } + + if (lapb_data_received(dev, skb) == LAPB_OK) { + spin_unlock_bh(&x25st->up_lock); return NET_RX_SUCCESS; + } + spin_unlock_bh(&x25st->up_lock); dev->stats.rx_errors++; dev_kfree_skb_any(skb); return NET_RX_DROP; @@ -298,6 +336,8 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr) return result; memcpy(&state(hdlc)->settings, &new_settings, size); + state(hdlc)->up = false; + spin_lock_init(&state(hdlc)->up_lock); /* There's no header_ops so hard_header_len should be 0. 
*/ dev->hard_header_len = 0; diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index 605fe555e157..c3372498f4f1 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -292,7 +292,6 @@ static int lapbeth_open(struct net_device *dev) return -ENODEV; } - netif_start_queue(dev); return 0; } @@ -300,8 +299,6 @@ static int lapbeth_close(struct net_device *dev) { int err; - netif_stop_queue(dev); - if ((err = lapb_unregister(dev)) != LAPB_OK) pr_err("lapb_unregister error: %d\n", err); diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c index c41e72508d3d..2db9c948c0fc 100644 --- a/drivers/net/wireless/admtek/adm8211.c +++ b/drivers/net/wireless/admtek/adm8211.c @@ -28,7 +28,6 @@ MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>"); MODULE_AUTHOR("Jouni Malinen <j@w1.fi>"); MODULE_DESCRIPTION("Driver for IEEE 802.11b wireless cards based on ADMtek ADM8211"); -MODULE_SUPPORTED_DEVICE("ADM8211"); MODULE_LICENSE("GPL"); static unsigned int tx_ring_size __read_mostly = 16; diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index b391169576e2..faa2e678e63e 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -5450,8 +5450,8 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, } if (ab->hw_params.vdev_start_delay && - (arvif->vdev_type == WMI_VDEV_TYPE_AP || - arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)) { + arvif->vdev_type != WMI_VDEV_TYPE_AP && + arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) { param.vdev_id = arvif->vdev_id; param.peer_type = WMI_PEER_TYPE_DEFAULT; param.peer_addr = ar->mac_addr; diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c index 1aca841cd147..7968fe4eda22 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.c +++ b/drivers/net/wireless/ath/ath11k/qmi.c @@ -1687,8 +1687,8 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab) req->mem_seg[i].size = ab->qmi.target_mem[i].size; req->mem_seg[i].type = ab->qmi.target_mem[i].type; ath11k_dbg(ab, ATH11K_DBG_QMI, - "qmi req mem_seg[%d] 0x%llx %u %u\n", i, - ab->qmi.target_mem[i].paddr, + "qmi req mem_seg[%d] %pad %u %u\n", i, + &ab->qmi.target_mem[i].paddr, ab->qmi.target_mem[i].size, ab->qmi.target_mem[i].type); } diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 4c6e57f9976d..cef17f33c69e 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -90,7 +90,6 @@ MODULE_PARM_DESC(no_hw_rfkill_switch, "Ignore the GPIO RFKill switch state"); MODULE_AUTHOR("Jiri Slaby"); MODULE_AUTHOR("Nick Kossifidis"); MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards."); -MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards"); MODULE_LICENSE("Dual BSD/GPL"); static int ath5k_init(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 13b4f5f50f8a..ef6f5ea06c1f 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -177,7 +177,8 @@ struct ath_frame_info { s8 txq; u8 keyix; u8 rtscts_rate; - u8 retries : 7; + u8 retries : 6; + u8 dyn_smps : 1; u8 baw_tracked : 1; u8 tx_power; enum ath9k_key_type keytype:2; diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index b66eeb577272..5abc2a5526ec 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ 
b/drivers/net/wireless/ath/ath9k/hw.c @@ -34,7 +34,6 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type); MODULE_AUTHOR("Atheros Communications"); MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards."); -MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards"); MODULE_LICENSE("Dual BSD/GPL"); static void ath9k_hw_set_clockrate(struct ath_hw *ah) diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 42a208787f5a..01f9c26f9bf3 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -37,7 +37,6 @@ static char *dev_info = "ath9k"; MODULE_AUTHOR("Atheros Communications"); MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards."); -MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards"); MODULE_LICENSE("Dual BSD/GPL"); static unsigned int ath9k_debug = ATH_DBG_DEFAULT; diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index e60d4737fc6e..5691bd6eb82c 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1271,6 +1271,11 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, is_40, is_sgi, is_sp); if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC)) info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC; + if (rix >= 8 && fi->dyn_smps) { + info->rates[i].RateFlags |= + ATH9K_RATESERIES_RTS_CTS; + info->flags |= ATH9K_TXDESC_CTSENA; + } info->txpower[i] = ath_get_rate_txpower(sc, bf, rix, is_40, false); @@ -2114,6 +2119,7 @@ static void setup_frame_info(struct ieee80211_hw *hw, fi->keyix = an->ps_key; else fi->keyix = ATH9K_TXKEYIX_INVALID; + fi->dyn_smps = sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC; fi->keytype = keytype; fi->framelen = framelen; fi->tx_power = txpower; diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c index 707fe66727f8..febce4e8b3dd 100644 --- a/drivers/net/wireless/atmel/atmel.c +++ b/drivers/net/wireless/atmel/atmel.c @@ -75,7 +75,6 @@ MODULE_AUTHOR("Simon Kelley"); MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards."); MODULE_LICENSE("GPL"); -MODULE_SUPPORTED_DEVICE("Atmel at76c50x wireless cards"); /* The name of the firmware file to be loaded over-rides any automatic selection */ diff --git a/drivers/net/wireless/atmel/atmel_cs.c b/drivers/net/wireless/atmel/atmel_cs.c index 368eebefa741..453bb84cb338 100644 --- a/drivers/net/wireless/atmel/atmel_cs.c +++ b/drivers/net/wireless/atmel/atmel_cs.c @@ -57,7 +57,6 @@ MODULE_AUTHOR("Simon Kelley"); MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards."); MODULE_LICENSE("GPL"); -MODULE_SUPPORTED_DEVICE("Atmel at76c50x PCMCIA cards"); /*====================================================================*/ diff --git a/drivers/net/wireless/atmel/atmel_pci.c b/drivers/net/wireless/atmel/atmel_pci.c index 47f7ccb32414..f428dc79d916 100644 --- a/drivers/net/wireless/atmel/atmel_pci.c +++ b/drivers/net/wireless/atmel/atmel_pci.c @@ -16,7 +16,6 @@ MODULE_AUTHOR("Simon Kelley"); MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards."); MODULE_LICENSE("GPL"); -MODULE_SUPPORTED_DEVICE("Atmel at76c506 PCI wireless cards"); static const struct pci_device_id card_ids[] = { { 0x1114, 0x0506, PCI_ANY_ID, PCI_ANY_ID }, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c index 818e523f6025..39f3af2d0439 100644 --- 
a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c @@ -87,7 +87,6 @@ static int n_adapters_found; MODULE_AUTHOR("Broadcom Corporation"); MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver."); -MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards"); MODULE_LICENSE("Dual BSD/GPL"); /* This needs to be adjusted when brcms_firmwares changes */ MODULE_FIRMWARE("brcm/bcm43xx-0.fw"); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c b/drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c index 4c84c3001c3f..e87e68cc46e2 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c @@ -12,7 +12,6 @@ MODULE_AUTHOR("Broadcom Corporation"); MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver utilities."); -MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards"); MODULE_LICENSE("Dual BSD/GPL"); struct sk_buff *brcmu_pkt_buf_get_skb(uint len) diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index e35e1380ae43..60db38c38960 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -251,7 +251,6 @@ MODULE_AUTHOR("Benjamin Reed"); MODULE_DESCRIPTION("Support for Cisco/Aironet 802.11 wireless ethernet cards. " "Direct support for ISA/PCI/MPI cards and support for PCMCIA when used with airo_cs."); MODULE_LICENSE("Dual BSD/GPL"); -MODULE_SUPPORTED_DEVICE("Aironet 4500, 4800 and Cisco 340/350"); module_param_hw_array(io, int, ioport, NULL, 0); module_param_hw_array(irq, int, irq, NULL, 0); module_param_array(rates, int, NULL, 0); diff --git a/drivers/net/wireless/cisco/airo_cs.c b/drivers/net/wireless/cisco/airo_cs.c index 3718f958c0fc..fcfe4c6d62f0 100644 --- a/drivers/net/wireless/cisco/airo_cs.c +++ b/drivers/net/wireless/cisco/airo_cs.c @@ -47,7 +47,6 @@ MODULE_DESCRIPTION("Support for Cisco/Aironet 802.11 wireless ethernet " "cards. 
This is the module that links the PCMCIA card " "with the airo module."); MODULE_LICENSE("Dual BSD/GPL"); -MODULE_SUPPORTED_DEVICE("Aironet 4500, 4800 and Cisco 340 PCMCIA cards"); /*====================================================================*/ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c index fd070ca5e517..40f2109a097f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c @@ -271,12 +271,12 @@ static int iwl_pnvm_get_from_efi(struct iwl_trans *trans, err = efivar_entry_get(pnvm_efivar, NULL, &package_size, package); if (err) { IWL_DEBUG_FW(trans, - "PNVM UEFI variable not found %d (len %zd)\n", + "PNVM UEFI variable not found %d (len %lu)\n", err, package_size); goto out; } - IWL_DEBUG_FW(trans, "Read PNVM fro UEFI with size %zd\n", package_size); + IWL_DEBUG_FW(trans, "Read PNVM fro UEFI with size %lu\n", package_size); *data = kmemdup(package->data, *len, GFP_KERNEL); if (!*data) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h index 868da7e79a45..e6d2e0994317 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h @@ -205,6 +205,8 @@ static inline void iwl_op_mode_time_point(struct iwl_op_mode *op_mode, enum iwl_fw_ini_time_point tp_id, union iwl_dbg_tlv_tp_data *tp_data) { + if (!op_mode || !op_mode->ops || !op_mode->ops->time_point) + return; op_mode->ops->time_point(op_mode, tp_id, tp_data); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 15e2773ce7e7..5ee64f7f3c85 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -1083,6 +1083,7 @@ static const struct dmi_system_id dmi_ppag_approved_list[] = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek COMPUTER INC."), }, }, + {} }; static int iwl_mvm_ppag_init(struct iwl_mvm *mvm) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 314fec4a89ad..ffaf973dae94 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -1106,6 +1106,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } } +#if IS_ENABLED(CONFIG_IWLMVM) + /* * Workaround for problematic SnJ device: sometimes when * certain RF modules are connected to SnJ, the device ID @@ -1116,7 +1118,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_SNJ) iwl_trans->trans_cfg = &iwl_so_trans_cfg; -#if IS_ENABLED(CONFIG_IWLMVM) /* * special-case 7265D, it has the same PCI IDs. 
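The one-line iwlwifi fix above adds the empty terminator that the DMI core relies on to stop walking a dmi_system_id array; without it, dmi_check_system() reads past the end of dmi_ppag_approved_list. A rough illustration of the idiom (the table contents here are made up):

#include <linux/dmi.h>

static const struct dmi_system_id example_allow_list[] = {
        {
                .ident = "Example vendor",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Example Inc."),
                },
        },
        {}      /* terminator: the DMI scan stops at the all-zero entry */
};

static bool example_platform_approved(void)
{
        return dmi_check_system(example_allow_list) > 0;
}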
* diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 42426e25cac6..2bec97133119 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -1129,6 +1129,8 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans) iwl_pcie_rx_init_rxb_lists(rxq); + spin_unlock_bh(&rxq->lock); + if (!rxq->napi.poll) { int (*poll)(struct napi_struct *, int) = iwl_pcie_napi_poll; @@ -1149,7 +1151,6 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans) napi_enable(&rxq->napi); } - spin_unlock_bh(&rxq->lock); } /* move the pool to the default queue and allocator ownerships */ diff --git a/drivers/net/wireless/intersil/hostap/hostap_cs.c b/drivers/net/wireless/intersil/hostap/hostap_cs.c index 1a748670835a..ec7db2badc40 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_cs.c +++ b/drivers/net/wireless/intersil/hostap/hostap_cs.c @@ -26,7 +26,6 @@ static char *dev_info = "hostap_cs"; MODULE_AUTHOR("Jouni Malinen"); MODULE_DESCRIPTION("Support for Intersil Prism2-based 802.11 wireless LAN " "cards (PC Card)."); -MODULE_SUPPORTED_DEVICE("Intersil Prism2-based WLAN cards (PC Card)"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/intersil/hostap/hostap_pci.c b/drivers/net/wireless/intersil/hostap/hostap_pci.c index 101887e6bd0f..52d77506effd 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_pci.c +++ b/drivers/net/wireless/intersil/hostap/hostap_pci.c @@ -27,7 +27,6 @@ static char *dev_info = "hostap_pci"; MODULE_AUTHOR("Jouni Malinen"); MODULE_DESCRIPTION("Support for Intersil Prism2.5-based 802.11 wireless LAN " "PCI cards."); -MODULE_SUPPORTED_DEVICE("Intersil Prism2.5-based WLAN PCI cards"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/intersil/hostap/hostap_plx.c b/drivers/net/wireless/intersil/hostap/hostap_plx.c index 841cfc68ce84..58247290fcbc 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_plx.c +++ b/drivers/net/wireless/intersil/hostap/hostap_plx.c @@ -30,7 +30,6 @@ static char *dev_info = "hostap_plx"; MODULE_AUTHOR("Jouni Malinen"); MODULE_DESCRIPTION("Support for Intersil Prism2-based 802.11 wireless LAN " "cards (PLX)."); -MODULE_SUPPORTED_DEVICE("Intersil Prism2-based WLAN cards (PLX)"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index 19098b852d0a..2f27c43ad76d 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -345,7 +345,6 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, }; struct ieee80211_hw *hw; int len, n = 0, ret = -ENOMEM; - struct mt76_queue_entry e; struct mt76_txwi_cache *t; struct sk_buff *iter; dma_addr_t addr; @@ -387,6 +386,11 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, } tx_info.nbuf = n; + if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) { + ret = -ENOMEM; + goto unmap; + } + dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size, DMA_TO_DEVICE); ret = dev->drv->tx_prepare_skb(dev, txwi, q->qid, wcid, sta, &tx_info); @@ -395,11 +399,6 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, if (ret < 0) goto unmap; - if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) { - ret = -ENOMEM; - goto unmap; - } - return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf, tx_info.info, tx_info.skb, t); @@ -419,9 +418,7 @@ free: } #endif - e.skb = tx_info.skb; - e.txwi = t; - dev->drv->tx_complete_skb(dev, &e); + 
dev_kfree_skb(tx_info.skb); mt76_put_txwi(dev, t); return ret; } @@ -515,13 +512,13 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data, { struct sk_buff *skb = q->rx_head; struct skb_shared_info *shinfo = skb_shinfo(skb); + int nr_frags = shinfo->nr_frags; - if (shinfo->nr_frags < ARRAY_SIZE(shinfo->frags)) { + if (nr_frags < ARRAY_SIZE(shinfo->frags)) { struct page *page = virt_to_head_page(data); int offset = data - page_address(page) + q->buf_offset; - skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len, - q->buf_size); + skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size); } else { skb_free_frag(data); } @@ -530,7 +527,10 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data, return; q->rx_head = NULL; - dev->drv->rx_skb(dev, q - dev->q_rx, skb); + if (nr_frags < ARRAY_SIZE(shinfo->frags)) + dev->drv->rx_skb(dev, q - dev->q_rx, skb); + else + dev_kfree_skb(skb); } static int diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c index eb889f8d6fea..e5a258958ac9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c @@ -967,11 +967,6 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, } txp->nbuf = nbuf; - /* pass partial skb header to fw */ - tx_info->buf[1].len = MT_CT_PARSE_LEN; - tx_info->buf[1].skip_unmap = true; - tx_info->nbuf = MT_CT_DMA_BUF_NUM; - txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD | MT_CT_INFO_FROM_HOST); if (!key) @@ -1009,6 +1004,11 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, txp->rept_wds_wcid = cpu_to_le16(0x3ff); tx_info->skb = DMA_DUMMY_DATA; + /* pass partial skb header to fw */ + tx_info->buf[1].len = MT_CT_PARSE_LEN; + tx_info->buf[1].skip_unmap = true; + tx_info->nbuf = MT_CT_DMA_BUF_NUM; + return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c index 7fb2170a9561..bd798df748ba 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c @@ -543,7 +543,7 @@ mt7915_tm_set_tx_cont(struct mt7915_phy *phy, bool en) tx_cont->bw = CMD_CBW_20MHZ; break; default: - break; + return -EINVAL; } if (!en) { @@ -591,7 +591,7 @@ mt7915_tm_set_tx_cont(struct mt7915_phy *phy, bool en) mode = MT_PHY_TYPE_HE_MU; break; default: - break; + return -EINVAL; } rateval = mode << 6 | rate_idx; diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c index db125cd22b91..b5cc72e7e81c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c @@ -405,10 +405,8 @@ mt7921_mcu_tx_rate_report(struct mt7921_dev *dev, struct sk_buff *skb, if (wlan_idx >= MT76_N_WCIDS) return; wcid = rcu_dereference(dev->mt76.wcid[wlan_idx]); - if (!wcid) { - stats->tx_rate = rate; + if (!wcid) return; - } msta = container_of(wcid, struct mt7921_sta, wcid); stats = &msta->stats; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c index 8f860c14da58..dec6ffdf07c4 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c @@ -1821,7 +1821,6 @@ static const struct pci_device_id rt2400pci_device_table[] = { MODULE_AUTHOR(DRV_PROJECT); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION("Ralink RT2400 PCI & PCMCIA Wireless LAN driver."); 
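The mt76 dma.c change above moves the ring-occupancy test in front of tx_prepare_skb(), so a frame is never prepared against a queue that cannot accept it, and the error path can simply free the skb and txwi instead of faking a completion. The shape of that capacity check, in plain C with invented field names:

#include <stdbool.h>
#include <stddef.h>

struct tx_ring {
        size_t queued;  /* descriptors currently in flight */
        size_t ndesc;   /* total descriptors in the ring */
};

/*
 * Each pair of buffers consumes one descriptor, and one slot is kept free
 * so the hardware's read pointer never catches up with the write pointer.
 */
static bool ring_has_room(const struct tx_ring *q, size_t nbuf)
{
        size_t needed = (nbuf + 1) / 2;

        return q->queued + needed < q->ndesc - 1;
}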
-MODULE_SUPPORTED_DEVICE("Ralink RT2460 PCI & PCMCIA chipset based cards"); MODULE_DEVICE_TABLE(pci, rt2400pci_device_table); MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c index e940443c52ad..8faa0a80e73a 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c @@ -2119,7 +2119,6 @@ static const struct pci_device_id rt2500pci_device_table[] = { MODULE_AUTHOR(DRV_PROJECT); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION("Ralink RT2500 PCI & PCMCIA Wireless LAN driver."); -MODULE_SUPPORTED_DEVICE("Ralink RT2560 PCI & PCMCIA chipset based cards"); MODULE_DEVICE_TABLE(pci, rt2500pci_device_table); MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c index fce05fc88aaf..bb5ed6630645 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c @@ -1956,7 +1956,6 @@ static const struct usb_device_id rt2500usb_device_table[] = { MODULE_AUTHOR(DRV_PROJECT); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION("Ralink RT2500 USB Wireless LAN driver."); -MODULE_SUPPORTED_DEVICE("Ralink RT2570 USB chipset based cards"); MODULE_DEVICE_TABLE(usb, rt2500usb_device_table); MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c index 9a33baaa6184..1fde0e767ce3 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c @@ -439,7 +439,6 @@ static const struct pci_device_id rt2800pci_device_table[] = { MODULE_AUTHOR(DRV_PROJECT); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION("Ralink RT2800 PCI & PCMCIA Wireless LAN driver."); -MODULE_SUPPORTED_DEVICE("Ralink RT2860 PCI & PCMCIA chipset based cards"); MODULE_FIRMWARE(FIRMWARE_RT2860); MODULE_DEVICE_TABLE(pci, rt2800pci_device_table); MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c index 36ac18ca8082..b5c67f656cfd 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c @@ -1248,7 +1248,6 @@ static const struct usb_device_id rt2800usb_device_table[] = { MODULE_AUTHOR(DRV_PROJECT); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION("Ralink RT2800 USB Wireless LAN driver."); -MODULE_SUPPORTED_DEVICE("Ralink RT2870 USB chipset based cards"); MODULE_DEVICE_TABLE(usb, rt2800usb_device_table); MODULE_FIRMWARE(FIRMWARE_RT2870); MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c index 02da5dd37ddd..82cfc2aadc2b 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c @@ -2993,8 +2993,6 @@ static const struct pci_device_id rt61pci_device_table[] = { MODULE_AUTHOR(DRV_PROJECT); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION("Ralink RT61 PCI & PCMCIA Wireless LAN driver."); -MODULE_SUPPORTED_DEVICE("Ralink RT2561, RT2561s & RT2661 " - "PCI & PCMCIA chipset based cards"); MODULE_DEVICE_TABLE(pci, rt61pci_device_table); MODULE_FIRMWARE(FIRMWARE_RT2561); MODULE_FIRMWARE(FIRMWARE_RT2561s); diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.c b/drivers/net/wireless/ralink/rt2x00/rt73usb.c index e69793773d87..5ff2c740c3ea 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt73usb.c +++ 
b/drivers/net/wireless/ralink/rt2x00/rt73usb.c @@ -2513,7 +2513,6 @@ static const struct usb_device_id rt73usb_device_table[] = { MODULE_AUTHOR(DRV_PROJECT); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION("Ralink RT73 USB Wireless LAN driver."); -MODULE_SUPPORTED_DEVICE("Ralink RT2571W & RT2671 USB chipset based cards"); MODULE_DEVICE_TABLE(usb, rt73usb_device_table); MODULE_FIRMWARE(FIRMWARE_RT2571); MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c index 9a3d2439a8e7..d98483298555 100644 --- a/drivers/net/wireless/rsi/rsi_91x_main.c +++ b/drivers/net/wireless/rsi/rsi_91x_main.c @@ -441,6 +441,5 @@ module_init(rsi_91x_hal_module_init); module_exit(rsi_91x_hal_module_exit); MODULE_AUTHOR("Redpine Signals Inc"); MODULE_DESCRIPTION("Station driver for RSI 91x devices"); -MODULE_SUPPORTED_DEVICE("RSI-91x"); MODULE_VERSION("0.1"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c index 592e9dadcb55..fe0287b22a25 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c @@ -1571,7 +1571,6 @@ module_exit(rsi_module_exit); MODULE_AUTHOR("Redpine Signals Inc"); MODULE_DESCRIPTION("Common SDIO layer for RSI drivers"); -MODULE_SUPPORTED_DEVICE("RSI-91x"); MODULE_DEVICE_TABLE(sdio, rsi_dev_table); MODULE_FIRMWARE(FIRMWARE_RSI9113); MODULE_VERSION("0.1"); diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index a4a533c2a783..3fbe2a3c1455 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -928,7 +928,6 @@ module_usb_driver(rsi_driver); MODULE_AUTHOR("Redpine Signals Inc"); MODULE_DESCRIPTION("Common USB layer for RSI drivers"); -MODULE_SUPPORTED_DEVICE("RSI-91x"); MODULE_DEVICE_TABLE(usb, rsi_dev_table); MODULE_FIRMWARE(FIRMWARE_RSI9113); MODULE_VERSION("0.1"); diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index e5c73f819662..39a01c2a3058 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -557,8 +557,8 @@ check_frags: } if (skb_has_frag_list(skb) && !first_shinfo) { - first_shinfo = skb_shinfo(skb); - shinfo = skb_shinfo(skb_shinfo(skb)->frag_list); + first_shinfo = shinfo; + shinfo = skb_shinfo(shinfo->frag_list); nr_frags = shinfo->nr_frags; goto check_frags; @@ -1343,11 +1343,21 @@ int xenvif_tx_action(struct xenvif_queue *queue, int budget) return 0; gnttab_batch_copy(queue->tx_copy_ops, nr_cops); - if (nr_mops != 0) + if (nr_mops != 0) { ret = gnttab_map_refs(queue->tx_map_ops, NULL, queue->pages_to_map, nr_mops); + if (ret) { + unsigned int i; + + netdev_err(queue->vif->dev, "Map fail: nr %u ret %d\n", + nr_mops, ret); + for (i = 0; i < nr_mops; ++i) + WARN_ON_ONCE(queue->tx_map_ops[i].status == + GNTST_okay); + } + } work_done = xenvif_tx_submit(queue); diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index e68a8c4ac5a6..0896e21642be 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -380,6 +380,7 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved) return true; nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD; + nvme_req(req)->flags |= NVME_REQ_CANCELLED; blk_mq_complete_request(req); return true; } @@ -1225,28 +1226,12 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status) queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ); } -static int 
nvme_keep_alive(struct nvme_ctrl *ctrl) -{ - struct request *rq; - - rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, - BLK_MQ_REQ_RESERVED); - if (IS_ERR(rq)) - return PTR_ERR(rq); - - rq->timeout = ctrl->kato * HZ; - rq->end_io_data = ctrl; - - blk_execute_rq_nowait(NULL, rq, 0, nvme_keep_alive_end_io); - - return 0; -} - static void nvme_keep_alive_work(struct work_struct *work) { struct nvme_ctrl *ctrl = container_of(to_delayed_work(work), struct nvme_ctrl, ka_work); bool comp_seen = ctrl->comp_seen; + struct request *rq; if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) { dev_dbg(ctrl->device, @@ -1256,12 +1241,18 @@ static void nvme_keep_alive_work(struct work_struct *work) return; } - if (nvme_keep_alive(ctrl)) { + rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, + BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT); + if (IS_ERR(rq)) { /* allocation failure, reset the controller */ - dev_err(ctrl->device, "keep-alive failed\n"); + dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq)); nvme_reset_ctrl(ctrl); return; } + + rq->timeout = ctrl->kato * HZ; + rq->end_io_data = ctrl; + blk_execute_rq_nowait(NULL, rq, 0, nvme_keep_alive_end_io); } static void nvme_start_keep_alive(struct nvme_ctrl *ctrl) @@ -1440,7 +1431,7 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid, goto out_free_id; } - error = -ENODEV; + error = NVME_SC_INVALID_NS | NVME_SC_DNR; if ((*id)->ncap == 0) /* namespace not allocated or attached */ goto out_free_id; @@ -1963,30 +1954,18 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns) blk_queue_max_write_zeroes_sectors(queue, UINT_MAX); } -static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns) +/* + * Even though NVMe spec explicitly states that MDTS is not applicable to the + * write-zeroes, we are cautious and limit the size to the controllers + * max_hw_sectors value, which is based on the MDTS field and possibly other + * limiting factors. + */ +static void nvme_config_write_zeroes(struct request_queue *q, + struct nvme_ctrl *ctrl) { - u64 max_blocks; - - if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) || - (ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES)) - return; - /* - * Even though NVMe spec explicitly states that MDTS is not - * applicable to the write-zeroes:- "The restriction does not apply to - * commands that do not transfer data between the host and the - * controller (e.g., Write Uncorrectable ro Write Zeroes command).". - * In order to be more cautious use controller's max_hw_sectors value - * to configure the maximum sectors for the write-zeroes which is - * configured based on the controller's MDTS field in the - * nvme_init_identify() if available. 
- */ - if (ns->ctrl->max_hw_sectors == UINT_MAX) - max_blocks = (u64)USHRT_MAX + 1; - else - max_blocks = ns->ctrl->max_hw_sectors + 1; - - blk_queue_max_write_zeroes_sectors(disk->queue, - nvme_lba_to_sect(ns, max_blocks)); + if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) && + !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES)) + blk_queue_max_write_zeroes_sectors(q, ctrl->max_hw_sectors); } static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids) @@ -2158,7 +2137,7 @@ static void nvme_update_disk_info(struct gendisk *disk, set_capacity_and_notify(disk, capacity); nvme_config_discard(disk, ns); - nvme_config_write_zeroes(disk, ns); + nvme_config_write_zeroes(disk->queue, ns->ctrl); set_disk_ro(disk, (id->nsattr & NVME_NS_ATTR_RO) || test_bit(NVME_NS_FORCE_RO, &ns->flags)); @@ -4038,7 +4017,7 @@ static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid) static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_ids *ids) { struct nvme_id_ns *id; - int ret = -ENODEV; + int ret = NVME_SC_INVALID_NS | NVME_SC_DNR; if (test_bit(NVME_NS_DEAD, &ns->flags)) goto out; @@ -4047,7 +4026,7 @@ static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_ids *ids) if (ret) goto out; - ret = -ENODEV; + ret = NVME_SC_INVALID_NS | NVME_SC_DNR; if (!nvme_ns_ids_equal(&ns->head->ids, ids)) { dev_err(ns->ctrl->device, "identifiers changed for nsid %d\n", ns->head->ns_id); @@ -4065,7 +4044,7 @@ out: * * TODO: we should probably schedule a delayed retry here. */ - if (ret && ret != -ENOMEM && !(ret > 0 && !(ret & NVME_SC_DNR))) + if (ret > 0 && (ret & NVME_SC_DNR)) nvme_ns_remove(ns); } @@ -4095,6 +4074,12 @@ static void nvme_validate_or_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) nsid); break; } + if (!nvme_multi_css(ctrl)) { + dev_warn(ctrl->device, + "command set not reported for nsid: %d\n", + nsid); + break; + } nvme_alloc_ns(ctrl, nsid, &ids); break; default: diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index 5dfd806fc2d2..604ab0e5a2ad 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -630,7 +630,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, opts->queue_size = NVMF_DEF_QUEUE_SIZE; opts->nr_io_queues = num_online_cpus(); opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY; - opts->kato = NVME_DEFAULT_KATO; + opts->kato = 0; opts->duplicate_connect = false; opts->fast_io_fail_tmo = NVMF_DEF_FAIL_FAST_TMO; opts->hdr_digest = false; @@ -893,6 +893,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, opts->nr_write_queues = 0; opts->nr_poll_queues = 0; opts->duplicate_connect = true; + } else { + if (!opts->kato) + opts->kato = NVME_DEFAULT_KATO; } if (ctrl_loss_tmo < 0) { opts->max_reconnects = -1; diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h index 733010d2eafd..888b108d87a4 100644 --- a/drivers/nvme/host/fabrics.h +++ b/drivers/nvme/host/fabrics.h @@ -19,6 +19,13 @@ #define NVMF_DEF_FAIL_FAST_TMO -1 /* + * Reserved one command for internal usage. This command is used for sending + * the connect command, as well as for the keep alive command on the admin + * queue once live. + */ +#define NVMF_RESERVED_TAGS 1 + +/* * Define a host as seen by the target. We allocate one at boot, but also * allow the override it when creating controllers. 
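Several nvme core hunks above switch error returns from -ENODEV to NVMe status words such as NVME_SC_INVALID_NS | NVME_SC_DNR, so nvme_validate_ns() can tell a local failure (negative errno, possibly transient) from a controller response whose Do Not Retry bit makes removal the right call. A compact sketch of that convention, with a made-up constant standing in for the real status bit:

#include <stdbool.h>

#define EX_SC_DNR 0x4000        /* stand-in for the DNR bit of an NVMe status word */

/*
 * ret < 0 : local/transport errno, may be worth retrying later
 * ret == 0: success
 * ret > 0 : NVMe status word; give up only if the controller set DNR
 */
static bool should_remove_ns(int ret)
{
        return ret > 0 && (ret & EX_SC_DNR);
}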
This is both to provide * persistence of the Host NQN over multiple boots, and to allow using diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 20dadd86e981..6ffa8de2a0d7 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -1956,7 +1956,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) sizeof(op->rsp_iu), DMA_FROM_DEVICE); if (opstate == FCPOP_STATE_ABORTED) - status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); + status = cpu_to_le16(NVME_SC_HOST_ABORTED_CMD << 1); else if (freq->status) { status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); dev_info(ctrl->ctrl.device, @@ -2055,7 +2055,7 @@ done: nvme_fc_complete_rq(rq); check_error: - if (terminate_assoc) + if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING) queue_work(nvme_reset_wq, &ctrl->ioerr_work); } @@ -2443,6 +2443,7 @@ nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved) struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req); + op->nreq.flags |= NVME_REQ_CANCELLED; __nvme_fc_abort_op(ctrl, op); return true; } @@ -2862,7 +2863,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl) memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); ctrl->tag_set.ops = &nvme_fc_mq_ops; ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; - ctrl->tag_set.reserved_tags = 1; /* fabric connect */ + ctrl->tag_set.reserved_tags = NVMF_RESERVED_TAGS; ctrl->tag_set.numa_node = ctrl->ctrl.numa_node; ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; ctrl->tag_set.cmd_size = @@ -3484,7 +3485,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set)); ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops; ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH; - ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */ + ctrl->admin_tag_set.reserved_tags = NVMF_RESERVED_TAGS; ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node; ctrl->admin_tag_set.cmd_size = struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv, diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c index 8f9e96986780..0a586d712920 100644 --- a/drivers/nvme/host/hwmon.c +++ b/drivers/nvme/host/hwmon.c @@ -248,6 +248,7 @@ int nvme_hwmon_init(struct nvme_ctrl *ctrl) if (IS_ERR(hwmon)) { dev_warn(dev, "Failed to instantiate hwmon device\n"); kfree(data); + return PTR_ERR(hwmon); } ctrl->hwmon_device = hwmon; return 0; diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 38b0d694dfc9..7249ae74f71f 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -3234,7 +3234,8 @@ static const struct pci_device_id nvme_id_table[] = { { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */ .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, }, { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */ - .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, + .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | + NVME_QUIRK_NO_NS_DESC_LIST, }, { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */ @@ -3245,9 +3246,13 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | + NVME_QUIRK_DISABLE_WRITE_ZEROES| NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + { 
PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */ + .driver_data = NVME_QUIRK_NO_NS_DESC_LIST | + NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE(0x1d1d, 0x1f1f), /* LighNVM qemu device */ .driver_data = NVME_QUIRK_LIGHTNVM, }, { PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */ @@ -3265,6 +3270,8 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x1d97, 0x2263), /* SPCC */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x2646, 0x2262), /* KINGSTON SKC2000 NVMe SSD */ + .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061), diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 53ac4d7442ba..be905d4fdb47 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -736,8 +736,11 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl) return ret; ctrl->ctrl.queue_count = nr_io_queues + 1; - if (ctrl->ctrl.queue_count < 2) - return 0; + if (ctrl->ctrl.queue_count < 2) { + dev_err(ctrl->ctrl.device, + "unable to set any I/O queues\n"); + return -ENOMEM; + } dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues); @@ -798,7 +801,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl, memset(set, 0, sizeof(*set)); set->ops = &nvme_rdma_admin_mq_ops; set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; - set->reserved_tags = 2; /* connect + keep-alive */ + set->reserved_tags = NVMF_RESERVED_TAGS; set->numa_node = nctrl->numa_node; set->cmd_size = sizeof(struct nvme_rdma_request) + NVME_RDMA_DATA_SGL_SIZE; @@ -811,7 +814,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl, memset(set, 0, sizeof(*set)); set->ops = &nvme_rdma_mq_ops; set->queue_depth = nctrl->sqsize + 1; - set->reserved_tags = 1; /* fabric connect */ + set->reserved_tags = NVMF_RESERVED_TAGS; set->numa_node = nctrl->numa_node; set->flags = BLK_MQ_F_SHOULD_MERGE; set->cmd_size = sizeof(struct nvme_rdma_request) + diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 69f59d2c5799..a0f00cb8f9f3 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -287,7 +287,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req, * directly, otherwise queue io_work. Also, only do that if we * are on the same cpu, so we don't introduce contention. 
*/ - if (queue->io_cpu == __smp_processor_id() && + if (queue->io_cpu == raw_smp_processor_id() && sync && empty && mutex_trylock(&queue->send_mutex)) { queue->more_requests = !last; nvme_tcp_send_all(queue); @@ -568,6 +568,13 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req, req->pdu_len = le32_to_cpu(pdu->r2t_length); req->pdu_sent = 0; + if (unlikely(!req->pdu_len)) { + dev_err(queue->ctrl->ctrl.device, + "req %d r2t len is %u, probably a bug...\n", + rq->tag, req->pdu_len); + return -EPROTO; + } + if (unlikely(req->data_sent + req->pdu_len > req->data_len)) { dev_err(queue->ctrl->ctrl.device, "req %d r2t len %u exceeded data len %u (%zu sent)\n", @@ -1575,7 +1582,7 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl, memset(set, 0, sizeof(*set)); set->ops = &nvme_tcp_admin_mq_ops; set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; - set->reserved_tags = 2; /* connect + keep-alive */ + set->reserved_tags = NVMF_RESERVED_TAGS; set->numa_node = nctrl->numa_node; set->flags = BLK_MQ_F_BLOCKING; set->cmd_size = sizeof(struct nvme_tcp_request); @@ -1587,7 +1594,7 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl, memset(set, 0, sizeof(*set)); set->ops = &nvme_tcp_mq_ops; set->queue_depth = nctrl->sqsize + 1; - set->reserved_tags = 1; /* fabric connect */ + set->reserved_tags = NVMF_RESERVED_TAGS; set->numa_node = nctrl->numa_node; set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING; set->cmd_size = sizeof(struct nvme_tcp_request); @@ -1745,8 +1752,11 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) return ret; ctrl->queue_count = nr_io_queues + 1; - if (ctrl->queue_count < 2) - return 0; + if (ctrl->queue_count < 2) { + dev_err(ctrl->device, + "unable to set any I/O queues\n"); + return -ENOMEM; + } dev_info(ctrl->device, "creating %d I/O queues.\n", nr_io_queues); diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c index c7e3ec561ba0..bc2f344f0ae0 100644 --- a/drivers/nvme/host/zns.c +++ b/drivers/nvme/host/zns.c @@ -9,7 +9,13 @@ int nvme_revalidate_zones(struct nvme_ns *ns) { - return blk_revalidate_disk_zones(ns->disk, NULL); + struct request_queue *q = ns->queue; + int ret; + + ret = blk_revalidate_disk_zones(ns->disk, NULL); + if (!ret) + blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append); + return ret; } static int nvme_set_max_append(struct nvme_ctrl *ctrl) @@ -107,7 +113,6 @@ int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf) blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q); blk_queue_max_open_zones(q, le32_to_cpu(id->mor) + 1); blk_queue_max_active_zones(q, le32_to_cpu(id->mar) + 1); - blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append); free_data: kfree(id); return status; diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index bc6a774f2124..fe6b8aa90b53 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -313,27 +313,40 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req) nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR); } -static void nvmet_id_set_model_number(struct nvme_id_ctrl *id, - struct nvmet_subsys *subsys) +static u16 nvmet_set_model_number(struct nvmet_subsys *subsys) { - const char *model = NVMET_DEFAULT_CTRL_MODEL; - struct nvmet_subsys_model *subsys_model; + u16 status = 0; + + mutex_lock(&subsys->lock); + if (!subsys->model_number) { + subsys->model_number = + kstrdup(NVMET_DEFAULT_CTRL_MODEL, GFP_KERNEL); + if (!subsys->model_number) + status = 
NVME_SC_INTERNAL; + } + mutex_unlock(&subsys->lock); - rcu_read_lock(); - subsys_model = rcu_dereference(subsys->model); - if (subsys_model) - model = subsys_model->number; - memcpy_and_pad(id->mn, sizeof(id->mn), model, strlen(model), ' '); - rcu_read_unlock(); + return status; } static void nvmet_execute_identify_ctrl(struct nvmet_req *req) { struct nvmet_ctrl *ctrl = req->sq->ctrl; + struct nvmet_subsys *subsys = ctrl->subsys; struct nvme_id_ctrl *id; u32 cmd_capsule_size; u16 status = 0; + /* + * If there is no model number yet, set it now. It will then remain + * stable for the life time of the subsystem. + */ + if (!subsys->model_number) { + status = nvmet_set_model_number(subsys); + if (status) + goto out; + } + id = kzalloc(sizeof(*id), GFP_KERNEL); if (!id) { status = NVME_SC_INTERNAL; @@ -347,7 +360,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) memset(id->sn, ' ', sizeof(id->sn)); bin2hex(id->sn, &ctrl->subsys->serial, min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2)); - nvmet_id_set_model_number(id, ctrl->subsys); + memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number, + strlen(subsys->model_number), ' '); memcpy_and_pad(id->fr, sizeof(id->fr), UTS_RELEASE, strlen(UTS_RELEASE), ' '); diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c index 635a7cb45d0b..e5dbd1923b7b 100644 --- a/drivers/nvme/target/configfs.c +++ b/drivers/nvme/target/configfs.c @@ -1118,16 +1118,12 @@ static ssize_t nvmet_subsys_attr_model_show(struct config_item *item, char *page) { struct nvmet_subsys *subsys = to_subsys(item); - struct nvmet_subsys_model *subsys_model; - char *model = NVMET_DEFAULT_CTRL_MODEL; int ret; - rcu_read_lock(); - subsys_model = rcu_dereference(subsys->model); - if (subsys_model) - model = subsys_model->number; - ret = snprintf(page, PAGE_SIZE, "%s\n", model); - rcu_read_unlock(); + mutex_lock(&subsys->lock); + ret = snprintf(page, PAGE_SIZE, "%s\n", subsys->model_number ? + subsys->model_number : NVMET_DEFAULT_CTRL_MODEL); + mutex_unlock(&subsys->lock); return ret; } @@ -1138,14 +1134,17 @@ static bool nvmet_is_ascii(const char c) return c >= 0x20 && c <= 0x7e; } -static ssize_t nvmet_subsys_attr_model_store(struct config_item *item, - const char *page, size_t count) +static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys, + const char *page, size_t count) { - struct nvmet_subsys *subsys = to_subsys(item); - struct nvmet_subsys_model *new_model; - char *new_model_number; int pos = 0, len; + if (subsys->model_number) { + pr_err("Can't set model number. 
%s is already assigned\n", + subsys->model_number); + return -EINVAL; + } + len = strcspn(page, "\n"); if (!len) return -EINVAL; @@ -1155,28 +1154,25 @@ static ssize_t nvmet_subsys_attr_model_store(struct config_item *item, return -EINVAL; } - new_model_number = kmemdup_nul(page, len, GFP_KERNEL); - if (!new_model_number) + subsys->model_number = kmemdup_nul(page, len, GFP_KERNEL); + if (!subsys->model_number) return -ENOMEM; + return count; +} - new_model = kzalloc(sizeof(*new_model) + len + 1, GFP_KERNEL); - if (!new_model) { - kfree(new_model_number); - return -ENOMEM; - } - memcpy(new_model->number, new_model_number, len); +static ssize_t nvmet_subsys_attr_model_store(struct config_item *item, + const char *page, size_t count) +{ + struct nvmet_subsys *subsys = to_subsys(item); + ssize_t ret; down_write(&nvmet_config_sem); mutex_lock(&subsys->lock); - new_model = rcu_replace_pointer(subsys->model, new_model, - mutex_is_locked(&subsys->lock)); + ret = nvmet_subsys_attr_model_store_locked(subsys, page, count); mutex_unlock(&subsys->lock); up_write(&nvmet_config_sem); - kfree_rcu(new_model, rcuhead); - kfree(new_model_number); - - return count; + return ret; } CONFIGFS_ATTR(nvmet_subsys_, attr_model); diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 67bbf0e3b507..a027433b8be8 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -1118,9 +1118,20 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl) { lockdep_assert_held(&ctrl->lock); - if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES || - nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES || - nvmet_cc_mps(ctrl->cc) != 0 || + /* + * Only I/O controllers should verify iosqes,iocqes. + * Strictly speaking, the spec says a discovery controller + * should verify iosqes,iocqes are zeroed, however that + * would break backwards compatibility, so don't enforce it. 
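The nvmet changes above replace the RCU-managed model struct with a plain char * that is written at most once under subsys->lock and then treated as immutable for the lifetime of the subsystem. The lazy-initialisation idiom, reduced to its core with invented names:

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/string.h>

struct subsys_like {
        struct mutex lock;
        char *model_number;     /* set once under "lock", then read-only */
};

static int subsys_set_default_model(struct subsys_like *s, const char *def)
{
        int ret = 0;

        mutex_lock(&s->lock);
        if (!s->model_number) {
                s->model_number = kstrdup(def, GFP_KERNEL);
                if (!s->model_number)
                        ret = -ENOMEM;
        }
        mutex_unlock(&s->lock);
        return ret;             /* the real code maps this to NVME_SC_INTERNAL */
}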
+ */ + if (ctrl->subsys->type != NVME_NQN_DISC && + (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES || + nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) { + ctrl->csts = NVME_CSTS_CFS; + return; + } + + if (nvmet_cc_mps(ctrl->cc) != 0 || nvmet_cc_ams(ctrl->cc) != 0 || nvmet_cc_css(ctrl->cc) != 0) { ctrl->csts = NVME_CSTS_CFS; @@ -1532,7 +1543,7 @@ static void nvmet_subsys_free(struct kref *ref) nvmet_passthru_subsys_free(subsys); kfree(subsys->subsysnqn); - kfree_rcu(subsys->model, rcuhead); + kfree(subsys->model_number); kfree(subsys); } diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index cb6f86572b24..3e189e753bcf 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -349,7 +349,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set)); ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops; ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH; - ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */ + ctrl->admin_tag_set.reserved_tags = NVMF_RESERVED_TAGS; ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node; ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) + NVME_INLINE_SG_CNT * sizeof(struct scatterlist); @@ -520,7 +520,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl) memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); ctrl->tag_set.ops = &nvme_loop_mq_ops; ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; - ctrl->tag_set.reserved_tags = 1; /* fabric connect */ + ctrl->tag_set.reserved_tags = NVMF_RESERVED_TAGS; ctrl->tag_set.numa_node = ctrl->ctrl.numa_node; ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) + diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index cdfa537b1c0a..4b84edb49f22 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -208,11 +208,6 @@ struct nvmet_ctrl { bool pi_support; }; -struct nvmet_subsys_model { - struct rcu_head rcuhead; - char number[]; -}; - struct nvmet_subsys { enum nvme_subsys_type type; @@ -242,7 +237,7 @@ struct nvmet_subsys { struct config_group namespaces_group; struct config_group allowed_hosts_group; - struct nvmet_subsys_model __rcu *model; + char *model_number; #ifdef CONFIG_NVME_TARGET_PASSTHRU struct nvme_ctrl *passthru_ctrl; diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c index 26c587ccd152..2798944899b7 100644 --- a/drivers/nvme/target/passthru.c +++ b/drivers/nvme/target/passthru.c @@ -50,9 +50,9 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req) /* * nvmet_passthru_map_sg is limitted to using a single bio so limit - * the mdts based on BIO_MAX_PAGES as well + * the mdts based on BIO_MAX_VECS as well */ - max_hw_sectors = min_not_zero(BIO_MAX_PAGES << (PAGE_SHIFT - 9), + max_hw_sectors = min_not_zero(BIO_MAX_VECS << (PAGE_SHIFT - 9), max_hw_sectors); page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12; @@ -191,7 +191,7 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq) struct bio *bio; int i; - if (req->sg_cnt > BIO_MAX_PAGES) + if (req->sg_cnt > BIO_MAX_VECS) return -EINVAL; if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) { diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 06b6b742bb21..6c1f3ab7649c 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -802,9 +802,8 @@ static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc) 
nvmet_req_uninit(&rsp->req); nvmet_rdma_release_rsp(rsp); if (wc->status != IB_WC_WR_FLUSH_ERR) { - pr_info("RDMA WRITE for CQE 0x%p failed with status %s (%d).\n", - wc->wr_cqe, ib_wc_status_msg(wc->status), - wc->status); + pr_info("RDMA WRITE for CQE failed with status %s (%d).\n", + ib_wc_status_msg(wc->status), wc->status); nvmet_rdma_error_comp(queue); } return; diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 8b0485ada315..d658c6e8263a 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -1098,11 +1098,11 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue) cmd->rbytes_done += ret; } + nvmet_tcp_unmap_pdu_iovec(cmd); if (queue->data_digest) { nvmet_tcp_prep_recv_ddgst(cmd); return 0; } - nvmet_tcp_unmap_pdu_iovec(cmd); if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) && cmd->rbytes_done == cmd->req.transfer_len) { diff --git a/drivers/opp/core.c b/drivers/opp/core.c index c2689386a906..1556998425d5 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -1492,7 +1492,11 @@ static struct dev_pm_opp *_opp_get_next(struct opp_table *opp_table, mutex_lock(&opp_table->lock); list_for_each_entry(temp, &opp_table->opp_list, node) { - if (dynamic == temp->dynamic) { + /* + * Refcount must be dropped only once for each OPP by OPP core, + * do that with help of "removed" flag. + */ + if (!temp->removed && dynamic == temp->dynamic) { opp = temp; break; } @@ -1502,10 +1506,27 @@ static struct dev_pm_opp *_opp_get_next(struct opp_table *opp_table, return opp; } -bool _opp_remove_all_static(struct opp_table *opp_table) +/* + * Can't call dev_pm_opp_put() from under the lock as debugfs removal needs to + * happen lock less to avoid circular dependency issues. This routine must be + * called without the opp_table->lock held. + */ +static void _opp_remove_all(struct opp_table *opp_table, bool dynamic) { struct dev_pm_opp *opp; + while ((opp = _opp_get_next(opp_table, dynamic))) { + opp->removed = true; + dev_pm_opp_put(opp); + + /* Drop the references taken by dev_pm_opp_add() */ + if (dynamic) + dev_pm_opp_put_opp_table(opp_table); + } +} + +bool _opp_remove_all_static(struct opp_table *opp_table) +{ mutex_lock(&opp_table->lock); if (!opp_table->parsed_static_opps) { @@ -1520,13 +1541,7 @@ bool _opp_remove_all_static(struct opp_table *opp_table) mutex_unlock(&opp_table->lock); - /* - * Can't remove the OPP from under the lock, debugfs removal needs to - * happen lock less to avoid circular dependency issues. - */ - while ((opp = _opp_get_next(opp_table, false))) - dev_pm_opp_put(opp); - + _opp_remove_all(opp_table, false); return true; } @@ -1539,25 +1554,12 @@ bool _opp_remove_all_static(struct opp_table *opp_table) void dev_pm_opp_remove_all_dynamic(struct device *dev) { struct opp_table *opp_table; - struct dev_pm_opp *opp; - int count = 0; opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) return; - /* - * Can't remove the OPP from under the lock, debugfs removal needs to - * happen lock less to avoid circular dependency issues. 
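The OPP rework above has to drop each entry's reference outside opp_table->lock (debugfs teardown must not run under it), and the new per-OPP "removed" flag keeps the repeated list walk from returning the same entry twice. The skeleton of that walk, with invented types:

#include <linux/list.h>
#include <linux/mutex.h>

struct opp_like {
        struct list_head node;
        bool dynamic;
        bool removed;   /* reference already dropped by the core */
};

struct table_like {
        struct mutex lock;
        struct list_head opp_list;
};

/* Return the next not-yet-removed entry of the requested kind, or NULL. */
static struct opp_like *next_to_remove(struct table_like *t, bool dynamic)
{
        struct opp_like *opp = NULL, *tmp;

        mutex_lock(&t->lock);
        list_for_each_entry(tmp, &t->opp_list, node) {
                if (!tmp->removed && tmp->dynamic == dynamic) {
                        opp = tmp;
                        break;
                }
        }
        mutex_unlock(&t->lock);
        return opp;
}

/* Caller loop: while ((opp = next_to_remove(t, d))) { opp->removed = true; put(opp); } */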
- */ - while ((opp = _opp_get_next(opp_table, true))) { - dev_pm_opp_put(opp); - count++; - } - - /* Drop the references taken by dev_pm_opp_add() */ - while (count--) - dev_pm_opp_put_opp_table(opp_table); + _opp_remove_all(opp_table, true); /* Drop the reference taken by _find_opp_table() */ dev_pm_opp_put_opp_table(opp_table); diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h index 50fb9dced3c5..407c3bfe51d9 100644 --- a/drivers/opp/opp.h +++ b/drivers/opp/opp.h @@ -56,6 +56,7 @@ extern struct list_head opp_tables, lazy_opp_tables; * @dynamic: not-created from static DT entries. * @turbo: true if turbo (boost) OPP * @suspend: true if suspend OPP + * @removed: flag indicating that OPP's reference is dropped by OPP core. * @pstate: Device's power domain's performance state. * @rate: Frequency in hertz * @level: Performance level @@ -78,6 +79,7 @@ struct dev_pm_opp { bool dynamic; bool turbo; bool suspend; + bool removed; unsigned int pstate; unsigned long rate; unsigned int level; diff --git a/drivers/parport/parport_amiga.c b/drivers/parport/parport_amiga.c index 1e88bcfe0d7b..84d5701d606c 100644 --- a/drivers/parport/parport_amiga.c +++ b/drivers/parport/parport_amiga.c @@ -241,6 +241,5 @@ module_platform_driver_probe(amiga_parallel_driver, amiga_parallel_probe); MODULE_AUTHOR("Joerg Dorchain <joerg@dorchain.net>"); MODULE_DESCRIPTION("Parport Driver for Amiga builtin Port"); -MODULE_SUPPORTED_DEVICE("Amiga builtin Parallel Port"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:amiga-parallel"); diff --git a/drivers/parport/parport_atari.c b/drivers/parport/parport_atari.c index 2ff0fe053e6e..1623f010cdcc 100644 --- a/drivers/parport/parport_atari.c +++ b/drivers/parport/parport_atari.c @@ -218,7 +218,6 @@ static void __exit parport_atari_exit(void) MODULE_AUTHOR("Andreas Schwab"); MODULE_DESCRIPTION("Parport Driver for Atari builtin Port"); -MODULE_SUPPORTED_DEVICE("Atari builtin Parallel Port"); MODULE_LICENSE("GPL"); module_init(parport_atari_init) diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c index 9228e8f90309..1e43b3f399a8 100644 --- a/drivers/parport/parport_gsc.c +++ b/drivers/parport/parport_gsc.c @@ -41,7 +41,6 @@ MODULE_AUTHOR("Helge Deller <deller@gmx.de>"); MODULE_DESCRIPTION("HP-PARISC PC-style parallel port driver"); -MODULE_SUPPORTED_DEVICE("integrated PC-style parallel port"); MODULE_LICENSE("GPL"); diff --git a/drivers/parport/parport_mfc3.c b/drivers/parport/parport_mfc3.c index d6bbe8446301..f4d0da741e85 100644 --- a/drivers/parport/parport_mfc3.c +++ b/drivers/parport/parport_mfc3.c @@ -359,7 +359,6 @@ static void __exit parport_mfc3_exit(void) MODULE_AUTHOR("Joerg Dorchain <joerg@dorchain.net>"); MODULE_DESCRIPTION("Parport Driver for Multiface 3 expansion cards Parallel Port"); -MODULE_SUPPORTED_DEVICE("Multiface 3 Parallel Port"); MODULE_LICENSE("GPL"); module_init(parport_mfc3_init) diff --git a/drivers/parport/parport_sunbpp.c b/drivers/parport/parport_sunbpp.c index e840c1b5ab90..865fc41dbb6c 100644 --- a/drivers/parport/parport_sunbpp.c +++ b/drivers/parport/parport_sunbpp.c @@ -377,6 +377,5 @@ module_platform_driver(bpp_sbus_driver); MODULE_AUTHOR("Derrick J Brashear"); MODULE_DESCRIPTION("Parport Driver for Sparc bidirectional Port"); -MODULE_SUPPORTED_DEVICE("Sparc Bidirectional Parallel Port"); MODULE_VERSION("2.0"); MODULE_LICENSE("GPL"); diff --git a/drivers/pci/hotplug/rpadlpar_sysfs.c b/drivers/pci/hotplug/rpadlpar_sysfs.c index cdbfa5df3a51..dbfa0b55d31a 100644 --- a/drivers/pci/hotplug/rpadlpar_sysfs.c +++ 
b/drivers/pci/hotplug/rpadlpar_sysfs.c @@ -34,12 +34,11 @@ static ssize_t add_slot_store(struct kobject *kobj, struct kobj_attribute *attr, if (nbytes >= MAX_DRC_NAME_LEN) return 0; - memcpy(drc_name, buf, nbytes); + strscpy(drc_name, buf, nbytes + 1); end = strchr(drc_name, '\n'); - if (!end) - end = &drc_name[nbytes]; - *end = '\0'; + if (end) + *end = '\0'; rc = dlpar_add_slot(drc_name); if (rc) @@ -65,12 +64,11 @@ static ssize_t remove_slot_store(struct kobject *kobj, if (nbytes >= MAX_DRC_NAME_LEN) return 0; - memcpy(drc_name, buf, nbytes); + strscpy(drc_name, buf, nbytes + 1); end = strchr(drc_name, '\n'); - if (!end) - end = &drc_name[nbytes]; - *end = '\0'; + if (end) + *end = '\0'; rc = dlpar_remove_slot(drc_name); if (rc) diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c index c9e790c74051..a047c421debe 100644 --- a/drivers/pci/hotplug/s390_pci_hpc.c +++ b/drivers/pci/hotplug/s390_pci_hpc.c @@ -93,8 +93,9 @@ static int disable_slot(struct hotplug_slot *hotplug_slot) pci_dev_put(pdev); return -EBUSY; } + pci_dev_put(pdev); - zpci_remove_device(zdev); + zpci_remove_device(zdev, false); rc = zpci_disable_device(zdev); if (rc) diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 16a17215f633..e4d4e399004b 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1870,20 +1870,10 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags) int err; int i, bars = 0; - /* - * Power state could be unknown at this point, either due to a fresh - * boot or a device removal call. So get the current power state - * so that things like MSI message writing will behave as expected - * (e.g. if the device really is in D0 at enable time). - */ - if (dev->pm_cap) { - u16 pmcsr; - pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); - dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); - } - - if (atomic_inc_return(&dev->enable_cnt) > 1) + if (atomic_inc_return(&dev->enable_cnt) > 1) { + pci_update_current_state(dev, dev->current_state); return 0; /* already enabled */ + } bridge = pci_upstream_bridge(dev); if (bridge) diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c index c6fe0cfec0f6..2d7502648219 100644 --- a/drivers/pci/xen-pcifront.c +++ b/drivers/pci/xen-pcifront.c @@ -26,7 +26,7 @@ #include <xen/platform_pci.h> #include <asm/xen/swiotlb-xen.h> -#define INVALID_GRANT_REF (0) + #define INVALID_EVTCHN (-1) struct pci_bus_entry { @@ -42,7 +42,7 @@ struct pcifront_device { struct list_head root_buses; int evtchn; - int gnt_ref; + grant_ref_t gnt_ref; int irq; diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c index 66ad5b3ece19..f2a85500258d 100644 --- a/drivers/perf/arm_dmc620_pmu.c +++ b/drivers/perf/arm_dmc620_pmu.c @@ -681,6 +681,7 @@ static int dmc620_pmu_device_probe(struct platform_device *pdev) if (!name) { dev_err(&pdev->dev, "Create name failed, PMU @%pa\n", &res->start); + ret = -ENOMEM; goto out_teardown_dev; } diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 8085782cd8f9..9f3361c13ded 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -1357,6 +1357,7 @@ static int intel_pinctrl_add_padgroups_by_gpps(struct intel_pinctrl *pctrl, gpps[i].gpio_base = 0; break; case INTEL_GPIO_BASE_NOMAP: + break; default: break; } @@ -1393,6 +1394,7 @@ static int intel_pinctrl_add_padgroups_by_size(struct intel_pinctrl *pctrl, gpps[i].size = min(gpp_size, npins); npins -= gpps[i].size; + 
gpps[i].gpio_base = gpps[i].base; gpps[i].padown_num = padown_num; /* @@ -1491,8 +1493,13 @@ static int intel_pinctrl_probe(struct platform_device *pdev, if (IS_ERR(regs)) return PTR_ERR(regs); - /* Determine community features based on the revision */ + /* + * Determine community features based on the revision. + * A value of all ones means the device is not present. + */ value = readl(regs + REVID); + if (value == ~0u) + return -ENODEV; if (((value & REVID_MASK) >> REVID_SHIFT) >= 0x94) { community->features |= PINCTRL_FEATURE_DEBOUNCE; community->features |= PINCTRL_FEATURE_1K_PD; diff --git a/drivers/pinctrl/pinctrl-microchip-sgpio.c b/drivers/pinctrl/pinctrl-microchip-sgpio.c index f35edb0eac40..c12fa57ebd12 100644 --- a/drivers/pinctrl/pinctrl-microchip-sgpio.c +++ b/drivers/pinctrl/pinctrl-microchip-sgpio.c @@ -572,7 +572,7 @@ static void microchip_sgpio_irq_settype(struct irq_data *data, /* Type value spread over 2 registers sets: low, high bit */ sgpio_clrsetbits(bank->priv, REG_INT_TRIGGER, addr.bit, BIT(addr.port), (!!(type & 0x1)) << addr.port); - sgpio_clrsetbits(bank->priv, REG_INT_TRIGGER + SGPIO_MAX_BITS, addr.bit, + sgpio_clrsetbits(bank->priv, REG_INT_TRIGGER, SGPIO_MAX_BITS + addr.bit, BIT(addr.port), (!!(type & 0x2)) << addr.port); if (type == SGPIO_INT_TRG_LEVEL) diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index aa1a1c850d05..53a0badc6b03 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c @@ -3727,12 +3727,15 @@ static int __maybe_unused rockchip_pinctrl_suspend(struct device *dev) static int __maybe_unused rockchip_pinctrl_resume(struct device *dev) { struct rockchip_pinctrl *info = dev_get_drvdata(dev); - int ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX, - rk3288_grf_gpio6c_iomux | - GPIO6C6_SEL_WRITE_ENABLE); + int ret; - if (ret) - return ret; + if (info->ctrl->type == RK3288) { + ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX, + rk3288_grf_gpio6c_iomux | + GPIO6C6_SEL_WRITE_ENABLE); + if (ret) + return ret; + } return pinctrl_force_default(info->pctl_dev); } diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c index 369ee20a7ea9..2f19ab4db720 100644 --- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c +++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c @@ -392,7 +392,7 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group, unsigned long *configs, unsigned int nconfs) { struct lpi_pinctrl *pctrl = dev_get_drvdata(pctldev->dev); - unsigned int param, arg, pullup, strength; + unsigned int param, arg, pullup = LPI_GPIO_BIAS_DISABLE, strength = 2; bool value, output_enabled = false; const struct lpi_pingroup *g; unsigned long sval; diff --git a/drivers/pinctrl/qcom/pinctrl-sc7280.c b/drivers/pinctrl/qcom/pinctrl-sc7280.c index 8daccd530285..9d41abfca37e 100644 --- a/drivers/pinctrl/qcom/pinctrl-sc7280.c +++ b/drivers/pinctrl/qcom/pinctrl-sc7280.c @@ -1439,14 +1439,14 @@ static const struct msm_pingroup sc7280_groups[] = { [172] = PINGROUP(172, qdss, _, _, _, _, _, _, _, _), [173] = PINGROUP(173, qdss, _, _, _, _, _, _, _, _), [174] = PINGROUP(174, qdss, _, _, _, _, _, _, _, _), - [175] = UFS_RESET(ufs_reset, 0x1be000), - [176] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x1b3000, 15, 0), - [177] = SDC_QDSD_PINGROUP(sdc1_clk, 0x1b3000, 13, 6), - [178] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x1b3000, 11, 3), - [179] = SDC_QDSD_PINGROUP(sdc1_data, 0x1b3000, 9, 0), - [180] = SDC_QDSD_PINGROUP(sdc2_clk, 0x1b4000, 14, 6), - [181] = 
SDC_QDSD_PINGROUP(sdc2_cmd, 0x1b4000, 11, 3), - [182] = SDC_QDSD_PINGROUP(sdc2_data, 0x1b4000, 9, 0), + [175] = UFS_RESET(ufs_reset, 0xbe000), + [176] = SDC_QDSD_PINGROUP(sdc1_rclk, 0xb3004, 0, 6), + [177] = SDC_QDSD_PINGROUP(sdc1_clk, 0xb3000, 13, 6), + [178] = SDC_QDSD_PINGROUP(sdc1_cmd, 0xb3000, 11, 3), + [179] = SDC_QDSD_PINGROUP(sdc1_data, 0xb3000, 9, 0), + [180] = SDC_QDSD_PINGROUP(sdc2_clk, 0xb4000, 14, 6), + [181] = SDC_QDSD_PINGROUP(sdc2_cmd, 0xb4000, 11, 3), + [182] = SDC_QDSD_PINGROUP(sdc2_data, 0xb4000, 9, 0), }; static const struct msm_pinctrl_soc_data sc7280_pinctrl = { diff --git a/drivers/pinctrl/qcom/pinctrl-sdx55.c b/drivers/pinctrl/qcom/pinctrl-sdx55.c index 2b5b0e2b03ad..5aaf57b40407 100644 --- a/drivers/pinctrl/qcom/pinctrl-sdx55.c +++ b/drivers/pinctrl/qcom/pinctrl-sdx55.c @@ -423,7 +423,7 @@ static const char * const gpio_groups[] = { static const char * const qdss_stm_groups[] = { "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", "gpio12", "gpio13", - "gpio14", "gpio15", "gpio16", "gpio17", "gpio18", "gpio19" "gpio20", "gpio21", "gpio22", + "gpio14", "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22", "gpio23", "gpio44", "gpio45", "gpio52", "gpio53", "gpio56", "gpio57", "gpio61", "gpio62", "gpio63", "gpio64", "gpio65", "gpio66", }; diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index ad4e630e73e2..461ec61530eb 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -1173,15 +1173,20 @@ config INTEL_PMC_CORE depends on PCI help The Intel Platform Controller Hub for Intel Core SoCs provides access - to Power Management Controller registers via a PCI interface. This + to Power Management Controller registers via various interfaces. This driver can utilize debugging capabilities and supported features as - exposed by the Power Management Controller. + exposed by the Power Management Controller. It also may perform some + tasks in the PMC in order to enable transition into the SLPS0 state. + It should be selected on all Intel platforms supported by the driver. 
Supported features: - SLP_S0_RESIDENCY counter - PCH IP Power Gating status - - LTR Ignore + - LTR Ignore / LTR Show - MPHY/PLL gating status (Sunrisepoint PCH only) + - SLPS0 Debug registers (Cannonlake/Icelake PCH) + - Low Power Mode registers (Tigerlake and beyond) + - PMC quirks as needed to enable SLPS0/S0ix config INTEL_PMT_CLASS tristate diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c index 80f4b7785c6c..091e48c217ed 100644 --- a/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c +++ b/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c @@ -185,5 +185,8 @@ void exit_enum_attributes(void) sysfs_remove_group(wmi_priv.enumeration_data[instance_id].attr_name_kobj, &enumeration_attr_group); } + wmi_priv.enumeration_instances_count = 0; + kfree(wmi_priv.enumeration_data); + wmi_priv.enumeration_data = NULL; } diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c index 75aedbb733be..8a49ba6e44f9 100644 --- a/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c +++ b/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c @@ -175,5 +175,8 @@ void exit_int_attributes(void) sysfs_remove_group(wmi_priv.integer_data[instance_id].attr_name_kobj, &integer_attr_group); } + wmi_priv.integer_instances_count = 0; + kfree(wmi_priv.integer_data); + wmi_priv.integer_data = NULL; } diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c index 3abcd95477c0..834b3e82ad9f 100644 --- a/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c +++ b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c @@ -183,5 +183,8 @@ void exit_po_attributes(void) sysfs_remove_group(wmi_priv.po_data[instance_id].attr_name_kobj, &po_attr_group); } + wmi_priv.po_instances_count = 0; + kfree(wmi_priv.po_data); + wmi_priv.po_data = NULL; } diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c index ac75dce88a4c..552537852459 100644 --- a/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c +++ b/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c @@ -155,5 +155,8 @@ void exit_str_attributes(void) sysfs_remove_group(wmi_priv.str_data[instance_id].attr_name_kobj, &str_attr_group); } + wmi_priv.str_instances_count = 0; + kfree(wmi_priv.str_data); + wmi_priv.str_data = NULL; } diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c index cb81010ba1a2..7410ccae650c 100644 --- a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c +++ b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c @@ -210,25 +210,17 @@ static struct kobj_attribute pending_reboot = __ATTR_RO(pending_reboot); */ static int create_attributes_level_sysfs_files(void) { - int ret = sysfs_create_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr); + int ret; - if (ret) { - pr_debug("could not create reset_bios file\n"); + ret = sysfs_create_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr); + if (ret) return ret; - } ret = sysfs_create_file(&wmi_priv.main_dir_kset->kobj, &pending_reboot.attr); - if (ret) { - pr_debug("could not create changing_pending_reboot file\n"); - sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr); - } - return ret; -} + if (ret) + return ret; -static void 
release_reset_bios_data(void) -{ - sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr); - sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &pending_reboot.attr); + return 0; } static ssize_t wmi_sysman_attr_show(struct kobject *kobj, struct attribute *attr, @@ -373,8 +365,6 @@ static void destroy_attribute_objs(struct kset *kset) */ static void release_attributes_data(void) { - release_reset_bios_data(); - mutex_lock(&wmi_priv.mutex); exit_enum_attributes(); exit_int_attributes(); @@ -386,11 +376,13 @@ static void release_attributes_data(void) wmi_priv.authentication_dir_kset = NULL; } if (wmi_priv.main_dir_kset) { + sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr); + sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &pending_reboot.attr); destroy_attribute_objs(wmi_priv.main_dir_kset); kset_unregister(wmi_priv.main_dir_kset); + wmi_priv.main_dir_kset = NULL; } mutex_unlock(&wmi_priv.mutex); - } /** @@ -497,7 +489,6 @@ nextobj: err_attr_init: mutex_unlock(&wmi_priv.mutex); - release_attributes_data(); kfree(obj); return retval; } @@ -513,102 +504,91 @@ static int __init sysman_init(void) } ret = init_bios_attr_set_interface(); - if (ret || !wmi_priv.bios_attr_wdev) { - pr_debug("failed to initialize set interface\n"); - goto fail_set_interface; - } + if (ret) + return ret; ret = init_bios_attr_pass_interface(); - if (ret || !wmi_priv.password_attr_wdev) { - pr_debug("failed to initialize pass interface\n"); - goto fail_pass_interface; + if (ret) + goto err_exit_bios_attr_set_interface; + + if (!wmi_priv.bios_attr_wdev || !wmi_priv.password_attr_wdev) { + pr_debug("failed to find set or pass interface\n"); + ret = -ENODEV; + goto err_exit_bios_attr_pass_interface; } ret = class_register(&firmware_attributes_class); if (ret) - goto fail_class; + goto err_exit_bios_attr_pass_interface; wmi_priv.class_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0), NULL, "%s", DRIVER_NAME); if (IS_ERR(wmi_priv.class_dev)) { ret = PTR_ERR(wmi_priv.class_dev); - goto fail_classdev; + goto err_unregister_class; } wmi_priv.main_dir_kset = kset_create_and_add("attributes", NULL, &wmi_priv.class_dev->kobj); if (!wmi_priv.main_dir_kset) { ret = -ENOMEM; - goto fail_main_kset; + goto err_destroy_classdev; } wmi_priv.authentication_dir_kset = kset_create_and_add("authentication", NULL, &wmi_priv.class_dev->kobj); if (!wmi_priv.authentication_dir_kset) { ret = -ENOMEM; - goto fail_authentication_kset; + goto err_release_attributes_data; } ret = create_attributes_level_sysfs_files(); if (ret) { pr_debug("could not create reset BIOS attribute\n"); - goto fail_reset_bios; + goto err_release_attributes_data; } ret = init_bios_attributes(ENUM, DELL_WMI_BIOS_ENUMERATION_ATTRIBUTE_GUID); if (ret) { pr_debug("failed to populate enumeration type attributes\n"); - goto fail_create_group; + goto err_release_attributes_data; } ret = init_bios_attributes(INT, DELL_WMI_BIOS_INTEGER_ATTRIBUTE_GUID); if (ret) { pr_debug("failed to populate integer type attributes\n"); - goto fail_create_group; + goto err_release_attributes_data; } ret = init_bios_attributes(STR, DELL_WMI_BIOS_STRING_ATTRIBUTE_GUID); if (ret) { pr_debug("failed to populate string type attributes\n"); - goto fail_create_group; + goto err_release_attributes_data; } ret = init_bios_attributes(PO, DELL_WMI_BIOS_PASSOBJ_ATTRIBUTE_GUID); if (ret) { pr_debug("failed to populate pass object type attributes\n"); - goto fail_create_group; + goto err_release_attributes_data; } return 0; -fail_create_group: 
+err_release_attributes_data: release_attributes_data(); -fail_reset_bios: - if (wmi_priv.authentication_dir_kset) { - kset_unregister(wmi_priv.authentication_dir_kset); - wmi_priv.authentication_dir_kset = NULL; - } - -fail_authentication_kset: - if (wmi_priv.main_dir_kset) { - kset_unregister(wmi_priv.main_dir_kset); - wmi_priv.main_dir_kset = NULL; - } - -fail_main_kset: +err_destroy_classdev: device_destroy(&firmware_attributes_class, MKDEV(0, 0)); -fail_classdev: +err_unregister_class: class_unregister(&firmware_attributes_class); -fail_class: +err_exit_bios_attr_pass_interface: exit_bios_attr_pass_interface(); -fail_pass_interface: +err_exit_bios_attr_set_interface: exit_bios_attr_set_interface(); -fail_set_interface: return ret; } diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c index 2f5b8d09143e..57cc92891a57 100644 --- a/drivers/platform/x86/intel-hid.c +++ b/drivers/platform/x86/intel-hid.c @@ -90,6 +90,13 @@ static const struct dmi_system_id button_array_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x2 Detachable"), }, }, + { + .ident = "Lenovo ThinkPad X1 Tablet Gen 2", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Tablet Gen 2"), + }, + }, { } }; diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c index 8a8017f9ca91..3fdf4cbec9ad 100644 --- a/drivers/platform/x86/intel-vbtn.c +++ b/drivers/platform/x86/intel-vbtn.c @@ -48,8 +48,16 @@ static const struct key_entry intel_vbtn_keymap[] = { }; static const struct key_entry intel_vbtn_switchmap[] = { - { KE_SW, 0xCA, { .sw = { SW_DOCK, 1 } } }, /* Docked */ - { KE_SW, 0xCB, { .sw = { SW_DOCK, 0 } } }, /* Undocked */ + /* + * SW_DOCK should only be reported for docking stations, but DSDTs using the + * intel-vbtn code, always seem to use this for 2-in-1s / convertibles and set + * SW_DOCK=1 when in laptop-mode (in tandem with setting SW_TABLET_MODE=0). + * This causes userspace to think the laptop is docked to a port-replicator + * and to disable suspend-on-lid-close, which is undesirable. + * Map the dock events to KEY_IGNORE to avoid this broken SW_DOCK reporting. + */ + { KE_IGNORE, 0xCA, { .sw = { SW_DOCK, 1 } } }, /* Docked */ + { KE_IGNORE, 0xCB, { .sw = { SW_DOCK, 0 } } }, /* Undocked */ { KE_SW, 0xCC, { .sw = { SW_TABLET_MODE, 1 } } }, /* Tablet */ { KE_SW, 0xCD, { .sw = { SW_TABLET_MODE, 0 } } }, /* Laptop */ { KE_END } diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c index ee2f757515b0..b5888aeb4bcf 100644 --- a/drivers/platform/x86/intel_pmc_core.c +++ b/drivers/platform/x86/intel_pmc_core.c @@ -863,34 +863,45 @@ out_unlock: } DEFINE_SHOW_ATTRIBUTE(pmc_core_pll); -static ssize_t pmc_core_ltr_ignore_write(struct file *file, - const char __user *userbuf, - size_t count, loff_t *ppos) +static int pmc_core_send_ltr_ignore(u32 value) { struct pmc_dev *pmcdev = &pmc; const struct pmc_reg_map *map = pmcdev->map; - u32 val, buf_size, fd; - int err; - - buf_size = count < 64 ? 
count : 64; - - err = kstrtou32_from_user(userbuf, buf_size, 10, &val); - if (err) - return err; + u32 reg; + int err = 0; mutex_lock(&pmcdev->lock); - if (val > map->ltr_ignore_max) { + if (value > map->ltr_ignore_max) { err = -EINVAL; goto out_unlock; } - fd = pmc_core_reg_read(pmcdev, map->ltr_ignore_offset); - fd |= (1U << val); - pmc_core_reg_write(pmcdev, map->ltr_ignore_offset, fd); + reg = pmc_core_reg_read(pmcdev, map->ltr_ignore_offset); + reg |= BIT(value); + pmc_core_reg_write(pmcdev, map->ltr_ignore_offset, reg); out_unlock: mutex_unlock(&pmcdev->lock); + + return err; +} + +static ssize_t pmc_core_ltr_ignore_write(struct file *file, + const char __user *userbuf, + size_t count, loff_t *ppos) +{ + u32 buf_size, value; + int err; + + buf_size = min_t(u32, count, 64); + + err = kstrtou32_from_user(userbuf, buf_size, 10, &value); + if (err) + return err; + + err = pmc_core_send_ltr_ignore(value); + return err == 0 ? count : err; } @@ -1244,6 +1255,15 @@ static int pmc_core_probe(struct platform_device *pdev) pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit(); dmi_check_system(pmc_core_dmi_table); + /* + * On TGL, due to a hardware limitation, the GBE LTR blocks PC10 when + * a cable is attached. Tell the PMC to ignore it. + */ + if (pmcdev->map == &tgl_reg_map) { + dev_dbg(&pdev->dev, "ignoring GBE LTR\n"); + pmc_core_send_ltr_ignore(3); + } + pmc_core_dbgfs_register(pmcdev); device_initialized = true; diff --git a/drivers/platform/x86/intel_pmt_class.c b/drivers/platform/x86/intel_pmt_class.c index c8939fba4509..ee2b3bbeb83d 100644 --- a/drivers/platform/x86/intel_pmt_class.c +++ b/drivers/platform/x86/intel_pmt_class.c @@ -173,7 +173,7 @@ static int intel_pmt_dev_register(struct intel_pmt_entry *entry, struct intel_pmt_namespace *ns, struct device *parent) { - struct resource res; + struct resource res = {0}; struct device *dev; int ret; diff --git a/drivers/platform/x86/intel_pmt_crashlog.c b/drivers/platform/x86/intel_pmt_crashlog.c index 97dd749c8290..92d315a16cfd 100644 --- a/drivers/platform/x86/intel_pmt_crashlog.c +++ b/drivers/platform/x86/intel_pmt_crashlog.c @@ -23,18 +23,17 @@ #define CRASH_TYPE_OOBMSM 1 /* Control Flags */ -#define CRASHLOG_FLAG_DISABLE BIT(27) +#define CRASHLOG_FLAG_DISABLE BIT(28) /* - * Bits 28 and 29 control the state of bit 31. + * Bits 29 and 30 control the state of bit 31. * - * Bit 28 will clear bit 31, if set, allowing a new crashlog to be captured. - * Bit 29 will immediately trigger a crashlog to be generated, setting bit 31. - * Bit 30 is read-only and reserved as 0. + * Bit 29 will clear bit 31, if set, allowing a new crashlog to be captured. + * Bit 30 will immediately trigger a crashlog to be generated, setting bit 31. * Bit 31 is the read-only status with a 1 indicating log is complete. 
*/ -#define CRASHLOG_FLAG_TRIGGER_CLEAR BIT(28) -#define CRASHLOG_FLAG_TRIGGER_EXECUTE BIT(29) +#define CRASHLOG_FLAG_TRIGGER_CLEAR BIT(29) +#define CRASHLOG_FLAG_TRIGGER_EXECUTE BIT(30) #define CRASHLOG_FLAG_TRIGGER_COMPLETE BIT(31) #define CRASHLOG_FLAG_TRIGGER_MASK GENMASK(31, 28) diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index b881044b31b0..0d9e2ddbf904 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -4081,13 +4081,19 @@ static bool hotkey_notify_6xxx(const u32 hkey, case TP_HKEY_EV_KEY_NUMLOCK: case TP_HKEY_EV_KEY_FN: - case TP_HKEY_EV_KEY_FN_ESC: /* key press events, we just ignore them as long as the EC * is still reporting them in the normal keyboard stream */ *send_acpi_ev = false; *ignore_acpi_ev = true; return true; + case TP_HKEY_EV_KEY_FN_ESC: + /* Get the media key status to foce the status LED to update */ + acpi_evalf(hkey_handle, NULL, "GMKS", "v"); + *send_acpi_ev = false; + *ignore_acpi_ev = true; + return true; + case TP_HKEY_EV_TABLET_CHANGED: tpacpi_input_send_tabletsw(); hotkey_tablet_mode_notify_change(); @@ -9845,6 +9851,11 @@ static struct ibm_struct lcdshadow_driver_data = { * Thinkpad sensor interfaces */ +#define DYTC_CMD_QUERY 0 /* To get DYTC status - enable/revision */ +#define DYTC_QUERY_ENABLE_BIT 8 /* Bit 8 - 0 = disabled, 1 = enabled */ +#define DYTC_QUERY_SUBREV_BIT 16 /* Bits 16 - 27 - sub revision */ +#define DYTC_QUERY_REV_BIT 28 /* Bits 28 - 31 - revision */ + #define DYTC_CMD_GET 2 /* To get current IC function and mode */ #define DYTC_GET_LAPMODE_BIT 17 /* Set when in lapmode */ @@ -9855,6 +9866,7 @@ static bool has_palmsensor; static bool has_lapsensor; static bool palm_state; static bool lap_state; +static int dytc_version; static int dytc_command(int command, int *output) { @@ -9869,6 +9881,33 @@ static int dytc_command(int command, int *output) return 0; } +static int dytc_get_version(void) +{ + int err, output; + + /* Check if we've been called before - and just return cached value */ + if (dytc_version) + return dytc_version; + + /* Otherwise query DYTC and extract version information */ + err = dytc_command(DYTC_CMD_QUERY, &output); + /* + * If support isn't available (ENODEV) then don't return an error + * and don't create the sysfs group + */ + if (err == -ENODEV) + return 0; + /* For all other errors we can flag the failure */ + if (err) + return err; + + /* Check DYTC is enabled and supports mode setting */ + if (output & BIT(DYTC_QUERY_ENABLE_BIT)) + dytc_version = (output >> DYTC_QUERY_REV_BIT) & 0xF; + + return 0; +} + static int lapsensor_get(bool *present, bool *state) { int output, err; @@ -9974,7 +10013,18 @@ static int tpacpi_proxsensor_init(struct ibm_init_struct *iibm) if (err) return err; } - if (has_lapsensor) { + + /* Check if we know the DYTC version, if we don't then get it */ + if (!dytc_version) { + err = dytc_get_version(); + if (err) + return err; + } + /* + * Platforms before DYTC version 5 claim to have a lap sensor, but it doesn't work, so we + * ignore them + */ + if (has_lapsensor && (dytc_version >= 5)) { err = sysfs_create_file(&tpacpi_pdev->dev.kobj, &dev_attr_dytc_lapmode.attr); if (err) return err; @@ -9999,14 +10049,9 @@ static struct ibm_struct proxsensor_driver_data = { * DYTC Platform Profile interface */ -#define DYTC_CMD_QUERY 0 /* To get DYTC status - enable/revision */ #define DYTC_CMD_SET 1 /* To enable/disable IC function mode */ #define DYTC_CMD_RESET 0x1ff /* To reset back to default */ -#define 
DYTC_QUERY_ENABLE_BIT 8 /* Bit 8 - 0 = disabled, 1 = enabled */ -#define DYTC_QUERY_SUBREV_BIT 16 /* Bits 16 - 27 - sub revision */ -#define DYTC_QUERY_REV_BIT 28 /* Bits 28 - 31 - revision */ - #define DYTC_GET_FUNCTION_BIT 8 /* Bits 8-11 - function setting */ #define DYTC_GET_MODE_BIT 12 /* Bits 12-15 - mode setting */ @@ -10142,8 +10187,13 @@ static int dytc_profile_set(struct platform_profile_handler *pprof, return err; if (profile == PLATFORM_PROFILE_BALANCED) { - /* To get back to balanced mode we just issue a reset command */ - err = dytc_command(DYTC_CMD_RESET, &output); + /* + * To get back to balanced mode we need to issue a reset command. + * Note we still need to disable CQL mode before hand and re-enable + * it afterwards, otherwise dytc_lapmode gets reset to 0 and stays + * stuck at 0 for aprox. 30 minutes. + */ + err = dytc_cql_command(DYTC_CMD_RESET, &output); if (err) goto unlock; } else { @@ -10211,28 +10261,28 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm) if (err) return err; + /* Check if we know the DYTC version, if we don't then get it */ + if (!dytc_version) { + err = dytc_get_version(); + if (err) + return err; + } /* Check DYTC is enabled and supports mode setting */ - if (output & BIT(DYTC_QUERY_ENABLE_BIT)) { - /* Only DYTC v5.0 and later has this feature. */ - int dytc_version; - - dytc_version = (output >> DYTC_QUERY_REV_BIT) & 0xF; - if (dytc_version >= 5) { - dbg_printk(TPACPI_DBG_INIT, - "DYTC version %d: thermal mode available\n", dytc_version); - /* Create platform_profile structure and register */ - err = platform_profile_register(&dytc_profile); - /* - * If for some reason platform_profiles aren't enabled - * don't quit terminally. - */ - if (err) - return 0; + if (dytc_version >= 5) { + dbg_printk(TPACPI_DBG_INIT, + "DYTC version %d: thermal mode available\n", dytc_version); + /* Create platform_profile structure and register */ + err = platform_profile_register(&dytc_profile); + /* + * If for some reason platform_profiles aren't enabled + * don't quit terminally. + */ + if (err) + return 0; - dytc_profile_available = true; - /* Ensure initial values are correct */ - dytc_profile_refresh(); - } + dytc_profile_available = true; + /* Ensure initial values are correct */ + dytc_profile_refresh(); } return 0; } diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c index beb5f74944cd..08f4cf0ad9e3 100644 --- a/drivers/ptp/ptp_qoriq.c +++ b/drivers/ptp/ptp_qoriq.c @@ -189,15 +189,16 @@ int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) tmr_add = ptp_qoriq->tmr_add; adj = tmr_add; - /* calculate diff as adj*(scaled_ppm/65536)/1000000 - * and round() to the nearest integer + /* + * Calculate diff and round() to the nearest integer + * + * diff = adj * (ppb / 1000000000) + * = adj * scaled_ppm / 65536000000 */ - adj *= scaled_ppm; - diff = div_u64(adj, 8000000); - diff = (diff >> 13) + ((diff >> 12) & 1); + diff = mul_u64_u64_div_u64(adj, scaled_ppm, 32768000000); + diff = DIV64_U64_ROUND_UP(diff, 2); tmr_add = neg_adj ? 
tmr_add - diff : tmr_add + diff; - ptp_qoriq->write(®s->ctrl_regs->tmr_add, tmr_add); return 0; diff --git a/drivers/regulator/mt6315-regulator.c b/drivers/regulator/mt6315-regulator.c index d49a1534d8e9..9edc34981ee0 100644 --- a/drivers/regulator/mt6315-regulator.c +++ b/drivers/regulator/mt6315-regulator.c @@ -41,7 +41,7 @@ struct mt6315_chip { .type = REGULATOR_VOLTAGE, \ .id = _bid, \ .owner = THIS_MODULE, \ - .n_voltages = 0xbf, \ + .n_voltages = 0xc0, \ .linear_ranges = mt_volt_range1, \ .n_linear_ranges = ARRAY_SIZE(mt_volt_range1), \ .vsel_reg = _vsel, \ @@ -69,7 +69,7 @@ static unsigned int mt6315_map_mode(u32 mode) case MT6315_BUCK_MODE_LP: return REGULATOR_MODE_IDLE; default: - return -EINVAL; + return REGULATOR_MODE_INVALID; } } diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c index 833d398c6aa2..2f7ee212cb8c 100644 --- a/drivers/regulator/pca9450-regulator.c +++ b/drivers/regulator/pca9450-regulator.c @@ -797,6 +797,14 @@ static int pca9450_i2c_probe(struct i2c_client *i2c, return ret; } + /* Clear PRESET_EN bit in BUCK123_DVS to use DVS registers */ + ret = regmap_clear_bits(pca9450->regmap, PCA9450_REG_BUCK123_DVS, + BUCK123_PRESET_EN); + if (ret) { + dev_err(&i2c->dev, "Failed to clear PRESET_EN bit: %d\n", ret); + return ret; + } + /* Set reset behavior on assertion of WDOG_B signal */ ret = regmap_update_bits(pca9450->regmap, PCA9450_REG_RESET_CTRL, WDOG_B_CFG_MASK, WDOG_B_CFG_COLD_LDO12); @@ -814,7 +822,7 @@ static int pca9450_i2c_probe(struct i2c_client *i2c, if (IS_ERR(pca9450->sd_vsel_gpio)) { dev_err(&i2c->dev, "Failed to get SD_VSEL GPIO\n"); - return ret; + return PTR_ERR(pca9450->sd_vsel_gpio); } dev_info(&i2c->dev, "%s probed.\n", diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c index 79a554f1029d..65a108c9121f 100644 --- a/drivers/regulator/qcom-rpmh-regulator.c +++ b/drivers/regulator/qcom-rpmh-regulator.c @@ -726,8 +726,8 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps510 = { static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = { .regulator_type = VRM, .ops = &rpmh_regulator_vrm_ops, - .voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 16000), - .n_voltages = 5, + .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 235, 16000), + .n_voltages = 236, .pmic_mode_map = pmic_mode_map_pmic5_smps, .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode, }; @@ -901,7 +901,7 @@ static const struct rpmh_vreg_init_data pm8350_vreg_data[] = { }; static const struct rpmh_vreg_init_data pm8350c_vreg_data[] = { - RPMH_VREG("smps1", "smp%s1", &pmic5_hfsmps510, "vdd-s1"), + RPMH_VREG("smps1", "smp%s1", &pmic5_hfsmps515, "vdd-s1"), RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps510, "vdd-s2"), RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps510, "vdd-s3"), RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps510, "vdd-s4"), diff --git a/drivers/regulator/rt4831-regulator.c b/drivers/regulator/rt4831-regulator.c index 3d4695ded629..e3aaac90d238 100644 --- a/drivers/regulator/rt4831-regulator.c +++ b/drivers/regulator/rt4831-regulator.c @@ -153,9 +153,9 @@ static int rt4831_regulator_probe(struct platform_device *pdev) int i, ret; regmap = dev_get_regmap(pdev->dev.parent, NULL); - if (IS_ERR(regmap)) { + if (!regmap) { dev_err(&pdev->dev, "Failed to init regmap\n"); - return PTR_ERR(regmap); + return -ENODEV; } /* Configure DSV mode to normal by default */ diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 28c04a4efa66..3a945abf268c 100644 --- a/drivers/s390/block/dasd.c +++ 
b/drivers/s390/block/dasd.c @@ -63,7 +63,6 @@ void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *); MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>"); MODULE_DESCRIPTION("Linux on S/390 DASD device driver," " Copyright IBM Corp. 2000"); -MODULE_SUPPORTED_DEVICE("dasd"); MODULE_LICENSE("GPL"); /* @@ -3052,7 +3051,8 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx, basedev = block->base; spin_lock_irq(&dq->lock); - if (basedev->state < DASD_STATE_READY) { + if (basedev->state < DASD_STATE_READY || + test_bit(DASD_FLAG_OFFLINE, &basedev->flags)) { DBF_DEV_EVENT(DBF_ERR, basedev, "device not ready for request %p", req); rc = BLK_STS_IOERR; @@ -3487,8 +3487,6 @@ void dasd_generic_remove(struct ccw_device *cdev) struct dasd_device *device; struct dasd_block *block; - cdev->handler = NULL; - device = dasd_device_from_cdev(cdev); if (IS_ERR(device)) { dasd_remove_sysfs_files(cdev); @@ -3507,6 +3505,7 @@ void dasd_generic_remove(struct ccw_device *cdev) * no quite down yet. */ dasd_set_target_state(device, DASD_STATE_NEW); + cdev->handler = NULL; /* dasd_delete_device destroys the device reference. */ block = device->block; dasd_delete_device(device); diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index 15692449a1c3..307a80f85c07 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c @@ -424,8 +424,10 @@ tty3270_update(struct timer_list *t) * last output position matches the start address * of this line. */ - if (s->string[1] == sba[0] && s->string[2] == sba[1]) - str += 3, len -= 3; + if (s->string[1] == sba[0] && s->string[2] == sba[1]) { + str += 3; + len -= 3; + } if (raw3270_request_add_data(wrq, str, len) != 0) break; list_del_init(&s->update); diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 1515fdc3c1ab..bd3c724bf695 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -15,6 +15,7 @@ #include <linux/init.h> #include <linux/slab.h> #include <linux/debugfs.h> +#include <linux/reboot.h> #include <asm/asm-offsets.h> #include <asm/ipl.h> @@ -238,6 +239,28 @@ static int __init zcore_reipl_init(void) return 0; } +static int zcore_reboot_and_on_panic_handler(struct notifier_block *self, + unsigned long event, + void *data) +{ + if (hsa_available) + release_hsa(); + + return NOTIFY_OK; +} + +static struct notifier_block zcore_reboot_notifier = { + .notifier_call = zcore_reboot_and_on_panic_handler, + /* we need to be notified before reipl and kdump */ + .priority = INT_MAX, +}; + +static struct notifier_block zcore_on_panic_notifier = { + .notifier_call = zcore_reboot_and_on_panic_handler, + /* we need to be notified before reipl and kdump */ + .priority = INT_MAX, +}; + static int __init zcore_init(void) { unsigned char arch; @@ -293,28 +316,15 @@ static int __init zcore_init(void) goto fail; zcore_dir = debugfs_create_dir("zcore" , NULL); - if (!zcore_dir) { - rc = -ENOMEM; - goto fail; - } zcore_reipl_file = debugfs_create_file("reipl", S_IRUSR, zcore_dir, NULL, &zcore_reipl_fops); - if (!zcore_reipl_file) { - rc = -ENOMEM; - goto fail_dir; - } zcore_hsa_file = debugfs_create_file("hsa", S_IRUSR|S_IWUSR, zcore_dir, NULL, &zcore_hsa_fops); - if (!zcore_hsa_file) { - rc = -ENOMEM; - goto fail_reipl_file; - } - return 0; -fail_reipl_file: - debugfs_remove(zcore_reipl_file); -fail_dir: - debugfs_remove(zcore_dir); + register_reboot_notifier(&zcore_reboot_notifier); + atomic_notifier_chain_register(&panic_notifier_list, &zcore_on_panic_notifier); + + return 0; 
fail: diag308(DIAG308_REL_HSA, NULL); return rc; diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 6420b197bb05..05e136cfb8be 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -47,7 +47,7 @@ static void ccw_timeout_log(struct ccw_device *cdev) orb = &private->orb; cc = stsch(sch->schid, &schib); - printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, " + printk(KERN_WARNING "cio: ccw device timeout occurred at %lx, " "device information:\n", get_tod_clock()); printk(KERN_WARNING "cio: orb:\n"); print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c index 68106be4ba7a..767ac41686fe 100644 --- a/drivers/s390/cio/vfio_ccw_ops.c +++ b/drivers/s390/cio/vfio_ccw_ops.c @@ -543,7 +543,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev, if (ret) return ret; - return copy_to_user((void __user *)arg, &info, minsz); + return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0; } case VFIO_DEVICE_GET_REGION_INFO: { @@ -561,7 +561,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev, if (ret) return ret; - return copy_to_user((void __user *)arg, &info, minsz); + return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0; } case VFIO_DEVICE_GET_IRQ_INFO: { @@ -582,7 +582,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev, if (info.count == -1) return -EINVAL; - return copy_to_user((void __user *)arg, &info, minsz); + return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0; } case VFIO_DEVICE_SET_IRQS: { diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index 41fc2e4135fe..1ffdd411201c 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -1286,7 +1286,7 @@ static int vfio_ap_mdev_get_device_info(unsigned long arg) info.num_regions = 0; info.num_irqs = 0; - return copy_to_user((void __user *)arg, &info, minsz); + return copy_to_user((void __user *)arg, &info, minsz) ? 
-EFAULT : 0; } static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev, diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index a1da83b0b0ef..91acff493612 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -436,7 +436,7 @@ struct qeth_qdio_out_buffer { int is_header[QDIO_MAX_ELEMENTS_PER_BUFFER]; struct qeth_qdio_out_q *q; - struct qeth_qdio_out_buffer *next_pending; + struct list_head list_entry; }; struct qeth_card; @@ -500,6 +500,7 @@ struct qeth_qdio_out_q { struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q]; struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q]; struct qdio_outbuf_state *bufstates; /* convenience pointer */ + struct list_head pending_bufs; struct qeth_out_q_stats stats; spinlock_t lock; unsigned int priority; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index b71b8902d1c4..a814698387bc 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -73,8 +73,6 @@ static void qeth_free_qdio_queues(struct qeth_card *card); static void qeth_notify_skbs(struct qeth_qdio_out_q *queue, struct qeth_qdio_out_buffer *buf, enum iucv_tx_notify notification); -static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error, - int budget); static void qeth_close_dev_handler(struct work_struct *work) { @@ -465,41 +463,6 @@ static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15, return n; } -static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx, - int forced_cleanup) -{ - if (q->card->options.cq != QETH_CQ_ENABLED) - return; - - if (q->bufs[bidx]->next_pending != NULL) { - struct qeth_qdio_out_buffer *head = q->bufs[bidx]; - struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending; - - while (c) { - if (forced_cleanup || - atomic_read(&c->state) == QETH_QDIO_BUF_EMPTY) { - struct qeth_qdio_out_buffer *f = c; - - QETH_CARD_TEXT(f->q->card, 5, "fp"); - QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f); - /* release here to avoid interleaving between - outbound tasklet and inbound tasklet - regarding notifications and lifecycle */ - qeth_tx_complete_buf(c, forced_cleanup, 0); - - c = f->next_pending; - WARN_ON_ONCE(head->next_pending != f); - head->next_pending = c; - kmem_cache_free(qeth_qdio_outbuf_cache, f); - } else { - head = c; - c = c->next_pending; - } - - } - } -} - static void qeth_qdio_handle_aob(struct qeth_card *card, unsigned long phys_aob_addr) { @@ -507,6 +470,7 @@ static void qeth_qdio_handle_aob(struct qeth_card *card, struct qaob *aob; struct qeth_qdio_out_buffer *buffer; enum iucv_tx_notify notification; + struct qeth_qdio_out_q *queue; unsigned int i; aob = (struct qaob *) phys_to_virt(phys_aob_addr); @@ -537,7 +501,7 @@ static void qeth_qdio_handle_aob(struct qeth_card *card, qeth_notify_skbs(buffer->q, buffer, notification); /* Free dangling allocations. The attached skbs are handled by - * qeth_cleanup_handled_pending(). + * qeth_tx_complete_pending_bufs(). 
*/ for (i = 0; i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card); @@ -549,7 +513,9 @@ static void qeth_qdio_handle_aob(struct qeth_card *card, buffer->is_header[i] = 0; } + queue = buffer->q; atomic_set(&buffer->state, QETH_QDIO_BUF_EMPTY); + napi_schedule(&queue->napi); break; default: WARN_ON_ONCE(1); @@ -1424,9 +1390,6 @@ static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error, struct qeth_qdio_out_q *queue = buf->q; struct sk_buff *skb; - if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING) - qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR); - /* Empty buffer? */ if (buf->next_element_to_fill == 0) return; @@ -1488,14 +1451,38 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); } +static void qeth_tx_complete_pending_bufs(struct qeth_card *card, + struct qeth_qdio_out_q *queue, + bool drain) +{ + struct qeth_qdio_out_buffer *buf, *tmp; + + list_for_each_entry_safe(buf, tmp, &queue->pending_bufs, list_entry) { + if (drain || atomic_read(&buf->state) == QETH_QDIO_BUF_EMPTY) { + QETH_CARD_TEXT(card, 5, "fp"); + QETH_CARD_TEXT_(card, 5, "%lx", (long) buf); + + if (drain) + qeth_notify_skbs(queue, buf, + TX_NOTIFY_GENERALERROR); + qeth_tx_complete_buf(buf, drain, 0); + + list_del(&buf->list_entry); + kmem_cache_free(qeth_qdio_outbuf_cache, buf); + } + } +} + static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free) { int j; + qeth_tx_complete_pending_bufs(q->card, q, true); + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { if (!q->bufs[j]) continue; - qeth_cleanup_handled_pending(q, j, 1); + qeth_clear_output_buffer(q, q->bufs[j], true, 0); if (free) { kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]); @@ -2615,7 +2602,6 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx) skb_queue_head_init(&newbuf->skb_list); lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key); newbuf->q = q; - newbuf->next_pending = q->bufs[bidx]; atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY); q->bufs[bidx] = newbuf; return 0; @@ -2634,15 +2620,28 @@ static void qeth_free_output_queue(struct qeth_qdio_out_q *q) static struct qeth_qdio_out_q *qeth_alloc_output_queue(void) { struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL); + unsigned int i; if (!q) return NULL; - if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) { - kfree(q); - return NULL; + if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) + goto err_qdio_bufs; + + for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) { + if (qeth_init_qdio_out_buf(q, i)) + goto err_out_bufs; } + return q; + +err_out_bufs: + while (i > 0) + kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[--i]); + qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); +err_qdio_bufs: + kfree(q); + return NULL; } static void qeth_tx_completion_timer(struct timer_list *timer) @@ -2655,7 +2654,7 @@ static void qeth_tx_completion_timer(struct timer_list *timer) static int qeth_alloc_qdio_queues(struct qeth_card *card) { - int i, j; + unsigned int i; QETH_CARD_TEXT(card, 2, "allcqdbf"); @@ -2684,18 +2683,12 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card) card->qdio.out_qs[i] = queue; queue->card = card; queue->queue_no = i; + INIT_LIST_HEAD(&queue->pending_bufs); spin_lock_init(&queue->lock); timer_setup(&queue->timer, qeth_tx_completion_timer, 0); queue->coalesce_usecs = QETH_TX_COALESCE_USECS; queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES; queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT; - - /* give 
outbound qeth_qdio_buffers their qdio_buffers */ - for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { - WARN_ON(queue->bufs[j]); - if (qeth_init_qdio_out_buf(queue, j)) - goto out_freeoutqbufs; - } } /* completion */ @@ -2704,13 +2697,6 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card) return 0; -out_freeoutqbufs: - while (j > 0) { - --j; - kmem_cache_free(qeth_qdio_outbuf_cache, - card->qdio.out_qs[i]->bufs[j]); - card->qdio.out_qs[i]->bufs[j] = NULL; - } out_freeoutq: while (i > 0) { qeth_free_output_queue(card->qdio.out_qs[--i]); @@ -6107,6 +6093,8 @@ static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue, qeth_schedule_recovery(card); } + list_add(&buffer->list_entry, + &queue->pending_bufs); /* Skip clearing the buffer: */ return; case QETH_QDIO_BUF_QAOB_OK: @@ -6162,6 +6150,8 @@ static int qeth_tx_poll(struct napi_struct *napi, int budget) unsigned int bytes = 0; int completed; + qeth_tx_complete_pending_bufs(card, queue, false); + if (qeth_out_queue_is_empty(queue)) { napi_complete(napi); return 0; @@ -6194,7 +6184,6 @@ static int qeth_tx_poll(struct napi_struct *napi, int budget) qeth_handle_send_error(card, buffer, error); qeth_iqd_tx_complete(queue, bidx, error, budget); - qeth_cleanup_handled_pending(queue, bidx, false); } netdev_tx_completed_queue(txq, packets, bytes); @@ -7249,9 +7238,7 @@ int qeth_open(struct net_device *dev) card->data.state = CH_STATE_UP; netif_tx_start_all_queues(dev); - napi_enable(&card->napi); local_bh_disable(); - napi_schedule(&card->napi); if (IS_IQD(card)) { struct qeth_qdio_out_q *queue; unsigned int i; @@ -7263,8 +7250,12 @@ int qeth_open(struct net_device *dev) napi_schedule(&queue->napi); } } + + napi_enable(&card->napi); + napi_schedule(&card->napi); /* kick-start the NAPI softirq: */ local_bh_enable(); + return 0; } EXPORT_SYMBOL_GPL(qeth_open); @@ -7274,6 +7265,11 @@ int qeth_stop(struct net_device *dev) struct qeth_card *card = dev->ml_priv; QETH_CARD_TEXT(card, 4, "qethstop"); + + napi_disable(&card->napi); + cancel_delayed_work_sync(&card->buffer_reclaim_work); + qdio_stop_irq(CARD_DDEV(card)); + if (IS_IQD(card)) { struct qeth_qdio_out_q *queue; unsigned int i; @@ -7294,10 +7290,6 @@ int qeth_stop(struct net_device *dev) netif_tx_disable(dev); } - napi_disable(&card->napi); - cancel_delayed_work_sync(&card->buffer_reclaim_work); - qdio_stop_irq(CARD_DDEV(card)); - return 0; } EXPORT_SYMBOL_GPL(qeth_stop); diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c index 00e72b97d0b6..d93595b39afa 100644 --- a/drivers/sbus/char/display7seg.c +++ b/drivers/sbus/char/display7seg.c @@ -50,7 +50,6 @@ MODULE_PARM_DESC(sol_compat, MODULE_AUTHOR("Eric Brower <ebrower@usa.net>"); MODULE_DESCRIPTION("7-Segment Display driver for Sun Microsystems CP1400/1500"); MODULE_LICENSE("GPL"); -MODULE_SUPPORTED_DEVICE("d7s"); struct d7s { void __iomem *regs; diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 38369766511c..f135a10f582b 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -80,7 +80,6 @@ MODULE_AUTHOR("Hewlett-Packard Company"); MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \ HPSA_DRIVER_VERSION); -MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers"); MODULE_VERSION(HPSA_DRIVER_VERSION); MODULE_LICENSE("GPL"); MODULE_ALIAS("cciss"); diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 755313b766b9..61831f2fdb30 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -21,6 +21,7 @@ #include <linux/bsg-lib.h> 
#include <asm/firmware.h> #include <asm/irq.h> +#include <asm/rtas.h> #include <asm/vio.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> @@ -158,6 +159,9 @@ static void ibmvfc_npiv_logout(struct ibmvfc_host *); static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *); static void ibmvfc_tgt_move_login(struct ibmvfc_target *); +static void ibmvfc_release_sub_crqs(struct ibmvfc_host *); +static void ibmvfc_init_sub_crqs(struct ibmvfc_host *); + static const char *unknown_error = "unknown error"; static long h_reg_sub_crq(unsigned long unit_address, unsigned long ioba, @@ -899,6 +903,9 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost) { int rc = 0; struct vio_dev *vdev = to_vio_dev(vhost->dev); + unsigned long flags; + + ibmvfc_release_sub_crqs(vhost); /* Re-enable the CRQ */ do { @@ -910,6 +917,15 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost) if (rc) dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc); + spin_lock_irqsave(vhost->host->host_lock, flags); + spin_lock(vhost->crq.q_lock); + vhost->do_enquiry = 1; + vhost->using_channels = 0; + spin_unlock(vhost->crq.q_lock); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + + ibmvfc_init_sub_crqs(vhost); + return rc; } @@ -926,8 +942,8 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost) unsigned long flags; struct vio_dev *vdev = to_vio_dev(vhost->dev); struct ibmvfc_queue *crq = &vhost->crq; - struct ibmvfc_queue *scrq; - int i; + + ibmvfc_release_sub_crqs(vhost); /* Close the CRQ */ do { @@ -947,16 +963,6 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost) memset(crq->msgs.crq, 0, PAGE_SIZE); crq->cur = 0; - if (vhost->scsi_scrqs.scrqs) { - for (i = 0; i < nr_scsi_hw_queues; i++) { - scrq = &vhost->scsi_scrqs.scrqs[i]; - spin_lock(scrq->q_lock); - memset(scrq->msgs.scrq, 0, PAGE_SIZE); - scrq->cur = 0; - spin_unlock(scrq->q_lock); - } - } - /* And re-open it again */ rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, crq->msg_token, PAGE_SIZE); @@ -966,9 +972,12 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost) dev_warn(vhost->dev, "Partner adapter not ready\n"); else if (rc != 0) dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc); + spin_unlock(vhost->crq.q_lock); spin_unlock_irqrestore(vhost->host->host_lock, flags); + ibmvfc_init_sub_crqs(vhost); + return rc; } @@ -2363,6 +2372,24 @@ static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device) } /** + * ibmvfc_event_is_free - Check if event is free or not + * @evt: ibmvfc event struct + * + * Returns: + * true / false + **/ +static bool ibmvfc_event_is_free(struct ibmvfc_event *evt) +{ + struct ibmvfc_event *loop_evt; + + list_for_each_entry(loop_evt, &evt->queue->free, queue_list) + if (loop_evt == evt) + return true; + + return false; +} + +/** * ibmvfc_wait_for_ops - Wait for ops to complete * @vhost: ibmvfc host struct * @device: device to match (starget or sdev) @@ -2376,35 +2403,58 @@ static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device, { struct ibmvfc_event *evt; DECLARE_COMPLETION_ONSTACK(comp); - int wait; + int wait, i, q_index, q_size; unsigned long flags; signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ; + struct ibmvfc_queue *queues; ENTER; + if (vhost->mq_enabled && vhost->using_channels) { + queues = vhost->scsi_scrqs.scrqs; + q_size = vhost->scsi_scrqs.active_queues; + } else { + queues = &vhost->crq; + q_size = 1; + } + do { wait = 0; - spin_lock_irqsave(&vhost->crq.l_lock, flags); - list_for_each_entry(evt, &vhost->crq.sent, queue_list) { 
- if (match(evt, device)) { - evt->eh_comp = ∁ - wait++; + spin_lock_irqsave(vhost->host->host_lock, flags); + for (q_index = 0; q_index < q_size; q_index++) { + spin_lock(&queues[q_index].l_lock); + for (i = 0; i < queues[q_index].evt_pool.size; i++) { + evt = &queues[q_index].evt_pool.events[i]; + if (!ibmvfc_event_is_free(evt)) { + if (match(evt, device)) { + evt->eh_comp = ∁ + wait++; + } + } } + spin_unlock(&queues[q_index].l_lock); } - spin_unlock_irqrestore(&vhost->crq.l_lock, flags); + spin_unlock_irqrestore(vhost->host->host_lock, flags); if (wait) { timeout = wait_for_completion_timeout(&comp, timeout); if (!timeout) { wait = 0; - spin_lock_irqsave(&vhost->crq.l_lock, flags); - list_for_each_entry(evt, &vhost->crq.sent, queue_list) { - if (match(evt, device)) { - evt->eh_comp = NULL; - wait++; + spin_lock_irqsave(vhost->host->host_lock, flags); + for (q_index = 0; q_index < q_size; q_index++) { + spin_lock(&queues[q_index].l_lock); + for (i = 0; i < queues[q_index].evt_pool.size; i++) { + evt = &queues[q_index].evt_pool.events[i]; + if (!ibmvfc_event_is_free(evt)) { + if (match(evt, device)) { + evt->eh_comp = NULL; + wait++; + } + } } + spin_unlock(&queues[q_index].l_lock); } - spin_unlock_irqrestore(&vhost->crq.l_lock, flags); + spin_unlock_irqrestore(vhost->host->host_lock, flags); if (wait) dev_err(vhost->dev, "Timed out waiting for aborted commands\n"); LEAVE; @@ -5642,7 +5692,8 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost, rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE, &scrq->cookie, &scrq->hw_irq); - if (rc) { + /* H_CLOSED indicates successful register, but no CRQ partner */ + if (rc && rc != H_CLOSED) { dev_warn(dev, "Error registering sub-crq: %d\n", rc); if (rc == H_PARAMETER) dev_warn_once(dev, "Firmware may not support MQ\n"); @@ -5675,8 +5726,8 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost, irq_failed: do { - plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie); - } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); + rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie); + } while (rtas_busy_delay(rc)); reg_failed: ibmvfc_free_queue(vhost, scrq); LEAVE; @@ -5694,6 +5745,7 @@ static void ibmvfc_deregister_scsi_channel(struct ibmvfc_host *vhost, int index) free_irq(scrq->irq, scrq); irq_dispose_mapping(scrq->irq); + scrq->irq = 0; do { rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, @@ -5707,17 +5759,21 @@ static void ibmvfc_deregister_scsi_channel(struct ibmvfc_host *vhost, int index) LEAVE; } -static int ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost) +static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost) { int i, j; ENTER; + if (!vhost->mq_enabled) + return; vhost->scsi_scrqs.scrqs = kcalloc(nr_scsi_hw_queues, sizeof(*vhost->scsi_scrqs.scrqs), GFP_KERNEL); - if (!vhost->scsi_scrqs.scrqs) - return -1; + if (!vhost->scsi_scrqs.scrqs) { + vhost->do_enquiry = 0; + return; + } for (i = 0; i < nr_scsi_hw_queues; i++) { if (ibmvfc_register_scsi_channel(vhost, i)) { @@ -5726,13 +5782,12 @@ static int ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost) kfree(vhost->scsi_scrqs.scrqs); vhost->scsi_scrqs.scrqs = NULL; vhost->scsi_scrqs.active_queues = 0; - LEAVE; - return -1; + vhost->do_enquiry = 0; + break; } } LEAVE; - return 0; } static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost) @@ -5770,6 +5825,8 @@ static void ibmvfc_free_mem(struct ibmvfc_host *vhost) vhost->disc_buf_dma); dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf), vhost->login_buf, 
vhost->login_buf_dma); + dma_free_coherent(vhost->dev, sizeof(*vhost->channel_setup_buf), + vhost->channel_setup_buf, vhost->channel_setup_dma); dma_pool_destroy(vhost->sg_pool); ibmvfc_free_queue(vhost, async_q); LEAVE; @@ -5999,11 +6056,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id) goto remove_shost; } - if (vhost->mq_enabled) { - rc = ibmvfc_init_sub_crqs(vhost); - if (rc) - dev_warn(dev, "Failed to allocate Sub-CRQs. rc=%d\n", rc); - } + ibmvfc_init_sub_crqs(vhost); if (shost_to_fc_host(shost)->rqst_q) blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1); @@ -6038,7 +6091,7 @@ out: * Return value: * 0 **/ -static int ibmvfc_remove(struct vio_dev *vdev) +static void ibmvfc_remove(struct vio_dev *vdev) { struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev); LIST_HEAD(purge); @@ -6070,7 +6123,6 @@ static int ibmvfc_remove(struct vio_dev *vdev) spin_unlock(&ibmvfc_driver_lock); scsi_host_put(vhost->host); LEAVE; - return 0; } /** diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 29fcc44be2d5..77fafb1bc173 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -2335,7 +2335,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) return -1; } -static int ibmvscsi_remove(struct vio_dev *vdev) +static void ibmvscsi_remove(struct vio_dev *vdev) { struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev); @@ -2356,8 +2356,6 @@ static int ibmvscsi_remove(struct vio_dev *vdev) spin_unlock(&ibmvscsi_driver_lock); scsi_host_put(hostdata->host); - - return 0; } /** diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c index cc3908c2d2f9..9abd9e253af6 100644 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c @@ -3595,7 +3595,7 @@ free_adapter: return rc; } -static int ibmvscsis_remove(struct vio_dev *vdev) +static void ibmvscsis_remove(struct vio_dev *vdev) { struct scsi_info *vscsi = dev_get_drvdata(&vdev->dev); @@ -3622,8 +3622,6 @@ static int ibmvscsis_remove(struct vio_dev *vdev) list_del(&vscsi->list); spin_unlock_bh(&ibmvscsis_dev_lock); kfree(vscsi); - - return 0; } static ssize_t system_id_show(struct device *dev, diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 7ad11e42306d..04633e5157e9 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -3430,125 +3430,125 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session, switch(param) { case ISCSI_PARAM_FAST_ABORT: - len = sprintf(buf, "%d\n", session->fast_abort); + len = sysfs_emit(buf, "%d\n", session->fast_abort); break; case ISCSI_PARAM_ABORT_TMO: - len = sprintf(buf, "%d\n", session->abort_timeout); + len = sysfs_emit(buf, "%d\n", session->abort_timeout); break; case ISCSI_PARAM_LU_RESET_TMO: - len = sprintf(buf, "%d\n", session->lu_reset_timeout); + len = sysfs_emit(buf, "%d\n", session->lu_reset_timeout); break; case ISCSI_PARAM_TGT_RESET_TMO: - len = sprintf(buf, "%d\n", session->tgt_reset_timeout); + len = sysfs_emit(buf, "%d\n", session->tgt_reset_timeout); break; case ISCSI_PARAM_INITIAL_R2T_EN: - len = sprintf(buf, "%d\n", session->initial_r2t_en); + len = sysfs_emit(buf, "%d\n", session->initial_r2t_en); break; case ISCSI_PARAM_MAX_R2T: - len = sprintf(buf, "%hu\n", session->max_r2t); + len = sysfs_emit(buf, "%hu\n", session->max_r2t); break; case ISCSI_PARAM_IMM_DATA_EN: - len = sprintf(buf, "%d\n", session->imm_data_en); + len = sysfs_emit(buf, 
"%d\n", session->imm_data_en); break; case ISCSI_PARAM_FIRST_BURST: - len = sprintf(buf, "%u\n", session->first_burst); + len = sysfs_emit(buf, "%u\n", session->first_burst); break; case ISCSI_PARAM_MAX_BURST: - len = sprintf(buf, "%u\n", session->max_burst); + len = sysfs_emit(buf, "%u\n", session->max_burst); break; case ISCSI_PARAM_PDU_INORDER_EN: - len = sprintf(buf, "%d\n", session->pdu_inorder_en); + len = sysfs_emit(buf, "%d\n", session->pdu_inorder_en); break; case ISCSI_PARAM_DATASEQ_INORDER_EN: - len = sprintf(buf, "%d\n", session->dataseq_inorder_en); + len = sysfs_emit(buf, "%d\n", session->dataseq_inorder_en); break; case ISCSI_PARAM_DEF_TASKMGMT_TMO: - len = sprintf(buf, "%d\n", session->def_taskmgmt_tmo); + len = sysfs_emit(buf, "%d\n", session->def_taskmgmt_tmo); break; case ISCSI_PARAM_ERL: - len = sprintf(buf, "%d\n", session->erl); + len = sysfs_emit(buf, "%d\n", session->erl); break; case ISCSI_PARAM_TARGET_NAME: - len = sprintf(buf, "%s\n", session->targetname); + len = sysfs_emit(buf, "%s\n", session->targetname); break; case ISCSI_PARAM_TARGET_ALIAS: - len = sprintf(buf, "%s\n", session->targetalias); + len = sysfs_emit(buf, "%s\n", session->targetalias); break; case ISCSI_PARAM_TPGT: - len = sprintf(buf, "%d\n", session->tpgt); + len = sysfs_emit(buf, "%d\n", session->tpgt); break; case ISCSI_PARAM_USERNAME: - len = sprintf(buf, "%s\n", session->username); + len = sysfs_emit(buf, "%s\n", session->username); break; case ISCSI_PARAM_USERNAME_IN: - len = sprintf(buf, "%s\n", session->username_in); + len = sysfs_emit(buf, "%s\n", session->username_in); break; case ISCSI_PARAM_PASSWORD: - len = sprintf(buf, "%s\n", session->password); + len = sysfs_emit(buf, "%s\n", session->password); break; case ISCSI_PARAM_PASSWORD_IN: - len = sprintf(buf, "%s\n", session->password_in); + len = sysfs_emit(buf, "%s\n", session->password_in); break; case ISCSI_PARAM_IFACE_NAME: - len = sprintf(buf, "%s\n", session->ifacename); + len = sysfs_emit(buf, "%s\n", session->ifacename); break; case ISCSI_PARAM_INITIATOR_NAME: - len = sprintf(buf, "%s\n", session->initiatorname); + len = sysfs_emit(buf, "%s\n", session->initiatorname); break; case ISCSI_PARAM_BOOT_ROOT: - len = sprintf(buf, "%s\n", session->boot_root); + len = sysfs_emit(buf, "%s\n", session->boot_root); break; case ISCSI_PARAM_BOOT_NIC: - len = sprintf(buf, "%s\n", session->boot_nic); + len = sysfs_emit(buf, "%s\n", session->boot_nic); break; case ISCSI_PARAM_BOOT_TARGET: - len = sprintf(buf, "%s\n", session->boot_target); + len = sysfs_emit(buf, "%s\n", session->boot_target); break; case ISCSI_PARAM_AUTO_SND_TGT_DISABLE: - len = sprintf(buf, "%u\n", session->auto_snd_tgt_disable); + len = sysfs_emit(buf, "%u\n", session->auto_snd_tgt_disable); break; case ISCSI_PARAM_DISCOVERY_SESS: - len = sprintf(buf, "%u\n", session->discovery_sess); + len = sysfs_emit(buf, "%u\n", session->discovery_sess); break; case ISCSI_PARAM_PORTAL_TYPE: - len = sprintf(buf, "%s\n", session->portal_type); + len = sysfs_emit(buf, "%s\n", session->portal_type); break; case ISCSI_PARAM_CHAP_AUTH_EN: - len = sprintf(buf, "%u\n", session->chap_auth_en); + len = sysfs_emit(buf, "%u\n", session->chap_auth_en); break; case ISCSI_PARAM_DISCOVERY_LOGOUT_EN: - len = sprintf(buf, "%u\n", session->discovery_logout_en); + len = sysfs_emit(buf, "%u\n", session->discovery_logout_en); break; case ISCSI_PARAM_BIDI_CHAP_EN: - len = sprintf(buf, "%u\n", session->bidi_chap_en); + len = sysfs_emit(buf, "%u\n", session->bidi_chap_en); break; case 
ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL: - len = sprintf(buf, "%u\n", session->discovery_auth_optional); + len = sysfs_emit(buf, "%u\n", session->discovery_auth_optional); break; case ISCSI_PARAM_DEF_TIME2WAIT: - len = sprintf(buf, "%d\n", session->time2wait); + len = sysfs_emit(buf, "%d\n", session->time2wait); break; case ISCSI_PARAM_DEF_TIME2RETAIN: - len = sprintf(buf, "%d\n", session->time2retain); + len = sysfs_emit(buf, "%d\n", session->time2retain); break; case ISCSI_PARAM_TSID: - len = sprintf(buf, "%u\n", session->tsid); + len = sysfs_emit(buf, "%u\n", session->tsid); break; case ISCSI_PARAM_ISID: - len = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n", + len = sysfs_emit(buf, "%02x%02x%02x%02x%02x%02x\n", session->isid[0], session->isid[1], session->isid[2], session->isid[3], session->isid[4], session->isid[5]); break; case ISCSI_PARAM_DISCOVERY_PARENT_IDX: - len = sprintf(buf, "%u\n", session->discovery_parent_idx); + len = sysfs_emit(buf, "%u\n", session->discovery_parent_idx); break; case ISCSI_PARAM_DISCOVERY_PARENT_TYPE: if (session->discovery_parent_type) - len = sprintf(buf, "%s\n", + len = sysfs_emit(buf, "%s\n", session->discovery_parent_type); else - len = sprintf(buf, "\n"); + len = sysfs_emit(buf, "\n"); break; default: return -ENOSYS; @@ -3580,16 +3580,16 @@ int iscsi_conn_get_addr_param(struct sockaddr_storage *addr, case ISCSI_PARAM_CONN_ADDRESS: case ISCSI_HOST_PARAM_IPADDRESS: if (sin) - len = sprintf(buf, "%pI4\n", &sin->sin_addr.s_addr); + len = sysfs_emit(buf, "%pI4\n", &sin->sin_addr.s_addr); else - len = sprintf(buf, "%pI6\n", &sin6->sin6_addr); + len = sysfs_emit(buf, "%pI6\n", &sin6->sin6_addr); break; case ISCSI_PARAM_CONN_PORT: case ISCSI_PARAM_LOCAL_PORT: if (sin) - len = sprintf(buf, "%hu\n", be16_to_cpu(sin->sin_port)); + len = sysfs_emit(buf, "%hu\n", be16_to_cpu(sin->sin_port)); else - len = sprintf(buf, "%hu\n", + len = sysfs_emit(buf, "%hu\n", be16_to_cpu(sin6->sin6_port)); break; default: @@ -3608,88 +3608,88 @@ int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, switch(param) { case ISCSI_PARAM_PING_TMO: - len = sprintf(buf, "%u\n", conn->ping_timeout); + len = sysfs_emit(buf, "%u\n", conn->ping_timeout); break; case ISCSI_PARAM_RECV_TMO: - len = sprintf(buf, "%u\n", conn->recv_timeout); + len = sysfs_emit(buf, "%u\n", conn->recv_timeout); break; case ISCSI_PARAM_MAX_RECV_DLENGTH: - len = sprintf(buf, "%u\n", conn->max_recv_dlength); + len = sysfs_emit(buf, "%u\n", conn->max_recv_dlength); break; case ISCSI_PARAM_MAX_XMIT_DLENGTH: - len = sprintf(buf, "%u\n", conn->max_xmit_dlength); + len = sysfs_emit(buf, "%u\n", conn->max_xmit_dlength); break; case ISCSI_PARAM_HDRDGST_EN: - len = sprintf(buf, "%d\n", conn->hdrdgst_en); + len = sysfs_emit(buf, "%d\n", conn->hdrdgst_en); break; case ISCSI_PARAM_DATADGST_EN: - len = sprintf(buf, "%d\n", conn->datadgst_en); + len = sysfs_emit(buf, "%d\n", conn->datadgst_en); break; case ISCSI_PARAM_IFMARKER_EN: - len = sprintf(buf, "%d\n", conn->ifmarker_en); + len = sysfs_emit(buf, "%d\n", conn->ifmarker_en); break; case ISCSI_PARAM_OFMARKER_EN: - len = sprintf(buf, "%d\n", conn->ofmarker_en); + len = sysfs_emit(buf, "%d\n", conn->ofmarker_en); break; case ISCSI_PARAM_EXP_STATSN: - len = sprintf(buf, "%u\n", conn->exp_statsn); + len = sysfs_emit(buf, "%u\n", conn->exp_statsn); break; case ISCSI_PARAM_PERSISTENT_PORT: - len = sprintf(buf, "%d\n", conn->persistent_port); + len = sysfs_emit(buf, "%d\n", conn->persistent_port); break; case ISCSI_PARAM_PERSISTENT_ADDRESS: - len = sprintf(buf, "%s\n", 
conn->persistent_address); + len = sysfs_emit(buf, "%s\n", conn->persistent_address); break; case ISCSI_PARAM_STATSN: - len = sprintf(buf, "%u\n", conn->statsn); + len = sysfs_emit(buf, "%u\n", conn->statsn); break; case ISCSI_PARAM_MAX_SEGMENT_SIZE: - len = sprintf(buf, "%u\n", conn->max_segment_size); + len = sysfs_emit(buf, "%u\n", conn->max_segment_size); break; case ISCSI_PARAM_KEEPALIVE_TMO: - len = sprintf(buf, "%u\n", conn->keepalive_tmo); + len = sysfs_emit(buf, "%u\n", conn->keepalive_tmo); break; case ISCSI_PARAM_LOCAL_PORT: - len = sprintf(buf, "%u\n", conn->local_port); + len = sysfs_emit(buf, "%u\n", conn->local_port); break; case ISCSI_PARAM_TCP_TIMESTAMP_STAT: - len = sprintf(buf, "%u\n", conn->tcp_timestamp_stat); + len = sysfs_emit(buf, "%u\n", conn->tcp_timestamp_stat); break; case ISCSI_PARAM_TCP_NAGLE_DISABLE: - len = sprintf(buf, "%u\n", conn->tcp_nagle_disable); + len = sysfs_emit(buf, "%u\n", conn->tcp_nagle_disable); break; case ISCSI_PARAM_TCP_WSF_DISABLE: - len = sprintf(buf, "%u\n", conn->tcp_wsf_disable); + len = sysfs_emit(buf, "%u\n", conn->tcp_wsf_disable); break; case ISCSI_PARAM_TCP_TIMER_SCALE: - len = sprintf(buf, "%u\n", conn->tcp_timer_scale); + len = sysfs_emit(buf, "%u\n", conn->tcp_timer_scale); break; case ISCSI_PARAM_TCP_TIMESTAMP_EN: - len = sprintf(buf, "%u\n", conn->tcp_timestamp_en); + len = sysfs_emit(buf, "%u\n", conn->tcp_timestamp_en); break; case ISCSI_PARAM_IP_FRAGMENT_DISABLE: - len = sprintf(buf, "%u\n", conn->fragment_disable); + len = sysfs_emit(buf, "%u\n", conn->fragment_disable); break; case ISCSI_PARAM_IPV4_TOS: - len = sprintf(buf, "%u\n", conn->ipv4_tos); + len = sysfs_emit(buf, "%u\n", conn->ipv4_tos); break; case ISCSI_PARAM_IPV6_TC: - len = sprintf(buf, "%u\n", conn->ipv6_traffic_class); + len = sysfs_emit(buf, "%u\n", conn->ipv6_traffic_class); break; case ISCSI_PARAM_IPV6_FLOW_LABEL: - len = sprintf(buf, "%u\n", conn->ipv6_flow_label); + len = sysfs_emit(buf, "%u\n", conn->ipv6_flow_label); break; case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6: - len = sprintf(buf, "%u\n", conn->is_fw_assigned_ipv6); + len = sysfs_emit(buf, "%u\n", conn->is_fw_assigned_ipv6); break; case ISCSI_PARAM_TCP_XMIT_WSF: - len = sprintf(buf, "%u\n", conn->tcp_xmit_wsf); + len = sysfs_emit(buf, "%u\n", conn->tcp_xmit_wsf); break; case ISCSI_PARAM_TCP_RECV_WSF: - len = sprintf(buf, "%u\n", conn->tcp_recv_wsf); + len = sysfs_emit(buf, "%u\n", conn->tcp_recv_wsf); break; case ISCSI_PARAM_LOCAL_IPADDR: - len = sprintf(buf, "%s\n", conn->local_ipaddr); + len = sysfs_emit(buf, "%s\n", conn->local_ipaddr); break; default: return -ENOSYS; @@ -3707,13 +3707,13 @@ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, switch (param) { case ISCSI_HOST_PARAM_NETDEV_NAME: - len = sprintf(buf, "%s\n", ihost->netdev); + len = sysfs_emit(buf, "%s\n", ihost->netdev); break; case ISCSI_HOST_PARAM_HWADDRESS: - len = sprintf(buf, "%s\n", ihost->hwaddress); + len = sysfs_emit(buf, "%s\n", ihost->hwaddress); break; case ISCSI_HOST_PARAM_INITIATOR_NAME: - len = sprintf(buf, "%s\n", ihost->initiatorname); + len = sysfs_emit(buf, "%s\n", ihost->initiatorname); break; default: return -ENOSYS; diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index bc79a017e1a2..46a8f2d1d2b8 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -2421,7 +2421,7 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf, memset(dstbuf, 0, 33); size = (nbytes < 32) ? 
nbytes : 32; if (copy_from_user(dstbuf, buf, size)) - return 0; + return -EFAULT; if (dent == phba->debug_InjErrLBA) { if ((dstbuf[0] == 'o') && (dstbuf[1] == 'f') && @@ -2430,7 +2430,7 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf, } if ((tmp == 0) && (kstrtoull(dstbuf, 0, &tmp))) - return 0; + return -EINVAL; if (dent == phba->debug_writeGuard) phba->lpfc_injerr_wgrd_cnt = (uint32_t)tmp; diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index ac066f86bb14..ac0eef975f17 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -7806,14 +7806,18 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) ioc->pend_os_device_add_sz++; ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz, GFP_KERNEL); - if (!ioc->pend_os_device_add) + if (!ioc->pend_os_device_add) { + r = -ENOMEM; goto out_free_resources; + } ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz; ioc->device_remove_in_progress = kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL); - if (!ioc->device_remove_in_progress) + if (!ioc->device_remove_in_progress) { + r = -ENOMEM; goto out_free_resources; + } ioc->fwfault_debug = mpt3sas_fwfault_debug; diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index ffca03064797..6aa6de729187 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -413,7 +413,7 @@ mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc, * And add this object to port_table_list. */ if (!ioc->multipath_on_hba) { - port = kzalloc(sizeof(struct hba_port), GFP_KERNEL); + port = kzalloc(sizeof(struct hba_port), GFP_ATOMIC); if (!port) return NULL; diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c index 4adf9ded296a..329fd025c718 100644 --- a/drivers/scsi/myrs.c +++ b/drivers/scsi/myrs.c @@ -2273,12 +2273,12 @@ static void myrs_cleanup(struct myrs_hba *cs) if (cs->mmio_base) { cs->disable_intr(cs); iounmap(cs->mmio_base); + cs->mmio_base = NULL; } if (cs->irq) free_irq(cs->irq, cs); if (cs->io_addr) release_region(cs->io_addr, 0x80); - iounmap(cs->mmio_base); pci_set_drvdata(pdev, NULL); pci_disable_device(pdev); scsi_host_put(cs->host); diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c index 5d5f50d6a02d..ac89002646a3 100644 --- a/drivers/scsi/pcmcia/nsp_cs.c +++ b/drivers/scsi/pcmcia/nsp_cs.c @@ -55,7 +55,6 @@ MODULE_AUTHOR("YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>"); MODULE_DESCRIPTION("WorkBit NinjaSCSI-3 / NinjaSCSI-32Bi(16bit) PCMCIA SCSI host adapter module"); -MODULE_SUPPORTED_DEVICE("sd,sr,sg,st"); MODULE_LICENSE("GPL"); #include "nsp_io.h" diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index 47ad64b06623..69c5b5ee2169 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -1675,6 +1675,7 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi) if (!qedi->global_queues[i]) { QEDI_ERR(&qedi->dbg_ctx, "Unable to allocation global queue %d.\n", i); + status = -ENOMEM; goto mem_alloc_failure; } diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index c48daf52725d..480e7d2dcf3e 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -3222,8 +3222,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) || (cmd->sess && cmd->sess->deleted)) { cmd->state = QLA_TGT_STATE_PROCESSED; - res = 0; - goto 
free; + return 0; } ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018, @@ -3234,8 +3233,9 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, &full_req_cnt); - if (unlikely(res != 0)) - goto free; + if (unlikely(res != 0)) { + return res; + } spin_lock_irqsave(qpair->qp_lock_ptr, flags); @@ -3255,8 +3255,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, vha->flags.online, qla2x00_reset_active(vha), cmd->reset_count, qpair->chip_reset); spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); - res = 0; - goto free; + return 0; } /* Does F/W have an IOCBs for this request */ @@ -3359,8 +3358,6 @@ out_unmap_unlock: qlt_unmap_sg(vha, cmd); spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); -free: - vha->hw->tgt.tgt_ops->free_cmd(cmd); return res; } EXPORT_SYMBOL(qlt_xmit_response); diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 10e5e6c8087d..01620f3eab39 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -116,7 +116,6 @@ (min(1270, ((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD_24XX + \ QLA_TGT_DATASEGS_PER_CONT_24XX*((ql) - 1)) : 0)) #endif -#endif #define GET_TARGET_ID(ha, iocb) ((HAS_EXTENDED_IDS(ha)) \ ? le16_to_cpu((iocb)->u.isp2x.target.extended) \ @@ -244,6 +243,7 @@ struct ctio_to_2xxx { #ifndef CTIO_RET_TYPE #define CTIO_RET_TYPE 0x17 /* CTIO return entry */ #define ATIO_TYPE7 0x06 /* Accept target I/O entry for 24xx */ +#endif struct fcp_hdr { uint8_t r_ctl; diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index b55fc768a2a7..8b4890cdd4ca 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -644,7 +644,6 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) { struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); - struct scsi_qla_host *vha = cmd->vha; if (cmd->aborted) { /* Cmd can loop during Q-full. 
tcm_qla2xxx_aborted_task @@ -657,7 +656,6 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) cmd->se_cmd.transport_state, cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags); - vha->hw->tgt.tgt_ops->free_cmd(cmd); return 0; } @@ -685,7 +683,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd) { struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); - struct scsi_qla_host *vha = cmd->vha; int xmit_type = QLA_TGT_XMIT_STATUS; if (cmd->aborted) { @@ -699,7 +696,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd) cmd, kref_read(&cmd->se_cmd.cmd_kref), cmd->se_cmd.transport_state, cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags); - vha->hw->tgt.tgt_ops->free_cmd(cmd); return 0; } cmd->bufflen = se_cmd->data_length; diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 969d24d580e2..f4bf62b007a0 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -132,7 +132,11 @@ show_transport_handle(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_internal *priv = dev_to_iscsi_internal(dev); - return sprintf(buf, "%llu\n", (unsigned long long)iscsi_handle(priv->iscsi_transport)); + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + return sysfs_emit(buf, "%llu\n", + (unsigned long long)iscsi_handle(priv->iscsi_transport)); } static DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL); @@ -142,7 +146,7 @@ show_transport_##name(struct device *dev, \ struct device_attribute *attr,char *buf) \ { \ struct iscsi_internal *priv = dev_to_iscsi_internal(dev); \ - return sprintf(buf, format"\n", priv->iscsi_transport->name); \ + return sysfs_emit(buf, format"\n", priv->iscsi_transport->name);\ } \ static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL); @@ -183,7 +187,7 @@ static ssize_t show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev); - return sprintf(buf, "%llu\n", (unsigned long long) ep->id); + return sysfs_emit(buf, "%llu\n", (unsigned long long) ep->id); } static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL); @@ -2471,6 +2475,7 @@ static void iscsi_if_stop_conn(struct iscsi_cls_conn *conn, int flag) */ mutex_lock(&conn_mutex); conn->transport->stop_conn(conn, flag); + conn->state = ISCSI_CONN_DOWN; mutex_unlock(&conn_mutex); } @@ -2880,6 +2885,9 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev) struct iscsi_cls_session *session; int err = 0, value = 0; + if (ev->u.set_param.len > PAGE_SIZE) + return -EINVAL; + session = iscsi_session_lookup(ev->u.set_param.sid); conn = iscsi_conn_lookup(ev->u.set_param.sid, ev->u.set_param.cid); if (!conn || !session) @@ -2894,6 +2902,13 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev) default: err = transport->set_param(conn, ev->u.set_param.param, data, ev->u.set_param.len); + if ((conn->state == ISCSI_CONN_BOUND) || + (conn->state == ISCSI_CONN_UP)) { + err = transport->set_param(conn, ev->u.set_param.param, + data, ev->u.set_param.len); + } else { + return -ENOTCONN; + } } return err; @@ -2953,6 +2968,7 @@ static int iscsi_if_ep_disconnect(struct iscsi_transport *transport, mutex_lock(&conn->ep_mutex); conn->ep = NULL; mutex_unlock(&conn->ep_mutex); + conn->state = ISCSI_CONN_DOWN; } transport->ep_disconnect(ep); @@ -3027,6 +3043,9 @@ iscsi_set_host_param(struct iscsi_transport *transport, if (!transport->set_host_param) return -ENOSYS; + if 
(ev->u.set_host_param.len > PAGE_SIZE) + return -EINVAL; + shost = scsi_host_lookup(ev->u.set_host_param.host_no); if (!shost) { printk(KERN_ERR "set_host_param could not find host no %u\n", @@ -3614,6 +3633,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) { int err = 0; u32 portid; + u32 pdu_len; struct iscsi_uevent *ev = nlmsg_data(nlh); struct iscsi_transport *transport = NULL; struct iscsi_internal *priv; @@ -3621,6 +3641,9 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) struct iscsi_cls_conn *conn; struct iscsi_endpoint *ep = NULL; + if (!netlink_capable(skb, CAP_SYS_ADMIN)) + return -EPERM; + if (nlh->nlmsg_type == ISCSI_UEVENT_PATH_UPDATE) *group = ISCSI_NL_GRP_UIP; else @@ -3713,6 +3736,8 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) ev->r.retcode = transport->bind_conn(session, conn, ev->u.b_conn.transport_eph, ev->u.b_conn.is_leading); + if (!ev->r.retcode) + conn->state = ISCSI_CONN_BOUND; mutex_unlock(&conn_mutex); if (ev->r.retcode || !transport->ep_connect) @@ -3753,6 +3778,14 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) err = -EINVAL; break; case ISCSI_UEVENT_SEND_PDU: + pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev); + + if ((ev->u.send_pdu.hdr_size > pdu_len) || + (ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) { + err = -EINVAL; + break; + } + conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid); if (conn) { mutex_lock(&conn_mutex); @@ -3944,7 +3977,8 @@ iscsi_conn_attr(local_ipaddr, ISCSI_PARAM_LOCAL_IPADDR); static const char *const connection_state_names[] = { [ISCSI_CONN_UP] = "up", [ISCSI_CONN_DOWN] = "down", - [ISCSI_CONN_FAILED] = "failed" + [ISCSI_CONN_FAILED] = "failed", + [ISCSI_CONN_BOUND] = "bound" }; static ssize_t show_conn_state(struct device *dev, @@ -3957,7 +3991,7 @@ static ssize_t show_conn_state(struct device *dev, conn->state < ARRAY_SIZE(connection_state_names)) state = connection_state_names[conn->state]; - return sprintf(buf, "%s\n", state); + return sysfs_emit(buf, "%s\n", state); } static ISCSI_CLASS_ATTR(conn, state, S_IRUGO, show_conn_state, NULL); @@ -4185,7 +4219,7 @@ show_priv_session_state(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent); - return sprintf(buf, "%s\n", iscsi_session_state_name(session->state)); + return sysfs_emit(buf, "%s\n", iscsi_session_state_name(session->state)); } static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state, NULL); @@ -4194,7 +4228,7 @@ show_priv_session_creator(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent); - return sprintf(buf, "%d\n", session->creator); + return sysfs_emit(buf, "%d\n", session->creator); } static ISCSI_CLASS_ATTR(priv_sess, creator, S_IRUGO, show_priv_session_creator, NULL); @@ -4203,7 +4237,7 @@ show_priv_session_target_id(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent); - return sprintf(buf, "%d\n", session->target_id); + return sysfs_emit(buf, "%d\n", session->target_id); } static ISCSI_CLASS_ATTR(priv_sess, target_id, S_IRUGO, show_priv_session_target_id, NULL); @@ -4216,8 +4250,8 @@ show_priv_session_##field(struct device *dev, \ struct iscsi_cls_session *session = \ iscsi_dev_to_session(dev->parent); \ if (session->field == -1) \ - 
return sprintf(buf, "off\n"); \ - return sprintf(buf, format"\n", session->field); \ + return sysfs_emit(buf, "off\n"); \ + return sysfs_emit(buf, format"\n", session->field); \ } #define iscsi_priv_session_attr_store(field) \ diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index ee558675eab4..994f1b8e3504 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -280,27 +280,28 @@ static int sd_zbc_update_wp_offset_cb(struct blk_zone *zone, unsigned int idx, static void sd_zbc_update_wp_offset_workfn(struct work_struct *work) { struct scsi_disk *sdkp; + unsigned long flags; unsigned int zno; int ret; sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work); - spin_lock_bh(&sdkp->zones_wp_offset_lock); + spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags); for (zno = 0; zno < sdkp->nr_zones; zno++) { if (sdkp->zones_wp_offset[zno] != SD_ZBC_UPDATING_WP_OFST) continue; - spin_unlock_bh(&sdkp->zones_wp_offset_lock); + spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags); ret = sd_zbc_do_report_zones(sdkp, sdkp->zone_wp_update_buf, SD_BUF_SIZE, zno * sdkp->zone_blocks, true); - spin_lock_bh(&sdkp->zones_wp_offset_lock); + spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags); if (!ret) sd_zbc_parse_report(sdkp, sdkp->zone_wp_update_buf + 64, zno, sd_zbc_update_wp_offset_cb, sdkp); } - spin_unlock_bh(&sdkp->zones_wp_offset_lock); + spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags); scsi_device_put(sdkp->device); } @@ -324,6 +325,7 @@ blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba, struct request *rq = cmd->request; struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); unsigned int wp_offset, zno = blk_rq_zone_no(rq); + unsigned long flags; blk_status_t ret; ret = sd_zbc_cmnd_checks(cmd); @@ -337,7 +339,7 @@ blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba, if (!blk_req_zone_write_trylock(rq)) return BLK_STS_ZONE_RESOURCE; - spin_lock_bh(&sdkp->zones_wp_offset_lock); + spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags); wp_offset = sdkp->zones_wp_offset[zno]; switch (wp_offset) { case SD_ZBC_INVALID_WP_OFST: @@ -366,7 +368,7 @@ blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba, *lba += wp_offset; } - spin_unlock_bh(&sdkp->zones_wp_offset_lock); + spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags); if (ret) blk_req_zone_write_unlock(rq); return ret; @@ -445,6 +447,7 @@ static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd, struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); unsigned int zno = blk_rq_zone_no(rq); enum req_opf op = req_op(rq); + unsigned long flags; /* * If we got an error for a command that needs updating the write @@ -452,7 +455,7 @@ static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd, * invalid to force an update from disk the next time a zone append * command is issued. 
*/ - spin_lock_bh(&sdkp->zones_wp_offset_lock); + spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags); if (result && op != REQ_OP_ZONE_RESET_ALL) { if (op == REQ_OP_ZONE_APPEND) { @@ -496,7 +499,7 @@ static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd, } unlock_wp_offset: - spin_unlock_bh(&sdkp->zones_wp_offset_lock); + spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags); return good_bytes; } diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index c53f456fbd09..a1dacb6e993e 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -48,7 +48,6 @@ MODULE_AUTHOR("Microsemi"); MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version " DRIVER_VERSION); -MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers"); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 841ad2fc369a..9ca536aae784 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -1269,8 +1269,8 @@ static int st_open(struct inode *inode, struct file *filp) spin_lock(&st_use_lock); if (STp->in_use) { spin_unlock(&st_use_lock); - scsi_tape_put(STp); DEBC_printk(STp, "Device already in use.\n"); + scsi_tape_put(STp); return (-EBUSY); } diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c index c55202b92a43..a981f261b304 100644 --- a/drivers/scsi/ufs/ufs-mediatek.c +++ b/drivers/scsi/ufs/ufs-mediatek.c @@ -911,7 +911,7 @@ static void ufs_mtk_vreg_set_lpm(struct ufs_hba *hba, bool lpm) if (!hba->vreg_info.vccq2 || !hba->vreg_info.vcc) return; - if (lpm & !hba->vreg_info.vcc->enabled) + if (lpm && !hba->vreg_info.vcc->enabled) regulator_set_mode(hba->vreg_info.vccq2->reg, REGULATOR_MODE_IDLE); else if (!lpm) diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index f97d7b0ae3b6..a9dc8d7c9f78 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -253,12 +253,17 @@ static int ufs_qcom_host_reset(struct ufs_hba *hba) { int ret = 0; struct ufs_qcom_host *host = ufshcd_get_variant(hba); + bool reenable_intr = false; if (!host->core_reset) { dev_warn(hba->dev, "%s: reset control not set\n", __func__); goto out; } + reenable_intr = hba->is_irq_enabled; + disable_irq(hba->irq); + hba->is_irq_enabled = false; + ret = reset_control_assert(host->core_reset); if (ret) { dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n", @@ -280,6 +285,11 @@ static int ufs_qcom_host_reset(struct ufs_hba *hba) usleep_range(1000, 1100); + if (reenable_intr) { + enable_irq(hba->irq); + hba->is_irq_enabled = true; + } + out: return ret; } diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 77161750c9fb..c86760788c72 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -95,8 +95,6 @@ 16, 4, buf, __len, false); \ } while (0) -static bool early_suspend; - int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, const char *prefix) { @@ -1535,7 +1533,7 @@ static ssize_t ufshcd_clkscale_enable_show(struct device *dev, { struct ufs_hba *hba = dev_get_drvdata(dev); - return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_enabled); + return sysfs_emit(buf, "%d\n", hba->clk_scaling.is_enabled); } static ssize_t ufshcd_clkscale_enable_store(struct device *dev, @@ -4987,6 +4985,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) * UFS device needs urgent BKOPs. 
*/ if (!hba->pm_op_in_progress && + !ufshcd_eh_in_progress(hba) && ufshcd_is_exception_event(lrbp->ucd_rsp_ptr) && schedule_work(&hba->eeh_work)) { /* @@ -5784,13 +5783,20 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba) ufshcd_suspend_clkscaling(hba); ufshcd_clk_scaling_allow(hba, false); } + ufshcd_scsi_block_requests(hba); + /* Drain ufshcd_queuecommand() */ + down_write(&hba->clk_scaling_lock); + up_write(&hba->clk_scaling_lock); + cancel_work_sync(&hba->eeh_work); } static void ufshcd_err_handling_unprepare(struct ufs_hba *hba) { + ufshcd_scsi_unblock_requests(hba); ufshcd_release(hba); if (ufshcd_is_clkscaling_supported(hba)) ufshcd_clk_scaling_suspend(hba, false); + ufshcd_clear_ua_wluns(hba); pm_runtime_put(hba->dev); } @@ -5882,8 +5888,8 @@ static void ufshcd_err_handler(struct work_struct *work) spin_unlock_irqrestore(hba->host->host_lock, flags); ufshcd_err_handling_prepare(hba); spin_lock_irqsave(hba->host->host_lock, flags); - ufshcd_scsi_block_requests(hba); - hba->ufshcd_state = UFSHCD_STATE_RESET; + if (hba->ufshcd_state != UFSHCD_STATE_ERROR) + hba->ufshcd_state = UFSHCD_STATE_RESET; /* Complete requests that have door-bell cleared by h/w */ ufshcd_complete_requests(hba); @@ -6042,12 +6048,8 @@ skip_err_handling: } ufshcd_clear_eh_in_progress(hba); spin_unlock_irqrestore(hba->host->host_lock, flags); - ufshcd_scsi_unblock_requests(hba); ufshcd_err_handling_unprepare(hba); up(&hba->host_sem); - - if (!err && needs_reset) - ufshcd_clear_ua_wluns(hba); } /** @@ -7858,6 +7860,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async) unsigned long flags; ktime_t start = ktime_get(); + hba->ufshcd_state = UFSHCD_STATE_RESET; + ret = ufshcd_link_startup(hba); if (ret) goto out; @@ -8972,11 +8976,6 @@ int ufshcd_system_suspend(struct ufs_hba *hba) int ret = 0; ktime_t start = ktime_get(); - if (!hba) { - early_suspend = true; - return 0; - } - down(&hba->host_sem); if (!hba->is_powered) @@ -9028,14 +9027,6 @@ int ufshcd_system_resume(struct ufs_hba *hba) int ret = 0; ktime_t start = ktime_get(); - if (!hba) - return -EINVAL; - - if (unlikely(early_suspend)) { - early_suspend = false; - down(&hba->host_sem); - } - if (!hba->is_powered || pm_runtime_suspended(hba->dev)) /* * Let the runtime resume take care of resuming @@ -9068,9 +9059,6 @@ int ufshcd_runtime_suspend(struct ufs_hba *hba) int ret = 0; ktime_t start = ktime_get(); - if (!hba) - return -EINVAL; - if (!hba->is_powered) goto out; else @@ -9109,9 +9097,6 @@ int ufshcd_runtime_resume(struct ufs_hba *hba) int ret = 0; ktime_t start = ktime_get(); - if (!hba) - return -EINVAL; - if (!hba->is_powered) goto out; else diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index 081f54ab7d86..8a79605d9652 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c @@ -17,8 +17,6 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * - * Maintained by: Jim Gill <jgill@vmware.com> - * */ #include <linux/kernel.h> diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h index 75966d3f326e..51a82f7803d3 100644 --- a/drivers/scsi/vmw_pvscsi.h +++ b/drivers/scsi/vmw_pvscsi.h @@ -17,8 +17,6 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
* - * Maintained by: Jim Gill <jgill@vmware.com> - * */ #ifndef _VMW_PVSCSI_H_ diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c index e5d7fb81ad66..bd0fbcdbdefe 100644 --- a/drivers/sh/maple/maple.c +++ b/drivers/sh/maple/maple.c @@ -30,7 +30,6 @@ MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk>"); MODULE_DESCRIPTION("Maple bus driver for Dreamcast"); MODULE_LICENSE("GPL v2"); -MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}"); static void maple_dma_handler(struct work_struct *work); static void maple_vblank_handler(struct work_struct *work); diff --git a/drivers/soc/litex/litex_soc_ctrl.c b/drivers/soc/litex/litex_soc_ctrl.c index 6268bfa7f0d6..c3e379a990f2 100644 --- a/drivers/soc/litex/litex_soc_ctrl.c +++ b/drivers/soc/litex/litex_soc_ctrl.c @@ -13,7 +13,6 @@ #include <linux/platform_device.h> #include <linux/printk.h> #include <linux/module.h> -#include <linux/errno.h> #include <linux/io.h> #include <linux/reboot.h> diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c index f42954e2c98e..1fd29f93ff6d 100644 --- a/drivers/soc/qcom/qcom-geni-se.c +++ b/drivers/soc/qcom/qcom-geni-se.c @@ -3,7 +3,6 @@ #include <linux/acpi.h> #include <linux/clk.h> -#include <linux/console.h> #include <linux/slab.h> #include <linux/dma-mapping.h> #include <linux/io.h> @@ -92,14 +91,11 @@ struct geni_wrapper { struct device *dev; void __iomem *base; struct clk_bulk_data ahb_clks[NUM_AHB_CLKS]; - struct geni_icc_path to_core; }; static const char * const icc_path_names[] = {"qup-core", "qup-config", "qup-memory"}; -static struct geni_wrapper *earlycon_wrapper; - #define QUP_HW_VER_REG 0x4 /* Common SE registers */ @@ -843,44 +839,11 @@ int geni_icc_disable(struct geni_se *se) } EXPORT_SYMBOL(geni_icc_disable); -void geni_remove_earlycon_icc_vote(void) -{ - struct platform_device *pdev; - struct geni_wrapper *wrapper; - struct device_node *parent; - struct device_node *child; - - if (!earlycon_wrapper) - return; - - wrapper = earlycon_wrapper; - parent = of_get_next_parent(wrapper->dev->of_node); - for_each_child_of_node(parent, child) { - if (!of_device_is_compatible(child, "qcom,geni-se-qup")) - continue; - - pdev = of_find_device_by_node(child); - if (!pdev) - continue; - - wrapper = platform_get_drvdata(pdev); - icc_put(wrapper->to_core.path); - wrapper->to_core.path = NULL; - - } - of_node_put(parent); - - earlycon_wrapper = NULL; -} -EXPORT_SYMBOL(geni_remove_earlycon_icc_vote); - static int geni_se_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct resource *res; struct geni_wrapper *wrapper; - struct console __maybe_unused *bcon; - bool __maybe_unused has_earlycon = false; int ret; wrapper = devm_kzalloc(dev, sizeof(*wrapper), GFP_KERNEL); @@ -903,43 +866,6 @@ static int geni_se_probe(struct platform_device *pdev) } } -#ifdef CONFIG_SERIAL_EARLYCON - for_each_console(bcon) { - if (!strcmp(bcon->name, "qcom_geni")) { - has_earlycon = true; - break; - } - } - if (!has_earlycon) - goto exit; - - wrapper->to_core.path = devm_of_icc_get(dev, "qup-core"); - if (IS_ERR(wrapper->to_core.path)) - return PTR_ERR(wrapper->to_core.path); - /* - * Put minmal BW request on core clocks on behalf of early console. - * The vote will be removed earlycon exit function. - * - * Note: We are putting vote on each QUP wrapper instead only to which - * earlycon is connected because QUP core clock of different wrapper - * share same voltage domain. If core1 is put to 0, then core2 will - * also run at 0, if not voted. 
Default ICC vote will be removed ASA - * we touch any of the core clock. - * core1 = core2 = max(core1, core2) - */ - ret = icc_set_bw(wrapper->to_core.path, GENI_DEFAULT_BW, - GENI_DEFAULT_BW); - if (ret) { - dev_err(&pdev->dev, "%s: ICC BW voting failed for core: %d\n", - __func__, ret); - return ret; - } - - if (of_get_compatible_child(pdev->dev.of_node, "qcom,geni-debug-uart")) - earlycon_wrapper = wrapper; - of_node_put(pdev->dev.of_node); -exit: -#endif dev_set_drvdata(dev, wrapper); dev_dbg(dev, "GENI SE Driver probed\n"); return devm_of_platform_populate(dev); diff --git a/drivers/soc/ti/omap_prm.c b/drivers/soc/ti/omap_prm.c index bf1468e5bccb..51143a68a889 100644 --- a/drivers/soc/ti/omap_prm.c +++ b/drivers/soc/ti/omap_prm.c @@ -332,7 +332,7 @@ static const struct omap_prm_data dra7_prm_data[] = { { .name = "l3init", .base = 0x4ae07300, .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon, - .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012, + .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01, .clkdm_name = "pcie" }, { @@ -830,8 +830,12 @@ static int omap_reset_deassert(struct reset_controller_dev *rcdev, reset->prm->data->name, id); exit: - if (reset->clkdm) + if (reset->clkdm) { + /* At least dra7 iva needs a delay before clkdm idle */ + if (has_rstst) + udelay(1); pdata->clkdm_allow_idle(reset->clkdm); + } return ret; } diff --git a/drivers/soundwire/intel.h b/drivers/soundwire/intel.h index 76820d0b9deb..06bac8ba14e9 100644 --- a/drivers/soundwire/intel.h +++ b/drivers/soundwire/intel.h @@ -48,8 +48,6 @@ struct sdw_intel { #endif }; -#define SDW_INTEL_QUIRK_MASK_BUS_DISABLE BIT(1) - int intel_master_startup(struct platform_device *pdev); int intel_master_process_wakeen_event(struct platform_device *pdev); diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c index bc8520eb385e..05b726cdfebc 100644 --- a/drivers/soundwire/intel_init.c +++ b/drivers/soundwire/intel_init.c @@ -18,42 +18,12 @@ #include "cadence_master.h" #include "intel.h" -#define SDW_LINK_TYPE 4 /* from Intel ACPI documentation */ -#define SDW_MAX_LINKS 4 #define SDW_SHIM_LCAP 0x0 #define SDW_SHIM_BASE 0x2C000 #define SDW_ALH_BASE 0x2C800 #define SDW_LINK_BASE 0x30000 #define SDW_LINK_SIZE 0x10000 -static int ctrl_link_mask; -module_param_named(sdw_link_mask, ctrl_link_mask, int, 0444); -MODULE_PARM_DESC(sdw_link_mask, "Intel link mask (one bit per link)"); - -static bool is_link_enabled(struct fwnode_handle *fw_node, int i) -{ - struct fwnode_handle *link; - char name[32]; - u32 quirk_mask = 0; - - /* Find master handle */ - snprintf(name, sizeof(name), - "mipi-sdw-link-%d-subproperties", i); - - link = fwnode_get_named_child_node(fw_node, name); - if (!link) - return false; - - fwnode_property_read_u32(link, - "intel-quirk-mask", - &quirk_mask); - - if (quirk_mask & SDW_INTEL_QUIRK_MASK_BUS_DISABLE) - return false; - - return true; -} - static int sdw_intel_cleanup(struct sdw_intel_ctx *ctx) { struct sdw_intel_link_res *link = ctx->links; @@ -81,74 +51,6 @@ static int sdw_intel_cleanup(struct sdw_intel_ctx *ctx) return 0; } -static int -sdw_intel_scan_controller(struct sdw_intel_acpi_info *info) -{ - struct acpi_device *adev; - int ret, i; - u8 count; - - if (acpi_bus_get_device(info->handle, &adev)) - return -EINVAL; - - /* Found controller, find links supported */ - count = 0; - ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev), - "mipi-sdw-master-count", &count, 1); - - /* - * In theory we could check the number of links supported in - * hardware, but in that 
step we cannot assume SoundWire IP is - * powered. - * - * In addition, if the BIOS doesn't even provide this - * 'master-count' property then all the inits based on link - * masks will fail as well. - * - * We will check the hardware capabilities in the startup() step - */ - - if (ret) { - dev_err(&adev->dev, - "Failed to read mipi-sdw-master-count: %d\n", ret); - return -EINVAL; - } - - /* Check count is within bounds */ - if (count > SDW_MAX_LINKS) { - dev_err(&adev->dev, "Link count %d exceeds max %d\n", - count, SDW_MAX_LINKS); - return -EINVAL; - } - - if (!count) { - dev_warn(&adev->dev, "No SoundWire links detected\n"); - return -EINVAL; - } - dev_dbg(&adev->dev, "ACPI reports %d SDW Link devices\n", count); - - info->count = count; - info->link_mask = 0; - - for (i = 0; i < count; i++) { - if (ctrl_link_mask && !(ctrl_link_mask & BIT(i))) { - dev_dbg(&adev->dev, - "Link %d masked, will not be enabled\n", i); - continue; - } - - if (!is_link_enabled(acpi_fwnode_handle(adev), i)) { - dev_dbg(&adev->dev, - "Link %d not selected in firmware\n", i); - continue; - } - - info->link_mask |= BIT(i); - } - - return 0; -} - #define HDA_DSP_REG_ADSPIC2 (0x10) #define HDA_DSP_REG_ADSPIS2 (0x14) #define HDA_DSP_REG_ADSPIC2_SNDW BIT(5) @@ -357,66 +259,6 @@ sdw_intel_startup_controller(struct sdw_intel_ctx *ctx) return 0; } -static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level, - void *cdata, void **return_value) -{ - struct sdw_intel_acpi_info *info = cdata; - struct acpi_device *adev; - acpi_status status; - u64 adr; - - status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &adr); - if (ACPI_FAILURE(status)) - return AE_OK; /* keep going */ - - if (acpi_bus_get_device(handle, &adev)) { - pr_err("%s: Couldn't find ACPI handle\n", __func__); - return AE_NOT_FOUND; - } - - info->handle = handle; - - /* - * On some Intel platforms, multiple children of the HDAS - * device can be found, but only one of them is the SoundWire - * controller. The SNDW device is always exposed with - * Name(_ADR, 0x40000000), with bits 31..28 representing the - * SoundWire link so filter accordingly - */ - if (FIELD_GET(GENMASK(31, 28), adr) != SDW_LINK_TYPE) - return AE_OK; /* keep going */ - - /* device found, stop namespace walk */ - return AE_CTRL_TERMINATE; -} - -/** - * sdw_intel_acpi_scan() - SoundWire Intel init routine - * @parent_handle: ACPI parent handle - * @info: description of what firmware/DSDT tables expose - * - * This scans the namespace and queries firmware to figure out which - * links to enable. 
A follow-up use of sdw_intel_probe() and - * sdw_intel_startup() is required for creation of devices and bus - * startup - */ -int sdw_intel_acpi_scan(acpi_handle *parent_handle, - struct sdw_intel_acpi_info *info) -{ - acpi_status status; - - info->handle = NULL; - status = acpi_walk_namespace(ACPI_TYPE_DEVICE, - parent_handle, 1, - sdw_intel_acpi_cb, - NULL, info, NULL); - if (ACPI_FAILURE(status) || info->handle == NULL) - return -ENODEV; - - return sdw_intel_scan_controller(info); -} -EXPORT_SYMBOL_NS(sdw_intel_acpi_scan, SOUNDWIRE_INTEL_INIT); - /** * sdw_intel_probe() - SoundWire Intel probe routine * @res: resource data diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index 442cc7c53a47..52ddb3255d88 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -1433,6 +1433,7 @@ static int cqspi_probe(struct platform_device *pdev) cqspi = spi_master_get_devdata(master); cqspi->pdev = pdev; + platform_set_drvdata(pdev, cqspi); /* Obtain configuration from OF. */ ret = cqspi_of_get_pdata(cqspi); diff --git a/drivers/staging/comedi/drivers/addi_apci_1032.c b/drivers/staging/comedi/drivers/addi_apci_1032.c index 35b75f0c9200..81a246fbcc01 100644 --- a/drivers/staging/comedi/drivers/addi_apci_1032.c +++ b/drivers/staging/comedi/drivers/addi_apci_1032.c @@ -260,6 +260,7 @@ static irqreturn_t apci1032_interrupt(int irq, void *d) struct apci1032_private *devpriv = dev->private; struct comedi_subdevice *s = dev->read_subdev; unsigned int ctrl; + unsigned short val; /* check interrupt is from this device */ if ((inl(devpriv->amcc_iobase + AMCC_OP_REG_INTCSR) & @@ -275,7 +276,8 @@ static irqreturn_t apci1032_interrupt(int irq, void *d) outl(ctrl & ~APCI1032_CTRL_INT_ENA, dev->iobase + APCI1032_CTRL_REG); s->state = inl(dev->iobase + APCI1032_STATUS_REG) & 0xffff; - comedi_buf_write_samples(s, &s->state, 1); + val = s->state; + comedi_buf_write_samples(s, &val, 1); comedi_handle_events(dev, s); /* enable the interrupt */ diff --git a/drivers/staging/comedi/drivers/addi_apci_1500.c b/drivers/staging/comedi/drivers/addi_apci_1500.c index 11efb21555e3..b04c15dcfb57 100644 --- a/drivers/staging/comedi/drivers/addi_apci_1500.c +++ b/drivers/staging/comedi/drivers/addi_apci_1500.c @@ -208,7 +208,7 @@ static irqreturn_t apci1500_interrupt(int irq, void *d) struct comedi_device *dev = d; struct apci1500_private *devpriv = dev->private; struct comedi_subdevice *s = dev->read_subdev; - unsigned int status = 0; + unsigned short status = 0; unsigned int val; val = inl(devpriv->amcc + AMCC_OP_REG_INTCSR); @@ -238,14 +238,14 @@ static irqreturn_t apci1500_interrupt(int irq, void *d) * * Mask Meaning * ---------- ------------------------------------------ - * 0x00000001 Event 1 has occurred - * 0x00000010 Event 2 has occurred - * 0x00000100 Counter/timer 1 has run down (not implemented) - * 0x00001000 Counter/timer 2 has run down (not implemented) - * 0x00010000 Counter 3 has run down (not implemented) - * 0x00100000 Watchdog has run down (not implemented) - * 0x01000000 Voltage error - * 0x10000000 Short-circuit error + * 0b00000001 Event 1 has occurred + * 0b00000010 Event 2 has occurred + * 0b00000100 Counter/timer 1 has run down (not implemented) + * 0b00001000 Counter/timer 2 has run down (not implemented) + * 0b00010000 Counter 3 has run down (not implemented) + * 0b00100000 Watchdog has run down (not implemented) + * 0b01000000 Voltage error + * 0b10000000 Short-circuit error */ comedi_buf_write_samples(s, &status, 1); 
comedi_handle_events(dev, s); diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c index 692893c7e5c3..090607760be6 100644 --- a/drivers/staging/comedi/drivers/adv_pci1710.c +++ b/drivers/staging/comedi/drivers/adv_pci1710.c @@ -300,11 +300,11 @@ static int pci1710_ai_eoc(struct comedi_device *dev, static int pci1710_ai_read_sample(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int cur_chan, - unsigned int *val) + unsigned short *val) { const struct boardtype *board = dev->board_ptr; struct pci1710_private *devpriv = dev->private; - unsigned int sample; + unsigned short sample; unsigned int chan; sample = inw(dev->iobase + PCI171X_AD_DATA_REG); @@ -345,7 +345,7 @@ static int pci1710_ai_insn_read(struct comedi_device *dev, pci1710_ai_setup_chanlist(dev, s, &insn->chanspec, 1, 1); for (i = 0; i < insn->n; i++) { - unsigned int val; + unsigned short val; /* start conversion */ outw(0, dev->iobase + PCI171X_SOFTTRG_REG); @@ -395,7 +395,7 @@ static void pci1710_handle_every_sample(struct comedi_device *dev, { struct comedi_cmd *cmd = &s->async->cmd; unsigned int status; - unsigned int val; + unsigned short val; int ret; status = inw(dev->iobase + PCI171X_STATUS_REG); @@ -455,7 +455,7 @@ static void pci1710_handle_fifo(struct comedi_device *dev, } for (i = 0; i < devpriv->max_samples; i++) { - unsigned int val; + unsigned short val; int ret; ret = pci1710_ai_read_sample(dev, s, s->async->cur_chan, &val); diff --git a/drivers/staging/comedi/drivers/amplc_pc236_common.c b/drivers/staging/comedi/drivers/amplc_pc236_common.c index 043752663188..981d281e87a1 100644 --- a/drivers/staging/comedi/drivers/amplc_pc236_common.c +++ b/drivers/staging/comedi/drivers/amplc_pc236_common.c @@ -126,7 +126,9 @@ static irqreturn_t pc236_interrupt(int irq, void *d) handled = pc236_intr_check(dev); if (dev->attached && handled) { - comedi_buf_write_samples(s, &s->state, 1); + unsigned short val = 0; + + comedi_buf_write_samples(s, &val, 1); comedi_handle_events(dev, s); } return IRQ_RETVAL(handled); diff --git a/drivers/staging/comedi/drivers/cb_pcidas.c b/drivers/staging/comedi/drivers/cb_pcidas.c index d740c4782775..2f20bd56ec6c 100644 --- a/drivers/staging/comedi/drivers/cb_pcidas.c +++ b/drivers/staging/comedi/drivers/cb_pcidas.c @@ -1281,7 +1281,7 @@ static int cb_pcidas_auto_attach(struct comedi_device *dev, devpriv->amcc + AMCC_OP_REG_INTCSR); ret = request_irq(pcidev->irq, cb_pcidas_interrupt, IRQF_SHARED, - dev->board_name, dev); + "cb_pcidas", dev); if (ret) { dev_dbg(dev->class_dev, "unable to allocate irq %d\n", pcidev->irq); diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c index fa987bb0e7cd..6d3ba399a7f0 100644 --- a/drivers/staging/comedi/drivers/cb_pcidas64.c +++ b/drivers/staging/comedi/drivers/cb_pcidas64.c @@ -4035,7 +4035,7 @@ static int auto_attach(struct comedi_device *dev, init_stc_registers(dev); retval = request_irq(pcidev->irq, handle_interrupt, IRQF_SHARED, - dev->board_name, dev); + "cb_pcidas64", dev); if (retval) { dev_dbg(dev->class_dev, "unable to allocate irq %u\n", pcidev->irq); diff --git a/drivers/staging/comedi/drivers/comedi_parport.c b/drivers/staging/comedi/drivers/comedi_parport.c index 9361b2dcf949..5338b5eea440 100644 --- a/drivers/staging/comedi/drivers/comedi_parport.c +++ b/drivers/staging/comedi/drivers/comedi_parport.c @@ -210,12 +210,13 @@ static irqreturn_t parport_interrupt(int irq, void *d) struct comedi_device *dev = d; struct 
comedi_subdevice *s = dev->read_subdev; unsigned int ctrl; + unsigned short val = 0; ctrl = inb(dev->iobase + PARPORT_CTRL_REG); if (!(ctrl & PARPORT_CTRL_IRQ_ENA)) return IRQ_NONE; - comedi_buf_write_samples(s, &s->state, 1); + comedi_buf_write_samples(s, &val, 1); comedi_handle_events(dev, s); return IRQ_HANDLED; diff --git a/drivers/staging/comedi/drivers/das6402.c b/drivers/staging/comedi/drivers/das6402.c index 04e224f8b779..96f4107b8054 100644 --- a/drivers/staging/comedi/drivers/das6402.c +++ b/drivers/staging/comedi/drivers/das6402.c @@ -186,7 +186,7 @@ static irqreturn_t das6402_interrupt(int irq, void *d) if (status & DAS6402_STATUS_FFULL) { async->events |= COMEDI_CB_OVERFLOW; } else if (status & DAS6402_STATUS_FFNE) { - unsigned int val; + unsigned short val; val = das6402_ai_read_sample(dev, s); comedi_buf_write_samples(s, &val, 1); diff --git a/drivers/staging/comedi/drivers/das800.c b/drivers/staging/comedi/drivers/das800.c index 4ea100ff6930..2881808d6606 100644 --- a/drivers/staging/comedi/drivers/das800.c +++ b/drivers/staging/comedi/drivers/das800.c @@ -427,7 +427,7 @@ static irqreturn_t das800_interrupt(int irq, void *d) struct comedi_cmd *cmd; unsigned long irq_flags; unsigned int status; - unsigned int val; + unsigned short val; bool fifo_empty; bool fifo_overflow; int i; diff --git a/drivers/staging/comedi/drivers/dmm32at.c b/drivers/staging/comedi/drivers/dmm32at.c index 17e6018918bb..56682f01242f 100644 --- a/drivers/staging/comedi/drivers/dmm32at.c +++ b/drivers/staging/comedi/drivers/dmm32at.c @@ -404,7 +404,7 @@ static irqreturn_t dmm32at_isr(int irq, void *d) { struct comedi_device *dev = d; unsigned char intstat; - unsigned int val; + unsigned short val; int i; if (!dev->attached) { diff --git a/drivers/staging/comedi/drivers/me4000.c b/drivers/staging/comedi/drivers/me4000.c index 726e40dc17b6..0d3d4cafce2e 100644 --- a/drivers/staging/comedi/drivers/me4000.c +++ b/drivers/staging/comedi/drivers/me4000.c @@ -924,7 +924,7 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id) struct comedi_subdevice *s = dev->read_subdev; int i; int c = 0; - unsigned int lval; + unsigned short lval; if (!dev->attached) return IRQ_NONE; diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c index 99e744172f4d..f1a45cf7342a 100644 --- a/drivers/staging/comedi/drivers/ni_6527.c +++ b/drivers/staging/comedi/drivers/ni_6527.c @@ -195,7 +195,9 @@ static irqreturn_t ni6527_interrupt(int irq, void *d) return IRQ_NONE; if (status & NI6527_STATUS_EDGE) { - comedi_buf_write_samples(s, &s->state, 1); + unsigned short val = 0; + + comedi_buf_write_samples(s, &val, 1); comedi_handle_events(dev, s); } diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c index eb3f9f7109da..7cd8497420f2 100644 --- a/drivers/staging/comedi/drivers/ni_65xx.c +++ b/drivers/staging/comedi/drivers/ni_65xx.c @@ -472,6 +472,7 @@ static irqreturn_t ni_65xx_interrupt(int irq, void *d) struct comedi_device *dev = d; struct comedi_subdevice *s = dev->read_subdev; unsigned int status; + unsigned short val = 0; status = readb(dev->mmio + NI_65XX_STATUS_REG); if ((status & NI_65XX_STATUS_INT) == 0) @@ -482,7 +483,7 @@ static irqreturn_t ni_65xx_interrupt(int irq, void *d) writeb(NI_65XX_CLR_EDGE_INT | NI_65XX_CLR_OVERFLOW_INT, dev->mmio + NI_65XX_CLR_REG); - comedi_buf_write_samples(s, &s->state, 1); + comedi_buf_write_samples(s, &val, 1); comedi_handle_events(dev, s); return IRQ_HANDLED; diff --git 
a/drivers/staging/comedi/drivers/pcl711.c b/drivers/staging/comedi/drivers/pcl711.c index 2dbf69e30965..bd6f42fe9e3c 100644 --- a/drivers/staging/comedi/drivers/pcl711.c +++ b/drivers/staging/comedi/drivers/pcl711.c @@ -184,7 +184,7 @@ static irqreturn_t pcl711_interrupt(int irq, void *d) struct comedi_device *dev = d; struct comedi_subdevice *s = dev->read_subdev; struct comedi_cmd *cmd = &s->async->cmd; - unsigned int data; + unsigned short data; if (!dev->attached) { dev_err(dev->class_dev, "spurious interrupt\n"); diff --git a/drivers/staging/comedi/drivers/pcl726.c b/drivers/staging/comedi/drivers/pcl726.c index 64eb649c9813..88f25d7e76f7 100644 --- a/drivers/staging/comedi/drivers/pcl726.c +++ b/drivers/staging/comedi/drivers/pcl726.c @@ -220,9 +220,11 @@ static irqreturn_t pcl726_interrupt(int irq, void *d) struct pcl726_private *devpriv = dev->private; if (devpriv->cmd_running) { + unsigned short val = 0; + pcl726_intr_cancel(dev, s); - comedi_buf_write_samples(s, &s->state, 1); + comedi_buf_write_samples(s, &val, 1); comedi_handle_events(dev, s); } diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c index 63e3011158f2..f4b4a686c710 100644 --- a/drivers/staging/comedi/drivers/pcl818.c +++ b/drivers/staging/comedi/drivers/pcl818.c @@ -423,7 +423,7 @@ static int pcl818_ai_eoc(struct comedi_device *dev, static bool pcl818_ai_write_sample(struct comedi_device *dev, struct comedi_subdevice *s, - unsigned int chan, unsigned int val) + unsigned int chan, unsigned short val) { struct pcl818_private *devpriv = dev->private; struct comedi_cmd *cmd = &s->async->cmd; diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c index 7956abcbae22..9f920819cd74 100644 --- a/drivers/staging/comedi/drivers/vmk80xx.c +++ b/drivers/staging/comedi/drivers/vmk80xx.c @@ -877,5 +877,4 @@ module_comedi_usb_driver(vmk80xx_driver, vmk80xx_usb_driver); MODULE_AUTHOR("Manuel Gebele <forensixs@gmx.de>"); MODULE_DESCRIPTION("Velleman USB Board Low-Level Driver"); -MODULE_SUPPORTED_DEVICE("K8055/K8061 aka VM110/VM140"); MODULE_LICENSE("GPL"); diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c index dc09cc6e1c47..09e7b4cd0138 100644 --- a/drivers/staging/ks7010/ks_wlan_net.c +++ b/drivers/staging/ks7010/ks_wlan_net.c @@ -1120,6 +1120,7 @@ static int ks_wlan_set_scan(struct net_device *dev, { struct ks_wlan_private *priv = netdev_priv(dev); struct iw_scan_req *req = NULL; + int len; if (priv->sleep_mode == SLP_SLEEP) return -EPERM; @@ -1129,8 +1130,9 @@ static int ks_wlan_set_scan(struct net_device *dev, if (wrqu->data.length == sizeof(struct iw_scan_req) && wrqu->data.flags & IW_SCAN_THIS_ESSID) { req = (struct iw_scan_req *)extra; - priv->scan_ssid_len = req->essid_len; - memcpy(priv->scan_ssid, req->essid, priv->scan_ssid_len); + len = min_t(int, req->essid_len, IW_ESSID_MAX_SIZE); + priv->scan_ssid_len = len; + memcpy(priv->scan_ssid, req->essid, len); } else { priv->scan_ssid_len = 0; } diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c index fa1e34a0d456..182bb944c9b3 100644 --- a/drivers/staging/rtl8188eu/core/rtw_ap.c +++ b/drivers/staging/rtl8188eu/core/rtw_ap.c @@ -791,6 +791,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len) p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, WLAN_EID_SSID, &ie_len, pbss_network->ie_length - _BEACON_IE_OFFSET_); if (p && ie_len > 0) { + ie_len = min_t(int, ie_len, sizeof(pbss_network->ssid.ssid)); 
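/*
 * Illustrative sketch only -- not part of the patch above. The rtw_ap.c and
 * related staging hunks clamp an information-element length taken from the
 * air to the size of the destination buffer before memcpy(). A minimal
 * sketch of the same bounded-copy pattern; the structure and field names
 * here are hypothetical, not the driver's own:
 */
#include <stddef.h>
#include <string.h>

#define EXAMPLE_SSID_MAX 32

struct example_ssid {
	unsigned char ssid[EXAMPLE_SSID_MAX];
	unsigned int ssid_length;
};

static void example_store_ssid(struct example_ssid *dst,
			       const unsigned char *ie, size_t ie_len)
{
	/* never trust an on-air length; clamp it to the destination size */
	if (ie_len > sizeof(dst->ssid))
		ie_len = sizeof(dst->ssid);

	memset(dst->ssid, 0, sizeof(dst->ssid));
	memcpy(dst->ssid, ie, ie_len);
	dst->ssid_length = ie_len;
}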
memset(&pbss_network->ssid, 0, sizeof(struct ndis_802_11_ssid)); memcpy(pbss_network->ssid.ssid, p + 2, ie_len); pbss_network->ssid.ssid_length = ie_len; @@ -811,6 +812,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len) p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, WLAN_EID_SUPP_RATES, &ie_len, pbss_network->ie_length - _BEACON_IE_OFFSET_); if (p) { + ie_len = min_t(int, ie_len, NDIS_802_11_LENGTH_RATES_EX); memcpy(supportRate, p + 2, ie_len); supportRateNum = ie_len; } @@ -819,6 +821,8 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len) p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, WLAN_EID_EXT_SUPP_RATES, &ie_len, pbss_network->ie_length - _BEACON_IE_OFFSET_); if (p) { + ie_len = min_t(int, ie_len, + NDIS_802_11_LENGTH_RATES_EX - supportRateNum); memcpy(supportRate + supportRateNum, p + 2, ie_len); supportRateNum += ie_len; } @@ -934,6 +938,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len) pht_cap->mcs.rx_mask[0] = 0xff; pht_cap->mcs.rx_mask[1] = 0x0; + ie_len = min_t(int, ie_len, sizeof(pmlmepriv->htpriv.ht_cap)); memcpy(&pmlmepriv->htpriv.ht_cap, p + 2, ie_len); } diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c index bf22f130d3e1..58954b88a817 100644 --- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c +++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c @@ -1133,9 +1133,11 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a, break; } sec_len = *(pos++); len -= 1; - if (sec_len > 0 && sec_len <= len) { + if (sec_len > 0 && + sec_len <= len && + sec_len <= 32) { ssid[ssid_index].ssid_length = sec_len; - memcpy(ssid[ssid_index].ssid, pos, ssid[ssid_index].ssid_length); + memcpy(ssid[ssid_index].ssid, pos, sec_len); ssid_index++; } pos += sec_len; diff --git a/drivers/staging/rtl8192e/Kconfig b/drivers/staging/rtl8192e/Kconfig index 963a2ffbc1fb..39f5a6a7346a 100644 --- a/drivers/staging/rtl8192e/Kconfig +++ b/drivers/staging/rtl8192e/Kconfig @@ -27,6 +27,7 @@ config RTLLIB_CRYPTO_CCMP config RTLLIB_CRYPTO_TKIP tristate "Support for rtllib TKIP crypto" depends on RTLLIB + select CRYPTO select CRYPTO_LIB_ARC4 select CRYPTO_MICHAEL_MIC default y diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c index 16bcee13f64b..407effde5e71 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c @@ -406,9 +406,10 @@ static int _rtl92e_wx_set_scan(struct net_device *dev, struct iw_scan_req *req = (struct iw_scan_req *)b; if (req->essid_len) { - ieee->current_network.ssid_len = req->essid_len; - memcpy(ieee->current_network.ssid, req->essid, - req->essid_len); + int len = min_t(int, req->essid_len, IW_ESSID_MAX_SIZE); + + ieee->current_network.ssid_len = len; + memcpy(ieee->current_network.ssid, req->essid, len); } } diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h index b84f00b8d18b..4cabaf21c1ca 100644 --- a/drivers/staging/rtl8192e/rtllib.h +++ b/drivers/staging/rtl8192e/rtllib.h @@ -1105,7 +1105,7 @@ struct rtllib_network { bool bWithAironetIE; bool bCkipSupported; bool bCcxRmEnable; - u16 CcxRmState[2]; + u8 CcxRmState[2]; bool bMBssidValid; u8 MBssidMask; u8 MBssid[ETH_ALEN]; diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c index 66c135321da4..15bbb63ca130 100644 --- a/drivers/staging/rtl8192e/rtllib_rx.c +++ b/drivers/staging/rtl8192e/rtllib_rx.c @@ -1967,7 +1967,7 @@ static void 
rtllib_parse_mife_generic(struct rtllib_device *ieee, info_element->data[2] == 0x96 && info_element->data[3] == 0x01) { if (info_element->len == 6) { - memcpy(network->CcxRmState, &info_element[4], 2); + memcpy(network->CcxRmState, &info_element->data[4], 2); if (network->CcxRmState[0] != 0) network->bCcxRmEnable = true; else diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c index d853586705fc..77bf88696a84 100644 --- a/drivers/staging/rtl8192u/r8192U_wx.c +++ b/drivers/staging/rtl8192u/r8192U_wx.c @@ -331,8 +331,10 @@ static int r8192_wx_set_scan(struct net_device *dev, struct iw_request_info *a, struct iw_scan_req *req = (struct iw_scan_req *)b; if (req->essid_len) { - ieee->current_network.ssid_len = req->essid_len; - memcpy(ieee->current_network.ssid, req->essid, req->essid_len); + int len = min_t(int, req->essid_len, IW_ESSID_MAX_SIZE); + + ieee->current_network.ssid_len = len; + memcpy(ieee->current_network.ssid, req->essid, len); } } diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c index 18116469bd31..75716f59044d 100644 --- a/drivers/staging/rtl8712/rtl871x_cmd.c +++ b/drivers/staging/rtl8712/rtl871x_cmd.c @@ -192,8 +192,10 @@ u8 r8712_sitesurvey_cmd(struct _adapter *padapter, psurveyPara->ss_ssidlen = 0; memset(psurveyPara->ss_ssid, 0, IW_ESSID_MAX_SIZE + 1); if (pssid && pssid->SsidLength) { - memcpy(psurveyPara->ss_ssid, pssid->Ssid, pssid->SsidLength); - psurveyPara->ss_ssidlen = cpu_to_le32(pssid->SsidLength); + int len = min_t(int, pssid->SsidLength, IW_ESSID_MAX_SIZE); + + memcpy(psurveyPara->ss_ssid, pssid->Ssid, len); + psurveyPara->ss_ssidlen = cpu_to_le32(len); } set_fwstate(pmlmepriv, _FW_UNDER_SURVEY); r8712_enqueue_cmd(pcmdpriv, ph2c); diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c index 81de5a9e6b67..60dd798a6e51 100644 --- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c +++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c @@ -924,7 +924,7 @@ static int r871x_wx_set_priv(struct net_device *dev, struct iw_point *dwrq = (struct iw_point *)awrq; len = dwrq->length; - ext = memdup_user(dwrq->pointer, len); + ext = strndup_user(dwrq->pointer, len); if (IS_ERR(ext)) return PTR_ERR(ext); diff --git a/drivers/staging/vt6655/rxtx.h b/drivers/staging/vt6655/rxtx.h index e7061d383306..c3c2c1566882 100644 --- a/drivers/staging/vt6655/rxtx.h +++ b/drivers/staging/vt6655/rxtx.h @@ -150,7 +150,7 @@ struct vnt_cts { u16 reserved; struct ieee80211_cts data; u16 reserved2; -} __packed; +} __packed __aligned(2); struct vnt_cts_fb { struct vnt_phy_field b; @@ -160,7 +160,7 @@ struct vnt_cts_fb { __le16 cts_duration_ba_f1; struct ieee80211_cts data; u16 reserved2; -} __packed; +} __packed __aligned(2); struct vnt_tx_fifo_head { u8 tx_key[WLAN_KEY_LEN_CCMP]; diff --git a/drivers/staging/wfx/bh.c b/drivers/staging/wfx/bh.c index cd6bcfdfbe9a..ed53d0b45592 100644 --- a/drivers/staging/wfx/bh.c +++ b/drivers/staging/wfx/bh.c @@ -5,6 +5,7 @@ * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
* Copyright (c) 2010, ST-Ericsson */ +#include <linux/gpio/consumer.h> #include <net/mac80211.h> #include "bh.h" diff --git a/drivers/staging/wfx/bh.h b/drivers/staging/wfx/bh.h index 92ef3298d4ac..78c49329e22a 100644 --- a/drivers/staging/wfx/bh.h +++ b/drivers/staging/wfx/bh.h @@ -8,6 +8,10 @@ #ifndef WFX_BH_H #define WFX_BH_H +#include <linux/atomic.h> +#include <linux/wait.h> +#include <linux/workqueue.h> + struct wfx_dev; struct wfx_hif { diff --git a/drivers/staging/wfx/bus.h b/drivers/staging/wfx/bus.h index ea3911485307..ca04b3da6204 100644 --- a/drivers/staging/wfx/bus.h +++ b/drivers/staging/wfx/bus.h @@ -8,6 +8,9 @@ #ifndef WFX_BUS_H #define WFX_BUS_H +#include <linux/mmc/sdio_func.h> +#include <linux/spi/spi.h> + #define WFX_REG_CONFIG 0x0 #define WFX_REG_CONTROL 0x1 #define WFX_REG_IN_OUT_QUEUE 0x2 diff --git a/drivers/staging/wfx/bus_sdio.c b/drivers/staging/wfx/bus_sdio.c index 588edce44854..e06d7e1ebe9c 100644 --- a/drivers/staging/wfx/bus_sdio.c +++ b/drivers/staging/wfx/bus_sdio.c @@ -5,13 +5,19 @@ * Copyright (c) 2017-2020, Silicon Laboratories, Inc. * Copyright (c) 2010, ST-Ericsson */ +#include <linux/module.h> #include <linux/mmc/sdio.h> #include <linux/mmc/sdio_func.h> #include <linux/mmc/card.h> +#include <linux/interrupt.h> #include <linux/of_irq.h> +#include <linux/irq.h> #include "bus.h" #include "wfx.h" +#include "hwio.h" +#include "main.h" +#include "bh.h" static const struct wfx_platform_data wfx_sdio_pdata = { .file_fw = "wfm_wf200", diff --git a/drivers/staging/wfx/bus_spi.c b/drivers/staging/wfx/bus_spi.c index f89855abe9f8..a99125d1a30d 100644 --- a/drivers/staging/wfx/bus_spi.c +++ b/drivers/staging/wfx/bus_spi.c @@ -6,12 +6,19 @@ * Copyright (c) 2011, Sagrad Inc. * Copyright (c) 2010, ST-Ericsson */ +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> #include <linux/spi/spi.h> +#include <linux/interrupt.h> #include <linux/irq.h> #include <linux/of.h> #include "bus.h" #include "wfx.h" +#include "hwio.h" +#include "main.h" +#include "bh.h" #define SET_WRITE 0x7FFF /* usage: and operation */ #define SET_READ 0x8000 /* usage: or operation */ diff --git a/drivers/staging/wfx/data_rx.c b/drivers/staging/wfx/data_rx.c index 2cfa16279220..385f2d42a0e2 100644 --- a/drivers/staging/wfx/data_rx.c +++ b/drivers/staging/wfx/data_rx.c @@ -5,8 +5,13 @@ * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
* Copyright (c) 2010, ST-Ericsson */ +#include <linux/etherdevice.h> +#include <net/mac80211.h> + #include "data_rx.h" #include "wfx.h" +#include "bh.h" +#include "sta.h" static void wfx_rx_handle_ba(struct wfx_vif *wvif, struct ieee80211_mgmt *mgmt) { diff --git a/drivers/staging/wfx/data_tx.c b/drivers/staging/wfx/data_tx.c index 76f26e3c4381..77fb104efdec 100644 --- a/drivers/staging/wfx/data_tx.c +++ b/drivers/staging/wfx/data_tx.c @@ -6,9 +6,14 @@ * Copyright (c) 2010, ST-Ericsson */ #include <net/mac80211.h> +#include <linux/etherdevice.h> +#include "data_tx.h" #include "wfx.h" +#include "bh.h" #include "sta.h" +#include "queue.h" +#include "debug.h" #include "traces.h" #include "hif_tx_mib.h" diff --git a/drivers/staging/wfx/data_tx.h b/drivers/staging/wfx/data_tx.h index 6b3020097efa..401363d6b563 100644 --- a/drivers/staging/wfx/data_tx.h +++ b/drivers/staging/wfx/data_tx.h @@ -8,6 +8,9 @@ #ifndef WFX_DATA_TX_H #define WFX_DATA_TX_H +#include <linux/list.h> +#include <net/mac80211.h> + #include "hif_api_cmd.h" #include "hif_api_mib.h" diff --git a/drivers/staging/wfx/debug.c b/drivers/staging/wfx/debug.c index 3e87d13eb358..eedada78c25f 100644 --- a/drivers/staging/wfx/debug.c +++ b/drivers/staging/wfx/debug.c @@ -5,9 +5,15 @@ * Copyright (c) 2017-2020, Silicon Laboratories, Inc. * Copyright (c) 2010, ST-Ericsson */ +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/crc32.h> + #include "debug.h" #include "wfx.h" #include "sta.h" +#include "main.h" +#include "hif_tx.h" #include "hif_tx_mib.h" #define CREATE_TRACE_POINTS diff --git a/drivers/staging/wfx/fwio.c b/drivers/staging/wfx/fwio.c index 1bb9054871c4..1b8aec02d169 100644 --- a/drivers/staging/wfx/fwio.c +++ b/drivers/staging/wfx/fwio.c @@ -6,6 +6,8 @@ * Copyright (c) 2010, ST-Ericsson */ #include <linux/firmware.h> +#include <linux/slab.h> +#include <linux/mm.h> #include <linux/bitfield.h> #include "fwio.h" diff --git a/drivers/staging/wfx/hif_api_cmd.h b/drivers/staging/wfx/hif_api_cmd.h index 8b671c9ab97c..58c9bb036011 100644 --- a/drivers/staging/wfx/hif_api_cmd.h +++ b/drivers/staging/wfx/hif_api_cmd.h @@ -8,6 +8,10 @@ #ifndef WFX_HIF_API_CMD_H #define WFX_HIF_API_CMD_H +#include <linux/ieee80211.h> + +#include "hif_api_general.h" + enum hif_requests_ids { HIF_REQ_ID_RESET = 0x0a, HIF_REQ_ID_READ_MIB = 0x05, diff --git a/drivers/staging/wfx/hif_api_general.h b/drivers/staging/wfx/hif_api_general.h index 70b253d0265d..24188945718d 100644 --- a/drivers/staging/wfx/hif_api_general.h +++ b/drivers/staging/wfx/hif_api_general.h @@ -8,6 +8,15 @@ #ifndef WFX_HIF_API_GENERAL_H #define WFX_HIF_API_GENERAL_H +#ifdef __KERNEL__ +#include <linux/types.h> +#include <linux/if_ether.h> +#else +#include <net/ethernet.h> +#include <stdint.h> +#define __packed __attribute__((__packed__)) +#endif + #define HIF_ID_IS_INDICATION 0x80 #define HIF_COUNTER_MAX 7 diff --git a/drivers/staging/wfx/hif_tx.c b/drivers/staging/wfx/hif_tx.c index 17dc13321978..63b437261eb7 100644 --- a/drivers/staging/wfx/hif_tx.c +++ b/drivers/staging/wfx/hif_tx.c @@ -6,7 +6,11 @@ * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
* Copyright (c) 2010, ST-Ericsson */ +#include <linux/etherdevice.h> + +#include "hif_tx.h" #include "wfx.h" +#include "bh.h" #include "hwio.h" #include "debug.h" #include "sta.h" diff --git a/drivers/staging/wfx/hif_tx_mib.c b/drivers/staging/wfx/hif_tx_mib.c index 6432ed86505c..1926cf1b62be 100644 --- a/drivers/staging/wfx/hif_tx_mib.c +++ b/drivers/staging/wfx/hif_tx_mib.c @@ -6,8 +6,13 @@ * Copyright (c) 2010, ST-Ericsson * Copyright (C) 2010, ST-Ericsson SA */ + +#include <linux/etherdevice.h> + #include "wfx.h" +#include "hif_tx.h" #include "hif_tx_mib.h" +#include "hif_api_mib.h" int hif_set_output_power(struct wfx_vif *wvif, int val) { diff --git a/drivers/staging/wfx/hwio.c b/drivers/staging/wfx/hwio.c index 089bb41be149..36fbc5b5d64c 100644 --- a/drivers/staging/wfx/hwio.c +++ b/drivers/staging/wfx/hwio.c @@ -5,10 +5,13 @@ * Copyright (c) 2017-2020, Silicon Laboratories, Inc. * Copyright (c) 2010, ST-Ericsson */ +#include <linux/kernel.h> +#include <linux/delay.h> #include <linux/slab.h> #include "hwio.h" #include "wfx.h" +#include "bus.h" #include "traces.h" /* diff --git a/drivers/staging/wfx/hwio.h b/drivers/staging/wfx/hwio.h index 8bb9bcfc3182..0b8e4f7157df 100644 --- a/drivers/staging/wfx/hwio.h +++ b/drivers/staging/wfx/hwio.h @@ -8,6 +8,8 @@ #ifndef WFX_HWIO_H #define WFX_HWIO_H +#include <linux/types.h> + struct wfx_dev; int wfx_data_read(struct wfx_dev *wdev, void *buf, size_t buf_len); diff --git a/drivers/staging/wfx/key.c b/drivers/staging/wfx/key.c index c93d07dcdc10..2ab82bed4c1b 100644 --- a/drivers/staging/wfx/key.c +++ b/drivers/staging/wfx/key.c @@ -5,10 +5,12 @@ * Copyright (c) 2017-2020, Silicon Laboratories, Inc. * Copyright (c) 2010, ST-Ericsson */ +#include <linux/etherdevice.h> #include <net/mac80211.h> #include "key.h" #include "wfx.h" +#include "hif_tx_mib.h" static int wfx_alloc_key(struct wfx_dev *wdev) { diff --git a/drivers/staging/wfx/key.h b/drivers/staging/wfx/key.h index 4dc9feadaba2..70a44d0ca35e 100644 --- a/drivers/staging/wfx/key.h +++ b/drivers/staging/wfx/key.h @@ -8,6 +8,8 @@ #ifndef WFX_KEY_H #define WFX_KEY_H +#include <net/mac80211.h> + struct wfx_dev; struct wfx_vif; diff --git a/drivers/staging/wfx/main.c b/drivers/staging/wfx/main.c index b9ea9a93fe1a..e7bc1988124a 100644 --- a/drivers/staging/wfx/main.c +++ b/drivers/staging/wfx/main.c @@ -10,21 +10,28 @@ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> * Copyright (c) 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al. 
*/ +#include <linux/module.h> #include <linux/of.h> #include <linux/of_net.h> +#include <linux/gpio/consumer.h> #include <linux/mmc/sdio_func.h> #include <linux/spi/spi.h> +#include <linux/etherdevice.h> #include <linux/firmware.h> +#include "main.h" #include "wfx.h" #include "fwio.h" #include "hwio.h" #include "bus.h" +#include "bh.h" #include "sta.h" #include "key.h" #include "scan.h" #include "debug.h" +#include "data_tx.h" #include "hif_tx_mib.h" +#include "hif_api_cmd.h" #define WFX_PDS_MAX_SIZE 1500 diff --git a/drivers/staging/wfx/main.h b/drivers/staging/wfx/main.h index 086bcc041b90..a0db322383a3 100644 --- a/drivers/staging/wfx/main.h +++ b/drivers/staging/wfx/main.h @@ -10,8 +10,11 @@ #ifndef WFX_MAIN_H #define WFX_MAIN_H +#include <linux/device.h> #include <linux/gpio/consumer.h> +#include "hif_api_general.h" + struct wfx_dev; struct hwbus_ops; diff --git a/drivers/staging/wfx/queue.c b/drivers/staging/wfx/queue.c index 3bddf282a4ce..31c37f69c295 100644 --- a/drivers/staging/wfx/queue.c +++ b/drivers/staging/wfx/queue.c @@ -5,9 +5,13 @@ * Copyright (c) 2017-2020, Silicon Laboratories, Inc. * Copyright (c) 2010, ST-Ericsson */ +#include <linux/sched.h> #include <net/mac80211.h> +#include "queue.h" #include "wfx.h" +#include "sta.h" +#include "data_tx.h" #include "traces.h" void wfx_tx_lock(struct wfx_dev *wdev) diff --git a/drivers/staging/wfx/queue.h b/drivers/staging/wfx/queue.h index e43aa9dfbc45..80ba19455ef3 100644 --- a/drivers/staging/wfx/queue.h +++ b/drivers/staging/wfx/queue.h @@ -8,6 +8,9 @@ #ifndef WFX_QUEUE_H #define WFX_QUEUE_H +#include <linux/skbuff.h> +#include <linux/atomic.h> + struct wfx_dev; struct wfx_vif; diff --git a/drivers/staging/wfx/scan.h b/drivers/staging/wfx/scan.h index e5b7eef78858..c7496a766478 100644 --- a/drivers/staging/wfx/scan.h +++ b/drivers/staging/wfx/scan.h @@ -8,6 +8,8 @@ #ifndef WFX_SCAN_H #define WFX_SCAN_H +#include <net/mac80211.h> + struct wfx_dev; struct wfx_vif; diff --git a/drivers/staging/wfx/sta.c b/drivers/staging/wfx/sta.c index 5585f9e876e1..196779a1b89a 100644 --- a/drivers/staging/wfx/sta.c +++ b/drivers/staging/wfx/sta.c @@ -5,11 +5,17 @@ * Copyright (c) 2017-2020, Silicon Laboratories, Inc. * Copyright (c) 2010, ST-Ericsson */ +#include <linux/etherdevice.h> #include <net/mac80211.h> #include "sta.h" #include "wfx.h" +#include "fwio.h" +#include "bh.h" +#include "key.h" #include "scan.h" +#include "debug.h" +#include "hif_tx.h" #include "hif_tx_mib.h" #define HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES 2 diff --git a/drivers/staging/wfx/sta.h b/drivers/staging/wfx/sta.h index a3fb9fc93fa4..d7b5df5ea4e6 100644 --- a/drivers/staging/wfx/sta.h +++ b/drivers/staging/wfx/sta.h @@ -8,6 +8,8 @@ #ifndef WFX_STA_H #define WFX_STA_H +#include <net/mac80211.h> + struct wfx_dev; struct wfx_vif; diff --git a/drivers/staging/wfx/traces.h b/drivers/staging/wfx/traces.h index afe1074e09b3..e34c7a538c65 100644 --- a/drivers/staging/wfx/traces.h +++ b/drivers/staging/wfx/traces.h @@ -12,8 +12,11 @@ #define _WFX_TRACE_H #include <linux/tracepoint.h> +#include <net/mac80211.h> #include "bus.h" +#include "hif_api_cmd.h" +#include "hif_api_mib.h" /* The hell below need some explanations. For each symbolic number, we need to * define it with TRACE_DEFINE_ENUM() and in a list for __print_symbolic. 
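As an illustration of the pattern the wfx hunks above apply (making each header and source file self-contained by including the kernel headers whose types it actually uses, and forward-declaring structs it only references through pointers), here is a minimal, hypothetical sketch. The names example_dev, example_bh, and EXAMPLE_BH_H are invented for illustration only and do not appear in the driver; the kernel headers referenced do exist.

/*
 * Hypothetical header following the same idiom as the bh.h / queue.h /
 * sta.h changes: it pulls in exactly what it needs and forward-declares
 * the opaque device structure instead of relying on transitive includes.
 */
#ifndef EXAMPLE_BH_H
#define EXAMPLE_BH_H

#include <linux/atomic.h>	/* atomic_t and the atomic_*() helpers */
#include <linux/workqueue.h>	/* struct work_struct */

struct example_dev;		/* opaque; only used through a pointer here */

struct example_bh {
	atomic_t pending;	/* outstanding bottom-half requests */
	struct work_struct work;	/* deferred processing context */
};

void example_bh_poll(struct example_dev *dev);

#endif

The usual motivation for a series like this is robustness: headers that compile only because some other include happened to drag in their dependencies break as soon as include order changes, so spelling the dependencies out keeps each file buildable on its own.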
diff --git a/drivers/staging/wfx/wfx.h b/drivers/staging/wfx/wfx.h index a185b82795c4..94898680ccde 100644 --- a/drivers/staging/wfx/wfx.h +++ b/drivers/staging/wfx/wfx.h @@ -10,6 +10,9 @@ #ifndef WFX_H #define WFX_H +#include <linux/completion.h> +#include <linux/workqueue.h> +#include <linux/mutex.h> #include <linux/nospec.h> #include <net/mac80211.h> diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 3cbc074992bc..9ee797b8cb7e 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -882,7 +882,6 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, if (!bio) { new_bio: nr_vecs = bio_max_segs(nr_pages); - nr_pages -= nr_vecs; /* * Calls bio_kmalloc() and sets bio->bi_end_io() */ @@ -939,6 +938,14 @@ new_bio: return 0; fail: + if (bio) + bio_put(bio); + while (req->bio) { + bio = req->bio; + req->bio = bio->bi_next; + bio_put(bio); + } + req->biotail = NULL; return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c index cf4718c6d35d..319a1e701163 100644 --- a/drivers/tee/optee/core.c +++ b/drivers/tee/optee/core.c @@ -747,7 +747,6 @@ module_platform_driver(optee_driver); MODULE_AUTHOR("Linaro"); MODULE_DESCRIPTION("OP-TEE driver"); -MODULE_SUPPORTED_DEVICE(""); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:optee"); diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c index 345917a58f2f..1c4aac8464a7 100644 --- a/drivers/thermal/thermal_sysfs.c +++ b/drivers/thermal/thermal_sysfs.c @@ -674,6 +674,9 @@ void thermal_cooling_device_stats_update(struct thermal_cooling_device *cdev, { struct cooling_dev_stats *stats = cdev->stats; + if (!stats) + return; + spin_lock(&stats->lock); if (stats->state == new_state) diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index b63fecca6c2a..2a95b4ce06c0 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -768,12 +768,6 @@ static int tb_init_port(struct tb_port *port) tb_dump_port(port->sw->tb, &port->config); - /* Control port does not need HopID allocation */ - if (port->port) { - ida_init(&port->in_hopids); - ida_init(&port->out_hopids); - } - INIT_LIST_HEAD(&port->list); return 0; @@ -1842,10 +1836,8 @@ static void tb_switch_release(struct device *dev) dma_port_free(sw->dma_port); tb_switch_for_each_port(sw, port) { - if (!port->disabled) { - ida_destroy(&port->in_hopids); - ida_destroy(&port->out_hopids); - } + ida_destroy(&port->in_hopids); + ida_destroy(&port->out_hopids); } kfree(sw->uuid); @@ -2025,6 +2017,12 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent, /* minimum setup for tb_find_cap and tb_drom_read to work */ sw->ports[i].sw = sw; sw->ports[i].port = i; + + /* Control port does not need HopID allocation */ + if (i) { + ida_init(&sw->ports[i].in_hopids); + ida_init(&sw->ports[i].out_hopids); + } } ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS); diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index 1f000ac1728b..c348b1fc0efc 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -138,6 +138,10 @@ static void tb_discover_tunnels(struct tb_switch *sw) parent->boot = true; parent = tb_switch_parent(parent); } + } else if (tb_tunnel_is_dp(tunnel)) { + /* Keep the domain from powering down */ + pm_runtime_get_sync(&tunnel->src_port->sw->dev); + pm_runtime_get_sync(&tunnel->dst_port->sw->dev); } list_add_tail(&tunnel->list, 
&tcm->tunnel_list); diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c index c90848919644..9afa1dcef2c2 100644 --- a/drivers/tty/hvc/hvcs.c +++ b/drivers/tty/hvc/hvcs.c @@ -317,7 +317,6 @@ static void hvcs_hangup(struct tty_struct * tty); static int hvcs_probe(struct vio_dev *dev, const struct vio_device_id *id); -static int hvcs_remove(struct vio_dev *dev); static int __init hvcs_module_init(void); static void __exit hvcs_module_exit(void); static int hvcs_initialize(void); @@ -819,7 +818,7 @@ static int hvcs_probe( return 0; } -static int hvcs_remove(struct vio_dev *dev) +static void hvcs_remove(struct vio_dev *dev) { struct hvcs_struct *hvcsd = dev_get_drvdata(&dev->dev); unsigned long flags; @@ -849,7 +848,6 @@ static int hvcs_remove(struct vio_dev *dev) printk(KERN_INFO "HVCS: vty-server@%X removed from the" " vio bus.\n", dev->unit_address); - return 0; }; static struct vio_driver hvcs_vio_driver = { diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index 8b2797b6ee44..5e2374580e27 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -66,8 +66,7 @@ static void pty_close(struct tty_struct *tty, struct file *filp) wake_up_interruptible(&tty->link->read_wait); wake_up_interruptible(&tty->link->write_wait); if (tty->driver->subtype == PTY_TYPE_MASTER) { - struct file *f; - + set_bit(TTY_OTHER_CLOSED, &tty->flags); #ifdef CONFIG_UNIX98_PTYS if (tty->driver == ptm_driver) { mutex_lock(&devpts_mutex); @@ -76,17 +75,7 @@ static void pty_close(struct tty_struct *tty, struct file *filp) mutex_unlock(&devpts_mutex); } #endif - - /* - * This hack is required because a program can open a - * pty and redirect a console to it, but if the pty is - * closed and the console is not released, then the - * slave side will never close. So release the - * redirect when the master closes. 
- */ - f = tty_release_redirect(tty->link); - if (f) - fput(f); + tty_vhangup(tty->link); } } diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c index 9a872750581c..94af7a5ea497 100644 --- a/drivers/tty/serial/icom.c +++ b/drivers/tty/serial/icom.c @@ -1639,8 +1639,6 @@ module_exit(icom_exit); MODULE_AUTHOR("Michael Anderson <mjanders@us.ibm.com>"); MODULE_DESCRIPTION("IBM iSeries Serial IOA driver"); -MODULE_SUPPORTED_DEVICE - ("IBM iSeries 2745, 2771, 2772, 2742, 2793 and 2805 Communications adapters"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("icom_call_setup.bin"); MODULE_FIRMWARE("icom_res_dce.bin"); diff --git a/drivers/tty/serial/jsm/jsm_driver.c b/drivers/tty/serial/jsm/jsm_driver.c index cd30da0ef083..0ea799bf8dbb 100644 --- a/drivers/tty/serial/jsm/jsm_driver.c +++ b/drivers/tty/serial/jsm/jsm_driver.c @@ -19,7 +19,6 @@ MODULE_AUTHOR("Digi International, https://www.digi.com"); MODULE_DESCRIPTION("Driver for the Digi International Neo and Classic PCI based product line"); MODULE_LICENSE("GPL"); -MODULE_SUPPORTED_DEVICE("jsm"); #define JSM_DRIVER_NAME "jsm" #define NR_PORTS 32 diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c index 9795b2e8b0b2..1b61d26bb7af 100644 --- a/drivers/tty/serial/max310x.c +++ b/drivers/tty/serial/max310x.c @@ -1056,9 +1056,9 @@ static int max310x_startup(struct uart_port *port) max310x_port_update(port, MAX310X_MODE1_REG, MAX310X_MODE1_TRNSCVCTRL_BIT, 0); - /* Reset FIFOs */ - max310x_port_write(port, MAX310X_MODE2_REG, - MAX310X_MODE2_FIFORST_BIT); + /* Configure MODE2 register & Reset FIFOs*/ + val = MAX310X_MODE2_RXEMPTINV_BIT | MAX310X_MODE2_FIFORST_BIT; + max310x_port_write(port, MAX310X_MODE2_REG, val); max310x_port_update(port, MAX310X_MODE2_REG, MAX310X_MODE2_FIFORST_BIT, 0); @@ -1086,27 +1086,8 @@ static int max310x_startup(struct uart_port *port) /* Clear IRQ status register */ max310x_port_read(port, MAX310X_IRQSTS_REG); - /* - * Let's ask for an interrupt after a timeout equivalent to - * the receiving time of 4 characters after the last character - * has been received. - */ - max310x_port_write(port, MAX310X_RXTO_REG, 4); - - /* - * Make sure we also get RX interrupts when the RX FIFO is - * filling up quickly, so get an interrupt when half of the RX - * FIFO has been filled in. 
- */ - max310x_port_write(port, MAX310X_FIFOTRIGLVL_REG, - MAX310X_FIFOTRIGLVL_RX(MAX310X_FIFO_SIZE / 2)); - - /* Enable RX timeout interrupt in LSR */ - max310x_port_write(port, MAX310X_LSR_IRQEN_REG, - MAX310X_LSR_RXTO_BIT); - - /* Enable LSR, RX FIFO trigger, CTS change interrupts */ - val = MAX310X_IRQ_LSR_BIT | MAX310X_IRQ_RXFIFO_BIT | MAX310X_IRQ_TXEMPTY_BIT; + /* Enable RX, TX, CTS change interrupts */ + val = MAX310X_IRQ_RXEMPTY_BIT | MAX310X_IRQ_TXEMPTY_BIT; max310x_port_write(port, MAX310X_IRQEN_REG, val | MAX310X_IRQ_CTS_BIT); return 0; diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index 291649f02821..0d85b55ea823 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -1177,12 +1177,6 @@ static inline void qcom_geni_serial_enable_early_read(struct geni_se *se, struct console *con) { } #endif -static int qcom_geni_serial_earlycon_exit(struct console *con) -{ - geni_remove_earlycon_icc_vote(); - return 0; -} - static struct qcom_geni_private_data earlycon_private_data; static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev, @@ -1233,7 +1227,6 @@ static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev, writel(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN); dev->con->write = qcom_geni_serial_earlycon_write; - dev->con->exit = qcom_geni_serial_earlycon_exit; dev->con->setup = NULL; qcom_geni_serial_enable_early_read(&se, dev->con); diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 74733ec8f565..391bada4cedb 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -544,9 +544,7 @@ EXPORT_SYMBOL_GPL(tty_wakeup); * @tty: tty device * * This is available to the pty code so if the master closes, if the - * slave is a redirect it can release the redirect. It returns the - * filp for the redirect, which must be fput when the operations on - * the tty are completed. + * slave is a redirect it can release the redirect. */ struct file *tty_release_redirect(struct tty_struct *tty) { @@ -561,6 +559,7 @@ struct file *tty_release_redirect(struct tty_struct *tty) return f; } +EXPORT_SYMBOL_GPL(tty_release_redirect); /** * __tty_hangup - actual handler for hangup events diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c index f9170d177a89..5f0513c96c04 100644 --- a/drivers/usb/cdns3/cdnsp-ring.c +++ b/drivers/usb/cdns3/cdnsp-ring.c @@ -2197,7 +2197,10 @@ static int cdnsp_queue_isoc_tx(struct cdnsp_device *pdev, * inverted in the first TDs isoc TRB. */ field = TRB_TYPE(TRB_ISOC) | TRB_TLBPC(last_burst_pkt) | - start_cycle ? 0 : 1 | TRB_SIA | TRB_TBC(burst_count); + TRB_SIA | TRB_TBC(burst_count); + + if (!start_cycle) + field |= TRB_CYCLE; /* Fill the rest of the TRB fields, and remaining normal TRBs. 
*/ for (i = 0; i < trbs_per_td; i++) { diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 37f824b59daa..3fda1ec961d7 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -147,17 +147,29 @@ static inline int acm_set_control(struct acm *acm, int control) #define acm_send_break(acm, ms) \ acm_ctrl_msg(acm, USB_CDC_REQ_SEND_BREAK, ms, NULL, 0) -static void acm_kill_urbs(struct acm *acm) +static void acm_poison_urbs(struct acm *acm) { int i; - usb_kill_urb(acm->ctrlurb); + usb_poison_urb(acm->ctrlurb); for (i = 0; i < ACM_NW; i++) - usb_kill_urb(acm->wb[i].urb); + usb_poison_urb(acm->wb[i].urb); for (i = 0; i < acm->rx_buflimit; i++) - usb_kill_urb(acm->read_urbs[i]); + usb_poison_urb(acm->read_urbs[i]); +} + +static void acm_unpoison_urbs(struct acm *acm) +{ + int i; + + for (i = 0; i < acm->rx_buflimit; i++) + usb_unpoison_urb(acm->read_urbs[i]); + for (i = 0; i < ACM_NW; i++) + usb_unpoison_urb(acm->wb[i].urb); + usb_unpoison_urb(acm->ctrlurb); } + /* * Write buffer management. * All of these assume proper locks taken by the caller. @@ -226,9 +238,10 @@ static int acm_start_wb(struct acm *acm, struct acm_wb *wb) rc = usb_submit_urb(wb->urb, GFP_ATOMIC); if (rc < 0) { - dev_err(&acm->data->dev, - "%s - usb_submit_urb(write bulk) failed: %d\n", - __func__, rc); + if (rc != -EPERM) + dev_err(&acm->data->dev, + "%s - usb_submit_urb(write bulk) failed: %d\n", + __func__, rc); acm_write_done(acm, wb); } return rc; @@ -313,8 +326,10 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf) acm->iocount.dsr++; if (difference & ACM_CTRL_DCD) acm->iocount.dcd++; - if (newctrl & ACM_CTRL_BRK) + if (newctrl & ACM_CTRL_BRK) { acm->iocount.brk++; + tty_insert_flip_char(&acm->port, 0, TTY_BREAK); + } if (newctrl & ACM_CTRL_RI) acm->iocount.rng++; if (newctrl & ACM_CTRL_FRAMING) @@ -480,11 +495,6 @@ static void acm_read_bulk_callback(struct urb *urb) dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n", rb->index, urb->actual_length, status); - if (!acm->dev) { - dev_dbg(&acm->data->dev, "%s - disconnected\n", __func__); - return; - } - switch (status) { case 0: usb_mark_last_busy(acm->dev); @@ -649,7 +659,8 @@ static void acm_port_dtr_rts(struct tty_port *port, int raise) res = acm_set_control(acm, val); if (res && (acm->ctrl_caps & USB_CDC_CAP_LINE)) - dev_err(&acm->control->dev, "failed to set dtr/rts\n"); + /* This is broken in too many devices to spam the logs */ + dev_dbg(&acm->control->dev, "failed to set dtr/rts\n"); } static int acm_port_activate(struct tty_port *port, struct tty_struct *tty) @@ -731,6 +742,7 @@ static void acm_port_shutdown(struct tty_port *port) * Need to grab write_lock to prevent race with resume, but no need to * hold it due to the tty-port initialised flag. 
*/ + acm_poison_urbs(acm); spin_lock_irq(&acm->write_lock); spin_unlock_irq(&acm->write_lock); @@ -747,7 +759,8 @@ static void acm_port_shutdown(struct tty_port *port) usb_autopm_put_interface_async(acm->control); } - acm_kill_urbs(acm); + acm_unpoison_urbs(acm); + } static void acm_tty_cleanup(struct tty_struct *tty) @@ -1296,13 +1309,6 @@ skip_normal_probe: if (!combined_interfaces && intf != control_interface) return -ENODEV; - if (!combined_interfaces && usb_interface_claimed(data_interface)) { - /* valid in this context */ - dev_dbg(&intf->dev, "The data interface isn't available\n"); - return -EBUSY; - } - - if (data_interface->cur_altsetting->desc.bNumEndpoints < 2 || control_interface->cur_altsetting->desc.bNumEndpoints == 0) return -EINVAL; @@ -1323,8 +1329,8 @@ made_compressed_probe: dev_dbg(&intf->dev, "interfaces are valid\n"); acm = kzalloc(sizeof(struct acm), GFP_KERNEL); - if (acm == NULL) - goto alloc_fail; + if (!acm) + return -ENOMEM; tty_port_init(&acm->port); acm->port.ops = &acm_port_ops; @@ -1341,7 +1347,7 @@ made_compressed_probe: minor = acm_alloc_minor(acm); if (minor < 0) - goto alloc_fail1; + goto err_put_port; acm->minor = minor; acm->dev = usb_dev; @@ -1372,15 +1378,15 @@ made_compressed_probe: buf = usb_alloc_coherent(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma); if (!buf) - goto alloc_fail1; + goto err_put_port; acm->ctrl_buffer = buf; if (acm_write_buffers_alloc(acm) < 0) - goto alloc_fail2; + goto err_free_ctrl_buffer; acm->ctrlurb = usb_alloc_urb(0, GFP_KERNEL); if (!acm->ctrlurb) - goto alloc_fail3; + goto err_free_write_buffers; for (i = 0; i < num_rx_buf; i++) { struct acm_rb *rb = &(acm->read_buffers[i]); @@ -1389,13 +1395,13 @@ made_compressed_probe: rb->base = usb_alloc_coherent(acm->dev, readsize, GFP_KERNEL, &rb->dma); if (!rb->base) - goto alloc_fail4; + goto err_free_read_urbs; rb->index = i; rb->instance = acm; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) - goto alloc_fail4; + goto err_free_read_urbs; urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; urb->transfer_dma = rb->dma; @@ -1416,8 +1422,8 @@ made_compressed_probe: struct acm_wb *snd = &(acm->wb[i]); snd->urb = usb_alloc_urb(0, GFP_KERNEL); - if (snd->urb == NULL) - goto alloc_fail5; + if (!snd->urb) + goto err_free_write_urbs; if (usb_endpoint_xfer_int(epwrite)) usb_fill_int_urb(snd->urb, usb_dev, acm->out, @@ -1435,7 +1441,7 @@ made_compressed_probe: i = device_create_file(&intf->dev, &dev_attr_bmCapabilities); if (i < 0) - goto alloc_fail5; + goto err_free_write_urbs; if (h.usb_cdc_country_functional_desc) { /* export the country data */ struct usb_cdc_country_functional_desc * cfd = @@ -1480,20 +1486,21 @@ skip_countries: acm->nb_index = 0; acm->nb_size = 0; - dev_info(&intf->dev, "ttyACM%d: USB ACM device\n", minor); - acm->line.dwDTERate = cpu_to_le32(9600); acm->line.bDataBits = 8; acm_set_line(acm, &acm->line); - usb_driver_claim_interface(&acm_driver, data_interface, acm); - usb_set_intfdata(data_interface, acm); + if (!acm->combined_interfaces) { + rv = usb_driver_claim_interface(&acm_driver, data_interface, acm); + if (rv) + goto err_remove_files; + } tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor, &control_interface->dev); if (IS_ERR(tty_dev)) { rv = PTR_ERR(tty_dev); - goto alloc_fail6; + goto err_release_data_interface; } if (quirks & CLEAR_HALT_CONDITIONS) { @@ -1501,32 +1508,39 @@ skip_countries: usb_clear_halt(usb_dev, acm->out); } + dev_info(&intf->dev, "ttyACM%d: USB ACM device\n", minor); + return 0; -alloc_fail6: + +err_release_data_interface: 
+ if (!acm->combined_interfaces) { + /* Clear driver data so that disconnect() returns early. */ + usb_set_intfdata(data_interface, NULL); + usb_driver_release_interface(&acm_driver, data_interface); + } +err_remove_files: if (acm->country_codes) { device_remove_file(&acm->control->dev, &dev_attr_wCountryCodes); device_remove_file(&acm->control->dev, &dev_attr_iCountryCodeRelDate); - kfree(acm->country_codes); } device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities); -alloc_fail5: - usb_set_intfdata(intf, NULL); +err_free_write_urbs: for (i = 0; i < ACM_NW; i++) usb_free_urb(acm->wb[i].urb); -alloc_fail4: +err_free_read_urbs: for (i = 0; i < num_rx_buf; i++) usb_free_urb(acm->read_urbs[i]); acm_read_buffers_free(acm); usb_free_urb(acm->ctrlurb); -alloc_fail3: +err_free_write_buffers: acm_write_buffers_free(acm); -alloc_fail2: +err_free_ctrl_buffer: usb_free_coherent(usb_dev, ctrlsize, acm->ctrl_buffer, acm->ctrl_dma); -alloc_fail1: +err_put_port: tty_port_put(&acm->port); -alloc_fail: + return rv; } @@ -1540,8 +1554,14 @@ static void acm_disconnect(struct usb_interface *intf) if (!acm) return; - mutex_lock(&acm->mutex); acm->disconnected = true; + /* + * there is a circular dependency. acm_softint() can resubmit + * the URBs in error handling so we need to block any + * submission right away + */ + acm_poison_urbs(acm); + mutex_lock(&acm->mutex); if (acm->country_codes) { device_remove_file(&acm->control->dev, &dev_attr_wCountryCodes); @@ -1560,7 +1580,6 @@ static void acm_disconnect(struct usb_interface *intf) tty_kref_put(tty); } - acm_kill_urbs(acm); cancel_delayed_work_sync(&acm->dwork); tty_unregister_device(acm_tty_driver, acm->minor); @@ -1602,7 +1621,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message) if (cnt) return 0; - acm_kill_urbs(acm); + acm_poison_urbs(acm); cancel_delayed_work_sync(&acm->dwork); acm->urbs_in_error_delay = 0; @@ -1615,6 +1634,7 @@ static int acm_resume(struct usb_interface *intf) struct urb *urb; int rv = 0; + acm_unpoison_urbs(acm); spin_lock_irq(&acm->write_lock); if (--acm->susp_count) @@ -1935,6 +1955,11 @@ static const struct usb_device_id acm_ids[] = { .driver_info = SEND_ZERO_PACKET, }, + /* Exclude Goodix Fingerprint Reader */ + { USB_DEVICE(0x27c6, 0x5395), + .driver_info = IGNORE_DEVICE, + }, + /* control interfaces without any protocol set */ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, USB_CDC_PROTO_NONE) }, diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c index c9f6e9758288..f27b4aecff3d 100644 --- a/drivers/usb/class/usblp.c +++ b/drivers/usb/class/usblp.c @@ -494,16 +494,24 @@ static int usblp_release(struct inode *inode, struct file *file) /* No kernel lock - fine */ static __poll_t usblp_poll(struct file *file, struct poll_table_struct *wait) { - __poll_t ret; + struct usblp *usblp = file->private_data; + __poll_t ret = 0; unsigned long flags; - struct usblp *usblp = file->private_data; /* Should we check file->f_mode & FMODE_WRITE before poll_wait()? */ poll_wait(file, &usblp->rwait, wait); poll_wait(file, &usblp->wwait, wait); + + mutex_lock(&usblp->mut); + if (!usblp->present) + ret |= EPOLLHUP; + mutex_unlock(&usblp->mut); + spin_lock_irqsave(&usblp->lock, flags); - ret = ((usblp->bidir && usblp->rcomplete) ? EPOLLIN | EPOLLRDNORM : 0) | - ((usblp->no_paper || usblp->wcomplete) ? 
EPOLLOUT | EPOLLWRNORM : 0); + if (usblp->bidir && usblp->rcomplete) + ret |= EPOLLIN | EPOLLRDNORM; + if (usblp->no_paper || usblp->wcomplete) + ret |= EPOLLOUT | EPOLLWRNORM; spin_unlock_irqrestore(&usblp->lock, flags); return ret; } diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 6ade3daf7858..76ac5d6555ae 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -498,6 +498,10 @@ static const struct usb_device_id usb_quirk_list[] = { /* DJI CineSSD */ { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM }, + /* Fibocom L850-GL LTE Modem */ + { USB_DEVICE(0x2cb7, 0x0007), .driver_info = + USB_QUIRK_IGNORE_REMOTE_WAKEUP }, + /* INTEL VALUE SSD */ { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 8f07b0516100..a566bb494e24 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c @@ -748,6 +748,38 @@ void usb_put_intf(struct usb_interface *intf) } EXPORT_SYMBOL_GPL(usb_put_intf); +/** + * usb_intf_get_dma_device - acquire a reference on the usb interface's DMA endpoint + * @intf: the usb interface + * + * While a USB device cannot perform DMA operations by itself, many USB + * controllers can. A call to usb_intf_get_dma_device() returns the DMA endpoint + * for the given USB interface, if any. The returned device structure must be + * released with put_device(). + * + * See also usb_get_dma_device(). + * + * Returns: A reference to the usb interface's DMA endpoint; or NULL if none + * exists. + */ +struct device *usb_intf_get_dma_device(struct usb_interface *intf) +{ + struct usb_device *udev = interface_to_usbdev(intf); + struct device *dmadev; + + if (!udev->bus) + return NULL; + + dmadev = get_device(udev->bus->sysdev); + if (!dmadev || !dmadev->dma_mask) { + put_device(dmadev); + return NULL; + } + + return dmadev; +} +EXPORT_SYMBOL_GPL(usb_intf_get_dma_device); + /* USB device locking * * USB devices and interfaces are locked using the semaphore in their diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index fc3269f5faf1..1a9789ec5847 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -4322,7 +4322,8 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd) if (hsotg->op_state == OTG_STATE_B_PERIPHERAL) goto unlock; - if (hsotg->params.power_down > DWC2_POWER_DOWN_PARAM_PARTIAL) + if (hsotg->params.power_down != DWC2_POWER_DOWN_PARAM_PARTIAL || + hsotg->flags.b.port_connect_status == 0) goto skip_power_saving; /* @@ -5398,7 +5399,7 @@ int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg) dwc2_writel(hsotg, hprt0, HPRT0); /* Wait for the HPRT0.PrtSusp register field to be set */ - if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 3000)) + if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 5000)) dev_warn(hsotg->dev, "Suspend wasn't generated\n"); /* diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 3d3918a8d5fb..4c5c6972124a 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -120,6 +120,8 @@ static const struct property_entry dwc3_pci_intel_properties[] = { static const struct property_entry dwc3_pci_mrfld_properties[] = { PROPERTY_ENTRY_STRING("dr_mode", "otg"), PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"), + PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"), + PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"), PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"), {} }; diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c 
index 846a47be6df7..3de291ab951a 100644 --- a/drivers/usb/dwc3/dwc3-qcom.c +++ b/drivers/usb/dwc3/dwc3-qcom.c @@ -244,6 +244,9 @@ static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom) struct device *dev = qcom->dev; int ret; + if (has_acpi_companion(dev)) + return 0; + qcom->icc_path_ddr = of_icc_get(dev, "usb-ddr"); if (IS_ERR(qcom->icc_path_ddr)) { dev_err(dev, "failed to get usb-ddr path: %ld\n", @@ -358,8 +361,10 @@ static int dwc3_qcom_suspend(struct dwc3_qcom *qcom) if (ret) dev_warn(qcom->dev, "failed to disable interconnect: %d\n", ret); + if (device_may_wakeup(qcom->dev)) + dwc3_qcom_enable_interrupts(qcom); + qcom->is_suspended = true; - dwc3_qcom_enable_interrupts(qcom); return 0; } @@ -372,7 +377,8 @@ static int dwc3_qcom_resume(struct dwc3_qcom *qcom) if (!qcom->is_suspended) return 0; - dwc3_qcom_disable_interrupts(qcom); + if (device_may_wakeup(qcom->dev)) + dwc3_qcom_disable_interrupts(qcom); for (i = 0; i < qcom->num_clocks; i++) { ret = clk_prepare_enable(qcom->clks[i]); @@ -650,16 +656,19 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev) ret = of_platform_populate(np, NULL, NULL, dev); if (ret) { dev_err(dev, "failed to register dwc3 core - %d\n", ret); - return ret; + goto node_put; } qcom->dwc3 = of_find_device_by_node(dwc3_np); if (!qcom->dwc3) { + ret = -ENODEV; dev_err(dev, "failed to get dwc3 platform device\n"); - return -ENODEV; } - return 0; +node_put: + of_node_put(dwc3_np); + + return ret; } static struct platform_device * @@ -938,6 +947,8 @@ static const struct dwc3_acpi_pdata sdm845_acpi_urs_pdata = { static const struct acpi_device_id dwc3_qcom_acpi_match[] = { { "QCOM2430", (unsigned long)&sdm845_acpi_pdata }, { "QCOM0304", (unsigned long)&sdm845_acpi_urs_pdata }, + { "QCOM0497", (unsigned long)&sdm845_acpi_urs_pdata }, + { "QCOM04A6", (unsigned long)&sdm845_acpi_pdata }, { }, }; MODULE_DEVICE_TABLE(acpi, dwc3_qcom_acpi_match); diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index aebcf8ec0716..c7ef218e7a8c 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -783,8 +783,6 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) trace_dwc3_gadget_ep_disable(dep); - dwc3_remove_requests(dwc, dep); - /* make sure HW endpoint isn't stalled */ if (dep->flags & DWC3_EP_STALL) __dwc3_gadget_ep_set_halt(dep, 0, false); @@ -793,16 +791,18 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) reg &= ~DWC3_DALEPENA_EP(dep->number); dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); - dep->stream_capable = false; - dep->type = 0; - dep->flags = 0; - /* Clear out the ep descriptors for non-ep0 */ if (dep->number > 1) { dep->endpoint.comp_desc = NULL; dep->endpoint.desc = NULL; } + dwc3_remove_requests(dwc, dep); + + dep->stream_capable = false; + dep->type = 0; + dep->flags = 0; + return 0; } @@ -1617,7 +1617,7 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) { struct dwc3 *dwc = dep->dwc; - if (!dep->endpoint.desc || !dwc->pullups_connected) { + if (!dep->endpoint.desc || !dwc->pullups_connected || !dwc->connected) { dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n", dep->name); return -ESHUTDOWN; @@ -2083,7 +2083,7 @@ static void __dwc3_gadget_set_speed(struct dwc3 *dwc) u32 reg; speed = dwc->gadget_max_speed; - if (speed > dwc->maximum_speed) + if (speed == USB_SPEED_UNKNOWN || speed > dwc->maximum_speed) speed = dwc->maximum_speed; if (speed == USB_SPEED_SUPER_PLUS && @@ -2247,6 +2247,7 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int 
is_on) if (!is_on) { u32 count; + dwc->connected = false; /* * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a * Section 4.1.8 Table 4-7, it states that for a device-initiated @@ -2271,7 +2272,6 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) dwc->ev_buf->lpos = (dwc->ev_buf->lpos + count) % dwc->ev_buf->length; } - dwc->connected = false; } else { __dwc3_gadget_start(dwc); } @@ -2523,6 +2523,7 @@ static void dwc3_gadget_set_ssp_rate(struct usb_gadget *g, unsigned long flags; spin_lock_irqsave(&dwc->lock, flags); + dwc->gadget_max_speed = USB_SPEED_SUPER_PLUS; dwc->gadget_ssp_rate = rate; spin_unlock_irqrestore(&dwc->lock, flags); } @@ -3321,8 +3322,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc) { u32 reg; - dwc->connected = true; - /* * WORKAROUND: DWC3 revisions <1.88a have an issue which * would cause a missing Disconnect Event if there's a @@ -3362,6 +3361,7 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc) * transfers." */ dwc3_stop_active_transfers(dwc); + dwc->connected = true; reg = dwc3_readl(dwc->regs, DWC3_DCTL); reg &= ~DWC3_DCTL_TSTCTRL_MASK; diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 0d56f33d63c2..15a607ccef8a 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -97,6 +97,8 @@ struct gadget_config_name { struct list_head list; }; +#define USB_MAX_STRING_WITH_NULL_LEN (USB_MAX_STRING_LEN+1) + static int usb_string_copy(const char *s, char **s_copy) { int ret; @@ -106,12 +108,16 @@ static int usb_string_copy(const char *s, char **s_copy) if (ret > USB_MAX_STRING_LEN) return -EOVERFLOW; - str = kstrdup(s, GFP_KERNEL); - if (!str) - return -ENOMEM; + if (copy) { + str = copy; + } else { + str = kmalloc(USB_MAX_STRING_WITH_NULL_LEN, GFP_KERNEL); + if (!str) + return -ENOMEM; + } + strcpy(str, s); if (str[ret - 1] == '\n') str[ret - 1] = '\0'; - kfree(copy); *s_copy = str; return 0; } diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c index 00d346965f7a..560382e0a8f3 100644 --- a/drivers/usb/gadget/function/f_uac1.c +++ b/drivers/usb/gadget/function/f_uac1.c @@ -499,6 +499,7 @@ static void f_audio_disable(struct usb_function *f) uac1->as_out_alt = 0; uac1->as_in_alt = 0; + u_audio_stop_playback(&uac1->g_audio); u_audio_stop_capture(&uac1->g_audio); } diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c index 5d960b6603b6..6f03e944e0e3 100644 --- a/drivers/usb/gadget/function/f_uac2.c +++ b/drivers/usb/gadget/function/f_uac2.c @@ -478,7 +478,7 @@ static int set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts, } max_size_bw = num_channels(chmask) * ssize * - DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1))); + ((srate / (factor / (1 << (ep_desc->bInterval - 1)))) + 1); ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_size_bw, max_size_ep)); diff --git a/drivers/usb/gadget/function/u_ether_configfs.h b/drivers/usb/gadget/function/u_ether_configfs.h index 3dfb460908fa..f558c3139ebe 100644 --- a/drivers/usb/gadget/function/u_ether_configfs.h +++ b/drivers/usb/gadget/function/u_ether_configfs.h @@ -182,12 +182,11 @@ out: \ size_t len) \ { \ struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \ - int ret; \ + int ret = -EINVAL; \ u8 val; \ \ mutex_lock(&opts->lock); \ - ret = sscanf(page, "%02hhx", &val); \ - if (ret > 0) { \ + if (sscanf(page, "%02hhx", &val) > 0) { \ opts->_n_ = val; \ ret = len; \ } \ diff --git a/drivers/usb/gadget/udc/amd5536udc_pci.c 
b/drivers/usb/gadget/udc/amd5536udc_pci.c index 8d387e0e4d91..c80f9bd51b75 100644 --- a/drivers/usb/gadget/udc/amd5536udc_pci.c +++ b/drivers/usb/gadget/udc/amd5536udc_pci.c @@ -153,6 +153,11 @@ static int udc_pci_probe( pci_set_master(pdev); pci_try_set_mwi(pdev); + dev->phys_addr = resource; + dev->irq = pdev->irq; + dev->pdev = pdev; + dev->dev = &pdev->dev; + /* init dma pools */ if (use_dma) { retval = init_dma_pools(dev); @@ -160,11 +165,6 @@ static int udc_pci_probe( goto err_dma; } - dev->phys_addr = resource; - dev->irq = pdev->irq; - dev->pdev = pdev; - dev->dev = &pdev->dev; - /* general probing */ if (udc_probe(dev)) { retval = -ENODEV; diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c index f1ea51476add..1d3ebb07ccd4 100644 --- a/drivers/usb/gadget/udc/s3c2410_udc.c +++ b/drivers/usb/gadget/udc/s3c2410_udc.c @@ -1773,8 +1773,8 @@ static int s3c2410_udc_probe(struct platform_device *pdev) udc_info = dev_get_platdata(&pdev->dev); base_addr = devm_platform_ioremap_resource(pdev, 0); - if (!base_addr) { - retval = -ENOMEM; + if (IS_ERR(base_addr)) { + retval = PTR_ERR(base_addr); goto err_mem; } diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c index fe010cc61f19..2f27dc0d9c6b 100644 --- a/drivers/usb/host/xhci-mtk.c +++ b/drivers/usb/host/xhci-mtk.c @@ -397,6 +397,13 @@ static void xhci_mtk_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_SPURIOUS_SUCCESS; if (mtk->lpm_support) xhci->quirks |= XHCI_LPM_SUPPORT; + + /* + * MTK xHCI 0.96: PSA is 1 by default even if doesn't support stream, + * and it's 3 when support it. + */ + if (xhci->hci_version < 0x100 && HCC_MAX_PSA(xhci->hcc_params) == 4) + xhci->quirks |= XHCI_BROKEN_STREAMS; } /* called during probe() after chip reset completes */ @@ -548,7 +555,8 @@ static int xhci_mtk_probe(struct platform_device *pdev) if (ret) goto put_usb3_hcd; - if (HCC_MAX_PSA(xhci->hcc_params) >= 4) + if (HCC_MAX_PSA(xhci->hcc_params) >= 4 && + !(xhci->quirks & XHCI_BROKEN_STREAMS)) xhci->shared_hcd->can_do_streams = 1; ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED); diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 84da8406d5b4..5bbccc9a0179 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -66,6 +66,7 @@ #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 #define PCI_DEVICE_ID_ASMEDIA_1142_XHCI 0x1242 #define PCI_DEVICE_ID_ASMEDIA_2142_XHCI 0x2142 +#define PCI_DEVICE_ID_ASMEDIA_3242_XHCI 0x3242 static const char hcd_name[] = "xhci_hcd"; @@ -276,11 +277,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) xhci->quirks |= XHCI_BROKEN_STREAMS; if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && - pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) + pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) { xhci->quirks |= XHCI_TRUST_TX_LENGTH; + xhci->quirks |= XHCI_NO_64BIT_SUPPORT; + } if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && (pdev->device == PCI_DEVICE_ID_ASMEDIA_1142_XHCI || - pdev->device == PCI_DEVICE_ID_ASMEDIA_2142_XHCI)) + pdev->device == PCI_DEVICE_ID_ASMEDIA_2142_XHCI || + pdev->device == PCI_DEVICE_ID_ASMEDIA_3242_XHCI)) xhci->quirks |= XHCI_NO_64BIT_SUPPORT; if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && @@ -295,6 +299,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == 0x9026) xhci->quirks |= XHCI_RESET_PLL_ON_DISCONNECT; + if (pdev->vendor == PCI_VENDOR_ID_AMD && + (pdev->device == 
PCI_DEVICE_ID_AMD_PROMONTORYA_2 || + pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4)) + xhci->quirks |= XHCI_NO_SOFT_RETRY; + if (xhci->quirks & XHCI_RESET_ON_RESUME) xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, "QUIRK: Resetting on resume"); diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 5e548a1c93ab..ce38076901e2 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -2484,7 +2484,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, remaining = 0; break; case COMP_USB_TRANSACTION_ERROR: - if ((ep_ring->err_count++ > MAX_SOFT_RETRY) || + if (xhci->quirks & XHCI_NO_SOFT_RETRY || + (ep_ring->err_count++ > MAX_SOFT_RETRY) || le32_to_cpu(slot_ctx->tt_info) & TT_SLOT) break; diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index bd27bd670104..1975016f46bf 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -883,44 +883,42 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci) xhci_set_cmd_ring_deq(xhci); } -static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci) +/* + * Disable port wake bits if do_wakeup is not set. + * + * Also clear a possible internal port wake state left hanging for ports that + * detected termination but never successfully enumerated (trained to 0U). + * Internal wake causes immediate xHCI wake after suspend. PORT_CSC write done + * at enumeration clears this wake, force one here as well for unconnected ports + */ + +static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci, + struct xhci_hub *rhub, + bool do_wakeup) { - struct xhci_port **ports; - int port_index; unsigned long flags; u32 t1, t2, portsc; + int i; spin_lock_irqsave(&xhci->lock, flags); - /* disable usb3 ports Wake bits */ - port_index = xhci->usb3_rhub.num_ports; - ports = xhci->usb3_rhub.ports; - while (port_index--) { - t1 = readl(ports[port_index]->addr); - portsc = t1; - t1 = xhci_port_state_to_neutral(t1); - t2 = t1 & ~PORT_WAKE_BITS; - if (t1 != t2) { - writel(t2, ports[port_index]->addr); - xhci_dbg(xhci, "disable wake bits port %d-%d, portsc: 0x%x, write: 0x%x\n", - xhci->usb3_rhub.hcd->self.busnum, - port_index + 1, portsc, t2); - } - } + for (i = 0; i < rhub->num_ports; i++) { + portsc = readl(rhub->ports[i]->addr); + t1 = xhci_port_state_to_neutral(portsc); + t2 = t1; + + /* clear wake bits if do_wake is not set */ + if (!do_wakeup) + t2 &= ~PORT_WAKE_BITS; + + /* Don't touch csc bit if connected or connect change is set */ + if (!(portsc & (PORT_CSC | PORT_CONNECT))) + t2 |= PORT_CSC; - /* disable usb2 ports Wake bits */ - port_index = xhci->usb2_rhub.num_ports; - ports = xhci->usb2_rhub.ports; - while (port_index--) { - t1 = readl(ports[port_index]->addr); - portsc = t1; - t1 = xhci_port_state_to_neutral(t1); - t2 = t1 & ~PORT_WAKE_BITS; if (t1 != t2) { - writel(t2, ports[port_index]->addr); - xhci_dbg(xhci, "disable wake bits port %d-%d, portsc: 0x%x, write: 0x%x\n", - xhci->usb2_rhub.hcd->self.busnum, - port_index + 1, portsc, t2); + writel(t2, rhub->ports[i]->addr); + xhci_dbg(xhci, "config port %d-%d wake bits, portsc: 0x%x, write: 0x%x\n", + rhub->hcd->self.busnum, i + 1, portsc, t2); } } spin_unlock_irqrestore(&xhci->lock, flags); @@ -983,8 +981,8 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) return -EINVAL; /* Clear root port wake on bits if wakeup not allowed. 
*/ - if (!do_wakeup) - xhci_disable_port_wake_on_bits(xhci); + xhci_disable_hub_port_wake(xhci, &xhci->usb3_rhub, do_wakeup); + xhci_disable_hub_port_wake(xhci, &xhci->usb2_rhub, do_wakeup); if (!HCD_HW_ACCESSIBLE(hcd)) return 0; @@ -1088,6 +1086,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) struct usb_hcd *secondary_hcd; int retval = 0; bool comp_timer_running = false; + bool pending_portevent = false; if (!hcd->state) return 0; @@ -1226,13 +1225,22 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) done: if (retval == 0) { - /* Resume root hubs only when have pending events. */ - if (xhci_pending_portevent(xhci)) { + /* + * Resume roothubs only if there are pending events. + * USB 3 devices resend U3 LFPS wake after a 100ms delay if + * the first wake signalling failed, give it that chance. + */ + pending_portevent = xhci_pending_portevent(xhci); + if (!pending_portevent) { + msleep(120); + pending_portevent = xhci_pending_portevent(xhci); + } + + if (pending_portevent) { usb_hcd_resume_root_hub(xhci->shared_hcd); usb_hcd_resume_root_hub(hcd); } } - /* * If system is subject to the Quirk, Compliance Mode Timer needs to * be re-initialized Always after a system resume. Ports are subject diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index d41de5dc0452..ca822ad3b65b 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1891,6 +1891,7 @@ struct xhci_hcd { #define XHCI_SKIP_PHY_INIT BIT_ULL(37) #define XHCI_DISABLE_SPARSE BIT_ULL(38) #define XHCI_SG_TRB_CACHE_SIZE_QUIRK BIT_ULL(39) +#define XHCI_NO_SOFT_RETRY BIT_ULL(40) unsigned int num_active_eps; unsigned int limit_active_eps; diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c index 670e4d91e9ca..dcc88df72df4 100644 --- a/drivers/usb/misc/ldusb.c +++ b/drivers/usb/misc/ldusb.c @@ -117,7 +117,6 @@ MODULE_DEVICE_TABLE(usb, ld_usb_table); MODULE_AUTHOR("Michael Hund <mhund@ld-didactic.de>"); MODULE_DESCRIPTION("LD USB Driver"); MODULE_LICENSE("GPL"); -MODULE_SUPPORTED_DEVICE("LD USB Devices"); /* All interrupt in transfers are collected in a ring buffer to * avoid racing conditions and get better performance of the driver. 
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 1cd87729ba60..fc0457db62e1 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -2004,10 +2004,14 @@ static void musb_pm_runtime_check_session(struct musb *musb) MUSB_DEVCTL_HR; switch (devctl & ~s) { case MUSB_QUIRK_B_DISCONNECT_99: - musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n"); - schedule_delayed_work(&musb->irq_work, - msecs_to_jiffies(1000)); - break; + if (musb->quirk_retries && !musb->flush_irq_work) { + musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n"); + schedule_delayed_work(&musb->irq_work, + msecs_to_jiffies(1000)); + musb->quirk_retries--; + break; + } + fallthrough; case MUSB_QUIRK_B_INVALID_VBUS_91: if (musb->quirk_retries && !musb->flush_irq_work) { musb_dbg(musb, diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c index e7334b7fb3a6..75fff2e4cbc6 100644 --- a/drivers/usb/renesas_usbhs/pipe.c +++ b/drivers/usb/renesas_usbhs/pipe.c @@ -746,6 +746,8 @@ struct usbhs_pipe *usbhs_pipe_malloc(struct usbhs_priv *priv, void usbhs_pipe_free(struct usbhs_pipe *pipe) { + usbhsp_pipe_select(pipe); + usbhsp_pipe_cfg_set(pipe, 0xFFFF, 0); usbhsp_put_pipe(pipe); } diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index 8d997b71056f..2db917eab799 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -86,6 +86,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1a86, 0x7522) }, { USB_DEVICE(0x1a86, 0x7523) }, { USB_DEVICE(0x4348, 0x5523) }, + { USB_DEVICE(0x9986, 0x7523) }, { }, }; MODULE_DEVICE_TABLE(usb, id_table); diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 9e1c609792fb..a373cd63b3a4 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -145,6 +145,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */ { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ + { USB_DEVICE(0x10C4, 0x88D8) }, /* Acuity Brands nLight Air Adapter */ { USB_DEVICE(0x10C4, 0x88FB) }, /* CESINEL MEDCAL STII Network Analyzer */ { USB_DEVICE(0x10C4, 0x8938) }, /* CESINEL MEDCAL S II Network Analyzer */ { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ @@ -201,6 +202,8 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */ { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */ { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */ + { USB_DEVICE(0x1901, 0x0197) }, /* GE CS1000 Display serial interface */ + { USB_DEVICE(0x1901, 0x0198) }, /* GE CS1000 M.2 Key E serial interface */ { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */ { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index a493670c06e6..68401adcffde 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c @@ -3003,26 +3003,32 @@ static int edge_startup(struct usb_serial *serial) response = -ENODEV; } - usb_free_urb(edge_serial->interrupt_read_urb); - kfree(edge_serial->interrupt_in_buffer); - - usb_free_urb(edge_serial->read_urb); - kfree(edge_serial->bulk_in_buffer); - - 
kfree(edge_serial); - - return response; + goto error; } /* start interrupt read for this edgeport this interrupt will * continue as long as the edgeport is connected */ response = usb_submit_urb(edge_serial->interrupt_read_urb, GFP_KERNEL); - if (response) + if (response) { dev_err(ddev, "%s - Error %d submitting control urb\n", __func__, response); + + goto error; + } } return response; + +error: + usb_free_urb(edge_serial->interrupt_read_urb); + kfree(edge_serial->interrupt_in_buffer); + + usb_free_urb(edge_serial->read_urb); + kfree(edge_serial->bulk_in_buffer); + + kfree(edge_serial); + + return response; } diff --git a/drivers/usb/serial/xr_serial.c b/drivers/usb/serial/xr_serial.c index 483d07dee19d..0ca04906da4b 100644 --- a/drivers/usb/serial/xr_serial.c +++ b/drivers/usb/serial/xr_serial.c @@ -545,37 +545,13 @@ static void xr_close(struct usb_serial_port *port) static int xr_probe(struct usb_serial *serial, const struct usb_device_id *id) { - struct usb_driver *driver = serial->type->usb_driver; - struct usb_interface *control_interface; - int ret; - /* Don't bind to control interface */ if (serial->interface->cur_altsetting->desc.bInterfaceNumber == 0) return -ENODEV; - /* But claim the control interface during data interface probe */ - control_interface = usb_ifnum_to_if(serial->dev, 0); - if (!control_interface) - return -ENODEV; - - ret = usb_driver_claim_interface(driver, control_interface, NULL); - if (ret) { - dev_err(&serial->interface->dev, "Failed to claim control interface\n"); - return ret; - } - return 0; } -static void xr_disconnect(struct usb_serial *serial) -{ - struct usb_driver *driver = serial->type->usb_driver; - struct usb_interface *control_interface; - - control_interface = usb_ifnum_to_if(serial->dev, 0); - usb_driver_release_interface(driver, control_interface); -} - static const struct usb_device_id id_table[] = { { USB_DEVICE(0x04e2, 0x1410) }, /* XR21V141X */ { } @@ -590,7 +566,6 @@ static struct usb_serial_driver xr_device = { .id_table = id_table, .num_ports = 1, .probe = xr_probe, - .disconnect = xr_disconnect, .open = xr_open, .close = xr_close, .break_ctl = xr_break_ctl, diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c index 5eb895b19c55..f4304ce69350 100644 --- a/drivers/usb/storage/transport.c +++ b/drivers/usb/storage/transport.c @@ -656,6 +656,13 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us) need_auto_sense = 1; } + /* Some devices (Kindle) require another command after SYNC CACHE */ + if ((us->fflags & US_FL_SENSE_AFTER_SYNC) && + srb->cmnd[0] == SYNCHRONIZE_CACHE) { + usb_stor_dbg(us, "-- sense after SYNC CACHE\n"); + need_auto_sense = 1; + } + /* * If we have a failure, we're going to do a REQUEST_SENSE * automatically. Note that we differentiate between a command diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 5732e9691f08..efa972be2ee3 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -2212,6 +2212,18 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200, US_FL_NO_READ_DISC_INFO ), /* + * Reported by Matthias Schwarzott <zzam@gentoo.org> + * The Amazon Kindle treats SYNCHRONIZE CACHE as an indication that + * the host may be finished with it, and automatically ejects its + * emulated media unless it receives another command within one second. 
+ */ +UNUSUAL_DEV( 0x1949, 0x0004, 0x0000, 0x9999, + "Amazon", + "Kindle", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_SENSE_AFTER_SYNC ), + +/* * Reported by Oliver Neukum <oneukum@suse.com> * This device morphes spontaneously into another device if the access * pattern of Windows isn't followed. Thus writable media would be dirty diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index be0b6469dd3d..ce7af398c7c1 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -942,6 +942,7 @@ static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv) port->supply_voltage = mv; port->current_limit = max_ma; + power_supply_changed(port->psy); if (port->tcpc->set_current_limit) ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv); @@ -2928,6 +2929,7 @@ static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo, port->pps_data.supported = false; port->usb_type = POWER_SUPPLY_USB_TYPE_PD; + power_supply_changed(port->psy); /* * Select the source PDO providing the most power which has a @@ -2952,6 +2954,7 @@ static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo, port->pps_data.supported = true; port->usb_type = POWER_SUPPLY_USB_TYPE_PD_PPS; + power_supply_changed(port->psy); } continue; default: @@ -3109,6 +3112,7 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port) port->pps_data.out_volt)); port->pps_data.op_curr = min(port->pps_data.max_curr, port->pps_data.op_curr); + power_supply_changed(port->psy); } return src_pdo; @@ -3344,6 +3348,7 @@ static int tcpm_set_charge(struct tcpm_port *port, bool charge) return ret; } port->vbus_charge = charge; + power_supply_changed(port->psy); return 0; } @@ -3523,6 +3528,7 @@ static void tcpm_reset_port(struct tcpm_port *port) port->try_src_count = 0; port->try_snk_count = 0; port->usb_type = POWER_SUPPLY_USB_TYPE_C; + power_supply_changed(port->psy); port->nr_sink_caps = 0; port->sink_cap_done = false; if (port->tcpc->enable_frs) @@ -5167,7 +5173,7 @@ static void tcpm_enable_frs_work(struct kthread_work *work) goto unlock; /* Send when the state machine is idle */ - if (port->state != SNK_READY || port->vdm_state != VDM_STATE_DONE || port->send_discover) + if (port->state != SNK_READY || port->vdm_sm_running || port->send_discover) goto resched; port->upcoming_state = GET_SINK_CAP; @@ -5905,7 +5911,7 @@ static int tcpm_psy_set_prop(struct power_supply *psy, ret = -EINVAL; break; } - + power_supply_changed(port->psy); return ret; } @@ -6058,6 +6064,7 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc) err = devm_tcpm_psy_register(port); if (err) goto out_role_sw_put; + power_supply_changed(port->psy); port->typec_port = typec_register_port(port->dev, &port->typec_caps); if (IS_ERR(port->typec_port)) { diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c index 6e6ef6317523..29bd1c5a283c 100644 --- a/drivers/usb/typec/tps6598x.c +++ b/drivers/usb/typec/tps6598x.c @@ -64,7 +64,6 @@ enum { struct tps6598x_rx_identity_reg { u8 status; struct usb_pd_identity identity; - u32 vdo[3]; } __packed; /* Standard Task return codes */ diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c index 2305d425e6c9..8f1de1fbbeed 100644 --- a/drivers/usb/usbip/stub_dev.c +++ b/drivers/usb/usbip/stub_dev.c @@ -46,6 +46,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a int sockfd = 0; struct socket *socket; int rv; + struct task_struct *tcp_rx = NULL; + struct 
task_struct *tcp_tx = NULL; if (!sdev) { dev_err(dev, "sdev is null\n"); @@ -69,23 +71,47 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a } socket = sockfd_lookup(sockfd, &err); - if (!socket) + if (!socket) { + dev_err(dev, "failed to lookup sock"); goto err; + } - sdev->ud.tcp_socket = socket; - sdev->ud.sockfd = sockfd; + if (socket->type != SOCK_STREAM) { + dev_err(dev, "Expecting SOCK_STREAM - found %d", + socket->type); + goto sock_err; + } + /* unlock and create threads and get tasks */ spin_unlock_irq(&sdev->ud.lock); + tcp_rx = kthread_create(stub_rx_loop, &sdev->ud, "stub_rx"); + if (IS_ERR(tcp_rx)) { + sockfd_put(socket); + return -EINVAL; + } + tcp_tx = kthread_create(stub_tx_loop, &sdev->ud, "stub_tx"); + if (IS_ERR(tcp_tx)) { + kthread_stop(tcp_rx); + sockfd_put(socket); + return -EINVAL; + } - sdev->ud.tcp_rx = kthread_get_run(stub_rx_loop, &sdev->ud, - "stub_rx"); - sdev->ud.tcp_tx = kthread_get_run(stub_tx_loop, &sdev->ud, - "stub_tx"); + /* get task structs now */ + get_task_struct(tcp_rx); + get_task_struct(tcp_tx); + /* lock and update sdev->ud state */ spin_lock_irq(&sdev->ud.lock); + sdev->ud.tcp_socket = socket; + sdev->ud.sockfd = sockfd; + sdev->ud.tcp_rx = tcp_rx; + sdev->ud.tcp_tx = tcp_tx; sdev->ud.status = SDEV_ST_USED; spin_unlock_irq(&sdev->ud.lock); + wake_up_process(sdev->ud.tcp_rx); + wake_up_process(sdev->ud.tcp_tx); + } else { dev_info(dev, "stub down\n"); @@ -100,6 +126,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a return count; +sock_err: + sockfd_put(socket); err: spin_unlock_irq(&sdev->ud.lock); return -EINVAL; diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index 3209b5ddd30c..a20a8380ca0c 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c @@ -594,6 +594,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, pr_err("invalid port number %d\n", wIndex); goto error; } + if (wValue >= 32) + goto error; if (hcd->speed == HCD_USB3) { if ((vhci_hcd->port_status[rhport] & USB_SS_PORT_STAT_POWER) != 0) { diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c index 96e5371dc335..c4b4256e5dad 100644 --- a/drivers/usb/usbip/vhci_sysfs.c +++ b/drivers/usb/usbip/vhci_sysfs.c @@ -312,6 +312,8 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr, struct vhci *vhci; int err; unsigned long flags; + struct task_struct *tcp_rx = NULL; + struct task_struct *tcp_tx = NULL; /* * @rhport: port number of vhci_hcd @@ -349,12 +351,35 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr, /* Extract socket from fd. 
*/ socket = sockfd_lookup(sockfd, &err); - if (!socket) + if (!socket) { + dev_err(dev, "failed to lookup sock"); return -EINVAL; + } + if (socket->type != SOCK_STREAM) { + dev_err(dev, "Expecting SOCK_STREAM - found %d", + socket->type); + sockfd_put(socket); + return -EINVAL; + } + + /* create threads before locking */ + tcp_rx = kthread_create(vhci_rx_loop, &vdev->ud, "vhci_rx"); + if (IS_ERR(tcp_rx)) { + sockfd_put(socket); + return -EINVAL; + } + tcp_tx = kthread_create(vhci_tx_loop, &vdev->ud, "vhci_tx"); + if (IS_ERR(tcp_tx)) { + kthread_stop(tcp_rx); + sockfd_put(socket); + return -EINVAL; + } - /* now need lock until setting vdev status as used */ + /* get task structs now */ + get_task_struct(tcp_rx); + get_task_struct(tcp_tx); - /* begin a lock */ + /* now begin lock until setting vdev status set */ spin_lock_irqsave(&vhci->lock, flags); spin_lock(&vdev->ud.lock); @@ -364,6 +389,8 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr, spin_unlock_irqrestore(&vhci->lock, flags); sockfd_put(socket); + kthread_stop_put(tcp_rx); + kthread_stop_put(tcp_tx); dev_err(dev, "port %d already used\n", rhport); /* @@ -382,6 +409,8 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr, vdev->speed = speed; vdev->ud.sockfd = sockfd; vdev->ud.tcp_socket = socket; + vdev->ud.tcp_rx = tcp_rx; + vdev->ud.tcp_tx = tcp_tx; vdev->ud.status = VDEV_ST_NOTASSIGNED; usbip_kcov_handle_init(&vdev->ud); @@ -389,8 +418,8 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr, spin_unlock_irqrestore(&vhci->lock, flags); /* end the lock */ - vdev->ud.tcp_rx = kthread_get_run(vhci_rx_loop, &vdev->ud, "vhci_rx"); - vdev->ud.tcp_tx = kthread_get_run(vhci_tx_loop, &vdev->ud, "vhci_tx"); + wake_up_process(vdev->ud.tcp_rx); + wake_up_process(vdev->ud.tcp_tx); rh_port_connect(vdev, speed); diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c index 100f680c572a..7383a543c6d1 100644 --- a/drivers/usb/usbip/vudc_sysfs.c +++ b/drivers/usb/usbip/vudc_sysfs.c @@ -90,8 +90,9 @@ unlock: } static BIN_ATTR_RO(dev_desc, sizeof(struct usb_device_descriptor)); -static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *attr, - const char *in, size_t count) +static ssize_t usbip_sockfd_store(struct device *dev, + struct device_attribute *attr, + const char *in, size_t count) { struct vudc *udc = (struct vudc *) dev_get_drvdata(dev); int rv; @@ -100,6 +101,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a struct socket *socket; unsigned long flags; int ret; + struct task_struct *tcp_rx = NULL; + struct task_struct *tcp_tx = NULL; rv = kstrtoint(in, 0, &sockfd); if (rv != 0) @@ -138,24 +141,54 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a goto unlock_ud; } - udc->ud.tcp_socket = socket; + if (socket->type != SOCK_STREAM) { + dev_err(dev, "Expecting SOCK_STREAM - found %d", + socket->type); + ret = -EINVAL; + goto sock_err; + } + /* unlock and create threads and get tasks */ spin_unlock_irq(&udc->ud.lock); spin_unlock_irqrestore(&udc->lock, flags); - udc->ud.tcp_rx = kthread_get_run(&v_rx_loop, - &udc->ud, "vudc_rx"); - udc->ud.tcp_tx = kthread_get_run(&v_tx_loop, - &udc->ud, "vudc_tx"); + tcp_rx = kthread_create(&v_rx_loop, &udc->ud, "vudc_rx"); + if (IS_ERR(tcp_rx)) { + sockfd_put(socket); + return -EINVAL; + } + tcp_tx = kthread_create(&v_tx_loop, &udc->ud, "vudc_tx"); + if (IS_ERR(tcp_tx)) { + kthread_stop(tcp_rx); + 
sockfd_put(socket); + return -EINVAL; + } + + /* get task structs now */ + get_task_struct(tcp_rx); + get_task_struct(tcp_tx); + /* lock and update udc->ud state */ spin_lock_irqsave(&udc->lock, flags); spin_lock_irq(&udc->ud.lock); + + udc->ud.tcp_socket = socket; + udc->ud.tcp_rx = tcp_rx; + udc->ud.tcp_tx = tcp_tx; udc->ud.status = SDEV_ST_USED; + spin_unlock_irq(&udc->ud.lock); ktime_get_ts64(&udc->start_time); v_start_timer(udc); udc->connected = 1; + + spin_unlock_irqrestore(&udc->lock, flags); + + wake_up_process(udc->ud.tcp_rx); + wake_up_process(udc->ud.tcp_tx); + return count; + } else { if (!udc->connected) { dev_err(dev, "Device not connected"); @@ -177,6 +210,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a return count; +sock_err: + sockfd_put(socket); unlock_ud: spin_unlock_irq(&udc->ud.lock); unlock: diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c index 7c8bbfcf6c3e..d555a6a5d1ba 100644 --- a/drivers/vdpa/ifcvf/ifcvf_main.c +++ b/drivers/vdpa/ifcvf/ifcvf_main.c @@ -431,8 +431,7 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id) } adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa, - dev, &ifc_vdpa_ops, - IFCVF_MAX_QUEUE_PAIRS * 2, NULL); + dev, &ifc_vdpa_ops, NULL); if (adapter == NULL) { IFCVF_ERR(pdev, "Failed to allocate vDPA structure"); return -ENOMEM; @@ -456,7 +455,7 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id) for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) vf->vring[i].irq = -EINVAL; - ret = vdpa_register_device(&adapter->vdpa); + ret = vdpa_register_device(&adapter->vdpa, IFCVF_MAX_QUEUE_PAIRS * 2); if (ret) { IFCVF_ERR(pdev, "Failed to register ifcvf to vdpa bus"); goto err; diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index 10e9b09932eb..71397fdafa6a 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -1982,7 +1982,7 @@ static int mlx5v_probe(struct auxiliary_device *adev, max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS); ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops, - 2 * mlx5_vdpa_max_qps(max_vqs), NULL); + NULL); if (IS_ERR(ndev)) return PTR_ERR(ndev); @@ -2009,7 +2009,7 @@ static int mlx5v_probe(struct auxiliary_device *adev, if (err) goto err_res; - err = vdpa_register_device(&mvdev->vdev); + err = vdpa_register_device(&mvdev->vdev, 2 * mlx5_vdpa_max_qps(max_vqs)); if (err) goto err_reg; diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c index da67f07e24fd..5cffce67cab0 100644 --- a/drivers/vdpa/vdpa.c +++ b/drivers/vdpa/vdpa.c @@ -69,7 +69,6 @@ static void vdpa_release_dev(struct device *d) * initialized but before registered. * @parent: the parent device * @config: the bus operations that is supported by this device - * @nvqs: number of virtqueues supported by this device * @size: size of the parent structure that contains private data * @name: name of the vdpa device; optional. 
* @@ -81,7 +80,7 @@ static void vdpa_release_dev(struct device *d) */ struct vdpa_device *__vdpa_alloc_device(struct device *parent, const struct vdpa_config_ops *config, - int nvqs, size_t size, const char *name) + size_t size, const char *name) { struct vdpa_device *vdev; int err = -EINVAL; @@ -107,7 +106,6 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent, vdev->index = err; vdev->config = config; vdev->features_valid = false; - vdev->nvqs = nvqs; if (name) err = dev_set_name(&vdev->dev, "%s", name); @@ -136,10 +134,12 @@ static int vdpa_name_match(struct device *dev, const void *data) return (strcmp(dev_name(&vdev->dev), data) == 0); } -static int __vdpa_register_device(struct vdpa_device *vdev) +static int __vdpa_register_device(struct vdpa_device *vdev, int nvqs) { struct device *dev; + vdev->nvqs = nvqs; + lockdep_assert_held(&vdpa_dev_mutex); dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match); if (dev) { @@ -155,15 +155,16 @@ static int __vdpa_register_device(struct vdpa_device *vdev) * Caller must invoke this routine in the management device dev_add() * callback after setting up valid mgmtdev for this vdpa device. * @vdev: the vdpa device to be registered to vDPA bus + * @nvqs: number of virtqueues supported by this device * * Returns an error when fail to add device to vDPA bus */ -int _vdpa_register_device(struct vdpa_device *vdev) +int _vdpa_register_device(struct vdpa_device *vdev, int nvqs) { if (!vdev->mdev) return -EINVAL; - return __vdpa_register_device(vdev); + return __vdpa_register_device(vdev, nvqs); } EXPORT_SYMBOL_GPL(_vdpa_register_device); @@ -171,15 +172,16 @@ EXPORT_SYMBOL_GPL(_vdpa_register_device); * vdpa_register_device - register a vDPA device * Callers must have a succeed call of vdpa_alloc_device() before. 
* @vdev: the vdpa device to be registered to vDPA bus + * @nvqs: number of virtqueues supported by this device * * Returns an error when fail to add to vDPA bus */ -int vdpa_register_device(struct vdpa_device *vdev) +int vdpa_register_device(struct vdpa_device *vdev, int nvqs) { int err; mutex_lock(&vdpa_dev_mutex); - err = __vdpa_register_device(vdev); + err = __vdpa_register_device(vdev, nvqs); mutex_unlock(&vdpa_dev_mutex); return err; } diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c index d5942842432d..5b6b2f87d40c 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c @@ -235,7 +235,7 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr) ops = &vdpasim_config_ops; vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops, - dev_attr->nvqs, dev_attr->name); + dev_attr->name); if (!vdpasim) goto err_alloc; diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c index d344c5b7c914..a1ab6163f7d1 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c @@ -110,8 +110,7 @@ out: static void vdpasim_net_get_config(struct vdpasim *vdpasim, void *config) { - struct virtio_net_config *net_config = - (struct virtio_net_config *)config; + struct virtio_net_config *net_config = config; net_config->mtu = cpu_to_vdpasim16(vdpasim, 1500); net_config->status = cpu_to_vdpasim16(vdpasim, VIRTIO_NET_S_LINK_UP); @@ -147,7 +146,7 @@ static int vdpasim_net_dev_add(struct vdpa_mgmt_dev *mdev, const char *name) if (IS_ERR(simdev)) return PTR_ERR(simdev); - ret = _vdpa_register_device(&simdev->vdpa); + ret = _vdpa_register_device(&simdev->vdpa, VDPASIM_NET_VQ_NUM); if (ret) goto reg_err; diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig index 5533df91b257..67d0bf4efa16 100644 --- a/drivers/vfio/Kconfig +++ b/drivers/vfio/Kconfig @@ -21,8 +21,8 @@ config VFIO_VIRQFD menuconfig VFIO tristate "VFIO Non-Privileged userspace driver framework" - depends on IOMMU_API - select VFIO_IOMMU_TYPE1 if (X86 || S390 || ARM || ARM64) + select IOMMU_API + select VFIO_IOMMU_TYPE1 if MMU && (X86 || S390 || ARM || ARM64) help VFIO provides a framework for secure userspace device drivers. See Documentation/driver-api/vfio.rst for more details. diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig index ac3c1dd3edef..4abddbebd4b2 100644 --- a/drivers/vfio/pci/Kconfig +++ b/drivers/vfio/pci/Kconfig @@ -42,6 +42,6 @@ config VFIO_PCI_IGD config VFIO_PCI_NVLINK2 def_bool y - depends on VFIO_PCI && PPC_POWERNV + depends on VFIO_PCI && PPC_POWERNV && SPAPR_TCE_IOMMU help VFIO PCI support for P9 Witherspoon machine with NVIDIA V100 GPUs diff --git a/drivers/vfio/platform/Kconfig b/drivers/vfio/platform/Kconfig index dc1a3c44f2c6..ab341108a0be 100644 --- a/drivers/vfio/platform/Kconfig +++ b/drivers/vfio/platform/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only config VFIO_PLATFORM tristate "VFIO support for platform devices" - depends on VFIO && EVENTFD && (ARM || ARM64) + depends on VFIO && EVENTFD && (ARM || ARM64 || COMPILE_TEST) select VFIO_VIRQFD help Support for platform devices with VFIO. This is required to make @@ -12,7 +12,7 @@ config VFIO_PLATFORM config VFIO_AMBA tristate "VFIO support for AMBA devices" - depends on VFIO_PLATFORM && ARM_AMBA + depends on VFIO_PLATFORM && (ARM_AMBA || COMPILE_TEST) help Support for ARM AMBA devices with VFIO. 
This is required to make use of ARM AMBA devices present on the system using the VFIO diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 4bb162c1d649..45cbfd4879a5 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -189,7 +189,7 @@ static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu, } static struct rb_node *vfio_find_dma_first_node(struct vfio_iommu *iommu, - dma_addr_t start, size_t size) + dma_addr_t start, u64 size) { struct rb_node *res = NULL; struct rb_node *node = iommu->dma_list.rb_node; @@ -739,6 +739,12 @@ out: ret = vfio_lock_acct(dma, lock_acct, false); unpin_out: + if (batch->size == 1 && !batch->offset) { + /* May be a VM_PFNMAP pfn, which the batch can't remember. */ + put_pfn(pfn, dma->prot); + batch->size = 0; + } + if (ret < 0) { if (pinned && !rsvd) { for (pfn = *pfn_base ; pinned ; pfn++, pinned--) @@ -785,7 +791,12 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr, return -ENODEV; ret = vaddr_get_pfns(mm, vaddr, 1, dma->prot, pfn_base, pages); - if (ret == 1 && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) { + if (ret != 1) + goto out; + + ret = 0; + + if (do_accounting && !is_invalid_reserved_pfn(*pfn_base)) { ret = vfio_lock_acct(dma, 1, true); if (ret) { put_pfn(*pfn_base, dma->prot); @@ -797,6 +808,7 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr, } } +out: mmput(mm); return ret; } @@ -1288,7 +1300,7 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu, int ret = -EINVAL, retries = 0; unsigned long pgshift; dma_addr_t iova = unmap->iova; - unsigned long size = unmap->size; + u64 size = unmap->size; bool unmap_all = unmap->flags & VFIO_DMA_UNMAP_FLAG_ALL; bool invalidate_vaddr = unmap->flags & VFIO_DMA_UNMAP_FLAG_VADDR; struct rb_node *n, *first_n; @@ -1304,14 +1316,12 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu, if (unmap_all) { if (iova || size) goto unlock; - size = SIZE_MAX; - } else if (!size || size & (pgsize - 1)) { + size = U64_MAX; + } else if (!size || size & (pgsize - 1) || + iova + size - 1 < iova || size > SIZE_MAX) { goto unlock; } - if (iova + size - 1 < iova || size > SIZE_MAX) - goto unlock; - /* When dirty tracking is enabled, allow only min supported pgsize */ if ((unmap->flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) && (!iommu->dirty_page_tracking || (bitmap->pgsize != pgsize))) { diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index ef688c8c0e0e..e0a27e336293 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -308,8 +308,10 @@ static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp) static void vhost_vdpa_config_put(struct vhost_vdpa *v) { - if (v->config_ctx) + if (v->config_ctx) { eventfd_ctx_put(v->config_ctx); + v->config_ctx = NULL; + } } static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp) @@ -329,8 +331,12 @@ static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp) if (!IS_ERR_OR_NULL(ctx)) eventfd_ctx_put(ctx); - if (IS_ERR(v->config_ctx)) - return PTR_ERR(v->config_ctx); + if (IS_ERR(v->config_ctx)) { + long ret = PTR_ERR(v->config_ctx); + + v->config_ctx = NULL; + return ret; + } v->vdpa->config->set_config_cb(v->vdpa, &cb); @@ -900,14 +906,10 @@ err: static void vhost_vdpa_clean_irq(struct vhost_vdpa *v) { - struct vhost_virtqueue *vq; int i; - for (i = 0; i < v->nvqs; i++) { - vq = &v->vqs[i]; - if (vq->call_ctx.producer.irq) - 
irq_bypass_unregister_producer(&vq->call_ctx.producer); - } + for (i = 0; i < v->nvqs; i++) + vhost_vdpa_unsetup_vq_irq(v, i); } static int vhost_vdpa_release(struct inode *inode, struct file *filep) diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index a262e12c6dc2..5ccb0705beae 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -332,8 +332,8 @@ static void vhost_vq_reset(struct vhost_dev *dev, vq->error_ctx = NULL; vq->kick = NULL; vq->log_ctx = NULL; - vhost_reset_is_le(vq); vhost_disable_cross_endian(vq); + vhost_reset_is_le(vq); vq->busyloop_timeout = 0; vq->umem = NULL; vq->iotlb = NULL; diff --git a/drivers/video/fbdev/aty/atyfb.h b/drivers/video/fbdev/aty/atyfb.h index 551372f9b9aa..465f55beb97f 100644 --- a/drivers/video/fbdev/aty/atyfb.h +++ b/drivers/video/fbdev/aty/atyfb.h @@ -287,11 +287,8 @@ static inline void aty_st_8(int regindex, u8 val, const struct atyfb_par *par) #endif } -#if defined(CONFIG_PMAC_BACKLIGHT) || defined (CONFIG_FB_ATY_GENERIC_LCD) || \ -defined (CONFIG_FB_ATY_BACKLIGHT) extern void aty_st_lcd(int index, u32 val, const struct atyfb_par *par); extern u32 aty_ld_lcd(int index, const struct atyfb_par *par); -#endif /* * DAC operations diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c index e946903a86c2..1aef3d6ebd88 100644 --- a/drivers/video/fbdev/aty/atyfb_base.c +++ b/drivers/video/fbdev/aty/atyfb_base.c @@ -133,7 +133,7 @@ #define PRINTKE(fmt, args...) printk(KERN_ERR "atyfb: " fmt, ## args) #if defined(CONFIG_PMAC_BACKLIGHT) || defined(CONFIG_FB_ATY_GENERIC_LCD) || \ -defined(CONFIG_FB_ATY_BACKLIGHT) +defined(CONFIG_FB_ATY_BACKLIGHT) || defined (CONFIG_PPC_PMAC) static const u32 lt_lcd_regs[] = { CNFG_PANEL_LG, LCD_GEN_CNTL_LG, @@ -175,8 +175,8 @@ u32 aty_ld_lcd(int index, const struct atyfb_par *par) return aty_ld_le32(LCD_DATA, par); } } -#else /* defined(CONFIG_PMAC_BACKLIGHT) || defined(CONFIG_FB_ATY_BACKLIGHT) \ - defined(CONFIG_FB_ATY_GENERIC_LCD) */ +#else /* defined(CONFIG_PMAC_BACKLIGHT) || defined(CONFIG_FB_ATY_BACKLIGHT) || + defined(CONFIG_FB_ATY_GENERIC_LCD) || defined(CONFIG_PPC_PMAC) */ void aty_st_lcd(int index, u32 val, const struct atyfb_par *par) { } @@ -184,7 +184,8 @@ u32 aty_ld_lcd(int index, const struct atyfb_par *par) { return 0; } -#endif /* defined(CONFIG_PMAC_BACKLIGHT) || defined (CONFIG_FB_ATY_GENERIC_LCD) */ +#endif /* defined(CONFIG_PMAC_BACKLIGHT) || defined(CONFIG_FB_ATY_BACKLIGHT) || + defined (CONFIG_FB_ATY_GENERIC_LCD) || defined(CONFIG_PPC_PMAC) */ #ifdef CONFIG_FB_ATY_GENERIC_LCD /* diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 44a5cd2f54cc..3406067985b1 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -1333,6 +1333,9 @@ static void fbcon_cursor(struct vc_data *vc, int mode) ops->cursor_flash = (mode == CM_ERASE) ? 
0 : 1; + if (!ops->cursor) + return; + ops->cursor(vc, info, mode, get_color(vc, info, c, 1), get_color(vc, info, c, 0)); } diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c index c8b0ae676809..4dc9077dd2ac 100644 --- a/drivers/video/fbdev/hyperv_fb.c +++ b/drivers/video/fbdev/hyperv_fb.c @@ -1031,7 +1031,6 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info) PCI_DEVICE_ID_HYPERV_VIDEO, NULL); if (!pdev) { pr_err("Unable to find PCI Hyper-V video\n"); - kfree(info->apertures); return -ENODEV; } @@ -1129,7 +1128,6 @@ getmem_done: } else { pci_dev_put(pdev); } - kfree(info->apertures); return 0; @@ -1141,7 +1139,6 @@ err2: err1: if (!gen2vm) pci_dev_put(pdev); - kfree(info->apertures); return -ENOMEM; } diff --git a/drivers/virt/acrn/hsm.c b/drivers/virt/acrn/hsm.c index 1f6b7c54a1a4..130e12b8652a 100644 --- a/drivers/virt/acrn/hsm.c +++ b/drivers/virt/acrn/hsm.c @@ -333,7 +333,7 @@ static long acrn_dev_ioctl(struct file *filp, unsigned int cmd, acrn_ioreq_request_clear(vm); break; case ACRN_IOCTL_PM_GET_CPU_STATE: - if (copy_from_user(&cstate_cmd, (void *)ioctl_param, + if (copy_from_user(&cstate_cmd, (void __user *)ioctl_param, sizeof(cstate_cmd))) return -EFAULT; @@ -404,6 +404,14 @@ fail_remove: } static DEVICE_ATTR_WO(remove_cpu); +static umode_t acrn_attr_visible(struct kobject *kobj, struct attribute *a, int n) +{ + if (a == &dev_attr_remove_cpu.attr) + return IS_ENABLED(CONFIG_HOTPLUG_CPU) ? a->mode : 0; + + return a->mode; +} + static struct attribute *acrn_attrs[] = { &dev_attr_remove_cpu.attr, NULL @@ -411,6 +419,7 @@ static struct attribute *acrn_attrs[] = { static struct attribute_group acrn_attr_group = { .attrs = acrn_attrs, + .is_visible = acrn_attr_visible, }; static const struct attribute_group *acrn_attr_groups[] = { diff --git a/drivers/virt/acrn/irqfd.c b/drivers/virt/acrn/irqfd.c index a8766d528e29..df5184979b28 100644 --- a/drivers/virt/acrn/irqfd.c +++ b/drivers/virt/acrn/irqfd.c @@ -112,7 +112,7 @@ static int acrn_irqfd_assign(struct acrn_vm *vm, struct acrn_irqfd *args) { struct eventfd_ctx *eventfd = NULL; struct hsm_irqfd *irqfd, *tmp; - unsigned int events; + __poll_t events; struct fd f; int ret = 0; @@ -158,9 +158,9 @@ static int acrn_irqfd_assign(struct acrn_vm *vm, struct acrn_irqfd *args) mutex_unlock(&vm->irqfds_lock); /* Check the pending event in this stage */ - events = f.file->f_op->poll(f.file, &irqfd->pt); + events = vfs_poll(f.file, &irqfd->pt); - if (events & POLLIN) + if (events & EPOLLIN) acrn_irqfd_inject(irqfd); fdput(f); diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index 42e09cc1b8ac..4b15c00c0a0a 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -141,15 +141,14 @@ void virtio_config_changed(struct virtio_device *dev) } EXPORT_SYMBOL_GPL(virtio_config_changed); -void virtio_config_disable(struct virtio_device *dev) +static void virtio_config_disable(struct virtio_device *dev) { spin_lock_irq(&dev->config_lock); dev->config_enabled = false; spin_unlock_irq(&dev->config_lock); } -EXPORT_SYMBOL_GPL(virtio_config_disable); -void virtio_config_enable(struct virtio_device *dev) +static void virtio_config_enable(struct virtio_device *dev) { spin_lock_irq(&dev->config_lock); dev->config_enabled = true; @@ -158,7 +157,6 @@ void virtio_config_enable(struct virtio_device *dev) dev->config_change_pending = false; spin_unlock_irq(&dev->config_lock); } -EXPORT_SYMBOL_GPL(virtio_config_enable); void virtio_add_status(struct virtio_device *dev, unsigned int status) { diff 
--git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index a286d22b6551..56128b9c46eb 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -548,8 +548,7 @@ static void virtio_mmio_release_dev(struct device *_d) { struct virtio_device *vdev = container_of(_d, struct virtio_device, dev); - struct virtio_mmio_device *vm_dev = - container_of(vdev, struct virtio_mmio_device, vdev); + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); struct platform_device *pdev = vm_dev->pdev; devm_kfree(&pdev->dev, vm_dev); diff --git a/drivers/watchdog/cpu5wdt.c b/drivers/watchdog/cpu5wdt.c index 9867a3a936df..688b112e712b 100644 --- a/drivers/watchdog/cpu5wdt.c +++ b/drivers/watchdog/cpu5wdt.c @@ -273,7 +273,6 @@ module_exit(cpu5wdt_exit_module); MODULE_AUTHOR("Heiko Ronsdorf <hero@ihg.uni-duisburg.de>"); MODULE_DESCRIPTION("sma cpu5 watchdog driver"); -MODULE_SUPPORTED_DEVICE("sma cpu5 watchdog"); MODULE_LICENSE("GPL"); module_param_hw(port, int, ioport, 0); diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c index 808eeb4779e4..1eafe0b4d71c 100644 --- a/drivers/watchdog/cpwd.c +++ b/drivers/watchdog/cpwd.c @@ -172,7 +172,6 @@ MODULE_PARM_DESC(wd2_timeout, "Default watchdog2 timeout in 1/10secs"); MODULE_AUTHOR("Eric Brower <ebrower@usa.net>"); MODULE_DESCRIPTION("Hardware watchdog driver for Sun Microsystems CP1400/1500"); MODULE_LICENSE("GPL"); -MODULE_SUPPORTED_DEVICE("watchdog"); static void cpwd_writew(u16 val, void __iomem *addr) { diff --git a/drivers/watchdog/riowd.c b/drivers/watchdog/riowd.c index 7008596a575f..747e346ed06c 100644 --- a/drivers/watchdog/riowd.c +++ b/drivers/watchdog/riowd.c @@ -46,7 +46,6 @@ MODULE_AUTHOR("David S. Miller <davem@davemloft.net>"); MODULE_DESCRIPTION("Hardware watchdog driver for Sun RIO"); -MODULE_SUPPORTED_DEVICE("watchdog"); MODULE_LICENSE("GPL"); #define DRIVER_NAME "riowd" diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index 41645fe6ad48..ea0efd290c37 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -50,11 +50,11 @@ config XEN_BALLOON_MEMORY_HOTPLUG SUBSYSTEM=="memory", ACTION=="add", RUN+="/bin/sh -c '[ -f /sys$devpath/state ] && echo online > /sys$devpath/state'" -config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT +config XEN_MEMORY_HOTPLUG_LIMIT int "Hotplugged memory limit (in GiB) for a PV guest" default 512 depends on XEN_HAVE_PVMMU - depends on XEN_BALLOON_MEMORY_HOTPLUG + depends on MEMORY_HOTPLUG help Maxmium amount of memory (in GiB) that a PV guest can be expanded to when using memory hotplug. 
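The virtio_mmio hunk above swaps an open-coded container_of() for the existing to_virtio_mmio_device() helper. As a minimal userspace sketch of that idiom only (the struct names outer_device/inner_member and the helper to_outer_device() are invented for illustration and are not taken from the kernel sources):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct inner_member {
    int id;
};

struct outer_device {
    const char *name;
    struct inner_member member;   /* embedded, like vdev inside virtio_mmio_device */
};

/* Named wrapper instead of open-coded container_of() at every call site. */
static struct outer_device *to_outer_device(struct inner_member *m)
{
    return container_of(m, struct outer_device, member);
}

int main(void)
{
    struct outer_device dev = { .name = "demo", .member = { .id = 42 } };
    struct inner_member *m = &dev.member;

    /* Recover the enclosing structure from a pointer to its embedded member. */
    printf("%s %d\n", to_outer_device(m)->name, m->id);
    return 0;
}

Wrapping container_of() in a named helper keeps call sites short and self-documenting, which is all the virtio_mmio_release_dev() change above does.
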
diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c index da87f3a1e351..b8f2f971c2f0 100644 --- a/drivers/xen/events/events_2l.c +++ b/drivers/xen/events/events_2l.c @@ -47,6 +47,11 @@ static unsigned evtchn_2l_max_channels(void) return EVTCHN_2L_NR_CHANNELS; } +static void evtchn_2l_remove(evtchn_port_t evtchn, unsigned int cpu) +{ + clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu))); +} + static void evtchn_2l_bind_to_cpu(evtchn_port_t evtchn, unsigned int cpu, unsigned int old_cpu) { @@ -72,12 +77,6 @@ static bool evtchn_2l_is_pending(evtchn_port_t port) return sync_test_bit(port, BM(&s->evtchn_pending[0])); } -static bool evtchn_2l_test_and_set_mask(evtchn_port_t port) -{ - struct shared_info *s = HYPERVISOR_shared_info; - return sync_test_and_set_bit(port, BM(&s->evtchn_mask[0])); -} - static void evtchn_2l_mask(evtchn_port_t port) { struct shared_info *s = HYPERVISOR_shared_info; @@ -355,18 +354,27 @@ static void evtchn_2l_resume(void) EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD); } +static int evtchn_2l_percpu_deinit(unsigned int cpu) +{ + memset(per_cpu(cpu_evtchn_mask, cpu), 0, sizeof(xen_ulong_t) * + EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD); + + return 0; +} + static const struct evtchn_ops evtchn_ops_2l = { .max_channels = evtchn_2l_max_channels, .nr_channels = evtchn_2l_max_channels, + .remove = evtchn_2l_remove, .bind_to_cpu = evtchn_2l_bind_to_cpu, .clear_pending = evtchn_2l_clear_pending, .set_pending = evtchn_2l_set_pending, .is_pending = evtchn_2l_is_pending, - .test_and_set_mask = evtchn_2l_test_and_set_mask, .mask = evtchn_2l_mask, .unmask = evtchn_2l_unmask, .handle_events = evtchn_2l_handle_events, .resume = evtchn_2l_resume, + .percpu_deinit = evtchn_2l_percpu_deinit, }; void __init xen_evtchn_2l_init(void) diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index adb7260e94b2..8236e2364eeb 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -98,13 +98,19 @@ struct irq_info { short refcnt; u8 spurious_cnt; u8 is_accounted; - enum xen_irq_type type; /* type */ + short type; /* type: IRQT_* */ + u8 mask_reason; /* Why is event channel masked */ +#define EVT_MASK_REASON_EXPLICIT 0x01 +#define EVT_MASK_REASON_TEMPORARY 0x02 +#define EVT_MASK_REASON_EOI_PENDING 0x04 + u8 is_active; /* Is event just being handled? */ unsigned irq; evtchn_port_t evtchn; /* event channel */ unsigned short cpu; /* cpu bound */ unsigned short eoi_cpu; /* EOI must happen on this cpu-1 */ unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */ u64 eoi_time; /* Time in jiffies when to EOI. 
*/ + spinlock_t lock; union { unsigned short virq; @@ -154,6 +160,7 @@ static DEFINE_RWLOCK(evtchn_rwlock); * evtchn_rwlock * IRQ-desc lock * percpu eoi_list_lock + * irq_info->lock */ static LIST_HEAD(xen_irq_list_head); @@ -304,6 +311,8 @@ static int xen_irq_info_common_setup(struct irq_info *info, info->irq = irq; info->evtchn = evtchn; info->cpu = cpu; + info->mask_reason = EVT_MASK_REASON_EXPLICIT; + spin_lock_init(&info->lock); ret = set_evtchn_to_irq(evtchn, irq); if (ret < 0) @@ -377,6 +386,7 @@ static int xen_irq_info_pirq_setup(unsigned irq, static void xen_irq_info_cleanup(struct irq_info *info) { set_evtchn_to_irq(info->evtchn, -1); + xen_evtchn_port_remove(info->evtchn, info->cpu); info->evtchn = 0; channels_on_cpu_dec(info); } @@ -458,6 +468,34 @@ unsigned int cpu_from_evtchn(evtchn_port_t evtchn) return ret; } +static void do_mask(struct irq_info *info, u8 reason) +{ + unsigned long flags; + + spin_lock_irqsave(&info->lock, flags); + + if (!info->mask_reason) + mask_evtchn(info->evtchn); + + info->mask_reason |= reason; + + spin_unlock_irqrestore(&info->lock, flags); +} + +static void do_unmask(struct irq_info *info, u8 reason) +{ + unsigned long flags; + + spin_lock_irqsave(&info->lock, flags); + + info->mask_reason &= ~reason; + + if (!info->mask_reason) + unmask_evtchn(info->evtchn); + + spin_unlock_irqrestore(&info->lock, flags); +} + #ifdef CONFIG_X86 static bool pirq_check_eoi_map(unsigned irq) { @@ -604,7 +642,7 @@ static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious) } info->eoi_time = 0; - unmask_evtchn(evtchn); + do_unmask(info, EVT_MASK_REASON_EOI_PENDING); } static void xen_irq_lateeoi_worker(struct work_struct *work) @@ -773,6 +811,12 @@ static void xen_evtchn_close(evtchn_port_t port) BUG(); } +static void event_handler_exit(struct irq_info *info) +{ + smp_store_release(&info->is_active, 0); + clear_evtchn(info->evtchn); +} + static void pirq_query_unmask(int irq) { struct physdev_irq_status_query irq_status; @@ -791,14 +835,15 @@ static void pirq_query_unmask(int irq) static void eoi_pirq(struct irq_data *data) { - evtchn_port_t evtchn = evtchn_from_irq(data->irq); + struct irq_info *info = info_for_irq(data->irq); + evtchn_port_t evtchn = info ? info->evtchn : 0; struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) }; int rc = 0; if (!VALID_EVTCHN(evtchn)) return; - clear_evtchn(evtchn); + event_handler_exit(info); if (pirq_needs_eoi(data->irq)) { rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi); @@ -849,7 +894,8 @@ static unsigned int __startup_pirq(unsigned int irq) goto err; out: - unmask_evtchn(evtchn); + do_unmask(info, EVT_MASK_REASON_EXPLICIT); + eoi_pirq(irq_get_irq_data(irq)); return 0; @@ -876,7 +922,7 @@ static void shutdown_pirq(struct irq_data *data) if (!VALID_EVTCHN(evtchn)) return; - mask_evtchn(evtchn); + do_mask(info, EVT_MASK_REASON_EXPLICIT); xen_evtchn_close(evtchn); xen_irq_info_cleanup(info); } @@ -1628,6 +1674,8 @@ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl) } info = info_for_irq(irq); + if (xchg_acquire(&info->is_active, 1)) + return; dev = (info->type == IRQT_EVTCHN) ? 
info->u.interdomain : NULL; if (dev) @@ -1720,10 +1768,10 @@ void rebind_evtchn_irq(evtchn_port_t evtchn, int irq) } /* Rebind an evtchn so that it gets delivered to a specific cpu */ -static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu) +static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu) { struct evtchn_bind_vcpu bind_vcpu; - int masked; + evtchn_port_t evtchn = info ? info->evtchn : 0; if (!VALID_EVTCHN(evtchn)) return -1; @@ -1739,7 +1787,7 @@ static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu) * Mask the event while changing the VCPU binding to prevent * it being delivered on an unexpected VCPU. */ - masked = test_and_set_mask(evtchn); + do_mask(info, EVT_MASK_REASON_TEMPORARY); /* * If this fails, it usually just indicates that we're dealing with a @@ -1749,8 +1797,7 @@ static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu) if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) bind_evtchn_to_cpu(evtchn, tcpu, false); - if (!masked) - unmask_evtchn(evtchn); + do_unmask(info, EVT_MASK_REASON_TEMPORARY); return 0; } @@ -1789,7 +1836,7 @@ static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest, unsigned int tcpu = select_target_cpu(dest); int ret; - ret = xen_rebind_evtchn_to_cpu(evtchn_from_irq(data->irq), tcpu); + ret = xen_rebind_evtchn_to_cpu(info_for_irq(data->irq), tcpu); if (!ret) irq_data_update_effective_affinity(data, cpumask_of(tcpu)); @@ -1798,28 +1845,29 @@ static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest, static void enable_dynirq(struct irq_data *data) { - evtchn_port_t evtchn = evtchn_from_irq(data->irq); + struct irq_info *info = info_for_irq(data->irq); + evtchn_port_t evtchn = info ? info->evtchn : 0; if (VALID_EVTCHN(evtchn)) - unmask_evtchn(evtchn); + do_unmask(info, EVT_MASK_REASON_EXPLICIT); } static void disable_dynirq(struct irq_data *data) { - evtchn_port_t evtchn = evtchn_from_irq(data->irq); + struct irq_info *info = info_for_irq(data->irq); + evtchn_port_t evtchn = info ? info->evtchn : 0; if (VALID_EVTCHN(evtchn)) - mask_evtchn(evtchn); + do_mask(info, EVT_MASK_REASON_EXPLICIT); } static void ack_dynirq(struct irq_data *data) { - evtchn_port_t evtchn = evtchn_from_irq(data->irq); + struct irq_info *info = info_for_irq(data->irq); + evtchn_port_t evtchn = info ? info->evtchn : 0; - if (!VALID_EVTCHN(evtchn)) - return; - - clear_evtchn(evtchn); + if (VALID_EVTCHN(evtchn)) + event_handler_exit(info); } static void mask_ack_dynirq(struct irq_data *data) @@ -1828,18 +1876,39 @@ static void mask_ack_dynirq(struct irq_data *data) ack_dynirq(data); } +static void lateeoi_ack_dynirq(struct irq_data *data) +{ + struct irq_info *info = info_for_irq(data->irq); + evtchn_port_t evtchn = info ? info->evtchn : 0; + + if (VALID_EVTCHN(evtchn)) { + do_mask(info, EVT_MASK_REASON_EOI_PENDING); + event_handler_exit(info); + } +} + +static void lateeoi_mask_ack_dynirq(struct irq_data *data) +{ + struct irq_info *info = info_for_irq(data->irq); + evtchn_port_t evtchn = info ? info->evtchn : 0; + + if (VALID_EVTCHN(evtchn)) { + do_mask(info, EVT_MASK_REASON_EXPLICIT); + event_handler_exit(info); + } +} + static int retrigger_dynirq(struct irq_data *data) { - evtchn_port_t evtchn = evtchn_from_irq(data->irq); - int masked; + struct irq_info *info = info_for_irq(data->irq); + evtchn_port_t evtchn = info ? 
info->evtchn : 0; if (!VALID_EVTCHN(evtchn)) return 0; - masked = test_and_set_mask(evtchn); + do_mask(info, EVT_MASK_REASON_TEMPORARY); set_evtchn(evtchn); - if (!masked) - unmask_evtchn(evtchn); + do_unmask(info, EVT_MASK_REASON_TEMPORARY); return 1; } @@ -1938,10 +2007,11 @@ static void restore_cpu_ipis(unsigned int cpu) /* Clear an irq's pending state, in preparation for polling on it */ void xen_clear_irq_pending(int irq) { - evtchn_port_t evtchn = evtchn_from_irq(irq); + struct irq_info *info = info_for_irq(irq); + evtchn_port_t evtchn = info ? info->evtchn : 0; if (VALID_EVTCHN(evtchn)) - clear_evtchn(evtchn); + event_handler_exit(info); } EXPORT_SYMBOL(xen_clear_irq_pending); void xen_set_irq_pending(int irq) @@ -2053,8 +2123,8 @@ static struct irq_chip xen_lateeoi_chip __read_mostly = { .irq_mask = disable_dynirq, .irq_unmask = enable_dynirq, - .irq_ack = mask_ack_dynirq, - .irq_mask_ack = mask_ack_dynirq, + .irq_ack = lateeoi_ack_dynirq, + .irq_mask_ack = lateeoi_mask_ack_dynirq, .irq_set_affinity = set_affinity_irq, .irq_retrigger = retrigger_dynirq, diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c index b234f1766810..ad9fe51d3fb3 100644 --- a/drivers/xen/events/events_fifo.c +++ b/drivers/xen/events/events_fifo.c @@ -209,12 +209,6 @@ static bool evtchn_fifo_is_pending(evtchn_port_t port) return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word)); } -static bool evtchn_fifo_test_and_set_mask(evtchn_port_t port) -{ - event_word_t *word = event_word_from_port(port); - return sync_test_and_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word)); -} - static void evtchn_fifo_mask(evtchn_port_t port) { event_word_t *word = event_word_from_port(port); @@ -423,7 +417,6 @@ static const struct evtchn_ops evtchn_ops_fifo = { .clear_pending = evtchn_fifo_clear_pending, .set_pending = evtchn_fifo_set_pending, .is_pending = evtchn_fifo_is_pending, - .test_and_set_mask = evtchn_fifo_test_and_set_mask, .mask = evtchn_fifo_mask, .unmask = evtchn_fifo_unmask, .handle_events = evtchn_fifo_handle_events, diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h index 0a97c0549db7..4d3398eff9cd 100644 --- a/drivers/xen/events/events_internal.h +++ b/drivers/xen/events/events_internal.h @@ -14,13 +14,13 @@ struct evtchn_ops { unsigned (*nr_channels)(void); int (*setup)(evtchn_port_t port); + void (*remove)(evtchn_port_t port, unsigned int cpu); void (*bind_to_cpu)(evtchn_port_t evtchn, unsigned int cpu, unsigned int old_cpu); void (*clear_pending)(evtchn_port_t port); void (*set_pending)(evtchn_port_t port); bool (*is_pending)(evtchn_port_t port); - bool (*test_and_set_mask)(evtchn_port_t port); void (*mask)(evtchn_port_t port); void (*unmask)(evtchn_port_t port); @@ -54,6 +54,13 @@ static inline int xen_evtchn_port_setup(evtchn_port_t evtchn) return 0; } +static inline void xen_evtchn_port_remove(evtchn_port_t evtchn, + unsigned int cpu) +{ + if (evtchn_ops->remove) + evtchn_ops->remove(evtchn, cpu); +} + static inline void xen_evtchn_port_bind_to_cpu(evtchn_port_t evtchn, unsigned int cpu, unsigned int old_cpu) @@ -76,11 +83,6 @@ static inline bool test_evtchn(evtchn_port_t port) return evtchn_ops->is_pending(port); } -static inline bool test_and_set_mask(evtchn_port_t port) -{ - return evtchn_ops->test_and_set_mask(port); -} - static inline void mask_evtchn(evtchn_port_t port) { return evtchn_ops->mask(port); diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 5447c5156b2e..f01d58c7a042 100644 --- a/drivers/xen/gntdev.c +++ 
b/drivers/xen/gntdev.c @@ -133,20 +133,26 @@ struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count, if (NULL == add) return NULL; - add->grants = kvcalloc(count, sizeof(add->grants[0]), GFP_KERNEL); - add->map_ops = kvcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL); - add->unmap_ops = kvcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL); - add->kmap_ops = kvcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL); - add->kunmap_ops = kvcalloc(count, - sizeof(add->kunmap_ops[0]), GFP_KERNEL); + add->grants = kvmalloc_array(count, sizeof(add->grants[0]), + GFP_KERNEL); + add->map_ops = kvmalloc_array(count, sizeof(add->map_ops[0]), + GFP_KERNEL); + add->unmap_ops = kvmalloc_array(count, sizeof(add->unmap_ops[0]), + GFP_KERNEL); add->pages = kvcalloc(count, sizeof(add->pages[0]), GFP_KERNEL); if (NULL == add->grants || NULL == add->map_ops || NULL == add->unmap_ops || - NULL == add->kmap_ops || - NULL == add->kunmap_ops || NULL == add->pages) goto err; + if (use_ptemod) { + add->kmap_ops = kvmalloc_array(count, sizeof(add->kmap_ops[0]), + GFP_KERNEL); + add->kunmap_ops = kvmalloc_array(count, sizeof(add->kunmap_ops[0]), + GFP_KERNEL); + if (NULL == add->kmap_ops || NULL == add->kunmap_ops) + goto err; + } #ifdef CONFIG_XEN_GRANT_DMA_ALLOC add->dma_flags = dma_flags; @@ -183,10 +189,14 @@ struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count, goto err; for (i = 0; i < count; i++) { - add->map_ops[i].handle = -1; - add->unmap_ops[i].handle = -1; - add->kmap_ops[i].handle = -1; - add->kunmap_ops[i].handle = -1; + add->grants[i].domid = DOMID_INVALID; + add->grants[i].ref = INVALID_GRANT_REF; + add->map_ops[i].handle = INVALID_GRANT_HANDLE; + add->unmap_ops[i].handle = INVALID_GRANT_HANDLE; + if (use_ptemod) { + add->kmap_ops[i].handle = INVALID_GRANT_HANDLE; + add->kunmap_ops[i].handle = INVALID_GRANT_HANDLE; + } } add->index = 0; @@ -274,7 +284,7 @@ static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data) map->grants[pgnr].ref, map->grants[pgnr].domid); gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags, - -1 /* handle */); + INVALID_GRANT_HANDLE); return 0; } @@ -292,7 +302,7 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map) if (!use_ptemod) { /* Note: it could already be mapped */ - if (map->map_ops[0].handle != -1) + if (map->map_ops[0].handle != INVALID_GRANT_HANDLE) return 0; for (i = 0; i < map->count; i++) { unsigned long addr = (unsigned long) @@ -301,7 +311,7 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map) map->grants[i].ref, map->grants[i].domid); gnttab_set_unmap_op(&map->unmap_ops[i], addr, - map->flags, -1 /* handle */); + map->flags, INVALID_GRANT_HANDLE); } } else { /* @@ -327,13 +337,13 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map) map->grants[i].ref, map->grants[i].domid); gnttab_set_unmap_op(&map->kunmap_ops[i], address, - flags, -1); + flags, INVALID_GRANT_HANDLE); } } pr_debug("map %d+%d\n", map->index, map->count); - err = gnttab_map_refs(map->map_ops, use_ptemod ? 
map->kmap_ops : NULL, - map->pages, map->count); + err = gnttab_map_refs(map->map_ops, map->kmap_ops, map->pages, + map->count); for (i = 0; i < map->count; i++) { if (map->map_ops[i].status == GNTST_okay) @@ -385,7 +395,7 @@ static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset, pr_debug("unmap handle=%d st=%d\n", map->unmap_ops[offset+i].handle, map->unmap_ops[offset+i].status); - map->unmap_ops[offset+i].handle = -1; + map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE; } return err; } @@ -401,13 +411,15 @@ static int unmap_grant_pages(struct gntdev_grant_map *map, int offset, * already unmapped some of the grants. Only unmap valid ranges. */ while (pages && !err) { - while (pages && map->unmap_ops[offset].handle == -1) { + while (pages && + map->unmap_ops[offset].handle == INVALID_GRANT_HANDLE) { offset++; pages--; } range = 0; while (range < pages) { - if (map->unmap_ops[offset+range].handle == -1) + if (map->unmap_ops[offset + range].handle == + INVALID_GRANT_HANDLE) break; range++; } |
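
Stepping back to the Xen event-channel changes earlier in this diff: do_mask()/do_unmask() replace the old test_and_set_mask() dance with a per-channel mask_reason bitmask, so the channel is physically masked when the first reason is recorded and unmasked only when the last reason is cleared. A simplified userspace model of that bookkeeping, assuming invented names (demo_channel, hw_mask/hw_unmask) and a pthread mutex in place of the kernel's per-info spinlock:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define REASON_EXPLICIT    0x01   /* mirrors EVT_MASK_REASON_EXPLICIT */
#define REASON_TEMPORARY   0x02   /* mirrors EVT_MASK_REASON_TEMPORARY */
#define REASON_EOI_PENDING 0x04   /* mirrors EVT_MASK_REASON_EOI_PENDING */

struct demo_channel {
    pthread_mutex_t lock;
    uint8_t mask_reason;          /* why the channel is currently masked */
};

/* Stand-ins for the real mask/unmask hardware accesses. */
static void hw_mask(void)   { printf("  -> mask line\n"); }
static void hw_unmask(void) { printf("  -> unmask line\n"); }

static void demo_mask(struct demo_channel *ch, uint8_t reason)
{
    pthread_mutex_lock(&ch->lock);
    if (!ch->mask_reason)         /* first reason: actually mask */
        hw_mask();
    ch->mask_reason |= reason;
    pthread_mutex_unlock(&ch->lock);
}

static void demo_unmask(struct demo_channel *ch, uint8_t reason)
{
    pthread_mutex_lock(&ch->lock);
    ch->mask_reason &= ~reason;
    if (!ch->mask_reason)         /* last reason cleared: actually unmask */
        hw_unmask();
    pthread_mutex_unlock(&ch->lock);
}

int main(void)
{
    struct demo_channel ch = { .lock = PTHREAD_MUTEX_INITIALIZER, .mask_reason = 0 };

    demo_mask(&ch, REASON_EXPLICIT);     /* masks the line */
    demo_mask(&ch, REASON_TEMPORARY);    /* already masked, only records */
    demo_unmask(&ch, REASON_TEMPORARY);  /* still masked for EXPLICIT */
    demo_unmask(&ch, REASON_EXPLICIT);   /* last reason gone, unmasks */
    return 0;
}

With reasons tracked separately, a temporary mask (for example around rebinding an event channel to another CPU) no longer risks unmasking a channel that is still masked for EOI or by an explicit irq_mask(), which is the race the events_base.c hunks above address.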