Diffstat (limited to 'drivers')
627 files changed, 7799 insertions, 3872 deletions
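The largest ACPI change in this set (drivers/acpi/scan.c, below) replaces the bare instance_no counter kept per bus ID with a struct ida, so instance numbers are recycled when devices go away and are capped at ACPI_MAX_DEVICE_INSTANCES (4096). A minimal sketch of that allocate/release pattern, assuming kernel context — the example_* names are hypothetical stand-ins for struct acpi_device_bus_id; only the IDA helpers themselves come from the patch:

#include <linux/idr.h>
#include <linux/slab.h>

/* Hypothetical stand-in for struct acpi_device_bus_id in the hunks below. */
struct example_bus_id {
	const char *bus_id;
	struct ida instance_ida;	/* one allocator per bus ID string */
};

static int example_alloc_instance(struct example_bus_id *b)
{
	/* Lowest free id in [0, 4096), or a negative errno when exhausted. */
	return ida_simple_get(&b->instance_ida, 0, 4096, GFP_KERNEL);
}

static void example_release_instance(struct example_bus_id *b, int id)
{
	ida_simple_remove(&b->instance_ida, id);
	/* Drop the bus ID entry once its last instance is gone, mirroring
	 * the ida_is_empty() check added to acpi_device_del() below. */
	if (ida_is_empty(&b->instance_ida))
		kfree(b);
}

With the old plain counter, removing any device decremented one shared count, so out-of-order removal could hand a still-used number to the next device; the IDA makes allocation and release exact per instance.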
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c index 3f045b5953b2..a0c1a665dfc1 100644 --- a/drivers/acpi/acpica/nsaccess.c +++ b/drivers/acpi/acpica/nsaccess.c @@ -99,13 +99,12 @@ acpi_status acpi_ns_root_initialize(void) * just create and link the new node(s) here. */ new_node = - ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_namespace_node)); + acpi_ns_create_node(*ACPI_CAST_PTR(u32, init_val->name)); if (!new_node) { status = AE_NO_MEMORY; goto unlock_and_exit; } - ACPI_COPY_NAMESEG(new_node->name.ascii, init_val->name); new_node->descriptor_type = ACPI_DESC_TYPE_NAMED; new_node->type = init_val->type; diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index e6a5d997241c..cb8f70842249 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -9,6 +9,8 @@ #ifndef _ACPI_INTERNAL_H_ #define _ACPI_INTERNAL_H_ +#include <linux/idr.h> + #define PREFIX "ACPI: " int early_acpi_osi_init(void); @@ -96,9 +98,11 @@ void acpi_scan_table_handler(u32 event, void *table, void *context); extern struct list_head acpi_bus_id_list; +#define ACPI_MAX_DEVICE_INSTANCES 4096 + struct acpi_device_bus_id { const char *bus_id; - unsigned int instance_no; + struct ida instance_ida; struct list_head node; }; diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index d93e400940a3..4e2d76b8b697 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -29,6 +29,7 @@ */ #ifdef CONFIG_X86 #include <asm/apic.h> +#include <asm/cpu.h> #endif #define _COMPONENT ACPI_PROCESSOR_COMPONENT @@ -541,6 +542,10 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index) wait_for_freeze(); } else return -ENODEV; + +#if defined(CONFIG_X86) && defined(CONFIG_HOTPLUG_CPU) + cond_wakeup_cpu0(); +#endif } /* Never reached */ diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index a184529d8fa4..6efe7edd7b1e 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -479,9 +479,8 @@ static void acpi_device_del(struct acpi_device *device) list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) if (!strcmp(acpi_device_bus_id->bus_id, acpi_device_hid(device))) { - if (acpi_device_bus_id->instance_no > 0) - acpi_device_bus_id->instance_no--; - else { + ida_simple_remove(&acpi_device_bus_id->instance_ida, device->pnp.instance_no); + if (ida_is_empty(&acpi_device_bus_id->instance_ida)) { list_del(&acpi_device_bus_id->node); kfree_const(acpi_device_bus_id->bus_id); kfree(acpi_device_bus_id); @@ -631,6 +630,21 @@ static struct acpi_device_bus_id *acpi_device_bus_id_match(const char *dev_id) return NULL; } +static int acpi_device_set_name(struct acpi_device *device, + struct acpi_device_bus_id *acpi_device_bus_id) +{ + struct ida *instance_ida = &acpi_device_bus_id->instance_ida; + int result; + + result = ida_simple_get(instance_ida, 0, ACPI_MAX_DEVICE_INSTANCES, GFP_KERNEL); + if (result < 0) + return result; + + device->pnp.instance_no = result; + dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, result); + return 0; +} + int acpi_device_add(struct acpi_device *device, void (*release)(struct device *)) { @@ -665,7 +679,9 @@ int acpi_device_add(struct acpi_device *device, acpi_device_bus_id = acpi_device_bus_id_match(acpi_device_hid(device)); if (acpi_device_bus_id) { - acpi_device_bus_id->instance_no++; + result = acpi_device_set_name(device, acpi_device_bus_id); + if (result) + goto err_unlock; } else { acpi_device_bus_id = kzalloc(sizeof(*acpi_device_bus_id), GFP_KERNEL); @@ -681,9 +697,16 @@ int 
acpi_device_add(struct acpi_device *device, goto err_unlock; } + ida_init(&acpi_device_bus_id->instance_ida); + + result = acpi_device_set_name(device, acpi_device_bus_id); + if (result) { + kfree(acpi_device_bus_id); + goto err_unlock; + } + list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list); } - dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, acpi_device_bus_id->instance_no); if (device->parent) list_add_tail(&device->node, &device->parent->children); @@ -1647,6 +1670,8 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, device_initialize(&device->dev); dev_set_uevent_suppress(&device->dev, true); acpi_init_coherency(device); + /* Assume there are unmet deps to start with. */ + device->dep_unmet = 1; } void acpi_device_add_finalize(struct acpi_device *device) @@ -1910,6 +1935,8 @@ static void acpi_scan_dep_init(struct acpi_device *adev) { struct acpi_dep_data *dep; + adev->dep_unmet = 0; + mutex_lock(&acpi_dep_list_lock); list_for_each_entry(dep, &acpi_dep_list, node) { @@ -1957,7 +1984,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep, return AE_CTRL_DEPTH; acpi_scan_init_hotplug(device); - if (!check_dep) + /* + * If check_dep is true at this point, the device has no dependencies, + * or the creation of the device object would have been postponed above. + */ + if (check_dep) + device->dep_unmet = 0; + else acpi_scan_dep_init(device); out: diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c index e48690a006a4..9d581045acff 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c @@ -780,7 +780,7 @@ acpi_status acpi_os_table_override(struct acpi_table_header *existing_table, } /* - * acpi_table_init() + * acpi_locate_initial_tables() * * find RSDP, find and checksum SDT/XSDT. 
* checksum all tables, print SDT/XSDT @@ -788,7 +788,7 @@ acpi_status acpi_os_table_override(struct acpi_table_header *existing_table, * result: sdt_entry[] is initialized */ -int __init acpi_table_init(void) +int __init acpi_locate_initial_tables(void) { acpi_status status; @@ -803,9 +803,45 @@ int __init acpi_table_init(void) status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0); if (ACPI_FAILURE(status)) return -EINVAL; - acpi_table_initrd_scan(); + return 0; +} + +void __init acpi_reserve_initial_tables(void) +{ + int i; + + for (i = 0; i < ACPI_MAX_TABLES; i++) { + struct acpi_table_desc *table_desc = &initial_tables[i]; + u64 start = table_desc->address; + u64 size = table_desc->length; + + if (!start || !size) + break; + + pr_info("Reserving %4s table memory at [mem 0x%llx-0x%llx]\n", + table_desc->signature.ascii, start, start + size - 1); + + memblock_reserve(start, size); + } +} + +void __init acpi_table_init_complete(void) +{ + acpi_table_initrd_scan(); check_multiple_madt(); +} + +int __init acpi_table_init(void) +{ + int ret; + + ret = acpi_locate_initial_tables(); + if (ret) + return ret; + + acpi_table_init_complete(); + return 0; } diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 811d298637cb..83cd4c95faf0 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -147,6 +147,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = { }, }, { + .callback = video_detect_force_vendor, .ident = "Sony VPCEH3U1E", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index b574cce98dc3..422753d52244 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -2054,7 +2054,7 @@ static int eni_send(struct atm_vcc *vcc,struct sk_buff *skb) } submitted++; ATM_SKB(skb)->vcc = vcc; - tasklet_disable(&ENI_DEV(vcc->dev)->task); + tasklet_disable_in_atomic(&ENI_DEV(vcc->dev)->task); res = do_tx(skb); tasklet_enable(&ENI_DEV(vcc->dev)->task); if (res == enq_ok) return 0; diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c index f43430e9dcee..24fd6f369ebe 100644 --- a/drivers/auxdisplay/charlcd.c +++ b/drivers/auxdisplay/charlcd.c @@ -470,12 +470,14 @@ static ssize_t charlcd_write(struct file *file, const char __user *buf, char c; for (; count-- > 0; (*ppos)++, tmp++) { - if (!in_interrupt() && (((count + 1) & 0x1f) == 0)) + if (((count + 1) & 0x1f) == 0) { /* - * let's be a little nice with other processes - * that need some CPU + * charlcd_write() is invoked as a VFS->write() callback + * and as such it is always invoked from preemptible + * context and may sleep. 
*/ - schedule(); + cond_resched(); + } if (get_user(c, tmp)) return -EFAULT; @@ -537,12 +539,8 @@ static void charlcd_puts(struct charlcd *lcd, const char *s) int count = strlen(s); for (; count-- > 0; tmp++) { - if (!in_interrupt() && (((count + 1) & 0x1f) == 0)) - /* - * let's be a little nice with other processes - * that need some CPU - */ - schedule(); + if (((count + 1) & 0x1f) == 0) + cond_resched(); charlcd_write_char(lcd, *tmp); } diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 9179825ff646..37a5e5f8b221 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -97,6 +97,9 @@ static void deferred_probe_work_func(struct work_struct *work) get_device(dev); + kfree(dev->p->deferred_probe_reason); + dev->p->deferred_probe_reason = NULL; + /* * Drop the mutex while probing each device; the probe path may * manipulate the deferred list @@ -289,14 +292,16 @@ int driver_deferred_probe_check_state(struct device *dev) static void deferred_probe_timeout_work_func(struct work_struct *work) { - struct device_private *private, *p; + struct device_private *p; driver_deferred_probe_timeout = 0; driver_deferred_probe_trigger(); flush_work(&deferred_probe_work); - list_for_each_entry_safe(private, p, &deferred_probe_pending_list, deferred_probe) - dev_info(private->device, "deferred probe pending\n"); + mutex_lock(&deferred_probe_mutex); + list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe) + dev_info(p->device, "deferred probe pending\n"); + mutex_unlock(&deferred_probe_mutex); wake_up_all(&probe_timeout_waitqueue); } static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func); diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index a46a7e30881b..fe1dad68aee4 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -305,7 +305,7 @@ static int rpm_get_suppliers(struct device *dev) return 0; } -static void rpm_put_suppliers(struct device *dev) +static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend) { struct device_link *link; @@ -313,10 +313,30 @@ static void rpm_put_suppliers(struct device *dev) device_links_read_lock_held()) { while (refcount_dec_not_one(&link->rpm_active)) - pm_runtime_put(link->supplier); + pm_runtime_put_noidle(link->supplier); + + if (try_to_suspend) + pm_request_idle(link->supplier); } } +static void rpm_put_suppliers(struct device *dev) +{ + __rpm_put_suppliers(dev, true); +} + +static void rpm_suspend_suppliers(struct device *dev) +{ + struct device_link *link; + int idx = device_links_read_lock(); + + list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, + device_links_read_lock_held()) + pm_request_idle(link->supplier); + + device_links_read_unlock(idx); +} + /** * __rpm_callback - Run a given runtime PM callback for a given device. * @cb: Runtime PM callback to run. 
@@ -344,8 +364,10 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev) idx = device_links_read_lock(); retval = rpm_get_suppliers(dev); - if (retval) + if (retval) { + rpm_put_suppliers(dev); goto fail; + } device_links_read_unlock(idx); } @@ -368,9 +390,9 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev) || (dev->power.runtime_status == RPM_RESUMING && retval))) { idx = device_links_read_lock(); - fail: - rpm_put_suppliers(dev); + __rpm_put_suppliers(dev, false); +fail: device_links_read_unlock(idx); } @@ -642,8 +664,11 @@ static int rpm_suspend(struct device *dev, int rpmflags) goto out; } + if (dev->power.irq_safe) + goto out; + /* Maybe the parent is now able to suspend. */ - if (parent && !parent->power.ignore_children && !dev->power.irq_safe) { + if (parent && !parent->power.ignore_children) { spin_unlock(&dev->power.lock); spin_lock(&parent->power.lock); @@ -652,6 +677,14 @@ static int rpm_suspend(struct device *dev, int rpmflags) spin_lock(&dev->power.lock); } + /* Maybe the suppliers are now able to suspend. */ + if (dev->power.links_count > 0) { + spin_unlock_irq(&dev->power.lock); + + rpm_suspend_suppliers(dev); + + spin_lock_irq(&dev->power.lock); + } out: trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval); @@ -1657,8 +1690,8 @@ void pm_runtime_get_suppliers(struct device *dev) device_links_read_lock_held()) if (link->flags & DL_FLAG_PM_RUNTIME) { link->supplier_preactivated = true; - refcount_inc(&link->rpm_active); pm_runtime_get_sync(link->supplier); + refcount_inc(&link->rpm_active); } device_links_read_unlock(idx); @@ -1671,6 +1704,8 @@ void pm_runtime_get_suppliers(struct device *dev) void pm_runtime_put_suppliers(struct device *dev) { struct device_link *link; + unsigned long flags; + bool put; int idx; idx = device_links_read_lock(); @@ -1679,7 +1714,11 @@ void pm_runtime_put_suppliers(struct device *dev) device_links_read_lock_held()) if (link->supplier_preactivated) { link->supplier_preactivated = false; - if (refcount_dec_not_one(&link->rpm_active)) + spin_lock_irqsave(&dev->power.lock, flags); + put = pm_runtime_status_suspended(dev) && + refcount_dec_not_one(&link->rpm_active); + spin_unlock_irqrestore(&dev->power.lock, flags); + if (put) pm_runtime_put(link->supplier); } diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c index d6c821d48090..51bfd7737552 100644 --- a/drivers/block/null_blk/main.c +++ b/drivers/block/null_blk/main.c @@ -1369,10 +1369,13 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector, } if (dev->zoned) - cmd->error = null_process_zoned_cmd(cmd, op, - sector, nr_sectors); + sts = null_process_zoned_cmd(cmd, op, sector, nr_sectors); else - cmd->error = null_process_cmd(cmd, op, sector, nr_sectors); + sts = null_process_cmd(cmd, op, sector, nr_sectors); + + /* Do not overwrite errors (e.g. timeout errors) */ + if (cmd->error == BLK_STS_OK) + cmd->error = sts; out: nullb_complete_cmd(cmd); @@ -1451,8 +1454,20 @@ static bool should_requeue_request(struct request *rq) static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res) { + struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq); + pr_info("rq %p timed out\n", rq); - blk_mq_complete_request(rq); + + /* + * If the device is marked as blocking (i.e. memory backed or zoned + * device), the submission path may be blocked waiting for resources + * and cause real timeouts. For these real timeouts, the submission + * path will complete the request using blk_mq_complete_request(). 
+ * Only fake timeouts need to execute blk_mq_complete_request() here. */ + cmd->error = BLK_STS_TIMEOUT; + if (cmd->fake_timeout) + blk_mq_complete_request(rq); return BLK_EH_DONE; } @@ -1473,6 +1488,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx, cmd->rq = bd->rq; cmd->error = BLK_STS_OK; cmd->nq = nq; + cmd->fake_timeout = should_timeout_request(bd->rq); blk_mq_start_request(bd->rq); @@ -1489,7 +1505,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx, return BLK_STS_OK; } } - if (should_timeout_request(bd->rq)) + if (cmd->fake_timeout) return BLK_STS_OK; return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq)); diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h index 83504f3cc9d6..4876d5adb12d 100644 --- a/drivers/block/null_blk/null_blk.h +++ b/drivers/block/null_blk/null_blk.h @@ -22,6 +22,7 @@ struct nullb_cmd { blk_status_t error; struct nullb_queue *nq; struct hrtimer timer; + bool fake_timeout; }; struct nullb_queue { diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 1cdf09ff67b6..14e452896d04 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -891,7 +891,7 @@ next: out: for (i = last_map; i < num; i++) { /* Don't zap current batch's valid persistent grants. */ - if(i >= last_map + segs_to_map) + if(i >= map_until) pages[i]->persistent_gnt = NULL; pages[i]->handle = BLKBACK_INVALID_HANDLE; } diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 52683fd22e05..5cbfbd948f67 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -4849,8 +4849,8 @@ static int btusb_probe(struct usb_interface *intf, data->diag = NULL; } - if (!enable_autosuspend) - usb_disable_autosuspend(data->udev); + if (enable_autosuspend) + usb_enable_autosuspend(data->udev); err = hci_register_dev(hdev); if (err < 0) @@ -4910,9 +4910,6 @@ static void btusb_disconnect(struct usb_interface *intf) gpiod_put(data->reset_gpio); hci_free_dev(hdev); - - if (!enable_autosuspend) - usb_enable_autosuspend(data->udev); } #ifdef CONFIG_PM diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c index b20fdcbd035b..fd87a59837fa 100644 --- a/drivers/bus/moxtet.c +++ b/drivers/bus/moxtet.c @@ -2,7 +2,7 @@ /* * Turris Mox module configuration bus driver * - * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz> + * Copyright (C) 2019 Marek Behún <kabel@kernel.org> */ #include <dt-bindings/bus/moxtet.h> @@ -879,6 +879,6 @@ static void __exit moxtet_exit(void) } module_exit(moxtet_exit); -MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>"); +MODULE_AUTHOR("Marek Behun <kabel@kernel.org>"); MODULE_DESCRIPTION("CZ.NIC's Turris Mox module configuration bus"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c index dd9e7343a5e3..ea0424922de7 100644 --- a/drivers/bus/mvebu-mbus.c +++ b/drivers/bus/mvebu-mbus.c @@ -618,7 +618,7 @@ mvebu_mbus_find_bridge_hole(uint64_t *start, uint64_t *end) * This part of the memory is above 4 GB, so we don't * care for the MBus bridge hole.
*/ - if (reg_start >= 0x100000000ULL) + if ((u64)reg_start >= 0x100000000ULL) continue; /* diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c index b040447575ad..dcfb32ee5cb6 100644 --- a/drivers/bus/omap_l3_noc.c +++ b/drivers/bus/omap_l3_noc.c @@ -285,7 +285,7 @@ static int omap_l3_probe(struct platform_device *pdev) */ l3->debug_irq = platform_get_irq(pdev, 0); ret = devm_request_irq(l3->dev, l3->debug_irq, l3_interrupt_handler, - 0x0, "l3-dbg-irq", l3); + IRQF_NO_THREAD, "l3-dbg-irq", l3); if (ret) { dev_err(l3->dev, "request_irq failed for %d\n", l3->debug_irq); @@ -294,7 +294,7 @@ static int omap_l3_probe(struct platform_device *pdev) l3->app_irq = platform_get_irq(pdev, 1); ret = devm_request_irq(l3->dev, l3->app_irq, l3_interrupt_handler, - 0x0, "l3-app-irq", l3); + IRQF_NO_THREAD, "l3-app-irq", l3); if (ret) dev_err(l3->dev, "request_irq failed for %d\n", l3->app_irq); diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index a27d751cf219..3d74f237f005 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -3053,7 +3053,9 @@ static int sysc_remove(struct platform_device *pdev) pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); - reset_control_assert(ddata->rsts); + + if (!reset_control_status(ddata->rsts)) + reset_control_assert(ddata->rsts); unprepare: sysc_unprepare(ddata); diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig index a086dd34f932..4f501e4842ab 100644 --- a/drivers/char/agp/Kconfig +++ b/drivers/char/agp/Kconfig @@ -125,7 +125,7 @@ config AGP_HP_ZX1 config AGP_PARISC tristate "HP Quicksilver AGP support" - depends on AGP && PARISC && 64BIT + depends on AGP && PARISC && 64BIT && IOMMU_SBA help This option gives you AGP GART support for the HP Quicksilver AGP bus adapter on HP PA-RISC machines (Ok, just on the C8000 diff --git a/drivers/char/hw_random/ba431-rng.c b/drivers/char/hw_random/ba431-rng.c index 410b50b05e21..5b7ca0416490 100644 --- a/drivers/char/hw_random/ba431-rng.c +++ b/drivers/char/hw_random/ba431-rng.c @@ -170,7 +170,6 @@ static int ba431_trng_init(struct hwrng *rng) static int ba431_trng_probe(struct platform_device *pdev) { struct ba431_trng *ba431; - struct resource *res; int ret; ba431 = devm_kzalloc(&pdev->dev, sizeof(*ba431), GFP_KERNEL); @@ -179,8 +178,7 @@ static int ba431_trng_probe(struct platform_device *pdev) ba431->dev = &pdev->dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ba431->base = devm_ioremap_resource(&pdev->dev, res); + ba431->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ba431->base)) return PTR_ERR(ba431->base); @@ -193,7 +191,7 @@ static int ba431_trng_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ba431); - ret = hwrng_register(&ba431->rng); + ret = devm_hwrng_register(&pdev->dev, &ba431->rng); if (ret) { dev_err(&pdev->dev, "BA431 registration failed (%d)\n", ret); return ret; @@ -204,15 +202,6 @@ static int ba431_trng_probe(struct platform_device *pdev) return 0; } -static int ba431_trng_remove(struct platform_device *pdev) -{ - struct ba431_trng *ba431 = platform_get_drvdata(pdev); - - hwrng_unregister(&ba431->rng); - - return 0; -} - static const struct of_device_id ba431_trng_dt_ids[] = { { .compatible = "silex-insight,ba431-rng", .data = NULL }, { /* sentinel */ } @@ -225,7 +214,6 @@ static struct platform_driver ba431_trng_driver = { .of_match_table = ba431_trng_dt_ids, }, .probe = ba431_trng_probe, - .remove = ba431_trng_remove, }; module_platform_driver(ba431_trng_driver); diff --git 
a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c index 1a7c43b43c6b..e7dd457e9b22 100644 --- a/drivers/char/hw_random/bcm2835-rng.c +++ b/drivers/char/hw_random/bcm2835-rng.c @@ -13,6 +13,7 @@ #include <linux/platform_device.h> #include <linux/printk.h> #include <linux/clk.h> +#include <linux/reset.h> #define RNG_CTRL 0x0 #define RNG_STATUS 0x4 @@ -32,6 +33,7 @@ struct bcm2835_rng_priv { void __iomem *base; bool mask_interrupts; struct clk *clk; + struct reset_control *reset; }; static inline struct bcm2835_rng_priv *to_rng_priv(struct hwrng *rng) @@ -88,11 +90,13 @@ static int bcm2835_rng_init(struct hwrng *rng) int ret = 0; u32 val; - if (!IS_ERR(priv->clk)) { - ret = clk_prepare_enable(priv->clk); - if (ret) - return ret; - } + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + + ret = reset_control_reset(priv->reset); + if (ret) + return ret; if (priv->mask_interrupts) { /* mask the interrupt */ @@ -115,8 +119,7 @@ static void bcm2835_rng_cleanup(struct hwrng *rng) /* disable rng hardware */ rng_writel(priv, 0, RNG_CTRL); - if (!IS_ERR(priv->clk)) - clk_disable_unprepare(priv->clk); + clk_disable_unprepare(priv->clk); } struct bcm2835_rng_of_data { @@ -155,9 +158,13 @@ static int bcm2835_rng_probe(struct platform_device *pdev) return PTR_ERR(priv->base); /* Clock is optional on most platforms */ - priv->clk = devm_clk_get(dev, NULL); - if (PTR_ERR(priv->clk) == -EPROBE_DEFER) - return -EPROBE_DEFER; + priv->clk = devm_clk_get_optional(dev, NULL); + if (IS_ERR(priv->clk)) + return PTR_ERR(priv->clk); + + priv->reset = devm_reset_control_get_optional_exclusive(dev, NULL); + if (IS_ERR(priv->reset)) + return PTR_ERR(priv->reset); priv->rng.name = pdev->name; priv->rng.init = bcm2835_rng_init; diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c index 7a293f2147a0..302ffa354c2f 100644 --- a/drivers/char/hw_random/cctrng.c +++ b/drivers/char/hw_random/cctrng.c @@ -486,7 +486,6 @@ static void cc_trng_clk_fini(struct cctrng_drvdata *drvdata) static int cctrng_probe(struct platform_device *pdev) { - struct resource *req_mem_cc_regs = NULL; struct cctrng_drvdata *drvdata; struct device *dev = &pdev->dev; int rc = 0; @@ -510,27 +509,16 @@ static int cctrng_probe(struct platform_device *pdev) drvdata->circ.buf = (char *)drvdata->data_buf; - /* Get device resources */ - /* First CC registers space */ - req_mem_cc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - /* Map registers space */ - drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs); + drvdata->cc_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(drvdata->cc_base)) { dev_err(dev, "Failed to ioremap registers"); return PTR_ERR(drvdata->cc_base); } - dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name, - req_mem_cc_regs); - dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n", - &req_mem_cc_regs->start, drvdata->cc_base); - /* Then IRQ */ irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "Failed getting IRQ resource\n"); + if (irq < 0) return irq; - } /* parse sampling rate from device tree */ rc = cc_trng_parse_sampling_ratio(drvdata); @@ -585,7 +573,7 @@ static int cctrng_probe(struct platform_device *pdev) atomic_set(&drvdata->pending_hw, 1); /* registration of the hwrng device */ - rc = hwrng_register(&drvdata->rng); + rc = devm_hwrng_register(dev, &drvdata->rng); if (rc) { dev_err(dev, "Could not register hwrng device.\n"); goto post_pm_err; @@ -618,8 +606,6 @@ static int cctrng_remove(struct platform_device 
*pdev) dev_dbg(dev, "Releasing cctrng resources...\n"); - hwrng_unregister(&drvdata->rng); - cc_trng_pm_fini(drvdata); cc_trng_clk_fini(drvdata); diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 8c1c47dd9f46..adb3c2bd7783 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -396,7 +396,7 @@ static ssize_t hwrng_attr_selected_show(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, "%d\n", cur_rng_set_by_user); + return sysfs_emit(buf, "%d\n", cur_rng_set_by_user); } static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR, diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c index eb7db27f9f19..d740b8814bf3 100644 --- a/drivers/char/hw_random/intel-rng.c +++ b/drivers/char/hw_random/intel-rng.c @@ -25,13 +25,13 @@ */ #include <linux/hw_random.h> +#include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/stop_machine.h> #include <linux/delay.h> #include <linux/slab.h> -#include <asm/io.h> #define PFX KBUILD_MODNAME ": " diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index 5cc5fc504968..cede9f159102 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c @@ -30,8 +30,7 @@ #include <linux/of_address.h> #include <linux/interrupt.h> #include <linux/clk.h> - -#include <asm/io.h> +#include <linux/io.h> #define RNG_REG_STATUS_RDY (1 << 0) @@ -378,16 +377,13 @@ MODULE_DEVICE_TABLE(of, omap_rng_of_match); static int of_get_omap_rng_device_details(struct omap_rng_dev *priv, struct platform_device *pdev) { - const struct of_device_id *match; struct device *dev = &pdev->dev; int irq, err; - match = of_match_device(of_match_ptr(omap_rng_of_match), dev); - if (!match) { - dev_err(dev, "no compatible OF match\n"); - return -EINVAL; - } - priv->pdata = match->data; + priv->pdata = of_device_get_match_data(dev); + if (!priv->pdata) + return -ENODEV; + if (of_device_is_compatible(dev->of_node, "ti,omap4-rng") || of_device_is_compatible(dev->of_node, "inside-secure,safexcel-eip76")) { diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c index e8210c1715cf..99c8bd0859a1 100644 --- a/drivers/char/hw_random/pic32-rng.c +++ b/drivers/char/hw_random/pic32-rng.c @@ -96,7 +96,7 @@ static int pic32_rng_probe(struct platform_device *pdev) priv->rng.name = pdev->name; priv->rng.read = pic32_rng_read; - ret = hwrng_register(&priv->rng); + ret = devm_hwrng_register(&pdev->dev, &priv->rng); if (ret) goto err_register; @@ -113,7 +113,6 @@ static int pic32_rng_remove(struct platform_device *pdev) { struct pic32_rng *rng = platform_get_drvdata(pdev); - hwrng_unregister(&rng->rng); writel(0, rng->base + RNGCON); clk_disable_unprepare(rng->clk); return 0; diff --git a/drivers/char/hw_random/xiphera-trng.c b/drivers/char/hw_random/xiphera-trng.c index 7bdab8c8a6a8..2a9fea72b2e0 100644 --- a/drivers/char/hw_random/xiphera-trng.c +++ b/drivers/char/hw_random/xiphera-trng.c @@ -63,14 +63,12 @@ static int xiphera_trng_probe(struct platform_device *pdev) int ret; struct xiphera_trng *trng; struct device *dev = &pdev->dev; - struct resource *res; trng = devm_kzalloc(dev, sizeof(*trng), GFP_KERNEL); if (!trng) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - trng->mem = devm_ioremap_resource(dev, res); + trng->mem = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(trng->mem)) return PTR_ERR(trng->mem); diff --git 
a/drivers/char/random.c b/drivers/char/random.c index 0fe9e200e4c8..605969ed0f96 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -500,7 +500,6 @@ struct entropy_store { unsigned short add_ptr; unsigned short input_rotate; int entropy_count; - unsigned int initialized:1; unsigned int last_data_init:1; __u8 last_data[EXTRACT_SIZE]; }; @@ -660,7 +659,7 @@ static void process_random_ready_list(void) */ static void credit_entropy_bits(struct entropy_store *r, int nbits) { - int entropy_count, orig, has_initialized = 0; + int entropy_count, orig; const int pool_size = r->poolinfo->poolfracbits; int nfrac = nbits << ENTROPY_SHIFT; @@ -717,23 +716,14 @@ retry: if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) goto retry; - if (has_initialized) { - r->initialized = 1; - kill_fasync(&fasync, SIGIO, POLL_IN); - } - trace_credit_entropy_bits(r->name, nbits, entropy_count >> ENTROPY_SHIFT, _RET_IP_); if (r == &input_pool) { int entropy_bits = entropy_count >> ENTROPY_SHIFT; - if (crng_init < 2) { - if (entropy_bits < 128) - return; + if (crng_init < 2 && entropy_bits >= 128) crng_reseed(&primary_crng, r); - entropy_bits = ENTROPY_BITS(r); - } } } @@ -819,7 +809,7 @@ static bool __init crng_init_try_arch_early(struct crng_state *crng) static void __maybe_unused crng_initialize_secondary(struct crng_state *crng) { - memcpy(&crng->state[0], "expand 32-byte k", 16); + chacha_init_consts(crng->state); _get_random_bytes(&crng->state[4], sizeof(__u32) * 12); crng_init_try_arch(crng); crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; @@ -827,7 +817,7 @@ static void __maybe_unused crng_initialize_secondary(struct crng_state *crng) static void __init crng_initialize_primary(struct crng_state *crng) { - memcpy(&crng->state[0], "expand 32-byte k", 16); + chacha_init_consts(crng->state); _extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0); if (crng_init_try_arch_early(crng) && trust_cpu) { invalidate_batched_entropy(); @@ -1372,8 +1362,7 @@ retry: } /* - * This function does the actual extraction for extract_entropy and - * extract_entropy_user. + * This function does the actual extraction for extract_entropy. * * Note: we assume that .poolwords is a multiple of 16 words. */ diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c index 3633ed70f48f..1b18ce5ebab1 100644 --- a/drivers/char/tpm/eventlog/acpi.c +++ b/drivers/char/tpm/eventlog/acpi.c @@ -41,6 +41,27 @@ struct acpi_tcpa { }; }; +/* Check that the given log is indeed a TPM2 log. 
*/ +static bool tpm_is_tpm2_log(void *bios_event_log, u64 len) +{ + struct tcg_efi_specid_event_head *efispecid; + struct tcg_pcr_event *event_header; + int n; + + if (len < sizeof(*event_header)) + return false; + len -= sizeof(*event_header); + event_header = bios_event_log; + + if (len < sizeof(*efispecid)) + return false; + efispecid = (struct tcg_efi_specid_event_head *)event_header->event; + + n = memcmp(efispecid->signature, TCG_SPECID_SIG, + sizeof(TCG_SPECID_SIG)); + return n == 0; +} + /* read binary bios log */ int tpm_read_log_acpi(struct tpm_chip *chip) { @@ -52,6 +73,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip) struct acpi_table_tpm2 *tbl; struct acpi_tpm2_phy *tpm2_phy; int format; + int ret; log = &chip->log; @@ -112,6 +134,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip) log->bios_event_log_end = log->bios_event_log + len; + ret = -EIO; virt = acpi_os_map_iomem(start, len); if (!virt) goto err; @@ -119,11 +142,19 @@ int tpm_read_log_acpi(struct tpm_chip *chip) memcpy_fromio(log->bios_event_log, virt, len); acpi_os_unmap_iomem(virt, len); + + if (chip->flags & TPM_CHIP_FLAG_TPM2 && + !tpm_is_tpm2_log(log->bios_event_log, len)) { + /* try EFI log next */ + ret = -ENODEV; + goto err; + } + return format; err: kfree(log->bios_event_log); log->bios_event_log = NULL; - return -EIO; + return ret; } diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c index 7460f230bae4..8512ec76d526 100644 --- a/drivers/char/tpm/eventlog/common.c +++ b/drivers/char/tpm/eventlog/common.c @@ -107,6 +107,9 @@ void tpm_bios_log_setup(struct tpm_chip *chip) int log_version; int rc = 0; + if (chip->flags & TPM_CHIP_FLAG_VIRTUAL) + return; + rc = tpm_read_log(chip); if (rc < 0) return; diff --git a/drivers/char/tpm/eventlog/efi.c b/drivers/char/tpm/eventlog/efi.c index 35229e5143ca..e6cb9d525e30 100644 --- a/drivers/char/tpm/eventlog/efi.c +++ b/drivers/char/tpm/eventlog/efi.c @@ -17,6 +17,7 @@ int tpm_read_log_efi(struct tpm_chip *chip) { struct efi_tcg2_final_events_table *final_tbl = NULL; + int final_events_log_size = efi_tpm_final_log_size; struct linux_efi_tpm_eventlog *log_tbl; struct tpm_bios_log *log; u32 log_size; @@ -66,12 +67,12 @@ int tpm_read_log_efi(struct tpm_chip *chip) ret = tpm_log_version; if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR || - efi_tpm_final_log_size == 0 || + final_events_log_size == 0 || tpm_log_version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) goto out; final_tbl = memremap(efi.tpm_final_log, - sizeof(*final_tbl) + efi_tpm_final_log_size, + sizeof(*final_tbl) + final_events_log_size, MEMREMAP_WB); if (!final_tbl) { pr_err("Could not map UEFI TPM final log\n"); @@ -80,10 +81,18 @@ int tpm_read_log_efi(struct tpm_chip *chip) goto out; } - efi_tpm_final_log_size -= log_tbl->final_events_preboot_size; + /* + * The 'final events log' size excludes the 'final events preboot log' + * at its beginning. + */ + final_events_log_size -= log_tbl->final_events_preboot_size; + /* + * Allocate memory for the 'combined log' where we will append the + * 'final events log' to. + */ tmp = krealloc(log->bios_event_log, - log_size + efi_tpm_final_log_size, + log_size + final_events_log_size, GFP_KERNEL); if (!tmp) { kfree(log->bios_event_log); @@ -94,15 +103,19 @@ int tpm_read_log_efi(struct tpm_chip *chip) log->bios_event_log = tmp; /* - * Copy any of the final events log that didn't also end up in the - * main log. Events can be logged in both if events are generated + * Append any of the 'final events log' that didn't also end up in the + * 'main log'. 
Events can be logged in both if events are generated * between GetEventLog() and ExitBootServices(). */ memcpy((void *)log->bios_event_log + log_size, final_tbl->events + log_tbl->final_events_preboot_size, - efi_tpm_final_log_size); + final_events_log_size); + /* + * The size of the 'combined log' is the size of the 'main log' plus + * the size of the 'final events log'. + */ log->bios_event_log_end = log->bios_event_log + - log_size + efi_tpm_final_log_size; + log_size + final_events_log_size; out: memunmap(final_tbl); diff --git a/drivers/char/tpm/tpm_tis_i2c_cr50.c b/drivers/char/tpm/tpm_tis_i2c_cr50.c index ec9a65e7887d..f19c227d20f4 100644 --- a/drivers/char/tpm/tpm_tis_i2c_cr50.c +++ b/drivers/char/tpm/tpm_tis_i2c_cr50.c @@ -483,6 +483,7 @@ static int tpm_cr50_i2c_tis_recv(struct tpm_chip *chip, u8 *buf, size_t buf_len) expected = be32_to_cpup((__be32 *)(buf + 2)); if (expected > buf_len) { dev_err(&chip->dev, "Buffer too small to receive i2c data\n"); + rc = -E2BIG; goto out_err; } diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c index 4f7bf3929d6d..4e4b6d367612 100644 --- a/drivers/clk/clk-fixed-factor.c +++ b/drivers/clk/clk-fixed-factor.c @@ -66,7 +66,14 @@ EXPORT_SYMBOL_GPL(clk_fixed_factor_ops); static void devm_clk_hw_register_fixed_factor_release(struct device *dev, void *res) { - clk_hw_unregister_fixed_factor(&((struct clk_fixed_factor *)res)->hw); + struct clk_fixed_factor *fix = res; + + /* + * We can not use clk_hw_unregister_fixed_factor, since it will kfree() + * the hw, resulting in double free. Just unregister the hw and let + * devres code kfree() it. + */ + clk_hw_unregister(&fix->hw); } static struct clk_hw * diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 5052541a0986..39cfc6c6a8d2 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -4357,20 +4357,19 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb) /* search the list of notifiers for this clk */ list_for_each_entry(cn, &clk_notifier_list, node) if (cn->clk == clk) - break; + goto found; /* if clk wasn't in the notifier list, allocate new clk_notifier */ - if (cn->clk != clk) { - cn = kzalloc(sizeof(*cn), GFP_KERNEL); - if (!cn) - goto out; + cn = kzalloc(sizeof(*cn), GFP_KERNEL); + if (!cn) + goto out; - cn->clk = clk; - srcu_init_notifier_head(&cn->notifier_head); + cn->clk = clk; + srcu_init_notifier_head(&cn->notifier_head); - list_add(&cn->node, &clk_notifier_list); - } + list_add(&cn->node, &clk_notifier_list); +found: ret = srcu_notifier_chain_register(&cn->notifier_head, nb); clk->core->notifier_count++; @@ -4395,32 +4394,28 @@ EXPORT_SYMBOL_GPL(clk_notifier_register); */ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb) { - struct clk_notifier *cn = NULL; - int ret = -EINVAL; + struct clk_notifier *cn; + int ret = -ENOENT; if (!clk || !nb) return -EINVAL; clk_prepare_lock(); - list_for_each_entry(cn, &clk_notifier_list, node) - if (cn->clk == clk) - break; - - if (cn->clk == clk) { - ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb); + list_for_each_entry(cn, &clk_notifier_list, node) { + if (cn->clk == clk) { + ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb); - clk->core->notifier_count--; + clk->core->notifier_count--; - /* XXX the notifier code should handle this better */ - if (!cn->notifier_head.head) { - srcu_cleanup_notifier_head(&cn->notifier_head); - list_del(&cn->node); - kfree(cn); + /* XXX the notifier code should handle this better */ + if (!cn->notifier_head.head) { + 
srcu_cleanup_notifier_head(&cn->notifier_head); + list_del(&cn->node); + kfree(cn); + } + break; } - - } else { - ret = -ENOENT; } clk_prepare_unlock(); diff --git a/drivers/clk/qcom/camcc-sc7180.c b/drivers/clk/qcom/camcc-sc7180.c index dbac5651ab85..9bcf2f8ed4de 100644 --- a/drivers/clk/qcom/camcc-sc7180.c +++ b/drivers/clk/qcom/camcc-sc7180.c @@ -304,7 +304,7 @@ static struct clk_rcg2 cam_cc_bps_clk_src = { .name = "cam_cc_bps_clk_src", .parent_data = cam_cc_parent_data_2, .num_parents = 5, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -325,7 +325,7 @@ static struct clk_rcg2 cam_cc_cci_0_clk_src = { .name = "cam_cc_cci_0_clk_src", .parent_data = cam_cc_parent_data_5, .num_parents = 3, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -339,7 +339,7 @@ static struct clk_rcg2 cam_cc_cci_1_clk_src = { .name = "cam_cc_cci_1_clk_src", .parent_data = cam_cc_parent_data_5, .num_parents = 3, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -360,7 +360,7 @@ static struct clk_rcg2 cam_cc_cphy_rx_clk_src = { .name = "cam_cc_cphy_rx_clk_src", .parent_data = cam_cc_parent_data_3, .num_parents = 6, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -379,7 +379,7 @@ static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = { .name = "cam_cc_csi0phytimer_clk_src", .parent_data = cam_cc_parent_data_0, .num_parents = 4, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -393,7 +393,7 @@ static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = { .name = "cam_cc_csi1phytimer_clk_src", .parent_data = cam_cc_parent_data_0, .num_parents = 4, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -407,7 +407,7 @@ static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = { .name = "cam_cc_csi2phytimer_clk_src", .parent_data = cam_cc_parent_data_0, .num_parents = 4, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -421,7 +421,7 @@ static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = { .name = "cam_cc_csi3phytimer_clk_src", .parent_data = cam_cc_parent_data_0, .num_parents = 4, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -443,7 +443,7 @@ static struct clk_rcg2 cam_cc_fast_ahb_clk_src = { .name = "cam_cc_fast_ahb_clk_src", .parent_data = cam_cc_parent_data_0, .num_parents = 4, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -466,7 +466,7 @@ static struct clk_rcg2 cam_cc_icp_clk_src = { .name = "cam_cc_icp_clk_src", .parent_data = cam_cc_parent_data_2, .num_parents = 5, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -488,7 +488,7 @@ static struct clk_rcg2 cam_cc_ife_0_clk_src = { .name = "cam_cc_ife_0_clk_src", .parent_data = cam_cc_parent_data_4, .num_parents = 4, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -510,7 +510,7 @@ static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = { .name = "cam_cc_ife_0_csid_clk_src", .parent_data = cam_cc_parent_data_3, .num_parents = 6, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -524,7 +524,7 @@ static struct clk_rcg2 cam_cc_ife_1_clk_src = { .name = "cam_cc_ife_1_clk_src", .parent_data = cam_cc_parent_data_4, .num_parents = 4, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -538,7 +538,7 @@ static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = { .name = "cam_cc_ife_1_csid_clk_src", .parent_data = cam_cc_parent_data_3, .num_parents = 6, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -553,7 +553,7 @@ static struct clk_rcg2 cam_cc_ife_lite_clk_src = { .parent_data = 
cam_cc_parent_data_4, .num_parents = 4, .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -567,7 +567,7 @@ static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = { .name = "cam_cc_ife_lite_csid_clk_src", .parent_data = cam_cc_parent_data_3, .num_parents = 6, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -590,7 +590,7 @@ static struct clk_rcg2 cam_cc_ipe_0_clk_src = { .name = "cam_cc_ipe_0_clk_src", .parent_data = cam_cc_parent_data_2, .num_parents = 5, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -613,7 +613,7 @@ static struct clk_rcg2 cam_cc_jpeg_clk_src = { .name = "cam_cc_jpeg_clk_src", .parent_data = cam_cc_parent_data_2, .num_parents = 5, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -635,7 +635,7 @@ static struct clk_rcg2 cam_cc_lrme_clk_src = { .name = "cam_cc_lrme_clk_src", .parent_data = cam_cc_parent_data_6, .num_parents = 5, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -656,7 +656,7 @@ static struct clk_rcg2 cam_cc_mclk0_clk_src = { .name = "cam_cc_mclk0_clk_src", .parent_data = cam_cc_parent_data_1, .num_parents = 3, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -670,7 +670,7 @@ static struct clk_rcg2 cam_cc_mclk1_clk_src = { .name = "cam_cc_mclk1_clk_src", .parent_data = cam_cc_parent_data_1, .num_parents = 3, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -684,7 +684,7 @@ static struct clk_rcg2 cam_cc_mclk2_clk_src = { .name = "cam_cc_mclk2_clk_src", .parent_data = cam_cc_parent_data_1, .num_parents = 3, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -698,7 +698,7 @@ static struct clk_rcg2 cam_cc_mclk3_clk_src = { .name = "cam_cc_mclk3_clk_src", .parent_data = cam_cc_parent_data_1, .num_parents = 3, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -712,7 +712,7 @@ static struct clk_rcg2 cam_cc_mclk4_clk_src = { .name = "cam_cc_mclk4_clk_src", .parent_data = cam_cc_parent_data_1, .num_parents = 3, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -732,7 +732,7 @@ static struct clk_rcg2 cam_cc_slow_ahb_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = 4, .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index 42f13a2d1cc1..05ff3b0d233e 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c @@ -730,7 +730,8 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw, struct clk_rate_request parent_req = { }; struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw); struct clk_hw *xo, *p0, *p1, *p2; - unsigned long request, p0_rate; + unsigned long p0_rate; + u8 mux_div = cgfx->div; int ret; p0 = cgfx->hws[0]; @@ -750,14 +751,15 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw, return 0; } - request = req->rate; - if (cgfx->div > 1) - parent_req.rate = request = request * cgfx->div; + if (mux_div == 0) + mux_div = 1; + + parent_req.rate = req->rate * mux_div; /* This has to be a fixed rate PLL */ p0_rate = clk_hw_get_rate(p0); - if (request == p0_rate) { + if (parent_req.rate == p0_rate) { req->rate = req->best_parent_rate = p0_rate; req->best_parent_hw = p0; return 0; @@ -765,7 +767,7 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw, if (req->best_parent_hw == p0) { /* Are we going back to a previously used rate? 
*/ - if (clk_hw_get_rate(p2) == request) + if (clk_hw_get_rate(p2) == parent_req.rate) req->best_parent_hw = p2; else req->best_parent_hw = p1; @@ -780,8 +782,7 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw, return ret; req->rate = req->best_parent_rate = parent_req.rate; - if (cgfx->div > 1) - req->rate /= cgfx->div; + req->rate /= mux_div; return 0; } diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c index 91dc390a583b..c623ce900406 100644 --- a/drivers/clk/qcom/clk-rpmh.c +++ b/drivers/clk/qcom/clk-rpmh.c @@ -510,9 +510,12 @@ static const struct clk_rpmh_desc clk_rpmh_sm8350 = { .num_clks = ARRAY_SIZE(sm8350_rpmh_clocks), }; +/* Resource name must match resource id present in cmd-db */ +DEFINE_CLK_RPMH_ARC(sc7280, bi_tcxo, bi_tcxo_ao, "xo.lvl", 0x3, 4); + static struct clk_hw *sc7280_rpmh_clocks[] = { - [RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw, - [RPMH_CXO_CLK_A] = &sdm845_bi_tcxo_ao.hw, + [RPMH_CXO_CLK] = &sc7280_bi_tcxo.hw, + [RPMH_CXO_CLK_A] = &sc7280_bi_tcxo_ao.hw, [RPMH_LN_BB_CLK2] = &sdm845_ln_bb_clk2.hw, [RPMH_LN_BB_CLK2_A] = &sdm845_ln_bb_clk2_ao.hw, [RPMH_RF_CLK1] = &sdm845_rf_clk1.hw, diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c index 88e896abb663..da8b627ca156 100644 --- a/drivers/clk/qcom/gcc-sc7180.c +++ b/drivers/clk/qcom/gcc-sc7180.c @@ -620,7 +620,7 @@ static struct clk_rcg2 gcc_sdcc1_apps_clk_src = { .name = "gcc_sdcc1_apps_clk_src", .parent_data = gcc_parent_data_1, .num_parents = 5, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; @@ -642,7 +642,7 @@ static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = { .name = "gcc_sdcc1_ice_core_clk_src", .parent_data = gcc_parent_data_0, .num_parents = 4, - .ops = &clk_rcg2_floor_ops, + .ops = &clk_rcg2_ops, }, }; diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c index 43ecd507bf83..cf94a12459ea 100644 --- a/drivers/clk/socfpga/clk-gate.c +++ b/drivers/clk/socfpga/clk-gate.c @@ -99,7 +99,7 @@ static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk, val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift; val &= GENMASK(socfpgaclk->width - 1, 0); /* Check for GPIO_DB_CLK by its offset */ - if ((int) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET) + if ((uintptr_t) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET) div = val + 1; else div = (1 << val); diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index d0177824c518..1b885964fb34 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -51,7 +51,7 @@ static unsigned arch_timers_present __initdata; -static void __iomem *arch_counter_base; +static void __iomem *arch_counter_base __ro_after_init; struct arch_timer { void __iomem *base; @@ -60,15 +60,16 @@ struct arch_timer { #define to_arch_timer(e) container_of(e, struct arch_timer, evt) -static u32 arch_timer_rate; -static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI]; +static u32 arch_timer_rate __ro_after_init; +u32 arch_timer_rate1 __ro_after_init; +static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI] __ro_after_init; static struct clock_event_device __percpu *arch_timer_evt; -static enum arch_timer_ppi_nr arch_timer_uses_ppi = ARCH_TIMER_VIRT_PPI; -static bool arch_timer_c3stop; -static bool arch_timer_mem_use_virtual; -static bool arch_counter_suspend_stop; +static enum arch_timer_ppi_nr arch_timer_uses_ppi __ro_after_init = ARCH_TIMER_VIRT_PPI; +static bool arch_timer_c3stop __ro_after_init; +static bool arch_timer_mem_use_virtual __ro_after_init; 
+static bool arch_counter_suspend_stop __ro_after_init; #ifdef CONFIG_GENERIC_GETTIMEOFDAY static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_ARCHTIMER; #else @@ -76,7 +77,7 @@ static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_NONE; #endif /* CONFIG_GENERIC_GETTIMEOFDAY */ static cpumask_t evtstrm_available = CPU_MASK_NONE; -static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM); +static bool evtstrm_enable __ro_after_init = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM); static int __init early_evtstrm_cfg(char *buf) { @@ -176,7 +177,7 @@ static notrace u64 arch_counter_get_cntvct(void) * to exist on arm64. arm doesn't use this before DT is probed so even * if we don't have the cp15 accessors we won't have a problem. */ -u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct; +u64 (*arch_timer_read_counter)(void) __ro_after_init = arch_counter_get_cntvct; EXPORT_SYMBOL_GPL(arch_timer_read_counter); static u64 arch_counter_read(struct clocksource *cs) @@ -925,7 +926,7 @@ static int validate_timer_rate(void) * rate was probed first, and don't verify that others match. If the first node * probed has a clock-frequency property, this overrides the HW register. */ -static void arch_timer_of_configure_rate(u32 rate, struct device_node *np) +static void __init arch_timer_of_configure_rate(u32 rate, struct device_node *np) { /* Who has more than one independent system counter? */ if (arch_timer_rate) @@ -939,7 +940,7 @@ static void arch_timer_of_configure_rate(u32 rate, struct device_node *np) pr_warn("frequency not available\n"); } -static void arch_timer_banner(unsigned type) +static void __init arch_timer_banner(unsigned type) { pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n", type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "", diff --git a/drivers/clocksource/clksrc-dbx500-prcmu.c b/drivers/clocksource/clksrc-dbx500-prcmu.c index 996900d017c6..2fc93e46cea3 100644 --- a/drivers/clocksource/clksrc-dbx500-prcmu.c +++ b/drivers/clocksource/clksrc-dbx500-prcmu.c @@ -18,7 +18,7 @@ #define RATE_32K 32768 -#define TIMER_MODE_CONTINOUS 0x1 +#define TIMER_MODE_CONTINUOUS 0x1 #define TIMER_DOWNCOUNT_VAL 0xffffffff #define PRCMU_TIMER_REF 0 @@ -55,13 +55,13 @@ static int __init clksrc_dbx500_prcmu_init(struct device_node *node) /* * The A9 sub system expects the timer to be configured as - * a continous looping timer. + * a continuous looping timer. * The PRCMU should configure it but if it for some reason * don't we do it here. 
*/ if (readl(clksrc_dbx500_timer_base + PRCMU_TIMER_MODE) != - TIMER_MODE_CONTINOUS) { - writel(TIMER_MODE_CONTINOUS, + TIMER_MODE_CONTINUOUS) { + writel(TIMER_MODE_CONTINUOUS, clksrc_dbx500_timer_base + PRCMU_TIMER_MODE); writel(TIMER_DOWNCOUNT_VAL, clksrc_dbx500_timer_base + PRCMU_TIMER_REF); diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c index 42e7e43b8fcd..3819ef5b7098 100644 --- a/drivers/clocksource/dw_apb_timer_of.c +++ b/drivers/clocksource/dw_apb_timer_of.c @@ -38,7 +38,7 @@ static int __init timer_get_base_and_rate(struct device_node *np, } /* - * Not all implementations use a periphal clock, so don't panic + * Not all implementations use a peripheral clock, so don't panic * if it's not present */ pclk = of_clk_get_by_name(np, "pclk"); @@ -52,18 +52,34 @@ static int __init timer_get_base_and_rate(struct device_node *np, return 0; timer_clk = of_clk_get_by_name(np, "timer"); - if (IS_ERR(timer_clk)) - return PTR_ERR(timer_clk); + if (IS_ERR(timer_clk)) { + ret = PTR_ERR(timer_clk); + goto out_pclk_disable; + } ret = clk_prepare_enable(timer_clk); if (ret) - return ret; + goto out_timer_clk_put; *rate = clk_get_rate(timer_clk); - if (!(*rate)) - return -EINVAL; + if (!(*rate)) { + ret = -EINVAL; + goto out_timer_clk_disable; + } return 0; + +out_timer_clk_disable: + clk_disable_unprepare(timer_clk); +out_timer_clk_put: + clk_put(timer_clk); +out_pclk_disable: + if (!IS_ERR(pclk)) { + clk_disable_unprepare(pclk); + clk_put(pclk); + } + iounmap(*base); + return ret; } static int __init add_clockevent(struct device_node *event_timer) diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c index 269a691bd2c4..a02b0a224807 100644 --- a/drivers/clocksource/hyperv_timer.c +++ b/drivers/clocksource/hyperv_timer.c @@ -457,7 +457,7 @@ void __init hv_init_clocksource(void) { /* * Try to set up the TSC page clocksource. If it succeeds, we're - * done. Otherwise, set up the MSR clocksoruce. At least one of + * done. Otherwise, set up the MSR clocksource. At least one of * these will always be available except on very old versions of * Hyper-V on x86. 
In that case we won't have a Hyper-V * clocksource, but Linux will still run with a clocksource based diff --git a/drivers/clocksource/ingenic-ost.c b/drivers/clocksource/ingenic-ost.c index 029efc2731b4..06d25754e606 100644 --- a/drivers/clocksource/ingenic-ost.c +++ b/drivers/clocksource/ingenic-ost.c @@ -88,9 +88,9 @@ static int __init ingenic_ost_probe(struct platform_device *pdev) return PTR_ERR(ost->regs); map = device_node_to_regmap(dev->parent->of_node); - if (!map) { + if (IS_ERR(map)) { dev_err(dev, "regmap not found"); - return -EINVAL; + return PTR_ERR(map); } ost->clk = devm_clk_get(dev, "ost"); @@ -167,13 +167,14 @@ static const struct ingenic_ost_soc_info jz4725b_ost_soc_info = { .is64bit = false, }; -static const struct ingenic_ost_soc_info jz4770_ost_soc_info = { +static const struct ingenic_ost_soc_info jz4760b_ost_soc_info = { .is64bit = true, }; static const struct of_device_id ingenic_ost_of_match[] = { { .compatible = "ingenic,jz4725b-ost", .data = &jz4725b_ost_soc_info, }, - { .compatible = "ingenic,jz4770-ost", .data = &jz4770_ost_soc_info, }, + { .compatible = "ingenic,jz4760b-ost", .data = &jz4760b_ost_soc_info, }, + { .compatible = "ingenic,jz4770-ost", .data = &jz4760b_ost_soc_info, }, { } }; diff --git a/drivers/clocksource/ingenic-timer.c b/drivers/clocksource/ingenic-timer.c index 905fd6b163a8..24ed0f1f089b 100644 --- a/drivers/clocksource/ingenic-timer.c +++ b/drivers/clocksource/ingenic-timer.c @@ -264,6 +264,7 @@ static const struct ingenic_soc_info jz4725b_soc_info = { static const struct of_device_id ingenic_tcu_of_match[] = { { .compatible = "ingenic,jz4740-tcu", .data = &jz4740_soc_info, }, { .compatible = "ingenic,jz4725b-tcu", .data = &jz4725b_soc_info, }, + { .compatible = "ingenic,jz4760-tcu", .data = &jz4740_soc_info, }, { .compatible = "ingenic,jz4770-tcu", .data = &jz4740_soc_info, }, { .compatible = "ingenic,x1000-tcu", .data = &jz4740_soc_info, }, { /* sentinel */ } @@ -358,6 +359,7 @@ err_free_ingenic_tcu: TIMER_OF_DECLARE(jz4740_tcu_intc, "ingenic,jz4740-tcu", ingenic_tcu_init); TIMER_OF_DECLARE(jz4725b_tcu_intc, "ingenic,jz4725b-tcu", ingenic_tcu_init); +TIMER_OF_DECLARE(jz4760_tcu_intc, "ingenic,jz4760-tcu", ingenic_tcu_init); TIMER_OF_DECLARE(jz4770_tcu_intc, "ingenic,jz4770-tcu", ingenic_tcu_init); TIMER_OF_DECLARE(x1000_tcu_intc, "ingenic,x1000-tcu", ingenic_tcu_init); diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index c98f8851fd68..d7ed99f0001f 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -339,8 +339,9 @@ static int sh_cmt_enable(struct sh_cmt_channel *ch) sh_cmt_write_cmcsr(ch, SH_CMT16_CMCSR_CMIE | SH_CMT16_CMCSR_CKS512); } else { - sh_cmt_write_cmcsr(ch, SH_CMT32_CMCSR_CMM | - SH_CMT32_CMCSR_CMTOUT_IE | + u32 cmtout = ch->cmt->info->model <= SH_CMT_48BIT ? 
+ SH_CMT32_CMCSR_CMTOUT_IE : 0; + sh_cmt_write_cmcsr(ch, cmtout | SH_CMT32_CMCSR_CMM | SH_CMT32_CMCSR_CMR_IRQ | SH_CMT32_CMCSR_CKS_RCLK8); } diff --git a/drivers/clocksource/timer-atmel-tcb.c b/drivers/clocksource/timer-atmel-tcb.c index 787dbebbb432..27af17c99590 100644 --- a/drivers/clocksource/timer-atmel-tcb.c +++ b/drivers/clocksource/timer-atmel-tcb.c @@ -455,9 +455,9 @@ static int __init tcb_clksrc_init(struct device_node *node) tcaddr = tc.regs; if (bits == 32) { - /* use apropriate function to read 32 bit counter */ + /* use appropriate function to read 32 bit counter */ clksrc.read = tc_get_cycles32; - /* setup ony channel 0 */ + /* setup only channel 0 */ tcb_setup_single_chan(&tc, best_divisor_idx); tc_sched_clock = tc_sched_clock_read32; tc_delay_timer.read_current_timer = tc_delay_timer_read32; diff --git a/drivers/clocksource/timer-fsl-ftm.c b/drivers/clocksource/timer-fsl-ftm.c index 12a2ed7cfaff..93f336ec875a 100644 --- a/drivers/clocksource/timer-fsl-ftm.c +++ b/drivers/clocksource/timer-fsl-ftm.c @@ -116,7 +116,7 @@ static int ftm_set_next_event(unsigned long delta, * to the MOD register latches the value into a buffer. The MOD * register is updated with the value of its write buffer with * the following scenario: - * a, the counter source clock is diabled. + * a, the counter source clock is disabled. */ ftm_counter_disable(priv->clkevt_base); diff --git a/drivers/clocksource/timer-microchip-pit64b.c b/drivers/clocksource/timer-microchip-pit64b.c index ab623b25a47b..cfa4ec7ef396 100644 --- a/drivers/clocksource/timer-microchip-pit64b.c +++ b/drivers/clocksource/timer-microchip-pit64b.c @@ -237,7 +237,7 @@ static void __init mchp_pit64b_pres_compute(u32 *pres, u32 clk_rate, break; } - /* Use the bigest prescaler if we didn't match one. */ + /* Use the biggest prescaler if we didn't match one. */ if (*pres == MCHP_PIT64B_PRES_MAX) *pres = MCHP_PIT64B_PRES_MAX - 1; } diff --git a/drivers/clocksource/timer-npcm7xx.c b/drivers/clocksource/timer-npcm7xx.c index 9780ffd8010e..a00520cbb660 100644 --- a/drivers/clocksource/timer-npcm7xx.c +++ b/drivers/clocksource/timer-npcm7xx.c @@ -208,5 +208,6 @@ static int __init npcm7xx_timer_init(struct device_node *np) return 0; } +TIMER_OF_DECLARE(wpcm450, "nuvoton,wpcm450-timer", npcm7xx_timer_init); TIMER_OF_DECLARE(npcm7xx, "nuvoton,npcm750-timer", npcm7xx_timer_init); diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c index 572da477c6d3..529cc6a51cdb 100644 --- a/drivers/clocksource/timer-of.c +++ b/drivers/clocksource/timer-of.c @@ -211,10 +211,10 @@ out_fail: } /** - * timer_of_cleanup - release timer_of ressources + * timer_of_cleanup - release timer_of resources * @to: timer_of structure * - * Release the ressources that has been used in timer_of_init(). + * Release the resources that has been used in timer_of_init(). 
* This function should be called in init error cases */ void __init timer_of_cleanup(struct timer_of *to) diff --git a/drivers/clocksource/timer-pistachio.c b/drivers/clocksource/timer-pistachio.c index a2dd85d0c1d7..6f37181a8c63 100644 --- a/drivers/clocksource/timer-pistachio.c +++ b/drivers/clocksource/timer-pistachio.c @@ -71,7 +71,7 @@ static u64 notrace pistachio_clocksource_read_cycles(struct clocksource *cs) { struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs); - u32 counter, overflw; + u32 counter, overflow; unsigned long flags; /* @@ -80,7 +80,7 @@ pistachio_clocksource_read_cycles(struct clocksource *cs) */ raw_spin_lock_irqsave(&pcs->lock, flags); - overflw = gpt_readl(pcs->base, TIMER_CURRENT_OVERFLOW_VALUE, 0); + overflow = gpt_readl(pcs->base, TIMER_CURRENT_OVERFLOW_VALUE, 0); counter = gpt_readl(pcs->base, TIMER_CURRENT_VALUE, 0); raw_spin_unlock_irqrestore(&pcs->lock, flags); diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c index 33b3e8aa2cc5..b6f97960d8ee 100644 --- a/drivers/clocksource/timer-ti-dm-systimer.c +++ b/drivers/clocksource/timer-ti-dm-systimer.c @@ -2,6 +2,7 @@ #include <linux/clk.h> #include <linux/clocksource.h> #include <linux/clockchips.h> +#include <linux/cpuhotplug.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> @@ -449,13 +450,13 @@ static int dmtimer_set_next_event(unsigned long cycles, struct dmtimer_systimer *t = &clkevt->t; void __iomem *pend = t->base + t->pend; - writel_relaxed(0xffffffff - cycles, t->base + t->counter); while (readl_relaxed(pend) & WP_TCRR) cpu_relax(); + writel_relaxed(0xffffffff - cycles, t->base + t->counter); - writel_relaxed(OMAP_TIMER_CTRL_ST, t->base + t->ctrl); while (readl_relaxed(pend) & WP_TCLR) cpu_relax(); + writel_relaxed(OMAP_TIMER_CTRL_ST, t->base + t->ctrl); return 0; } @@ -490,18 +491,18 @@ static int dmtimer_set_periodic(struct clock_event_device *evt) dmtimer_clockevent_shutdown(evt); /* Looks like we need to first set the load value separately */ - writel_relaxed(clkevt->period, t->base + t->load); while (readl_relaxed(pend) & WP_TLDR) cpu_relax(); + writel_relaxed(clkevt->period, t->base + t->load); - writel_relaxed(clkevt->period, t->base + t->counter); while (readl_relaxed(pend) & WP_TCRR) cpu_relax(); + writel_relaxed(clkevt->period, t->base + t->counter); - writel_relaxed(OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST, - t->base + t->ctrl); while (readl_relaxed(pend) & WP_TCLR) cpu_relax(); + writel_relaxed(OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST, + t->base + t->ctrl); return 0; } @@ -530,17 +531,17 @@ static void omap_clockevent_unidle(struct clock_event_device *evt) writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup); } -static int __init dmtimer_clockevent_init(struct device_node *np) +static int __init dmtimer_clkevt_init_common(struct dmtimer_clockevent *clkevt, + struct device_node *np, + unsigned int features, + const struct cpumask *cpumask, + const char *name, + int rating) { - struct dmtimer_clockevent *clkevt; struct clock_event_device *dev; struct dmtimer_systimer *t; int error; - clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL); - if (!clkevt) - return -ENOMEM; - t = &clkevt->t; dev = &clkevt->dev; @@ -548,24 +549,23 @@ static int __init dmtimer_clockevent_init(struct device_node *np) * We mostly use cpuidle_coupled with ARM local timers for runtime, * so there's probably no use for CLOCK_EVT_FEAT_DYNIRQ here. 
*/ - dev->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; - dev->rating = 300; + dev->features = features; + dev->rating = rating; dev->set_next_event = dmtimer_set_next_event; dev->set_state_shutdown = dmtimer_clockevent_shutdown; dev->set_state_periodic = dmtimer_set_periodic; dev->set_state_oneshot = dmtimer_clockevent_shutdown; + dev->set_state_oneshot_stopped = dmtimer_clockevent_shutdown; dev->tick_resume = dmtimer_clockevent_shutdown; - dev->cpumask = cpu_possible_mask; + dev->cpumask = cpumask; dev->irq = irq_of_parse_and_map(np, 0); - if (!dev->irq) { - error = -ENXIO; - goto err_out_free; - } + if (!dev->irq) + return -ENXIO; error = dmtimer_systimer_setup(np, &clkevt->t); if (error) - goto err_out_free; + return error; clkevt->period = 0xffffffff - DIV_ROUND_CLOSEST(t->rate, HZ); @@ -577,38 +577,132 @@ static int __init dmtimer_clockevent_init(struct device_node *np) writel_relaxed(OMAP_TIMER_CTRL_POSTED, t->base + t->ifctrl); error = request_irq(dev->irq, dmtimer_clockevent_interrupt, - IRQF_TIMER, "clockevent", clkevt); + IRQF_TIMER, name, clkevt); if (error) goto err_out_unmap; writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_ena); writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup); - pr_info("TI gptimer clockevent: %s%lu Hz at %pOF\n", - of_find_property(np, "ti,timer-alwon", NULL) ? + pr_info("TI gptimer %s: %s%lu Hz at %pOF\n", + name, of_find_property(np, "ti,timer-alwon", NULL) ? "always-on " : "", t->rate, np->parent); - clockevents_config_and_register(dev, t->rate, - 3, /* Timer internal resynch latency */ + return 0; + +err_out_unmap: + iounmap(t->base); + + return error; +} + +static int __init dmtimer_clockevent_init(struct device_node *np) +{ + struct dmtimer_clockevent *clkevt; + int error; + + clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL); + if (!clkevt) + return -ENOMEM; + + error = dmtimer_clkevt_init_common(clkevt, np, + CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT, + cpu_possible_mask, "clockevent", + 300); + if (error) + goto err_out_free; + + clockevents_config_and_register(&clkevt->dev, clkevt->t.rate, + 3, /* Timer internal resync latency */ 0xffffffff); if (of_machine_is_compatible("ti,am33xx") || of_machine_is_compatible("ti,am43")) { - dev->suspend = omap_clockevent_idle; - dev->resume = omap_clockevent_unidle; + clkevt->dev.suspend = omap_clockevent_idle; + clkevt->dev.resume = omap_clockevent_unidle; } return 0; -err_out_unmap: - iounmap(t->base); - err_out_free: kfree(clkevt); return error; } +/* Dmtimer as percpu timer. 
See dra7 ARM architected timer wrap erratum i940 */ +static DEFINE_PER_CPU(struct dmtimer_clockevent, dmtimer_percpu_timer); + +static int __init dmtimer_percpu_timer_init(struct device_node *np, int cpu) +{ + struct dmtimer_clockevent *clkevt; + int error; + + if (!cpu_possible(cpu)) + return -EINVAL; + + if (!of_property_read_bool(np->parent, "ti,no-reset-on-init") || + !of_property_read_bool(np->parent, "ti,no-idle")) + pr_warn("Incomplete dtb for percpu dmtimer %pOF\n", np->parent); + + clkevt = per_cpu_ptr(&dmtimer_percpu_timer, cpu); + + error = dmtimer_clkevt_init_common(clkevt, np, CLOCK_EVT_FEAT_ONESHOT, + cpumask_of(cpu), "percpu-dmtimer", + 500); + if (error) + return error; + + return 0; +} + +/* See TRM for timer internal resynch latency */ +static int omap_dmtimer_starting_cpu(unsigned int cpu) +{ + struct dmtimer_clockevent *clkevt = per_cpu_ptr(&dmtimer_percpu_timer, cpu); + struct clock_event_device *dev = &clkevt->dev; + struct dmtimer_systimer *t = &clkevt->t; + + clockevents_config_and_register(dev, t->rate, 3, ULONG_MAX); + irq_force_affinity(dev->irq, cpumask_of(cpu)); + + return 0; +} + +static int __init dmtimer_percpu_timer_startup(void) +{ + struct dmtimer_clockevent *clkevt = per_cpu_ptr(&dmtimer_percpu_timer, 0); + struct dmtimer_systimer *t = &clkevt->t; + + if (t->sysc) { + cpuhp_setup_state(CPUHP_AP_TI_GP_TIMER_STARTING, + "clockevents/omap/gptimer:starting", + omap_dmtimer_starting_cpu, NULL); + } + + return 0; +} +subsys_initcall(dmtimer_percpu_timer_startup); + +static int __init dmtimer_percpu_quirk_init(struct device_node *np, u32 pa) +{ + struct device_node *arm_timer; + + arm_timer = of_find_compatible_node(NULL, NULL, "arm,armv7-timer"); + if (of_device_is_available(arm_timer)) { + pr_warn_once("ARM architected timer wrap issue i940 detected\n"); + return 0; + } + + if (pa == 0x48034000) /* dra7 dmtimer3 */ + return dmtimer_percpu_timer_init(np, 0); + else if (pa == 0x48036000) /* dra7 dmtimer4 */ + return dmtimer_percpu_timer_init(np, 1); + + return 0; +} + /* Clocksource */ static struct dmtimer_clocksource * to_dmtimer_clocksource(struct clocksource *cs) @@ -742,6 +836,9 @@ static int __init dmtimer_systimer_init(struct device_node *np) if (clockevent == pa) return dmtimer_clockevent_init(np); + if (of_machine_is_compatible("ti,dra7")) + return dmtimer_percpu_quirk_init(np, pa); + return 0; } diff --git a/drivers/clocksource/timer-vf-pit.c b/drivers/clocksource/timer-vf-pit.c index 1a86a4e7e344..911c92146eca 100644 --- a/drivers/clocksource/timer-vf-pit.c +++ b/drivers/clocksource/timer-vf-pit.c @@ -136,7 +136,7 @@ static int __init pit_clockevent_init(unsigned long rate, int irq) /* * The value for the LDVAL register trigger is calculated as: * LDVAL trigger = (period / clock period) - 1 - * The pit is a 32-bit down count timer, when the conter value + * The pit is a 32-bit down count timer, when the counter value * reaches 0, it will generate an interrupt, thus the minimal * LDVAL trigger value is 1. And then the min_delta is * minimal LDVAL trigger value + 1, and the max_delta is full 32-bit. 
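The LDVAL arithmetic spelled out in the vf-pit comment above maps directly onto the bounds a clockevent is registered with: the minimum usable LDVAL trigger of 1 gives a minimum delta of 2 counter cycles, and the 32-bit down-counter gives the maximum. A minimal sketch of the resulting registration, assuming a PIT input clock of rate Hz (the function and variable names here are illustrative, not taken from the patch):

#include <linux/clockchips.h>

/*
 * Sketch only: LDVAL = (period / clock period) - 1, and the minimal
 * usable LDVAL trigger is 1, so min_delta is 2 cycles; the counter
 * is 32 bits wide, so max_delta is the full 0xffffffff.
 */
static void pit_clockevent_register(struct clock_event_device *ced,
                                    unsigned long rate)
{
        clockevents_config_and_register(ced, rate, 2, 0xffffffff);
}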
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c index d3f756f7b5a0..67e56cf638ef 100644 --- a/drivers/cpufreq/freq_table.c +++ b/drivers/cpufreq/freq_table.c @@ -267,7 +267,7 @@ struct freq_attr cpufreq_freq_attr_##_name##_freqs = \ __ATTR_RO(_name##_frequencies) /* - * show_scaling_available_frequencies - show available normal frequencies for + * scaling_available_frequencies_show - show available normal frequencies for * the specified CPU */ static ssize_t scaling_available_frequencies_show(struct cpufreq_policy *policy, @@ -279,7 +279,7 @@ cpufreq_attr_available_freq(scaling_available); EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs); /* - * show_available_boost_freqs - show available boost frequencies for + * scaling_boost_frequencies_show - show available boost frequencies for * the specified CPU */ static ssize_t scaling_boost_frequencies_show(struct cpufreq_policy *policy, diff --git a/drivers/crypto/allwinner/Kconfig b/drivers/crypto/allwinner/Kconfig index 856fb2045656..b8e75210a0e3 100644 --- a/drivers/crypto/allwinner/Kconfig +++ b/drivers/crypto/allwinner/Kconfig @@ -71,10 +71,10 @@ config CRYPTO_DEV_SUN8I_CE_DEBUG config CRYPTO_DEV_SUN8I_CE_HASH bool "Enable support for hash on sun8i-ce" depends on CRYPTO_DEV_SUN8I_CE - select MD5 - select SHA1 - select SHA256 - select SHA512 + select CRYPTO_MD5 + select CRYPTO_SHA1 + select CRYPTO_SHA256 + select CRYPTO_SHA512 help Say y to enable support for hash algorithms. @@ -132,8 +132,8 @@ config CRYPTO_DEV_SUN8I_SS_PRNG config CRYPTO_DEV_SUN8I_SS_HASH bool "Enable support for hash on sun8i-ss" depends on CRYPTO_DEV_SUN8I_SS - select MD5 - select SHA1 - select SHA256 + select CRYPTO_MD5 + select CRYPTO_SHA1 + select CRYPTO_SHA256 help Say y to enable support for hash algorithms. 
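The Kconfig corrections above are functional, not cosmetic: symbols named MD5 or SHA256 do not exist, so those selects were silently ignored and the software hash implementations the sun8i drivers fall back on could be missing from the build. Because such fallbacks are resolved by name at runtime through the crypto API, the breakage only surfaces as an allocation failure. A sketch of the kind of lookup that fails in that situation (simplified and hypothetical, not code from the patch):

#include <crypto/hash.h>

/* A driver allocating a software fallback by algorithm name; if
 * CRYPTO_SHA256 was never selected, no "sha256" implementation is
 * registered and this fails at runtime rather than at build time. */
static struct crypto_ahash *get_sha256_fallback(void)
{
        return crypto_alloc_ahash("sha256", 0, CRYPTO_ALG_NEED_FALLBACK);
}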
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c index c2e6f5ed1d79..dec79fa3ebaf 100644 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c @@ -561,7 +561,7 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm) sizeof(struct sun4i_cipher_req_ctx) + crypto_skcipher_reqsize(op->fallback_tfm)); - err = pm_runtime_get_sync(op->ss->dev); + err = pm_runtime_resume_and_get(op->ss->dev); if (err < 0) goto error_pm; diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c index 709905ec4680..44b8fc4b786d 100644 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c @@ -288,8 +288,7 @@ static int sun4i_ss_pm_suspend(struct device *dev) { struct sun4i_ss_ctx *ss = dev_get_drvdata(dev); - if (ss->reset) - reset_control_assert(ss->reset); + reset_control_assert(ss->reset); clk_disable_unprepare(ss->ssclk); clk_disable_unprepare(ss->busclk); @@ -314,12 +313,10 @@ static int sun4i_ss_pm_resume(struct device *dev) goto err_enable; } - if (ss->reset) { - err = reset_control_deassert(ss->reset); - if (err) { - dev_err(ss->dev, "Cannot deassert reset control\n"); - goto err_enable; - } + err = reset_control_deassert(ss->reset); + if (err) { + dev_err(ss->dev, "Cannot deassert reset control\n"); + goto err_enable; } return err; @@ -401,12 +398,10 @@ static int sun4i_ss_probe(struct platform_device *pdev) dev_dbg(&pdev->dev, "clock ahb_ss acquired\n"); ss->reset = devm_reset_control_get_optional(&pdev->dev, "ahb"); - if (IS_ERR(ss->reset)) { - if (PTR_ERR(ss->reset) == -EPROBE_DEFER) - return PTR_ERR(ss->reset); + if (IS_ERR(ss->reset)) + return PTR_ERR(ss->reset); + if (!ss->reset) dev_info(&pdev->dev, "no reset control found\n"); - ss->reset = NULL; - } /* * Check that clock have the correct rates given in the datasheet @@ -459,7 +454,7 @@ static int sun4i_ss_probe(struct platform_device *pdev) * this info could be useful */ - err = pm_runtime_get_sync(ss->dev); + err = pm_runtime_resume_and_get(ss->dev); if (err < 0) goto error_pm; diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c index c1b4585e9bbc..d28292762b32 100644 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c @@ -27,7 +27,7 @@ int sun4i_hash_crainit(struct crypto_tfm *tfm) algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash); op->ss = algt->ss; - err = pm_runtime_get_sync(op->ss->dev); + err = pm_runtime_resume_and_get(op->ss->dev); if (err < 0) return err; diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c index 443160a114bb..491fcb7b81b4 100644 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c @@ -29,7 +29,7 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src, algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng); ss = algt->ss; - err = pm_runtime_get_sync(ss->dev); + err = pm_runtime_resume_and_get(ss->dev); if (err < 0) return err; diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c index 33707a2e55ff..54ae8d16e493 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c @@ -240,11 
+240,14 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req theend_sgs: if (areq->src == areq->dst) { - dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL); + dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src), + DMA_BIDIRECTIONAL); } else { if (nr_sgs > 0) - dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE); - dma_unmap_sg(ce->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE); + dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src), + DMA_TO_DEVICE); + dma_unmap_sg(ce->dev, areq->dst, sg_nents(areq->dst), + DMA_FROM_DEVICE); } theend_iv: diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c index 158422ff5695..00194d1d9ae6 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c @@ -932,7 +932,7 @@ static int sun8i_ce_probe(struct platform_device *pdev) if (err) goto error_alg; - err = pm_runtime_get_sync(ce->dev); + err = pm_runtime_resume_and_get(ce->dev); if (err < 0) goto error_alg; diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c index 2f09a37306e2..88194718a806 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c @@ -405,7 +405,8 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm)); dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE); - dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE); + dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src), + DMA_TO_DEVICE); dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE); diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c index cfde9ee4356b..cd1baee424a1 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c @@ -99,6 +99,7 @@ int sun8i_ce_prng_generate(struct crypto_rng *tfm, const u8 *src, dma_iv = dma_map_single(ce->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE); if (dma_mapping_error(ce->dev, dma_iv)) { dev_err(ce->dev, "Cannot DMA MAP IV\n"); + err = -EFAULT; goto err_iv; } diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c index ed2a69f82e1c..9ef1c85c4aaa 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c @@ -232,10 +232,13 @@ sgd_next: theend_sgs: if (areq->src == areq->dst) { - dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL); + dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src), + DMA_BIDIRECTIONAL); } else { - dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_TO_DEVICE); - dma_unmap_sg(ss->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE); + dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src), + DMA_TO_DEVICE); + dma_unmap_sg(ss->dev, areq->dst, sg_nents(areq->dst), + DMA_FROM_DEVICE); } theend_iv: @@ -351,7 +354,7 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm) op->enginectx.op.prepare_request = NULL; op->enginectx.op.unprepare_request = NULL; - err = pm_runtime_get_sync(op->ss->dev); + err = pm_runtime_resume_and_get(op->ss->dev); if (err < 0) { dev_err(op->ss->dev, "pm error %d\n", err); goto error_pm; diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c index e0ddc684798d..80e89066dbd1 100644 --- 
a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c @@ -753,7 +753,7 @@ static int sun8i_ss_probe(struct platform_device *pdev) if (err) goto error_alg; - err = pm_runtime_get_sync(ss->dev); + err = pm_runtime_resume_and_get(ss->dev); if (err < 0) goto error_alg; diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c index 11cbcbc83a7b..3c073eb3db03 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c @@ -348,8 +348,10 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq) bf = (__le32 *)pad; result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA); - if (!result) + if (!result) { + kfree(pad); return -ENOMEM; + } for (i = 0; i < MAX_SG; i++) { rctx->t_dst[i].addr = 0; @@ -432,14 +434,14 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq) err = sun8i_ss_run_hash_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm)); dma_unmap_single(ss->dev, addr_pad, j * 4, DMA_TO_DEVICE); - dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_TO_DEVICE); + dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src), + DMA_TO_DEVICE); dma_unmap_single(ss->dev, addr_res, digestsize, DMA_FROM_DEVICE); - kfree(pad); - memcpy(areq->result, result, algt->alg.hash.halg.digestsize); - kfree(result); theend: + kfree(pad); + kfree(result); crypto_finalize_hash_request(engine, breq, err); return 0; } diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c index 08a1473b2145..3191527928e4 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c @@ -103,7 +103,8 @@ int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src, dma_iv = dma_map_single(ss->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE); if (dma_mapping_error(ss->dev, dma_iv)) { dev_err(ss->dev, "Cannot DMA MAP IV\n"); - return -EFAULT; + err = -EFAULT; + goto err_free; } dma_dst = dma_map_single(ss->dev, d, todo, DMA_FROM_DEVICE); @@ -167,6 +168,7 @@ err_iv: memcpy(ctx->seed, d + dlen, ctx->slen); } memzero_explicit(d, todo); +err_free: kfree(d); return err; diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c index a3fa849b139a..ded732242732 100644 --- a/drivers/crypto/amcc/crypto4xx_alg.c +++ b/drivers/crypto/amcc/crypto4xx_alg.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/** +/* * AMCC SoC PPC4xx Crypto Driver * * Copyright (c) 2008 Applied Micro Circuits Corporation. 
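The pm_runtime_get_sync() to pm_runtime_resume_and_get() conversions repeated across the Allwinner drivers above share one rationale: pm_runtime_get_sync() raises the usage counter even when the resume fails, so every error path needs a matching pm_runtime_put_noidle(), while pm_runtime_resume_and_get() drops the reference itself on failure. A before/after sketch of that difference (hypothetical helpers, not from the patch):

#include <linux/pm_runtime.h>

/* Old pattern: the usage count must be dropped by hand on failure. */
static int resume_old(struct device *dev)
{
        int err = pm_runtime_get_sync(dev);

        if (err < 0) {
                pm_runtime_put_noidle(dev);
                return err;
        }
        return 0;
}

/* New pattern: pm_runtime_resume_and_get() already drops the
 * reference when the resume fails, so errors can be returned as-is. */
static int resume_new(struct device *dev)
{
        return pm_runtime_resume_and_get(dev);
}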
@@ -115,7 +115,7 @@ int crypto4xx_decrypt_iv_block(struct skcipher_request *req) return crypto4xx_crypt(req, AES_IV_SIZE, true, true); } -/** +/* * AES Functions */ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher, @@ -374,7 +374,7 @@ static int crypto4xx_aead_setup_fallback(struct crypto4xx_ctx *ctx, return crypto_aead_setkey(ctx->sw_cipher.aead, key, keylen); } -/** +/* * AES-CCM Functions */ @@ -489,7 +489,7 @@ int crypto4xx_setauthsize_aead(struct crypto_aead *cipher, return crypto_aead_setauthsize(ctx->sw_cipher.aead, authsize); } -/** +/* * AES-GCM Functions */ @@ -617,7 +617,7 @@ int crypto4xx_decrypt_aes_gcm(struct aead_request *req) return crypto4xx_crypt_aes_gcm(req, true); } -/** +/* * HASH SHA1 Functions */ static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm, @@ -711,7 +711,7 @@ int crypto4xx_hash_digest(struct ahash_request *req) ctx->sa_len, 0, NULL); } -/** +/* * SHA1 Algorithm */ int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm) diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 8d1b918a0533..8278d98074e9 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/** +/* * AMCC SoC PPC4xx Crypto Driver * * Copyright (c) 2008 Applied Micro Circuits Corporation. @@ -44,7 +44,7 @@ #define PPC4XX_SEC_VERSION_STR "0.5" -/** +/* * PPC4xx Crypto Engine Initialization Routine */ static void crypto4xx_hw_init(struct crypto4xx_device *dev) @@ -159,7 +159,7 @@ void crypto4xx_free_sa(struct crypto4xx_ctx *ctx) ctx->sa_len = 0; } -/** +/* * alloc memory for the gather ring * no need to alloc buf for the ring * gdr_tail, gdr_head and gdr_count are initialized by this function @@ -268,7 +268,7 @@ static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx) return tail; } -/** +/* * alloc memory for the gather ring * no need to alloc buf for the ring * gdr_tail, gdr_head and gdr_count are initialized by this function @@ -346,7 +346,7 @@ static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev, return &dev->gdr[idx]; } -/** +/* * alloc memory for the scatter ring * need to alloc buf for the ring * sdr_tail, sdr_head and sdr_count are initialized by this function @@ -930,7 +930,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req, return is_busy ? -EBUSY : -EINPROGRESS; } -/** +/* * Algorithm Registration Functions */ static void crypto4xx_ctx_init(struct crypto4xx_alg *amcc_alg, @@ -1097,7 +1097,7 @@ static void crypto4xx_bh_tasklet_cb(unsigned long data) } while (head != tail); } -/** +/* * Top Half of isr. */ static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data, @@ -1186,7 +1186,7 @@ static int crypto4xx_prng_seed(struct crypto_rng *tfm, const u8 *seed, return 0; } -/** +/* * Supported Crypto Algorithms */ static struct crypto4xx_alg_common crypto4xx_alg[] = { @@ -1369,7 +1369,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = { } }, }; -/** +/* * Module Initialization Routine */ static int crypto4xx_probe(struct platform_device *ofdev) diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h index a4e25b46cd0a..56c10668c0ab 100644 --- a/drivers/crypto/amcc/crypto4xx_core.h +++ b/drivers/crypto/amcc/crypto4xx_core.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/** +/* * AMCC SoC PPC4xx Crypto Driver * * Copyright (c) 2008 Applied Micro Circuits Corporation. 
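Another recurring fix in this series swaps the mapped-entry count for sg_nents() in dma_unmap_sg() calls. dma_map_sg() may return fewer entries than it was given once adjacent entries are coalesced, but the DMA API requires unmapping with the same nents that was originally passed in; sg_nents() recomputes that original count from the list itself. A minimal sketch of the corrected pairing (illustrative names, not from the patch):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int dma_roundtrip(struct device *dev, struct scatterlist *sgl)
{
        /* dma_map_sg() may merge entries and return a smaller count. */
        int mapped = dma_map_sg(dev, sgl, sg_nents(sgl), DMA_TO_DEVICE);

        if (mapped <= 0)
                return -EINVAL;

        /* ... program the hardware with 'mapped' descriptors ... */

        /* Unmap with the original nents, not dma_map_sg()'s return value. */
        dma_unmap_sg(dev, sgl, sg_nents(sgl), DMA_TO_DEVICE);
        return 0;
}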
@@ -188,7 +188,7 @@ int crypto4xx_hash_final(struct ahash_request *req); int crypto4xx_hash_update(struct ahash_request *req); int crypto4xx_hash_init(struct ahash_request *req); -/** +/* * Note: Only use this function to copy items that is word aligned. */ static inline void crypto4xx_memcpy_swab32(u32 *dst, const void *buf, diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h b/drivers/crypto/amcc/crypto4xx_reg_def.h index c4c0a1a75941..1038061224da 100644 --- a/drivers/crypto/amcc/crypto4xx_reg_def.h +++ b/drivers/crypto/amcc/crypto4xx_reg_def.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/** +/* * AMCC SoC PPC4xx Crypto Driver * * Copyright (c) 2008 Applied Micro Circuits Corporation. @@ -104,7 +104,7 @@ #define CRYPTO4XX_PRNG_LFSR_L 0x00070030 #define CRYPTO4XX_PRNG_LFSR_H 0x00070034 -/** +/* * Initialize CRYPTO ENGINE registers, and memory bases. */ #define PPC4XX_PDR_POLL 0x3ff @@ -123,7 +123,7 @@ #define PPC4XX_INT_TIMEOUT_CNT 0 #define PPC4XX_INT_TIMEOUT_CNT_REVB 0x3FF #define PPC4XX_INT_CFG 1 -/** +/* * all follow define are ad hoc */ #define PPC4XX_RING_RETRY 100 @@ -131,7 +131,7 @@ #define PPC4XX_SDR_SIZE PPC4XX_NUM_SD #define PPC4XX_GDR_SIZE PPC4XX_NUM_GD -/** +/* * Generic Security Association (SA) with all possible fields. These will * never likely used except for reference purpose. These structure format * can be not changed as the hardware expects them to be layout as defined. diff --git a/drivers/crypto/amcc/crypto4xx_sa.h b/drivers/crypto/amcc/crypto4xx_sa.h index fe756abfc19f..e98e4e7abbad 100644 --- a/drivers/crypto/amcc/crypto4xx_sa.h +++ b/drivers/crypto/amcc/crypto4xx_sa.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/** +/* * AMCC SoC PPC4xx Crypto Driver * * Copyright (c) 2008 Applied Micro Circuits Corporation. @@ -14,7 +14,7 @@ #define AES_IV_SIZE 16 -/** +/* * Contents of Dynamic Security Association (SA) with all possible fields */ union dynamic_sa_contents { @@ -122,7 +122,7 @@ union sa_command_0 { #define SA_AES_KEY_LEN_256 4 #define SA_REV2 1 -/** +/* * The follow defines bits sa_command_1 * In Basic hash mode this bit define simple hash or hmac. * In IPsec mode, this bit define muting control. 
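The /** to /* conversions running through the amcc files are driven by kernel-doc, not style: a comment opened with /** is treated as structured documentation, and scripts/kernel-doc warns when it lacks the expected function name, parameter, and return annotations, so plain banner comments must use the single-star opener. The two forms side by side (hypothetical function, following the usual kernel-doc conventions):

/* Plain banner comment: free form, ignored by scripts/kernel-doc. */

/**
 * example_sum() - add two values; this form is parsed by kernel-doc
 * @a: first addend
 * @b: second addend
 *
 * Return: the sum of @a and @b.
 */
static int example_sum(int a, int b)
{
        return a + b;
}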
@@ -172,7 +172,7 @@ struct dynamic_sa_ctl { union sa_command_1 sa_command_1; } __attribute__((packed)); -/** +/* * State Record for Security Association (SA) */ struct sa_state_record { @@ -184,7 +184,7 @@ struct sa_state_record { }; } __attribute__((packed)); -/** +/* * Security Association (SA) for AES128 * */ @@ -213,7 +213,7 @@ struct dynamic_sa_aes192 { #define SA_AES192_LEN (sizeof(struct dynamic_sa_aes192)/4) #define SA_AES192_CONTENTS 0x3e000062 -/** +/* * Security Association (SA) for AES256 */ struct dynamic_sa_aes256 { @@ -228,7 +228,7 @@ struct dynamic_sa_aes256 { #define SA_AES256_CONTENTS 0x3e000082 #define SA_AES_CONTENTS 0x3e000002 -/** +/* * Security Association (SA) for AES128 CCM */ struct dynamic_sa_aes128_ccm { @@ -242,7 +242,7 @@ struct dynamic_sa_aes128_ccm { #define SA_AES128_CCM_CONTENTS 0x3e000042 #define SA_AES_CCM_CONTENTS 0x3e000002 -/** +/* * Security Association (SA) for AES128_GCM */ struct dynamic_sa_aes128_gcm { @@ -258,7 +258,7 @@ struct dynamic_sa_aes128_gcm { #define SA_AES128_GCM_CONTENTS 0x3e000442 #define SA_AES_GCM_CONTENTS 0x3e000402 -/** +/* * Security Association (SA) for HASH160: HMAC-SHA1 */ struct dynamic_sa_hash160 { diff --git a/drivers/crypto/amcc/crypto4xx_trng.h b/drivers/crypto/amcc/crypto4xx_trng.h index 3af732f25c1c..7356716274cb 100644 --- a/drivers/crypto/amcc/crypto4xx_trng.h +++ b/drivers/crypto/amcc/crypto4xx_trng.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/** +/* * AMCC SoC PPC4xx Crypto Driver * * Copyright (c) 2008 Applied Micro Circuits Corporation. diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c index 8b5e07316352..c6865cbd334b 100644 --- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c +++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c @@ -236,10 +236,10 @@ static int meson_cipher(struct skcipher_request *areq) dma_unmap_single(mc->dev, phykeyiv, keyivlen, DMA_TO_DEVICE); if (areq->src == areq->dst) { - dma_unmap_sg(mc->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL); + dma_unmap_sg(mc->dev, areq->src, sg_nents(areq->src), DMA_BIDIRECTIONAL); } else { - dma_unmap_sg(mc->dev, areq->src, nr_sgs, DMA_TO_DEVICE); - dma_unmap_sg(mc->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE); + dma_unmap_sg(mc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE); + dma_unmap_sg(mc->dev, areq->dst, sg_nents(areq->dst), DMA_FROM_DEVICE); } if (areq->iv && ivsize > 0) { diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c index 5bbeff433c8c..6e7ae896717c 100644 --- a/drivers/crypto/amlogic/amlogic-gxl-core.c +++ b/drivers/crypto/amlogic/amlogic-gxl-core.c @@ -217,9 +217,6 @@ static int meson_crypto_probe(struct platform_device *pdev) struct meson_dev *mc; int err, i; - if (!pdev->dev.of_node) - return -ENODEV; - mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL); if (!mc) return -ENOMEM; diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c index 9bd8e5167be3..333fbefbbccb 100644 --- a/drivers/crypto/atmel-ecc.c +++ b/drivers/crypto/atmel-ecc.c @@ -26,7 +26,7 @@ static struct atmel_ecc_driver_data driver_data; /** - * atmel_ecdh_ctx - transformation context + * struct atmel_ecdh_ctx - transformation context * @client : pointer to i2c client device * @fallback : used for unsupported curves or when user wants to use its own * private key. @@ -34,7 +34,6 @@ static struct atmel_ecc_driver_data driver_data; * of the user to not call set_secret() while * generate_public_key() or compute_shared_secret() are in flight. 
* @curve_id : elliptic curve id - * @n_sz : size in bytes of the n prime * @do_fallback: true when the device doesn't support the curve or when the user * wants to use its own private key. */ @@ -43,7 +42,6 @@ struct atmel_ecdh_ctx { struct crypto_kpp *fallback; const u8 *public_key; unsigned int curve_id; - size_t n_sz; bool do_fallback; }; @@ -51,7 +49,6 @@ static void atmel_ecdh_done(struct atmel_i2c_work_data *work_data, void *areq, int status) { struct kpp_request *req = areq; - struct atmel_ecdh_ctx *ctx = work_data->ctx; struct atmel_i2c_cmd *cmd = &work_data->cmd; size_t copied, n_sz; @@ -59,7 +56,7 @@ static void atmel_ecdh_done(struct atmel_i2c_work_data *work_data, void *areq, goto free_work_data; /* might want less than we've got */ - n_sz = min_t(size_t, ctx->n_sz, req->dst_len); + n_sz = min_t(size_t, ATMEL_ECC_NIST_P256_N_SIZE, req->dst_len); /* copy the shared secret */ copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst, n_sz), @@ -73,14 +70,6 @@ free_work_data: kpp_request_complete(req, status); } -static unsigned int atmel_ecdh_supported_curve(unsigned int curve_id) -{ - if (curve_id == ECC_CURVE_NIST_P256) - return ATMEL_ECC_NIST_P256_N_SIZE; - - return 0; -} - /* * A random private key is generated and stored in the device. The device * returns the pair public key. @@ -104,8 +93,7 @@ static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf, return -EINVAL; } - ctx->n_sz = atmel_ecdh_supported_curve(params.curve_id); - if (!ctx->n_sz || params.key_size) { + if (params.key_size) { /* fallback to ecdh software implementation */ ctx->do_fallback = true; return crypto_kpp_set_secret(ctx->fallback, buf, len); @@ -125,7 +113,6 @@ static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf, goto free_cmd; ctx->do_fallback = false; - ctx->curve_id = params.curve_id; atmel_i2c_init_genkey_cmd(cmd, DATA_SLOT_2); @@ -263,6 +250,7 @@ static int atmel_ecdh_init_tfm(struct crypto_kpp *tfm) struct crypto_kpp *fallback; struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm); + ctx->curve_id = ECC_CURVE_NIST_P256; ctx->client = atmel_ecc_i2c_client_alloc(); if (IS_ERR(ctx->client)) { pr_err("tfm - i2c_client binding failed\n"); @@ -306,7 +294,7 @@ static unsigned int atmel_ecdh_max_size(struct crypto_kpp *tfm) return ATMEL_ECC_PUBKEY_SIZE; } -static struct kpp_alg atmel_ecdh = { +static struct kpp_alg atmel_ecdh_nist_p256 = { .set_secret = atmel_ecdh_set_secret, .generate_public_key = atmel_ecdh_generate_public_key, .compute_shared_secret = atmel_ecdh_compute_shared_secret, @@ -315,7 +303,7 @@ static struct kpp_alg atmel_ecdh = { .max_size = atmel_ecdh_max_size, .base = { .cra_flags = CRYPTO_ALG_NEED_FALLBACK, - .cra_name = "ecdh", + .cra_name = "ecdh-nist-p256", .cra_driver_name = "atmel-ecdh", .cra_priority = ATMEL_ECC_PRIORITY, .cra_module = THIS_MODULE, @@ -340,14 +328,14 @@ static int atmel_ecc_probe(struct i2c_client *client, &driver_data.i2c_client_list); spin_unlock(&driver_data.i2c_list_lock); - ret = crypto_register_kpp(&atmel_ecdh); + ret = crypto_register_kpp(&atmel_ecdh_nist_p256); if (ret) { spin_lock(&driver_data.i2c_list_lock); list_del(&i2c_priv->i2c_client_list_node); spin_unlock(&driver_data.i2c_list_lock); dev_err(&client->dev, "%s alg registration failed\n", - atmel_ecdh.base.cra_driver_name); + atmel_ecdh_nist_p256.base.cra_driver_name); } else { dev_info(&client->dev, "atmel ecc algorithms registered in /proc/crypto\n"); } @@ -365,7 +353,7 @@ static int atmel_ecc_remove(struct i2c_client *client) return -EBUSY; } - 
crypto_unregister_kpp(&atmel_ecdh); + crypto_unregister_kpp(&atmel_ecdh_nist_p256); spin_lock(&driver_data.i2c_list_lock); list_del(&i2c_priv->i2c_client_list_node); diff --git a/drivers/crypto/atmel-i2c.c b/drivers/crypto/atmel-i2c.c index e8e8281e027d..6fd3e969211d 100644 --- a/drivers/crypto/atmel-i2c.c +++ b/drivers/crypto/atmel-i2c.c @@ -339,7 +339,7 @@ int atmel_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) } if (bus_clk_rate > 1000000L) { - dev_err(dev, "%d exceeds maximum supported clock frequency (1MHz)\n", + dev_err(dev, "%u exceeds maximum supported clock frequency (1MHz)\n", bus_clk_rate); return -EINVAL; } diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index 352d80cb5ae9..1b13f601fd95 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -434,7 +434,7 @@ static int atmel_sha_init(struct ahash_request *req) ctx->flags = 0; - dev_dbg(dd->dev, "init: digest size: %d\n", + dev_dbg(dd->dev, "init: digest size: %u\n", crypto_ahash_digestsize(tfm)); switch (crypto_ahash_digestsize(tfm)) { @@ -1102,7 +1102,7 @@ static int atmel_sha_start(struct atmel_sha_dev *dd) struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); int err; - dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n", + dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %u\n", ctx->op, req->nbytes); err = atmel_sha_hw_init(dd); diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c index 4d63cb13a54f..6f01c51e3c37 100644 --- a/drivers/crypto/atmel-tdes.c +++ b/drivers/crypto/atmel-tdes.c @@ -1217,7 +1217,6 @@ static int atmel_tdes_probe(struct platform_device *pdev) tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res); if (IS_ERR(tdes_dd->io_base)) { - dev_err(dev, "can't ioremap\n"); err = PTR_ERR(tdes_dd->io_base); goto err_tasklet_kill; } diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index 851b149f7170..053315e260c2 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -1019,6 +1019,7 @@ static void handle_ahash_resp(struct iproc_reqctx_s *rctx) * a SPU response message for an AEAD request. Includes buffers to catch SPU * message headers and the response data. * @mssg: mailbox message containing the receive sg + * @req: Crypto API request * @rctx: crypto request context * @rx_frag_num: number of scatterlist elements required to hold the * SPU response message @@ -2952,9 +2953,9 @@ static int aead_gcm_esp_setkey(struct crypto_aead *cipher, /** * rfc4543_gcm_esp_setkey() - setkey operation for RFC4543 variant of GCM/GMAC. - * cipher: AEAD structure - * key: Key followed by 4 bytes of salt - * keylen: Length of key plus salt, in bytes + * @cipher: AEAD structure + * @key: Key followed by 4 bytes of salt + * @keylen: Length of key plus salt, in bytes * * Extracts salt from key and stores it to be prepended to IV on each request. 
* Digest is always 16 bytes diff --git a/drivers/crypto/bcm/spu.c b/drivers/crypto/bcm/spu.c index 007abf92cc05..6283e8c6d51d 100644 --- a/drivers/crypto/bcm/spu.c +++ b/drivers/crypto/bcm/spu.c @@ -457,7 +457,7 @@ u16 spum_hash_pad_len(enum hash_alg hash_alg, enum hash_mode hash_mode, * @cipher_mode: Algo type * @data_size: Length of plaintext (bytes) * - * @Return: Length of padding, in bytes + * Return: Length of padding, in bytes */ u32 spum_gcm_ccm_pad_len(enum spu_cipher_mode cipher_mode, unsigned int data_size) @@ -510,10 +510,10 @@ u32 spum_assoc_resp_len(enum spu_cipher_mode cipher_mode, } /** - * spu_aead_ivlen() - Calculate the length of the AEAD IV to be included + * spum_aead_ivlen() - Calculate the length of the AEAD IV to be included * in a SPU request after the AAD and before the payload. * @cipher_mode: cipher mode - * @iv_ctr_len: initialization vector length in bytes + * @iv_len: initialization vector length in bytes * * In Linux ~4.2 and later, the assoc_data sg includes the IV. So no need * to include the IV as a separate field in the SPU request msg. @@ -543,9 +543,9 @@ enum hash_type spum_hash_type(u32 src_sent) /** * spum_digest_size() - Determine the size of a hash digest to expect the SPU to * return. - * alg_digest_size: Number of bytes in the final digest for the given algo - * alg: The hash algorithm - * htype: Type of hash operation (init, update, full, etc) + * @alg_digest_size: Number of bytes in the final digest for the given algo + * @alg: The hash algorithm + * @htype: Type of hash operation (init, update, full, etc) * * When doing incremental hashing for an algorithm with a truncated hash * (e.g., SHA224), the SPU returns the full digest so that it can be fed back as @@ -580,7 +580,7 @@ u32 spum_digest_size(u32 alg_digest_size, enum hash_alg alg, * @aead_parms: Parameters related to AEAD operation * @data_size: Length of data to be encrypted or authenticated. If AEAD, does * not include length of AAD. - + * * Return: the length of the SPU header in bytes. 0 if an error occurs. */ u32 spum_create_request(u8 *spu_hdr, @@ -911,7 +911,7 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms) * setkey() time in spu_cipher_req_init(). * @spu_hdr: Start of the request message header (MH field) * @spu_req_hdr_len: Length in bytes of the SPU request header - * @isInbound: 0 encrypt, 1 decrypt + * @is_inbound: 0 encrypt, 1 decrypt * @cipher_parms: Parameters describing cipher operation to be performed * @data_size: Length of the data in the BD field * diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c index 2db35b5ccaa2..07989bb8c220 100644 --- a/drivers/crypto/bcm/spu2.c +++ b/drivers/crypto/bcm/spu2.c @@ -543,7 +543,8 @@ void spu2_dump_msg_hdr(u8 *buf, unsigned int buf_len) /** * spu2_fmd_init() - At setkey time, initialize the fixed meta data for * subsequent skcipher requests for this context. - * @spu2_cipher_type: Cipher algorithm + * @fmd: Start of FMD field to be written + * @spu2_type: Cipher algorithm * @spu2_mode: Cipher mode * @cipher_key_len: Length of cipher key, in bytes * @cipher_iv_len: Length of cipher initialization vector, in bytes @@ -598,7 +599,7 @@ static int spu2_fmd_init(struct SPU2_FMD *fmd, * SPU request packet. * @fmd: Start of FMD field to be written * @is_inbound: true if decrypting. false if encrypting. 
- * @authFirst: true if alg authenticates before encrypting + * @auth_first: true if alg authenticates before encrypting * @protocol: protocol selector * @cipher_type: cipher algorithm * @cipher_mode: cipher mode @@ -640,6 +641,7 @@ static void spu2_fmd_ctrl0_write(struct SPU2_FMD *fmd, * spu2_fmd_ctrl1_write() - Write ctrl1 field in fixed metadata (FMD) field of * SPU request packet. * @fmd: Start of FMD field to be written + * @is_inbound: true if decrypting. false if encrypting. * @assoc_size: Length of additional associated data, in bytes * @auth_key_len: Length of authentication key, in bytes * @cipher_key_len: Length of cipher key, in bytes @@ -793,7 +795,7 @@ u32 spu2_ctx_max_payload(enum spu_cipher_alg cipher_alg, } /** - * spu_payload_length() - Given a SPU2 message header, extract the payload + * spu2_payload_length() - Given a SPU2 message header, extract the payload * length. * @spu_hdr: Start of SPU message header (FMD) * @@ -812,10 +814,11 @@ u32 spu2_payload_length(u8 *spu_hdr) } /** - * spu_response_hdr_len() - Determine the expected length of a SPU response + * spu2_response_hdr_len() - Determine the expected length of a SPU response * header. * @auth_key_len: Length of authentication key, in bytes * @enc_key_len: Length of encryption key, in bytes + * @is_hash: Unused * * For SPU2, includes just FMD. OMD is never requested. * @@ -827,7 +830,7 @@ u16 spu2_response_hdr_len(u16 auth_key_len, u16 enc_key_len, bool is_hash) } /** - * spu_hash_pad_len() - Calculate the length of hash padding required to extend + * spu2_hash_pad_len() - Calculate the length of hash padding required to extend * data to a full block size. * @hash_alg: hash algorithm * @hash_mode: hash mode @@ -845,8 +848,10 @@ u16 spu2_hash_pad_len(enum hash_alg hash_alg, enum hash_mode hash_mode, } /** - * spu2_gcm_ccm_padlen() - Determine the length of GCM/CCM padding for either + * spu2_gcm_ccm_pad_len() - Determine the length of GCM/CCM padding for either * the AAD field or the data. + * @cipher_mode: Unused + * @data_size: Unused * * Return: 0. Unlike SPU-M, SPU2 hardware does any GCM/CCM padding required. */ @@ -857,7 +862,7 @@ u32 spu2_gcm_ccm_pad_len(enum spu_cipher_mode cipher_mode, } /** - * spu_assoc_resp_len() - Determine the size of the AAD2 buffer needed to catch + * spu2_assoc_resp_len() - Determine the size of the AAD2 buffer needed to catch * associated data in a SPU2 output packet. * @cipher_mode: cipher mode * @assoc_len: length of additional associated data, in bytes @@ -878,11 +883,11 @@ u32 spu2_assoc_resp_len(enum spu_cipher_mode cipher_mode, return resp_len; } -/* - * spu_aead_ivlen() - Calculate the length of the AEAD IV to be included +/** + * spu2_aead_ivlen() - Calculate the length of the AEAD IV to be included * in a SPU request after the AAD and before the payload. * @cipher_mode: cipher mode - * @iv_ctr_len: initialization vector length in bytes + * @iv_len: initialization vector length in bytes * * For SPU2, AEAD IV is included in OMD and does not need to be repeated * prior to the payload. @@ -909,9 +914,9 @@ enum hash_type spu2_hash_type(u32 src_sent) /** * spu2_digest_size() - Determine the size of a hash digest to expect the SPU to * return. 
- * alg_digest_size: Number of bytes in the final digest for the given algo - * alg: The hash algorithm - * htype: Type of hash operation (init, update, full, etc) + * @alg_digest_size: Number of bytes in the final digest for the given algo + * @alg: The hash algorithm + * @htype: Type of hash operation (init, update, full, etc) * */ u32 spu2_digest_size(u32 alg_digest_size, enum hash_alg alg,
@@ -921,7 +926,7 @@ u32 spu2_digest_size(u32 alg_digest_size, enum hash_alg alg, } /** - * spu_create_request() - Build a SPU2 request message header, includint FMD and + * spu2_create_request() - Build a SPU2 request message header, including FMD and * OMD. * @spu_hdr: Start of buffer where SPU request header is to be written * @req_opts: SPU request message options
@@ -1105,7 +1110,7 @@ u32 spu2_create_request(u8 *spu_hdr, } /** - * spu_cipher_req_init() - Build an skcipher SPU2 request message header, + * spu2_cipher_req_init() - Build an skcipher SPU2 request message header, * including FMD and OMD. * @spu_hdr: Location of start of SPU request (FMD field) * @cipher_parms: Parameters describing cipher request
@@ -1162,11 +1167,11 @@ u16 spu2_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms) } /** - * spu_cipher_req_finish() - Finish building a SPU request message header for a + * spu2_cipher_req_finish() - Finish building a SPU request message header for a * block cipher request. * @spu_hdr: Start of the request message header (MH field) * @spu_req_hdr_len: Length in bytes of the SPU request header - * @isInbound: 0 encrypt, 1 decrypt + * @is_inbound: 0 encrypt, 1 decrypt * @cipher_parms: Parameters describing cipher operation to be performed * @data_size: Length of the data in the BD field *
@@ -1222,7 +1227,7 @@ void spu2_cipher_req_finish(u8 *spu_hdr, } /** - * spu_request_pad() - Create pad bytes at the end of the data. + * spu2_request_pad() - Create pad bytes at the end of the data. * @pad_start: Start of buffer where pad bytes are to be written * @gcm_padding: Length of GCM padding, in bytes * @hash_pad_len: Number of bytes of padding extend data to full block
@@ -1311,7 +1316,7 @@ u8 spu2_rx_status_len(void) } /** - * spu_status_process() - Process the status from a SPU response message. + * spu2_status_process() - Process the status from a SPU response message. * @statp: start of STATUS word * * Return: 0 - if status is good and response should be processed
diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c index c4669a96eaec..d5d9cabea55a 100644 --- a/drivers/crypto/bcm/util.c +++ b/drivers/crypto/bcm/util.c @@ -119,8 +119,8 @@ int spu_sg_count(struct scatterlist *sg_list, unsigned int skip, int nbytes) * @from_skip: number of bytes to skip in from_sg. Non-zero when previous * request included part of the buffer in entry in from_sg. * Assumes from_skip < from_sg->length. - * @from_nents number of entries in from_sg - * @length number of bytes to copy. may reach this limit before exhausting + * @from_nents: number of entries in from_sg + * @length: number of bytes to copy. may reach this limit before exhausting * from_sg. * * Copies the entries themselves, not the data in the entries.
Assumes to_sg has
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index a780e627838a..8b8ed77d8715 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -71,6 +71,9 @@ struct caam_skcipher_alg { * @adata: authentication algorithm details * @cdata: encryption algorithm details * @authsize: authentication tag (a.k.a. ICV / MAC) size + * @xts_key_fallback: true if fallback tfm needs to be used due + * to unsupported xts key lengths + * @fallback: xts fallback tfm */ struct caam_ctx { struct caam_flc flc[NUM_OP];
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index dd5f101e43f8..e313233ec6de 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c @@ -187,7 +187,8 @@ static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err, } /** - * Count leading zeros, need it to strip, from a given scatterlist + * caam_rsa_count_leading_zeros - Count leading zeros, which need to be stripped, + * from a given scatterlist * * @sgl : scatterlist to count zeros from * @nbytes: number of zeros, in bytes, to strip
diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c index 711b1acdd4e0..06ee42e8a245 100644 --- a/drivers/crypto/cavium/cpt/cptpf_main.c +++ b/drivers/crypto/cavium/cpt/cptpf_main.c @@ -10,7 +10,6 @@ #include <linux/moduleparam.h> #include <linux/pci.h> #include <linux/printk.h> -#include <linux/version.h> #include "cptpf.h"
diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.c b/drivers/crypto/cavium/nitrox/nitrox_isr.c index 99b053094f5a..c288c4b51783 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_isr.c +++ b/drivers/crypto/cavium/nitrox/nitrox_isr.c @@ -10,7 +10,7 @@ #include "nitrox_isr.h" #include "nitrox_mbx.h" -/** +/* * One vector for each type of ring * - NPS packet ring, AQMQ ring and ZQMQ ring */ @@ -216,7 +216,7 @@ static void nps_core_int_tasklet(unsigned long data) } } -/** +/* * nps_core_int_isr - interrupt handler for NITROX errors and * mailbox communication */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c index 53ef06792133..df95ba26b414 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c +++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c @@ -58,14 +58,15 @@ static void softreq_unmap_sgbufs(struct nitrox_softreq *sr) struct device *dev = DEV(ndev); - dma_unmap_sg(dev, sr->in.sg, sr->in.sgmap_cnt, DMA_BIDIRECTIONAL); + dma_unmap_sg(dev, sr->in.sg, sg_nents(sr->in.sg), + DMA_BIDIRECTIONAL); dma_unmap_single(dev, sr->in.sgcomp_dma, sr->in.sgcomp_len, DMA_TO_DEVICE); kfree(sr->in.sgcomp); sr->in.sg = NULL; sr->in.sgmap_cnt = 0; - dma_unmap_sg(dev, sr->out.sg, sr->out.sgmap_cnt, + dma_unmap_sg(dev, sr->out.sg, sg_nents(sr->out.sg), DMA_BIDIRECTIONAL); dma_unmap_single(dev, sr->out.sgcomp_dma, sr->out.sgcomp_len, DMA_TO_DEVICE); @@ -178,7 +179,7 @@ static int dma_map_inbufs(struct nitrox_softreq *sr, return 0; incomp_err: - dma_unmap_sg(dev, req->src, nents, DMA_BIDIRECTIONAL); + dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL); sr->in.sgmap_cnt = 0; return ret; } @@ -203,7 +204,7 @@ static int dma_map_outbufs(struct nitrox_softreq *sr, return 0; outcomp_map_err: - dma_unmap_sg(dev, req->dst, nents, DMA_BIDIRECTIONAL); + dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_BIDIRECTIONAL); sr->out.sgmap_cnt = 0; sr->out.sg = NULL; return ret;
diff --git a/drivers/crypto/cavium/zip/common.h b/drivers/crypto/cavium/zip/common.h index
58fb3ed6e644..54f6fb054119 100644 --- a/drivers/crypto/cavium/zip/common.h +++ b/drivers/crypto/cavium/zip/common.h @@ -56,7 +56,6 @@ #include <linux/seq_file.h> #include <linux/string.h> #include <linux/types.h> -#include <linux/version.h> /* Device specific zlib function definitions */ #include "zip_device.h" diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c index 88275b4867ea..5976530c00a8 100644 --- a/drivers/crypto/ccp/ccp-crypto-main.c +++ b/drivers/crypto/ccp/ccp-crypto-main.c @@ -59,7 +59,7 @@ struct ccp_crypto_queue { #define CCP_CRYPTO_MAX_QLEN 100 static struct ccp_crypto_queue req_queue; -static spinlock_t req_queue_lock; +static DEFINE_SPINLOCK(req_queue_lock); struct ccp_crypto_cmd { struct list_head entry; @@ -410,7 +410,6 @@ static int ccp_crypto_init(void) return ret; } - spin_lock_init(&req_queue_lock); INIT_LIST_HEAD(&req_queue.cmds); req_queue.backlog = &req_queue.cmds; req_queue.cmd_count = 0; diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index 0971ee60f840..6777582aa1ce 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c @@ -548,7 +548,7 @@ bool ccp_queues_suspended(struct ccp_device *ccp) return ccp->cmd_q_count == suspended; } -int ccp_dev_suspend(struct sp_device *sp) +void ccp_dev_suspend(struct sp_device *sp) { struct ccp_device *ccp = sp->ccp_data; unsigned long flags; @@ -556,7 +556,7 @@ int ccp_dev_suspend(struct sp_device *sp) /* If there's no device there's nothing to do */ if (!ccp) - return 0; + return; spin_lock_irqsave(&ccp->cmd_lock, flags); @@ -572,11 +572,9 @@ int ccp_dev_suspend(struct sp_device *sp) while (!ccp_queues_suspended(ccp)) wait_event_interruptible(ccp->suspend_queue, ccp_queues_suspended(ccp)); - - return 0; } -int ccp_dev_resume(struct sp_device *sp) +void ccp_dev_resume(struct sp_device *sp) { struct ccp_device *ccp = sp->ccp_data; unsigned long flags; @@ -584,7 +582,7 @@ int ccp_dev_resume(struct sp_device *sp) /* If there's no device there's nothing to do */ if (!ccp) - return 0; + return; spin_lock_irqsave(&ccp->cmd_lock, flags); @@ -597,8 +595,6 @@ int ccp_dev_resume(struct sp_device *sp) } spin_unlock_irqrestore(&ccp->cmd_lock, flags); - - return 0; } int ccp_dev_init(struct sp_device *sp) diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index d6a8f4e4b14a..bb88198c874e 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -2418,7 +2418,6 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) dst.address += CCP_ECC_OUTPUT_SIZE; ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.y, 0, CCP_ECC_MODULUS_BYTES); - dst.address += CCP_ECC_OUTPUT_SIZE; /* Restore the workarea address */ dst.address = save; diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index cb9b4c4e371e..da3872c48308 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -21,6 +21,7 @@ #include <linux/ccp.h> #include <linux/firmware.h> #include <linux/gfp.h> +#include <linux/cpufeature.h> #include <asm/smp.h> @@ -972,6 +973,11 @@ int sev_dev_init(struct psp_device *psp) struct sev_device *sev; int ret = -ENOMEM; + if (!boot_cpu_has(X86_FEATURE_SEV)) { + dev_info_once(dev, "SEV: memory encryption not enabled by BIOS\n"); + return 0; + } + sev = devm_kzalloc(dev, sizeof(*sev), GFP_KERNEL); if (!sev) goto e_err; diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c index 6284a15e5047..7eb3e4668286 100644 --- a/drivers/crypto/ccp/sp-dev.c +++ 
b/drivers/crypto/ccp/sp-dev.c @@ -213,12 +213,8 @@ void sp_destroy(struct sp_device *sp) int sp_suspend(struct sp_device *sp) { - int ret; - if (sp->dev_vdata->ccp_vdata) { - ret = ccp_dev_suspend(sp); - if (ret) - return ret; + ccp_dev_suspend(sp); } return 0; @@ -226,12 +222,8 @@ int sp_suspend(struct sp_device *sp) int sp_resume(struct sp_device *sp) { - int ret; - if (sp->dev_vdata->ccp_vdata) { - ret = ccp_dev_resume(sp); - if (ret) - return ret; + ccp_dev_resume(sp); } return 0; diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h index 0218d0670eee..20377e67f65d 100644 --- a/drivers/crypto/ccp/sp-dev.h +++ b/drivers/crypto/ccp/sp-dev.h @@ -134,8 +134,8 @@ struct sp_device *sp_get_psp_master_device(void); int ccp_dev_init(struct sp_device *sp); void ccp_dev_destroy(struct sp_device *sp); -int ccp_dev_suspend(struct sp_device *sp); -int ccp_dev_resume(struct sp_device *sp); +void ccp_dev_suspend(struct sp_device *sp); +void ccp_dev_resume(struct sp_device *sp); #else /* !CONFIG_CRYPTO_DEV_SP_CCP */ @@ -144,15 +144,8 @@ static inline int ccp_dev_init(struct sp_device *sp) return 0; } static inline void ccp_dev_destroy(struct sp_device *sp) { } - -static inline int ccp_dev_suspend(struct sp_device *sp) -{ - return 0; -} -static inline int ccp_dev_resume(struct sp_device *sp) -{ - return 0; -} +static inline void ccp_dev_suspend(struct sp_device *sp) { } +static inline void ccp_dev_resume(struct sp_device *sp) { } #endif /* CONFIG_CRYPTO_DEV_SP_CCP */ #ifdef CONFIG_CRYPTO_DEV_SP_PSP diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index f471dbaef1fb..f468594ef8af 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -356,6 +356,7 @@ static const struct pci_device_id sp_pci_table[] = { { PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] }, { PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] }, { PCI_VDEVICE(AMD, 0x15DF), (kernel_ulong_t)&dev_vdata[4] }, + { PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[4] }, /* Last entry must be zero */ { 0, } }; diff --git a/drivers/crypto/ccp/tee-dev.c b/drivers/crypto/ccp/tee-dev.c index 5e697a90ea7f..5c9d47f3be37 100644 --- a/drivers/crypto/ccp/tee-dev.c +++ b/drivers/crypto/ccp/tee-dev.c @@ -5,7 +5,7 @@ * Author: Rijo Thomas <Rijo-john.Thomas@amd.com> * Author: Devaraj Rangasamy <Devaraj.Rangasamy@amd.com> * - * Copyright 2019 Advanced Micro Devices, Inc. + * Copyright (C) 2019,2021 Advanced Micro Devices, Inc. 
*/ #include <linux/types.h> @@ -36,6 +36,7 @@ static int tee_alloc_ring(struct psp_tee_device *tee, int ring_size) if (!start_addr) return -ENOMEM; + memset(start_addr, 0x0, ring_size); rb_mgr->ring_start = start_addr; rb_mgr->ring_size = ring_size; rb_mgr->ring_pa = __psp_pa(start_addr); @@ -244,41 +245,54 @@ static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id, void *buf, size_t len, struct tee_ring_cmd **resp) { struct tee_ring_cmd *cmd; - u32 rptr, wptr; int nloop = 1000, ret = 0; + u32 rptr; *resp = NULL; mutex_lock(&tee->rb_mgr.mutex); - wptr = tee->rb_mgr.wptr; - - /* Check if ring buffer is full */ + /* Loop until empty entry found in ring buffer */ do { + /* Get pointer to ring buffer command entry */ + cmd = (struct tee_ring_cmd *) + (tee->rb_mgr.ring_start + tee->rb_mgr.wptr); + rptr = ioread32(tee->io_regs + tee->vdata->ring_rptr_reg); - if (!(wptr + sizeof(struct tee_ring_cmd) == rptr)) + /* Check if ring buffer is full or command entry is waiting + * for response from TEE + */ + if (!(tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr || + cmd->flag == CMD_WAITING_FOR_RESPONSE)) break; - dev_info(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n", - rptr, wptr); + dev_dbg(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n", + rptr, tee->rb_mgr.wptr); - /* Wait if ring buffer is full */ + /* Wait if ring buffer is full or TEE is processing data */ mutex_unlock(&tee->rb_mgr.mutex); schedule_timeout_interruptible(msecs_to_jiffies(10)); mutex_lock(&tee->rb_mgr.mutex); } while (--nloop); - if (!nloop && (wptr + sizeof(struct tee_ring_cmd) == rptr)) { - dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n", - rptr, wptr); + if (!nloop && + (tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr || + cmd->flag == CMD_WAITING_FOR_RESPONSE)) { + dev_err(tee->dev, "tee: ring buffer full. 
rptr = %u wptr = %u response flag %u\n", + rptr, tee->rb_mgr.wptr, cmd->flag); ret = -EBUSY; goto unlock; } - /* Pointer to empty data entry in ring buffer */ - cmd = (struct tee_ring_cmd *)(tee->rb_mgr.ring_start + wptr); + /* Do not submit command if PSP got disabled while processing any + * command in another thread + */ + if (psp_dead) { + ret = -EBUSY; + goto unlock; + } /* Write command data into ring buffer */ cmd->cmd_id = cmd_id; @@ -286,6 +300,9 @@ static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id, memset(&cmd->buf[0], 0, sizeof(cmd->buf)); memcpy(&cmd->buf[0], buf, len); + /* Indicate driver is waiting for response */ + cmd->flag = CMD_WAITING_FOR_RESPONSE; + /* Update local copy of write pointer */ tee->rb_mgr.wptr += sizeof(struct tee_ring_cmd); if (tee->rb_mgr.wptr >= tee->rb_mgr.ring_size) @@ -309,14 +326,14 @@ static int tee_wait_cmd_completion(struct psp_tee_device *tee, struct tee_ring_cmd *resp, unsigned int timeout) { - /* ~5ms sleep per loop => nloop = timeout * 200 */ - int nloop = timeout * 200; + /* ~1ms sleep per loop => nloop = timeout * 1000 */ + int nloop = timeout * 1000; while (--nloop) { if (resp->cmd_state == TEE_CMD_STATE_COMPLETED) return 0; - usleep_range(5000, 5100); + usleep_range(1000, 1100); } dev_err(tee->dev, "tee: command 0x%x timed out, disabling PSP\n", @@ -353,12 +370,16 @@ int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf, size_t len, return ret; ret = tee_wait_cmd_completion(tee, resp, TEE_DEFAULT_TIMEOUT); - if (ret) + if (ret) { + resp->flag = CMD_RESPONSE_TIMEDOUT; return ret; + } memcpy(buf, &resp->buf[0], len); *status = resp->status; + resp->flag = CMD_RESPONSE_COPIED; + return 0; } EXPORT_SYMBOL(psp_tee_process_cmd); diff --git a/drivers/crypto/ccp/tee-dev.h b/drivers/crypto/ccp/tee-dev.h index f09960112115..49d26158b71e 100644 --- a/drivers/crypto/ccp/tee-dev.h +++ b/drivers/crypto/ccp/tee-dev.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: MIT */ /* - * Copyright 2019 Advanced Micro Devices, Inc. + * Copyright (C) 2019,2021 Advanced Micro Devices, Inc. * * Author: Rijo Thomas <Rijo-john.Thomas@amd.com> * Author: Devaraj Rangasamy <Devaraj.Rangasamy@amd.com> @@ -18,7 +18,7 @@ #include <linux/mutex.h> #define TEE_DEFAULT_TIMEOUT 10 -#define MAX_BUFFER_SIZE 992 +#define MAX_BUFFER_SIZE 988 /** * enum tee_ring_cmd_id - TEE interface commands for ring buffer configuration @@ -82,6 +82,20 @@ enum tee_cmd_state { }; /** + * enum cmd_resp_state - TEE command's response status maintained by driver + * @CMD_RESPONSE_INVALID: initial state when no command is written to ring + * @CMD_WAITING_FOR_RESPONSE: driver waiting for response from TEE + * @CMD_RESPONSE_TIMEDOUT: failed to get response from TEE + * @CMD_RESPONSE_COPIED: driver has copied response from TEE + */ +enum cmd_resp_state { + CMD_RESPONSE_INVALID, + CMD_WAITING_FOR_RESPONSE, + CMD_RESPONSE_TIMEDOUT, + CMD_RESPONSE_COPIED, +}; + +/** * struct tee_ring_cmd - Structure of the command buffer in TEE ring * @cmd_id: refers to &enum tee_cmd_id. 
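 * (An illustrative aside, assumed rather than stated by the patch: with
 * these response states, tee_submit_cmd() may reclaim a ring slot exactly
 * when its flag is not CMD_WAITING_FOR_RESPONSE,
 *
 *	reusable = (cmd->flag != CMD_WAITING_FOR_RESPONSE);
 *
 * and MAX_BUFFER_SIZE shrinking from 992 to 988 pays for the new 4-byte
 * flag, so an entry keeps its 1024-byte size and
 *
 *	BUILD_BUG_ON(sizeof(struct tee_ring_cmd) != 1024);
 *
 * would still hold.)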
Command id for the ring buffer * interface @@ -91,6 +105,7 @@ enum tee_cmd_state { * @pdata: private data (currently unused) * @res1: reserved region * @buf: TEE command specific buffer + * @flag: refers to &enum cmd_resp_state */ struct tee_ring_cmd { u32 cmd_id; @@ -100,6 +115,7 @@ struct tee_ring_cmd { u64 pdata; u32 res1[2]; u8 buf[MAX_BUFFER_SIZE]; + u32 flag; /* Total size: 1024 bytes */ } __packed; diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c index d0e59e942568..e599ac6dc162 100644 --- a/drivers/crypto/ccree/cc_driver.c +++ b/drivers/crypto/ccree/cc_driver.c @@ -352,10 +352,8 @@ static int init_cc_resources(struct platform_device *plat_dev) req_mem_cc_regs = platform_get_resource(plat_dev, IORESOURCE_MEM, 0); /* Map registers space */ new_drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs); - if (IS_ERR(new_drvdata->cc_base)) { - dev_err(dev, "Failed to ioremap registers"); + if (IS_ERR(new_drvdata->cc_base)) return PTR_ERR(new_drvdata->cc_base); - } dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name, req_mem_cc_regs); diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index f5a336634daa..6933546f87b1 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -126,11 +126,6 @@ static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx) return container_of(ctx->dev, struct uld_ctx, dev); } -static inline int is_ofld_imm(const struct sk_buff *skb) -{ - return (skb->len <= SGE_MAX_WR_LEN); -} - static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx) { memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr)); @@ -769,13 +764,14 @@ static inline void create_wreq(struct chcr_context *ctx, struct uld_ctx *u_ctx = ULD_CTX(ctx); unsigned int tx_channel_id, rx_channel_id; unsigned int txqidx = 0, rxqidx = 0; - unsigned int qid, fid; + unsigned int qid, fid, portno; get_qidxs(req, &txqidx, &rxqidx); qid = u_ctx->lldi.rxq_ids[rxqidx]; fid = u_ctx->lldi.rxq_ids[0]; + portno = rxqidx / ctx->rxq_perchan; tx_channel_id = txqidx / ctx->txq_perchan; - rx_channel_id = rxqidx / ctx->rxq_perchan; + rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[portno]); chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE; @@ -797,15 +793,13 @@ static inline void create_wreq(struct chcr_context *ctx, /** * create_cipher_wr - form the WR for cipher operations - * @req: cipher req. - * @ctx: crypto driver context of the request. - * @qid: ingress qid where response of this WR should be received. 
- * @op_type: encryption or decryption + * @wrparam: Container for create_cipher_wr()'s parameters */ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req); struct chcr_context *ctx = c_ctx(tfm); + struct uld_ctx *u_ctx = ULD_CTX(ctx); struct ablk_ctx *ablkctx = ABLK_CTX(ctx); struct sk_buff *skb = NULL; struct chcr_wr *chcr_req; @@ -822,6 +816,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam) struct adapter *adap = padap(ctx->dev); unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; + rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]); nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE, reqctx->dst_ofst); dst_size = get_space_for_phys_dsgl(nents); @@ -1559,7 +1554,8 @@ static inline void chcr_free_shash(struct crypto_shash *base_hash) /** * create_hash_wr - Create hash work request - * @req - Cipher req base + * @req: Cipher req base + * @param: Container for create_hash_wr()'s parameters */ static struct sk_buff *create_hash_wr(struct ahash_request *req, struct hash_wr_param *param) @@ -1580,6 +1576,7 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req, int error = 0; unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan; + rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]); transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len); req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len + param->sg_len) <= SGE_MAX_WR_LEN; @@ -2438,6 +2435,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct chcr_context *ctx = a_ctx(tfm); + struct uld_ctx *u_ctx = ULD_CTX(ctx); struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); @@ -2457,6 +2455,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, struct adapter *adap = padap(ctx->dev); unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; + rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]); if (req->cryptlen == 0) return NULL; @@ -2710,9 +2709,11 @@ void chcr_add_aead_dst_ent(struct aead_request *req, struct dsgl_walk dsgl_walk; unsigned int authsize = crypto_aead_authsize(tfm); struct chcr_context *ctx = a_ctx(tfm); + struct uld_ctx *u_ctx = ULD_CTX(ctx); u32 temp; unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; + rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]); dsgl_walk_init(&dsgl_walk, phys_cpl); dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma); temp = req->assoclen + req->cryptlen + @@ -2752,9 +2753,11 @@ void chcr_add_cipher_dst_ent(struct skcipher_request *req, struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req); struct chcr_context *ctx = c_ctx(tfm); + struct uld_ctx *u_ctx = ULD_CTX(ctx); struct dsgl_walk dsgl_walk; unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; + rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]); dsgl_walk_init(&dsgl_walk, phys_cpl); dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes, reqctx->dst_ofst); @@ -2958,6 +2961,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl, { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct chcr_context *ctx = a_ctx(tfm); + struct uld_ctx *u_ctx = ULD_CTX(ctx); 
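/*
 * The same two-step mapping recurs in every request-building path touched
 * above: the rx queue index first selects a port, and that port is then
 * translated to its c-channel instead of being used as the channel
 * directly. A minimal sketch of the pattern, relying only on
 * cxgb4_port_e2cchan() as the hunks use it (names otherwise hypothetical):
 *
 *	unsigned int portno = rxqidx / ctx->rxq_perchan;
 *	unsigned int cchan = cxgb4_port_e2cchan(u_ctx->lldi.ports[portno]);
 */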
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM; @@ -2967,6 +2971,8 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl, unsigned int tag_offset = 0, auth_offset = 0; unsigned int assoclen; + rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]); + if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) assoclen = req->assoclen - 8; else @@ -3127,6 +3133,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct chcr_context *ctx = a_ctx(tfm); + struct uld_ctx *u_ctx = ULD_CTX(ctx); struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); struct sk_buff *skb = NULL; @@ -3143,6 +3150,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, struct adapter *adap = padap(ctx->dev); unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; + rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]); if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) assoclen = req->assoclen - 8; diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c index f91f9d762a45..39c70e6255f9 100644 --- a/drivers/crypto/chelsio/chcr_core.c +++ b/drivers/crypto/chelsio/chcr_core.c @@ -1,4 +1,4 @@ -/** +/* * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux. * * Copyright (C) 2011-2016 Chelsio Communications. All rights reserved. @@ -184,7 +184,7 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld) struct uld_ctx *u_ctx; /* Create the device and add it in the device list */ - pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION); + pr_info_once("%s\n", DRV_DESC); if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE)) return ERR_PTR(-EOPNOTSUPP); @@ -309,4 +309,3 @@ module_exit(chcr_crypto_exit); MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards."); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Chelsio Communications"); -MODULE_VERSION(DRV_VERSION); diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h index b02f981e7c32..f7c8bb95a71b 100644 --- a/drivers/crypto/chelsio/chcr_core.h +++ b/drivers/crypto/chelsio/chcr_core.h @@ -44,7 +44,6 @@ #include "cxgb4_uld.h" #define DRV_MODULE_NAME "chcr" -#define DRV_VERSION "1.0.0.0-ko" #define DRV_DESC "Chelsio T6 Crypto Co-processor Driver" #define MAX_PENDING_REQ_TO_HW 20 diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c index 4ee010f39912..fa5a9f207bc9 100644 --- a/drivers/crypto/geode-aes.c +++ b/drivers/crypto/geode-aes.c @@ -21,7 +21,7 @@ /* Static structures */ static void __iomem *_iobase; -static spinlock_t lock; +static DEFINE_SPINLOCK(lock); /* Write a 128 bit field (either a writable key or IV) */ static inline void @@ -383,8 +383,6 @@ static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id) goto erequest; } - spin_lock_init(&lock); - /* Clear any pending activity */ iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG); diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig index 843192666dc3..e572f9982d4e 100644 --- a/drivers/crypto/hisilicon/Kconfig +++ b/drivers/crypto/hisilicon/Kconfig @@ -68,6 +68,8 @@ config CRYPTO_DEV_HISI_HPRE select CRYPTO_DEV_HISI_QM select CRYPTO_DH select CRYPTO_RSA + select CRYPTO_CURVE25519 + select CRYPTO_ECDH help Support for HiSilicon HPRE(High Performance RSA 
Engine) accelerator, which can accelerate RSA and DH algorithms. diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h index 181c109b19f7..e0b4a1982ee9 100644 --- a/drivers/crypto/hisilicon/hpre/hpre.h +++ b/drivers/crypto/hisilicon/hpre/hpre.h @@ -10,6 +10,14 @@ #define HPRE_PF_DEF_Q_NUM 64 #define HPRE_PF_DEF_Q_BASE 0 +/* + * type used in qm sqc DW6. + * 0 - Algorithm which has been supported in V2, like RSA, DH and so on; + * 1 - ECC algorithm in V3. + */ +#define HPRE_V2_ALG_TYPE 0 +#define HPRE_V3_ECC_ALG_TYPE 1 + enum { HPRE_CLUSTER0, HPRE_CLUSTER1, @@ -18,7 +26,6 @@ enum { }; enum hpre_ctrl_dbgfs_file { - HPRE_CURRENT_QM, HPRE_CLEAR_ENABLE, HPRE_CLUSTER_CTRL, HPRE_DEBUG_FILE_NUM, @@ -75,6 +82,9 @@ enum hpre_alg_type { HPRE_ALG_KG_CRT = 0x3, HPRE_ALG_DH_G2 = 0x4, HPRE_ALG_DH = 0x5, + HPRE_ALG_ECC_MUL = 0xD, + /* shared by x25519 and x448, but x448 is not supported now */ + HPRE_ALG_CURVE25519_MUL = 0x10, }; struct hpre_sqe { @@ -92,8 +102,8 @@ struct hpre_sqe { __le32 rsvd1[_HPRE_SQE_ALIGN_EXT]; }; -struct hisi_qp *hpre_create_qp(void); -int hpre_algs_register(void); -void hpre_algs_unregister(void); +struct hisi_qp *hpre_create_qp(u8 type); +int hpre_algs_register(struct hisi_qm *qm); +void hpre_algs_unregister(struct hisi_qm *qm); #endif diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index a87f9904087a..a380087c83f7 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -1,7 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019 HiSilicon Limited. */ #include <crypto/akcipher.h> +#include <crypto/curve25519.h> #include <crypto/dh.h> +#include <crypto/ecc_curve.h> +#include <crypto/ecdh.h> #include <crypto/internal/akcipher.h> #include <crypto/internal/kpp.h> #include <crypto/internal/rsa.h> @@ -36,6 +39,13 @@ struct hpre_ctx; #define HPRE_DFX_SEC_TO_US 1000000 #define HPRE_DFX_US_TO_NS 1000 +/* size in bytes of the n prime */ +#define HPRE_ECC_NIST_P192_N_SIZE 24 +#define HPRE_ECC_NIST_P256_N_SIZE 32 + +/* size in bytes */ +#define HPRE_ECC_HW256_KSZ_B 32 + typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe); struct hpre_rsa_ctx { @@ -61,14 +71,35 @@ struct hpre_dh_ctx { * else if base if the counterpart public key we * compute the shared secret * ZZ = yb^xa mod p; [RFC2631 sec 2.1.1] + * low address: d--->n, please refer to Hisilicon HPRE UM */ - char *xa_p; /* low address: d--->n, please refer to Hisilicon HPRE UM */ + char *xa_p; dma_addr_t dma_xa_p; char *g; /* m */ dma_addr_t dma_g; }; +struct hpre_ecdh_ctx { + /* low address: p->a->k->b */ + unsigned char *p; + dma_addr_t dma_p; + + /* low address: x->y */ + unsigned char *g; + dma_addr_t dma_g; +}; + +struct hpre_curve25519_ctx { + /* low address: p->a->k */ + unsigned char *p; + dma_addr_t dma_p; + + /* gx coordinate */ + unsigned char *g; + dma_addr_t dma_g; +}; + struct hpre_ctx { struct hisi_qp *qp; struct hpre_asym_request **req_list; @@ -80,7 +111,11 @@ struct hpre_ctx { union { struct hpre_rsa_ctx rsa; struct hpre_dh_ctx dh; + struct hpre_ecdh_ctx ecdh; + struct hpre_curve25519_ctx curve25519; }; + /* for ecc algorithms */ + unsigned int curve_id; }; struct hpre_asym_request { @@ -91,6 +126,8 @@ struct hpre_asym_request { union { struct akcipher_request *rsa; struct kpp_request *dh; + struct kpp_request *ecdh; + struct kpp_request *curve25519; } areq; int err; int req_id; @@ -152,12 +189,12 @@ static void hpre_rm_req_from_ctx(struct hpre_asym_request 
*hpre_req) } } -static struct hisi_qp *hpre_get_qp_and_start(void) +static struct hisi_qp *hpre_get_qp_and_start(u8 type) { struct hisi_qp *qp; int ret; - qp = hpre_create_qp(); + qp = hpre_create_qp(type); if (!qp) { pr_err("Can not create hpre qp!\n"); return ERR_PTR(-ENODEV); @@ -261,8 +298,6 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx, dma_addr_t tmp; tmp = le64_to_cpu(sqe->in); - if (unlikely(!tmp)) - return; if (src) { if (req->src) @@ -272,8 +307,6 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx, } tmp = le64_to_cpu(sqe->out); - if (unlikely(!tmp)) - return; if (req->dst) { if (dst) @@ -288,13 +321,16 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx, static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe, void **kreq) { + struct device *dev = HPRE_DEV(ctx); struct hpre_asym_request *req; - int err, id, done; + unsigned int err, done, alg; + int id; #define HPRE_NO_HW_ERR 0 #define HPRE_HW_TASK_DONE 3 #define HREE_HW_ERR_MASK 0x7ff #define HREE_SQE_DONE_MASK 0x3 +#define HREE_ALG_TYPE_MASK 0x1f id = (int)le16_to_cpu(sqe->tag); req = ctx->req_list[id]; hpre_rm_req_from_ctx(req); @@ -307,7 +343,11 @@ static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe, HREE_SQE_DONE_MASK; if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE)) - return 0; + return 0; + + alg = le32_to_cpu(sqe->dw0) & HREE_ALG_TYPE_MASK; + dev_err_ratelimited(dev, "alg[0x%x] error: done[0x%x], etype[0x%x]\n", + alg, done, err); return -EINVAL; } @@ -413,7 +453,6 @@ static void hpre_alg_cb(struct hisi_qp *qp, void *resp) struct hpre_sqe *sqe = resp; struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)]; - if (unlikely(!req)) { atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value); return; @@ -422,18 +461,29 @@ static void hpre_alg_cb(struct hisi_qp *qp, void *resp) req->cb(ctx, resp); } -static int hpre_ctx_init(struct hpre_ctx *ctx) +static void hpre_stop_qp_and_put(struct hisi_qp *qp) +{ + hisi_qm_stop_qp(qp); + hisi_qm_free_qps(&qp, 1); +} + +static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type) { struct hisi_qp *qp; + int ret; - qp = hpre_get_qp_and_start(); + qp = hpre_get_qp_and_start(type); if (IS_ERR(qp)) return PTR_ERR(qp); qp->qp_ctx = ctx; qp->req_cb = hpre_alg_cb; - return hpre_ctx_set(ctx, qp, QM_Q_DEPTH); + ret = hpre_ctx_set(ctx, qp, QM_Q_DEPTH); + if (ret) + hpre_stop_qp_and_put(qp); + + return ret; } static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa) @@ -510,7 +560,6 @@ static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg) return ret; } -#ifdef CONFIG_CRYPTO_DH static int hpre_dh_compute_value(struct kpp_request *req) { struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); @@ -674,7 +723,7 @@ static int hpre_dh_init_tfm(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); - return hpre_ctx_init(ctx); + return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE); } static void hpre_dh_exit_tfm(struct crypto_kpp *tfm) @@ -683,7 +732,6 @@ static void hpre_dh_exit_tfm(struct crypto_kpp *tfm) hpre_dh_clear_ctx(ctx, true); } -#endif static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len) { @@ -1100,7 +1148,7 @@ static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm) return PTR_ERR(ctx->rsa.soft_tfm); } - ret = hpre_ctx_init(ctx); + ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE); if (ret) crypto_free_akcipher(ctx->rsa.soft_tfm); @@ -1115,6 +1163,734 @@ static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm) crypto_free_akcipher(ctx->rsa.soft_tfm); } +static void 
hpre_key_to_big_end(u8 *data, int len) +{ + int i, j; + u8 tmp; + + for (i = 0; i < len / 2; i++) { + j = len - i - 1; + tmp = data[j]; + data[j] = data[i]; + data[i] = tmp; + } +} + +static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all, + bool is_ecdh) +{ + struct device *dev = HPRE_DEV(ctx); + unsigned int sz = ctx->key_sz; + unsigned int shift = sz << 1; + + if (is_clear_all) + hisi_qm_stop_qp(ctx->qp); + + if (is_ecdh && ctx->ecdh.p) { + /* ecdh: p->a->k->b */ + memzero_explicit(ctx->ecdh.p + shift, sz); + dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p); + ctx->ecdh.p = NULL; + } else if (!is_ecdh && ctx->curve25519.p) { + /* curve25519: p->a->k */ + memzero_explicit(ctx->curve25519.p + shift, sz); + dma_free_coherent(dev, sz << 2, ctx->curve25519.p, + ctx->curve25519.dma_p); + ctx->curve25519.p = NULL; + } + + hpre_ctx_clear(ctx, is_clear_all); +} + +static unsigned int hpre_ecdh_supported_curve(unsigned short id) +{ + switch (id) { + case ECC_CURVE_NIST_P192: + case ECC_CURVE_NIST_P256: + return HPRE_ECC_HW256_KSZ_B; + default: + break; + } + + return 0; +} + +static void fill_curve_param(void *addr, u64 *param, unsigned int cur_sz, u8 ndigits) +{ + unsigned int sz = cur_sz - (ndigits - 1) * sizeof(u64); + u8 i = 0; + + while (i < ndigits - 1) { + memcpy(addr + sizeof(u64) * i, &param[i], sizeof(u64)); + i++; + } + + memcpy(addr + sizeof(u64) * i, &param[ndigits - 1], sz); + hpre_key_to_big_end((u8 *)addr, cur_sz); +} + +static int hpre_ecdh_fill_curve(struct hpre_ctx *ctx, struct ecdh *params, + unsigned int cur_sz) +{ + unsigned int shifta = ctx->key_sz << 1; + unsigned int shiftb = ctx->key_sz << 2; + void *p = ctx->ecdh.p + ctx->key_sz - cur_sz; + void *a = ctx->ecdh.p + shifta - cur_sz; + void *b = ctx->ecdh.p + shiftb - cur_sz; + void *x = ctx->ecdh.g + ctx->key_sz - cur_sz; + void *y = ctx->ecdh.g + shifta - cur_sz; + const struct ecc_curve *curve = ecc_get_curve(ctx->curve_id); + char *n; + + if (unlikely(!curve)) + return -EINVAL; + + n = kzalloc(ctx->key_sz, GFP_KERNEL); + if (!n) + return -ENOMEM; + + fill_curve_param(p, curve->p, cur_sz, curve->g.ndigits); + fill_curve_param(a, curve->a, cur_sz, curve->g.ndigits); + fill_curve_param(b, curve->b, cur_sz, curve->g.ndigits); + fill_curve_param(x, curve->g.x, cur_sz, curve->g.ndigits); + fill_curve_param(y, curve->g.y, cur_sz, curve->g.ndigits); + fill_curve_param(n, curve->n, cur_sz, curve->g.ndigits); + + if (params->key_size == cur_sz && memcmp(params->key, n, cur_sz) >= 0) { + kfree(n); + return -EINVAL; + } + + kfree(n); + return 0; +} + +static unsigned int hpre_ecdh_get_curvesz(unsigned short id) +{ + switch (id) { + case ECC_CURVE_NIST_P192: + return HPRE_ECC_NIST_P192_N_SIZE; + case ECC_CURVE_NIST_P256: + return HPRE_ECC_NIST_P256_N_SIZE; + default: + break; + } + + return 0; +} + +static int hpre_ecdh_set_param(struct hpre_ctx *ctx, struct ecdh *params) +{ + struct device *dev = HPRE_DEV(ctx); + unsigned int sz, shift, curve_sz; + int ret; + + ctx->key_sz = hpre_ecdh_supported_curve(ctx->curve_id); + if (!ctx->key_sz) + return -EINVAL; + + curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id); + if (!curve_sz || params->key_size > curve_sz) + return -EINVAL; + + sz = ctx->key_sz; + + if (!ctx->ecdh.p) { + ctx->ecdh.p = dma_alloc_coherent(dev, sz << 3, &ctx->ecdh.dma_p, + GFP_KERNEL); + if (!ctx->ecdh.p) + return -ENOMEM; + } + + shift = sz << 2; + ctx->ecdh.g = ctx->ecdh.p + shift; + ctx->ecdh.dma_g = ctx->ecdh.dma_p + shift; + + ret = hpre_ecdh_fill_curve(ctx, params, curve_sz); + if (ret) { +
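/*
 * Layout sketch of the coherent buffer freed just below, reconstructed
 * from the offsets used in this file (slot names are descriptive only;
 * values are written big-endian, end-aligned in key_sz-wide slots):
 *
 *	0 * sz: p (curve prime)
 *	1 * sz: a (curve coefficient)
 *	2 * sz: k (private key, copied end-aligned by set_secret)
 *	3 * sz: b (curve coefficient)
 *	4 * sz: x (generator x; ecdh.g and dma_g point here, shift sz << 2)
 *	5 * sz: y (generator y)
 *
 * hence the sz << 3 allocation, with the top 2 * sz bytes left as
 * headroom that this file never references.
 */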
dev_err(dev, "failed to fill curve_param, ret = %d!\n", ret); + dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p); + ctx->ecdh.p = NULL; + return ret; + } + + return 0; +} + +static bool hpre_key_is_zero(char *key, unsigned short key_sz) +{ + int i; + + for (i = 0; i < key_sz; i++) + if (key[i]) + return false; + + return true; +} + +static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf, + unsigned int len) +{ + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + struct device *dev = HPRE_DEV(ctx); + unsigned int sz, sz_shift; + struct ecdh params; + int ret; + + if (crypto_ecdh_decode_key(buf, len, ¶ms) < 0) { + dev_err(dev, "failed to decode ecdh key!\n"); + return -EINVAL; + } + + if (hpre_key_is_zero(params.key, params.key_size)) { + dev_err(dev, "Invalid hpre key!\n"); + return -EINVAL; + } + + hpre_ecc_clear_ctx(ctx, false, true); + + ret = hpre_ecdh_set_param(ctx, ¶ms); + if (ret < 0) { + dev_err(dev, "failed to set hpre param, ret = %d!\n", ret); + return ret; + } + + sz = ctx->key_sz; + sz_shift = (sz << 1) + sz - params.key_size; + memcpy(ctx->ecdh.p + sz_shift, params.key, params.key_size); + + return 0; +} + +static void hpre_ecdh_hw_data_clr_all(struct hpre_ctx *ctx, + struct hpre_asym_request *req, + struct scatterlist *dst, + struct scatterlist *src) +{ + struct device *dev = HPRE_DEV(ctx); + struct hpre_sqe *sqe = &req->req; + dma_addr_t dma; + + dma = le64_to_cpu(sqe->in); + + if (src && req->src) + dma_free_coherent(dev, ctx->key_sz << 2, req->src, dma); + + dma = le64_to_cpu(sqe->out); + + if (req->dst) + dma_free_coherent(dev, ctx->key_sz << 1, req->dst, dma); + if (dst) + dma_unmap_single(dev, dma, ctx->key_sz << 1, DMA_FROM_DEVICE); +} + +static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp) +{ + unsigned int curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id); + struct hpre_dfx *dfx = ctx->hpre->debug.dfx; + struct hpre_asym_request *req = NULL; + struct kpp_request *areq; + u64 overtime_thrhld; + char *p; + int ret; + + ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req); + areq = req->areq.ecdh; + areq->dst_len = ctx->key_sz << 1; + + overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value); + if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld)) + atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value); + + p = sg_virt(areq->dst); + memmove(p, p + ctx->key_sz - curve_sz, curve_sz); + memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz); + + hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src); + kpp_request_complete(areq, ret); + + atomic64_inc(&dfx[HPRE_RECV_CNT].value); +} + +static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx, + struct kpp_request *req) +{ + struct hpre_asym_request *h_req; + struct hpre_sqe *msg; + int req_id; + void *tmp; + + if (req->dst_len < ctx->key_sz << 1) { + req->dst_len = ctx->key_sz << 1; + return -EINVAL; + } + + tmp = kpp_request_ctx(req); + h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); + h_req->cb = hpre_ecdh_cb; + h_req->areq.ecdh = req; + msg = &h_req->req; + memset(msg, 0, sizeof(*msg)); + msg->key = cpu_to_le64(ctx->ecdh.dma_p); + + msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT); + msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1; + h_req->ctx = ctx; + + req_id = hpre_add_req_to_ctx(h_req); + if (req_id < 0) + return -EBUSY; + + msg->tag = cpu_to_le16((u16)req_id); + return 0; +} + +static int hpre_ecdh_src_data_init(struct hpre_asym_request *hpre_req, + struct scatterlist *data, unsigned int len) +{ + struct hpre_sqe *msg = &hpre_req->req; + struct hpre_ctx 
*ctx = hpre_req->ctx; + struct device *dev = HPRE_DEV(ctx); + unsigned int tmpshift; + dma_addr_t dma = 0; + void *ptr; + int shift; + + /* Src_data include gx and gy. */ + shift = ctx->key_sz - (len >> 1); + if (unlikely(shift < 0)) + return -EINVAL; + + ptr = dma_alloc_coherent(dev, ctx->key_sz << 2, &dma, GFP_KERNEL); + if (unlikely(!ptr)) + return -ENOMEM; + + tmpshift = ctx->key_sz << 1; + scatterwalk_map_and_copy(ptr + tmpshift, data, 0, len, 0); + memcpy(ptr + shift, ptr + tmpshift, len >> 1); + memcpy(ptr + ctx->key_sz + shift, ptr + tmpshift + (len >> 1), len >> 1); + + hpre_req->src = ptr; + msg->in = cpu_to_le64(dma); + return 0; +} + +static int hpre_ecdh_dst_data_init(struct hpre_asym_request *hpre_req, + struct scatterlist *data, unsigned int len) +{ + struct hpre_sqe *msg = &hpre_req->req; + struct hpre_ctx *ctx = hpre_req->ctx; + struct device *dev = HPRE_DEV(ctx); + dma_addr_t dma = 0; + + if (unlikely(!data || !sg_is_last(data) || len != ctx->key_sz << 1)) { + dev_err(dev, "data or data length is illegal!\n"); + return -EINVAL; + } + + hpre_req->dst = NULL; + dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(dev, dma))) { + dev_err(dev, "dma map data err!\n"); + return -ENOMEM; + } + + msg->out = cpu_to_le64(dma); + return 0; +} + +static int hpre_ecdh_compute_value(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + struct device *dev = HPRE_DEV(ctx); + void *tmp = kpp_request_ctx(req); + struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); + struct hpre_sqe *msg = &hpre_req->req; + int ret; + + ret = hpre_ecdh_msg_request_set(ctx, req); + if (unlikely(ret)) { + dev_err(dev, "failed to set ecdh request, ret = %d!\n", ret); + return ret; + } + + if (req->src) { + ret = hpre_ecdh_src_data_init(hpre_req, req->src, req->src_len); + if (unlikely(ret)) { + dev_err(dev, "failed to init src data, ret = %d!\n", ret); + goto clear_all; + } + } else { + msg->in = cpu_to_le64(ctx->ecdh.dma_g); + } + + ret = hpre_ecdh_dst_data_init(hpre_req, req->dst, req->dst_len); + if (unlikely(ret)) { + dev_err(dev, "failed to init dst data, ret = %d!\n", ret); + goto clear_all; + } + + msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_ECC_MUL); + ret = hpre_send(ctx, msg); + if (likely(!ret)) + return -EINPROGRESS; + +clear_all: + hpre_rm_req_from_ctx(hpre_req); + hpre_ecdh_hw_data_clr_all(ctx, hpre_req, req->dst, req->src); + return ret; +} + +static unsigned int hpre_ecdh_max_size(struct crypto_kpp *tfm) +{ + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + + /* max size is the pub_key_size, include x and y */ + return ctx->key_sz << 1; +} + +static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm) +{ + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + + ctx->curve_id = ECC_CURVE_NIST_P192; + + return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); +} + +static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm) +{ + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + + ctx->curve_id = ECC_CURVE_NIST_P256; + + return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); +} + +static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm) +{ + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + + hpre_ecc_clear_ctx(ctx, true, true); +} + +static void hpre_curve25519_fill_curve(struct hpre_ctx *ctx, const void *buf, + unsigned int len) +{ + u8 secret[CURVE25519_KEY_SIZE] = { 0 }; + unsigned int sz = ctx->key_sz; + const struct ecc_curve *curve; + unsigned int shift = sz << 1; + void *p; + + /* + * The 
key from 'buf' is in little-endian, we should preprocess it as + * the description in rfc7748: "k[0] &= 248, k[31] &= 127, k[31] |= 64", + * then convert it to big endian. Only in this way, the result can be + * the same as the software curve-25519 that exists in crypto. + */ + memcpy(secret, buf, len); + curve25519_clamp_secret(secret); + hpre_key_to_big_end(secret, CURVE25519_KEY_SIZE); + + p = ctx->curve25519.p + sz - len; + + curve = ecc_get_curve25519(); + + /* fill curve parameters */ + fill_curve_param(p, curve->p, len, curve->g.ndigits); + fill_curve_param(p + sz, curve->a, len, curve->g.ndigits); + memcpy(p + shift, secret, len); + fill_curve_param(p + shift + sz, curve->g.x, len, curve->g.ndigits); + memzero_explicit(secret, CURVE25519_KEY_SIZE); +} + +static int hpre_curve25519_set_param(struct hpre_ctx *ctx, const void *buf, + unsigned int len) +{ + struct device *dev = HPRE_DEV(ctx); + unsigned int sz = ctx->key_sz; + unsigned int shift = sz << 1; + + /* p->a->k->gx */ + if (!ctx->curve25519.p) { + ctx->curve25519.p = dma_alloc_coherent(dev, sz << 2, + &ctx->curve25519.dma_p, + GFP_KERNEL); + if (!ctx->curve25519.p) + return -ENOMEM; + } + + ctx->curve25519.g = ctx->curve25519.p + shift + sz; + ctx->curve25519.dma_g = ctx->curve25519.dma_p + shift + sz; + + hpre_curve25519_fill_curve(ctx, buf, len); + + return 0; +} + +static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf, + unsigned int len) +{ + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + struct device *dev = HPRE_DEV(ctx); + int ret = -EINVAL; + + if (len != CURVE25519_KEY_SIZE || + !crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) { + dev_err(dev, "key is null or key len is not 32bytes!\n"); + return ret; + } + + /* Free old secret if any */ + hpre_ecc_clear_ctx(ctx, false, false); + + ctx->key_sz = CURVE25519_KEY_SIZE; + ret = hpre_curve25519_set_param(ctx, buf, CURVE25519_KEY_SIZE); + if (ret) { + dev_err(dev, "failed to set curve25519 param, ret = %d!\n", ret); + hpre_ecc_clear_ctx(ctx, false, false); + return ret; + } + + return 0; +} + +static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx, + struct hpre_asym_request *req, + struct scatterlist *dst, + struct scatterlist *src) +{ + struct device *dev = HPRE_DEV(ctx); + struct hpre_sqe *sqe = &req->req; + dma_addr_t dma; + + dma = le64_to_cpu(sqe->in); + + if (src && req->src) + dma_free_coherent(dev, ctx->key_sz, req->src, dma); + + dma = le64_to_cpu(sqe->out); + + if (req->dst) + dma_free_coherent(dev, ctx->key_sz, req->dst, dma); + if (dst) + dma_unmap_single(dev, dma, ctx->key_sz, DMA_FROM_DEVICE); +} + +static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp) +{ + struct hpre_dfx *dfx = ctx->hpre->debug.dfx; + struct hpre_asym_request *req = NULL; + struct kpp_request *areq; + u64 overtime_thrhld; + int ret; + + ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req); + areq = req->areq.curve25519; + areq->dst_len = ctx->key_sz; + + overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value); + if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld)) + atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value); + + hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE); + + hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src); + kpp_request_complete(areq, ret); + + atomic64_inc(&dfx[HPRE_RECV_CNT].value); +} + +static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx, + struct kpp_request *req) +{ + struct hpre_asym_request *h_req; + struct hpre_sqe *msg; + int req_id; + void *tmp; + + 
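/*
 * Worked sketch of the secret preprocessing that
 * hpre_curve25519_fill_curve() performs above (standalone, local names
 * hypothetical): curve25519_clamp_secret() applies the RFC 7748 clamping
 * "k[0] &= 248, k[31] &= 127, k[31] |= 64" to the little-endian key,
 * which is then byte-reversed because HPRE consumes big-endian operands:
 *
 *	u8 k[CURVE25519_KEY_SIZE];
 *
 *	memcpy(k, buf, CURVE25519_KEY_SIZE);
 *	curve25519_clamp_secret(k);
 *	hpre_key_to_big_end(k, CURVE25519_KEY_SIZE);
 */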
if (unlikely(req->dst_len < ctx->key_sz)) { + req->dst_len = ctx->key_sz; + return -EINVAL; + } + + tmp = kpp_request_ctx(req); + h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); + h_req->cb = hpre_curve25519_cb; + h_req->areq.curve25519 = req; + msg = &h_req->req; + memset(msg, 0, sizeof(*msg)); + msg->key = cpu_to_le64(ctx->curve25519.dma_p); + + msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT); + msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1; + h_req->ctx = ctx; + + req_id = hpre_add_req_to_ctx(h_req); + if (req_id < 0) + return -EBUSY; + + msg->tag = cpu_to_le16((u16)req_id); + return 0; +} + +static void hpre_curve25519_src_modulo_p(u8 *ptr) +{ + int i; + + for (i = 0; i < CURVE25519_KEY_SIZE - 1; i++) + ptr[i] = 0; + + /* The modulus is ptr's last byte minus '0xed'(last byte of p) */ + ptr[i] -= 0xed; +} + +static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req, + struct scatterlist *data, unsigned int len) +{ + struct hpre_sqe *msg = &hpre_req->req; + struct hpre_ctx *ctx = hpre_req->ctx; + struct device *dev = HPRE_DEV(ctx); + u8 p[CURVE25519_KEY_SIZE] = { 0 }; + const struct ecc_curve *curve; + dma_addr_t dma = 0; + u8 *ptr; + + if (len != CURVE25519_KEY_SIZE) { + dev_err(dev, "sourc_data len is not 32bytes, len = %u!\n", len); + return -EINVAL; + } + + ptr = dma_alloc_coherent(dev, ctx->key_sz, &dma, GFP_KERNEL); + if (unlikely(!ptr)) + return -ENOMEM; + + scatterwalk_map_and_copy(ptr, data, 0, len, 0); + + if (!crypto_memneq(ptr, curve25519_null_point, CURVE25519_KEY_SIZE)) { + dev_err(dev, "gx is null!\n"); + goto err; + } + + /* + * Src_data(gx) is in little-endian order, MSB in the final byte should + * be masked as described in RFC7748, then transform it to big-endian + * form, then hisi_hpre can use the data. + */ + ptr[31] &= 0x7f; + hpre_key_to_big_end(ptr, CURVE25519_KEY_SIZE); + + curve = ecc_get_curve25519(); + + fill_curve_param(p, curve->p, CURVE25519_KEY_SIZE, curve->g.ndigits); + + /* + * When src_data equals (2^255 - 19) ~ (2^255 - 1), it is out of p, + * we get its modulus to p, and then use it. 
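 *
 * Worked instance (a direct consequence of the masking above): once
 * "ptr[31] &= 0x7f" has been applied, the value is at most 2^255 - 1, so
 * an out-of-range input lies in [p, p + 18] with p = 2^255 - 19. In
 * big-endian form all such inputs share p's first 31 bytes
 * (0x7f, 0xff, ..., 0xff) and differ only in the last byte
 * (0xed ... 0xff); subtracting p therefore reduces to zeroing bytes 0-30
 * and taking 0xed off byte 31, which is all hpre_curve25519_src_modulo_p()
 * has to do.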
+ */ + if (memcmp(ptr, p, ctx->key_sz) >= 0) + hpre_curve25519_src_modulo_p(ptr); + + hpre_req->src = ptr; + msg->in = cpu_to_le64(dma); + return 0; + +err: + dma_free_coherent(dev, ctx->key_sz, ptr, dma); + return -EINVAL; +} + +static int hpre_curve25519_dst_init(struct hpre_asym_request *hpre_req, + struct scatterlist *data, unsigned int len) +{ + struct hpre_sqe *msg = &hpre_req->req; + struct hpre_ctx *ctx = hpre_req->ctx; + struct device *dev = HPRE_DEV(ctx); + dma_addr_t dma = 0; + + if (!data || !sg_is_last(data) || len != ctx->key_sz) { + dev_err(dev, "data or data length is illegal!\n"); + return -EINVAL; + } + + hpre_req->dst = NULL; + dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(dev, dma))) { + dev_err(dev, "dma map data err!\n"); + return -ENOMEM; + } + + msg->out = cpu_to_le64(dma); + return 0; +} + +static int hpre_curve25519_compute_value(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + struct device *dev = HPRE_DEV(ctx); + void *tmp = kpp_request_ctx(req); + struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); + struct hpre_sqe *msg = &hpre_req->req; + int ret; + + ret = hpre_curve25519_msg_request_set(ctx, req); + if (unlikely(ret)) { + dev_err(dev, "failed to set curve25519 request, ret = %d!\n", ret); + return ret; + } + + if (req->src) { + ret = hpre_curve25519_src_init(hpre_req, req->src, req->src_len); + if (unlikely(ret)) { + dev_err(dev, "failed to init src data, ret = %d!\n", + ret); + goto clear_all; + } + } else { + msg->in = cpu_to_le64(ctx->curve25519.dma_g); + } + + ret = hpre_curve25519_dst_init(hpre_req, req->dst, req->dst_len); + if (unlikely(ret)) { + dev_err(dev, "failed to init dst data, ret = %d!\n", ret); + goto clear_all; + } + + msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_CURVE25519_MUL); + ret = hpre_send(ctx, msg); + if (likely(!ret)) + return -EINPROGRESS; + +clear_all: + hpre_rm_req_from_ctx(hpre_req); + hpre_curve25519_hw_data_clr_all(ctx, hpre_req, req->dst, req->src); + return ret; +} + +static unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm) +{ + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + + return ctx->key_sz; +} + +static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm) +{ + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + + return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); +} + +static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm) +{ + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + + hpre_ecc_clear_ctx(ctx, true, false); +} + static struct akcipher_alg rsa = { .sign = hpre_rsa_dec, .verify = hpre_rsa_enc, @@ -1135,7 +1911,6 @@ static struct akcipher_alg rsa = { }, }; -#ifdef CONFIG_CRYPTO_DH static struct kpp_alg dh = { .set_secret = hpre_dh_set_secret, .generate_public_key = hpre_dh_compute_value, @@ -1152,9 +1927,83 @@ static struct kpp_alg dh = { .cra_module = THIS_MODULE, }, }; -#endif -int hpre_algs_register(void) +static struct kpp_alg ecdh_nist_p192 = { + .set_secret = hpre_ecdh_set_secret, + .generate_public_key = hpre_ecdh_compute_value, + .compute_shared_secret = hpre_ecdh_compute_value, + .max_size = hpre_ecdh_max_size, + .init = hpre_ecdh_nist_p192_init_tfm, + .exit = hpre_ecdh_exit_tfm, + .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ, + .base = { + .cra_ctxsize = sizeof(struct hpre_ctx), + .cra_priority = HPRE_CRYPTO_ALG_PRI, + .cra_name = "ecdh-nist-p192", + .cra_driver_name = "hpre-ecdh", + .cra_module = THIS_MODULE, + }, +}; + +static struct 
kpp_alg ecdh_nist_p256 = { + .set_secret = hpre_ecdh_set_secret, + .generate_public_key = hpre_ecdh_compute_value, + .compute_shared_secret = hpre_ecdh_compute_value, + .max_size = hpre_ecdh_max_size, + .init = hpre_ecdh_nist_p256_init_tfm, + .exit = hpre_ecdh_exit_tfm, + .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ, + .base = { + .cra_ctxsize = sizeof(struct hpre_ctx), + .cra_priority = HPRE_CRYPTO_ALG_PRI, + .cra_name = "ecdh-nist-p256", + .cra_driver_name = "hpre-ecdh", + .cra_module = THIS_MODULE, + }, +}; + +static struct kpp_alg curve25519_alg = { + .set_secret = hpre_curve25519_set_secret, + .generate_public_key = hpre_curve25519_compute_value, + .compute_shared_secret = hpre_curve25519_compute_value, + .max_size = hpre_curve25519_max_size, + .init = hpre_curve25519_init_tfm, + .exit = hpre_curve25519_exit_tfm, + .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ, + .base = { + .cra_ctxsize = sizeof(struct hpre_ctx), + .cra_priority = HPRE_CRYPTO_ALG_PRI, + .cra_name = "curve25519", + .cra_driver_name = "hpre-curve25519", + .cra_module = THIS_MODULE, + }, +}; + + +static int hpre_register_ecdh(void) +{ + int ret; + + ret = crypto_register_kpp(&ecdh_nist_p192); + if (ret) + return ret; + + ret = crypto_register_kpp(&ecdh_nist_p256); + if (ret) { + crypto_unregister_kpp(&ecdh_nist_p192); + return ret; + } + + return 0; +} + +static void hpre_unregister_ecdh(void) +{ + crypto_unregister_kpp(&ecdh_nist_p256); + crypto_unregister_kpp(&ecdh_nist_p192); +} + +int hpre_algs_register(struct hisi_qm *qm) { int ret; @@ -1162,19 +2011,37 @@ int hpre_algs_register(void) ret = crypto_register_akcipher(&rsa); if (ret) return ret; -#ifdef CONFIG_CRYPTO_DH + ret = crypto_register_kpp(&dh); if (ret) - crypto_unregister_akcipher(&rsa); -#endif + goto unreg_rsa; + + if (qm->ver >= QM_HW_V3) { + ret = hpre_register_ecdh(); + if (ret) + goto unreg_dh; + ret = crypto_register_kpp(&curve25519_alg); + if (ret) + goto unreg_ecdh; + } + return 0; +unreg_ecdh: + hpre_unregister_ecdh(); +unreg_dh: + crypto_unregister_kpp(&dh); +unreg_rsa: + crypto_unregister_akcipher(&rsa); return ret; } -void hpre_algs_unregister(void) +void hpre_algs_unregister(struct hisi_qm *qm) { - crypto_unregister_akcipher(&rsa); -#ifdef CONFIG_CRYPTO_DH + if (qm->ver >= QM_HW_V3) { + crypto_unregister_kpp(&curve25519_alg); + hpre_unregister_ecdh(); + } + crypto_unregister_kpp(&dh); -#endif + crypto_unregister_akcipher(&rsa); } diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index e7a2c70eb9cf..046bc962c8b2 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -13,7 +13,6 @@ #include <linux/uacce.h> #include "hpre.h" -#define HPRE_QUEUE_NUM_V2 1024 #define HPRE_QM_ABNML_INT_MASK 0x100004 #define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0) #define HPRE_COMM_CNT_CLR_CE 0x0 @@ -119,7 +118,6 @@ static struct hisi_qm_list hpre_devices = { }; static const char * const hpre_debug_file_name[] = { - [HPRE_CURRENT_QM] = "current_qm", [HPRE_CLEAR_ENABLE] = "rdclr_en", [HPRE_CLUSTER_CTRL] = "cluster_ctrl", }; @@ -226,41 +224,44 @@ static u32 vfs_num; module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444); MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)"); -struct hisi_qp *hpre_create_qp(void) +struct hisi_qp *hpre_create_qp(u8 type) { int node = cpu_to_node(smp_processor_id()); struct hisi_qp *qp = NULL; int ret; - ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, 0, node, &qp); + if (type != HPRE_V2_ALG_TYPE && type 
!= HPRE_V3_ECC_ALG_TYPE) + return NULL; + + /* + * type: 0 - RSA/DH. algorithm supported in V2, + * 1 - ECC algorithm in V3. + */ + ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, type, node, &qp); if (!ret) return qp; return NULL; } -static void hpre_pasid_enable(struct hisi_qm *qm) +static void hpre_config_pasid(struct hisi_qm *qm) { - u32 val; - - val = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG); - val |= BIT(HPRE_PASID_EN_BIT); - writel_relaxed(val, qm->io_base + HPRE_DATA_RUSER_CFG); - val = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG); - val |= BIT(HPRE_PASID_EN_BIT); - writel_relaxed(val, qm->io_base + HPRE_DATA_WUSER_CFG); -} + u32 val1, val2; -static void hpre_pasid_disable(struct hisi_qm *qm) -{ - u32 val; + if (qm->ver >= QM_HW_V3) + return; - val = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG); - val &= ~BIT(HPRE_PASID_EN_BIT); - writel_relaxed(val, qm->io_base + HPRE_DATA_RUSER_CFG); - val = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG); - val &= ~BIT(HPRE_PASID_EN_BIT); - writel_relaxed(val, qm->io_base + HPRE_DATA_WUSER_CFG); + val1 = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG); + val2 = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG); + if (qm->use_sva) { + val1 |= BIT(HPRE_PASID_EN_BIT); + val2 |= BIT(HPRE_PASID_EN_BIT); + } else { + val1 &= ~BIT(HPRE_PASID_EN_BIT); + val2 &= ~BIT(HPRE_PASID_EN_BIT); + } + writel_relaxed(val1, qm->io_base + HPRE_DATA_RUSER_CFG); + writel_relaxed(val2, qm->io_base + HPRE_DATA_WUSER_CFG); } static int hpre_cfg_by_dsm(struct hisi_qm *qm) @@ -320,7 +321,7 @@ static int hpre_set_cluster(struct hisi_qm *qm) } /* - * For Kunpeng 920, we shoul disable FLR triggered by hardware (BME/PM/SRIOV). + * For Kunpeng 920, we should disable FLR triggered by hardware (BME/PM/SRIOV). * Or it may stay in D3 state when we bind and unbind hpre quickly, * as it does FLR triggered by hardware. 
*/ @@ -383,15 +384,14 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm) if (qm->ver == QM_HW_V2) { ret = hpre_cfg_by_dsm(qm); if (ret) - dev_err(dev, "acpi_evaluate_dsm err.\n"); + return ret; disable_flr_of_bme(qm); - - /* Enable data buffer pasid */ - if (qm->use_sva) - hpre_pasid_enable(qm); } + /* Config data buffer pasid needed by Kunpeng 920 */ + hpre_config_pasid(qm); + return ret; } @@ -401,10 +401,6 @@ static void hpre_cnt_regs_clear(struct hisi_qm *qm) unsigned long offset; int i; - /* clear current_qm */ - writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF); - writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF); - /* clear clusterX/cluster_ctrl */ for (i = 0; i < clusters_num; i++) { offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL; @@ -456,49 +452,6 @@ static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file) return &hpre->qm; } -static u32 hpre_current_qm_read(struct hpre_debugfs_file *file) -{ - struct hisi_qm *qm = hpre_file_to_qm(file); - - return readl(qm->io_base + QM_DFX_MB_CNT_VF); -} - -static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val) -{ - struct hisi_qm *qm = hpre_file_to_qm(file); - u32 num_vfs = qm->vfs_num; - u32 vfq_num, tmp; - - if (val > num_vfs) - return -EINVAL; - - /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */ - if (val == 0) { - qm->debug.curr_qm_qp_num = qm->qp_num; - } else { - vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs; - if (val == num_vfs) { - qm->debug.curr_qm_qp_num = - qm->ctrl_qp_num - qm->qp_num - (num_vfs - 1) * vfq_num; - } else { - qm->debug.curr_qm_qp_num = vfq_num; - } - } - - writel(val, qm->io_base + QM_DFX_MB_CNT_VF); - writel(val, qm->io_base + QM_DFX_DB_CNT_VF); - - tmp = val | - (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK); - writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); - - tmp = val | - (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK); - writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); - - return 0; -} - static u32 hpre_clear_enable_read(struct hpre_debugfs_file *file) { struct hisi_qm *qm = hpre_file_to_qm(file); @@ -519,7 +472,7 @@ static int hpre_clear_enable_write(struct hpre_debugfs_file *file, u32 val) ~HPRE_CTRL_CNT_CLR_CE_BIT) | val; writel(tmp, qm->io_base + HPRE_CTRL_CNT_CLR_CE); - return 0; + return 0; } static u32 hpre_cluster_inqry_read(struct hpre_debugfs_file *file) @@ -541,7 +494,7 @@ static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val) writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY); - return 0; + return 0; } static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf, @@ -554,9 +507,6 @@ static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf, spin_lock_irq(&file->lock); switch (file->type) { - case HPRE_CURRENT_QM: - val = hpre_current_qm_read(file); - break; case HPRE_CLEAR_ENABLE: val = hpre_clear_enable_read(file); break; @@ -597,11 +547,6 @@ static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf, spin_lock_irq(&file->lock); switch (file->type) { - case HPRE_CURRENT_QM: - ret = hpre_current_qm_write(file, val); - if (ret) - goto err_input; - break; case HPRE_CLEAR_ENABLE: ret = hpre_clear_enable_write(file, val); if (ret) @@ -740,11 +685,6 @@ static int hpre_ctrl_debug_init(struct hisi_qm *qm) { int ret; - ret = hpre_create_debugfs_file(qm, NULL, HPRE_CURRENT_QM, - HPRE_CURRENT_QM); - if (ret) - return ret; - ret = hpre_create_debugfs_file(qm, NULL, HPRE_CLEAR_ENABLE, HPRE_CLEAR_ENABLE); if (ret) @@ -812,9 +752,9 @@ 
static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) } if (pdev->revision >= QM_HW_V3) - qm->algs = "rsa\ndh\necdh\nx25519\nx448\necdsa\nsm2\n"; + qm->algs = "rsa\ndh\necdh\nx25519\nx448\necdsa\nsm2"; else - qm->algs = "rsa\ndh\n"; + qm->algs = "rsa\ndh"; qm->mode = uacce_mode; qm->pdev = pdev; qm->ver = pdev->revision; @@ -867,6 +807,20 @@ static void hpre_open_axi_master_ooo(struct hisi_qm *qm) HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB)); } +static void hpre_err_info_init(struct hisi_qm *qm) +{ + struct hisi_qm_err_info *err_info = &qm->err_info; + + err_info->ce = QM_BASE_CE; + err_info->fe = 0; + err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | + HPRE_OOO_ECC_2BIT_ERR; + err_info->dev_ce_mask = HPRE_HAC_RAS_CE_ENABLE; + err_info->msi_wr_port = HPRE_WR_MSI_PORT; + err_info->acpi_rst = "HRST"; + err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT; +} + static const struct hisi_qm_err_ini hpre_err_ini = { .hw_init = hpre_set_user_domain_and_cache, .hw_err_enable = hpre_hw_error_enable, @@ -875,16 +829,7 @@ static const struct hisi_qm_err_ini hpre_err_ini = { .clear_dev_hw_err_status = hpre_clear_hw_err_status, .log_dev_hw_err = hpre_log_hw_error, .open_axi_master_ooo = hpre_open_axi_master_ooo, - .err_info = { - .ce = QM_BASE_CE, - .nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT, - .fe = 0, - .ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | - HPRE_OOO_ECC_2BIT_ERR, - .dev_ce_mask = HPRE_HAC_RAS_CE_ENABLE, - .msi_wr_port = HPRE_WR_MSI_PORT, - .acpi_rst = "HRST", - } + .err_info_init = hpre_err_info_init, }; static int hpre_pf_probe_init(struct hpre *hpre) @@ -892,13 +837,12 @@ static int hpre_pf_probe_init(struct hpre *hpre) struct hisi_qm *qm = &hpre->qm; int ret; - qm->ctrl_qp_num = HPRE_QUEUE_NUM_V2; - ret = hpre_set_user_domain_and_cache(qm); if (ret) return ret; qm->err_ini = &hpre_err_ini; + qm->err_ini->err_info_init(qm); hisi_qm_dev_err_init(qm); return 0; @@ -1006,8 +950,6 @@ static void hpre_remove(struct pci_dev *pdev) hisi_qm_stop(qm, QM_NORMAL); if (qm->fun_type == QM_HW_PF) { - if (qm->use_sva && qm->ver == QM_HW_V2) - hpre_pasid_disable(qm); hpre_cnt_regs_clear(qm); qm->debug.curr_qm_qp_num = 0; hisi_qm_dev_err_uninit(qm); @@ -1016,7 +958,6 @@ static void hpre_remove(struct pci_dev *pdev) hisi_qm_uninit(qm); } - static const struct pci_error_handlers hpre_err_handler = { .error_detected = hisi_qm_dev_err_detected, .slot_reset = hisi_qm_dev_slot_reset, @@ -1075,4 +1016,5 @@ module_exit(hpre_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>"); +MODULE_AUTHOR("Meng Yu <yumeng18@huawei.com>"); MODULE_DESCRIPTION("Driver for HiSilicon HPRE accelerator"); diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 13cb4216561a..ce439a0c66c9 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -38,6 +38,7 @@ #define QM_MB_CMD_SQC_BT 0x4 #define QM_MB_CMD_CQC_BT 0x5 #define QM_MB_CMD_SQC_VFT_V2 0x6 +#define QM_MB_CMD_STOP_QP 0x8 #define QM_MB_CMD_SEND_BASE 0x300 #define QM_MB_EVENT_SHIFT 8 @@ -93,6 +94,12 @@ #define QM_DB_PRIORITY_SHIFT_V1 48 #define QM_DOORBELL_SQ_CQ_BASE_V2 0x1000 #define QM_DOORBELL_EQ_AEQ_BASE_V2 0x2000 +#define QM_QUE_ISO_CFG_V 0x0030 +#define QM_QUE_ISO_EN 0x100154 +#define QM_CAPBILITY 0x100158 +#define QM_QP_NUN_MASK GENMASK(10, 0) +#define QM_QP_DB_INTERVAL 0x10000 +#define QM_QP_MAX_NUM_SHIFT 11 #define QM_DB_CMD_SHIFT_V2 12 #define QM_DB_RAND_SHIFT_V2 16 #define QM_DB_INDEX_SHIFT_V2 32 @@ -129,9 +136,9 @@ #define QM_DFX_CNT_CLR_CE 0x100118 #define QM_ABNORMAL_INT_SOURCE 
0x100000 -#define QM_ABNORMAL_INT_SOURCE_CLR GENMASK(12, 0) +#define QM_ABNORMAL_INT_SOURCE_CLR GENMASK(14, 0) #define QM_ABNORMAL_INT_MASK 0x100004 -#define QM_ABNORMAL_INT_MASK_VALUE 0x1fff +#define QM_ABNORMAL_INT_MASK_VALUE 0x7fff #define QM_ABNORMAL_INT_STATUS 0x100008 #define QM_ABNORMAL_INT_SET 0x10000c #define QM_ABNORMAL_INF00 0x100010 @@ -164,6 +171,14 @@ #define ACC_AM_ROB_ECC_INT_STS 0x300104 #define ACC_ROB_ECC_ERR_MULTPL BIT(1) +#define QM_DFX_MB_CNT_VF 0x104010 +#define QM_DFX_DB_CNT_VF 0x104020 +#define QM_DFX_SQE_CNT_VF_SQN 0x104030 +#define QM_DFX_CQE_CNT_VF_CQN 0x104040 +#define QM_DFX_QN_SHIFT 16 +#define CURRENT_FUN_MASK GENMASK(5, 0) +#define CURRENT_Q_MASK GENMASK(31, 16) + #define POLL_PERIOD 10 #define POLL_TIMEOUT 1000 #define WAIT_PERIOD_US_MAX 200 @@ -173,6 +188,7 @@ #define QM_CACHE_WB_DONE 0x208 #define PCI_BAR_2 2 +#define PCI_BAR_4 4 #define QM_SQE_DATA_ALIGN_MASK GENMASK(6, 0) #define QMC_ALIGN(sz) ALIGN(sz, 32) @@ -334,6 +350,7 @@ struct hisi_qm_hw_ops { void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe); void (*hw_error_uninit)(struct hisi_qm *qm); enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm); + int (*stop_qp)(struct hisi_qp *qp); }; struct qm_dfx_item { @@ -350,6 +367,7 @@ static struct qm_dfx_item qm_dfx_files[] = { }; static const char * const qm_debug_file_name[] = { + [CURRENT_QM] = "current_qm", [CURRENT_Q] = "current_q", [CLEAR_ENABLE] = "clear_enable", }; @@ -373,6 +391,8 @@ static const struct hisi_qm_hw_error qm_hw_error[] = { { .int_msk = BIT(10), .msg = "qm_db_timeout" }, { .int_msk = BIT(11), .msg = "qm_of_fifo_of" }, { .int_msk = BIT(12), .msg = "qm_db_random_invalid" }, + { .int_msk = BIT(13), .msg = "qm_mailbox_timeout" }, + { .int_msk = BIT(14), .msg = "qm_flr_timeout" }, { /* sentinel */ } }; @@ -557,21 +577,22 @@ static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) { - u64 doorbell; - u64 dbase; + void __iomem *io_base = qm->io_base; u16 randata = 0; + u64 doorbell; if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ) - dbase = QM_DOORBELL_SQ_CQ_BASE_V2; + io_base = qm->db_io_base + (u64)qn * qm->db_interval + + QM_DOORBELL_SQ_CQ_BASE_V2; else - dbase = QM_DOORBELL_EQ_AEQ_BASE_V2; + io_base += QM_DOORBELL_EQ_AEQ_BASE_V2; doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) | ((u64)randata << QM_DB_RAND_SHIFT_V2) | ((u64)index << QM_DB_INDEX_SHIFT_V2) | ((u64)priority << QM_DB_PRIORITY_SHIFT_V2); - writeq(doorbell, qm->io_base + dbase); + writeq(doorbell, io_base); } static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) @@ -865,6 +886,26 @@ static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number) return 0; } +static int qm_get_vf_qp_num(struct hisi_qm *qm, u32 fun_num) +{ + u32 remain_q_num, vfq_num; + u32 num_vfs = qm->vfs_num; + + vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs; + if (vfq_num >= qm->max_qp_num) + return qm->max_qp_num; + + remain_q_num = (qm->ctrl_qp_num - qm->qp_num) % num_vfs; + if (vfq_num + remain_q_num <= qm->max_qp_num) + return fun_num == num_vfs ? vfq_num + remain_q_num : vfq_num; + + /* + * if vfq_num + remain_q_num > max_qp_num, the last VFs, + * each with one more queue. + */ + return fun_num + remain_q_num > num_vfs ? 
vfq_num + 1 : vfq_num; +} + static struct hisi_qm *file_to_qm(struct debugfs_file *file) { struct qm_debug *debug = file->debug; @@ -918,6 +959,41 @@ static int clear_enable_write(struct debugfs_file *file, u32 rd_clr_ctrl) return 0; } +static u32 current_qm_read(struct debugfs_file *file) +{ + struct hisi_qm *qm = file_to_qm(file); + + return readl(qm->io_base + QM_DFX_MB_CNT_VF); +} + +static int current_qm_write(struct debugfs_file *file, u32 val) +{ + struct hisi_qm *qm = file_to_qm(file); + u32 tmp; + + if (val > qm->vfs_num) + return -EINVAL; + + /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */ + if (!val) + qm->debug.curr_qm_qp_num = qm->qp_num; + else + qm->debug.curr_qm_qp_num = qm_get_vf_qp_num(qm, val); + + writel(val, qm->io_base + QM_DFX_MB_CNT_VF); + writel(val, qm->io_base + QM_DFX_DB_CNT_VF); + + tmp = val | + (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK); + writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); + + tmp = val | + (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK); + writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); + + return 0; +} + static ssize_t qm_debug_read(struct file *filp, char __user *buf, size_t count, loff_t *pos) { @@ -929,6 +1005,9 @@ static ssize_t qm_debug_read(struct file *filp, char __user *buf, mutex_lock(&file->lock); switch (index) { + case CURRENT_QM: + val = current_qm_read(file); + break; case CURRENT_Q: val = current_q_read(file); break; @@ -971,27 +1050,24 @@ static ssize_t qm_debug_write(struct file *filp, const char __user *buf, mutex_lock(&file->lock); switch (index) { + case CURRENT_QM: + ret = current_qm_write(file, val); + break; case CURRENT_Q: ret = current_q_write(file, val); - if (ret) - goto err_input; break; case CLEAR_ENABLE: ret = clear_enable_write(file, val); - if (ret) - goto err_input; break; default: ret = -EINVAL; - goto err_input; } mutex_unlock(&file->lock); - return count; + if (ret) + return ret; -err_input: - mutex_unlock(&file->lock); - return ret; + return count; } static const struct file_operations qm_debug_fops = { @@ -1529,12 +1605,12 @@ static const struct file_operations qm_cmd_fops = { .write = qm_cmd_write, }; -static void qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index) +static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir, + enum qm_debug_file index) { - struct dentry *qm_d = qm->debug.qm_d; struct debugfs_file *file = qm->debug.files + index; - debugfs_create_file(qm_debug_file_name[index], 0600, qm_d, file, + debugfs_create_file(qm_debug_file_name[index], 0600, dir, file, &qm_debug_fops); file->index = index; @@ -1628,7 +1704,7 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm) if (val == (QM_DB_RANDOM_INVALID | QM_BASE_CE)) { writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE); - writel(qm->err_ini->err_info.nfe, + writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE); return ACC_ERR_RECOVERED; } @@ -1639,6 +1715,11 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm) return ACC_ERR_RECOVERED; } +static int qm_stop_qp(struct hisi_qp *qp) +{ + return qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0); +} + static const struct hisi_qm_hw_ops qm_hw_ops_v1 = { .qm_db = qm_db_v1, .get_irq_num = qm_get_irq_num_v1, @@ -1654,6 +1735,16 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v2 = { .hw_error_handle = qm_hw_error_handle_v2, }; +static const struct hisi_qm_hw_ops qm_hw_ops_v3 = { + .get_vft = qm_get_vft_v2, + .qm_db = qm_db_v2, + .get_irq_num = 
qm_get_irq_num_v2, + .hw_error_init = qm_hw_error_init_v2, + .hw_error_uninit = qm_hw_error_uninit_v2, + .hw_error_handle = qm_hw_error_handle_v2, + .stop_qp = qm_stop_qp, +}; + static void *qm_get_avail_sqe(struct hisi_qp *qp) { struct hisi_qp_status *qp_status = &qp->qp_status; @@ -1933,6 +2024,14 @@ static int qm_drain_qp(struct hisi_qp *qp) if (qm->err_status.is_qm_ecc_mbit || qm->err_status.is_dev_ecc_mbit) return 0; + /* Kunpeng930 supports drain qp by device */ + if (qm->ops->stop_qp) { + ret = qm->ops->stop_qp(qp); + if (ret) + dev_err(dev, "Failed to stop qp(%u)!\n", qp->qp_id); + return ret; + } + addr = qm_ctx_alloc(qm, size, &dma_addr); if (IS_ERR(addr)) { dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n"); @@ -2132,6 +2231,8 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q, { struct hisi_qp *qp = q->priv; struct hisi_qm *qm = qp->qm; + resource_size_t phys_base = qm->db_phys_base + + qp->qp_id * qm->db_interval; size_t sz = vma->vm_end - vma->vm_start; struct pci_dev *pdev = qm->pdev; struct device *dev = &pdev->dev; @@ -2143,16 +2244,19 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q, if (qm->ver == QM_HW_V1) { if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR) return -EINVAL; - } else { + } else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation) { if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR + QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE)) return -EINVAL; + } else { + if (sz > qm->db_interval) + return -EINVAL; } vma->vm_flags |= VM_IO; return remap_pfn_range(vma, vma->vm_start, - qm->phys_base >> PAGE_SHIFT, + phys_base >> PAGE_SHIFT, sz, pgprot_noncached(vma->vm_page_prot)); case UACCE_QFRT_DUS: if (sz != qp->qdma.size) @@ -2267,14 +2371,20 @@ static int qm_alloc_uacce(struct hisi_qm *qm) uacce->priv = qm; uacce->algs = qm->algs; - if (qm->ver == QM_HW_V1) { - mmio_page_nr = QM_DOORBELL_PAGE_NR; + if (qm->ver == QM_HW_V1) uacce->api_ver = HISI_QM_API_VER_BASE; - } else { + else if (qm->ver == QM_HW_V2) + uacce->api_ver = HISI_QM_API_VER2_BASE; + else + uacce->api_ver = HISI_QM_API_VER3_BASE; + + if (qm->ver == QM_HW_V1) + mmio_page_nr = QM_DOORBELL_PAGE_NR; + else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation) mmio_page_nr = QM_DOORBELL_PAGE_NR + QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE; - uacce->api_ver = HISI_QM_API_VER2_BASE; - } + else + mmio_page_nr = qm->db_interval / PAGE_SIZE; dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH + sizeof(struct qm_cqe) * QM_Q_DEPTH) >> PAGE_SHIFT; @@ -2482,8 +2592,10 @@ static void hisi_qm_pre_init(struct hisi_qm *qm) if (qm->ver == QM_HW_V1) qm->ops = &qm_hw_ops_v1; - else + else if (qm->ver == QM_HW_V2) qm->ops = &qm_hw_ops_v2; + else + qm->ops = &qm_hw_ops_v3; pci_set_drvdata(pdev, qm); mutex_init(&qm->mailbox_lock); @@ -2492,13 +2604,23 @@ static void hisi_qm_pre_init(struct hisi_qm *qm) qm->misc_ctl = false; } -static void hisi_qm_pci_uninit(struct hisi_qm *qm) +static void qm_put_pci_res(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; - pci_free_irq_vectors(pdev); + if (qm->use_db_isolation) + iounmap(qm->db_io_base); + iounmap(qm->io_base); pci_release_mem_regions(pdev); +} + +static void hisi_qm_pci_uninit(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + + pci_free_irq_vectors(pdev); + qm_put_pci_res(qm); pci_disable_device(pdev); } @@ -2527,7 +2649,6 @@ void hisi_qm_uninit(struct hisi_qm *qm) hisi_qm_cache_wb(qm); dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma); - memset(&qm->qdma, 0, sizeof(qm->qdma)); } qm_irq_unregister(qm); @@ -2681,7 +2802,7 @@ static int __hisi_qm_start(struct 
hisi_qm *qm) { int ret; - WARN_ON(!qm->qdma.dma); + WARN_ON(!qm->qdma.va); if (qm->fun_type == QM_HW_PF) { ret = qm_dev_mem_reset(qm); @@ -2930,9 +3051,11 @@ void hisi_qm_debug_init(struct hisi_qm *qm) qm->debug.qm_d = qm_d; /* only show this in PF */ - if (qm->fun_type == QM_HW_PF) + if (qm->fun_type == QM_HW_PF) { + qm_create_debugfs_file(qm, qm->debug.debug_root, CURRENT_QM); for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++) - qm_create_debugfs_file(qm, i); + qm_create_debugfs_file(qm, qm_d, i); + } debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops); @@ -2960,6 +3083,10 @@ void hisi_qm_debug_regs_clear(struct hisi_qm *qm) struct qm_dfx_registers *regs; int i; + /* clear current_qm */ + writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF); + writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF); + /* clear current_q */ writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); @@ -2982,7 +3109,7 @@ EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear); static void qm_hw_error_init(struct hisi_qm *qm) { - const struct hisi_qm_err_info *err_info = &qm->err_ini->err_info; + struct hisi_qm_err_info *err_info = &qm->err_info; if (!qm->ops->hw_error_init) { dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n"); @@ -3175,30 +3302,46 @@ EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node); static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs) { - u32 remain_q_num, q_num, i, j; + u32 remain_q_num, vfs_q_num, act_q_num, q_num, i, j; + u32 max_qp_num = qm->max_qp_num; u32 q_base = qm->qp_num; int ret; if (!num_vfs) return -EINVAL; - remain_q_num = qm->ctrl_qp_num - qm->qp_num; + vfs_q_num = qm->ctrl_qp_num - qm->qp_num; - /* If remain queues not enough, return error. */ - if (qm->ctrl_qp_num < qm->qp_num || remain_q_num < num_vfs) + /* If vfs_q_num is less than num_vfs, return error. */ + if (vfs_q_num < num_vfs) return -EINVAL; - q_num = remain_q_num / num_vfs; - for (i = 1; i <= num_vfs; i++) { - if (i == num_vfs) - q_num += remain_q_num % num_vfs; - ret = hisi_qm_set_vft(qm, i, q_base, q_num); + q_num = vfs_q_num / num_vfs; + remain_q_num = vfs_q_num % num_vfs; + + for (i = num_vfs; i > 0; i--) { + /* + * if q_num + remain_q_num > max_qp_num in last vf, divide the + * remaining queues equally. 
+ */ + if (i == num_vfs && q_num + remain_q_num <= max_qp_num) { + act_q_num = q_num + remain_q_num; + remain_q_num = 0; + } else if (remain_q_num > 0) { + act_q_num = q_num + 1; + remain_q_num--; + } else { + act_q_num = q_num; + } + + act_q_num = min_t(int, act_q_num, max_qp_num); + ret = hisi_qm_set_vft(qm, i, q_base, act_q_num); if (ret) { - for (j = i; j > 0; j--) + for (j = num_vfs; j > i; j--) hisi_qm_set_vft(qm, j, 0, 0); return ret; } - q_base += q_num; + q_base += act_q_num; } return 0; @@ -3318,15 +3461,15 @@ static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm) /* get device hardware error status */ err_sts = qm->err_ini->get_dev_hw_err_status(qm); if (err_sts) { - if (err_sts & qm->err_ini->err_info.ecc_2bits_mask) + if (err_sts & qm->err_info.ecc_2bits_mask) qm->err_status.is_dev_ecc_mbit = true; if (qm->err_ini->log_dev_hw_err) qm->err_ini->log_dev_hw_err(qm, err_sts); /* ce error does not need to be reset */ - if ((err_sts | qm->err_ini->err_info.dev_ce_mask) == - qm->err_ini->err_info.dev_ce_mask) { + if ((err_sts | qm->err_info.dev_ce_mask) == + qm->err_info.dev_ce_mask) { if (qm->err_ini->clear_dev_hw_err_status) qm->err_ini->clear_dev_hw_err_status(qm, err_sts); @@ -3639,7 +3782,7 @@ static int qm_soft_reset(struct hisi_qm *qm) acpi_status s; s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev), - qm->err_ini->err_info.acpi_rst, + qm->err_info.acpi_rst, NULL, &value); if (ACPI_FAILURE(s)) { pci_err(pdev, "NO controller reset method!\n"); @@ -3707,12 +3850,11 @@ static void qm_restart_prepare(struct hisi_qm *qm) /* temporarily close the OOO port used for PEH to write out MSI */ value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); - writel(value & ~qm->err_ini->err_info.msi_wr_port, + writel(value & ~qm->err_info.msi_wr_port, qm->io_base + ACC_AM_CFG_PORT_WR_EN); /* clear dev ecc 2bit error source if having */ - value = qm_get_dev_err_status(qm) & - qm->err_ini->err_info.ecc_2bits_mask; + value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask; if (value && qm->err_ini->clear_dev_hw_err_status) qm->err_ini->clear_dev_hw_err_status(qm, value); @@ -3736,7 +3878,7 @@ static void qm_restart_done(struct hisi_qm *qm) /* open the OOO port for PEH to write out MSI */ value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); - value |= qm->err_ini->err_info.msi_wr_port; + value |= qm->err_info.msi_wr_port; writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN); qm->err_status.is_qm_ecc_mbit = false; @@ -3875,8 +4017,7 @@ static int qm_check_dev_error(struct hisi_qm *qm) if (ret) return ret; - return (qm_get_dev_err_status(qm) & - qm->err_ini->err_info.ecc_2bits_mask); + return (qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask); } void hisi_qm_reset_prepare(struct pci_dev *pdev) @@ -4084,7 +4225,7 @@ int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list) mutex_unlock(&qm_list->lock); if (flag) { - ret = qm_list->register_to_crypto(); + ret = qm_list->register_to_crypto(qm); if (ret) { mutex_lock(&qm_list->lock); list_del(&qm->list); @@ -4115,59 +4256,134 @@ void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list) mutex_unlock(&qm_list->lock); if (list_empty(&qm_list->list)) - qm_list->unregister_from_crypto(); + qm_list->unregister_from_crypto(qm); } EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister); -static int hisi_qm_pci_init(struct hisi_qm *qm) +static int qm_get_qp_num(struct hisi_qm *qm) +{ + if (qm->ver == QM_HW_V1) + qm->ctrl_qp_num = QM_QNUM_V1; + else if (qm->ver == QM_HW_V2) + qm->ctrl_qp_num = QM_QNUM_V2; + else + 
qm->ctrl_qp_num = readl(qm->io_base + QM_CAPBILITY) & + QM_QP_NUN_MASK; + + if (qm->use_db_isolation) + qm->max_qp_num = (readl(qm->io_base + QM_CAPBILITY) >> + QM_QP_MAX_NUM_SHIFT) & QM_QP_NUN_MASK; + else + qm->max_qp_num = qm->ctrl_qp_num; + + /* check if qp number is valid */ + if (qm->qp_num > qm->max_qp_num) { + dev_err(&qm->pdev->dev, "qp num(%u) is more than max qp num(%u)!\n", + qm->qp_num, qm->max_qp_num); + return -EINVAL; + } + + return 0; +} + +static int qm_get_pci_res(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; struct device *dev = &pdev->dev; - unsigned int num_vec; int ret; - ret = pci_enable_device_mem(pdev); - if (ret < 0) { - dev_err(dev, "Failed to enable device mem!\n"); - return ret; - } - ret = pci_request_mem_regions(pdev, qm->dev_name); if (ret < 0) { dev_err(dev, "Failed to request mem regions!\n"); - goto err_disable_pcidev; + return ret; } qm->phys_base = pci_resource_start(pdev, PCI_BAR_2); - qm->phys_size = pci_resource_len(qm->pdev, PCI_BAR_2); - qm->io_base = ioremap(qm->phys_base, qm->phys_size); + qm->io_base = ioremap(qm->phys_base, pci_resource_len(pdev, PCI_BAR_2)); if (!qm->io_base) { ret = -EIO; - goto err_release_mem_regions; + goto err_request_mem_regions; + } + + if (qm->ver > QM_HW_V2) { + if (qm->fun_type == QM_HW_PF) + qm->use_db_isolation = readl(qm->io_base + + QM_QUE_ISO_EN) & BIT(0); + else + qm->use_db_isolation = readl(qm->io_base + + QM_QUE_ISO_CFG_V) & BIT(0); + } + + if (qm->use_db_isolation) { + qm->db_interval = QM_QP_DB_INTERVAL; + qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4); + qm->db_io_base = ioremap(qm->db_phys_base, + pci_resource_len(pdev, PCI_BAR_4)); + if (!qm->db_io_base) { + ret = -EIO; + goto err_ioremap; + } + } else { + qm->db_phys_base = qm->phys_base; + qm->db_io_base = qm->io_base; + qm->db_interval = 0; } + if (qm->fun_type == QM_HW_PF) { + ret = qm_get_qp_num(qm); + if (ret) + goto err_db_ioremap; + } + + return 0; + +err_db_ioremap: + if (qm->use_db_isolation) + iounmap(qm->db_io_base); +err_ioremap: + iounmap(qm->io_base); +err_request_mem_regions: + pci_release_mem_regions(pdev); + return ret; +} + +static int hisi_qm_pci_init(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + struct device *dev = &pdev->dev; + unsigned int num_vec; + int ret; + + ret = pci_enable_device_mem(pdev); + if (ret < 0) { + dev_err(dev, "Failed to enable device mem!\n"); + return ret; + } + + ret = qm_get_pci_res(qm); + if (ret) + goto err_disable_pcidev; + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); if (ret < 0) - goto err_iounmap; + goto err_get_pci_res; pci_set_master(pdev); if (!qm->ops->get_irq_num) { ret = -EOPNOTSUPP; - goto err_iounmap; + goto err_get_pci_res; } num_vec = qm->ops->get_irq_num(qm); ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI); if (ret < 0) { dev_err(dev, "Failed to enable MSI vectors!\n"); - goto err_iounmap; + goto err_get_pci_res; } return 0; -err_iounmap: - iounmap(qm->io_base); -err_release_mem_regions: - pci_release_mem_regions(pdev); +err_get_pci_res: + qm_put_pci_res(qm); err_disable_pcidev: pci_disable_device(pdev); return ret; @@ -4187,28 +4403,28 @@ int hisi_qm_init(struct hisi_qm *qm) hisi_qm_pre_init(qm); - ret = qm_alloc_uacce(qm); - if (ret < 0) - dev_warn(dev, "fail to alloc uacce (%d)\n", ret); - ret = hisi_qm_pci_init(qm); if (ret) - goto err_remove_uacce; + return ret; ret = qm_irq_register(qm); if (ret) - goto err_pci_uninit; + goto err_pci_init; if (qm->fun_type == QM_HW_VF && qm->ver != QM_HW_V1) { /* v2 starts to support get vft 
by mailbox */ ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); if (ret) - goto err_irq_unregister; + goto err_irq_register; } + ret = qm_alloc_uacce(qm); + if (ret < 0) + dev_warn(dev, "fail to alloc uacce (%d)\n", ret); + ret = hisi_qm_memory_init(qm); if (ret) - goto err_irq_unregister; + goto err_alloc_uacce; INIT_WORK(&qm->work, qm_work_process); if (qm->fun_type == QM_HW_PF) @@ -4218,13 +4434,13 @@ int hisi_qm_init(struct hisi_qm *qm) return 0; -err_irq_unregister: - qm_irq_unregister(qm); -err_pci_uninit: - hisi_qm_pci_uninit(qm); -err_remove_uacce: +err_alloc_uacce: uacce_remove(qm->uacce); qm->uacce = NULL; +err_irq_register: + qm_irq_unregister(qm); +err_pci_init: + hisi_qm_pci_uninit(qm); return ret; } EXPORT_SYMBOL_GPL(hisi_qm_init); diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index 54967c6b9c78..acefdf8b3a50 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -51,14 +51,6 @@ #define PEH_AXUSER_CFG 0x401001 #define PEH_AXUSER_CFG_ENABLE 0xffffffff -#define QM_DFX_MB_CNT_VF 0x104010 -#define QM_DFX_DB_CNT_VF 0x104020 -#define QM_DFX_SQE_CNT_VF_SQN 0x104030 -#define QM_DFX_CQE_CNT_VF_CQN 0x104040 -#define QM_DFX_QN_SHIFT 16 -#define CURRENT_FUN_MASK GENMASK(5, 0) -#define CURRENT_Q_MASK GENMASK(31, 16) - #define QM_AXI_RRESP BIT(0) #define QM_AXI_BRESP BIT(1) #define QM_ECC_MBIT BIT(2) @@ -72,10 +64,13 @@ #define QM_DB_TIMEOUT BIT(10) #define QM_OF_FIFO_OF BIT(11) #define QM_DB_RANDOM_INVALID BIT(12) +#define QM_MAILBOX_TIMEOUT BIT(13) +#define QM_FLR_TIMEOUT BIT(14) #define QM_BASE_NFE (QM_AXI_RRESP | QM_AXI_BRESP | QM_ECC_MBIT | \ QM_ACC_GET_TASK_TIMEOUT | QM_DB_TIMEOUT | \ - QM_OF_FIFO_OF | QM_DB_RANDOM_INVALID) + QM_OF_FIFO_OF | QM_DB_RANDOM_INVALID | \ + QM_MAILBOX_TIMEOUT | QM_FLR_TIMEOUT) #define QM_BASE_CE QM_ECC_1BIT #define QM_Q_DEPTH 1024 @@ -123,6 +118,7 @@ enum qm_fun_type { }; enum qm_debug_file { + CURRENT_QM, CURRENT_Q, CLEAR_ENABLE, DEBUG_FILE_NUM, @@ -193,14 +189,14 @@ struct hisi_qm_err_ini { void (*open_axi_master_ooo)(struct hisi_qm *qm); void (*close_axi_master_ooo)(struct hisi_qm *qm); void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts); - struct hisi_qm_err_info err_info; + void (*err_info_init)(struct hisi_qm *qm); }; struct hisi_qm_list { struct mutex lock; struct list_head list; - int (*register_to_crypto)(void); - void (*unregister_from_crypto)(void); + int (*register_to_crypto)(struct hisi_qm *qm); + void (*unregister_from_crypto)(struct hisi_qm *qm); }; struct hisi_qm { @@ -209,12 +205,15 @@ struct hisi_qm { const char *dev_name; struct pci_dev *pdev; void __iomem *io_base; + void __iomem *db_io_base; u32 sqe_size; u32 qp_base; u32 qp_num; u32 qp_in_used; u32 ctrl_qp_num; + u32 max_qp_num; u32 vfs_num; + u32 db_interval; struct list_head list; struct hisi_qm_list *qm_list; @@ -230,6 +229,7 @@ struct hisi_qm { struct hisi_qm_status status; const struct hisi_qm_err_ini *err_ini; + struct hisi_qm_err_info err_info; struct hisi_qm_err_status err_status; unsigned long misc_ctl; /* driver removing and reset sched */ @@ -252,8 +252,11 @@ struct hisi_qm { const char *algs; bool use_sva; bool is_frozen; + + /* doorbell isolation enable */ + bool use_db_isolation; resource_size_t phys_base; - resource_size_t phys_size; + resource_size_t db_phys_base; struct uacce_device *uacce; int mode; }; diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c index 8ca945ac297e..0a3c8f019b02 100644 --- a/drivers/crypto/hisilicon/sec/sec_algs.c +++ 
b/drivers/crypto/hisilicon/sec/sec_algs.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (c) 2016-2017 Hisilicon Limited. */ +/* Copyright (c) 2016-2017 HiSilicon Limited. */ #include <linux/crypto.h> #include <linux/dma-mapping.h> #include <linux/dmapool.h> diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c index 91ee2bb575df..c8de1b51c843 100644 --- a/drivers/crypto/hisilicon/sec/sec_drv.c +++ b/drivers/crypto/hisilicon/sec/sec_drv.c @@ -1,8 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Driver for the Hisilicon SEC units found on Hip06 Hip07 + * Driver for the HiSilicon SEC units found on Hip06 Hip07 * - * Copyright (c) 2016-2017 Hisilicon Limited. + * Copyright (c) 2016-2017 HiSilicon Limited. */ #include <linux/acpi.h> #include <linux/atomic.h> @@ -233,7 +233,7 @@ static int sec_queue_map_io(struct sec_queue *queue) IORESOURCE_MEM, 2 + queue->queue_id); if (!res) { - dev_err(dev, "Failed to get queue %d memory resource\n", + dev_err(dev, "Failed to get queue %u memory resource\n", queue->queue_id); return -ENOMEM; } @@ -653,12 +653,12 @@ static int sec_queue_free(struct sec_queue *queue) struct sec_dev_info *info = queue->dev_info; if (queue->queue_id >= SEC_Q_NUM) { - dev_err(info->dev, "No queue %d\n", queue->queue_id); + dev_err(info->dev, "No queue %u\n", queue->queue_id); return -ENODEV; } if (!queue->in_use) { - dev_err(info->dev, "Queue %d is idle\n", queue->queue_id); + dev_err(info->dev, "Queue %u is idle\n", queue->queue_id); return -ENODEV; } @@ -834,6 +834,7 @@ int sec_queue_stop_release(struct sec_queue *queue) /** * sec_queue_empty() - Is this hardware queue currently empty. + * @queue: The queue to test * * We need to know if we have an empty queue for some of the chaining modes * as if it is not empty we may need to hold the message in a software queue @@ -1315,6 +1316,6 @@ static struct platform_driver sec_driver = { module_platform_driver(sec_driver); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Hisilicon Security Accelerators"); +MODULE_DESCRIPTION("HiSilicon Security Accelerators"); MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com"); MODULE_AUTHOR("Jonathan Cameron <jonathan.cameron@huawei.com>"); diff --git a/drivers/crypto/hisilicon/sec/sec_drv.h b/drivers/crypto/hisilicon/sec/sec_drv.h index 4d9063a8b10b..179a8250d691 100644 --- a/drivers/crypto/hisilicon/sec/sec_drv.h +++ b/drivers/crypto/hisilicon/sec/sec_drv.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2016-2017 Hisilicon Limited. */ +/* Copyright (c) 2016-2017 HiSilicon Limited. 
*/ #ifndef _SEC_DRV_H_ #define _SEC_DRV_H_ diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index 08491912afd5..dfdce2f21e65 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -4,8 +4,6 @@ #ifndef __HISI_SEC_V2_H #define __HISI_SEC_V2_H -#include <linux/list.h> - #include "../qm.h" #include "sec_crypto.h" @@ -50,7 +48,7 @@ struct sec_req { int err_type; int req_id; - int flag; + u32 flag; /* Status of the SEC request */ bool fake_busy; @@ -139,6 +137,7 @@ struct sec_ctx { bool pbuf_supported; struct sec_cipher_ctx c_ctx; struct sec_auth_ctx a_ctx; + struct device *dev; }; enum sec_endian { @@ -148,7 +147,6 @@ enum sec_endian { }; enum sec_debug_file_index { - SEC_CURRENT_QM, SEC_CLEAR_ENABLE, SEC_DEBUG_FILE_NUM, }; @@ -183,6 +181,6 @@ struct sec_dev { void sec_destroy_qps(struct hisi_qp **qps, int qp_num); struct hisi_qp **sec_create_qps(void); -int sec_register_to_crypto(void); -void sec_unregister_from_crypto(void); +int sec_register_to_crypto(struct hisi_qm *qm); +void sec_unregister_from_crypto(struct hisi_qm *qm); #endif diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 2eaa516b3231..133aede8bf07 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -7,6 +7,7 @@ #include <crypto/des.h> #include <crypto/hash.h> #include <crypto/internal/aead.h> +#include <crypto/internal/des.h> #include <crypto/sha1.h> #include <crypto/sha2.h> #include <crypto/skcipher.h> @@ -43,7 +44,6 @@ #define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH) #define SEC_SGL_SGE_NR 128 -#define SEC_CTX_DEV(ctx) (&(ctx)->sec->qm.pdev->dev) #define SEC_CIPHER_AUTH 0xfe #define SEC_AUTH_CIPHER 0x1 #define SEC_MAX_MAC_LEN 64 @@ -96,7 +96,7 @@ static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx) 0, QM_Q_DEPTH, GFP_ATOMIC); mutex_unlock(&qp_ctx->req_lock); if (unlikely(req_id < 0)) { - dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n"); + dev_err(req->ctx->dev, "alloc req id fail!\n"); return req_id; } @@ -112,7 +112,7 @@ static void sec_free_req_id(struct sec_req *req) int req_id = req->req_id; if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) { - dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n"); + dev_err(req->ctx->dev, "free request id invalid!\n"); return; } @@ -138,7 +138,7 @@ static int sec_aead_verify(struct sec_req *req) aead_req->cryptlen + aead_req->assoclen - authsize); if (unlikely(sz != authsize || memcmp(mac_out, mac, sz))) { - dev_err(SEC_CTX_DEV(req->ctx), "aead verify failure!\n"); + dev_err(req->ctx->dev, "aead verify failure!\n"); return -EBADMSG; } @@ -177,7 +177,7 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp) if (unlikely(req->err_type || done != SEC_SQE_DONE || (ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG) || (ctx->alg_type == SEC_AEAD && flag != SEC_SQE_AEAD_FLAG))) { - dev_err(SEC_CTX_DEV(ctx), + dev_err_ratelimited(ctx->dev, "err_type[%d],done[%d],flag[%d]\n", req->err_type, done, flag); err = -EIO; @@ -326,8 +326,8 @@ static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res) static int sec_alg_resource_alloc(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx) { - struct device *dev = SEC_CTX_DEV(ctx); struct sec_alg_res *res = qp_ctx->res; + struct device *dev = ctx->dev; int ret; ret = sec_alloc_civ_resource(dev, res); @@ -360,7 +360,7 @@ alloc_fail: static void sec_alg_resource_free(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx) { - 
struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; sec_free_civ_resource(dev, qp_ctx->res); @@ -373,7 +373,7 @@ static void sec_alg_resource_free(struct sec_ctx *ctx, static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx, int qp_ctx_id, int alg_type) { - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; struct sec_qp_ctx *qp_ctx; struct hisi_qp *qp; int ret = -ENOMEM; @@ -428,7 +428,7 @@ err_destroy_idr: static void sec_release_qp_ctx(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx) { - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; hisi_qm_stop_qp(qp_ctx->qp); sec_alg_resource_free(ctx, qp_ctx); @@ -452,6 +452,7 @@ static int sec_ctx_base_init(struct sec_ctx *ctx) sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm); ctx->sec = sec; + ctx->dev = &sec->qm.pdev->dev; ctx->hlf_q_num = sec->ctx_q_num >> 1; ctx->pbuf_supported = ctx->sec->iommu_used; @@ -476,11 +477,9 @@ static int sec_ctx_base_init(struct sec_ctx *ctx) err_sec_release_qp_ctx: for (i = i - 1; i >= 0; i--) sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]); - kfree(ctx->qp_ctx); err_destroy_qps: sec_destroy_qps(ctx->qps, sec->ctx_q_num); - return ret; } @@ -499,7 +498,7 @@ static int sec_cipher_init(struct sec_ctx *ctx) { struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; - c_ctx->c_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE, + c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE, &c_ctx->c_key_dma, GFP_KERNEL); if (!c_ctx->c_key) return -ENOMEM; @@ -512,7 +511,7 @@ static void sec_cipher_uninit(struct sec_ctx *ctx) struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE); - dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE, + dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE, c_ctx->c_key, c_ctx->c_key_dma); } @@ -520,7 +519,7 @@ static int sec_auth_init(struct sec_ctx *ctx) { struct sec_auth_ctx *a_ctx = &ctx->a_ctx; - a_ctx->a_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE, + a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE, &a_ctx->a_key_dma, GFP_KERNEL); if (!a_ctx->a_key) return -ENOMEM; @@ -533,7 +532,7 @@ static void sec_auth_uninit(struct sec_ctx *ctx) struct sec_auth_ctx *a_ctx = &ctx->a_ctx; memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE); - dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE, + dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE, a_ctx->a_key, a_ctx->a_key_dma); } @@ -546,7 +545,7 @@ static int sec_skcipher_init(struct crypto_skcipher *tfm) crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req)); ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm); if (ctx->c_ctx.ivsize > SEC_IV_SIZE) { - dev_err(SEC_CTX_DEV(ctx), "get error skcipher iv size!\n"); + pr_err("get error skcipher iv size!\n"); return -EINVAL; } @@ -573,10 +572,18 @@ static void sec_skcipher_uninit(struct crypto_skcipher *tfm) sec_ctx_base_uninit(ctx); } -static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx, +static int sec_skcipher_3des_setkey(struct crypto_skcipher *tfm, const u8 *key, const u32 keylen, const enum sec_cmode c_mode) { + struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); + struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + int ret; + + ret = verify_skcipher_des3_key(tfm, key); + if (ret) + return ret; + switch (keylen) { case SEC_DES3_2KEY_SIZE: c_ctx->c_key_len = SEC_CKEY_3DES_2KEY; @@ -633,12 +640,13 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, { struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + 
struct device *dev = ctx->dev; int ret; if (c_mode == SEC_CMODE_XTS) { ret = xts_verify_key(tfm, key, keylen); if (ret) { - dev_err(SEC_CTX_DEV(ctx), "xts mode key err!\n"); + dev_err(dev, "xts mode key err!\n"); return ret; } } @@ -648,7 +656,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, switch (c_alg) { case SEC_CALG_3DES: - ret = sec_skcipher_3des_setkey(c_ctx, keylen, c_mode); + ret = sec_skcipher_3des_setkey(tfm, key, keylen, c_mode); break; case SEC_CALG_AES: case SEC_CALG_SM4: @@ -659,7 +667,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, } if (ret) { - dev_err(SEC_CTX_DEV(ctx), "set sec key err!\n"); + dev_err(dev, "set sec key err!\n"); return ret; } @@ -691,7 +699,7 @@ static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req, struct aead_request *aead_req = req->aead_req.aead_req; struct sec_cipher_req *c_req = &req->c_req; struct sec_qp_ctx *qp_ctx = req->qp_ctx; - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; int copy_size, pbuf_length; int req_id = req->req_id; @@ -701,21 +709,14 @@ static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req, copy_size = c_req->c_len; pbuf_length = sg_copy_to_buffer(src, sg_nents(src), - qp_ctx->res[req_id].pbuf, - copy_size); - + qp_ctx->res[req_id].pbuf, + copy_size); if (unlikely(pbuf_length != copy_size)) { dev_err(dev, "copy src data to pbuf error!\n"); return -EINVAL; } c_req->c_in_dma = qp_ctx->res[req_id].pbuf_dma; - - if (!c_req->c_in_dma) { - dev_err(dev, "fail to set pbuffer address!\n"); - return -ENOMEM; - } - c_req->c_out_dma = c_req->c_in_dma; return 0; @@ -727,7 +728,7 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req, struct aead_request *aead_req = req->aead_req.aead_req; struct sec_cipher_req *c_req = &req->c_req; struct sec_qp_ctx *qp_ctx = req->qp_ctx; - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; int copy_size, pbuf_length; int req_id = req->req_id; @@ -739,7 +740,6 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req, pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst), qp_ctx->res[req_id].pbuf, copy_size); - if (unlikely(pbuf_length != copy_size)) dev_err(dev, "copy pbuf data to dst error!\n"); } @@ -751,7 +751,7 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req, struct sec_aead_req *a_req = &req->aead_req; struct sec_qp_ctx *qp_ctx = req->qp_ctx; struct sec_alg_res *res = &qp_ctx->res[req->req_id]; - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; int ret; if (req->use_pbuf) { @@ -806,7 +806,7 @@ static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req, struct scatterlist *src, struct scatterlist *dst) { struct sec_cipher_req *c_req = &req->c_req; - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; if (req->use_pbuf) { sec_cipher_pbuf_unmap(ctx, req, dst); @@ -891,6 +891,7 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, { struct sec_ctx *ctx = crypto_aead_ctx(tfm); struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + struct device *dev = ctx->dev; struct crypto_authenc_keys keys; int ret; @@ -904,13 +905,13 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, ret = sec_aead_aes_set_key(c_ctx, &keys); if (ret) { - dev_err(SEC_CTX_DEV(ctx), "set sec cipher key err!\n"); + dev_err(dev, "set sec cipher key err!\n"); goto bad_key; } ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys); if (ret) { - 
dev_err(SEC_CTX_DEV(ctx), "set sec auth key err!\n"); + dev_err(dev, "set sec auth key err!\n"); goto bad_key; } @@ -1062,7 +1063,7 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type) sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size, cryptlen - iv_size); if (unlikely(sz != iv_size)) - dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n"); + dev_err(req->ctx->dev, "copy output iv error!\n"); } static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx, @@ -1160,7 +1161,7 @@ static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req) ret = sec_skcipher_bd_fill(ctx, req); if (unlikely(ret)) { - dev_err(SEC_CTX_DEV(ctx), "skcipher bd fill is error!\n"); + dev_err(ctx->dev, "skcipher bd fill is error!\n"); return ret; } @@ -1194,7 +1195,7 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err) a_req->assoclen); if (unlikely(sz != authsize)) { - dev_err(SEC_CTX_DEV(req->ctx), "copy out mac err!\n"); + dev_err(c->dev, "copy out mac err!\n"); err = -EINVAL; } } @@ -1259,7 +1260,7 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req) ret = ctx->req_op->bd_send(ctx, req); if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) || (ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) { - dev_err_ratelimited(SEC_CTX_DEV(ctx), "send sec request failed!\n"); + dev_err_ratelimited(ctx->dev, "send sec request failed!\n"); goto err_send_req; } @@ -1325,7 +1326,7 @@ static int sec_aead_init(struct crypto_aead *tfm) ctx->alg_type = SEC_AEAD; ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm); if (ctx->c_ctx.ivsize > SEC_IV_SIZE) { - dev_err(SEC_CTX_DEV(ctx), "get error aead iv size!\n"); + dev_err(ctx->dev, "get error aead iv size!\n"); return -EINVAL; } @@ -1374,7 +1375,7 @@ static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name) auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0); if (IS_ERR(auth_ctx->hash_tfm)) { - dev_err(SEC_CTX_DEV(ctx), "aead alloc shash error!\n"); + dev_err(ctx->dev, "aead alloc shash error!\n"); sec_aead_exit(tfm); return PTR_ERR(auth_ctx->hash_tfm); } @@ -1405,10 +1406,40 @@ static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm) return sec_aead_ctx_init(tfm, "sha512"); } + +static int sec_skcipher_cryptlen_ckeck(struct sec_ctx *ctx, + struct sec_req *sreq) +{ + u32 cryptlen = sreq->c_req.sk_req->cryptlen; + struct device *dev = ctx->dev; + u8 c_mode = ctx->c_ctx.c_mode; + int ret = 0; + + switch (c_mode) { + case SEC_CMODE_XTS: + if (unlikely(cryptlen < AES_BLOCK_SIZE)) { + dev_err(dev, "skcipher XTS mode input length error!\n"); + ret = -EINVAL; + } + break; + case SEC_CMODE_ECB: + case SEC_CMODE_CBC: + if (unlikely(cryptlen & (AES_BLOCK_SIZE - 1))) { + dev_err(dev, "skcipher AES input length error!\n"); + ret = -EINVAL; + } + break; + default: + ret = -EINVAL; + } + + return ret; +} + static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq) { struct skcipher_request *sk_req = sreq->c_req.sk_req; - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; u8 c_alg = ctx->c_ctx.c_alg; if (unlikely(!sk_req->src || !sk_req->dst)) { @@ -1429,12 +1460,9 @@ static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq) } return 0; } else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) { - if (unlikely(sk_req->cryptlen & (AES_BLOCK_SIZE - 1))) { - dev_err(dev, "skcipher aes input length error!\n"); - return -EINVAL; - } - return 0; + return sec_skcipher_cryptlen_ckeck(ctx, sreq); } + dev_err(dev, "skcipher 
algorithm error!\n"); return -EINVAL; @@ -1531,14 +1559,15 @@ static struct skcipher_alg sec_skciphers[] = { static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) { - u8 c_alg = ctx->c_ctx.c_alg; struct aead_request *req = sreq->aead_req.aead_req; struct crypto_aead *tfm = crypto_aead_reqtfm(req); size_t authsize = crypto_aead_authsize(tfm); + struct device *dev = ctx->dev; + u8 c_alg = ctx->c_ctx.c_alg; if (unlikely(!req->src || !req->dst || !req->cryptlen || req->assoclen > SEC_MAX_AAD_LEN)) { - dev_err(SEC_CTX_DEV(ctx), "aead input param error!\n"); + dev_err(dev, "aead input param error!\n"); return -EINVAL; } @@ -1550,7 +1579,7 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) /* Support AES only */ if (unlikely(c_alg != SEC_CALG_AES)) { - dev_err(SEC_CTX_DEV(ctx), "aead crypto alg error!\n"); + dev_err(dev, "aead crypto alg error!\n"); return -EINVAL; } if (sreq->c_req.encrypt) @@ -1559,7 +1588,7 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) sreq->c_req.c_len = req->cryptlen - authsize; if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) { - dev_err(SEC_CTX_DEV(ctx), "aead crypto length error!\n"); + dev_err(dev, "aead crypto length error!\n"); return -EINVAL; } @@ -1634,7 +1663,7 @@ static struct aead_alg sec_aeads[] = { AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA512_DIGEST_SIZE), }; -int sec_register_to_crypto(void) +int sec_register_to_crypto(struct hisi_qm *qm) { int ret; @@ -1651,7 +1680,7 @@ int sec_register_to_crypto(void) return ret; } -void sec_unregister_from_crypto(void) +void sec_unregister_from_crypto(struct hisi_qm *qm) { crypto_unregister_skciphers(sec_skciphers, ARRAY_SIZE(sec_skciphers)); diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h index b2786e17d8fe..9c78edac56a4 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.h +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h @@ -64,7 +64,6 @@ enum sec_addr_type { }; struct sec_sqe_type2 { - /* * mac_len: 0~4 bits * a_key_len: 5~10 bits @@ -120,7 +119,6 @@ struct sec_sqe_type2 { /* c_pad_len_field: 0~1 bits */ __le16 c_pad_len_field; - __le64 long_a_data_len; __le64 a_ivin_addr; __le64 a_key_addr; @@ -211,6 +209,6 @@ struct sec_sqe { struct sec_sqe_type2 type2; }; -int sec_register_to_crypto(void); -void sec_unregister_from_crypto(void); +int sec_register_to_crypto(struct hisi_qm *qm); +void sec_unregister_from_crypto(struct hisi_qm *qm); #endif diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index dc68ba76f65e..6f0062d4408c 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -19,7 +19,6 @@ #define SEC_VF_NUM 63 #define SEC_QUEUE_NUM_V1 4096 -#define SEC_QUEUE_NUM_V2 1024 #define SEC_PF_PCI_DEVICE_ID 0xa255 #define SEC_VF_PCI_DEVICE_ID 0xa256 @@ -35,18 +34,16 @@ #define SEC_CTX_Q_NUM_MAX 32 #define SEC_CTRL_CNT_CLR_CE 0x301120 -#define SEC_CTRL_CNT_CLR_CE_BIT BIT(0) -#define SEC_ENGINE_PF_CFG_OFF 0x300000 -#define SEC_ACC_COMMON_REG_OFF 0x1000 +#define SEC_CTRL_CNT_CLR_CE_BIT BIT(0) #define SEC_CORE_INT_SOURCE 0x301010 #define SEC_CORE_INT_MASK 0x301000 #define SEC_CORE_INT_STATUS 0x301008 #define SEC_CORE_SRAM_ECC_ERR_INFO 0x301C14 -#define SEC_ECC_NUM(err) (((err) >> 16) & 0xFF) -#define SEC_ECC_ADDR(err) ((err) >> 0) +#define SEC_ECC_NUM 16 +#define SEC_ECC_MASH 0xFF #define SEC_CORE_INT_DISABLE 0x0 -#define SEC_CORE_INT_ENABLE 0x1ff -#define SEC_CORE_INT_CLEAR 0x1ff +#define 
SEC_CORE_INT_ENABLE 0x7c1ff +#define SEC_CORE_INT_CLEAR 0x7c1ff #define SEC_SAA_ENABLE 0x17f #define SEC_RAS_CE_REG 0x301050 @@ -54,24 +51,24 @@ #define SEC_RAS_NFE_REG 0x301058 #define SEC_RAS_CE_ENB_MSK 0x88 #define SEC_RAS_FE_ENB_MSK 0x0 -#define SEC_RAS_NFE_ENB_MSK 0x177 -#define SEC_RAS_DISABLE 0x0 -#define SEC_MEM_START_INIT_REG 0x0100 -#define SEC_MEM_INIT_DONE_REG 0x0104 +#define SEC_RAS_NFE_ENB_MSK 0x7c177 +#define SEC_RAS_DISABLE 0x0 +#define SEC_MEM_START_INIT_REG 0x301100 +#define SEC_MEM_INIT_DONE_REG 0x301104 -#define SEC_CONTROL_REG 0x0200 +#define SEC_CONTROL_REG 0x301200 #define SEC_TRNG_EN_SHIFT 8 #define SEC_CLK_GATE_ENABLE BIT(3) #define SEC_CLK_GATE_DISABLE (~BIT(3)) #define SEC_AXI_SHUTDOWN_ENABLE BIT(12) #define SEC_AXI_SHUTDOWN_DISABLE 0xFFFFEFFF -#define SEC_INTERFACE_USER_CTRL0_REG 0x0220 -#define SEC_INTERFACE_USER_CTRL1_REG 0x0224 -#define SEC_SAA_EN_REG 0x0270 -#define SEC_BD_ERR_CHK_EN_REG0 0x0380 -#define SEC_BD_ERR_CHK_EN_REG1 0x0384 -#define SEC_BD_ERR_CHK_EN_REG3 0x038c +#define SEC_INTERFACE_USER_CTRL0_REG 0x301220 +#define SEC_INTERFACE_USER_CTRL1_REG 0x301224 +#define SEC_SAA_EN_REG 0x301270 +#define SEC_BD_ERR_CHK_EN_REG0 0x301380 +#define SEC_BD_ERR_CHK_EN_REG1 0x301384 +#define SEC_BD_ERR_CHK_EN_REG3 0x30138c #define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15)) #define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7)) @@ -95,9 +92,6 @@ #define SEC_SQE_MASK_OFFSET 64 #define SEC_SQE_MASK_LEN 48 -#define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \ - SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF) - struct sec_hw_error { u32 int_msk; const char *msg; @@ -117,20 +111,66 @@ static struct hisi_qm_list sec_devices = { }; static const struct sec_hw_error sec_hw_errors[] = { - {.int_msk = BIT(0), .msg = "sec_axi_rresp_err_rint"}, - {.int_msk = BIT(1), .msg = "sec_axi_bresp_err_rint"}, - {.int_msk = BIT(2), .msg = "sec_ecc_2bit_err_rint"}, - {.int_msk = BIT(3), .msg = "sec_ecc_1bit_err_rint"}, - {.int_msk = BIT(4), .msg = "sec_req_trng_timeout_rint"}, - {.int_msk = BIT(5), .msg = "sec_fsm_hbeat_rint"}, - {.int_msk = BIT(6), .msg = "sec_channel_req_rng_timeout_rint"}, - {.int_msk = BIT(7), .msg = "sec_bd_err_rint"}, - {.int_msk = BIT(8), .msg = "sec_chain_buff_err_rint"}, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "sec_axi_rresp_err_rint" + }, + { + .int_msk = BIT(1), + .msg = "sec_axi_bresp_err_rint" + }, + { + .int_msk = BIT(2), + .msg = "sec_ecc_2bit_err_rint" + }, + { + .int_msk = BIT(3), + .msg = "sec_ecc_1bit_err_rint" + }, + { + .int_msk = BIT(4), + .msg = "sec_req_trng_timeout_rint" + }, + { + .int_msk = BIT(5), + .msg = "sec_fsm_hbeat_rint" + }, + { + .int_msk = BIT(6), + .msg = "sec_channel_req_rng_timeout_rint" + }, + { + .int_msk = BIT(7), + .msg = "sec_bd_err_rint" + }, + { + .int_msk = BIT(8), + .msg = "sec_chain_buff_err_rint" + }, + { + .int_msk = BIT(14), + .msg = "sec_no_secure_access" + }, + { + .int_msk = BIT(15), + .msg = "sec_wrapping_key_auth_err" + }, + { + .int_msk = BIT(16), + .msg = "sec_km_key_crc_fail" + }, + { + .int_msk = BIT(17), + .msg = "sec_axi_poison_err" + }, + { + .int_msk = BIT(18), + .msg = "sec_sva_err" + }, + {} }; static const char * const sec_dbg_file_name[] = { - [SEC_CURRENT_QM] = "current_qm", [SEC_CLEAR_ENABLE] = "clear_enable", }; @@ -277,9 +317,7 @@ static u8 sec_get_endian(struct hisi_qm *qm) "cannot access a register in VF!\n"); return SEC_LE; } - reg = readl_relaxed(qm->io_base + SEC_ENGINE_PF_CFG_OFF + - SEC_ACC_COMMON_REG_OFF + SEC_CONTROL_REG); - + reg = readl_relaxed(qm->io_base 
+ SEC_CONTROL_REG); /* BD little endian mode */ if (!(reg & BIT(0))) return SEC_LE; @@ -299,13 +337,13 @@ static int sec_engine_init(struct hisi_qm *qm) u32 reg; /* disable clock gate control */ - reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG)); + reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG); reg &= SEC_CLK_GATE_DISABLE; - writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG)); + writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG); - writel_relaxed(0x1, SEC_ADDR(qm, SEC_MEM_START_INIT_REG)); + writel_relaxed(0x1, qm->io_base + SEC_MEM_START_INIT_REG); - ret = readl_relaxed_poll_timeout(SEC_ADDR(qm, SEC_MEM_INIT_DONE_REG), + ret = readl_relaxed_poll_timeout(qm->io_base + SEC_MEM_INIT_DONE_REG, reg, reg & 0x1, SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US); if (ret) { @@ -313,40 +351,40 @@ static int sec_engine_init(struct hisi_qm *qm) return ret; } - reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG)); + reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG); reg |= (0x1 << SEC_TRNG_EN_SHIFT); - writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG)); + writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG); - reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG)); + reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL0_REG); reg |= SEC_USER0_SMMU_NORMAL; - writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG)); + writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL0_REG); - reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG)); + reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL1_REG); reg &= SEC_USER1_SMMU_MASK; if (qm->use_sva && qm->ver == QM_HW_V2) reg |= SEC_USER1_SMMU_SVA; else reg |= SEC_USER1_SMMU_NORMAL; - writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG)); + writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL1_REG); writel(SEC_SINGLE_PORT_MAX_TRANS, qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS); - writel(SEC_SAA_ENABLE, SEC_ADDR(qm, SEC_SAA_EN_REG)); + writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG); /* Enable sm4 extra mode, as ctr/ecb */ writel_relaxed(SEC_BD_ERR_CHK_EN0, - SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG0)); + qm->io_base + SEC_BD_ERR_CHK_EN_REG0); /* Enable sm4 xts mode multiple iv */ writel_relaxed(SEC_BD_ERR_CHK_EN1, - SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG1)); + qm->io_base + SEC_BD_ERR_CHK_EN_REG1); writel_relaxed(SEC_BD_ERR_CHK_EN3, - SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG3)); + qm->io_base + SEC_BD_ERR_CHK_EN_REG3); /* config endian */ - reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG)); + reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG); reg |= sec_get_endian(qm); - writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG)); + writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG); return 0; } @@ -381,10 +419,6 @@ static void sec_debug_regs_clear(struct hisi_qm *qm) { int i; - /* clear current_qm */ - writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF); - writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF); - /* clear sec dfx regs */ writel(0x1, qm->io_base + SEC_CTRL_CNT_CLR_CE); for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++) @@ -406,7 +440,7 @@ static void sec_hw_error_enable(struct hisi_qm *qm) return; } - val = readl(SEC_ADDR(qm, SEC_CONTROL_REG)); + val = readl(qm->io_base + SEC_CONTROL_REG); /* clear SEC hw error source if having */ writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE); @@ -422,14 +456,14 @@ static void sec_hw_error_enable(struct hisi_qm *qm) /* enable SEC block master OOO when m-bit error occur */ val = val | SEC_AXI_SHUTDOWN_ENABLE; - writel(val, SEC_ADDR(qm, SEC_CONTROL_REG)); + writel(val, qm->io_base + 
SEC_CONTROL_REG); } static void sec_hw_error_disable(struct hisi_qm *qm) { u32 val; - val = readl(SEC_ADDR(qm, SEC_CONTROL_REG)); + val = readl(qm->io_base + SEC_CONTROL_REG); /* disable RAS int */ writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG); @@ -442,51 +476,7 @@ static void sec_hw_error_disable(struct hisi_qm *qm) /* disable SEC block master OOO when m-bit error occur */ val = val & SEC_AXI_SHUTDOWN_DISABLE; - writel(val, SEC_ADDR(qm, SEC_CONTROL_REG)); -} - -static u32 sec_current_qm_read(struct sec_debug_file *file) -{ - struct hisi_qm *qm = file->qm; - - return readl(qm->io_base + QM_DFX_MB_CNT_VF); -} - -static int sec_current_qm_write(struct sec_debug_file *file, u32 val) -{ - struct hisi_qm *qm = file->qm; - u32 vfq_num; - u32 tmp; - - if (val > qm->vfs_num) - return -EINVAL; - - /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */ - if (!val) { - qm->debug.curr_qm_qp_num = qm->qp_num; - } else { - vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num; - - if (val == qm->vfs_num) - qm->debug.curr_qm_qp_num = - qm->ctrl_qp_num - qm->qp_num - - (qm->vfs_num - 1) * vfq_num; - else - qm->debug.curr_qm_qp_num = vfq_num; - } - - writel(val, qm->io_base + QM_DFX_MB_CNT_VF); - writel(val, qm->io_base + QM_DFX_DB_CNT_VF); - - tmp = val | - (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK); - writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); - - tmp = val | - (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK); - writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); - - return 0; + writel(val, qm->io_base + SEC_CONTROL_REG); } static u32 sec_clear_enable_read(struct sec_debug_file *file) @@ -523,9 +513,6 @@ static ssize_t sec_debug_read(struct file *filp, char __user *buf, spin_lock_irq(&file->lock); switch (file->index) { - case SEC_CURRENT_QM: - val = sec_current_qm_read(file); - break; case SEC_CLEAR_ENABLE: val = sec_clear_enable_read(file); break; @@ -566,11 +553,6 @@ static ssize_t sec_debug_write(struct file *filp, const char __user *buf, spin_lock_irq(&file->lock); switch (file->index) { - case SEC_CURRENT_QM: - ret = sec_current_qm_write(file, val); - if (ret) - goto err_input; - break; case SEC_CLEAR_ENABLE: ret = sec_clear_enable_write(file, val); if (ret) @@ -655,7 +637,7 @@ static int sec_debug_init(struct hisi_qm *qm) int i; if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) { - for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) { + for (i = SEC_CLEAR_ENABLE; i < SEC_DEBUG_FILE_NUM; i++) { spin_lock_init(&sec->debug.files[i].lock); sec->debug.files[i].index = i; sec->debug.files[i].qm = qm; @@ -712,7 +694,8 @@ static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts) err_val = readl(qm->io_base + SEC_CORE_SRAM_ECC_ERR_INFO); dev_err(dev, "multi ecc sram num=0x%x\n", - SEC_ECC_NUM(err_val)); + ((err_val) >> SEC_ECC_NUM) & + SEC_ECC_MASH); } } errs++; @@ -733,9 +716,23 @@ static void sec_open_axi_master_ooo(struct hisi_qm *qm) { u32 val; - val = readl(SEC_ADDR(qm, SEC_CONTROL_REG)); - writel(val & SEC_AXI_SHUTDOWN_DISABLE, SEC_ADDR(qm, SEC_CONTROL_REG)); - writel(val | SEC_AXI_SHUTDOWN_ENABLE, SEC_ADDR(qm, SEC_CONTROL_REG)); + val = readl(qm->io_base + SEC_CONTROL_REG); + writel(val & SEC_AXI_SHUTDOWN_DISABLE, qm->io_base + SEC_CONTROL_REG); + writel(val | SEC_AXI_SHUTDOWN_ENABLE, qm->io_base + SEC_CONTROL_REG); +} + +static void sec_err_info_init(struct hisi_qm *qm) +{ + struct hisi_qm_err_info *err_info = &qm->err_info; + + err_info->ce = QM_BASE_CE; + err_info->fe = 0; + err_info->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC; + 
err_info->dev_ce_mask = SEC_RAS_CE_ENB_MSK; + err_info->msi_wr_port = BIT(0); + err_info->acpi_rst = "SRST"; + err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT | + QM_ACC_WB_NOT_READY_TIMEOUT; } static const struct hisi_qm_err_ini sec_err_ini = { @@ -746,16 +743,7 @@ static const struct hisi_qm_err_ini sec_err_ini = { .clear_dev_hw_err_status = sec_clear_hw_err_status, .log_dev_hw_err = sec_log_hw_error, .open_axi_master_ooo = sec_open_axi_master_ooo, - .err_info = { - .ce = QM_BASE_CE, - .nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT | - QM_ACC_WB_NOT_READY_TIMEOUT, - .fe = 0, - .ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC, - .dev_ce_mask = SEC_RAS_CE_ENB_MSK, - .msi_wr_port = BIT(0), - .acpi_rst = "SRST", - } + .err_info_init = sec_err_info_init, }; static int sec_pf_probe_init(struct sec_dev *sec) @@ -763,12 +751,8 @@ static int sec_pf_probe_init(struct sec_dev *sec) struct hisi_qm *qm = &sec->qm; int ret; - if (qm->ver == QM_HW_V1) - qm->ctrl_qp_num = SEC_QUEUE_NUM_V1; - else - qm->ctrl_qp_num = SEC_QUEUE_NUM_V2; - qm->err_ini = &sec_err_ini; + qm->err_ini->err_info_init(qm); ret = sec_set_user_domain_and_cache(qm); if (ret) @@ -786,7 +770,7 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->pdev = pdev; qm->ver = pdev->revision; - qm->algs = "cipher\ndigest\naead\n"; + qm->algs = "cipher\ndigest\naead"; qm->mode = uacce_mode; qm->sqe_size = SEC_SQE_SIZE; qm->dev_name = sec_name; @@ -909,10 +893,15 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) pci_warn(pdev, "Failed to init debugfs!\n"); - ret = hisi_qm_alg_register(qm, &sec_devices); - if (ret < 0) { - pr_err("Failed to register driver to crypto.\n"); - goto err_qm_stop; + if (qm->qp_num >= ctx_q_num) { + ret = hisi_qm_alg_register(qm, &sec_devices); + if (ret < 0) { + pr_err("Failed to register driver to crypto.\n"); + goto err_qm_stop; + } + } else { + pci_warn(qm->pdev, + "Failed to use kernel mode, qp not enough!\n"); } if (qm->uacce) { @@ -948,7 +937,9 @@ static void sec_remove(struct pci_dev *pdev) struct hisi_qm *qm = pci_get_drvdata(pdev); hisi_qm_wait_task_finish(qm, &sec_devices); - hisi_qm_alg_unregister(qm, &sec_devices); + if (qm->qp_num >= ctx_q_num) + hisi_qm_alg_unregister(qm, &sec_devices); + if (qm->fun_type == QM_HW_PF && qm->vfs_num) hisi_qm_sriov_disable(pdev, true); diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c index 3bff6394acaf..057273769f26 100644 --- a/drivers/crypto/hisilicon/sgl.c +++ b/drivers/crypto/hisilicon/sgl.c @@ -56,7 +56,7 @@ struct hisi_acc_sgl_pool { struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev, u32 count, u32 sge_nr) { - u32 sgl_size, block_size, sgl_num_per_block, block_num, remain_sgl = 0; + u32 sgl_size, block_size, sgl_num_per_block, block_num, remain_sgl; struct hisi_acc_sgl_pool *pool; struct mem_block *block; u32 i, j; @@ -66,6 +66,11 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev, sgl_size = sizeof(struct acc_hw_sge) * sge_nr + sizeof(struct hisi_acc_hw_sgl); + + /* + * the pool may allocate a block of memory of size PAGE_SIZE * 2^(MAX_ORDER - 1), + * block size may exceed 2^31 on ia64, so the max of block size is 2^31 + */ block_size = 1 << (PAGE_SHIFT + MAX_ORDER <= 32 ? 
PAGE_SHIFT + MAX_ORDER - 1 : 31); sgl_num_per_block = block_size / sgl_size; @@ -85,8 +90,10 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev, block[i].sgl = dma_alloc_coherent(dev, block_size, &block[i].sgl_dma, GFP_KERNEL); - if (!block[i].sgl) + if (!block[i].sgl) { + dev_err(dev, "Fail to allocate hw SG buffer!\n"); goto err_free_mem; + } block[i].size = block_size; } @@ -95,8 +102,10 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev, block[i].sgl = dma_alloc_coherent(dev, remain_sgl * sgl_size, &block[i].sgl_dma, GFP_KERNEL); - if (!block[i].sgl) + if (!block[i].sgl) { + dev_err(dev, "Fail to allocate remained hw SG buffer!\n"); goto err_free_mem; + } block[i].size = remain_sgl * sgl_size; } @@ -167,6 +176,7 @@ static void sg_map_to_hw_sg(struct scatterlist *sgl, { hw_sge->buf = sg_dma_address(sgl); hw_sge->len = cpu_to_le32(sg_dma_len(sgl)); + hw_sge->page_ctrl = sg_virt(sgl); } static void inc_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl) @@ -182,6 +192,18 @@ static void update_hw_sgl_sum_sge(struct hisi_acc_hw_sgl *hw_sgl, u16 sum) hw_sgl->entry_sum_in_chain = cpu_to_le16(sum); } +static void clear_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl) +{ + struct acc_hw_sge *hw_sge = hw_sgl->sge_entries; + int i; + + for (i = 0; i < le16_to_cpu(hw_sgl->entry_sum_in_sgl); i++) { + hw_sge[i].page_ctrl = NULL; + hw_sge[i].buf = 0; + hw_sge[i].len = 0; + } +} + /** * hisi_acc_sg_buf_map_to_hw_sgl - Map a scatterlist to a hw sgl. * @dev: The device which hw sgl belongs to. @@ -211,16 +233,19 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, sg_n = sg_nents(sgl); sg_n_mapped = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL); - if (!sg_n_mapped) + if (!sg_n_mapped) { + dev_err(dev, "DMA mapping for SG error!\n"); return ERR_PTR(-EINVAL); + } if (sg_n_mapped > pool->sge_nr) { - dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL); + dev_err(dev, "the number of entries in input scatterlist is bigger than SGL pool setting.\n"); return ERR_PTR(-EINVAL); } curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma); if (IS_ERR(curr_hw_sgl)) { + dev_err(dev, "Get SGL error!\n"); dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL); return ERR_PTR(-ENOMEM); @@ -256,7 +281,7 @@ void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl, return; dma_unmap_sg(dev, sgl, sg_nents(sgl), DMA_BIDIRECTIONAL); - + clear_hw_sgl_sge(hw_sgl); hw_sgl->entry_sum_in_chain = 0; hw_sgl->entry_sum_in_sgl = 0; hw_sgl->entry_length_in_sgl = 0; diff --git a/drivers/crypto/hisilicon/trng/trng.c b/drivers/crypto/hisilicon/trng/trng.c index 29712685498a..829f2caf0f67 100644 --- a/drivers/crypto/hisilicon/trng/trng.c +++ b/drivers/crypto/hisilicon/trng/trng.c @@ -18,6 +18,8 @@ #define HISI_TRNG_REG 0x00F0 #define HISI_TRNG_BYTES 4 #define HISI_TRNG_QUALITY 512 +#define HISI_TRNG_VERSION 0x01B8 +#define HISI_TRNG_VER_V1 GENMASK(31, 0) #define SLEEP_US 10 #define TIMEOUT_US 10000 #define SW_DRBG_NUM_SHIFT 2 @@ -50,6 +52,7 @@ struct hisi_trng { struct hisi_trng_list *trng_list; struct list_head list; struct hwrng rng; + u32 ver; bool is_used; struct mutex mutex; }; @@ -260,6 +263,7 @@ static int hisi_trng_probe(struct platform_device *pdev) return PTR_ERR(trng->base); trng->is_used = false; + trng->ver = readl(trng->base + HISI_TRNG_VERSION); if (!trng_devices.is_init) { INIT_LIST_HEAD(&trng_devices.list); mutex_init(&trng_devices.lock); @@ -267,7 +271,8 @@ static int hisi_trng_probe(struct platform_device *pdev) } hisi_trng_add_to_list(trng); - if (atomic_inc_return(&trng_active_devs) == 1) { 
+ if (trng->ver != HISI_TRNG_VER_V1 && + atomic_inc_return(&trng_active_devs) == 1) { ret = crypto_register_rng(&hisi_trng_alg); if (ret) { dev_err(&pdev->dev, @@ -289,7 +294,8 @@ static int hisi_trng_probe(struct platform_device *pdev) return ret; err_crypto_unregister: - if (atomic_dec_return(&trng_active_devs) == 0) + if (trng->ver != HISI_TRNG_VER_V1 && + atomic_dec_return(&trng_active_devs) == 0) crypto_unregister_rng(&hisi_trng_alg); err_remove_from_list: @@ -305,7 +311,8 @@ static int hisi_trng_remove(struct platform_device *pdev) while (hisi_trng_del_from_list(trng)) ; - if (atomic_dec_return(&trng_active_devs) == 0) + if (trng->ver != HISI_TRNG_VER_V1 && + atomic_dec_return(&trng_active_devs) == 0) crypto_unregister_rng(&hisi_trng_alg); return 0; diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h index 92397f993e23..517fdbdff3ea 100644 --- a/drivers/crypto/hisilicon/zip/zip.h +++ b/drivers/crypto/hisilicon/zip/zip.h @@ -33,35 +33,55 @@ struct hisi_zip_sqe { u32 consumed; u32 produced; u32 comp_data_length; + /* + * status: 0~7 bits + * rsvd: 8~31 bits + */ u32 dw3; u32 input_data_length; - u32 lba_l; - u32 lba_h; + u32 dw5; + u32 dw6; + /* + * in_sge_data_offset: 0~23 bits + * rsvd: 24~27 bits + * sqe_type: 29~31 bits + */ u32 dw7; + /* + * out_sge_data_offset: 0~23 bits + * rsvd: 24~31 bits + */ u32 dw8; + /* + * request_type: 0~7 bits + * buffer_type: 8~11 bits + * rsvd: 13~31 bits + */ u32 dw9; u32 dw10; - u32 priv_info; + u32 dw11; u32 dw12; - u32 tag; + /* tag: in sqe type 0 */ + u32 dw13; u32 dest_avail_out; - u32 rsvd0; - u32 comp_head_addr_l; - u32 comp_head_addr_h; + u32 dw15; + u32 dw16; + u32 dw17; u32 source_addr_l; u32 source_addr_h; u32 dest_addr_l; u32 dest_addr_h; - u32 stream_ctx_addr_l; - u32 stream_ctx_addr_h; - u32 cipher_key1_addr_l; - u32 cipher_key1_addr_h; - u32 cipher_key2_addr_l; - u32 cipher_key2_addr_h; + u32 dw22; + u32 dw23; + u32 dw24; + u32 dw25; + /* tag: in sqe type 3 */ + u32 dw26; + u32 dw27; u32 rsvd1[4]; }; int zip_create_qps(struct hisi_qp **qps, int ctx_num, int node); -int hisi_zip_register_to_crypto(void); -void hisi_zip_unregister_from_crypto(void); +int hisi_zip_register_to_crypto(struct hisi_qm *qm); +void hisi_zip_unregister_from_crypto(struct hisi_qm *qm); #endif diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index 08b4660b014c..9520a4113c81 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -10,6 +10,7 @@ #define HZIP_BD_STATUS_M GENMASK(7, 0) /* hisi_zip_sqe dw7 */ #define HZIP_IN_SGE_DATA_OFFSET_M GENMASK(23, 0) +#define HZIP_SQE_TYPE_M GENMASK(31, 28) /* hisi_zip_sqe dw8 */ #define HZIP_OUT_SGE_DATA_OFFSET_M GENMASK(23, 0) /* hisi_zip_sqe dw9 */ @@ -91,8 +92,22 @@ struct hisi_zip_qp_ctx { struct hisi_zip_ctx *ctx; }; +struct hisi_zip_sqe_ops { + u8 sqe_type; + void (*fill_addr)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req); + void (*fill_buf_size)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req); + void (*fill_buf_type)(struct hisi_zip_sqe *sqe, u8 buf_type); + void (*fill_req_type)(struct hisi_zip_sqe *sqe, u8 req_type); + void (*fill_tag)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req); + void (*fill_sqe_type)(struct hisi_zip_sqe *sqe, u8 sqe_type); + u32 (*get_tag)(struct hisi_zip_sqe *sqe); + u32 (*get_status)(struct hisi_zip_sqe *sqe); + u32 (*get_dstlen)(struct hisi_zip_sqe *sqe); +}; + struct hisi_zip_ctx { struct hisi_zip_qp_ctx qp_ctx[HZIP_CTX_Q_NUM]; + const struct 
hisi_zip_sqe_ops *ops; }; static int sgl_sge_nr_set(const char *val, const struct kernel_param *kp) @@ -119,35 +134,367 @@ static u16 sgl_sge_nr = HZIP_SGL_SGE_NR; module_param_cb(sgl_sge_nr, &sgl_sge_nr_ops, &sgl_sge_nr, 0444); MODULE_PARM_DESC(sgl_sge_nr, "Number of sge in sgl(1-255)"); -static void hisi_zip_config_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type) +static u16 get_extra_field_size(const u8 *start) +{ + return *((u16 *)start) + GZIP_HEAD_FEXTRA_XLEN; +} + +static u32 get_name_field_size(const u8 *start) +{ + return strlen(start) + 1; +} + +static u32 get_comment_field_size(const u8 *start) +{ + return strlen(start) + 1; +} + +static u32 __get_gzip_head_size(const u8 *src) +{ + u8 head_flg = *(src + GZIP_HEAD_FLG_SHIFT); + u32 size = GZIP_HEAD_FEXTRA_SHIFT; + + if (head_flg & GZIP_HEAD_FEXTRA_BIT) + size += get_extra_field_size(src + size); + if (head_flg & GZIP_HEAD_FNAME_BIT) + size += get_name_field_size(src + size); + if (head_flg & GZIP_HEAD_FCOMMENT_BIT) + size += get_comment_field_size(src + size); + if (head_flg & GZIP_HEAD_FHCRC_BIT) + size += GZIP_HEAD_FHCRC_SIZE; + + return size; +} + +static size_t __maybe_unused get_gzip_head_size(struct scatterlist *sgl) +{ + char buf[HZIP_GZIP_HEAD_BUF]; + + sg_copy_to_buffer(sgl, sg_nents(sgl), buf, sizeof(buf)); + + return __get_gzip_head_size(buf); +} + +static int add_comp_head(struct scatterlist *dst, u8 req_type) +{ + int head_size = TO_HEAD_SIZE(req_type); + const u8 *head = TO_HEAD(req_type); + int ret; + + ret = sg_copy_from_buffer(dst, sg_nents(dst), head, head_size); + if (ret != head_size) { + pr_err("the head size of the buffer is wrong (%d)!\n", ret); + return -ENOMEM; + } + + return head_size; +} + +static int get_comp_head_size(struct acomp_req *acomp_req, u8 req_type) +{ + if (!acomp_req->src || !acomp_req->slen) + return -EINVAL; + + if (req_type == HZIP_ALG_TYPE_GZIP && + acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT) + return -EINVAL; + + switch (req_type) { + case HZIP_ALG_TYPE_ZLIB: + return TO_HEAD_SIZE(HZIP_ALG_TYPE_ZLIB); + case HZIP_ALG_TYPE_GZIP: + return TO_HEAD_SIZE(HZIP_ALG_TYPE_GZIP); + default: + pr_err("request type is not supported!\n"); + return -EINVAL; + } +} + +static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req, + struct hisi_zip_qp_ctx *qp_ctx, + size_t head_size, bool is_comp) +{ + struct hisi_zip_req_q *req_q = &qp_ctx->req_q; + struct hisi_zip_req *q = req_q->q; + struct hisi_zip_req *req_cache; + int req_id; + + write_lock(&req_q->req_lock); + + req_id = find_first_zero_bit(req_q->req_bitmap, req_q->size); + if (req_id >= req_q->size) { + write_unlock(&req_q->req_lock); + dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n"); + return ERR_PTR(-EAGAIN); + } + set_bit(req_id, req_q->req_bitmap); + + req_cache = q + req_id; + req_cache->req_id = req_id; + req_cache->req = req; + + if (is_comp) { + req_cache->sskip = 0; + req_cache->dskip = head_size; + } else { + req_cache->sskip = head_size; + req_cache->dskip = 0; + } + + write_unlock(&req_q->req_lock); + + return req_cache; +} + +static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx, + struct hisi_zip_req *req) +{ + struct hisi_zip_req_q *req_q = &qp_ctx->req_q; + + write_lock(&req_q->req_lock); + clear_bit(req->req_id, req_q->req_bitmap); + memset(req, 0, sizeof(struct hisi_zip_req)); + write_unlock(&req_q->req_lock); +} + +static void hisi_zip_fill_addr(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req) +{ + sqe->source_addr_l = lower_32_bits(req->dma_src); + sqe->source_addr_h = upper_32_bits(req->dma_src);
+ sqe->dest_addr_l = lower_32_bits(req->dma_dst); + sqe->dest_addr_h = upper_32_bits(req->dma_dst); +} + +static void hisi_zip_fill_buf_size(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req) +{ + struct acomp_req *a_req = req->req; + + sqe->input_data_length = a_req->slen - req->sskip; + sqe->dest_avail_out = a_req->dlen - req->dskip; + sqe->dw7 = FIELD_PREP(HZIP_IN_SGE_DATA_OFFSET_M, req->sskip); + sqe->dw8 = FIELD_PREP(HZIP_OUT_SGE_DATA_OFFSET_M, req->dskip); +} + +static void hisi_zip_fill_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type) { u32 val; - val = (sqe->dw9) & ~HZIP_BUF_TYPE_M; + val = sqe->dw9 & ~HZIP_BUF_TYPE_M; val |= FIELD_PREP(HZIP_BUF_TYPE_M, buf_type); sqe->dw9 = val; } -static void hisi_zip_config_tag(struct hisi_zip_sqe *sqe, u32 tag) +static void hisi_zip_fill_req_type(struct hisi_zip_sqe *sqe, u8 req_type) { - sqe->tag = tag; + u32 val; + + val = sqe->dw9 & ~HZIP_REQ_TYPE_M; + val |= FIELD_PREP(HZIP_REQ_TYPE_M, req_type); + sqe->dw9 = val; } -static void hisi_zip_fill_sqe(struct hisi_zip_sqe *sqe, u8 req_type, - dma_addr_t s_addr, dma_addr_t d_addr, u32 slen, - u32 dlen, u32 sskip, u32 dskip) +static void hisi_zip_fill_tag_v1(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req) { + sqe->dw13 = req->req_id; +} + +static void hisi_zip_fill_tag_v2(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req) +{ + sqe->dw26 = req->req_id; +} + +static void hisi_zip_fill_sqe_type(struct hisi_zip_sqe *sqe, u8 sqe_type) +{ + u32 val; + + val = sqe->dw7 & ~HZIP_SQE_TYPE_M; + val |= FIELD_PREP(HZIP_SQE_TYPE_M, sqe_type); + sqe->dw7 = val; +} + +static void hisi_zip_fill_sqe(struct hisi_zip_ctx *ctx, struct hisi_zip_sqe *sqe, + u8 req_type, struct hisi_zip_req *req) +{ + const struct hisi_zip_sqe_ops *ops = ctx->ops; + memset(sqe, 0, sizeof(struct hisi_zip_sqe)); - sqe->input_data_length = slen - sskip; - sqe->dw7 = FIELD_PREP(HZIP_IN_SGE_DATA_OFFSET_M, sskip); - sqe->dw8 = FIELD_PREP(HZIP_OUT_SGE_DATA_OFFSET_M, dskip); - sqe->dw9 = FIELD_PREP(HZIP_REQ_TYPE_M, req_type); - sqe->dest_avail_out = dlen - dskip; - sqe->source_addr_l = lower_32_bits(s_addr); - sqe->source_addr_h = upper_32_bits(s_addr); - sqe->dest_addr_l = lower_32_bits(d_addr); - sqe->dest_addr_h = upper_32_bits(d_addr); + ops->fill_addr(sqe, req); + ops->fill_buf_size(sqe, req); + ops->fill_buf_type(sqe, HZIP_SGL); + ops->fill_req_type(sqe, req_type); + ops->fill_tag(sqe, req); + ops->fill_sqe_type(sqe, ops->sqe_type); +} + +static int hisi_zip_do_work(struct hisi_zip_req *req, + struct hisi_zip_qp_ctx *qp_ctx) +{ + struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool; + struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx; + struct acomp_req *a_req = req->req; + struct hisi_qp *qp = qp_ctx->qp; + struct device *dev = &qp->qm->pdev->dev; + struct hisi_zip_sqe zip_sqe; + int ret; + + if (!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen) + return -EINVAL; + + req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool, + req->req_id << 1, &req->dma_src); + if (IS_ERR(req->hw_src)) { + dev_err(dev, "failed to map the src buffer to hw sgl (%ld)!\n", + PTR_ERR(req->hw_src)); + return PTR_ERR(req->hw_src); + } + + req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool, + (req->req_id << 1) + 1, + &req->dma_dst); + if (IS_ERR(req->hw_dst)) { + ret = PTR_ERR(req->hw_dst); + dev_err(dev, "failed to map the dst buffer to hw sgl (%d)!\n", + ret); + goto err_unmap_input; + } + + hisi_zip_fill_sqe(qp_ctx->ctx, &zip_sqe, qp->req_type, req); + + /* send command to start a task */ + atomic64_inc(&dfx->send_cnt); + ret
= hisi_qp_send(qp, &zip_sqe); + if (ret < 0) { + atomic64_inc(&dfx->send_busy_cnt); + ret = -EAGAIN; + dev_dbg_ratelimited(dev, "failed to send request!\n"); + goto err_unmap_output; + } + + return -EINPROGRESS; + +err_unmap_output: + hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst); +err_unmap_input: + hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src); + return ret; +} + +static u32 hisi_zip_get_tag_v1(struct hisi_zip_sqe *sqe) +{ + return sqe->dw13; +} + +static u32 hisi_zip_get_tag_v2(struct hisi_zip_sqe *sqe) +{ + return sqe->dw26; +} + +static u32 hisi_zip_get_status(struct hisi_zip_sqe *sqe) +{ + return sqe->dw3 & HZIP_BD_STATUS_M; +} + +static u32 hisi_zip_get_dstlen(struct hisi_zip_sqe *sqe) +{ + return sqe->produced; +} + +static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data) +{ + struct hisi_zip_qp_ctx *qp_ctx = qp->qp_ctx; + const struct hisi_zip_sqe_ops *ops = qp_ctx->ctx->ops; + struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx; + struct hisi_zip_req_q *req_q = &qp_ctx->req_q; + struct device *dev = &qp->qm->pdev->dev; + struct hisi_zip_sqe *sqe = data; + u32 tag = ops->get_tag(sqe); + struct hisi_zip_req *req = req_q->q + tag; + struct acomp_req *acomp_req = req->req; + u32 status, dlen, head_size; + int err = 0; + + atomic64_inc(&dfx->recv_cnt); + status = ops->get_status(sqe); + if (status != 0 && status != HZIP_NC_ERR) { + dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n", + (qp->alg_type == 0) ? "" : "de", qp->qp_id, status, + sqe->produced); + atomic64_inc(&dfx->err_bd_cnt); + err = -EIO; + } + + dlen = ops->get_dstlen(sqe); + + hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src); + hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst); + + head_size = (qp->alg_type == 0) ? TO_HEAD_SIZE(qp->req_type) : 0; + acomp_req->dlen = dlen + head_size; + + if (acomp_req->base.complete) + acomp_request_complete(acomp_req, err); + + hisi_zip_remove_req(qp_ctx, req); +} + +static int hisi_zip_acompress(struct acomp_req *acomp_req) +{ + struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm); + struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_COMP]; + struct device *dev = &qp_ctx->qp->qm->pdev->dev; + struct hisi_zip_req *req; + int head_size; + int ret; + + /* let's output compression head now */ + head_size = add_comp_head(acomp_req->dst, qp_ctx->qp->req_type); + if (head_size < 0) { + dev_err_ratelimited(dev, "failed to add comp head (%d)!\n", + head_size); + return head_size; + } + + req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, true); + if (IS_ERR(req)) + return PTR_ERR(req); + + ret = hisi_zip_do_work(req, qp_ctx); + if (ret != -EINPROGRESS) { + dev_info_ratelimited(dev, "failed to do compress (%d)!\n", ret); + hisi_zip_remove_req(qp_ctx, req); + } + + return ret; +} + +static int hisi_zip_adecompress(struct acomp_req *acomp_req) +{ + struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm); + struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_DECOMP]; + struct device *dev = &qp_ctx->qp->qm->pdev->dev; + struct hisi_zip_req *req; + int head_size, ret; + + head_size = get_comp_head_size(acomp_req, qp_ctx->qp->req_type); + if (head_size < 0) { + dev_err_ratelimited(dev, "failed to get comp head size (%d)!\n", + head_size); + return head_size; + } + + req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, false); + if (IS_ERR(req)) + return PTR_ERR(req); + + ret = hisi_zip_do_work(req, qp_ctx); + if (ret != -EINPROGRESS) { + dev_info_ratelimited(dev, "failed to do decompress (%d)!\n", + ret); + hisi_zip_remove_req(qp_ctx, 
req); + } + + return ret; } static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx, @@ -177,9 +524,36 @@ static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx) hisi_qm_release_qp(ctx->qp); } +static const struct hisi_zip_sqe_ops hisi_zip_ops_v1 = { + .sqe_type = 0, + .fill_addr = hisi_zip_fill_addr, + .fill_buf_size = hisi_zip_fill_buf_size, + .fill_buf_type = hisi_zip_fill_buf_type, + .fill_req_type = hisi_zip_fill_req_type, + .fill_tag = hisi_zip_fill_tag_v1, + .fill_sqe_type = hisi_zip_fill_sqe_type, + .get_tag = hisi_zip_get_tag_v1, + .get_status = hisi_zip_get_status, + .get_dstlen = hisi_zip_get_dstlen, +}; + +static const struct hisi_zip_sqe_ops hisi_zip_ops_v2 = { + .sqe_type = 0x3, + .fill_addr = hisi_zip_fill_addr, + .fill_buf_size = hisi_zip_fill_buf_size, + .fill_buf_type = hisi_zip_fill_buf_type, + .fill_req_type = hisi_zip_fill_req_type, + .fill_tag = hisi_zip_fill_tag_v2, + .fill_sqe_type = hisi_zip_fill_sqe_type, + .get_tag = hisi_zip_get_tag_v2, + .get_status = hisi_zip_get_status, + .get_dstlen = hisi_zip_get_dstlen, +}; + static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int node) { struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL }; + struct hisi_zip_qp_ctx *qp_ctx; struct hisi_zip *hisi_zip; int ret, i, j; @@ -193,8 +567,9 @@ static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int for (i = 0; i < HZIP_CTX_Q_NUM; i++) { /* alg_type = 0 for compress, 1 for decompress in hw sqe */ - ret = hisi_zip_start_qp(qps[i], &hisi_zip_ctx->qp_ctx[i], i, - req_type); + qp_ctx = &hisi_zip_ctx->qp_ctx[i]; + qp_ctx->ctx = hisi_zip_ctx; + ret = hisi_zip_start_qp(qps[i], qp_ctx, i, req_type); if (ret) { for (j = i - 1; j >= 0; j--) hisi_qm_stop_qp(hisi_zip_ctx->qp_ctx[j].qp); @@ -203,9 +578,14 @@ static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int return ret; } - hisi_zip_ctx->qp_ctx[i].zip_dev = hisi_zip; + qp_ctx->zip_dev = hisi_zip; } + if (hisi_zip->qm.ver < QM_HW_V3) + hisi_zip_ctx->ops = &hisi_zip_ops_v1; + else + hisi_zip_ctx->ops = &hisi_zip_ops_v2; + return 0; } @@ -217,38 +597,6 @@ static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx) hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[i]); } -static u16 get_extra_field_size(const u8 *start) -{ - return *((u16 *)start) + GZIP_HEAD_FEXTRA_XLEN; -} - -static u32 get_name_field_size(const u8 *start) -{ - return strlen(start) + 1; -} - -static u32 get_comment_field_size(const u8 *start) -{ - return strlen(start) + 1; -} - -static u32 __get_gzip_head_size(const u8 *src) -{ - u8 head_flg = *(src + GZIP_HEAD_FLG_SHIFT); - u32 size = GZIP_HEAD_FEXTRA_SHIFT; - - if (head_flg & GZIP_HEAD_FEXTRA_BIT) - size += get_extra_field_size(src + size); - if (head_flg & GZIP_HEAD_FNAME_BIT) - size += get_name_field_size(src + size); - if (head_flg & GZIP_HEAD_FCOMMENT_BIT) - size += get_comment_field_size(src + size); - if (head_flg & GZIP_HEAD_FHCRC_BIT) - size += GZIP_HEAD_FHCRC_SIZE; - - return size; -} - static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx) { struct hisi_zip_req_q *req_q; @@ -336,52 +684,6 @@ static void hisi_zip_release_sgl_pool(struct hisi_zip_ctx *ctx) ctx->qp_ctx[i].sgl_pool); } -static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx, - struct hisi_zip_req *req) -{ - struct hisi_zip_req_q *req_q = &qp_ctx->req_q; - - write_lock(&req_q->req_lock); - clear_bit(req->req_id, req_q->req_bitmap); - memset(req, 0, sizeof(struct hisi_zip_req)); - write_unlock(&req_q->req_lock); -} - -static void 
hisi_zip_acomp_cb(struct hisi_qp *qp, void *data) -{ - struct hisi_zip_sqe *sqe = data; - struct hisi_zip_qp_ctx *qp_ctx = qp->qp_ctx; - struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx; - struct hisi_zip_req_q *req_q = &qp_ctx->req_q; - struct hisi_zip_req *req = req_q->q + sqe->tag; - struct acomp_req *acomp_req = req->req; - struct device *dev = &qp->qm->pdev->dev; - u32 status, dlen, head_size; - int err = 0; - - atomic64_inc(&dfx->recv_cnt); - status = sqe->dw3 & HZIP_BD_STATUS_M; - if (status != 0 && status != HZIP_NC_ERR) { - dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n", - (qp->alg_type == 0) ? "" : "de", qp->qp_id, status, - sqe->produced); - atomic64_inc(&dfx->err_bd_cnt); - err = -EIO; - } - dlen = sqe->produced; - - hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src); - hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst); - - head_size = (qp->alg_type == 0) ? TO_HEAD_SIZE(qp->req_type) : 0; - acomp_req->dlen = dlen + head_size; - - if (acomp_req->base.complete) - acomp_request_complete(acomp_req, err); - - hisi_zip_remove_req(qp_ctx, req); -} - static void hisi_zip_set_acomp_cb(struct hisi_zip_ctx *ctx, void (*fn)(struct hisi_qp *, void *)) { @@ -439,204 +741,6 @@ static void hisi_zip_acomp_exit(struct crypto_acomp *tfm) hisi_zip_ctx_exit(ctx); } -static int add_comp_head(struct scatterlist *dst, u8 req_type) -{ - int head_size = TO_HEAD_SIZE(req_type); - const u8 *head = TO_HEAD(req_type); - int ret; - - ret = sg_copy_from_buffer(dst, sg_nents(dst), head, head_size); - if (ret != head_size) { - pr_err("the head size of buffer is wrong (%d)!\n", ret); - return -ENOMEM; - } - - return head_size; -} - -static size_t __maybe_unused get_gzip_head_size(struct scatterlist *sgl) -{ - char buf[HZIP_GZIP_HEAD_BUF]; - - sg_copy_to_buffer(sgl, sg_nents(sgl), buf, sizeof(buf)); - - return __get_gzip_head_size(buf); -} - -static int get_comp_head_size(struct acomp_req *acomp_req, u8 req_type) -{ - if (!acomp_req->src || !acomp_req->slen) - return -EINVAL; - - if ((req_type == HZIP_ALG_TYPE_GZIP) && - (acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT)) - return -EINVAL; - - switch (req_type) { - case HZIP_ALG_TYPE_ZLIB: - return TO_HEAD_SIZE(HZIP_ALG_TYPE_ZLIB); - case HZIP_ALG_TYPE_GZIP: - return TO_HEAD_SIZE(HZIP_ALG_TYPE_GZIP); - default: - pr_err("request type does not support!\n"); - return -EINVAL; - } -} - -static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req, - struct hisi_zip_qp_ctx *qp_ctx, - size_t head_size, bool is_comp) -{ - struct hisi_zip_req_q *req_q = &qp_ctx->req_q; - struct hisi_zip_req *q = req_q->q; - struct hisi_zip_req *req_cache; - int req_id; - - write_lock(&req_q->req_lock); - - req_id = find_first_zero_bit(req_q->req_bitmap, req_q->size); - if (req_id >= req_q->size) { - write_unlock(&req_q->req_lock); - dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n"); - return ERR_PTR(-EAGAIN); - } - set_bit(req_id, req_q->req_bitmap); - - req_cache = q + req_id; - req_cache->req_id = req_id; - req_cache->req = req; - - if (is_comp) { - req_cache->sskip = 0; - req_cache->dskip = head_size; - } else { - req_cache->sskip = head_size; - req_cache->dskip = 0; - } - - write_unlock(&req_q->req_lock); - - return req_cache; -} - -static int hisi_zip_do_work(struct hisi_zip_req *req, - struct hisi_zip_qp_ctx *qp_ctx) -{ - struct acomp_req *a_req = req->req; - struct hisi_qp *qp = qp_ctx->qp; - struct device *dev = &qp->qm->pdev->dev; - struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool; - struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx; - struct 
hisi_zip_sqe zip_sqe; - dma_addr_t input, output; - int ret; - - if (!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen) - return -EINVAL; - - req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool, - req->req_id << 1, &input); - if (IS_ERR(req->hw_src)) { - dev_err(dev, "failed to map the src buffer to hw sgl (%ld)!\n", - PTR_ERR(req->hw_src)); - return PTR_ERR(req->hw_src); - } - req->dma_src = input; - - req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool, - (req->req_id << 1) + 1, - &output); - if (IS_ERR(req->hw_dst)) { - ret = PTR_ERR(req->hw_dst); - dev_err(dev, "failed to map the dst buffer to hw slg (%d)!\n", - ret); - goto err_unmap_input; - } - req->dma_dst = output; - - hisi_zip_fill_sqe(&zip_sqe, qp->req_type, input, output, a_req->slen, - a_req->dlen, req->sskip, req->dskip); - hisi_zip_config_buf_type(&zip_sqe, HZIP_SGL); - hisi_zip_config_tag(&zip_sqe, req->req_id); - - /* send command to start a task */ - atomic64_inc(&dfx->send_cnt); - ret = hisi_qp_send(qp, &zip_sqe); - if (ret < 0) { - atomic64_inc(&dfx->send_busy_cnt); - ret = -EAGAIN; - dev_dbg_ratelimited(dev, "failed to send request!\n"); - goto err_unmap_output; - } - - return -EINPROGRESS; - -err_unmap_output: - hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst); -err_unmap_input: - hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src); - return ret; -} - -static int hisi_zip_acompress(struct acomp_req *acomp_req) -{ - struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm); - struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_COMP]; - struct device *dev = &qp_ctx->qp->qm->pdev->dev; - struct hisi_zip_req *req; - int head_size; - int ret; - - /* let's output compression head now */ - head_size = add_comp_head(acomp_req->dst, qp_ctx->qp->req_type); - if (head_size < 0) { - dev_err_ratelimited(dev, "failed to add comp head (%d)!\n", - head_size); - return head_size; - } - - req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, true); - if (IS_ERR(req)) - return PTR_ERR(req); - - ret = hisi_zip_do_work(req, qp_ctx); - if (ret != -EINPROGRESS) { - dev_info_ratelimited(dev, "failed to do compress (%d)!\n", ret); - hisi_zip_remove_req(qp_ctx, req); - } - - return ret; -} - -static int hisi_zip_adecompress(struct acomp_req *acomp_req) -{ - struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm); - struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_DECOMP]; - struct device *dev = &qp_ctx->qp->qm->pdev->dev; - struct hisi_zip_req *req; - int head_size, ret; - - head_size = get_comp_head_size(acomp_req, qp_ctx->qp->req_type); - if (head_size < 0) { - dev_err_ratelimited(dev, "failed to get comp head size (%d)!\n", - head_size); - return head_size; - } - - req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, false); - if (IS_ERR(req)) - return PTR_ERR(req); - - ret = hisi_zip_do_work(req, qp_ctx); - if (ret != -EINPROGRESS) { - dev_info_ratelimited(dev, "failed to do decompress (%d)!\n", - ret); - hisi_zip_remove_req(qp_ctx, req); - } - - return ret; -} - static struct acomp_alg hisi_zip_acomp_zlib = { .init = hisi_zip_acomp_init, .exit = hisi_zip_acomp_exit, @@ -665,7 +769,7 @@ static struct acomp_alg hisi_zip_acomp_gzip = { } }; -int hisi_zip_register_to_crypto(void) +int hisi_zip_register_to_crypto(struct hisi_qm *qm) { int ret; @@ -684,7 +788,7 @@ int hisi_zip_register_to_crypto(void) return ret; } -void hisi_zip_unregister_from_crypto(void) +void hisi_zip_unregister_from_crypto(struct hisi_qm *qm) { crypto_unregister_acomp(&hisi_zip_acomp_gzip); 
crypto_unregister_acomp(&hisi_zip_acomp_zlib); diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 02c445722445..2178b40e9f82 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -18,7 +18,6 @@ #define PCI_DEVICE_ID_ZIP_VF 0xa251 #define HZIP_QUEUE_NUM_V1 4096 -#define HZIP_QUEUE_NUM_V2 1024 #define HZIP_CLOCK_GATE_CTRL 0x301004 #define COMP0_ENABLE BIT(0) @@ -69,10 +68,10 @@ #define HZIP_CORE_INT_RAS_CE_ENABLE 0x1 #define HZIP_CORE_INT_RAS_NFE_ENB 0x301164 #define HZIP_CORE_INT_RAS_FE_ENB 0x301168 -#define HZIP_CORE_INT_RAS_NFE_ENABLE 0x7FE +#define HZIP_CORE_INT_RAS_NFE_ENABLE 0x1FFE #define HZIP_SRAM_ECC_ERR_NUM_SHIFT 16 #define HZIP_SRAM_ECC_ERR_ADDR_SHIFT 24 -#define HZIP_CORE_INT_MASK_ALL GENMASK(10, 0) +#define HZIP_CORE_INT_MASK_ALL GENMASK(12, 0) #define HZIP_COMP_CORE_NUM 2 #define HZIP_DECOMP_CORE_NUM 6 #define HZIP_CORE_NUM (HZIP_COMP_CORE_NUM + \ @@ -134,17 +133,17 @@ static const struct hisi_zip_hw_error zip_hw_error[] = { { .int_msk = BIT(8), .msg = "zip_com_inf_err" }, { .int_msk = BIT(9), .msg = "zip_enc_inf_err" }, { .int_msk = BIT(10), .msg = "zip_pre_out_err" }, + { .int_msk = BIT(11), .msg = "zip_axi_poison_err" }, + { .int_msk = BIT(12), .msg = "zip_sva_err" }, { /* sentinel */ } }; enum ctrl_debug_file_index { - HZIP_CURRENT_QM, HZIP_CLEAR_ENABLE, HZIP_DEBUG_FILE_NUM, }; static const char * const ctrl_debug_file_name[] = { - [HZIP_CURRENT_QM] = "current_qm", [HZIP_CLEAR_ENABLE] = "clear_enable", }; @@ -363,48 +362,6 @@ static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file) return &hisi_zip->qm; } -static u32 current_qm_read(struct ctrl_debug_file *file) -{ - struct hisi_qm *qm = file_to_qm(file); - - return readl(qm->io_base + QM_DFX_MB_CNT_VF); -} - -static int current_qm_write(struct ctrl_debug_file *file, u32 val) -{ - struct hisi_qm *qm = file_to_qm(file); - u32 vfq_num; - u32 tmp; - - if (val > qm->vfs_num) - return -EINVAL; - - /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */ - if (val == 0) { - qm->debug.curr_qm_qp_num = qm->qp_num; - } else { - vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num; - if (val == qm->vfs_num) - qm->debug.curr_qm_qp_num = qm->ctrl_qp_num - - qm->qp_num - (qm->vfs_num - 1) * vfq_num; - else - qm->debug.curr_qm_qp_num = vfq_num; - } - - writel(val, qm->io_base + QM_DFX_MB_CNT_VF); - writel(val, qm->io_base + QM_DFX_DB_CNT_VF); - - tmp = val | - (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK); - writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); - - tmp = val | - (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK); - writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); - - return 0; -} - static u32 clear_enable_read(struct ctrl_debug_file *file) { struct hisi_qm *qm = file_to_qm(file); @@ -438,9 +395,6 @@ static ssize_t hisi_zip_ctrl_debug_read(struct file *filp, char __user *buf, spin_lock_irq(&file->lock); switch (file->index) { - case HZIP_CURRENT_QM: - val = current_qm_read(file); - break; case HZIP_CLEAR_ENABLE: val = clear_enable_read(file); break; @@ -478,11 +432,6 @@ static ssize_t hisi_zip_ctrl_debug_write(struct file *filp, spin_lock_irq(&file->lock); switch (file->index) { - case HZIP_CURRENT_QM: - ret = current_qm_write(file, val); - if (ret) - goto err_input; - break; case HZIP_CLEAR_ENABLE: ret = clear_enable_write(file, val); if (ret) @@ -580,7 +529,7 @@ static int hisi_zip_ctrl_debug_init(struct hisi_qm *qm) struct hisi_zip *zip = container_of(qm, struct 
hisi_zip, qm); int i; - for (i = HZIP_CURRENT_QM; i < HZIP_DEBUG_FILE_NUM; i++) { + for (i = HZIP_CLEAR_ENABLE; i < HZIP_DEBUG_FILE_NUM; i++) { spin_lock_init(&zip->ctrl->files[i].lock); zip->ctrl->files[i].ctrl = zip->ctrl; zip->ctrl->files[i].index = i; @@ -627,10 +576,6 @@ static void hisi_zip_debug_regs_clear(struct hisi_qm *qm) { int i, j; - /* clear current_qm */ - writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF); - writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF); - /* enable register read_clear bit */ writel(HZIP_RD_CNT_CLR_CE_EN, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE); for (i = 0; i < ARRAY_SIZE(core_offsets); i++) @@ -714,6 +659,22 @@ static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm) qm->io_base + HZIP_CORE_INT_SET); } +static void hisi_zip_err_info_init(struct hisi_qm *qm) +{ + struct hisi_qm_err_info *err_info = &qm->err_info; + + err_info->ce = QM_BASE_CE; + err_info->fe = 0; + err_info->ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC; + err_info->dev_ce_mask = HZIP_CORE_INT_RAS_CE_ENABLE; + err_info->msi_wr_port = HZIP_WR_PORT; + err_info->acpi_rst = "ZRST"; + err_info->nfe = QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT; + + if (qm->ver >= QM_HW_V3) + err_info->nfe |= QM_ACC_DO_TASK_TIMEOUT; +} + static const struct hisi_qm_err_ini hisi_zip_err_ini = { .hw_init = hisi_zip_set_user_domain_and_cache, .hw_err_enable = hisi_zip_hw_error_enable, @@ -723,16 +684,7 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = { .log_dev_hw_err = hisi_zip_log_hw_error, .open_axi_master_ooo = hisi_zip_open_axi_master_ooo, .close_axi_master_ooo = hisi_zip_close_axi_master_ooo, - .err_info = { - .ce = QM_BASE_CE, - .nfe = QM_BASE_NFE | - QM_ACC_WB_NOT_READY_TIMEOUT, - .fe = 0, - .ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC, - .dev_ce_mask = HZIP_CORE_INT_RAS_CE_ENABLE, - .msi_wr_port = HZIP_WR_PORT, - .acpi_rst = "ZRST", - } + .err_info_init = hisi_zip_err_info_init, }; static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) @@ -746,13 +698,8 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) hisi_zip->ctrl = ctrl; ctrl->hisi_zip = hisi_zip; - - if (qm->ver == QM_HW_V1) - qm->ctrl_qp_num = HZIP_QUEUE_NUM_V1; - else - qm->ctrl_qp_num = HZIP_QUEUE_NUM_V2; - qm->err_ini = &hisi_zip_err_ini; + qm->err_ini->err_info_init(qm); hisi_zip_set_user_domain_and_cache(qm); hisi_qm_dev_err_init(qm); diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index e813115d5432..aa4c7b2af3e2 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c @@ -963,8 +963,6 @@ static int img_hash_probe(struct platform_device *pdev) hdev->io_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(hdev->io_base)) { err = PTR_ERR(hdev->io_base); - dev_err(dev, "can't ioremap, returned %d\n", err); - goto res_err; } @@ -972,7 +970,6 @@ static int img_hash_probe(struct platform_device *pdev) hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); hdev->cpu_addr = devm_ioremap_resource(dev, hash_res); if (IS_ERR(hdev->cpu_addr)) { - dev_err(dev, "can't ioremap write port\n"); err = PTR_ERR(hdev->cpu_addr); goto res_err; } diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 6364583b88b2..9ff885d50edf 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -688,7 +688,7 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) /* Leave the DSE threads reset state */ writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe)); - /* Configure the procesing engine thresholds */ 
+ /* Configure the processing engine thresholds */ writel(EIP197_PE_OUT_DBUF_THRES_MIN(opbuflo) | EIP197_PE_OUT_DBUF_THRES_MAX(opbufhi), EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe)); diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 8b0f17fc09fb..0616e369522e 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -265,7 +265,7 @@ static int setup_crypt_desc(void) return 0; } -static spinlock_t desc_lock; +static DEFINE_SPINLOCK(desc_lock); static struct crypt_ctl *get_crypt_desc(void) { int i; @@ -293,7 +293,7 @@ static struct crypt_ctl *get_crypt_desc(void) } } -static spinlock_t emerg_lock; +static DEFINE_SPINLOCK(emerg_lock); static struct crypt_ctl *get_crypt_desc_emerg(void) { int i; @@ -1379,9 +1379,6 @@ static int __init ixp_module_init(void) if (IS_ERR(pdev)) return PTR_ERR(pdev); - spin_lock_init(&desc_lock); - spin_lock_init(&emerg_lock); - err = init_ixp_crypto(&pdev->dev); if (err) { platform_device_unregister(pdev); diff --git a/drivers/crypto/keembay/keembay-ocs-aes-core.c b/drivers/crypto/keembay/keembay-ocs-aes-core.c index b6b25d994af3..e2a39fdaf623 100644 --- a/drivers/crypto/keembay/keembay-ocs-aes-core.c +++ b/drivers/crypto/keembay/keembay-ocs-aes-core.c @@ -1623,10 +1623,8 @@ static int kmb_ocs_aes_probe(struct platform_device *pdev) } aes_dev->base_reg = devm_ioremap_resource(&pdev->dev, aes_mem); - if (IS_ERR(aes_dev->base_reg)) { - dev_err(dev, "Failed to get base address\n"); + if (IS_ERR(aes_dev->base_reg)) return PTR_ERR(aes_dev->base_reg); - } /* Get and request IRQ */ aes_dev->irq = platform_get_irq(pdev, 0); @@ -1649,8 +1647,10 @@ static int kmb_ocs_aes_probe(struct platform_device *pdev) /* Initialize crypto engine */ aes_dev->engine = crypto_engine_alloc_init(dev, true); - if (!aes_dev->engine) + if (!aes_dev->engine) { + rc = -ENOMEM; goto list_del; + } rc = crypto_engine_start(aes_dev->engine); if (rc) { diff --git a/drivers/crypto/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/keembay/keembay-ocs-hcu-core.c index c4b97b4160e9..0379dbf32a4c 100644 --- a/drivers/crypto/keembay/keembay-ocs-hcu-core.c +++ b/drivers/crypto/keembay/keembay-ocs-hcu-core.c @@ -1192,10 +1192,8 @@ static int kmb_ocs_hcu_probe(struct platform_device *pdev) } hcu_dev->io_base = devm_ioremap_resource(dev, hcu_mem); - if (IS_ERR(hcu_dev->io_base)) { - dev_err(dev, "Could not io-remap mem resource.\n"); + if (IS_ERR(hcu_dev->io_base)) return PTR_ERR(hcu_dev->io_base); - } init_completion(&hcu_dev->irq_done); @@ -1220,8 +1218,10 @@ static int kmb_ocs_hcu_probe(struct platform_device *pdev) /* Initialize crypto engine */ hcu_dev->engine = crypto_engine_alloc_init(dev, 1); - if (!hcu_dev->engine) + if (!hcu_dev->engine) { + rc = -ENOMEM; goto list_del; + } rc = crypto_engine_start(hcu_dev->engine); if (rc) { diff --git a/drivers/crypto/keembay/ocs-hcu.c b/drivers/crypto/keembay/ocs-hcu.c index 81eecacf603a..deb9bd460ee6 100644 --- a/drivers/crypto/keembay/ocs-hcu.c +++ b/drivers/crypto/keembay/ocs-hcu.c @@ -93,7 +93,7 @@ #define OCS_HCU_WAIT_BUSY_TIMEOUT_US 1000000 /** - * struct ocs_hcu_dma_list - An entry in an OCS DMA linked list. + * struct ocs_hcu_dma_entry - An entry in an OCS DMA linked list. * @src_addr: Source address of the data. * @src_len: Length of data to be fetched. * @nxt_desc: Next descriptor to fetch. @@ -107,7 +107,7 @@ struct ocs_hcu_dma_entry { }; /** - * struct ocs_dma_list - OCS-specific DMA linked list. + * struct ocs_hcu_dma_list - OCS-specific DMA linked list. 
* @head: The head of the list (points to the array backing the list). * @tail: The current tail of the list; NULL if the list is empty. * @dma_addr: The DMA address of @head (i.e., the DMA address of the backing @@ -597,7 +597,7 @@ int ocs_hcu_hash_init(struct ocs_hcu_hash_ctx *ctx, enum ocs_hcu_algo algo) } /** - * ocs_hcu_digest() - Perform a hashing iteration. + * ocs_hcu_hash_update() - Perform a hashing iteration. * @hcu_dev: The OCS HCU device to use. * @ctx: The OCS HCU hashing context. * @dma_list: The OCS DMA list mapping the input data to process. @@ -632,7 +632,7 @@ int ocs_hcu_hash_update(struct ocs_hcu_dev *hcu_dev, } /** - * ocs_hcu_hash_final() - Update and finalize hash computation. + * ocs_hcu_hash_finup() - Update and finalize hash computation. * @hcu_dev: The OCS HCU device to use. * @ctx: The OCS HCU hashing context. * @dma_list: The OCS DMA list mapping the input data to process. diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h index 3518fac29834..ecedd91a8d85 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h +++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h @@ -121,14 +121,14 @@ int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev); int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox, struct pci_dev *pdev); -int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, - struct pci_dev *pdev, u64 reg, u64 *val); +int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, + u64 reg, u64 *val, int blkaddr); int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, - u64 reg, u64 val); + u64 reg, u64 val, int blkaddr); int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, - u64 reg, u64 *val); + u64 reg, u64 *val, int blkaddr); int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, - u64 reg, u64 val); + u64 reg, u64 val, int blkaddr); struct otx2_cptlfs_info; int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs); int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs); diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c index 51cb6404ded7..9074876d38e5 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c @@ -43,7 +43,7 @@ int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox, struct pci_dev *pdev) } int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, - u64 reg, u64 *val) + u64 reg, u64 *val, int blkaddr) { struct cpt_rd_wr_reg_msg *reg_msg; @@ -62,12 +62,13 @@ int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, reg_msg->is_write = 0; reg_msg->reg_offset = reg; reg_msg->ret_val = val; + reg_msg->blkaddr = blkaddr; return 0; } int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, - u64 reg, u64 val) + u64 reg, u64 val, int blkaddr) { struct cpt_rd_wr_reg_msg *reg_msg; @@ -86,16 +87,17 @@ int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, reg_msg->is_write = 1; reg_msg->reg_offset = reg; reg_msg->val = val; + reg_msg->blkaddr = blkaddr; return 0; } int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, - u64 reg, u64 *val) + u64 reg, u64 *val, int blkaddr) { int ret; - ret = otx2_cpt_add_read_af_reg(mbox, pdev, reg, val); + ret = otx2_cpt_add_read_af_reg(mbox, pdev, reg, val, blkaddr); if (ret) return ret; @@ -103,11 +105,11 @@ int 
otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, } int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, - u64 reg, u64 val) + u64 reg, u64 val, int blkaddr) { int ret; - ret = otx2_cpt_add_write_af_reg(mbox, pdev, reg, val); + ret = otx2_cpt_add_write_af_reg(mbox, pdev, reg, val, blkaddr); if (ret) return ret; diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c index 823a4571fd67..34aba1532761 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c @@ -56,7 +56,7 @@ static int cptlf_set_pri(struct otx2_cptlf_info *lf, int pri) ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev, CPT_AF_LFX_CTL(lf->slot), - &lf_ctrl.u); + &lf_ctrl.u, lfs->blkaddr); if (ret) return ret; @@ -64,7 +64,7 @@ static int cptlf_set_pri(struct otx2_cptlf_info *lf, int pri) ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev, CPT_AF_LFX_CTL(lf->slot), - lf_ctrl.u); + lf_ctrl.u, lfs->blkaddr); return ret; } @@ -77,7 +77,7 @@ static int cptlf_set_eng_grps_mask(struct otx2_cptlf_info *lf, ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev, CPT_AF_LFX_CTL(lf->slot), - &lf_ctrl.u); + &lf_ctrl.u, lfs->blkaddr); if (ret) return ret; @@ -85,7 +85,7 @@ static int cptlf_set_eng_grps_mask(struct otx2_cptlf_info *lf, ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev, CPT_AF_LFX_CTL(lf->slot), - lf_ctrl.u); + lf_ctrl.u, lfs->blkaddr); return ret; } diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h index 314e97354100..ab1678fc564d 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h +++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h @@ -95,6 +95,7 @@ struct otx2_cptlfs_info { u8 kcrypto_eng_grp_num; /* Kernel crypto engine group number */ u8 kvf_limits; /* Kernel crypto limits */ atomic_t state; /* LF's state. 
started/reset */ + int blkaddr; /* CPT blkaddr: BLKADDR_CPT0/BLKADDR_CPT1 */ }; static inline void otx2_cpt_free_instruction_queues( diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h index 8c899ad531a5..e19af1356f12 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h @@ -51,6 +51,7 @@ struct otx2_cptpf_dev { u8 max_vfs; /* Maximum number of VFs supported by CPT */ u8 enabled_vfs; /* Number of enabled VFs */ u8 kvf_limits; /* Kernel crypto limits */ + bool has_cpt1; }; irqreturn_t otx2_cptpf_afpf_mbox_intr(int irq, void *arg); diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c index 5277e04badd9..58f47e3ab62e 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c @@ -451,19 +451,19 @@ static int cpt_is_pf_usable(struct otx2_cptpf_dev *cptpf) return 0; } -static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf) +static int cptx_device_reset(struct otx2_cptpf_dev *cptpf, int blkaddr) { int timeout = 10, ret; u64 reg = 0; ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev, - CPT_AF_BLK_RST, 0x1); + CPT_AF_BLK_RST, 0x1, blkaddr); if (ret) return ret; do { ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev, - CPT_AF_BLK_RST, ®); + CPT_AF_BLK_RST, ®, blkaddr); if (ret) return ret; @@ -478,11 +478,35 @@ static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf) return ret; } +static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf) +{ + int ret = 0; + + if (cptpf->has_cpt1) { + ret = cptx_device_reset(cptpf, BLKADDR_CPT1); + if (ret) + return ret; + } + return cptx_device_reset(cptpf, BLKADDR_CPT0); +} + +static void cptpf_check_block_implemented(struct otx2_cptpf_dev *cptpf) +{ + u64 cfg; + + cfg = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0, + RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_CPT1)); + if (cfg & BIT_ULL(11)) + cptpf->has_cpt1 = true; +} + static int cptpf_device_init(struct otx2_cptpf_dev *cptpf) { union otx2_cptx_af_constants1 af_cnsts1 = {0}; int ret = 0; + /* check if 'implemented' bit is set for block BLKADDR_CPT1 */ + cptpf_check_block_implemented(cptpf); /* Reset the CPT PF device */ ret = cptpf_device_reset(cptpf); if (ret) @@ -490,7 +514,8 @@ static int cptpf_device_init(struct otx2_cptpf_dev *cptpf) /* Get number of SE, IE and AE engines */ ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev, - CPT_AF_CONSTANTS1, &af_cnsts1.u); + CPT_AF_CONSTANTS1, &af_cnsts1.u, + BLKADDR_CPT0); if (ret) return ret; diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c index 1dc3ba298139..a531f4c8b441 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c @@ -153,16 +153,16 @@ static int get_ucode_type(struct device *dev, } static int __write_ucode_base(struct otx2_cptpf_dev *cptpf, int eng, - dma_addr_t dma_addr) + dma_addr_t dma_addr, int blkaddr) { return otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev, CPT_AF_EXEX_UCODE_BASE(eng), - (u64)dma_addr); + (u64)dma_addr, blkaddr); } -static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj) +static int cptx_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, + struct otx2_cptpf_dev *cptpf, int blkaddr) { - struct otx2_cptpf_dev *cptpf = obj; struct otx2_cpt_engs_rsvd *engs; dma_addr_t dma_addr; int i, bit, ret; @@ -170,7 
+170,7 @@ static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj) /* Set PF number for microcode fetches */ ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev, CPT_AF_PF_FUNC, - cptpf->pf_id << RVU_PFVF_PF_SHIFT); + cptpf->pf_id << RVU_PFVF_PF_SHIFT, blkaddr); if (ret) return ret; @@ -187,7 +187,8 @@ static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj) */ for_each_set_bit(bit, engs->bmap, eng_grp->g->engs_num) if (!eng_grp->g->eng_ref_cnt[bit]) { - ret = __write_ucode_base(cptpf, bit, dma_addr); + ret = __write_ucode_base(cptpf, bit, dma_addr, + blkaddr); if (ret) return ret; } @@ -195,23 +196,32 @@ static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj) return 0; } -static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp, - void *obj) +static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj) { struct otx2_cptpf_dev *cptpf = obj; - struct otx2_cpt_bitmap bmap; + int ret; + + if (cptpf->has_cpt1) { + ret = cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT1); + if (ret) + return ret; + } + return cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT0); +} + +static int cptx_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp, + struct otx2_cptpf_dev *cptpf, + struct otx2_cpt_bitmap bmap, + int blkaddr) +{ int i, timeout = 10; int busy, ret; u64 reg = 0; - bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp); - if (!bmap.size) - return -EINVAL; - /* Detach the cores from group */ for_each_set_bit(i, bmap.bits, bmap.size) { ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev, - CPT_AF_EXEX_CTL2(i), ®); + CPT_AF_EXEX_CTL2(i), ®, blkaddr); if (ret) return ret; @@ -221,7 +231,8 @@ static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp, ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev, - CPT_AF_EXEX_CTL2(i), reg); + CPT_AF_EXEX_CTL2(i), reg, + blkaddr); if (ret) return ret; } @@ -237,7 +248,8 @@ static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp, for_each_set_bit(i, bmap.bits, bmap.size) { ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev, - CPT_AF_EXEX_STS(i), ®); + CPT_AF_EXEX_STS(i), ®, + blkaddr); if (ret) return ret; @@ -253,7 +265,8 @@ static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp, if (!eng_grp->g->eng_ref_cnt[i]) { ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev, - CPT_AF_EXEX_CTL(i), 0x0); + CPT_AF_EXEX_CTL(i), 0x0, + blkaddr); if (ret) return ret; } @@ -262,22 +275,39 @@ static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp, return 0; } -static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp, - void *obj) +static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp, + void *obj) { struct otx2_cptpf_dev *cptpf = obj; struct otx2_cpt_bitmap bmap; - u64 reg = 0; - int i, ret; + int ret; bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp); if (!bmap.size) return -EINVAL; + if (cptpf->has_cpt1) { + ret = cptx_detach_and_disable_cores(eng_grp, cptpf, bmap, + BLKADDR_CPT1); + if (ret) + return ret; + } + return cptx_detach_and_disable_cores(eng_grp, cptpf, bmap, + BLKADDR_CPT0); +} + +static int cptx_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp, + struct otx2_cptpf_dev *cptpf, + struct otx2_cpt_bitmap bmap, + int blkaddr) +{ + u64 reg = 0; + int i, ret; + /* Attach the cores to the group */ for_each_set_bit(i, bmap.bits, bmap.size) { ret = 
otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev, - CPT_AF_EXEX_CTL2(i), ®); + CPT_AF_EXEX_CTL2(i), ®, blkaddr); if (ret) return ret; @@ -287,7 +317,8 @@ static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp, ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev, - CPT_AF_EXEX_CTL2(i), reg); + CPT_AF_EXEX_CTL2(i), reg, + blkaddr); if (ret) return ret; } @@ -295,15 +326,33 @@ static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp, /* Enable the cores */ for_each_set_bit(i, bmap.bits, bmap.size) { - ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, - cptpf->pdev, - CPT_AF_EXEX_CTL(i), 0x1); + ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev, + CPT_AF_EXEX_CTL(i), 0x1, + blkaddr); if (ret) return ret; } - ret = otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev); + return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev); +} - return ret; +static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp, + void *obj) +{ + struct otx2_cptpf_dev *cptpf = obj; + struct otx2_cpt_bitmap bmap; + int ret; + + bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp); + if (!bmap.size) + return -EINVAL; + + if (cptpf->has_cpt1) { + ret = cptx_attach_and_enable_cores(eng_grp, cptpf, bmap, + BLKADDR_CPT1); + if (ret) + return ret; + } + return cptx_attach_and_enable_cores(eng_grp, cptpf, bmap, BLKADDR_CPT0); } static int load_fw(struct device *dev, struct fw_info_t *fw_info, @@ -1140,20 +1189,18 @@ release_fw: return ret; } -int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf) +static int cptx_disable_all_cores(struct otx2_cptpf_dev *cptpf, int total_cores, + int blkaddr) { - int i, ret, busy, total_cores; - int timeout = 10; - u64 reg = 0; - - total_cores = cptpf->eng_grps.avail.max_se_cnt + - cptpf->eng_grps.avail.max_ie_cnt + - cptpf->eng_grps.avail.max_ae_cnt; + int timeout = 10, ret; + int i, busy; + u64 reg; /* Disengage the cores from groups */ for (i = 0; i < total_cores; i++) { ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev, - CPT_AF_EXEX_CTL2(i), 0x0); + CPT_AF_EXEX_CTL2(i), 0x0, + blkaddr); if (ret) return ret; @@ -1173,7 +1220,8 @@ int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf) for (i = 0; i < total_cores; i++) { ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev, - CPT_AF_EXEX_STS(i), ®); + CPT_AF_EXEX_STS(i), ®, + blkaddr); if (ret) return ret; @@ -1187,13 +1235,30 @@ int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf) /* Disable the cores */ for (i = 0; i < total_cores; i++) { ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev, - CPT_AF_EXEX_CTL(i), 0x0); + CPT_AF_EXEX_CTL(i), 0x0, + blkaddr); if (ret) return ret; } return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev); } +int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf) +{ + int total_cores, ret; + + total_cores = cptpf->eng_grps.avail.max_se_cnt + + cptpf->eng_grps.avail.max_ie_cnt + + cptpf->eng_grps.avail.max_ae_cnt; + + if (cptpf->has_cpt1) { + ret = cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT1); + if (ret) + return ret; + } + return cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT0); +} + void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev, struct otx2_cpt_eng_grps *eng_grps) { @@ -1354,6 +1419,7 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf) lfs->pdev = pdev; lfs->reg_base = cptpf->reg_base; lfs->mbox = &cptpf->afpf_mbox; + lfs->blkaddr = BLKADDR_CPT0; ret = otx2_cptlf_init(&cptpf->lfs, 
OTX2_CPT_ALL_ENG_GRPS_MASK, OTX2_CPT_QUEUE_HI_PRIO, 1); if (ret) diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c index 92e921eceed7..d6314ea9ae89 100644 --- a/drivers/crypto/nx/nx-aes-cbc.c +++ b/drivers/crypto/nx/nx-aes-cbc.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * AES CBC routines supporting the Power 7+ Nest Accelerators driver * * Copyright (C) 2011-2012 International Business Machines Inc. diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c index 4c9362eebefd..e7384d107573 100644 --- a/drivers/crypto/nx/nx-aes-ccm.c +++ b/drivers/crypto/nx/nx-aes-ccm.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * AES CCM routines supporting the Power 7+ Nest Accelerators driver * * Copyright (C) 2012 International Business Machines Inc. diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c index 6d5ce1a66f1e..13f518802343 100644 --- a/drivers/crypto/nx/nx-aes-ctr.c +++ b/drivers/crypto/nx/nx-aes-ctr.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * AES CTR routines supporting the Power 7+ Nest Accelerators driver * * Copyright (C) 2011-2012 International Business Machines Inc. diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c index 77e338dc33f1..7a729dc2bc17 100644 --- a/drivers/crypto/nx/nx-aes-ecb.c +++ b/drivers/crypto/nx/nx-aes-ecb.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * AES ECB routines supporting the Power 7+ Nest Accelerators driver * * Copyright (C) 2011-2012 International Business Machines Inc. diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c index 19c6ed5baea4..fc9baca13920 100644 --- a/drivers/crypto/nx/nx-aes-gcm.c +++ b/drivers/crypto/nx/nx-aes-gcm.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * AES GCM routines supporting the Power 7+ Nest Accelerators driver * * Copyright (C) 2012 International Business Machines Inc. diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c index 48dc1c98ca52..eb5c8f689360 100644 --- a/drivers/crypto/nx/nx-aes-xcbc.c +++ b/drivers/crypto/nx/nx-aes-xcbc.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * AES XCBC routines supporting the Power 7+ Nest Accelerators driver * * Copyright (C) 2011-2012 International Business Machines Inc. diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c index 13c65deda8e9..446f611726df 100644 --- a/drivers/crypto/nx/nx-common-powernv.c +++ b/drivers/crypto/nx/nx-common-powernv.c @@ -932,8 +932,10 @@ static int __init nx_powernv_probe_vas(struct device_node *pn) ret = find_nx_device_tree(dn, chip_id, vasid, NX_CT_GZIP, "ibm,p9-nx-gzip", &ct_gzip); - if (ret) + if (ret) { + of_node_put(dn); return ret; + } } if (!ct_842 || !ct_gzip) { diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c index 90d9a37a57f6..b0ad665e4bda 100644 --- a/drivers/crypto/nx/nx-sha256.c +++ b/drivers/crypto/nx/nx-sha256.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * SHA-256 routines supporting the Power 7+ Nest Accelerators driver * * Copyright (C) 2011-2012 International Business Machines Inc. 
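The nx-common-powernv hunk above adds an of_node_put() on the early-return path of nx_powernv_probe_vas(): for_each_child_of_node() takes a reference on each child it visits, so bailing out of the loop without dropping that reference leaks the device node. A minimal sketch of the pattern, where probe_children() and setup_one_node() are hypothetical stand-ins for the driver's loop and its find_nx_device_tree() lookup:

#include <linux/errno.h>
#include <linux/of.h>

/* Hypothetical per-child setup step; stands in for find_nx_device_tree(). */
static int setup_one_node(struct device_node *dn)
{
        return of_device_is_available(dn) ? 0 : -ENODEV;
}

static int probe_children(struct device_node *parent)
{
        struct device_node *dn;
        int ret;

        for_each_child_of_node(parent, dn) {
                ret = setup_one_node(dn);
                if (ret) {
                        /*
                         * The iterator holds a reference on dn for the
                         * current iteration; an early return must drop it
                         * explicitly, exactly as the fix above does.
                         */
                        of_node_put(dn);
                        return ret;
                }
        }

        return 0;
}

Normal loop termination needs no of_node_put(), because the iterator drops the reference itself when it advances past the last child.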
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c index eb8627a0f317..c29103a1a0b6 100644 --- a/drivers/crypto/nx/nx-sha512.c +++ b/drivers/crypto/nx/nx-sha512.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * SHA-512 routines supporting the Power 7+ Nest Accelerators driver * * Copyright (C) 2011-2012 International Business Machines Inc. diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c index 1d0e8a1ba160..010e87d9da36 100644 --- a/drivers/crypto/nx/nx.c +++ b/drivers/crypto/nx/nx.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * Routines supporting the Power 7+ Nest Accelerators driver * * Copyright (C) 2011-2012 International Business Machines Inc. @@ -200,7 +200,8 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst, * @sg: sg list head * @end: sg lisg end * @delta: is the amount we need to crop in order to bound the list. - * + * @nbytes: length of data in the scatterlists or data length - whichever + * is greater. */ static long int trim_sg_list(struct nx_sg *sg, struct nx_sg *end, diff --git a/drivers/crypto/nx/nx_debugfs.c b/drivers/crypto/nx/nx_debugfs.c index 1975bcbee997..ee7cd88bb10a 100644 --- a/drivers/crypto/nx/nx_debugfs.c +++ b/drivers/crypto/nx/nx_debugfs.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * debugfs routines supporting the Power 7+ Nest Accelerators driver * * Copyright (C) 2011-2012 International Business Machines Inc. diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index a45bdcf3026d..0dd4c6b157de 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -103,9 +103,8 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd) dd->err = 0; } - err = pm_runtime_get_sync(dd->dev); + err = pm_runtime_resume_and_get(dd->dev); if (err < 0) { - pm_runtime_put_noidle(dd->dev); dev_err(dd->dev, "failed to get sync: %d\n", err); return err; } @@ -1134,7 +1133,7 @@ static int omap_aes_probe(struct platform_device *pdev) pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY); pm_runtime_enable(dev); - err = pm_runtime_get_sync(dev); + err = pm_runtime_resume_and_get(dev); if (err < 0) { dev_err(dev, "%s: failed to get_sync(%d)\n", __func__, err); @@ -1303,7 +1302,7 @@ static int omap_aes_suspend(struct device *dev) static int omap_aes_resume(struct device *dev) { - pm_runtime_get_sync(dev); + pm_runtime_resume_and_get(dev); return 0; } #endif diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c index 6a9be01fdf33..3524ddd48930 100644 --- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -224,6 +224,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data) hw_data->uof_get_name = uof_get_name; hw_data->uof_get_ae_mask = uof_get_ae_mask; hw_data->set_msix_rttable = set_msix_default_rttable; + hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer; adf_gen4_init_hw_csr_ops(&hw_data->csr_ops); } diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c index f5990d042c9a..1dd64af22bea 100644 --- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c +++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c @@ -212,6 +212,7 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data) hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms; hw_data->reset_device = adf_reset_flr; hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; + hw_data->set_ssm_wdtimer = 
adf_gen2_set_ssm_wdtimer; adf_gen2_init_hw_csr_ops(&hw_data->csr_ops); } diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c index 1d1532e8fb6d..067ca5e17d38 100644 --- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c +++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c @@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto out_err_free_reg; - set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status); - ret = adf_dev_init(accel_dev); if (ret) goto out_err_dev_shutdown; + set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status); + ret = adf_dev_start(accel_dev); if (ret) goto out_err_dev_stop; diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c index cadcf12884c8..30337390513c 100644 --- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c +++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c @@ -214,6 +214,7 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data) hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms; hw_data->reset_device = adf_reset_flr; hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; + hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer; adf_gen2_init_hw_csr_ops(&hw_data->csr_ops); } diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c index 04742a6d91ca..51ea88c0b17d 100644 --- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c +++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c @@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto out_err_free_reg; - set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status); - ret = adf_dev_init(accel_dev); if (ret) goto out_err_dev_shutdown; + set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status); + ret = adf_dev_start(accel_dev); if (ret) goto out_err_dev_stop; diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h index 5527344546e5..ac435b44f1d2 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h @@ -173,6 +173,7 @@ struct adf_hw_device_data { void (*configure_iov_threads)(struct adf_accel_dev *accel_dev, bool enable); void (*enable_ints)(struct adf_accel_dev *accel_dev); + void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev); int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev); void (*reset_device)(struct adf_accel_dev *accel_dev); void (*set_msix_rttable)(struct adf_accel_dev *accel_dev); diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c index 1aa17303838d..9e560c7d4163 100644 --- a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c +++ b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c @@ -179,3 +179,28 @@ u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev) return capabilities; } EXPORT_SYMBOL_GPL(adf_gen2_get_accel_cap); + +void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + u32 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE; + u32 timer_val = ADF_SSM_WDT_DEFAULT_VALUE; + unsigned long accel_mask = hw_data->accel_mask; + void __iomem *pmisc_addr; + struct adf_bar *pmisc; + int pmisc_id; + u32 i = 0; + + pmisc_id = hw_data->get_misc_bar_id(hw_data); + pmisc = &GET_BARS(accel_dev)[pmisc_id]; + pmisc_addr = pmisc->virt_addr; + + /* Configures WDT timers */ + for_each_set_bit(i, &accel_mask, hw_data->num_accel) { + /* Enable WDT 
for sym and dc */ + ADF_CSR_WR(pmisc_addr, ADF_SSMWDT(i), timer_val); + /* Enable WDT for pke */ + ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKE(i), timer_val_pke); + } +} +EXPORT_SYMBOL_GPL(adf_gen2_set_ssm_wdtimer); diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h index 3816e6500352..756b0ddfac5e 100644 --- a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h +++ b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h @@ -113,11 +113,24 @@ do { \ /* Power gating */ #define ADF_POWERGATE_PKE BIT(24) +/* WDT timers + * + * Timeout is in cycles. Clock speed may vary across products but this + * value should be a few milli-seconds. + */ +#define ADF_SSM_WDT_DEFAULT_VALUE 0x200000 +#define ADF_SSM_WDT_PKE_DEFAULT_VALUE 0x2000000 +#define ADF_SSMWDT_OFFSET 0x54 +#define ADF_SSMWDTPKE_OFFSET 0x58 +#define ADF_SSMWDT(i) (ADF_SSMWDT_OFFSET + ((i) * 0x4000)) +#define ADF_SSMWDTPKE(i) (ADF_SSMWDTPKE_OFFSET + ((i) * 0x4000)) + void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable, int num_a_regs, int num_b_regs); void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); void adf_gen2_get_admin_info(struct admin_info *admin_csrs_info); void adf_gen2_get_arb_info(struct arb_info *arb_info); u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev); +void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); #endif diff --git a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c index b72ff58e0bc7..000528327b29 100644 --- a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c +++ b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c @@ -99,3 +99,43 @@ void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops) csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en; } EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops); + +static inline void adf_gen4_unpack_ssm_wdtimer(u64 value, u32 *upper, + u32 *lower) +{ + *lower = lower_32_bits(value); + *upper = upper_32_bits(value); +} + +void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + u64 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE; + u64 timer_val = ADF_SSM_WDT_DEFAULT_VALUE; + u32 ssm_wdt_pke_high = 0; + u32 ssm_wdt_pke_low = 0; + u32 ssm_wdt_high = 0; + u32 ssm_wdt_low = 0; + void __iomem *pmisc_addr; + struct adf_bar *pmisc; + int pmisc_id; + + pmisc_id = hw_data->get_misc_bar_id(hw_data); + pmisc = &GET_BARS(accel_dev)[pmisc_id]; + pmisc_addr = pmisc->virt_addr; + + /* Convert 64bit WDT timer value into 32bit values for + * mmio write to 32bit CSRs. 
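[Editor's aside: the gen4 helper above only splits a 64-bit timeout into the two halves that 32-bit CSRs can accept. A hedged stand-alone sketch of the same split, with lower_32_bits()/upper_32_bits() open-coded and the value taken from the gen4 header hunk below:]

#include <stdint.h>
#include <stdio.h>

static void unpack_u64(uint64_t val, uint32_t *upper, uint32_t *lower)
{
	*lower = (uint32_t)val;		/* lower_32_bits() */
	*upper = (uint32_t)(val >> 32);	/* upper_32_bits() */
}

int main(void)
{
	uint64_t timer_val_pke = 0x8000000;	/* ADF_SSM_WDT_PKE_DEFAULT_VALUE */
	uint32_t hi, lo;

	unpack_u64(timer_val_pke, &hi, &lo);
	printf("WDTPKEL=%#x WDTPKEH=%#x\n", lo, hi);	/* two 32-bit CSR writes */
	return 0;
}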
+ */ + adf_gen4_unpack_ssm_wdtimer(timer_val, &ssm_wdt_high, &ssm_wdt_low); + adf_gen4_unpack_ssm_wdtimer(timer_val_pke, &ssm_wdt_pke_high, + &ssm_wdt_pke_low); + + /* Enable WDT for sym and dc */ + ADF_CSR_WR(pmisc_addr, ADF_SSMWDTL_OFFSET, ssm_wdt_low); + ADF_CSR_WR(pmisc_addr, ADF_SSMWDTH_OFFSET, ssm_wdt_high); + /* Enable WDT for pke */ + ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEL_OFFSET, ssm_wdt_pke_low); + ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEH_OFFSET, ssm_wdt_pke_high); +} +EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer); diff --git a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h index 8ab62b2ac311..b8fca1ff7aab 100644 --- a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h +++ b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h @@ -94,6 +94,18 @@ do { \ ADF_RING_BUNDLE_SIZE * (bank) + \ ADF_RING_CSR_RING_SRV_ARB_EN, (value)) -void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); +/* WDT timers + * + * Timeout is in cycles. Clock speed may vary across products but this + * value should be a few milli-seconds. + */ +#define ADF_SSM_WDT_DEFAULT_VALUE 0x200000 +#define ADF_SSM_WDT_PKE_DEFAULT_VALUE 0x8000000 +#define ADF_SSMWDTL_OFFSET 0x54 +#define ADF_SSMWDTH_OFFSET 0x5C +#define ADF_SSMWDTPKEL_OFFSET 0x58 +#define ADF_SSMWDTPKEH_OFFSET 0x60 +void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); +void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); #endif diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c index 42029153408e..744c40351428 100644 --- a/drivers/crypto/qat/qat_common/adf_init.c +++ b/drivers/crypto/qat/qat_common/adf_init.c @@ -162,6 +162,10 @@ int adf_dev_start(struct adf_accel_dev *accel_dev) return -EFAULT; } + /* Set ssm watch dog timer */ + if (hw_data->set_ssm_wdtimer) + hw_data->set_ssm_wdtimer(accel_dev); + list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); if (service->event_hld(accel_dev, ADF_EVENT_START)) { diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c index c45853463530..e3ad5587be49 100644 --- a/drivers/crypto/qat/qat_common/adf_isr.c +++ b/drivers/crypto/qat/qat_common/adf_isr.c @@ -291,19 +291,32 @@ int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev) ret = adf_isr_alloc_msix_entry_table(accel_dev); if (ret) - return ret; - if (adf_enable_msix(accel_dev)) goto err_out; - if (adf_setup_bh(accel_dev)) - goto err_out; + ret = adf_enable_msix(accel_dev); + if (ret) + goto err_free_msix_table; - if (adf_request_irqs(accel_dev)) - goto err_out; + ret = adf_setup_bh(accel_dev); + if (ret) + goto err_disable_msix; + + ret = adf_request_irqs(accel_dev); + if (ret) + goto err_cleanup_bh; return 0; + +err_cleanup_bh: + adf_cleanup_bh(accel_dev); + +err_disable_msix: + adf_disable_msix(&accel_dev->accel_pci_dev); + +err_free_msix_table: + adf_isr_free_msix_entry_table(accel_dev); + err_out: - adf_isr_resource_free(accel_dev); - return -EFAULT; + return ret; } EXPORT_SYMBOL_GPL(adf_isr_resource_alloc); diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c index 8b090b7ae8c6..a1b77bd7a894 100644 --- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c +++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c @@ -169,7 +169,7 @@ out: * @msg: Message to send * @vf_nr: VF number to which the message will be sent * - * Function sends a messge from the PF to a VF + * Function sends a message from the PF to a VF * * 
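[Editor's aside: the adf_isr_resource_alloc() rework above replaces a single catch-all error path with the usual kernel unwind ladder: each failing step jumps to a label that undoes only what was already set up, and the real error code is propagated instead of a blanket -EFAULT. A minimal sketch of the pattern with stand-in step/undo functions (all names here are illustrative):]

#include <stdio.h>

static int step_a(void) { return 0; }	/* e.g. table allocation */
static int step_b(void) { return 0; }	/* e.g. enabling MSI-X */
static int step_c(void) { return -1; }	/* pretend the last step fails */
static void undo_b(void) { puts("undo b"); }
static void undo_a(void) { puts("undo a"); }

static int resource_alloc(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto err_out;

	ret = step_b();
	if (ret)
		goto err_undo_a;

	ret = step_c();
	if (ret)
		goto err_undo_b;

	return 0;

err_undo_b:
	undo_b();
err_undo_a:
	undo_a();
err_out:
	return ret;	/* propagate the real error, not a blanket code */
}

int main(void)
{
	return resource_alloc() ? 1 : 0;
}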
Return: 0 on success, error code otherwise. */ diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c index 888c1e047295..8ba28409fb74 100644 --- a/drivers/crypto/qat/qat_common/adf_transport.c +++ b/drivers/crypto/qat/qat_common/adf_transport.c @@ -172,6 +172,7 @@ static int adf_init_ring(struct adf_etr_ring_data *ring) dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n"); dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes, ring->base_addr, ring->dma_addr); + ring->base_addr = NULL; return -EFAULT; } diff --git a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c index 2c98fb63f7b7..e85bd62d134a 100644 --- a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c +++ b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c @@ -8,7 +8,7 @@ * adf_vf2pf_init() - send init msg to PF * @accel_dev: Pointer to acceleration VF device. * - * Function sends an init messge from the VF to a PF + * Function sends an init message from the VF to a PF * * Return: 0 on success, error code otherwise. */ @@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(adf_vf2pf_init); * adf_vf2pf_shutdown() - send shutdown msg to PF * @accel_dev: Pointer to acceleration VF device. * - * Function sends a shutdown messge from the VF to a PF + * Function sends a shutdown message from the VF to a PF * * Return: void */ diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c index 38d316a42ba6..888388acb6bd 100644 --- a/drivers/crypto/qat/qat_common/adf_vf_isr.c +++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c @@ -261,17 +261,26 @@ int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev) goto err_out; if (adf_setup_pf2vf_bh(accel_dev)) - goto err_out; + goto err_disable_msi; if (adf_setup_bh(accel_dev)) - goto err_out; + goto err_cleanup_pf2vf_bh; if (adf_request_msi_irq(accel_dev)) - goto err_out; + goto err_cleanup_bh; return 0; + +err_cleanup_bh: + adf_cleanup_bh(accel_dev); + +err_cleanup_pf2vf_bh: + adf_cleanup_pf2vf_bh(accel_dev); + +err_disable_msi: + adf_disable_msi(accel_dev); + err_out: - adf_vf_isr_resource_free(accel_dev); return -EFAULT; } EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc); diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index ff78c73c47e3..f998ed58457c 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -718,8 +718,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, int n = sg_nents(sgl); struct qat_alg_buf_list *bufl; struct qat_alg_buf_list *buflout = NULL; - dma_addr_t blp; - dma_addr_t bloutp = 0; + dma_addr_t blp = DMA_MAPPING_ERROR; + dma_addr_t bloutp = DMA_MAPPING_ERROR; struct scatterlist *sg; size_t sz_out, sz = struct_size(bufl, bufers, n + 1); @@ -731,9 +731,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, if (unlikely(!bufl)) return -ENOMEM; - blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, blp))) - goto err_in; + for_each_sg(sgl, sg, n, i) + bufl->bufers[i].addr = DMA_MAPPING_ERROR; for_each_sg(sgl, sg, n, i) { int y = sg_nctr; @@ -750,6 +749,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, sg_nctr++; } bufl->num_bufs = sg_nctr; + blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, blp))) + goto err_in; qat_req->buf.bl = bufl; qat_req->buf.blp = blp; qat_req->buf.sz = sz; @@ -764,10 +766,11 @@ static int qat_alg_sgl_to_bufl(struct 
qat_crypto_instance *inst, dev_to_node(&GET_DEV(inst->accel_dev))); if (unlikely(!buflout)) goto err_in; - bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, bloutp))) - goto err_out; + bufers = buflout->bufers; + for_each_sg(sglout, sg, n, i) + bufers[i].addr = DMA_MAPPING_ERROR; + for_each_sg(sglout, sg, n, i) { int y = sg_nctr; @@ -784,6 +787,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, } buflout->num_bufs = sg_nctr; buflout->num_mapped_bufs = sg_nctr; + bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, bloutp))) + goto err_out; qat_req->buf.blout = buflout; qat_req->buf.bloutp = bloutp; qat_req->buf.sz_out = sz_out; @@ -795,17 +801,21 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, return 0; err_out: + if (!dma_mapping_error(dev, bloutp)) + dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE); + n = sg_nents(sglout); for (i = 0; i < n; i++) if (!dma_mapping_error(dev, buflout->bufers[i].addr)) dma_unmap_single(dev, buflout->bufers[i].addr, buflout->bufers[i].len, DMA_BIDIRECTIONAL); - if (!dma_mapping_error(dev, bloutp)) - dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE); kfree(buflout); err_in: + if (!dma_mapping_error(dev, blp)) + dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); + n = sg_nents(sgl); for (i = 0; i < n; i++) if (!dma_mapping_error(dev, bufl->bufers[i].addr)) @@ -813,8 +823,6 @@ err_in: bufl->bufers[i].len, DMA_BIDIRECTIONAL); - if (!dma_mapping_error(dev, blp)) - dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); kfree(bufl); dev_err(dev, "Failed to map buf for dma\n"); diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c index c972554a755e..29999da716cc 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c @@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto out_err_free_reg; - set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status); - ret = adf_dev_init(accel_dev); if (ret) goto out_err_dev_shutdown; + set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status); + ret = adf_dev_start(accel_dev); if (ret) goto out_err_dev_stop; diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h index cffa9fc628ff..850f257d00f3 100644 --- a/drivers/crypto/qce/cipher.h +++ b/drivers/crypto/qce/cipher.h @@ -40,7 +40,6 @@ struct qce_cipher_reqctx { struct scatterlist result_sg; struct sg_table dst_tbl; struct scatterlist *dst_sg; - struct sg_table src_tbl; struct scatterlist *src_sg; unsigned int cryptlen; struct skcipher_request fallback_req; // keep at the end diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c index a73db2a5637f..dceb9579d87a 100644 --- a/drivers/crypto/qce/common.c +++ b/drivers/crypto/qce/common.c @@ -140,8 +140,7 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size) return cfg; } -static int qce_setup_regs_ahash(struct crypto_async_request *async_req, - u32 totallen, u32 offset) +static int qce_setup_regs_ahash(struct crypto_async_request *async_req) { struct ahash_request *req = ahash_request_cast(async_req); struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm); @@ -295,19 +294,18 @@ static void qce_xtskey(struct qce_device *qce, const u8 *enckey, { u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0}; unsigned int xtsklen = enckeylen / (2 * sizeof(u32)); - unsigned int xtsdusize; qce_cpu_to_be32p_array((__be32 
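[Editor's aside: the qat_alg_sgl_to_bufl() changes above follow one idea: poison every buffer address with DMA_MAPPING_ERROR before mapping anything, and map the descriptor itself only after it is fully populated, so the shared error path can test each entry and unmap exactly what was mapped. A user-space model of that bookkeeping, with DMA_MAPPING_ERROR modelled as an all-ones sentinel:]

#include <stdio.h>

#define MAPPING_ERROR ((unsigned long)-1)	/* stand-in for DMA_MAPPING_ERROR */

struct buf { unsigned long addr; };

int main(void)
{
	struct buf bufs[4];
	unsigned int i;

	for (i = 0; i < 4; i++)
		bufs[i].addr = MAPPING_ERROR;	/* poison before any mapping */

	for (i = 0; i < 3; i++)			/* pretend 3 of 4 map, then we fail */
		bufs[i].addr = 0x1000 + i;

	/* shared error path: unmap only what really got mapped */
	for (i = 0; i < 4; i++)
		if (bufs[i].addr != MAPPING_ERROR)
			printf("unmap entry %u @ %#lx\n", i, bufs[i].addr);
	return 0;
}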
*)xtskey, enckey + enckeylen / 2, enckeylen / 2); qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen); - /* xts du size 512B */ - xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen); - qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize); + /* Set data unit size to cryptlen. Anything else causes + * crypto engine to return back incorrect results. + */ + qce_write(qce, REG_ENCR_XTS_DU_SIZE, cryptlen); } -static int qce_setup_regs_skcipher(struct crypto_async_request *async_req, - u32 totallen, u32 offset) +static int qce_setup_regs_skcipher(struct crypto_async_request *async_req) { struct skcipher_request *req = skcipher_request_cast(async_req); struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req); @@ -367,7 +365,7 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req, qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg); qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen); - qce_write(qce, REG_ENCR_SEG_START, offset & 0xffff); + qce_write(qce, REG_ENCR_SEG_START, 0); if (IS_CTR(flags)) { qce_write(qce, REG_CNTR_MASK, ~0); @@ -376,7 +374,7 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req, qce_write(qce, REG_CNTR_MASK2, ~0); } - qce_write(qce, REG_SEG_SIZE, totallen); + qce_write(qce, REG_SEG_SIZE, rctx->cryptlen); /* get little endianness */ config = qce_config_reg(qce, 1); @@ -388,17 +386,16 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req, } #endif -int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen, - u32 offset) +int qce_start(struct crypto_async_request *async_req, u32 type) { switch (type) { #ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER case CRYPTO_ALG_TYPE_SKCIPHER: - return qce_setup_regs_skcipher(async_req, totallen, offset); + return qce_setup_regs_skcipher(async_req); #endif #ifdef CONFIG_CRYPTO_DEV_QCE_SHA case CRYPTO_ALG_TYPE_AHASH: - return qce_setup_regs_ahash(async_req, totallen, offset); + return qce_setup_regs_ahash(async_req); #endif default: return -EINVAL; diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h index 85ba16418a04..3bc244bcca2d 100644 --- a/drivers/crypto/qce/common.h +++ b/drivers/crypto/qce/common.h @@ -94,7 +94,6 @@ struct qce_alg_template { void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len); int qce_check_status(struct qce_device *qce, u32 *status); void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step); -int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen, - u32 offset); +int qce_start(struct crypto_async_request *async_req, u32 type); #endif /* _COMMON_H_ */ diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c index 61c418c12345..8e6fcf2c21cc 100644 --- a/drivers/crypto/qce/sha.c +++ b/drivers/crypto/qce/sha.c @@ -12,9 +12,15 @@ #include "core.h" #include "sha.h" -/* crypto hw padding constant for first operation */ -#define SHA_PADDING 64 -#define SHA_PADDING_MASK (SHA_PADDING - 1) +struct qce_sha_saved_state { + u8 pending_buf[QCE_SHA_MAX_BLOCKSIZE]; + u8 partial_digest[QCE_SHA_MAX_DIGESTSIZE]; + __be32 byte_count[2]; + unsigned int pending_buflen; + unsigned int flags; + u64 count; + bool first_blk; +}; static LIST_HEAD(ahash_algs); @@ -107,7 +113,7 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req) qce_dma_issue_pending(&qce->dma); - ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0); + ret = qce_start(async_req, tmpl->crypto_alg_type); if (ret) goto error_terminate; @@ -139,97 +145,37 @@ static int qce_ahash_init(struct 
ahash_request *req) static int qce_ahash_export(struct ahash_request *req, void *out) { - struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct qce_sha_reqctx *rctx = ahash_request_ctx(req); - unsigned long flags = rctx->flags; - unsigned int digestsize = crypto_ahash_digestsize(ahash); - unsigned int blocksize = - crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash)); - - if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) { - struct sha1_state *out_state = out; - - out_state->count = rctx->count; - qce_cpu_to_be32p_array((__be32 *)out_state->state, - rctx->digest, digestsize); - memcpy(out_state->buffer, rctx->buf, blocksize); - } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) { - struct sha256_state *out_state = out; - - out_state->count = rctx->count; - qce_cpu_to_be32p_array((__be32 *)out_state->state, - rctx->digest, digestsize); - memcpy(out_state->buf, rctx->buf, blocksize); - } else { - return -EINVAL; - } + struct qce_sha_saved_state *export_state = out; - return 0; -} - -static int qce_import_common(struct ahash_request *req, u64 in_count, - const u32 *state, const u8 *buffer, bool hmac) -{ - struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct qce_sha_reqctx *rctx = ahash_request_ctx(req); - unsigned int digestsize = crypto_ahash_digestsize(ahash); - unsigned int blocksize; - u64 count = in_count; - - blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash)); - rctx->count = in_count; - memcpy(rctx->buf, buffer, blocksize); - - if (in_count <= blocksize) { - rctx->first_blk = 1; - } else { - rctx->first_blk = 0; - /* - * For HMAC, there is a hardware padding done when first block - * is set. Therefore the byte_count must be incremened by 64 - * after the first block operation. - */ - if (hmac) - count += SHA_PADDING; - } - - rctx->byte_count[0] = (__force __be32)(count & ~SHA_PADDING_MASK); - rctx->byte_count[1] = (__force __be32)(count >> 32); - qce_cpu_to_be32p_array((__be32 *)rctx->digest, (const u8 *)state, - digestsize); - rctx->buflen = (unsigned int)(in_count & (blocksize - 1)); + memcpy(export_state->pending_buf, rctx->buf, rctx->buflen); + memcpy(export_state->partial_digest, rctx->digest, sizeof(rctx->digest)); + export_state->byte_count[0] = rctx->byte_count[0]; + export_state->byte_count[1] = rctx->byte_count[1]; + export_state->pending_buflen = rctx->buflen; + export_state->count = rctx->count; + export_state->first_blk = rctx->first_blk; + export_state->flags = rctx->flags; return 0; } static int qce_ahash_import(struct ahash_request *req, const void *in) { - struct qce_sha_reqctx *rctx; - unsigned long flags; - bool hmac; - int ret; - - ret = qce_ahash_init(req); - if (ret) - return ret; - - rctx = ahash_request_ctx(req); - flags = rctx->flags; - hmac = IS_SHA_HMAC(flags); - - if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) { - const struct sha1_state *state = in; - - ret = qce_import_common(req, state->count, state->state, - state->buffer, hmac); - } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) { - const struct sha256_state *state = in; + struct qce_sha_reqctx *rctx = ahash_request_ctx(req); + const struct qce_sha_saved_state *import_state = in; - ret = qce_import_common(req, state->count, state->state, - state->buf, hmac); - } + memset(rctx, 0, sizeof(*rctx)); + rctx->count = import_state->count; + rctx->buflen = import_state->pending_buflen; + rctx->first_blk = import_state->first_blk; + rctx->flags = import_state->flags; + rctx->byte_count[0] = import_state->byte_count[0]; + rctx->byte_count[1] = import_state->byte_count[1]; + 
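[Editor's aside: the rewritten export/import above drops the per-algorithm sha1_state/sha256_state marshalling in favour of one private snapshot struct that mirrors the request context field for field, which is why .statesize below becomes sizeof(struct qce_sha_saved_state). A reduced sketch of the pattern, with field names shortened and sizes illustrative:]

#include <string.h>
#include <stdint.h>

struct reqctx {			/* driver-private request state */
	uint8_t buf[64];
	uint8_t digest[32];
	unsigned int buflen;
	uint64_t count;
};

struct saved_state {		/* opaque blob handed to the crypto core */
	uint8_t pending_buf[64];
	uint8_t partial_digest[32];
	unsigned int pending_buflen;
	uint64_t count;
};

static void export_state(const struct reqctx *rctx, struct saved_state *out)
{
	memcpy(out->pending_buf, rctx->buf, rctx->buflen);
	memcpy(out->partial_digest, rctx->digest, sizeof(rctx->digest));
	out->pending_buflen = rctx->buflen;
	out->count = rctx->count;
}

static void import_state(struct reqctx *rctx, const struct saved_state *in)
{
	memset(rctx, 0, sizeof(*rctx));	/* start from a clean context */
	rctx->buflen = in->pending_buflen;
	rctx->count = in->count;
	memcpy(rctx->buf, in->pending_buf, rctx->buflen);
	memcpy(rctx->digest, in->partial_digest, sizeof(rctx->digest));
}

int main(void)
{
	struct reqctx a = { .buflen = 8, .count = 8 }, b;
	struct saved_state s;

	export_state(&a, &s);
	import_state(&b, &s);
	return b.count == a.count ? 0 : 1;
}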
memcpy(rctx->buf, import_state->pending_buf, rctx->buflen); + memcpy(rctx->digest, import_state->partial_digest, sizeof(rctx->digest)); - return ret; + return 0; } static int qce_ahash_update(struct ahash_request *req) @@ -270,6 +216,25 @@ static int qce_ahash_update(struct ahash_request *req) /* calculate how many bytes will be hashed later */ hash_later = total % blocksize; + + /* + * At this point, there is more than one block size of data. If + * the available data to transfer is exactly a multiple of block + * size, save the last block to be transferred in qce_ahash_final + * (with the last block bit set) if this is indeed the end of data + * stream. If not this saved block will be transferred as part of + * next update. If this block is not held back and if this is + * indeed the end of data stream, the digest obtained will be wrong + * since qce_ahash_final will see that rctx->buflen is 0 and return + * doing nothing which in turn means that a digest will not be + * copied to the destination result buffer. qce_ahash_final cannot + * be made to alter this behavior and allowed to proceed if + * rctx->buflen is 0 because the crypto engine BAM does not allow + * for zero length transfers. + */ + if (!hash_later) + hash_later = blocksize; + if (hash_later) { unsigned int src_offset = req->nbytes - hash_later; scatterwalk_map_and_copy(rctx->buf, req->src, src_offset, @@ -450,7 +415,7 @@ static const struct qce_ahash_def ahash_def[] = { .drv_name = "sha1-qce", .digestsize = SHA1_DIGEST_SIZE, .blocksize = SHA1_BLOCK_SIZE, - .statesize = sizeof(struct sha1_state), + .statesize = sizeof(struct qce_sha_saved_state), .std_iv = std_iv_sha1, }, { @@ -459,7 +424,7 @@ static const struct qce_ahash_def ahash_def[] = { .drv_name = "sha256-qce", .digestsize = SHA256_DIGEST_SIZE, .blocksize = SHA256_BLOCK_SIZE, - .statesize = sizeof(struct sha256_state), + .statesize = sizeof(struct qce_sha_saved_state), .std_iv = std_iv_sha256, }, { @@ -468,7 +433,7 @@ static const struct qce_ahash_def ahash_def[] = { .drv_name = "hmac-sha1-qce", .digestsize = SHA1_DIGEST_SIZE, .blocksize = SHA1_BLOCK_SIZE, - .statesize = sizeof(struct sha1_state), + .statesize = sizeof(struct qce_sha_saved_state), .std_iv = std_iv_sha1, }, { @@ -477,7 +442,7 @@ static const struct qce_ahash_def ahash_def[] = { .drv_name = "hmac-sha256-qce", .digestsize = SHA256_DIGEST_SIZE, .blocksize = SHA256_BLOCK_SIZE, - .statesize = sizeof(struct sha256_state), + .statesize = sizeof(struct qce_sha_saved_state), .std_iv = std_iv_sha256, }, }; diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c index a2d3da0ad95f..c0a0d8c4fce1 100644 --- a/drivers/crypto/qce/skcipher.c +++ b/drivers/crypto/qce/skcipher.c @@ -8,6 +8,7 @@ #include <linux/interrupt.h> #include <linux/moduleparam.h> #include <linux/types.h> +#include <linux/errno.h> #include <crypto/aes.h> #include <crypto/internal/des.h> #include <crypto/internal/skcipher.h> @@ -143,7 +144,7 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req) qce_dma_issue_pending(&qce->dma); - ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0); + ret = qce_start(async_req, tmpl->crypto_alg_type); if (ret) goto error_terminate; @@ -167,16 +168,33 @@ static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key, struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk); struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); unsigned long flags = to_cipher_tmpl(ablk)->alg_flags; + unsigned int __keylen; int ret; if (!key || !keylen) return -EINVAL; - switch 
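[Editor's aside: the long comment added to qce_ahash_update() above boils down to a hold-back rule: when the pending data is an exact multiple of the block size, keep one full block behind so qce_ahash_final() still has something to send with the last-block bit set. The arithmetic, stand-alone:]

#include <stdio.h>

int main(void)
{
	unsigned int blocksize = 64;
	unsigned int total = 192;		/* exact multiple of the block size */
	unsigned int hash_later = total % blocksize;

	/*
	 * Hold one full block back when the data is block-aligned, so the
	 * final() call still has bytes to send with the last-block bit set.
	 */
	if (!hash_later)
		hash_later = blocksize;

	printf("hash now: %u, keep for final: %u\n", total - hash_later, hash_later);
	return 0;
}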
(IS_XTS(flags) ? keylen >> 1 : keylen) { + /* + * AES XTS key1 = key2 not supported by crypto engine. + * Revisit to request a fallback cipher in this case. + */ + if (IS_XTS(flags)) { + __keylen = keylen >> 1; + if (!memcmp(key, key + __keylen, __keylen)) + return -ENOKEY; + } else { + __keylen = keylen; + } + + switch (__keylen) { case AES_KEYSIZE_128: case AES_KEYSIZE_256: memcpy(ctx->enc_key, key, keylen); break; + case AES_KEYSIZE_192: + break; + default: + return -EINVAL; } ret = crypto_skcipher_setkey(ctx->fallback, key, keylen); @@ -204,12 +222,27 @@ static int qce_des3_setkey(struct crypto_skcipher *ablk, const u8 *key, unsigned int keylen) { struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk); + u32 _key[6]; int err; err = verify_skcipher_des3_key(ablk, key); if (err) return err; + /* + * The crypto engine does not support any two keys + * being the same for triple des algorithms. The + * verify_skcipher_des3_key does not check for all the + * below conditions. Return -ENOKEY in case any two keys + * are the same. Revisit to see if a fallback cipher + * is needed to handle this condition. + */ + memcpy(_key, key, DES3_EDE_KEY_SIZE); + if (!((_key[0] ^ _key[2]) | (_key[1] ^ _key[3])) || + !((_key[2] ^ _key[4]) | (_key[3] ^ _key[5])) || + !((_key[0] ^ _key[4]) | (_key[1] ^ _key[5]))) + return -ENOKEY; + ctx->enc_keylen = keylen; memcpy(ctx->enc_key, key, keylen); return 0; @@ -221,6 +254,7 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt) struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req); struct qce_alg_template *tmpl = to_cipher_tmpl(tfm); + unsigned int blocksize = crypto_skcipher_blocksize(tfm); int keylen; int ret; @@ -228,14 +262,31 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt) rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT; keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen; - /* qce is hanging when AES-XTS request len > QCE_SECTOR_SIZE and - * is not a multiple of it; pass such requests to the fallback + /* CE does not handle 0 length messages */ + if (!req->cryptlen) + return 0; + + /* + * ECB and CBC algorithms require message lengths to be + * multiples of block size. 
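[Editor's aside: both setkey checks above are plain integer comparisons. The XTS check rejects key1 == key2 by comparing the two key halves; the triple-DES check rejects any pair of equal 64-bit subkeys using the same XOR-and-OR trick as the diff. A hedged stand-alone version, with sample keys that are purely illustrative:]

#include <stdint.h>
#include <string.h>

/* nonzero if the two XTS key halves are identical (engine rejects this) */
static int xts_halves_equal(const uint8_t *key, size_t keylen)
{
	size_t half = keylen / 2;

	return memcmp(key, key + half, half) == 0;
}

/* nonzero if any two of the three DES keys match (also rejected) */
static int des3_keys_degenerate(const uint8_t *key /* 24 bytes */)
{
	uint32_t k[6];

	memcpy(k, key, sizeof(k));
	return !((k[0] ^ k[2]) | (k[1] ^ k[3])) ||
	       !((k[2] ^ k[4]) | (k[3] ^ k[5])) ||
	       !((k[0] ^ k[4]) | (k[1] ^ k[5]));
}

int main(void)
{
	uint8_t xts[32] = { 0 };			/* both halves zero: rejected */
	uint8_t des3[24] = { [0] = 1, [8] = 2, [16] = 3 };	/* three distinct keys */

	return (xts_halves_equal(xts, sizeof(xts)) &&
		!des3_keys_degenerate(des3)) ? 0 : 1;
}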
+ */ + if (IS_ECB(rctx->flags) || IS_CBC(rctx->flags)) + if (!IS_ALIGNED(req->cryptlen, blocksize)) + return -EINVAL; + + /* + * Conditions for requesting a fallback cipher + * AES-192 (not supported by crypto engine (CE)) + * AES-XTS request with len <= 512 byte (not recommended to use CE) + * AES-XTS request with len > QCE_SECTOR_SIZE and + * is not a multiple of it.(Revisit this condition to check if it is + * needed in all versions of CE) */ if (IS_AES(rctx->flags) && - (((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) || - req->cryptlen <= aes_sw_max_len) || - (IS_XTS(rctx->flags) && req->cryptlen > QCE_SECTOR_SIZE && - req->cryptlen % QCE_SECTOR_SIZE))) { + ((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) || + (IS_XTS(rctx->flags) && ((req->cryptlen <= aes_sw_max_len) || + (req->cryptlen > QCE_SECTOR_SIZE && + req->cryptlen % QCE_SECTOR_SIZE))))) { skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); skcipher_request_set_callback(&rctx->fallback_req, req->base.flags, @@ -307,7 +358,7 @@ static const struct qce_skcipher_def skcipher_def[] = { .name = "ecb(aes)", .drv_name = "ecb-aes-qce", .blocksize = AES_BLOCK_SIZE, - .ivsize = AES_BLOCK_SIZE, + .ivsize = 0, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, }, diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c index 81befe7febaa..ed03058497bc 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c +++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c @@ -48,7 +48,7 @@ static void rk_ahash_reg_init(struct rk_crypto_info *dev) { struct ahash_request *req = ahash_request_cast(dev->async_req); struct rk_ahash_rctx *rctx = ahash_request_ctx(req); - int reg_status = 0; + int reg_status; reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) | RK_CRYPTO_HASH_FLUSH | _SBF(0xffff, 16); diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index 682c8a450a57..55aa3a71169b 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c @@ -20,6 +20,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/scatterlist.h> @@ -401,7 +402,7 @@ static const struct samsung_aes_variant exynos_aes_data = { static const struct samsung_aes_variant exynos5433_slim_aes_data = { .aes_offset = 0x400, .hash_offset = 0x800, - .clk_names = { "pclk", "aclk", }, + .clk_names = { "aclk", "pclk", }, }; static const struct of_device_id s5p_sss_dt_match[] = { @@ -424,13 +425,9 @@ MODULE_DEVICE_TABLE(of, s5p_sss_dt_match); static inline const struct samsung_aes_variant *find_s5p_sss_version (const struct platform_device *pdev) { - if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) { - const struct of_device_id *match; + if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) + return of_device_get_match_data(&pdev->dev); - match = of_match_node(s5p_sss_dt_match, - pdev->dev.of_node); - return (const struct samsung_aes_variant *)match->data; - } return (const struct samsung_aes_variant *) platform_get_device_id(pdev)->driver_data; } @@ -2159,7 +2156,7 @@ static struct skcipher_alg algs[] = { static int s5p_aes_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - int i, j, err = -ENODEV; + int i, j, err; const struct samsung_aes_variant *variant; struct s5p_aes_dev *pdata; struct resource *res; @@ -2189,14 +2186,14 @@ static int s5p_aes_probe(struct platform_device *pdev) } pdata->res = res; - pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res); + 
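[Editor's aside: of_device_get_match_data() above collapses the usual of_match_node() dance into one call: look up the OF match-table entry for the probing device and hand back its .data pointer. A user-space model of that lookup over a compatible-string table; the struct layout and table contents are illustrative, not the driver's:]

#include <stdio.h>
#include <string.h>

struct variant { const char *clk_names[2]; };

static const struct variant slim_aes = { { "aclk", "pclk" } };

static const struct {
	const char *compatible;
	const void *data;
} match_table[] = {
	{ "samsung,exynos5433-slim-sss", &slim_aes },
	{ NULL, NULL }
};

/* what of_device_get_match_data() does for us, in miniature */
static const void *get_match_data(const char *compatible)
{
	unsigned int i;

	for (i = 0; match_table[i].compatible; i++)
		if (!strcmp(match_table[i].compatible, compatible))
			return match_table[i].data;
	return NULL;
}

int main(void)
{
	const struct variant *v = get_match_data("samsung,exynos5433-slim-sss");

	printf("first clock: %s\n", v ? v->clk_names[0] : "?");
	return 0;
}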
pdata->ioaddr = devm_ioremap_resource(dev, res); if (IS_ERR(pdata->ioaddr)) { if (!pdata->use_hash) return PTR_ERR(pdata->ioaddr); /* try AES without HASH */ res->end -= 0x300; pdata->use_hash = false; - pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res); + pdata->ioaddr = devm_ioremap_resource(dev, res); if (IS_ERR(pdata->ioaddr)) return PTR_ERR(pdata->ioaddr); } diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c index f300b0a5958a..1c6929fb3a13 100644 --- a/drivers/crypto/sa2ul.c +++ b/drivers/crypto/sa2ul.c @@ -69,8 +69,24 @@ /* Max Authentication tag size */ #define SA_MAX_AUTH_TAG_SZ 64 -#define PRIV_ID 0x1 -#define PRIV 0x1 +enum sa_algo_id { + SA_ALG_CBC_AES = 0, + SA_ALG_EBC_AES, + SA_ALG_CBC_DES3, + SA_ALG_ECB_DES3, + SA_ALG_SHA1, + SA_ALG_SHA256, + SA_ALG_SHA512, + SA_ALG_AUTHENC_SHA1_AES, + SA_ALG_AUTHENC_SHA256_AES, +}; + +struct sa_match_data { + u8 priv; + u8 priv_id; + u32 supported_algos; + bool skip_engine_control; +}; static struct device *sa_k3_dev; @@ -696,8 +712,9 @@ static void sa_dump_sc(u8 *buf, dma_addr_t dma_addr) } static -int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key, - u16 enc_key_sz, const u8 *auth_key, u16 auth_key_sz, +int sa_init_sc(struct sa_ctx_info *ctx, const struct sa_match_data *match_data, + const u8 *enc_key, u16 enc_key_sz, + const u8 *auth_key, u16 auth_key_sz, struct algo_data *ad, u8 enc, u32 *swinfo) { int enc_sc_offset = 0; @@ -732,8 +749,8 @@ int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key, sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0; memcpy(&sc_buf[2], &sc_id, 2); sc_buf[4] = 0x0; - sc_buf[5] = PRIV_ID; - sc_buf[6] = PRIV; + sc_buf[5] = match_data->priv_id; + sc_buf[6] = match_data->priv; sc_buf[7] = 0x0; /* Prepare context for encryption engine */ @@ -892,8 +909,8 @@ static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key, return ret; /* Setup Encryption Security Context & Command label template */ - if (sa_init_sc(&ctx->enc, key, keylen, NULL, 0, ad, 1, - &ctx->enc.epib[1])) + if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, key, keylen, NULL, 0, + ad, 1, &ctx->enc.epib[1])) goto badkey; cmdl_len = sa_format_cmdl_gen(&cfg, @@ -905,8 +922,8 @@ static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key, ctx->enc.cmdl_size = cmdl_len; /* Setup Decryption Security Context & Command label template */ - if (sa_init_sc(&ctx->dec, key, keylen, NULL, 0, ad, 0, - &ctx->dec.epib[1])) + if (sa_init_sc(&ctx->dec, ctx->dev_data->match_data, key, keylen, NULL, 0, + ad, 0, &ctx->dec.epib[1])) goto badkey; cfg.enc_eng_id = ad->enc_eng.eng_id; @@ -1106,7 +1123,7 @@ static int sa_run(struct sa_req *req) else dma_rx = pdata->dma_rx1; - ddev = dma_rx->device->dev; + ddev = dmaengine_get_dma_device(pdata->dma_tx); rxd->ddev = ddev; memcpy(cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size); @@ -1146,8 +1163,10 @@ static int sa_run(struct sa_req *req) mapped_sg->sgt.sgl = src; mapped_sg->sgt.orig_nents = src_nents; ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0); - if (ret) + if (ret) { + kfree(rxd); return ret; + } mapped_sg->dir = dir_src; mapped_sg->mapped = true; @@ -1155,8 +1174,10 @@ static int sa_run(struct sa_req *req) mapped_sg->sgt.sgl = req->src; mapped_sg->sgt.orig_nents = sg_nents; ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0); - if (ret) + if (ret) { + kfree(rxd); return ret; + } mapped_sg->dir = dir_src; mapped_sg->mapped = true; @@ -1446,9 +1467,10 @@ static int sa_sha_setup(struct sa_tfm_ctx *ctx, struct algo_data *ad) cfg.akey = NULL; cfg.akey_len = 0; + ctx->dev_data = 
dev_get_drvdata(sa_k3_dev); /* Setup Encryption Security Context & Command label template */ - if (sa_init_sc(&ctx->enc, NULL, 0, NULL, 0, ad, 0, - &ctx->enc.epib[1])) + if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, NULL, 0, NULL, 0, + ad, 0, &ctx->enc.epib[1])) goto badkey; cmdl_len = sa_format_cmdl_gen(&cfg, @@ -1716,6 +1738,7 @@ static int sa_cra_init_aead(struct crypto_aead *tfm, const char *hash, int ret; memzero_explicit(ctx, sizeof(*ctx)); + ctx->dev_data = data; ctx->shash = crypto_alloc_shash(hash, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ctx->shash)) { @@ -1817,8 +1840,8 @@ static int sa_aead_setkey(struct crypto_aead *authenc, cfg.akey_len = keys.authkeylen; /* Setup Encryption Security Context & Command label template */ - if (sa_init_sc(&ctx->enc, keys.enckey, keys.enckeylen, - keys.authkey, keys.authkeylen, + if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, keys.enckey, + keys.enckeylen, keys.authkey, keys.authkeylen, ad, 1, &ctx->enc.epib[1])) return -EINVAL; @@ -1831,8 +1854,8 @@ static int sa_aead_setkey(struct crypto_aead *authenc, ctx->enc.cmdl_size = cmdl_len; /* Setup Decryption Security Context & Command label template */ - if (sa_init_sc(&ctx->dec, keys.enckey, keys.enckeylen, - keys.authkey, keys.authkeylen, + if (sa_init_sc(&ctx->dec, ctx->dev_data->match_data, keys.enckey, + keys.enckeylen, keys.authkey, keys.authkeylen, ad, 0, &ctx->dec.epib[1])) return -EINVAL; @@ -1950,7 +1973,7 @@ static int sa_aead_decrypt(struct aead_request *req) } static struct sa_alg_tmpl sa_algs[] = { - { + [SA_ALG_CBC_AES] = { .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.skcipher = { .base.cra_name = "cbc(aes)", @@ -1973,7 +1996,7 @@ static struct sa_alg_tmpl sa_algs[] = { .decrypt = sa_decrypt, } }, - { + [SA_ALG_EBC_AES] = { .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.skcipher = { .base.cra_name = "ecb(aes)", @@ -1995,7 +2018,7 @@ static struct sa_alg_tmpl sa_algs[] = { .decrypt = sa_decrypt, } }, - { + [SA_ALG_CBC_DES3] = { .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.skcipher = { .base.cra_name = "cbc(des3_ede)", @@ -2018,7 +2041,7 @@ static struct sa_alg_tmpl sa_algs[] = { .decrypt = sa_decrypt, } }, - { + [SA_ALG_ECB_DES3] = { .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.skcipher = { .base.cra_name = "ecb(des3_ede)", @@ -2040,7 +2063,7 @@ static struct sa_alg_tmpl sa_algs[] = { .decrypt = sa_decrypt, } }, - { + [SA_ALG_SHA1] = { .type = CRYPTO_ALG_TYPE_AHASH, .alg.ahash = { .halg.base = { @@ -2069,7 +2092,7 @@ static struct sa_alg_tmpl sa_algs[] = { .import = sa_sha_import, }, }, - { + [SA_ALG_SHA256] = { .type = CRYPTO_ALG_TYPE_AHASH, .alg.ahash = { .halg.base = { @@ -2098,7 +2121,7 @@ static struct sa_alg_tmpl sa_algs[] = { .import = sa_sha_import, }, }, - { + [SA_ALG_SHA512] = { .type = CRYPTO_ALG_TYPE_AHASH, .alg.ahash = { .halg.base = { @@ -2127,7 +2150,7 @@ static struct sa_alg_tmpl sa_algs[] = { .import = sa_sha_import, }, }, - { + [SA_ALG_AUTHENC_SHA1_AES] = { .type = CRYPTO_ALG_TYPE_AEAD, .alg.aead = { .base = { @@ -2154,7 +2177,7 @@ static struct sa_alg_tmpl sa_algs[] = { .decrypt = sa_aead_decrypt, }, }, - { + [SA_ALG_AUTHENC_SHA256_AES] = { .type = CRYPTO_ALG_TYPE_AEAD, .alg.aead = { .base = { @@ -2185,13 +2208,19 @@ static struct sa_alg_tmpl sa_algs[] = { }; /* Register the algorithms in crypto framework */ -static void sa_register_algos(const struct device *dev) +static void sa_register_algos(struct sa_crypto_data *dev_data) { + const struct sa_match_data *match_data = dev_data->match_data; + struct device *dev = dev_data->dev; char *alg_name; u32 type; int i, err; for (i = 0; i 
< ARRAY_SIZE(sa_algs); i++) { + /* Skip unsupported algos */ + if (!(match_data->supported_algos & BIT(i))) + continue; + type = sa_algs[i].type; if (type == CRYPTO_ALG_TYPE_SKCIPHER) { alg_name = sa_algs[i].alg.skcipher.base.cra_name; @@ -2329,14 +2358,39 @@ static int sa_link_child(struct device *dev, void *data) return 0; } +static struct sa_match_data am654_match_data = { + .priv = 1, + .priv_id = 1, + .supported_algos = GENMASK(SA_ALG_AUTHENC_SHA256_AES, 0), +}; + +static struct sa_match_data am64_match_data = { + .priv = 0, + .priv_id = 0, + .supported_algos = BIT(SA_ALG_CBC_AES) | + BIT(SA_ALG_EBC_AES) | + BIT(SA_ALG_SHA256) | + BIT(SA_ALG_SHA512) | + BIT(SA_ALG_AUTHENC_SHA256_AES), + .skip_engine_control = true, +}; + +static const struct of_device_id of_match[] = { + { .compatible = "ti,j721e-sa2ul", .data = &am654_match_data, }, + { .compatible = "ti,am654-sa2ul", .data = &am654_match_data, }, + { .compatible = "ti,am64-sa2ul", .data = &am64_match_data, }, + {}, +}; +MODULE_DEVICE_TABLE(of, of_match); + static int sa_ul_probe(struct platform_device *pdev) { + const struct of_device_id *match; struct device *dev = &pdev->dev; struct device_node *node = dev->of_node; struct resource *res; static void __iomem *saul_base; struct sa_crypto_data *dev_data; - u32 val; int ret; dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL); @@ -2350,7 +2404,7 @@ static int sa_ul_probe(struct platform_device *pdev) dev_set_drvdata(sa_k3_dev, dev_data); pm_runtime_enable(dev); - ret = pm_runtime_get_sync(dev); + ret = pm_runtime_resume_and_get(dev); if (ret < 0) { dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__, ret); @@ -2362,18 +2416,28 @@ static int sa_ul_probe(struct platform_device *pdev) if (ret) goto disable_pm_runtime; + match = of_match_node(of_match, dev->of_node); + if (!match) { + dev_err(dev, "No compatible match found\n"); + return -ENODEV; + } + dev_data->match_data = match->data; + spin_lock_init(&dev_data->scid_lock); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); saul_base = devm_ioremap_resource(dev, res); dev_data->base = saul_base; - val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN | - SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN | - SA_EEC_TRNG_EN; - writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL); + if (!dev_data->match_data->skip_engine_control) { + u32 val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN | + SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN | + SA_EEC_TRNG_EN; - sa_register_algos(dev); + writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL); + } + + sa_register_algos(dev_data); ret = of_platform_populate(node, NULL, NULL, &pdev->dev); if (ret) @@ -2419,13 +2483,6 @@ static int sa_ul_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id of_match[] = { - {.compatible = "ti,j721e-sa2ul",}, - {.compatible = "ti,am654-sa2ul",}, - {}, -}; -MODULE_DEVICE_TABLE(of, of_match); - static struct platform_driver sa_ul_driver = { .probe = sa_ul_probe, .remove = sa_ul_remove, diff --git a/drivers/crypto/sa2ul.h b/drivers/crypto/sa2ul.h index f597ddecde34..ed66d1f111db 100644 --- a/drivers/crypto/sa2ul.h +++ b/drivers/crypto/sa2ul.h @@ -171,9 +171,12 @@ struct sa_tfm_ctx; #define SA_UNSAFE_DATA_SZ_MIN 240 #define SA_UNSAFE_DATA_SZ_MAX 256 +struct sa_match_data; + /** * struct sa_crypto_data - Crypto driver instance data * @base: Base address of the register space + * @soc_data: Pointer to SoC specific data * @pdev: Platform device pointer * @sc_pool: security context pool * @dev: 
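[Editor's aside: two of the sa2ul changes above work together: the sa_algs[] table gains designated indices keyed by enum sa_algo_id, and each SoC's match data carries a supported_algos bitmask, so registration can skip table slots by testing BIT(i). A compact model of that scheme (enum members and names are illustrative):]

#include <stdio.h>

enum algo_id { ALG_CBC_AES, ALG_ECB_AES, ALG_SHA256, ALG_MAX };

static const char * const algs[ALG_MAX] = {
	[ALG_CBC_AES] = "cbc(aes)",	/* designated indices, like sa_algs[] */
	[ALG_ECB_AES] = "ecb(aes)",
	[ALG_SHA256]  = "sha256",
};

int main(void)
{
	/* per-SoC bitmask, like sa_match_data.supported_algos */
	unsigned int supported = (1u << ALG_CBC_AES) | (1u << ALG_SHA256);
	int i;

	for (i = 0; i < ALG_MAX; i++) {
		if (!(supported & (1u << i)))
			continue;	/* skip algos this SoC cannot run */
		printf("registering %s\n", algs[i]);
	}
	return 0;
}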
Device pointer @@ -189,6 +192,7 @@ struct sa_tfm_ctx; */ struct sa_crypto_data { void __iomem *base; + const struct sa_match_data *match_data; struct platform_device *pdev; struct dma_pool *sc_pool; struct device *dev; diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c index 2a4793176c71..7389a0536ff0 100644 --- a/drivers/crypto/stm32/stm32-cryp.c +++ b/drivers/crypto/stm32/stm32-cryp.c @@ -542,7 +542,7 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp) int ret; u32 cfg, hw_mode; - pm_runtime_get_sync(cryp->dev); + pm_runtime_resume_and_get(cryp->dev); /* Disable interrupt */ stm32_cryp_write(cryp, CRYP_IMSCR, 0); @@ -2043,7 +2043,7 @@ static int stm32_cryp_remove(struct platform_device *pdev) if (!cryp) return -ENODEV; - ret = pm_runtime_get_sync(cryp->dev); + ret = pm_runtime_resume_and_get(cryp->dev); if (ret < 0) return ret; diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index 7ac0573ef663..389de9e3302d 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ -813,7 +813,7 @@ static void stm32_hash_finish_req(struct ahash_request *req, int err) static int stm32_hash_hw_init(struct stm32_hash_dev *hdev, struct stm32_hash_request_ctx *rctx) { - pm_runtime_get_sync(hdev->dev); + pm_runtime_resume_and_get(hdev->dev); if (!(HASH_FLAGS_INIT & hdev->flags)) { stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT); @@ -962,7 +962,7 @@ static int stm32_hash_export(struct ahash_request *req, void *out) u32 *preg; unsigned int i; - pm_runtime_get_sync(hdev->dev); + pm_runtime_resume_and_get(hdev->dev); while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY)) cpu_relax(); @@ -1000,7 +1000,7 @@ static int stm32_hash_import(struct ahash_request *req, const void *in) preg = rctx->hw_context; - pm_runtime_get_sync(hdev->dev); + pm_runtime_resume_and_get(hdev->dev); stm32_hash_write(hdev, HASH_IMR, *preg++); stm32_hash_write(hdev, HASH_STR, *preg++); @@ -1566,7 +1566,7 @@ static int stm32_hash_remove(struct platform_device *pdev) if (!hdev) return -ENODEV; - ret = pm_runtime_get_sync(hdev->dev); + ret = pm_runtime_resume_and_get(hdev->dev); if (ret < 0) return ret; diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c index 9866c2a5e9a7..759d0d9786fd 100644 --- a/drivers/crypto/ux500/cryp/cryp.c +++ b/drivers/crypto/ux500/cryp/cryp.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * Copyright (C) ST-Ericsson SA 2010 * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson. * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson. @@ -15,7 +15,7 @@ #include "cryp_p.h" #include "cryp.h" -/** +/* * cryp_wait_until_done - wait until the device logic is not busy */ void cryp_wait_until_done(struct cryp_device_data *device_data) @@ -285,6 +285,7 @@ int cryp_configure_init_vector(struct cryp_device_data *device_data, * other device context parameter * @device_data: Pointer to the device data struct for base address. 
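[Editor's aside: the pm_runtime_resume_and_get() conversions in the stm32 hunks above are about error-path bookkeeping: unlike pm_runtime_get_sync(), which leaves the usage counter raised even when the resume fails, resume_and_get drops the reference on failure, so callers no longer need a put in the error path. A toy model of the difference, refcount only, no real power management:]

#include <stdio.h>

static int resume_ok;	/* pretend the device fails to resume when 0 */

static int get_sync(int *usage)
{
	(*usage)++;			/* reference taken unconditionally */
	return resume_ok ? 0 : -5;	/* -EIO stand-in */
}

static int resume_and_get(int *usage)
{
	int ret = get_sync(usage);

	if (ret < 0)
		(*usage)--;	/* drop the reference on failure */
	return ret;
}

int main(void)
{
	int usage = 0;

	resume_and_get(&usage);
	printf("usage after failed resume_and_get: %d\n", usage);	/* 0 */
	return 0;
}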
* @ctx: Crypto device context + * @cryp_mode: Mode: Polling, Interrupt or DMA */ void cryp_save_device_context(struct cryp_device_data *device_data, struct cryp_device_context *ctx, diff --git a/drivers/crypto/ux500/cryp/cryp.h b/drivers/crypto/ux500/cryp/cryp.h index 8da7f87b339b..db5713d7c940 100644 --- a/drivers/crypto/ux500/cryp/cryp.h +++ b/drivers/crypto/ux500/cryp/cryp.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/** +/* * Copyright (C) ST-Ericsson SA 2010 * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson. * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson. diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index c3adeb2e5823..30cdd5253929 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * Copyright (C) ST-Ericsson SA 2010 * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson. * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson. @@ -62,7 +62,7 @@ struct cryp_driver_data { /** * struct cryp_ctx - Crypto context * @config: Crypto mode. - * @key[CRYP_MAX_KEY_SIZE]: Key. + * @key: Key array. * @keylen: Length of key. * @iv: Pointer to initialization vector. * @indata: Pointer to indata. @@ -73,6 +73,7 @@ struct cryp_driver_data { * @updated: Updated flag. * @dev_ctx: Device dependent context. * @device: Pointer to the device. + * @session_id: Atomic session ID. */ struct cryp_ctx { struct cryp_config config; @@ -608,12 +609,12 @@ static void cryp_dma_done(struct cryp_ctx *ctx) chan = ctx->device->dma.chan_mem2cryp; dmaengine_terminate_all(chan); dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src, - ctx->device->dma.sg_src_len, DMA_TO_DEVICE); + ctx->device->dma.nents_src, DMA_TO_DEVICE); chan = ctx->device->dma.chan_cryp2mem; dmaengine_terminate_all(chan); dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst, - ctx->device->dma.sg_dst_len, DMA_FROM_DEVICE); + ctx->device->dma.nents_dst, DMA_FROM_DEVICE); } static int cryp_dma_write(struct cryp_ctx *ctx, struct scatterlist *sg, @@ -1290,7 +1291,6 @@ static int ux500_cryp_probe(struct platform_device *pdev) device_data->phybase = res->start; device_data->base = devm_ioremap_resource(dev, res); if (IS_ERR(device_data->base)) { - dev_err(dev, "[%s]: ioremap failed!", __func__); ret = PTR_ERR(device_data->base); goto out; } diff --git a/drivers/crypto/ux500/cryp/cryp_irq.c b/drivers/crypto/ux500/cryp/cryp_irq.c index 7ebde69e8c76..6d2f07bec98a 100644 --- a/drivers/crypto/ux500/cryp/cryp_irq.c +++ b/drivers/crypto/ux500/cryp/cryp_irq.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * Copyright (C) ST-Ericsson SA 2010 * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson. * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson. diff --git a/drivers/crypto/ux500/cryp/cryp_irq.h b/drivers/crypto/ux500/cryp/cryp_irq.h index 1984f30100ff..da90029ea141 100644 --- a/drivers/crypto/ux500/cryp/cryp_irq.h +++ b/drivers/crypto/ux500/cryp/cryp_irq.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/** +/* * Copyright (C) ST-Ericsson SA 2010 * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson. * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson. 
@@ -19,7 +19,7 @@ enum cryp_irq_src_id { CRYP_IRQ_SRC_ALL = 0x3 }; -/** +/* * M0 Funtions */ void cryp_enable_irq_src(struct cryp_device_data *device_data, u32 irq_src); diff --git a/drivers/crypto/ux500/cryp/cryp_irqp.h b/drivers/crypto/ux500/cryp/cryp_irqp.h index 879ed68a12d7..4981a3f461e5 100644 --- a/drivers/crypto/ux500/cryp/cryp_irqp.h +++ b/drivers/crypto/ux500/cryp/cryp_irqp.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/** +/* * Copyright (C) ST-Ericsson SA 2010 * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson. * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson. @@ -13,7 +13,7 @@ #include "cryp_irq.h" -/** +/* * * CRYP Registers - Offset mapping * +-----------------+ diff --git a/drivers/crypto/ux500/cryp/cryp_p.h b/drivers/crypto/ux500/cryp/cryp_p.h index 0df84eaa8531..60b47fe4de35 100644 --- a/drivers/crypto/ux500/cryp/cryp_p.h +++ b/drivers/crypto/ux500/cryp/cryp_p.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/** +/* * Copyright (C) ST-Ericsson SA 2010 * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson. * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson. @@ -17,7 +17,7 @@ #include "cryp.h" #include "cryp_irqp.h" -/** +/* * Generic Macros */ #define CRYP_SET_BITS(reg_name, mask) \ @@ -34,7 +34,7 @@ writel_relaxed(((readl_relaxed(reg) & ~(mask)) | \ (((u32)val << shift) & (mask))), reg) -/** +/* * CRYP specific Macros */ #define CRYP_PERIPHERAL_ID0 0xE3 @@ -48,7 +48,7 @@ #define CRYP_PCELL_ID2 0x05 #define CRYP_PCELL_ID3 0xB1 -/** +/* * CRYP register default values */ #define MAX_DEVICE_SUPPORT 2 @@ -62,7 +62,7 @@ #define CRYP_KEY_DEFAULT 0x0 #define CRYP_INIT_VECT_DEFAULT 0x0 -/** +/* * CRYP Control register specific mask */ #define CRYP_CR_SECURE_MASK BIT(0) @@ -81,7 +81,6 @@ CRYP_CR_PRLG_MASK |\ CRYP_CR_ALGODIR_MASK |\ CRYP_CR_ALGOMODE_MASK |\ - CRYP_CR_DATATYPE_MASK |\ CRYP_CR_KEYSIZE_MASK |\ CRYP_CR_KEYRDEN_MASK |\ CRYP_CR_DATATYPE_MASK) @@ -91,7 +90,7 @@ #define CRYP_SR_IFEM_MASK BIT(0) #define CRYP_SR_BUSY_MASK BIT(4) -/** +/* * Bit position used while setting bits in register */ #define CRYP_CR_PRLG_POS 1 @@ -107,7 +106,7 @@ #define CRYP_SR_BUSY_POS 4 -/** +/* * CRYP PCRs------PC_NAND control register * BIT_MASK */ diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index da284b0ea1b2..ecb7412e84e3 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -190,7 +190,7 @@ static void hash_dma_done(struct hash_ctx *ctx) chan = ctx->device->dma.chan_mem2hash; dmaengine_terminate_all(chan); dma_unmap_sg(chan->device->dev, ctx->device->dma.sg, - ctx->device->dma.sg_len, DMA_TO_DEVICE); + ctx->device->dma.nents, DMA_TO_DEVICE); } static int hash_dma_write(struct hash_ctx *ctx, @@ -356,7 +356,7 @@ out: /** * hash_get_device_data - Checks for an available hash device and return it. - * @hash_ctx: Structure for the hash context. + * @ctx: Structure for the hash context. * @device_data: Structure for the hash device. * * This function check for an available hash device and return it to @@ -542,7 +542,7 @@ static bool hash_dma_valid_data(struct scatterlist *sg, int datasize) } /** - * hash_init - Common hash init function for SHA1/SHA2 (SHA256). + * ux500_hash_init - Common hash init function for SHA1/SHA2 (SHA256). * @req: The hash request for the job. * * Initialize structures. @@ -585,6 +585,7 @@ static int ux500_hash_init(struct ahash_request *req) * @device_data: Structure for the hash device. 
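[Editor's aside, an inference from the sg_len to nents renames above in both the ux500 cryp and hash hunks: they encode a DMA API rule that dma_map_sg() may coalesce entries and return a smaller count, while dma_unmap_sg() must still be called with the original nents passed to the map call. A sketch of that bookkeeping; the merge behaviour here is simulated:]

#include <stdio.h>

struct sg_mapping {
	int orig_nents;		/* what we passed to dma_map_sg() */
	int mapped_nents;	/* what dma_map_sg() returned */
};

static struct sg_mapping map_sg(int nents)
{
	/* pretend the IOMMU merged two adjacent entries */
	struct sg_mapping m = { .orig_nents = nents, .mapped_nents = nents - 1 };
	return m;
}

static void unmap_sg(const struct sg_mapping *m)
{
	/* must use orig_nents here, never mapped_nents */
	printf("unmapping %d entries\n", m->orig_nents);
}

int main(void)
{
	struct sg_mapping m = map_sg(4);

	unmap_sg(&m);
	return 0;
}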
* @message: Block (512 bits) of message to be written to * the HASH hardware. + * @length: Message length * */ static void hash_processblock(struct hash_device_data *device_data, @@ -1295,7 +1296,7 @@ void hash_get_digest(struct hash_device_data *device_data, } /** - * hash_update - The hash update function for SHA1/SHA2 (SHA256). + * ahash_update - The hash update function for SHA1/SHA2 (SHA256). * @req: The hash request for the job. */ static int ahash_update(struct ahash_request *req) @@ -1315,7 +1316,7 @@ static int ahash_update(struct ahash_request *req) } /** - * hash_final - The hash final function for SHA1/SHA2 (SHA256). + * ahash_final - The hash final function for SHA1/SHA2 (SHA256). * @req: The hash request for the job. */ static int ahash_final(struct ahash_request *req) @@ -1615,9 +1616,6 @@ static struct hash_algo_template hash_algs[] = { } }; -/** - * hash_algs_register_all - - */ static int ahash_algs_register_all(struct hash_device_data *device_data) { int ret; @@ -1640,9 +1638,6 @@ unreg: return ret; } -/** - * hash_algs_unregister_all - - */ static void ahash_algs_unregister_all(struct hash_device_data *device_data) { int i; @@ -1681,7 +1676,6 @@ static int ux500_hash_probe(struct platform_device *pdev) device_data->phybase = res->start; device_data->base = devm_ioremap_resource(dev, res); if (IS_ERR(device_data->base)) { - dev_err(dev, "%s: ioremap() failed!\n", __func__); ret = PTR_ERR(device_data->base); goto out; } diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c index d05c02baebcf..ec06189fbf99 100644 --- a/drivers/crypto/vmx/aes.c +++ b/drivers/crypto/vmx/aes.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * AES routines supporting VMX instructions on the Power 8 * * Copyright (C) 2015 International Business Machines Inc. diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c index d88084447f1c..ed0debc7acb5 100644 --- a/drivers/crypto/vmx/aes_cbc.c +++ b/drivers/crypto/vmx/aes_cbc.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * AES CBC routines supporting VMX instructions on the Power 8 * * Copyright (C) 2015 International Business Machines Inc. diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c index 79ba062ee1c1..9a3da8cd62f3 100644 --- a/drivers/crypto/vmx/aes_ctr.c +++ b/drivers/crypto/vmx/aes_ctr.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * AES CTR routines supporting VMX instructions on the Power 8 * * Copyright (C) 2015 International Business Machines Inc. diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c index 9fee1b1532a4..dabbccb41550 100644 --- a/drivers/crypto/vmx/aes_xts.c +++ b/drivers/crypto/vmx/aes_xts.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * AES XTS routines supporting VMX In-core instructions on Power 8 * * Copyright (C) 2015 International Business Machines Inc. diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c index 14807ac2e3b9..5bc5710a6de0 100644 --- a/drivers/crypto/vmx/ghash.c +++ b/drivers/crypto/vmx/ghash.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/** +/* * GHASH routines supporting VMX instructions on the Power 8 * * Copyright (C) 2015, 2019 International Business Machines Inc. 
diff --git a/drivers/crypto/vmx/vmx.c b/drivers/crypto/vmx/vmx.c index a40d08e75fc0..7eb713cc87c8 100644 --- a/drivers/crypto/vmx/vmx.c +++ b/drivers/crypto/vmx/vmx.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * Routines supporting VMX instructions on the Power 8 * * Copyright (C) 2015 International Business Machines Inc. diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c index 244cb7d89678..2acc6173da36 100644 --- a/drivers/cxl/mem.c +++ b/drivers/cxl/mem.c @@ -4,6 +4,7 @@ #include <linux/security.h> #include <linux/debugfs.h> #include <linux/module.h> +#include <linux/sizes.h> #include <linux/mutex.h> #include <linux/cdev.h> #include <linux/idr.h> @@ -96,21 +97,18 @@ struct mbox_cmd { * @dev: driver core device object * @cdev: char dev core object for ioctl operations * @cxlm: pointer to the parent device driver data - * @ops_active: active user of @cxlm in ops handlers - * @ops_dead: completion when all @cxlm ops users have exited * @id: id number of this memdev instance. */ struct cxl_memdev { struct device dev; struct cdev cdev; struct cxl_mem *cxlm; - struct percpu_ref ops_active; - struct completion ops_dead; int id; }; static int cxl_mem_major; static DEFINE_IDA(cxl_memdev_ida); +static DECLARE_RWSEM(cxl_memdev_rwsem); static struct dentry *cxl_debugfs; static bool cxl_raw_allow_all; @@ -169,7 +167,7 @@ struct cxl_mem_command { * table will be validated against the user's input. For example, if size_in is * 0, and the user passed in 1, it is an error. */ -static struct cxl_mem_command mem_commands[] = { +static struct cxl_mem_command mem_commands[CXL_MEM_COMMAND_ID_MAX] = { CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE), #ifdef CONFIG_CXL_MEM_RAW_COMMANDS CXL_CMD(RAW, ~0, ~0, 0), @@ -776,26 +774,43 @@ static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd, static long cxl_memdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - struct cxl_memdev *cxlmd; - struct inode *inode; - int rc = -ENOTTY; + struct cxl_memdev *cxlmd = file->private_data; + int rc = -ENXIO; - inode = file_inode(file); - cxlmd = container_of(inode->i_cdev, typeof(*cxlmd), cdev); + down_read(&cxl_memdev_rwsem); + if (cxlmd->cxlm) + rc = __cxl_memdev_ioctl(cxlmd, cmd, arg); + up_read(&cxl_memdev_rwsem); - if (!percpu_ref_tryget_live(&cxlmd->ops_active)) - return -ENXIO; + return rc; +} - rc = __cxl_memdev_ioctl(cxlmd, cmd, arg); +static int cxl_memdev_open(struct inode *inode, struct file *file) +{ + struct cxl_memdev *cxlmd = + container_of(inode->i_cdev, typeof(*cxlmd), cdev); - percpu_ref_put(&cxlmd->ops_active); + get_device(&cxlmd->dev); + file->private_data = cxlmd; - return rc; + return 0; +} + +static int cxl_memdev_release_file(struct inode *inode, struct file *file) +{ + struct cxl_memdev *cxlmd = + container_of(inode->i_cdev, typeof(*cxlmd), cdev); + + put_device(&cxlmd->dev); + + return 0; } static const struct file_operations cxl_memdev_fops = { .owner = THIS_MODULE, .unlocked_ioctl = cxl_memdev_ioctl, + .open = cxl_memdev_open, + .release = cxl_memdev_release_file, .compat_ioctl = compat_ptr_ioctl, .llseek = noop_llseek, }; @@ -984,7 +999,7 @@ static struct cxl_mem *cxl_mem_create(struct pci_dev *pdev, u32 reg_lo, return NULL; } - offset = ((u64)reg_hi << 32) | FIELD_GET(CXL_REGLOC_ADDR_MASK, reg_lo); + offset = ((u64)reg_hi << 32) | (reg_lo & CXL_REGLOC_ADDR_MASK); bar = FIELD_GET(CXL_REGLOC_BIR_MASK, reg_lo); /* Basic sanity check that BAR is big enough */ @@ -1049,7 +1064,6 @@ static void cxl_memdev_release(struct device *dev) { 
struct cxl_memdev *cxlmd = to_cxl_memdev(dev); - percpu_ref_exit(&cxlmd->ops_active); ida_free(&cxl_memdev_ida, cxlmd->id); kfree(cxlmd); } @@ -1066,7 +1080,7 @@ static ssize_t firmware_version_show(struct device *dev, struct cxl_memdev *cxlmd = to_cxl_memdev(dev); struct cxl_mem *cxlm = cxlmd->cxlm; - return sprintf(buf, "%.16s\n", cxlm->firmware_version); + return sysfs_emit(buf, "%.16s\n", cxlm->firmware_version); } static DEVICE_ATTR_RO(firmware_version); @@ -1076,7 +1090,7 @@ static ssize_t payload_max_show(struct device *dev, struct cxl_memdev *cxlmd = to_cxl_memdev(dev); struct cxl_mem *cxlm = cxlmd->cxlm; - return sprintf(buf, "%zu\n", cxlm->payload_size); + return sysfs_emit(buf, "%zu\n", cxlm->payload_size); } static DEVICE_ATTR_RO(payload_max); @@ -1087,7 +1101,7 @@ static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr, struct cxl_mem *cxlm = cxlmd->cxlm; unsigned long long len = range_len(&cxlm->ram_range); - return sprintf(buf, "%#llx\n", len); + return sysfs_emit(buf, "%#llx\n", len); } static struct device_attribute dev_attr_ram_size = @@ -1100,7 +1114,7 @@ static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr, struct cxl_mem *cxlm = cxlmd->cxlm; unsigned long long len = range_len(&cxlm->pmem_range); - return sprintf(buf, "%#llx\n", len); + return sysfs_emit(buf, "%#llx\n", len); } static struct device_attribute dev_attr_pmem_size = @@ -1150,27 +1164,24 @@ static const struct device_type cxl_memdev_type = { .groups = cxl_memdev_attribute_groups, }; -static void cxlmdev_unregister(void *_cxlmd) +static void cxl_memdev_shutdown(struct cxl_memdev *cxlmd) { - struct cxl_memdev *cxlmd = _cxlmd; - struct device *dev = &cxlmd->dev; - - percpu_ref_kill(&cxlmd->ops_active); - cdev_device_del(&cxlmd->cdev, dev); - wait_for_completion(&cxlmd->ops_dead); + down_write(&cxl_memdev_rwsem); cxlmd->cxlm = NULL; - put_device(dev); + up_write(&cxl_memdev_rwsem); } -static void cxlmdev_ops_active_release(struct percpu_ref *ref) +static void cxl_memdev_unregister(void *_cxlmd) { - struct cxl_memdev *cxlmd = - container_of(ref, typeof(*cxlmd), ops_active); + struct cxl_memdev *cxlmd = _cxlmd; + struct device *dev = &cxlmd->dev; - complete(&cxlmd->ops_dead); + cdev_device_del(&cxlmd->cdev, dev); + cxl_memdev_shutdown(cxlmd); + put_device(dev); } -static int cxl_mem_add_memdev(struct cxl_mem *cxlm) +static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm) { struct pci_dev *pdev = cxlm->pdev; struct cxl_memdev *cxlmd; @@ -1180,22 +1191,11 @@ static int cxl_mem_add_memdev(struct cxl_mem *cxlm) cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL); if (!cxlmd) - return -ENOMEM; - init_completion(&cxlmd->ops_dead); - - /* - * @cxlm is deallocated when the driver unbinds so operations - * that are using it need to hold a live reference. 
- */ - cxlmd->cxlm = cxlm; - rc = percpu_ref_init(&cxlmd->ops_active, cxlmdev_ops_active_release, 0, - GFP_KERNEL); - if (rc) - goto err_ref; + return ERR_PTR(-ENOMEM); rc = ida_alloc_range(&cxl_memdev_ida, 0, CXL_MEM_MAX_DEVS, GFP_KERNEL); if (rc < 0) - goto err_id; + goto err; cxlmd->id = rc; dev = &cxlmd->dev; @@ -1204,30 +1204,54 @@ static int cxl_mem_add_memdev(struct cxl_mem *cxlm) dev->bus = &cxl_bus_type; dev->devt = MKDEV(cxl_mem_major, cxlmd->id); dev->type = &cxl_memdev_type; - dev_set_name(dev, "mem%d", cxlmd->id); + device_set_pm_not_required(dev); cdev = &cxlmd->cdev; cdev_init(cdev, &cxl_memdev_fops); + return cxlmd; + +err: + kfree(cxlmd); + return ERR_PTR(rc); +} + +static int cxl_mem_add_memdev(struct cxl_mem *cxlm) +{ + struct cxl_memdev *cxlmd; + struct device *dev; + struct cdev *cdev; + int rc; + + cxlmd = cxl_memdev_alloc(cxlm); + if (IS_ERR(cxlmd)) + return PTR_ERR(cxlmd); + + dev = &cxlmd->dev; + rc = dev_set_name(dev, "mem%d", cxlmd->id); + if (rc) + goto err; + + /* + * Activate ioctl operations, no cxl_memdev_rwsem manipulation + * needed as this is ordered with cdev_add() publishing the device. + */ + cxlmd->cxlm = cxlm; + cdev = &cxlmd->cdev; rc = cdev_device_add(cdev, dev); if (rc) - goto err_add; + goto err; - return devm_add_action_or_reset(dev->parent, cxlmdev_unregister, cxlmd); + return devm_add_action_or_reset(dev->parent, cxl_memdev_unregister, + cxlmd); -err_add: - ida_free(&cxl_memdev_ida, cxlmd->id); -err_id: +err: /* - * Theoretically userspace could have already entered the fops, - * so flush ops_active. + * The cdev was briefly live; shut down any ioctl operations that + * saw that state. */ - percpu_ref_kill(&cxlmd->ops_active); - wait_for_completion(&cxlmd->ops_dead); - percpu_ref_exit(&cxlmd->ops_active); -err_ref: - kfree(cxlmd); - + cxl_memdev_shutdown(cxlmd); + put_device(dev); return rc; } @@ -1396,6 +1420,7 @@ out: */ static int cxl_mem_identify(struct cxl_mem *cxlm) { + /* See CXL 2.0 Table 175 Identify Memory Device Output Payload */ struct cxl_mbox_identify { char fw_revision[0x10]; __le64 total_capacity; @@ -1424,10 +1449,11 @@ static int cxl_mem_identify(struct cxl_mem *cxlm) * For now, only the capacity is exported in sysfs */ cxlm->ram_range.start = 0; - cxlm->ram_range.end = le64_to_cpu(id.volatile_capacity) - 1; + cxlm->ram_range.end = le64_to_cpu(id.volatile_capacity) * SZ_256M - 1; cxlm->pmem_range.start = 0; - cxlm->pmem_range.end = le64_to_cpu(id.persistent_capacity) - 1; + cxlm->pmem_range.end = + le64_to_cpu(id.persistent_capacity) * SZ_256M - 1; memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision)); diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c index 452e85ae87a8..5aee26e1bbd6 100644 --- a/drivers/dax/bus.c +++ b/drivers/dax/bus.c @@ -90,13 +90,11 @@ static ssize_t do_id_store(struct device_driver *drv, const char *buf, list_add(&dax_id->list, &dax_drv->ids); } else rc = -ENOMEM; - } else - /* nothing to remove */; + } } else if (action == ID_REMOVE) { list_del(&dax_id->list); kfree(dax_id); - } else - /* dax_id already added */; + } mutex_unlock(&dax_bus_lock); if (rc < 0) diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index fe6a460c4373..af3ee288bc11 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -1086,6 +1086,7 @@ static int __dma_async_device_channel_register(struct dma_device *device, kfree(chan->dev); err_free_local: free_percpu(chan->local); + chan->local = NULL; return rc; } diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig index
e5162690de8f..db25f9b7778c 100644 --- a/drivers/dma/dw/Kconfig +++ b/drivers/dma/dw/Kconfig @@ -10,6 +10,7 @@ config DW_DMAC_CORE config DW_DMAC tristate "Synopsys DesignWare AHB DMA platform driver" + depends on HAS_IOMEM select DW_DMAC_CORE help Support the Synopsys DesignWare AHB DMA controller. This @@ -18,6 +19,7 @@ config DW_DMAC config DW_DMAC_PCI tristate "Synopsys DesignWare AHB DMA PCI driver" depends on PCI + depends on HAS_IOMEM select DW_DMAC_CORE help Support the Synopsys DesignWare AHB DMA controller on the diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index 84a6ea60ecf0..31c819544a22 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -282,6 +282,22 @@ void idxd_wq_drain(struct idxd_wq *wq) idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL); } +void idxd_wq_reset(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct device *dev = &idxd->pdev->dev; + u32 operand; + + if (wq->state != IDXD_WQ_ENABLED) { + dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state); + return; + } + + operand = BIT(wq->id % 16) | ((wq->id / 16) << 16); + idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL); + wq->state = IDXD_WQ_DISABLED; +} + int idxd_wq_map_portal(struct idxd_wq *wq) { struct idxd_device *idxd = wq->idxd; @@ -363,8 +379,6 @@ int idxd_wq_disable_pasid(struct idxd_wq *wq) void idxd_wq_disable_cleanup(struct idxd_wq *wq) { struct idxd_device *idxd = wq->idxd; - struct device *dev = &idxd->pdev->dev; - int i, wq_offset; lockdep_assert_held(&idxd->dev_lock); memset(wq->wqcfg, 0, idxd->wqcfg_size); @@ -376,14 +390,6 @@ void idxd_wq_disable_cleanup(struct idxd_wq *wq) wq->ats_dis = 0; clear_bit(WQ_FLAG_DEDICATED, &wq->flags); memset(wq->name, 0, WQ_NAME_SIZE); - - for (i = 0; i < WQCFG_STRIDES(idxd); i++) { - wq_offset = WQCFG_OFFSET(idxd, wq->id, i); - iowrite32(0, idxd->reg_base + wq_offset); - dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", - wq->id, i, wq_offset, - ioread32(idxd->reg_base + wq_offset)); - } } /* Device control bits */ @@ -574,6 +580,36 @@ void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid) } /* Device configuration bits */ +void idxd_msix_perm_setup(struct idxd_device *idxd) +{ + union msix_perm mperm; + int i, msixcnt; + + msixcnt = pci_msix_vec_count(idxd->pdev); + if (msixcnt < 0) + return; + + mperm.bits = 0; + mperm.pasid = idxd->pasid; + mperm.pasid_en = device_pasid_enabled(idxd); + for (i = 1; i < msixcnt; i++) + iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8); +} + +void idxd_msix_perm_clear(struct idxd_device *idxd) +{ + union msix_perm mperm; + int i, msixcnt; + + msixcnt = pci_msix_vec_count(idxd->pdev); + if (msixcnt < 0) + return; + + mperm.bits = 0; + for (i = 1; i < msixcnt; i++) + iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8); +} + static void idxd_group_config_write(struct idxd_group *group) { struct idxd_device *idxd = group->idxd; @@ -642,7 +678,14 @@ static int idxd_wq_config_write(struct idxd_wq *wq) if (!wq->group) return 0; - memset(wq->wqcfg, 0, idxd->wqcfg_size); + /* + * Instead of memsetting the entire shadow copy of WQCFG, read it back from the hardware after + * wq reset. This restores the sticky values that are present on some devices.
+ */ + for (i = 0; i < WQCFG_STRIDES(idxd); i++) { + wq_offset = WQCFG_OFFSET(idxd, wq->id, i); + wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset); + } /* byte 0-3 */ wq->wqcfg->wq_size = wq->size; diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h index 81a0e65fd316..76014c14f473 100644 --- a/drivers/dma/idxd/idxd.h +++ b/drivers/dma/idxd/idxd.h @@ -316,6 +316,8 @@ void idxd_unregister_driver(void); struct bus_type *idxd_get_bus_type(struct idxd_device *idxd); /* device interrupt control */ +void idxd_msix_perm_setup(struct idxd_device *idxd); +void idxd_msix_perm_clear(struct idxd_device *idxd); irqreturn_t idxd_irq_handler(int vec, void *data); irqreturn_t idxd_misc_thread(int vec, void *data); irqreturn_t idxd_wq_thread(int irq, void *data); @@ -341,6 +343,7 @@ void idxd_wq_free_resources(struct idxd_wq *wq); int idxd_wq_enable(struct idxd_wq *wq); int idxd_wq_disable(struct idxd_wq *wq); void idxd_wq_drain(struct idxd_wq *wq); +void idxd_wq_reset(struct idxd_wq *wq); int idxd_wq_map_portal(struct idxd_wq *wq); void idxd_wq_unmap_portal(struct idxd_wq *wq); void idxd_wq_disable_cleanup(struct idxd_wq *wq); diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c index 085a0c3b62c6..6584b0ec07d5 100644 --- a/drivers/dma/idxd/init.c +++ b/drivers/dma/idxd/init.c @@ -65,7 +65,6 @@ static int idxd_setup_interrupts(struct idxd_device *idxd) struct idxd_irq_entry *irq_entry; int i, msixcnt; int rc = 0; - union msix_perm mperm; msixcnt = pci_msix_vec_count(pdev); if (msixcnt < 0) { @@ -144,14 +143,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd) } idxd_unmask_error_interrupts(idxd); - - /* Setup MSIX permission table */ - mperm.bits = 0; - mperm.pasid = idxd->pasid; - mperm.pasid_en = device_pasid_enabled(idxd); - for (i = 1; i < msixcnt; i++) - iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8); - + idxd_msix_perm_setup(idxd); return 0; err_no_irq: @@ -510,6 +502,7 @@ static void idxd_shutdown(struct pci_dev *pdev) idxd_flush_work_list(irq_entry); } + idxd_msix_perm_clear(idxd); destroy_workqueue(idxd->wq); } diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c index a60ca11a5784..f1463fc58112 100644 --- a/drivers/dma/idxd/irq.c +++ b/drivers/dma/idxd/irq.c @@ -124,7 +124,9 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause) for (i = 0; i < 4; i++) idxd->sw_err.bits[i] = ioread64(idxd->reg_base + IDXD_SWERR_OFFSET + i * sizeof(u64)); - iowrite64(IDXD_SWERR_ACK, idxd->reg_base + IDXD_SWERR_OFFSET); + + iowrite64(idxd->sw_err.bits[0] & IDXD_SWERR_ACK, + idxd->reg_base + IDXD_SWERR_OFFSET); if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) { int id = idxd->sw_err.wq_idx; diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c index 4dbb03c545e4..18bf4d148989 100644 --- a/drivers/dma/idxd/sysfs.c +++ b/drivers/dma/idxd/sysfs.c @@ -275,7 +275,6 @@ static void disable_wq(struct idxd_wq *wq) { struct idxd_device *idxd = wq->idxd; struct device *dev = &idxd->pdev->dev; - int rc; mutex_lock(&wq->wq_lock); dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev)); @@ -296,17 +295,13 @@ static void disable_wq(struct idxd_wq *wq) idxd_wq_unmap_portal(wq); idxd_wq_drain(wq); - rc = idxd_wq_disable(wq); + idxd_wq_reset(wq); idxd_wq_free_resources(wq); wq->client_count = 0; mutex_unlock(&wq->wq_lock); - if (rc < 0) - dev_warn(dev, "Failed to disable %s: %d\n", - dev_name(&wq->conf_dev), rc); - else - dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev)); + dev_info(dev, "wq %s 
disabled\n", dev_name(&wq->conf_dev)); } static int idxd_config_bus_remove(struct device *dev) @@ -989,7 +984,7 @@ static ssize_t wq_size_store(struct device *dev, if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) return -EPERM; - if (wq->state != IDXD_WQ_DISABLED) + if (idxd->state == IDXD_DEV_ENABLED) return -EPERM; if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size) @@ -1449,8 +1444,14 @@ static ssize_t op_cap_show(struct device *dev, { struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev); + int i, rc = 0; + + for (i = 0; i < 4; i++) + rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]); - return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]); + rc--; + rc += sysfs_emit_at(buf, rc, "\n"); + return rc; } static DEVICE_ATTR_RO(op_cap); diff --git a/drivers/dma/plx_dma.c b/drivers/dma/plx_dma.c index f387c5bbc170..166934544161 100644 --- a/drivers/dma/plx_dma.c +++ b/drivers/dma/plx_dma.c @@ -507,10 +507,8 @@ static int plx_dma_create(struct pci_dev *pdev) rc = request_irq(pci_irq_vector(pdev, 0), plx_dma_isr, 0, KBUILD_MODNAME, plxdev); - if (rc) { - kfree(plxdev); - return rc; - } + if (rc) + goto free_plx; spin_lock_init(&plxdev->ring_lock); tasklet_setup(&plxdev->desc_task, plx_dma_desc_task); @@ -540,14 +538,20 @@ static int plx_dma_create(struct pci_dev *pdev) rc = dma_async_device_register(dma); if (rc) { pci_err(pdev, "Failed to register dma device: %d\n", rc); - free_irq(pci_irq_vector(pdev, 0), plxdev); - kfree(plxdev); - return rc; + goto put_device; } pci_set_drvdata(pdev, plxdev); return 0; + +put_device: + put_device(&pdev->dev); + free_irq(pci_irq_vector(pdev, 0), plxdev); +free_plx: + kfree(plxdev); + + return rc; } static int plx_dma_probe(struct pci_dev *pdev, diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 71827d9b0aa1..b7260749e8ee 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -723,7 +723,7 @@ static void tegra_dma_issue_pending(struct dma_chan *dc) goto end; } if (!tdc->busy) { - err = pm_runtime_get_sync(tdc->tdma->dev); + err = pm_runtime_resume_and_get(tdc->tdma->dev); if (err < 0) { dev_err(tdc2dev(tdc), "Failed to enable DMA\n"); goto end; @@ -818,7 +818,7 @@ static void tegra_dma_synchronize(struct dma_chan *dc) struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); int err; - err = pm_runtime_get_sync(tdc->tdma->dev); + err = pm_runtime_resume_and_get(tdc->tdma->dev); if (err < 0) { dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d\n", err); return; diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c index 55df63dead8d..70b29bd079c9 100644 --- a/drivers/dma/xilinx/xilinx_dpdma.c +++ b/drivers/dma/xilinx/xilinx_dpdma.c @@ -839,6 +839,7 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan) struct xilinx_dpdma_tx_desc *desc; struct virt_dma_desc *vdesc; u32 reg, channels; + bool first_frame; lockdep_assert_held(&chan->lock); @@ -852,14 +853,6 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan) chan->running = true; } - if (chan->video_group) - channels = xilinx_dpdma_chan_video_group_ready(chan); - else - channels = BIT(chan->id); - - if (!channels) - return; - vdesc = vchan_next_desc(&chan->vchan); if (!vdesc) return; @@ -884,13 +877,26 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan) FIELD_PREP(XILINX_DPDMA_CH_DESC_START_ADDRE_MASK, upper_32_bits(sw_desc->dma_addr))); - if (chan->first_frame) + first_frame = 
chan->first_frame; + chan->first_frame = false; + + if (chan->video_group) { + channels = xilinx_dpdma_chan_video_group_ready(chan); + /* + * Trigger the transfer only when all channels in the group are + * ready. + */ + if (!channels) + return; + } else { + channels = BIT(chan->id); + } + + if (first_frame) reg = XILINX_DPDMA_GBL_TRIG_MASK(channels); else reg = XILINX_DPDMA_GBL_RETRIG_MASK(channels); - chan->first_frame = false; - dpdma_write(xdev->reg, XILINX_DPDMA_GBL, reg); } @@ -1042,13 +1048,14 @@ static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan) */ static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan) { - struct xilinx_dpdma_tx_desc *active = chan->desc.active; + struct xilinx_dpdma_tx_desc *active; unsigned long flags; spin_lock_irqsave(&chan->lock, flags); xilinx_dpdma_debugfs_desc_done_irq(chan); + active = chan->desc.active; if (active) vchan_cyclic_callback(&active->vdesc); else diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c index 0a6438cbb3f3..e7a9561a826d 100644 --- a/drivers/extcon/extcon.c +++ b/drivers/extcon/extcon.c @@ -1241,6 +1241,7 @@ int extcon_dev_register(struct extcon_dev *edev) sizeof(*edev->nh), GFP_KERNEL); if (!edev->nh) { ret = -ENOMEM; + device_unregister(&edev->dev); goto err_dev; } diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c index 5fd6a60b6741..88ed971e32c0 100644 --- a/drivers/firewire/nosy.c +++ b/drivers/firewire/nosy.c @@ -346,6 +346,7 @@ nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg) struct client *client = file->private_data; spinlock_t *client_list_lock = &client->lynx->client_list_lock; struct nosy_stats stats; + int ret; switch (cmd) { case NOSY_IOC_GET_STATS: @@ -360,11 +361,15 @@ nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return 0; case NOSY_IOC_START: + ret = -EBUSY; spin_lock_irq(client_list_lock); - list_add_tail(&client->link, &client->lynx->client_list); + if (list_empty(&client->link)) { + list_add_tail(&client->link, &client->lynx->client_list); + ret = 0; + } spin_unlock_irq(client_list_lock); - return 0; + return ret; case NOSY_IOC_STOP: spin_lock_irq(client_list_lock); diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 9811c40956e5..17c9d825188b 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -2545,7 +2545,7 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet) struct driver_data *driver_data = packet->driver_data; int ret = -ENOENT; - tasklet_disable(&ctx->tasklet); + tasklet_disable_in_atomic(&ctx->tasklet); if (packet->ack != 0) goto out; @@ -3465,7 +3465,7 @@ static int ohci_flush_iso_completions(struct fw_iso_context *base) struct iso_context *ctx = container_of(base, struct iso_context, base); int ret = 0; - tasklet_disable(&ctx->context.tasklet); + tasklet_disable_in_atomic(&ctx->context.tasklet); if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) { context_tasklet((unsigned long)&ctx->context); diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index c23466e05e60..d0537573501e 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -13,7 +13,8 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ \ -Wno-pointer-sign \ $(call cc-disable-warning, address-of-packed-member) \ $(call cc-disable-warning, gnu) \ - -fno-asynchronous-unwind-tables + -fno-asynchronous-unwind-tables \ + $(CLANG_FLAGS) # arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly # disable 
the stackleak plugin diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c index 50bb2a6d6ccf..62f0d1a5dd32 100644 --- a/drivers/firmware/turris-mox-rwtm.c +++ b/drivers/firmware/turris-mox-rwtm.c @@ -2,7 +2,7 @@ /* * Turris Mox rWTM firmware driver * - * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz> + * Copyright (C) 2019 Marek Behún <kabel@kernel.org> */ #include <linux/armada-37xx-rwtm-mailbox.h> @@ -547,4 +547,4 @@ module_platform_driver(turris_mox_rwtm_driver); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Turris Mox rWTM firmware driver"); -MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>"); +MODULE_AUTHOR("Marek Behun <kabel@kernel.org>"); diff --git a/drivers/gpio/gpio-moxtet.c b/drivers/gpio/gpio-moxtet.c index 8299909318f4..61f9efd6c64f 100644 --- a/drivers/gpio/gpio-moxtet.c +++ b/drivers/gpio/gpio-moxtet.c @@ -2,7 +2,7 @@ /* * Turris Mox Moxtet GPIO expander * - * Copyright (C) 2018 Marek Behun <marek.behun@nic.cz> + * Copyright (C) 2018 Marek Behún <kabel@kernel.org> */ #include <linux/bitops.h> @@ -174,6 +174,6 @@ static struct moxtet_driver moxtet_gpio_driver = { }; module_moxtet_driver(moxtet_gpio_driver); -MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>"); +MODULE_AUTHOR("Marek Behun <kabel@kernel.org>"); MODULE_DESCRIPTION("Turris Mox Moxtet GPIO expander"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index 41952bb818ad..56152263ab38 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -29,6 +29,7 @@ #define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF struct gpio_regs { + u32 sysconfig; u32 irqenable1; u32 irqenable2; u32 wake_en; @@ -1069,6 +1070,7 @@ static void omap_gpio_init_context(struct gpio_bank *p) const struct omap_gpio_reg_offs *regs = p->regs; void __iomem *base = p->base; + p->context.sysconfig = readl_relaxed(base + regs->sysconfig); p->context.ctrl = readl_relaxed(base + regs->ctrl); p->context.oe = readl_relaxed(base + regs->direction); p->context.wake_en = readl_relaxed(base + regs->wkup_en); @@ -1088,6 +1090,7 @@ static void omap_gpio_restore_context(struct gpio_bank *bank) const struct omap_gpio_reg_offs *regs = bank->regs; void __iomem *base = bank->base; + writel_relaxed(bank->context.sysconfig, base + regs->sysconfig); writel_relaxed(bank->context.wake_en, base + regs->wkup_en); writel_relaxed(bank->context.ctrl, base + regs->ctrl); writel_relaxed(bank->context.leveldetect0, base + regs->leveldetect0); @@ -1115,6 +1118,10 @@ static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context) bank->saved_datain = readl_relaxed(base + bank->regs->datain); + /* Save sysconfig, its runtime value can be different from the init value */ + if (bank->loses_context) + bank->context.sysconfig = readl_relaxed(base + bank->regs->sysconfig); + if (!bank->enabled_non_wakeup_gpios) goto update_gpio_context_count; @@ -1279,6 +1286,7 @@ out_unlock: static const struct omap_gpio_reg_offs omap2_gpio_regs = { .revision = OMAP24XX_GPIO_REVISION, + .sysconfig = OMAP24XX_GPIO_SYSCONFIG, .direction = OMAP24XX_GPIO_OE, .datain = OMAP24XX_GPIO_DATAIN, .dataout = OMAP24XX_GPIO_DATAOUT, @@ -1302,6 +1310,7 @@ static const struct omap_gpio_reg_offs omap2_gpio_regs = { static const struct omap_gpio_reg_offs omap4_gpio_regs = { .revision = OMAP4_GPIO_REVISION, + .sysconfig = OMAP4_GPIO_SYSCONFIG, .direction = OMAP4_GPIO_OE, .datain = OMAP4_GPIO_DATAIN, .dataout = OMAP4_GPIO_DATAOUT, diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c index
26c5466b8179..ae49bb23c6ed 100644 --- a/drivers/gpio/gpiolib-sysfs.c +++ b/drivers/gpio/gpiolib-sysfs.c @@ -458,6 +458,8 @@ static ssize_t export_store(struct class *class, long gpio; struct gpio_desc *desc; int status; + struct gpio_chip *gc; + int offset; status = kstrtol(buf, 0, &gpio); if (status < 0) @@ -469,6 +471,12 @@ static ssize_t export_store(struct class *class, pr_warn("%s: invalid GPIO %ld\n", __func__, gpio); return -EINVAL; } + gc = desc->gdev->chip; + offset = gpio_chip_hwgpio(desc); + if (!gpiochip_line_is_valid(gc, offset)) { + pr_warn("%s: GPIO %ld masked\n", __func__, gpio); + return -EINVAL; + } /* No extra locking here; FLAG_SYSFS just signifies that the * request and export were done on behalf of userspace, so diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 49267eb64302..29885febc0b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1007,13 +1007,9 @@ struct amdgpu_device { /* s3/s4 mask */ bool in_suspend; - bool in_hibernate; - - /* - * The combination flag in_poweroff_reboot_com used to identify the poweroff - * and reboot opt in the s0i3 system-wide suspend. - */ - bool in_poweroff_reboot_com; + bool in_s3; + bool in_s4; + bool in_s0ix; atomic_t in_gpu_reset; enum pp_mp1_state mp1_state; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 6447cd6ca5a8..8a5a8ff5d362 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2371,6 +2371,10 @@ static int amdgpu_device_set_cg_state(struct amdgpu_device *adev, i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; if (!adev->ip_blocks[i].status.late_initialized) continue; + /* skip CG for GFX on S0ix */ + if (adev->in_s0ix && + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX) + continue; /* skip CG for VCE/UVD, it's handled specially */ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && @@ -2402,6 +2406,10 @@ static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_power i = state == AMD_PG_STATE_GATE ?
j : adev->num_ip_blocks - j - 1; if (!adev->ip_blocks[i].status.late_initialized) continue; + /* skip PG for GFX on S0ix */ + if (adev->in_s0ix && + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX) + continue; /* skip CG for VCE/UVD, it's handled specially */ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && @@ -2678,11 +2686,8 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev) { int i, r; - if (adev->in_poweroff_reboot_com || - !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) { - amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); - amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); - } + amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); + amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); for (i = adev->num_ip_blocks - 1; i >= 0; i--) { if (!adev->ip_blocks[i].status.valid) @@ -2722,6 +2727,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) { int i, r; + if (adev->in_s0ix) + amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry); + for (i = adev->num_ip_blocks - 1; i >= 0; i--) { if (!adev->ip_blocks[i].status.valid) continue; @@ -2734,6 +2742,17 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) adev->ip_blocks[i].status.hw = false; continue; } + + /* skip suspend of gfx and psp for S0ix + * gfx is in gfxoff state, so on resume it will exit gfxoff just + * like at runtime. PSP is also part of the always on hardware + * so no need to suspend it. + */ + if (adev->in_s0ix && + (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP || + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)) + continue; + /* XXX handle errors */ r = adev->ip_blocks[i].version->funcs->suspend(adev); /* XXX handle errors */ @@ -3673,14 +3692,9 @@ void amdgpu_device_fini(struct amdgpu_device *adev) */ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) { - struct amdgpu_device *adev; - struct drm_crtc *crtc; - struct drm_connector *connector; - struct drm_connector_list_iter iter; + struct amdgpu_device *adev = drm_to_adev(dev); int r; - adev = drm_to_adev(dev); - if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; @@ -3692,61 +3706,19 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) cancel_delayed_work_sync(&adev->delayed_init_work); - if (!amdgpu_device_has_dc_support(adev)) { - /* turn off display hw */ - drm_modeset_lock_all(dev); - drm_connector_list_iter_begin(dev, &iter); - drm_for_each_connector_iter(connector, &iter) - drm_helper_connector_dpms(connector, - DRM_MODE_DPMS_OFF); - drm_connector_list_iter_end(&iter); - drm_modeset_unlock_all(dev); - /* unpin the front buffers and cursors */ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct drm_framebuffer *fb = crtc->primary->fb; - struct amdgpu_bo *robj; - - if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) { - struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); - r = amdgpu_bo_reserve(aobj, true); - if (r == 0) { - amdgpu_bo_unpin(aobj); - amdgpu_bo_unreserve(aobj); - } - } - - if (fb == NULL || fb->obj[0] == NULL) { - continue; - } - robj = gem_to_amdgpu_bo(fb->obj[0]); - /* don't unpin kernel fb objects */ - if (!amdgpu_fbdev_robj_is_fb(adev, robj)) { - r = amdgpu_bo_reserve(robj, true); - if (r == 0) { - amdgpu_bo_unpin(robj); - amdgpu_bo_unreserve(robj); - } - } - } - } - amdgpu_ras_suspend(adev); r = 
amdgpu_device_ip_suspend_phase1(adev); - amdgpu_amdkfd_suspend(adev, adev->in_runpm); + if (!adev->in_s0ix) + amdgpu_amdkfd_suspend(adev, adev->in_runpm); /* evict vram memory */ amdgpu_bo_evict_vram(adev); amdgpu_fence_driver_suspend(adev); - if (adev->in_poweroff_reboot_com || - !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) - r = amdgpu_device_ip_suspend_phase2(adev); - else - amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry); + r = amdgpu_device_ip_suspend_phase2(adev); /* evict remaining vram memory * This second call to evict vram is to evict the gart page table * using the CPU. @@ -3768,16 +3740,13 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) */ int amdgpu_device_resume(struct drm_device *dev, bool fbcon) { - struct drm_connector *connector; - struct drm_connector_list_iter iter; struct amdgpu_device *adev = drm_to_adev(dev); - struct drm_crtc *crtc; int r = 0; if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; - if (amdgpu_acpi_is_s0ix_supported(adev)) + if (adev->in_s0ix) amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry); /* post card */ @@ -3802,50 +3771,17 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon) queue_delayed_work(system_wq, &adev->delayed_init_work, msecs_to_jiffies(AMDGPU_RESUME_MS)); - if (!amdgpu_device_has_dc_support(adev)) { - /* pin cursors */ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - - if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) { - struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); - r = amdgpu_bo_reserve(aobj, true); - if (r == 0) { - r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM); - if (r != 0) - dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r); - amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj); - amdgpu_bo_unreserve(aobj); - } - } - } + if (!adev->in_s0ix) { + r = amdgpu_amdkfd_resume(adev, adev->in_runpm); + if (r) + return r; } - r = amdgpu_amdkfd_resume(adev, adev->in_runpm); - if (r) - return r; /* Make sure IB tests flushed */ flush_delayed_work(&adev->delayed_init_work); - /* blat the mode back in */ - if (fbcon) { - if (!amdgpu_device_has_dc_support(adev)) { - /* pre DCE11 */ - drm_helper_resume_force_mode(dev); - - /* turn on display hw */ - drm_modeset_lock_all(dev); - - drm_connector_list_iter_begin(dev, &iter); - drm_for_each_connector_iter(connector, &iter) - drm_helper_connector_dpms(connector, - DRM_MODE_DPMS_ON); - drm_connector_list_iter_end(&iter); - - drm_modeset_unlock_all(dev); - } + if (fbcon) amdgpu_fbdev_set_suspend(adev, 0); - } drm_kms_helper_poll_enable(dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 48cb33e5b382..f753e04fee99 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -1310,3 +1310,92 @@ bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc, return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos, stime, etime, mode); } + +int amdgpu_display_suspend_helper(struct amdgpu_device *adev) +{ + struct drm_device *dev = adev_to_drm(adev); + struct drm_crtc *crtc; + struct drm_connector *connector; + struct drm_connector_list_iter iter; + int r; + + /* turn off display hw */ + drm_modeset_lock_all(dev); + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) + drm_helper_connector_dpms(connector, + DRM_MODE_DPMS_OFF); + 
drm_connector_list_iter_end(&iter); + drm_modeset_unlock_all(dev); + /* unpin the front buffers and cursors */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_framebuffer *fb = crtc->primary->fb; + struct amdgpu_bo *robj; + + if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) { + struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); + r = amdgpu_bo_reserve(aobj, true); + if (r == 0) { + amdgpu_bo_unpin(aobj); + amdgpu_bo_unreserve(aobj); + } + } + + if (fb == NULL || fb->obj[0] == NULL) { + continue; + } + robj = gem_to_amdgpu_bo(fb->obj[0]); + /* don't unpin kernel fb objects */ + if (!amdgpu_fbdev_robj_is_fb(adev, robj)) { + r = amdgpu_bo_reserve(robj, true); + if (r == 0) { + amdgpu_bo_unpin(robj); + amdgpu_bo_unreserve(robj); + } + } + } + return r; +} + +int amdgpu_display_resume_helper(struct amdgpu_device *adev) +{ + struct drm_device *dev = adev_to_drm(adev); + struct drm_connector *connector; + struct drm_connector_list_iter iter; + struct drm_crtc *crtc; + int r; + + /* pin cursors */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + + if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) { + struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); + r = amdgpu_bo_reserve(aobj, true); + if (r == 0) { + r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM); + if (r != 0) + dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r); + amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj); + amdgpu_bo_unreserve(aobj); + } + } + } + + drm_helper_resume_force_mode(dev); + + /* turn on display hw */ + drm_modeset_lock_all(dev); + + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) + drm_helper_connector_dpms(connector, + DRM_MODE_DPMS_ON); + drm_connector_list_iter_end(&iter); + + drm_modeset_unlock_all(dev); + + return 0; +} + diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h index dc7b7d116549..7b6d83e2b13c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h @@ -47,4 +47,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev, const struct drm_format_info * amdgpu_lookup_format_info(u32 format, uint64_t modifier); +int amdgpu_display_suspend_helper(struct amdgpu_device *adev); +int amdgpu_display_resume_helper(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index b26e2fd1c538..e92e7dea71da 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1107,6 +1107,7 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, {0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, {0x1002, 0x73AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, + {0x1002, 0x73AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, {0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, /* Van Gogh */ @@ -1274,24 +1275,35 @@ amdgpu_pci_shutdown(struct pci_dev *pdev) */ if (!amdgpu_passthrough(adev)) adev->mp1_state = PP_MP1_STATE_UNLOAD; - adev->in_poweroff_reboot_com = true; amdgpu_device_ip_suspend(adev); - adev->in_poweroff_reboot_com = false; adev->mp1_state = PP_MP1_STATE_NONE; } static int amdgpu_pmops_suspend(struct device *dev) { 
struct drm_device *drm_dev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(drm_dev); + int r; - return amdgpu_device_suspend(drm_dev, true); + if (amdgpu_acpi_is_s0ix_supported(adev)) + adev->in_s0ix = true; + adev->in_s3 = true; + r = amdgpu_device_suspend(drm_dev, true); + adev->in_s3 = false; + + return r; } static int amdgpu_pmops_resume(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(drm_dev); + int r; - return amdgpu_device_resume(drm_dev, true); + r = amdgpu_device_resume(drm_dev, true); + if (amdgpu_acpi_is_s0ix_supported(adev)) + adev->in_s0ix = false; + return r; } static int amdgpu_pmops_freeze(struct device *dev) @@ -1300,9 +1312,9 @@ static int amdgpu_pmops_freeze(struct device *dev) struct amdgpu_device *adev = drm_to_adev(drm_dev); int r; - adev->in_hibernate = true; + adev->in_s4 = true; r = amdgpu_device_suspend(drm_dev, true); - adev->in_hibernate = false; + adev->in_s4 = false; if (r) return r; return amdgpu_asic_reset(adev); @@ -1318,13 +1330,8 @@ static int amdgpu_pmops_thaw(struct device *dev) static int amdgpu_pmops_poweroff(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - struct amdgpu_device *adev = drm_to_adev(drm_dev); - int r; - adev->in_poweroff_reboot_com = true; - r = amdgpu_device_suspend(drm_dev, true); - adev->in_poweroff_reboot_com = false; - return r; + return amdgpu_device_suspend(drm_dev, true); } static int amdgpu_pmops_restore(struct device *dev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 64beb3399604..a4e2cf7cada1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -778,9 +778,9 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) dev_info->high_va_offset = AMDGPU_GMC_HOLE_END; dev_info->high_va_max = AMDGPU_GMC_HOLE_END | vm_size; } - dev_info->virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE); + dev_info->virtual_address_alignment = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE); dev_info->pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE; - dev_info->gart_page_size = AMDGPU_GPU_PAGE_SIZE; + dev_info->gart_page_size = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE); dev_info->cu_active_number = adev->gfx.cu_info.number; dev_info->cu_ao_mask = adev->gfx.cu_info.ao_cu_mask; dev_info->ce_ram_size = adev->gfx.ce_ram_size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 4b29b8205442..072050429a2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1028,13 +1028,10 @@ int amdgpu_bo_evict_vram(struct amdgpu_device *adev) { struct ttm_resource_manager *man; - /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */ -#ifndef CONFIG_HIBERNATION - if (adev->flags & AMD_IS_APU) { - /* Useless to evict on IGP chips */ + if (adev->in_s3 && (adev->flags & AMD_IS_APU)) { + /* No need to evict vram on APUs for suspend to ram */ return 0; } -#endif man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); return ttm_resource_manager_evict_all(&adev->mman.bdev, man); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 9fd2157b133a..5efa331e3ee8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -906,7 +906,7 @@ static int 
amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, /* Allocate an SG array and squash pages into it */ r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0, - ttm->num_pages << PAGE_SHIFT, + (u64)ttm->num_pages << PAGE_SHIFT, GFP_KERNEL); if (r) goto release_sg; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index ad91c0c3c423..326dae31b675 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2197,8 +2197,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, uint64_t eaddr; /* validate the parameters */ - if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || - size == 0 || size & AMDGPU_GPU_PAGE_MASK) + if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || + size == 0 || size & ~PAGE_MASK) return -EINVAL; /* make sure object fit at this offset */ @@ -2263,8 +2263,8 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, int r; /* validate the parameters */ - if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || - size == 0 || size & AMDGPU_GPU_PAGE_MASK) + if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || + size == 0 || size & ~PAGE_MASK) return -EINVAL; /* make sure object fit at this offset */ @@ -2409,7 +2409,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, after->start = eaddr + 1; after->last = tmp->last; after->offset = tmp->offset; - after->offset += after->start - tmp->start; + after->offset += (after->start - tmp->start) << PAGE_SHIFT; after->flags = tmp->flags; after->bo_va = tmp->bo_va; list_add(&after->list, &tmp->bo_va->invalids); @@ -3300,7 +3300,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, struct amdgpu_bo *root; uint64_t value, flags; struct amdgpu_vm *vm; - long r; + int r; spin_lock(&adev->vm_manager.pasid_lock); vm = idr_find(&adev->vm_manager.pasid_idr, pasid); @@ -3349,6 +3349,12 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, value = 0; } + r = dma_resv_reserve_shared(root->tbo.base.resv, 1); + if (r) { + pr_debug("failed %d to reserve fence slot\n", r); + goto error_unlock; + } + r = amdgpu_vm_bo_update_mapping(adev, adev, vm, true, false, NULL, addr, addr, flags, value, NULL, NULL, NULL); @@ -3360,7 +3366,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, error_unlock: amdgpu_bo_unreserve(root); if (r < 0) - DRM_ERROR("Can't handle page fault (%ld)\n", r); + DRM_ERROR("Can't handle page fault (%d)\n", r); error_unref: amdgpu_bo_unref(&root); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 7944781e1086..19abb740a169 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -2897,6 +2897,11 @@ static int dce_v10_0_hw_fini(void *handle) static int dce_v10_0_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = amdgpu_display_suspend_helper(adev); + if (r) + return r; adev->mode_info.bl_level = amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); @@ -2921,8 +2926,10 @@ static int dce_v10_0_resume(void *handle) amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, bl_level); } + if (ret) + return ret; - return ret; + return amdgpu_display_resume_helper(adev); } static bool dce_v10_0_is_idle(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 1b6ff0470011..320ec35bfd37 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ 
b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -3027,6 +3027,11 @@ static int dce_v11_0_hw_fini(void *handle) static int dce_v11_0_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = amdgpu_display_suspend_helper(adev); + if (r) + return r; adev->mode_info.bl_level = amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); @@ -3051,8 +3056,10 @@ static int dce_v11_0_resume(void *handle) amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, bl_level); } + if (ret) + return ret; - return ret; + return amdgpu_display_resume_helper(adev); } static bool dce_v11_0_is_idle(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index 83a88385b762..13322000ebd6 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -2770,7 +2770,11 @@ static int dce_v6_0_hw_fini(void *handle) static int dce_v6_0_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + r = amdgpu_display_suspend_helper(adev); + if (r) + return r; adev->mode_info.bl_level = amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); @@ -2794,8 +2798,10 @@ static int dce_v6_0_resume(void *handle) amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, bl_level); } + if (ret) + return ret; - return ret; + return amdgpu_display_resume_helper(adev); } static bool dce_v6_0_is_idle(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 224b30214427..04ebf02e5b8c 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -2796,6 +2796,11 @@ static int dce_v8_0_hw_fini(void *handle) static int dce_v8_0_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = amdgpu_display_suspend_helper(adev); + if (r) + return r; adev->mode_info.bl_level = amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); @@ -2820,8 +2825,10 @@ static int dce_v8_0_resume(void *handle) amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, bl_level); } + if (ret) + return ret; - return ret; + return amdgpu_display_resume_helper(adev); } static bool dce_v8_0_is_idle(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index 9810af712cc0..5c11144da051 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -39,6 +39,7 @@ #include "dce_v11_0.h" #include "dce_virtual.h" #include "ivsrcid/ivsrcid_vislands30.h" +#include "amdgpu_display.h" #define DCE_VIRTUAL_VBLANK_PERIOD 16666666 @@ -491,12 +492,24 @@ static int dce_virtual_hw_fini(void *handle) static int dce_virtual_suspend(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = amdgpu_display_suspend_helper(adev); + if (r) + return r; return dce_virtual_hw_fini(handle); } static int dce_virtual_resume(void *handle) { - return dce_virtual_hw_init(handle); + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = dce_virtual_hw_init(handle); + if (r) + return r; + return amdgpu_display_resume_helper(adev); } static bool dce_virtual_is_idle(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 45d1172b7bff..63691deb7df3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -3280,7 +3280,7 @@ 
static const struct soc15_reg_golden golden_settings_gc_10_3_4[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x00000280, 0x00000280), SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0x07800000, 0x00800000), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x00001d00, 0x00000500), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x00001d00, 0x00000500), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PC_CNTL, 0x003c0000, 0x00280400), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf), diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c index b258a3dae767..159add0f5aaa 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c @@ -155,7 +155,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev, /* Wait till CP writes sync code: */ status = amdkfd_fence_wait_timeout( - (unsigned int *) rm_state, + rm_state, QUEUESTATE__ACTIVE, 1500); kfd_gtt_sa_free(dbgdev->dev, mem_obj); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index e686ce2bf3b3..4598a9a58125 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1167,7 +1167,7 @@ static int start_cpsch(struct device_queue_manager *dqm) if (retval) goto fail_allocate_vidmem; - dqm->fence_addr = dqm->fence_mem->cpu_ptr; + dqm->fence_addr = (uint64_t *)dqm->fence_mem->cpu_ptr; dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr; init_interrupts(dqm); @@ -1340,8 +1340,8 @@ out: return retval; } -int amdkfd_fence_wait_timeout(unsigned int *fence_addr, - unsigned int fence_value, +int amdkfd_fence_wait_timeout(uint64_t *fence_addr, + uint64_t fence_value, unsigned int timeout_ms) { unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index 7351dd195274..45f815946554 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -192,7 +192,7 @@ struct device_queue_manager { uint16_t vmid_pasid[VMID_NUM]; uint64_t pipelines_addr; uint64_t fence_gpu_addr; - unsigned int *fence_addr; + uint64_t *fence_addr; struct kfd_mem_obj *fence_mem; bool active_runlist; int sched_policy; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c index 5d541e0cc8ca..f71a7fa6680c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c @@ -347,7 +347,7 @@ fail_create_runlist_ib: } int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address, - uint32_t fence_value) + uint64_t fence_value) { uint32_t *buffer, size; int retval = 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c index dfaf771a42e6..e3ba0cd3b6fa 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c @@ -283,7 +283,7 @@ static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer, } static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer, - uint64_t fence_address, uint32_t fence_value) + uint64_t fence_address, 
uint64_t fence_value) { struct pm4_mes_query_status *packet; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c index a852e0d7d804..08442e7d9944 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c @@ -263,7 +263,7 @@ static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer, } static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer, - uint64_t fence_address, uint32_t fence_value) + uint64_t fence_address, uint64_t fence_value) { struct pm4_mes_query_status *packet; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 09599efa41fc..f304d1f8df5f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -1003,8 +1003,8 @@ int pqm_get_wave_state(struct process_queue_manager *pqm, u32 *ctl_stack_used_size, u32 *save_area_used_size); -int amdkfd_fence_wait_timeout(unsigned int *fence_addr, - unsigned int fence_value, +int amdkfd_fence_wait_timeout(uint64_t *fence_addr, + uint64_t fence_value, unsigned int timeout_ms); /* Packet Manager */ @@ -1040,7 +1040,7 @@ struct packet_manager_funcs { uint32_t filter_param, bool reset, unsigned int sdma_engine); int (*query_status)(struct packet_manager *pm, uint32_t *buffer, - uint64_t fence_address, uint32_t fence_value); + uint64_t fence_address, uint64_t fence_value); int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer); /* Packet sizes */ @@ -1062,7 +1062,7 @@ int pm_send_set_resources(struct packet_manager *pm, struct scheduling_resources *res); int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues); int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address, - uint32_t fence_value); + uint64_t fence_value); int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type, enum kfd_unmap_queues_filter mode, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 573cf17262da..d699a5cf6c11 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -4072,13 +4072,6 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane, return true; /* - * The arbitrary tiling support for multiplane formats has not been hooked - * up. - */ - if (info->num_planes > 1) - return false; - - /* * For D swizzle the canonical modifier depends on the bpp, so check * it here. */ @@ -4096,6 +4089,10 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane, /* Per radeonsi comments 16/64 bpp are more complicated. */ if (info->cpp[0] != 4) return false; + /* We support multi-planar formats, but not when combined with + * additional DCC metadata planes. 
*/ + if (info->num_planes > 1) + return false; } return true; @@ -4296,7 +4293,7 @@ add_gfx10_3_modifiers(const struct amdgpu_device *adev, AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); add_modifier(mods, size, capacity, AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | @@ -4308,7 +4305,7 @@ add_gfx10_3_modifiers(const struct amdgpu_device *adev, AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); add_modifier(mods, size, capacity, AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c index fa013496e26b..2f9bfaeaba8d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c @@ -341,8 +341,7 @@ void enc2_hw_init(struct link_encoder *enc) } else { AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110); - AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c4d); - + AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a); } //AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32; diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h index 705fbfc37502..8a32772d4e91 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h @@ -134,6 +134,7 @@ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_EN, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_BLK, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_BLK_C, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK, mask_sh),\ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CB_B, mask_sh),\ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CR_R, mask_sh),\ diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index a2681fe875ed..c0565a932a12 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -587,6 +587,48 @@ static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr) tmp, MC_CG_ARB_FREQ_F0); } +static uint16_t smu7_override_pcie_speed(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); + uint16_t pcie_gen = 0; + + if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 && + adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4) + pcie_gen = 3; + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 && + adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3) + pcie_gen = 2; + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 && + adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2) + pcie_gen = 1; + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 && + adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1) + pcie_gen = 0; + + return 
pcie_gen; +} + +static uint16_t smu7_override_pcie_width(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); + uint16_t pcie_width = 0; + + if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16) + pcie_width = 16; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12) + pcie_width = 12; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8) + pcie_width = 8; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4) + pcie_width = 4; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2) + pcie_width = 2; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1) + pcie_width = 1; + + return pcie_width; +} + static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); @@ -683,6 +725,11 @@ static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr) PP_Min_PCIEGen), get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); + + if (data->pcie_dpm_key_disabled) + phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, + data->dpm_table.pcie_speed_table.count, + smu7_override_pcie_speed(hwmgr), smu7_override_pcie_width(hwmgr)); } return 0; } @@ -1177,7 +1224,8 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) (hwmgr->chip_id == CHIP_POLARIS10) || (hwmgr->chip_id == CHIP_POLARIS11) || (hwmgr->chip_id == CHIP_POLARIS12) || - (hwmgr->chip_id == CHIP_TONGA)) + (hwmgr->chip_id == CHIP_TONGA) || + (hwmgr->chip_id == CHIP_TOPAZ)) PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1); @@ -1248,6 +1296,13 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr) NULL)), "Failed to enable pcie DPM during DPM Start Function!", return -EINVAL); + } else { + PP_ASSERT_WITH_CODE( + (0 == smum_send_msg_to_smc(hwmgr, + PPSMC_MSG_PCIeDPM_Disable, + NULL)), + "Failed to disable pcie DPM during DPM Start Function!", + return -EINVAL); + } if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, @@ -3276,7 +3331,8 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, disable_mclk_switching_for_display = ((1 < hwmgr->display_config->num_display) && !hwmgr->display_config->multi_monitor_in_sync) || - smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time); + (hwmgr->display_config->num_display && + smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time)); disable_mclk_switching = disable_mclk_switching_for_frame_lock || disable_mclk_switching_for_display; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c index 22b636e2b89b..599ec9726601 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c @@ -54,6 +54,9 @@ #include "smuio/smuio_9_0_offset.h" #include "smuio/smuio_9_0_sh_mask.h" +#define smnPCIE_LC_SPEED_CNTL 0x11140290 +#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288 + #define HBM_MEMORY_CHANNEL_WIDTH 128 static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2}; @@ -443,8 +446,7 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr) if (PP_CAP(PHM_PlatformCaps_VCEDPM)) data->smu_features[GNLD_DPM_VCE].supported = true; - if (!data->registry_data.pcie_dpm_key_disabled) - data->smu_features[GNLD_DPM_LINK].supported = true; + data->smu_features[GNLD_DPM_LINK].supported = true; if (!data->registry_data.dcefclk_dpm_key_disabled) data->smu_features[GNLD_DPM_DCEFCLK].supported = true; @@ -1544,6 +1546,13 @@
static int vega10_override_pcie_parameters(struct pp_hwmgr *hwmgr) pp_table->PcieLaneCount[i] = pcie_width; } + if (data->registry_data.pcie_dpm_key_disabled) { + for (i = 0; i < NUM_LINK_LEVELS; i++) { + pp_table->PcieGenSpeed[i] = pcie_gen; + pp_table->PcieLaneCount[i] = pcie_width; + } + } + return 0; } @@ -2966,6 +2975,14 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap) } } + if (data->registry_data.pcie_dpm_key_disabled) { + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, + false, data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap), + "Attempt to Disable Link DPM feature Failed!", return -EINVAL); + data->smu_features[GNLD_DPM_LINK].enabled = false; + data->smu_features[GNLD_DPM_LINK].supported = false; + } + return 0; } @@ -4584,6 +4601,24 @@ static int vega10_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe return 0; } +static int vega10_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = hwmgr->adev; + + return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) & + PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) + >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT; +} + +static int vega10_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = hwmgr->adev; + + return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & + PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) + >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; +} + static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf) { @@ -4592,8 +4627,9 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table); struct vega10_single_dpm_table *soc_table = &(data->dpm_table.soc_table); struct vega10_single_dpm_table *dcef_table = &(data->dpm_table.dcef_table); - struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table); struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL; + uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width; + PPTable_t *pptable = &(data->smc_state_table.pp_table); int i, now, size = 0, count = 0; @@ -4650,15 +4686,31 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, "*" : ""); break; case PP_PCIE: - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex, &now); - - for (i = 0; i < pcie_table->count; i++) - size += sprintf(buf + size, "%d: %s %s\n", i, - (pcie_table->pcie_gen[i] == 0) ? "2.5GT/s, x1" : - (pcie_table->pcie_gen[i] == 1) ? "5.0GT/s, x16" : - (pcie_table->pcie_gen[i] == 2) ? "8.0GT/s, x16" : "", - (i == now) ? "*" : ""); + current_gen_speed = + vega10_get_current_pcie_link_speed_level(hwmgr); + current_lane_width = + vega10_get_current_pcie_link_width_level(hwmgr); + for (i = 0; i < NUM_LINK_LEVELS; i++) { + gen_speed = pptable->PcieGenSpeed[i]; + lane_width = pptable->PcieLaneCount[i]; + + size += sprintf(buf + size, "%d: %s %s %s\n", i, + (gen_speed == 0) ? "2.5GT/s," : + (gen_speed == 1) ? "5.0GT/s," : + (gen_speed == 2) ? "8.0GT/s," : + (gen_speed == 3) ? "16.0GT/s," : "", + (lane_width == 1) ? "x1" : + (lane_width == 2) ? "x2" : + (lane_width == 3) ? "x4" : + (lane_width == 4) ? "x8" : + (lane_width == 5) ? "x12" : + (lane_width == 6) ? "x16" : "", + (current_gen_speed == gen_speed) && + (current_lane_width == lane_width) ? 
+ "*" : ""); + } break; + case OD_SCLK: if (hwmgr->od_enabled) { size = sprintf(buf, "%s:\n", "OD_SCLK"); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c index 43e01d880f7c..4f6da11e8f10 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c @@ -133,6 +133,7 @@ static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr) data->registry_data.auto_wattman_debug = 0; data->registry_data.auto_wattman_sample_period = 100; data->registry_data.auto_wattman_threshold = 50; + data->registry_data.pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK); } static int vega12_set_features_platform_caps(struct pp_hwmgr *hwmgr) @@ -539,6 +540,29 @@ static int vega12_override_pcie_parameters(struct pp_hwmgr *hwmgr) pp_table->PcieLaneCount[i] = pcie_width_arg; } + /* override to the highest if it's disabled from ppfeaturemask */ + if (data->registry_data.pcie_dpm_key_disabled) { + for (i = 0; i < NUM_LINK_LEVELS; i++) { + smu_pcie_arg = (i << 16) | (pcie_gen << 8) | pcie_width; + ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_OverridePcieParameters, smu_pcie_arg, + NULL); + PP_ASSERT_WITH_CODE(!ret, + "[OverridePcieParameters] Attempt to override pcie params failed!", + return ret); + + pp_table->PcieGenSpeed[i] = pcie_gen; + pp_table->PcieLaneCount[i] = pcie_width; + } + ret = vega12_enable_smc_features(hwmgr, + false, + data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap); + PP_ASSERT_WITH_CODE(!ret, + "Attempt to Disable DPM LINK Failed!", + return ret); + data->smu_features[GNLD_DPM_LINK].enabled = false; + data->smu_features[GNLD_DPM_LINK].supported = false; + } return 0; } diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c index f19964c69a00..b6ee3a285c9d 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c @@ -171,6 +171,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr) data->registry_data.gfxoff_controlled_by_driver = 1; data->gfxoff_allowed = false; data->counter_gfxoff = 0; + data->registry_data.pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK); } static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr) @@ -884,6 +885,30 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr) pp_table->PcieLaneCount[i] = pcie_width_arg; } + /* override to the highest if it's disabled from ppfeaturemask */ + if (data->registry_data.pcie_dpm_key_disabled) { + for (i = 0; i < NUM_LINK_LEVELS; i++) { + smu_pcie_arg = (i << 16) | (pcie_gen << 8) | pcie_width; + ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_OverridePcieParameters, smu_pcie_arg, + NULL); + PP_ASSERT_WITH_CODE(!ret, + "[OverridePcieParameters] Attempt to override pcie params failed!", + return ret); + + pp_table->PcieGenSpeed[i] = pcie_gen; + pp_table->PcieLaneCount[i] = pcie_width; + } + ret = vega20_enable_smc_features(hwmgr, + false, + data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap); + PP_ASSERT_WITH_CODE(!ret, + "Attempt to Disable DPM LINK Failed!", + return ret); + data->smu_features[GNLD_DPM_LINK].enabled = false; + data->smu_features[GNLD_DPM_LINK].supported = false; + } + return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index d143ef1b460b..cd905e41080e 100644 ---
a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -1294,7 +1294,7 @@ static int smu_disable_dpms(struct smu_context *smu) bool use_baco = !smu->is_apu && ((amdgpu_in_reset(adev) && (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) || - ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev))); + ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev))); /* * For custom pptable uploading, skip the DPM features @@ -1431,7 +1431,8 @@ static int smu_suspend(void *handle) smu->watermarks_bitmap &= ~(WATERMARKS_LOADED); - if (smu->is_apu) + /* skip CGPG when in S0ix */ + if (smu->is_apu && !adev->in_s0ix) smu_set_gfx_cgpg(&adev->smu, false); return 0; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 7ddbaecb11c2..101eaa20db9b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -384,10 +384,15 @@ static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable) static bool vangogh_is_dpm_running(struct smu_context *smu) { + struct amdgpu_device *adev = smu->adev; int ret = 0; uint32_t feature_mask[2]; uint64_t feature_enabled; + /* we need to re-init after suspend so return false */ + if (adev->in_suspend) + return false; + ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2); if (ret) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 6d38c5c17f23..db69f19ab5bc 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -689,7 +689,8 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj) struct page **pages = pvec + pinned; ret = pin_user_pages_fast(ptr, num_pages, - !userptr->ro ? 
FOLL_WRITE : 0, pages); + FOLL_WRITE | FOLL_FORCE | FOLL_LONGTERM, + pages); if (ret < 0) { unpin_user_pages(pvec, pinned); kvfree(pvec); diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 1f79bc2a881e..1510e4e2973c 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -13,7 +13,6 @@ #include <linux/irq.h> #include <linux/mfd/syscon.h> #include <linux/of_device.h> -#include <linux/of_gpio.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c index e21fb14d5e07..833d0c1be4f1 100644 --- a/drivers/gpu/drm/i915/display/intel_acpi.c +++ b/drivers/gpu/drm/i915/display/intel_acpi.c @@ -84,13 +84,31 @@ static void intel_dsm_platform_mux_info(acpi_handle dhandle) return; } + if (!pkg->package.count) { + DRM_DEBUG_DRIVER("no connection in _DSM\n"); + return; + } + connector_count = &pkg->package.elements[0]; DRM_DEBUG_DRIVER("MUX info connectors: %lld\n", (unsigned long long)connector_count->integer.value); for (i = 1; i < pkg->package.count; i++) { union acpi_object *obj = &pkg->package.elements[i]; - union acpi_object *connector_id = &obj->package.elements[0]; - union acpi_object *info = &obj->package.elements[1]; + union acpi_object *connector_id; + union acpi_object *info; + + if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count < 2) { + DRM_DEBUG_DRIVER("Invalid object for MUX #%d\n", i); + continue; + } + + connector_id = &obj->package.elements[0]; + info = &obj->package.elements[1]; + if (info->type != ACPI_TYPE_BUFFER || info->buffer.length < 4) { + DRM_DEBUG_DRIVER("Invalid info for MUX obj #%d\n", i); + continue; + } + DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n", (unsigned long long)connector_id->integer.value); DRM_DEBUG_DRIVER(" port id: %s\n", diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index 4683f98f7e54..c3f2962aa1eb 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -317,12 +317,13 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ if (!new_plane_state->hw.crtc && !old_plane_state->hw.crtc) return 0; - new_crtc_state->enabled_planes |= BIT(plane->id); - ret = plane->check_plane(new_crtc_state, new_plane_state); if (ret) return ret; + if (fb) + new_crtc_state->enabled_planes |= BIT(plane->id); + /* FIXME pre-g4x don't work like this */ if (new_plane_state->uapi.visible) new_crtc_state->active_planes |= BIT(plane->id); diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 8c12d5375607..775d89b6c3fc 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -3619,9 +3619,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) { int ret; - intel_dp_lttpr_init(intel_dp); - - if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) + if (intel_dp_init_lttpr_and_dprx_caps(intel_dp) < 0) return false; /* diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c index eaebf123310a..10fe17b7280d 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c @@ -133,6 +133,7 @@ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp, else precharge = 5; + /* Max timeout value on G4x-BDW: 
1.6ms */ if (IS_BROADWELL(dev_priv)) timeout = DP_AUX_CH_CTL_TIME_OUT_600us; else @@ -159,6 +160,12 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp, enum phy phy = intel_port_to_phy(i915, dig_port->base.port); u32 ret; + /* + * Max timeout values: + * SKL-GLK: 1.6ms + * CNL: 3.2ms + * ICL+: 4ms + */ ret = DP_AUX_CH_CTL_SEND_BUSY | DP_AUX_CH_CTL_DONE | DP_AUX_CH_CTL_INTERRUPT | diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c index 651884390137..4f8337c7fd2e 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c @@ -646,7 +646,6 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector) break; case INTEL_BACKLIGHT_DISPLAY_DDI: try_intel_interface = true; - try_vesa_interface = true; break; default: return -ENODEV; diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 892d7db7d94f..2ed309534e97 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -34,6 +34,11 @@ intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE]) link_status[3], link_status[4], link_status[5]); } +static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp) +{ + memset(&intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps)); +} + static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp) { intel_dp->lttpr_common_caps[DP_PHY_REPEATER_CNT - @@ -81,19 +86,36 @@ static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp, static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp) { - if (drm_dp_read_lttpr_common_caps(&intel_dp->aux, - intel_dp->lttpr_common_caps) < 0) { - memset(intel_dp->lttpr_common_caps, 0, - sizeof(intel_dp->lttpr_common_caps)); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + if (intel_dp_is_edp(intel_dp)) return false; - } + + /* + * Detecting LTTPRs must be avoided on platforms with an AUX timeout + * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1). + */ + if (INTEL_GEN(i915) < 10) + return false; + + if (drm_dp_read_lttpr_common_caps(&intel_dp->aux, + intel_dp->lttpr_common_caps) < 0) + goto reset_caps; drm_dbg_kms(&dp_to_i915(intel_dp)->drm, "LTTPR common capabilities: %*ph\n", (int)sizeof(intel_dp->lttpr_common_caps), intel_dp->lttpr_common_caps); + /* The minimum value of LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV is 1.4 */ + if (intel_dp->lttpr_common_caps[0] < 0x14) + goto reset_caps; + return true; + +reset_caps: + intel_dp_reset_lttpr_common_caps(intel_dp); + return false; } static bool @@ -106,33 +128,49 @@ intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable) } /** - * intel_dp_lttpr_init - detect LTTPRs and init the LTTPR link training mode + * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode * @intel_dp: Intel DP struct * - * Read the LTTPR common capabilities, switch to non-transparent link training - * mode if any is detected and read the PHY capabilities for all detected - * LTTPRs. In case of an LTTPR detection error or if the number of + * Read the LTTPR common and DPRX capabilities and switch to non-transparent + * link training mode if any is detected and read the PHY capabilities for all + * detected LTTPRs. 
In case of an LTTPR detection error or if the number of * LTTPRs is more than is supported (8), fall back to the no-LTTPR, * transparent mode link training mode. * * Returns: - * >0 if LTTPRs were detected and the non-transparent LT mode was set + * >0 if LTTPRs were detected and the non-transparent LT mode was set. The + * DPRX capabilities are read out. * 0 if no LTTPRs or more than 8 LTTPRs were detected or in case of a - * detection failure and the transparent LT mode was set + * detection failure and the transparent LT mode was set. The DPRX + * capabilities are read out. + * <0 Reading out the DPRX capabilities failed. */ -int intel_dp_lttpr_init(struct intel_dp *intel_dp) +int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp) { int lttpr_count; bool ret; int i; - if (intel_dp_is_edp(intel_dp)) - return 0; - ret = intel_dp_read_lttpr_common_caps(intel_dp); + + /* The DPTX shall read the DPRX caps after LTTPR detection. */ + if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) { + intel_dp_reset_lttpr_common_caps(intel_dp); + return -EIO; + } + if (!ret) return 0; + /* + * The 0xF0000-0xF02FF range is only valid if the DPCD revision is + * at least 1.4. + */ + if (intel_dp->dpcd[DP_DPCD_REV] < 0x14) { + intel_dp_reset_lttpr_common_caps(intel_dp); + return 0; + } + lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps); /* * Prevent setting LTTPR transparent mode explicitly if no LTTPRs are @@ -172,7 +210,7 @@ int intel_dp_lttpr_init(struct intel_dp *intel_dp) return lttpr_count; } -EXPORT_SYMBOL(intel_dp_lttpr_init); +EXPORT_SYMBOL(intel_dp_init_lttpr_and_dprx_caps); static u8 dp_voltage_max(u8 preemph) { @@ -807,7 +845,11 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp, * TODO: Reiniting LTTPRs here won't be needed once proper connector * HW state readout is added. */ - int lttpr_count = intel_dp_lttpr_init(intel_dp); + int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp); + + if (lttpr_count < 0) + /* Still continue with enabling the port and link training. */ + lttpr_count = 0; if (!intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count)) intel_dp_schedule_fallback_link_training(intel_dp, crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h index 6a1f76bd8c75..9cb7c28027f0 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h @@ -11,7 +11,7 @@ struct intel_crtc_state; struct intel_dp; -int intel_dp_lttpr_init(struct intel_dp *intel_dp); +int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp); void intel_dp_get_adjust_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index f58cc5700784..a86c57d117f2 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -1014,20 +1014,14 @@ static i915_reg_t dss_ctl1_reg(const struct intel_crtc_state *crtc_state) { enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; - if (crtc_state->cpu_transcoder == TRANSCODER_EDP) - return DSS_CTL1; - - return ICL_PIPE_DSS_CTL1(pipe); + return is_pipe_dsc(crtc_state) ? 
ICL_PIPE_DSS_CTL1(pipe) : DSS_CTL1; } static i915_reg_t dss_ctl2_reg(const struct intel_crtc_state *crtc_state) { enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; - if (crtc_state->cpu_transcoder == TRANSCODER_EDP) - return DSS_CTL2; - - return ICL_PIPE_DSS_CTL2(pipe); + return is_pipe_dsc(crtc_state) ? ICL_PIPE_DSS_CTL2(pipe) : DSS_CTL2; } void intel_dsc_enable(struct intel_encoder *encoder, diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index f94025ec603a..a9a8ba1d3aba 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -992,14 +992,14 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state, * FIXME As we do with eDP, just make a note of the time here * and perform the wait before the next panel power on. */ - intel_dsi_msleep(intel_dsi, intel_dsi->panel_pwr_cycle_delay); + msleep(intel_dsi->panel_pwr_cycle_delay); } static void intel_dsi_shutdown(struct intel_encoder *encoder) { struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); - intel_dsi_msleep(intel_dsi, intel_dsi->panel_pwr_cycle_delay); + msleep(intel_dsi->panel_pwr_cycle_delay); } static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c index a357bb431815..67de2b189598 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c @@ -316,7 +316,18 @@ void i915_vma_revoke_fence(struct i915_vma *vma) WRITE_ONCE(fence->vma, NULL); vma->fence = NULL; - with_intel_runtime_pm_if_in_use(fence_to_uncore(fence)->rpm, wakeref) + /* + * Skip the write to HW if and only if the device is currently + * suspended. + * + * If the driver does not currently hold a wakeref (if_in_use == 0), + * the device may currently be runtime suspended, or it may be woken + * up before the suspend takes place. If the device is not suspended + * (powered down) and we skip clearing the fence register, the HW is + * left in an undefined state where we may end up with multiple + * registers overlapping. 
+ */ + with_intel_runtime_pm_if_active(fence_to_uncore(fence)->rpm, wakeref) fence_write(fence); } diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index fef1e857cefc..01c1d1b36acd 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -916,19 +916,26 @@ static int cmd_reg_handler(struct parser_exec_state *s, if (!strncmp(cmd, "srm", 3) || !strncmp(cmd, "lrm", 3)) { - if (offset != i915_mmio_reg_offset(GEN8_L3SQCREG4) && - offset != 0x21f0) { + if (offset == i915_mmio_reg_offset(GEN8_L3SQCREG4) || + offset == 0x21f0 || + (IS_BROADWELL(gvt->gt->i915) && + offset == i915_mmio_reg_offset(INSTPM))) + return 0; + else { gvt_vgpu_err("%s access to register (%x)\n", cmd, offset); return -EPERM; - } else - return 0; + } } if (!strncmp(cmd, "lrr-src", 7) || !strncmp(cmd, "lrr-dst", 7)) { - gvt_vgpu_err("not allowed cmd %s\n", cmd); - return -EPERM; + if (IS_BROADWELL(gvt->gt->i915) && offset == 0x215c) + return 0; + else { + gvt_vgpu_err("not allowed cmd %s reg (%x)\n", cmd, offset); + return -EPERM; + } } if (!strncmp(cmd, "pipe_ctrl", 9)) { diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h index e622aee6e4be..440c35f1abc9 100644 --- a/drivers/gpu/drm/i915/i915_gem.h +++ b/drivers/gpu/drm/i915/i915_gem.h @@ -105,7 +105,7 @@ static inline bool tasklet_is_locked(const struct tasklet_struct *t) static inline void __tasklet_disable_sync_once(struct tasklet_struct *t) { if (!atomic_fetch_inc(&t->count)) - tasklet_unlock_wait(t); + tasklet_unlock_spin_wait(t); } static inline bool __tasklet_is_enabled(const struct tasklet_struct *t) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 97b57acc02e2..4b4d8d034782 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -5471,12 +5471,12 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state, struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id]; int ret; - memset(wm, 0, sizeof(*wm)); - /* Watermarks calculated in master */ if (plane_state->planar_slave) return 0; + memset(wm, 0, sizeof(*wm)); + if (plane_state->planar_linked_plane) { const struct drm_framebuffer *fb = plane_state->hw.fb; enum plane_id y_plane_id = plane_state->planar_linked_plane->id; diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 153ca9e65382..8b725efb2254 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -412,12 +412,20 @@ intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm) } /** - * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use + * __intel_runtime_pm_get_if_active - grab a runtime pm reference if device is active * @rpm: the intel_runtime_pm structure + * @ignore_usecount: get a ref even if dev->power.usage_count is 0 * * This function grabs a device-level runtime pm reference if the device is - * already in use and ensures that it is powered up. It is illegal to try - * and access the HW should intel_runtime_pm_get_if_in_use() report failure. + * already active and ensures that it is powered up. It is illegal to try + * and access the HW should intel_runtime_pm_get_if_active() report failure. + * + * If @ignore_usecount=true, a reference will be acquired even if there is no + * user requiring the device to be powered up (dev->power.usage_count == 0). 
+ * If the function returns false in this case then it's guaranteed that the + * device's runtime suspend hook has been called already or that it will be + * called (and hence it's also guaranteed that the device's runtime resume + * hook will be called eventually). * * Any runtime pm reference obtained by this function must have a symmetric * call to intel_runtime_pm_put() to release the reference again. @@ -425,7 +433,8 @@ intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm) * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates * as True if the wakeref was acquired, or False otherwise. */ -intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm) +static intel_wakeref_t __intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm, + bool ignore_usecount) { if (IS_ENABLED(CONFIG_PM)) { /* @@ -434,7 +443,7 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm) * function, since the power state is undefined. This applies * atm to the late/early system suspend/resume handlers. */ - if (pm_runtime_get_if_in_use(rpm->kdev) <= 0) + if (pm_runtime_get_if_active(rpm->kdev, ignore_usecount) <= 0) return 0; } @@ -443,6 +452,16 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm) return track_intel_runtime_pm_wakeref(rpm); } +intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm) +{ + return __intel_runtime_pm_get_if_active(rpm, false); +} + +intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm) +{ + return __intel_runtime_pm_get_if_active(rpm, true); +} + /** * intel_runtime_pm_get_noresume - grab a runtime pm reference * @rpm: the intel_runtime_pm structure diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h index ae64ff14c642..1e4ddd11c12b 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.h +++ b/drivers/gpu/drm/i915/intel_runtime_pm.h @@ -177,6 +177,7 @@ void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm); intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm); intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm); +intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm); intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm); intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm); @@ -188,6 +189,10 @@ intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm); for ((wf) = intel_runtime_pm_get_if_in_use(rpm); (wf); \ intel_runtime_pm_put((rpm), (wf)), (wf) = 0) +#define with_intel_runtime_pm_if_active(rpm, wf) \ + for ((wf) = intel_runtime_pm_get_if_active(rpm); (wf); \ + intel_runtime_pm_put((rpm), (wf)), (wf) = 0) + void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm); #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref); diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index d1a9841adeed..e6a88c8cbd69 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -215,7 +215,7 @@ static int imx_drm_bind(struct device *dev) ret = drmm_mode_config_init(drm); if (ret) - return ret; + goto err_kms; ret = drm_vblank_init(drm, MAX_CRTC); if (ret) diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index dbfe39e2f7f6..ffdc492c5bc5 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -197,6 
+197,11 @@ static void imx_ldb_encoder_enable(struct drm_encoder *encoder) int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder); + if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) { + dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux); + return; + } + drm_panel_prepare(imx_ldb_ch->panel); if (dual) { @@ -255,6 +260,11 @@ imx_ldb_encoder_atomic_mode_set(struct drm_encoder *encoder, int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder); u32 bus_format = imx_ldb_ch->bus_format; + if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) { + dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux); + return; + } + if (mode->clock > 170000) { dev_warn(ldb->dev, "%s: mode exceeds 170 MHz pixel clock\n", __func__); @@ -583,7 +593,7 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) struct imx_ldb_channel *channel = &imx_ldb->channel[i]; if (!channel->ldb) - break; + continue; ret = imx_ldb_register(drm, channel); if (ret) diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 7e553d3efeb2..ce13d49e615b 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -1386,8 +1386,8 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu) static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) { - *value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_CP_0_LO, - REG_A5XX_RBBM_PERFCTR_CP_0_HI); + *value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO, + REG_A5XX_RBBM_ALWAYSON_COUNTER_HI); return 0; } diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c index 5ccc9da455a1..c35b06b46fcc 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_power.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c @@ -304,7 +304,7 @@ int a5xx_power_init(struct msm_gpu *gpu) /* Set up the limits management */ if (adreno_is_a530(adreno_gpu)) a530_lm_setup(gpu); - else + else if (adreno_is_a540(adreno_gpu)) a540_lm_setup(gpu); /* Set up SP/TP power collapse */ diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 71c917f909af..91cf46f84025 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -339,7 +339,7 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) else bit = a6xx_gmu_oob_bits[state].ack_new; - gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, bit); + gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit); } /* Enable CPU control of SPTP power collapse */ diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index ba8e9d3cf0fe..d553f62f4eeb 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -522,28 +522,73 @@ static int a6xx_cp_init(struct msm_gpu *gpu) return a6xx_idle(gpu, ring) ? 0 : -EINVAL; } -static void a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu, +/* + * Check that the microcode version is new enough to include several key + * security fixes. Return true if the ucode is safe. + */ +static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu, struct drm_gem_object *obj) { + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; u32 *buf = msm_gem_get_vaddr(obj); + bool ret = false; if (IS_ERR(buf)) - return; + return false; /* - * If the lowest nibble is 0xa that is an indication that this microcode - * has been patched.
The actual version is in dword [3] but we only care - * about the patchlevel which is the lowest nibble of dword [3] - * - * Otherwise check that the firmware is greater than or equal to 1.90 - * which was the first version that had this fix built in + * Targets up to a640 (a618, a630 and a640) need to check for a + * microcode version that is patched to support the whereami opcode or + * one that is new enough to include it by default. */ - if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1) - a6xx_gpu->has_whereami = true; - else if ((buf[0] & 0xfff) > 0x190) - a6xx_gpu->has_whereami = true; + if (adreno_is_a618(adreno_gpu) || adreno_is_a630(adreno_gpu) || + adreno_is_a640(adreno_gpu)) { + /* + * If the lowest nibble is 0xa that is an indication that this + * microcode has been patched. The actual version is in dword + * [3] but we only care about the patchlevel which is the lowest + * nibble of dword [3] + * + * Otherwise check that the firmware is greater than or equal + * to 1.90 which was the first version that had this fix built + * in + */ + if ((((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1) || + (buf[0] & 0xfff) >= 0x190) { + a6xx_gpu->has_whereami = true; + ret = true; + goto out; + } + DRM_DEV_ERROR(&gpu->pdev->dev, + "a630 SQE ucode is too old. Have version %x need at least %x\n", + buf[0] & 0xfff, 0x190); + } else { + /* + * a650 tier targets don't need whereami but still need to be + * equal to or newer than 0.95 for other security fixes + */ + if (adreno_is_a650(adreno_gpu)) { + if ((buf[0] & 0xfff) >= 0x095) { + ret = true; + goto out; + } + + DRM_DEV_ERROR(&gpu->pdev->dev, + "a650 SQE ucode is too old. Have version %x need at least %x\n", + buf[0] & 0xfff, 0x095); + } + + /* + * When a660 is added those targets should return true here + * since those have all the critical security fixes built in + * from the start + */ + } +out: msm_gem_put_vaddr(obj); + return ret; } static int a6xx_ucode_init(struct msm_gpu *gpu) @@ -566,7 +611,13 @@ static int a6xx_ucode_init(struct msm_gpu *gpu) } msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw"); - a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo); + if (!a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo)) { + msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace); + drm_gem_object_put(a6xx_gpu->sqe_bo); + + a6xx_gpu->sqe_bo = NULL; + return -EPERM; + } } gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO, @@ -1177,8 +1228,8 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) /* Force the GPU power on so we can read this register */ a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET); - *value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO, - REG_A6XX_RBBM_PERFCTR_CP_0_HI); + *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO, + REG_A6XX_CP_ALWAYS_ON_COUNTER_HI); a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET); mutex_unlock(&perfcounter_oob); @@ -1350,35 +1401,26 @@ static int a6xx_set_supported_hw(struct device *dev, struct a6xx_gpu *a6xx_gpu, u32 revn) { struct opp_table *opp_table; - struct nvmem_cell *cell; u32 supp_hw = UINT_MAX; - void *buf; + u16 speedbin; + int ret; - cell = nvmem_cell_get(dev, "speed_bin"); + ret = nvmem_cell_read_u16(dev, "speed_bin", &speedbin); /* * -ENOENT means that the platform doesn't support speedbin which is * fine */ - if (PTR_ERR(cell) == -ENOENT) + if (ret == -ENOENT) { return 0; - else if (IS_ERR(cell)) { - DRM_DEV_ERROR(dev, - "failed to read speed-bin. 
Some OPPs may not be supported by hardware"); - goto done; - } - - buf = nvmem_cell_read(cell, NULL); - if (IS_ERR(buf)) { - nvmem_cell_put(cell); + } else if (ret) { DRM_DEV_ERROR(dev, - "failed to read speed-bin. Some OPPs may not be supported by hardware"); + "failed to read speed-bin (%d). Some OPPs may not be supported by hardware", + ret); goto done; } + speedbin = le16_to_cpu(speedbin); - supp_hw = fuse_to_supp_hw(dev, revn, *((u32 *) buf)); - - kfree(buf); - nvmem_cell_put(cell); + supp_hw = fuse_to_supp_hw(dev, revn, speedbin); done: opp_table = dev_pm_opp_set_supported_hw(dev, &supp_hw, 1); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c index 8981cfa9dbc3..92e6f1b94738 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c @@ -496,7 +496,9 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx, DPU_REG_WRITE(c, CTL_TOP, mode_sel); DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active); - DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, BIT(cfg->merge_3d - MERGE_3D_0)); + if (cfg->merge_3d) + DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, + BIT(cfg->merge_3d - MERGE_3D_0)); } static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index 5a8e3e1fc48c..85f2c3564c96 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -43,6 +43,8 @@ #define DPU_DEBUGFS_DIR "msm_dpu" #define DPU_DEBUGFS_HWMASKNAME "hw_log_mask" +#define MIN_IB_BW 400000000ULL /* Min ib vote 400MB */ + static int dpu_kms_hw_init(struct msm_kms *kms); static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms); @@ -931,6 +933,9 @@ static int dpu_kms_hw_init(struct msm_kms *kms) DPU_DEBUG("REG_DMA is not defined"); } + if (of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss")) + dpu_kms_parse_data_bus_icc_path(dpu_kms); + pm_runtime_get_sync(&dpu_kms->pdev->dev); dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0); @@ -1032,9 +1037,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms) dpu_vbif_init_memtypes(dpu_kms); - if (of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss")) - dpu_kms_parse_data_bus_icc_path(dpu_kms); - pm_runtime_put_sync(&dpu_kms->pdev->dev); return 0; @@ -1191,10 +1193,10 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev) ddev = dpu_kms->dev; + WARN_ON(!(dpu_kms->num_paths)); /* Min vote of BW is required before turning on AXI clk */ for (i = 0; i < dpu_kms->num_paths; i++) - icc_set_bw(dpu_kms->path[i], 0, - dpu_kms->catalog->perf.min_dram_ib); + icc_set_bw(dpu_kms->path[i], 0, Bps_to_icc(MIN_IB_BW)); rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true); if (rc) { diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c index 1c6e1d2b947c..7c22bfe0fc7d 100644 --- a/drivers/gpu/drm/msm/dp/dp_aux.c +++ b/drivers/gpu/drm/msm/dp/dp_aux.c @@ -32,6 +32,8 @@ struct dp_aux_private { struct drm_dp_aux dp_aux; }; +#define MAX_AUX_RETRIES 5 + static const char *dp_aux_get_error(u32 aux_error) { switch (aux_error) { @@ -377,6 +379,11 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, ret = dp_aux_cmd_fifo_tx(aux, msg); if (ret < 0) { + if (aux->native) { + aux->retry_cnt++; + if (!(aux->retry_cnt % MAX_AUX_RETRIES)) + dp_catalog_aux_update_cfg(aux->catalog); + } usleep_range(400, 500); /* at least 400us to next try */ goto unlock_exit; } diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c 
b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c index a45fe95aff49..3dc65877fa10 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c @@ -163,7 +163,7 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev, break; case MSM_DSI_PHY_7NM: case MSM_DSI_PHY_7NM_V4_1: - pll = msm_dsi_pll_7nm_init(pdev, id); + pll = msm_dsi_pll_7nm_init(pdev, type, id); break; default: pll = ERR_PTR(-ENXIO); diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h index 3405982a092c..bbecb1de5678 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h @@ -117,10 +117,12 @@ msm_dsi_pll_10nm_init(struct platform_device *pdev, int id) } #endif #ifdef CONFIG_DRM_MSM_DSI_7NM_PHY -struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id); +struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, + enum msm_dsi_phy_type type, int id); #else static inline struct msm_dsi_pll * -msm_dsi_pll_7nm_init(struct platform_device *pdev, int id) +msm_dsi_pll_7nm_init(struct platform_device *pdev, + enum msm_dsi_phy_type type, int id) { return ERR_PTR(-ENODEV); } diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c index 93bf142e4a4e..e29b3bfd63d1 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c @@ -325,7 +325,7 @@ static void dsi_pll_commit(struct dsi_pll_7nm *pll) pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1, reg->frac_div_start_low); pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1, reg->frac_div_start_mid); pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1, reg->frac_div_start_high); - pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40); + pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, reg->pll_lockdet_rate); pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06); pll_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, 0x10); /* TODO: 0x00 for CPHY */ pll_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS, reg->pll_clock_inverters); @@ -509,6 +509,7 @@ static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw, { struct msm_dsi_pll *pll = hw_clk_to_pll(hw); struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll); + struct dsi_pll_config *config = &pll_7nm->pll_configuration; void __iomem *base = pll_7nm->mmio; u64 ref_clk = pll_7nm->vco_ref_clk_rate; u64 vco_rate = 0x0; @@ -529,9 +530,8 @@ static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw, /* * TODO: * 1. Assumes prescaler is disabled - * 2. Multiplier is 2^18. 
it should be 2^(num_of_frac_bits) */ - multiplier = 1 << 18; + multiplier = 1 << config->frac_bits; pll_freq = dec * (ref_clk * 2); tmp64 = (ref_clk * 2 * frac); pll_freq += div_u64(tmp64, multiplier); @@ -852,7 +852,8 @@ err_base_clk_hw: return ret; } -struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id) +struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, + enum msm_dsi_phy_type type, int id) { struct dsi_pll_7nm *pll_7nm; struct msm_dsi_pll *pll; @@ -885,7 +886,7 @@ struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id) pll = &pll_7nm->base; pll->min_rate = 1000000000UL; pll->max_rate = 3500000000UL; - if (pll->type == MSM_DSI_PHY_7NM_V4_1) { + if (type == MSM_DSI_PHY_7NM_V4_1) { pll->min_rate = 600000000UL; pll->max_rate = (unsigned long)5000000000ULL; /* workaround for max rate overflowing on 32-bit builds: */ diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index 6a326761dc4a..edcaccaa27e6 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -57,10 +57,13 @@ static void vblank_put(struct msm_kms *kms, unsigned crtc_mask) static void lock_crtcs(struct msm_kms *kms, unsigned int crtc_mask) { + int crtc_index; struct drm_crtc *crtc; - for_each_crtc_mask(kms->dev, crtc, crtc_mask) - mutex_lock(&kms->commit_lock[drm_crtc_index(crtc)]); + for_each_crtc_mask(kms->dev, crtc, crtc_mask) { + crtc_index = drm_crtc_index(crtc); + mutex_lock_nested(&kms->commit_lock[crtc_index], crtc_index); + } } static void unlock_crtcs(struct msm_kms *kms, unsigned int crtc_mask) diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 94525ac76d4e..196907689c82 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -570,6 +570,7 @@ err_free_priv: kfree(priv); err_put_drm_dev: drm_dev_put(ddev); + platform_set_drvdata(pdev, NULL); return ret; } @@ -1072,6 +1073,10 @@ static int __maybe_unused msm_pm_resume(struct device *dev) static int __maybe_unused msm_pm_prepare(struct device *dev) { struct drm_device *ddev = dev_get_drvdata(dev); + struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL; + + if (!priv || !priv->kms) + return 0; return drm_mode_config_helper_suspend(ddev); } @@ -1079,6 +1084,10 @@ static int __maybe_unused msm_pm_prepare(struct device *dev) static void __maybe_unused msm_pm_complete(struct device *dev) { struct drm_device *ddev = dev_get_drvdata(dev); + struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL; + + if (!priv || !priv->kms) + return; drm_mode_config_helper_resume(ddev); } @@ -1311,6 +1320,10 @@ static int msm_pdev_remove(struct platform_device *pdev) static void msm_pdev_shutdown(struct platform_device *pdev) { struct drm_device *drm = platform_get_drvdata(pdev); + struct msm_drm_private *priv = drm ? 
drm->dev_private : NULL; + + if (!priv || !priv->kms) + return; drm_atomic_helper_shutdown(drm); } diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c index ad2703698b05..cd59a5918038 100644 --- a/drivers/gpu/drm/msm/msm_fence.c +++ b/drivers/gpu/drm/msm/msm_fence.c @@ -45,7 +45,7 @@ int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence, int ret; if (fence > fctx->last_fence) { - DRM_ERROR("%s: waiting on invalid fence: %u (of %u)\n", + DRM_ERROR_RATELIMITED("%s: waiting on invalid fence: %u (of %u)\n", fctx->name, fence, fctx->last_fence); return -EINVAL; } diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index 4735251a394d..d8151a89e163 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -157,7 +157,6 @@ struct msm_kms { * from the crtc's pending_timer close to end of the frame: */ struct mutex commit_lock[MAX_CRTCS]; - struct lock_class_key commit_lock_keys[MAX_CRTCS]; unsigned pending_crtc_mask; struct msm_pending_timer pending_timers[MAX_CRTCS]; }; @@ -167,11 +166,8 @@ static inline int msm_kms_init(struct msm_kms *kms, { unsigned i, ret; - for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++) { - lockdep_register_key(&kms->commit_lock_keys[i]); - __mutex_init(&kms->commit_lock[i], "&kms->commit_lock[i]", - &kms->commit_lock_keys[i]); - } + for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++) + mutex_init(&kms->commit_lock[i]); kms->funcs = funcs; diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 196612addfd6..1c9c0cdf85db 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -2693,9 +2693,20 @@ nv50_display_create(struct drm_device *dev) else nouveau_display(dev)->format_modifiers = disp50xx_modifiers; - if (disp->disp->object.oclass >= GK104_DISP) { + /* FIXME: 256x256 cursors are supported on Kepler, however unlike Maxwell and later + * generations Kepler requires that we use small pages (4K) for cursor scanout surfaces. The + * proper fix for this is to teach nouveau to migrate fbs being used for the cursor plane to + * small page allocations in prepare_fb(). When this is implemented, we should also force + * large pages (128K) for ovly fbs in order to fix Kepler ovlys. + * But until then, just limit cursors to 128x128 - which is small enough to avoid ever using + * large pages. 
+ */ + if (disp->disp->object.oclass >= GM107_DISP) { dev->mode_config.cursor_width = 256; dev->mode_config.cursor_height = 256; + } else if (disp->disp->object.oclass >= GK104_DISP) { + dev->mode_config.cursor_width = 128; + dev->mode_config.cursor_height = 128; } else { dev->mode_config.cursor_width = 64; dev->mode_config.cursor_height = 64; diff --git a/drivers/gpu/drm/panel/panel-dsi-cm.c b/drivers/gpu/drm/panel/panel-dsi-cm.c index af381d756ac1..5fbfb71ca3d9 100644 --- a/drivers/gpu/drm/panel/panel-dsi-cm.c +++ b/drivers/gpu/drm/panel/panel-dsi-cm.c @@ -37,6 +37,7 @@ struct dsic_panel_data { u32 height_mm; u32 max_hs_rate; u32 max_lp_rate; + bool te_support; }; struct panel_drv_data { @@ -334,9 +335,11 @@ static int dsicm_power_on(struct panel_drv_data *ddata) if (r) goto err; - r = mipi_dsi_dcs_set_tear_on(ddata->dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK); - if (r) - goto err; + if (ddata->panel_data->te_support) { + r = mipi_dsi_dcs_set_tear_on(ddata->dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK); + if (r) + goto err; + } /* possible panel bug */ msleep(100); @@ -619,6 +622,7 @@ static const struct dsic_panel_data taal_data = { .height_mm = 0, .max_hs_rate = 300000000, .max_lp_rate = 10000000, + .te_support = true, }; static const struct dsic_panel_data himalaya_data = { @@ -629,6 +633,7 @@ static const struct dsic_panel_data himalaya_data = { .height_mm = 88, .max_hs_rate = 300000000, .max_lp_rate = 10000000, + .te_support = false, }; static const struct dsic_panel_data droid4_data = { @@ -639,6 +644,7 @@ static const struct dsic_panel_data droid4_data = { .height_mm = 89, .max_hs_rate = 300000000, .max_lp_rate = 10000000, + .te_support = false, }; static const struct of_device_id dsicm_of_match[] = { diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index e8c66d10478f..78893bea85ae 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -364,7 +364,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt * if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) { /* check that we only pin down anonymous memory to prevent problems with writeback */ - unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE; + unsigned long end = gtt->userptr + (u64)ttm->num_pages * PAGE_SIZE; struct vm_area_struct *vma; vma = find_vma(gtt->usermm, gtt->userptr); if (!vma || vma->vm_file || vma->vm_end < end) @@ -386,7 +386,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt * } while (pinned < ttm->num_pages); r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0, - ttm->num_pages << PAGE_SHIFT, + (u64)ttm->num_pages << PAGE_SHIFT, GFP_KERNEL); if (r) goto release_sg; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c index ba8c6038cd63..ca3761772211 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c @@ -48,21 +48,12 @@ static unsigned int rcar_du_encoder_count_ports(struct device_node *node) static const struct drm_encoder_funcs rcar_du_encoder_funcs = { }; -static void rcar_du_encoder_release(struct drm_device *dev, void *res) -{ - struct rcar_du_encoder *renc = res; - - drm_encoder_cleanup(&renc->base); - kfree(renc); -} - int rcar_du_encoder_init(struct rcar_du_device *rcdu, enum rcar_du_output output, struct device_node *enc_node) { struct rcar_du_encoder *renc; struct drm_bridge *bridge; - int ret; /* * Locate the DRM bridge from the DT node. 
For the DPAD outputs, if the @@ -101,26 +92,16 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu, return -ENOLINK; } - renc = kzalloc(sizeof(*renc), GFP_KERNEL); - if (renc == NULL) - return -ENOMEM; - - renc->output = output; - dev_dbg(rcdu->dev, "initializing encoder %pOF for output %u\n", enc_node, output); - ret = drm_encoder_init(&rcdu->ddev, &renc->base, &rcar_du_encoder_funcs, - DRM_MODE_ENCODER_NONE, NULL); - if (ret < 0) { - kfree(renc); - return ret; - } + renc = drmm_encoder_alloc(&rcdu->ddev, struct rcar_du_encoder, base, + &rcar_du_encoder_funcs, DRM_MODE_ENCODER_NONE, + NULL); + if (!renc) + return -ENOMEM; - ret = drmm_add_action_or_reset(&rcdu->ddev, rcar_du_encoder_release, - renc); - if (ret) - return ret; + renc->output = output; /* * Attach the bridge to the encoder. The bridge will create the diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index 0ae3a025efe9..134986dc2783 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -1688,6 +1688,11 @@ static void tegra_dc_commit_state(struct tegra_dc *dc, dev_err(dc->dev, "failed to set clock rate to %lu Hz\n", state->pclk); + + err = clk_set_rate(dc->clk, state->pclk); + if (err < 0) + dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n", + dc->clk, state->pclk, err); } DRM_DEBUG_KMS("rate: %lu, div: %u\n", clk_get_rate(dc->clk), @@ -1698,11 +1703,6 @@ static void tegra_dc_commit_state(struct tegra_dc *dc, value = SHIFT_CLK_DIVIDER(state->div) | PIXEL_CLK_DIVIDER_PCD1; tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL); } - - err = clk_set_rate(dc->clk, state->pclk); - if (err < 0) - dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n", - dc->clk, state->pclk, err); } static void tegra_dc_stop(struct tegra_dc *dc) @@ -2501,22 +2501,18 @@ static int tegra_dc_couple(struct tegra_dc *dc) * POWER_CONTROL registers during CRTC enabling. */ if (dc->soc->coupled_pm && dc->pipe == 1) { - u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER; - struct device_link *link; - struct device *partner; + struct device *companion; + struct tegra_dc *parent; - partner = driver_find_device(dc->dev->driver, NULL, NULL, - tegra_dc_match_by_pipe); - if (!partner) + companion = driver_find_device(dc->dev->driver, NULL, (const void *)0, + tegra_dc_match_by_pipe); + if (!companion) return -EPROBE_DEFER; - link = device_link_add(dc->dev, partner, flags); - if (!link) { - dev_err(dc->dev, "failed to link controllers\n"); - return -EINVAL; - } + parent = dev_get_drvdata(companion); + dc->client.parent = &parent->client; - dev_dbg(dc->dev, "coupled to %s\n", dev_name(partner)); + dev_dbg(dc->dev, "coupled to %s\n", dev_name(companion)); } return 0; diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index f02a035dda45..7b88261f57bb 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -3115,6 +3115,12 @@ static int tegra_sor_init(struct host1x_client *client) * kernel is possible. 
*/ if (sor->rst) { + err = pm_runtime_resume_and_get(sor->dev); + if (err < 0) { + dev_err(sor->dev, "failed to get runtime PM: %d\n", err); + return err; + } + err = reset_control_acquire(sor->rst); if (err < 0) { dev_err(sor->dev, "failed to acquire SOR reset: %d\n", @@ -3148,6 +3154,7 @@ } reset_control_release(sor->rst); + pm_runtime_put(sor->dev); } err = clk_prepare_enable(sor->clk_safe); diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 269390bc586e..76657dcdf9b0 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -210,6 +210,7 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format) { const struct vc4_crtc_data *crtc_data = vc4_crtc_to_vc4_crtc_data(vc4_crtc); const struct vc4_pv_data *pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc); + struct vc4_dev *vc4 = to_vc4_dev(vc4_crtc->base.dev); u32 fifo_len_bytes = pv_data->fifo_depth; /* @@ -238,6 +239,22 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format) if (crtc_data->hvs_output == 5) return 32; + /* + * It looks like in some situations, we will overflow + * the PixelValve FIFO (with the bit 10 of PV stat being + * set) and stall the HVS / PV, eventually resulting in + * a page flip timeout. + * + * Displaying the video overlay during a playback with + * Kodi on an RPi3 seems to be a reliable way to reproduce + * it, with a failure rate around 50%. + * + * Removing 1 from the FIFO full level however + * seems to completely remove that issue. + */ + if (!vc4->hvs->hvs5) + return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX - 1; + return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX; } } diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index 7322169c0682..1e9c84cf614a 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -1146,7 +1146,6 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane, plane->state->src_y = state->src_y; plane->state->src_w = state->src_w; plane->state->src_h = state->src_h; - plane->state->src_h = state->src_h; plane->state->alpha = state->alpha; plane->state->pixel_blend_mode = state->pixel_blend_mode; plane->state->rotation = state->rotation; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c index ba658fa9cf6c..183571c387b7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c @@ -481,11 +481,15 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) vmw_bo_unreference(&old_buf); res->id = vcotbl->type; + /* Release the pin acquired in vmw_bo_init */ + ttm_bo_unpin(bo); + return 0; out_map_new: ttm_bo_kunmap(&old_map); out_wait: + ttm_bo_unpin(bo); ttm_bo_unreserve(bo); vmw_bo_unreference(&buf); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index dd69b51c40e4..6fa24645fbbf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -712,17 +712,8 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) dev_priv->last_read_seqno = (uint32_t) -100; dev_priv->drm.dev_private = dev_priv; - ret = vmw_setup_pci_resources(dev_priv, pci_id); - if (ret) - return ret; - ret = vmw_detect_version(dev_priv); - if (ret) - goto out_no_pci_or_version; - mutex_init(&dev_priv->cmdbuf_mutex); - mutex_init(&dev_priv->release_mutex); mutex_init(&dev_priv->binding_mutex); - mutex_init(&dev_priv->global_kms_state_mutex);
ttm_lock_init(&dev_priv->reservation_sem); spin_lock_init(&dev_priv->resource_lock); spin_lock_init(&dev_priv->hw_lock); @@ -730,6 +721,14 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) spin_lock_init(&dev_priv->cap_lock); spin_lock_init(&dev_priv->cursor_lock); + ret = vmw_setup_pci_resources(dev_priv, pci_id); + if (ret) + return ret; + ret = vmw_detect_version(dev_priv); + if (ret) + goto out_no_pci_or_version; + + for (i = vmw_res_context; i < vmw_res_max; ++i) { idr_init(&dev_priv->res_idr[i]); INIT_LIST_HEAD(&dev_priv->res_lru[i]); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 5fa5bcd20cc5..eb76a6b9ebca 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -529,7 +529,6 @@ struct vmw_private { struct vmw_overlay *overlay_priv; struct drm_property *hotplug_mode_update_property; struct drm_property *implicit_placement_property; - struct mutex global_kms_state_mutex; spinlock_t cursor_lock; struct drm_atomic_state *suspend_state; @@ -592,7 +591,6 @@ struct vmw_private { bool refuse_hibernation; bool suspend_locked; - struct mutex release_mutex; atomic_t num_fifo_resources; /* @@ -1524,9 +1522,8 @@ static inline void vmw_bo_unreference(struct vmw_buffer_object **buf) struct vmw_buffer_object *tmp_buf = *buf; *buf = NULL; - if (tmp_buf != NULL) { + if (tmp_buf != NULL) ttm_bo_put(&tmp_buf->base); - } } static inline struct vmw_buffer_object * diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c index a372980fe6a5..f2d625415458 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c @@ -94,6 +94,16 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob, struct vmw_piter data_iter, unsigned long num_data_pages); + +static inline void vmw_bo_unpin_unlocked(struct ttm_buffer_object *bo) +{ + int ret = ttm_bo_reserve(bo, false, true, NULL); + BUG_ON(ret != 0); + ttm_bo_unpin(bo); + ttm_bo_unreserve(bo); +} + + /* * vmw_setup_otable_base - Issue an object table base setup command to * the device @@ -277,6 +287,7 @@ out_no_setup: &batch->otables[i]); } + vmw_bo_unpin_unlocked(batch->otable_bo); ttm_bo_put(batch->otable_bo); batch->otable_bo = NULL; return ret; @@ -340,6 +351,7 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv, BUG_ON(ret != 0); vmw_bo_fence_single(bo, NULL); + ttm_bo_unpin(bo); ttm_bo_unreserve(bo); ttm_bo_put(batch->otable_bo); @@ -528,6 +540,7 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob, void vmw_mob_destroy(struct vmw_mob *mob) { if (mob->pt_bo) { + vmw_bo_unpin_unlocked(mob->pt_bo); ttm_bo_put(mob->pt_bo); mob->pt_bo = NULL; } @@ -643,6 +656,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv, out_no_cmd_space: vmw_fifo_resource_dec(dev_priv); if (pt_set_up) { + vmw_bo_unpin_unlocked(mob->pt_bo); ttm_bo_put(mob->pt_bo); mob->pt_bo = NULL; } diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c index 30d9adf31c84..9f14d99c763c 100644 --- a/drivers/gpu/drm/xen/xen_drm_front.c +++ b/drivers/gpu/drm/xen/xen_drm_front.c @@ -521,7 +521,7 @@ static int xen_drm_drv_init(struct xen_drm_front_info *front_info) drm_dev = drm_dev_alloc(&xen_drm_driver, dev); if (IS_ERR(drm_dev)) { ret = PTR_ERR(drm_dev); - goto fail; + goto fail_dev; } drm_info->drm_dev = drm_dev; @@ -551,8 +551,10 @@ fail_modeset: drm_kms_helper_poll_fini(drm_dev); drm_mode_config_cleanup(drm_dev); drm_dev_put(drm_dev); -fail: +fail_dev: kfree(drm_info); + front_info->drm_info = NULL; 
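The vmwgfx hunks in this series all enforce the same invariant: a buffer object pinned at creation must be unpinned, while its reservation is held, before the last reference is dropped, otherwise TTM warns about destroying a pinned object. A compact sketch of a balanced teardown, in the spirit of the vmw_bo_unpin_unlocked() helper introduced above (error handling elided, names hypothetical):

	static void example_bo_teardown(struct ttm_buffer_object *bo)
	{
		/* ttm_bo_unpin() must be called with the BO reserved */
		if (ttm_bo_reserve(bo, false, true, NULL) == 0) {
			ttm_bo_unpin(bo);
			ttm_bo_unreserve(bo);
		}
		ttm_bo_put(bo);	/* drop the final reference */
	}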
+fail: return ret; } diff --git a/drivers/gpu/drm/xen/xen_drm_front_conn.h b/drivers/gpu/drm/xen/xen_drm_front_conn.h index 3adacba9a23b..e5f4314899ee 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_conn.h +++ b/drivers/gpu/drm/xen/xen_drm_front_conn.h @@ -16,7 +16,6 @@ struct drm_connector; struct xen_drm_front_drm_info; -struct xen_drm_front_drm_info; int xen_drm_front_conn_init(struct xen_drm_front_drm_info *drm_info, struct drm_connector *connector); diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c index 347fb962b6c9..68a766ff0e9d 100644 --- a/drivers/gpu/host1x/bus.c +++ b/drivers/gpu/host1x/bus.c @@ -705,8 +705,9 @@ void host1x_driver_unregister(struct host1x_driver *driver) EXPORT_SYMBOL(host1x_driver_unregister); /** - * host1x_client_register() - register a host1x client + * __host1x_client_register() - register a host1x client * @client: host1x client + * @key: lock class key for the client-specific mutex * * Registers a host1x client with each host1x controller instance. Note that * each client will only match their parent host1x controller and will only be @@ -715,13 +716,14 @@ EXPORT_SYMBOL(host1x_driver_unregister); * device and call host1x_device_init(), which will in turn call each client's * &host1x_client_ops.init implementation. */ -int host1x_client_register(struct host1x_client *client) +int __host1x_client_register(struct host1x_client *client, + struct lock_class_key *key) { struct host1x *host1x; int err; INIT_LIST_HEAD(&client->list); - mutex_init(&client->lock); + __mutex_init(&client->lock, "host1x client lock", key); client->usecount = 0; mutex_lock(&devices_lock); @@ -742,7 +744,7 @@ int host1x_client_register(struct host1x_client *client) return 0; } -EXPORT_SYMBOL(host1x_client_register); +EXPORT_SYMBOL(__host1x_client_register); /** * host1x_client_unregister() - unregister a host1x client diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c index dbac16641662..ddecc84fd6f0 100644 --- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c +++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c @@ -10,6 +10,7 @@ #include <linux/bitops.h> #include <linux/delay.h> #include <linux/dma-mapping.h> +#include <linux/dmi.h> #include <linux/interrupt.h> #include <linux/io-64-nonatomic-lo-hi.h> #include <linux/module.h> @@ -22,9 +23,13 @@ #define ACEL_EN BIT(0) #define GYRO_EN BIT(1) -#define MAGNO_EN BIT(2) +#define MAGNO_EN BIT(2) #define ALS_EN BIT(19) +static int sensor_mask_override = -1; +module_param_named(sensor_mask, sensor_mask_override, int, 0444); +MODULE_PARM_DESC(sensor_mask, "override the detected sensors mask"); + void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info) { union sfh_cmd_param cmd_param; @@ -73,12 +78,41 @@ void amd_stop_all_sensors(struct amd_mp2_dev *privdata) writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0); } +static const struct dmi_system_id dmi_sensor_mask_overrides[] = { + { + .matches = { + DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY x360 Convertible 13-ag0xxx"), + }, + .driver_data = (void *)(ACEL_EN | MAGNO_EN), + }, + { + .matches = { + DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY x360 Convertible 15-cp0xxx"), + }, + .driver_data = (void *)(ACEL_EN | MAGNO_EN), + }, + { } +}; + int amd_mp2_get_sensor_num(struct amd_mp2_dev *privdata, u8 *sensor_id) { int activestatus, num_of_sensors = 0; + const struct dmi_system_id *dmi_id; + u32 activecontrolstatus; + + if (sensor_mask_override == -1) { + dmi_id = dmi_first_match(dmi_sensor_mask_overrides); + if (dmi_id) + sensor_mask_override = 
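/*
 * Aside on the host1x change above: __host1x_client_register() now takes a
 * struct lock_class_key so that each client gets its own lockdep class for
 * client->lock. The public entry point is presumably a header macro along
 * these lines (a sketch, not the verbatim header):
 *
 *	#define host1x_client_register(client)			\
 *	({							\
 *		static struct lock_class_key __key;		\
 *		__host1x_client_register(client, &__key);	\
 *	})
 *
 * A static key per call site is what gives lockdep one class per client
 * type instead of a single shared class for every host1x client.
 */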
(long)dmi_id->driver_data; + } + + if (sensor_mask_override >= 0) { + activestatus = sensor_mask_override; + } else { + activecontrolstatus = readl(privdata->mmio + AMD_P2C_MSG3); + activestatus = activecontrolstatus >> 4; + } - privdata->activecontrolstatus = readl(privdata->mmio + AMD_P2C_MSG3); - activestatus = privdata->activecontrolstatus >> 4; if (ACEL_EN & activestatus) sensor_id[num_of_sensors++] = accel_idx; diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h index 8f8d19b2cfe5..489415f7c22c 100644 --- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h +++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h @@ -61,7 +61,6 @@ struct amd_mp2_dev { struct pci_dev *pdev; struct amdtp_cl_data *cl_data; void __iomem *mmio; - u32 activecontrolstatus; }; struct amd_mp2_sensor_info { diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c index 3feaece13ade..6b665931147d 100644 --- a/drivers/hid/hid-alps.c +++ b/drivers/hid/hid-alps.c @@ -761,6 +761,7 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi) if (input_register_device(data->input2)) { input_free_device(input2); + ret = -ENOENT; goto exit; } } diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c index 1dfe184ebf5a..2ab22b925941 100644 --- a/drivers/hid/hid-asus.c +++ b/drivers/hid/hid-asus.c @@ -1222,6 +1222,9 @@ static const struct hid_device_id asus_devices[] = { USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD), QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD }, { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, + USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2), + QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD }, + { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100TA_KEYBOARD), QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES }, { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c index 21e15627a461..477baa30889c 100644 --- a/drivers/hid/hid-cp2112.c +++ b/drivers/hid/hid-cp2112.c @@ -161,6 +161,7 @@ struct cp2112_device { atomic_t read_avail; atomic_t xfer_avail; struct gpio_chip gc; + struct irq_chip irq; u8 *in_out_buffer; struct mutex lock; @@ -1175,16 +1176,6 @@ static int cp2112_gpio_irq_type(struct irq_data *d, unsigned int type) return 0; } -static struct irq_chip cp2112_gpio_irqchip = { - .name = "cp2112-gpio", - .irq_startup = cp2112_gpio_irq_startup, - .irq_shutdown = cp2112_gpio_irq_shutdown, - .irq_ack = cp2112_gpio_irq_ack, - .irq_mask = cp2112_gpio_irq_mask, - .irq_unmask = cp2112_gpio_irq_unmask, - .irq_set_type = cp2112_gpio_irq_type, -}; - static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev, int pin) { @@ -1339,8 +1330,17 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id) dev->gc.can_sleep = 1; dev->gc.parent = &hdev->dev; + dev->irq.name = "cp2112-gpio"; + dev->irq.irq_startup = cp2112_gpio_irq_startup; + dev->irq.irq_shutdown = cp2112_gpio_irq_shutdown; + dev->irq.irq_ack = cp2112_gpio_irq_ack; + dev->irq.irq_mask = cp2112_gpio_irq_mask; + dev->irq.irq_unmask = cp2112_gpio_irq_unmask; + dev->irq.irq_set_type = cp2112_gpio_irq_type; + dev->irq.flags = IRQCHIP_MASK_ON_SUSPEND; + girq = &dev->gc.irq; - girq->chip = &cp2112_gpio_irqchip; + girq->chip = &dev->irq; /* The event comes from the outside so no parent handler */ girq->parent_handler = NULL; girq->num_parents = 0; diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c index d9319622da44..e60c31dd05ff 100644 --- a/drivers/hid/hid-google-hammer.c +++ 
b/drivers/hid/hid-google-hammer.c @@ -574,6 +574,8 @@ static void hammer_remove(struct hid_device *hdev) static const struct hid_device_id hammer_devices[] = { { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, + USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_DON) }, + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) }, { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MAGNEMITE) }, diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index e42aaae3138f..67fd8a2f5aba 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -194,6 +194,7 @@ #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2 0x1837 #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3 0x1822 #define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD 0x1866 +#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2 0x19b6 #define USB_DEVICE_ID_ASUSTEK_FX503VD_KEYBOARD 0x1869 #define USB_VENDOR_ID_ATEN 0x0557 @@ -493,6 +494,7 @@ #define USB_DEVICE_ID_GOOGLE_MASTERBALL 0x503c #define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d #define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044 +#define USB_DEVICE_ID_GOOGLE_DON 0x5050 #define USB_VENDOR_ID_GOTOP 0x08f2 #define USB_DEVICE_ID_SUPER_Q2 0x007f diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 44d715c12f6a..2d70dc4bea65 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -2533,7 +2533,7 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac, !wacom_wac->shared->is_touch_on) { if (!wacom_wac->shared->touch_down) return; - prox = 0; + prox = false; } wacom_wac->hid_data.num_received++; @@ -3574,8 +3574,6 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev, { struct wacom_features *features = &wacom_wac->features; - input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); - if (!(features->device_type & WACOM_DEVICETYPE_PEN)) return -ENODEV; @@ -3590,6 +3588,7 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev, return 0; } + input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); __set_bit(BTN_TOUCH, input_dev->keybit); __set_bit(ABS_MISC, input_dev->absbit); @@ -3742,8 +3741,6 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev, { struct wacom_features *features = &wacom_wac->features; - input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); - if (!(features->device_type & WACOM_DEVICETYPE_TOUCH)) return -ENODEV; @@ -3756,6 +3753,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev, /* setup has already been done */ return 0; + input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); __set_bit(BTN_TOUCH, input_dev->keybit); if (features->touch_max == 1) { diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index dd27b9dbe931..873ef38eb1c8 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -129,6 +129,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK) != DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH) { dev_err(dev->dev, "High Speed not supported!\n"); + t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ; dev->master_cfg &= ~DW_IC_CON_SPEED_MASK; dev->master_cfg |= DW_IC_CON_SPEED_FAST; dev->hs_hcnt = 0; diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c index 5ac30d95650c..97d4f3ac0abd 100644 --- a/drivers/i2c/busses/i2c-exynos5.c +++ b/drivers/i2c/busses/i2c-exynos5.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * 
i2c-exynos5.c - Samsung Exynos5 I2C Controller Driver * * Copyright (C) 2013 Samsung Electronics Co., Ltd. diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c index c45f226c2b85..aa00ba8bcb70 100644 --- a/drivers/i2c/busses/i2c-hix5hd2.c +++ b/drivers/i2c/busses/i2c-hix5hd2.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2014 Linaro Ltd. - * Copyright (c) 2014 Hisilicon Limited. + * Copyright (c) 2014 HiSilicon Limited. * * Now only support 7 bit address. */ diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c index 8509c5f11356..55177eb21d7b 100644 --- a/drivers/i2c/busses/i2c-jz4780.c +++ b/drivers/i2c/busses/i2c-jz4780.c @@ -525,8 +525,8 @@ static irqreturn_t jz4780_i2c_irq(int irqno, void *dev_id) i2c_sta = jz4780_i2c_readw(i2c, JZ4780_I2C_STA); data = *i2c->wbuf; data &= ~JZ4780_I2C_DC_READ; - if ((!i2c->stop_hold) && (i2c->cdata->version >= - ID_X1000)) + if ((i2c->wt_len == 1) && (!i2c->stop_hold) && + (i2c->cdata->version >= ID_X1000)) data |= X1000_I2C_DC_STOP; jz4780_i2c_writew(i2c, JZ4780_I2C_DC, data); i2c->wbuf++; diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c index c590d36b5fd1..5c8e94b6cdb5 100644 --- a/drivers/i2c/busses/i2c-mv64xxx.c +++ b/drivers/i2c/busses/i2c-mv64xxx.c @@ -221,6 +221,10 @@ mv64xxx_i2c_hw_init(struct mv64xxx_i2c_data *drv_data) writel(0, drv_data->reg_base + drv_data->reg_offsets.ext_addr); writel(MV64XXX_I2C_REG_CONTROL_TWSIEN | MV64XXX_I2C_REG_CONTROL_STOP, drv_data->reg_base + drv_data->reg_offsets.control); + + if (drv_data->errata_delay) + udelay(5); + drv_data->state = MV64XXX_I2C_STATE_IDLE; } diff --git a/drivers/i2c/busses/i2c-stm32f4.c b/drivers/i2c/busses/i2c-stm32f4.c index 937c2c8fd349..4933fc8ce3fd 100644 --- a/drivers/i2c/busses/i2c-stm32f4.c +++ b/drivers/i2c/busses/i2c-stm32f4.c @@ -534,7 +534,7 @@ static void stm32f4_i2c_handle_rx_addr(struct stm32f4_i2c_dev *i2c_dev) default: /* * N-byte reception: - * Enable ACK, reset POS (ACK postion) and clear ADDR flag. + * Enable ACK, reset POS (ACK position) and clear ADDR flag. 
* In that way, ACK will be sent as soon as the current byte * will be received in the shift register */ diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 63ebf722a424..f21362355973 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -378,7 +378,7 @@ static int i2c_gpio_init_recovery(struct i2c_adapter *adap) static int i2c_init_recovery(struct i2c_adapter *adap) { struct i2c_bus_recovery_info *bri = adap->bus_recovery_info; - char *err_str; + char *err_str, *err_level = KERN_ERR; if (!bri) return 0; @@ -387,7 +387,8 @@ static int i2c_init_recovery(struct i2c_adapter *adap) return -EPROBE_DEFER; if (!bri->recover_bus) { - err_str = "no recover_bus() found"; + err_str = "no suitable method provided"; + err_level = KERN_DEBUG; goto err; } @@ -414,7 +415,7 @@ static int i2c_init_recovery(struct i2c_adapter *adap) return 0; err: - dev_err(&adap->dev, "Not using recovery: %s\n", err_str); + dev_printk(err_level, &adap->dev, "Not using recovery: %s\n", err_str); adap->bus_recovery_info = NULL; return -EINVAL; diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 0abce004a959..65e3e7df8a4b 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -76,7 +76,9 @@ static struct workqueue_struct *addr_wq; static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = { [LS_NLA_TYPE_DGID] = {.type = NLA_BINARY, - .len = sizeof(struct rdma_nla_ls_gid)}, + .len = sizeof(struct rdma_nla_ls_gid), + .validation_type = NLA_VALIDATE_MIN, + .min = sizeof(struct rdma_nla_ls_gid)}, }; static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh) diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 8769e7aa097f..e42c812e74c3 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -3610,13 +3610,14 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id) ep->com.local_addr.ss_family == AF_INET) { err = cxgb4_remove_server_filter( ep->com.dev->rdev.lldi.ports[0], ep->stid, - ep->com.dev->rdev.lldi.rxq_ids[0], 0); + ep->com.dev->rdev.lldi.rxq_ids[0], false); } else { struct sockaddr_in6 *sin6; c4iw_init_wr_wait(ep->com.wr_waitp); err = cxgb4_remove_server( ep->com.dev->rdev.lldi.ports[0], ep->stid, - ep->com.dev->rdev.lldi.rxq_ids[0], 0); + ep->com.dev->rdev.lldi.rxq_ids[0], + ep->com.local_addr.ss_family == AF_INET6); if (err) goto done; err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp, diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c index 2a91b8d95e12..04b1e8f021f6 100644 --- a/drivers/infiniband/hw/hfi1/affinity.c +++ b/drivers/infiniband/hw/hfi1/affinity.c @@ -632,22 +632,11 @@ static void _dev_comp_vect_cpu_mask_clean_up(struct hfi1_devdata *dd, */ int hfi1_dev_affinity_init(struct hfi1_devdata *dd) { - int node = pcibus_to_node(dd->pcidev->bus); struct hfi1_affinity_node *entry; const struct cpumask *local_mask; int curr_cpu, possible, i, ret; bool new_entry = false; - /* - * If the BIOS does not have the NUMA node information set, select - * NUMA 0 so we get consistent performance. - */ - if (node < 0) { - dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n"); - node = 0; - } - dd->node = node; - local_mask = cpumask_of_node(dd->node); if (cpumask_first(local_mask) >= nr_cpu_ids) local_mask = topology_core_cpumask(0); @@ -660,7 +649,7 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd) * create an entry in the global affinity structure and initialize it. 
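 * (Aside on the i2c-core-base hunk above: dev_printk() takes a KERN_*
 * level string as its first argument, so a single call site can pick its
 * severity at run time, e.g. a sketch with a hypothetical condition:
 *
 *	const char *level = method_missing ? KERN_DEBUG : KERN_ERR;
 *	dev_printk(level, &adap->dev, "Not using recovery: %s\n", err_str);
 *
 * which is why err_level can default to KERN_ERR and be demoted to
 * KERN_DEBUG for the harmless "no suitable method provided" case.)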
*/ if (!entry) { - entry = node_affinity_allocate(node); + entry = node_affinity_allocate(dd->node); if (!entry) { dd_dev_err(dd, "Unable to allocate global affinity node\n"); @@ -751,6 +740,7 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd) if (new_entry) node_affinity_add_tail(entry); + dd->affinity_entry = entry; mutex_unlock(&node_affinity.lock); return 0; @@ -766,10 +756,9 @@ void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd) { struct hfi1_affinity_node *entry; - if (dd->node < 0) - return; - mutex_lock(&node_affinity.lock); + if (!dd->affinity_entry) + goto unlock; entry = node_affinity_lookup(dd->node); if (!entry) goto unlock; @@ -780,8 +769,8 @@ void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd) */ _dev_comp_vect_cpu_mask_clean_up(dd, entry); unlock: + dd->affinity_entry = NULL; mutex_unlock(&node_affinity.lock); - dd->node = NUMA_NO_NODE; } /* diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index e09e8244a94c..2a9a040569eb 100644 --- a/drivers/infiniband/hw/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h @@ -1409,6 +1409,7 @@ struct hfi1_devdata { spinlock_t irq_src_lock; int vnic_num_vports; struct net_device *dummy_netdev; + struct hfi1_affinity_node *affinity_entry; /* Keeps track of IPoIB RSM rule users */ atomic_t ipoib_rsm_usr_num; diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index cb7ad1288821..786c6316273f 100644 --- a/drivers/infiniband/hw/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c @@ -1277,7 +1277,6 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, dd->pport = (struct hfi1_pportdata *)(dd + 1); dd->pcidev = pdev; pci_set_drvdata(pdev, dd); - dd->node = NUMA_NO_NODE; ret = xa_alloc_irq(&hfi1_dev_table, &dd->unit, dd, xa_limit_32b, GFP_KERNEL); @@ -1287,6 +1286,15 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, goto bail; } rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit); + /* + * If the BIOS does not have the NUMA node information set, select + * NUMA 0 so we get consistent performance. + */ + dd->node = pcibus_to_node(pdev->bus); + if (dd->node == NUMA_NO_NODE) { + dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n"); + dd->node = 0; + } /* * Initialize all locks for the device. 
This needs to be as early as diff --git a/drivers/infiniband/hw/hfi1/netdev_rx.c b/drivers/infiniband/hw/hfi1/netdev_rx.c index 1fb6e1a0e4e1..1bcab992ac26 100644 --- a/drivers/infiniband/hw/hfi1/netdev_rx.c +++ b/drivers/infiniband/hw/hfi1/netdev_rx.c @@ -173,8 +173,7 @@ u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts, return 0; } - cpumask_and(node_cpu_mask, cpu_mask, - cpumask_of_node(pcibus_to_node(dd->pcidev->bus))); + cpumask_and(node_cpu_mask, cpu_mask, cpumask_of_node(dd->node)); available_cpus = cpumask_weight(node_cpu_mask); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index c3934abeb260..ce26f97b2ca2 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -1194,8 +1194,10 @@ static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type) upper_32_bits(dma)); roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG, (u32)ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S); - roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0); + + /* Make sure to write tail first and then head */ roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0); + roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0); } else { roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma); roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG, diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index de3c2fc6f361..07b8350929cd 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -1116,7 +1116,7 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din, case MLX5_CMD_OP_CREATE_MKEY: MLX5_SET(destroy_mkey_in, din, opcode, MLX5_CMD_OP_DESTROY_MKEY); - MLX5_SET(destroy_mkey_in, in, mkey_index, *obj_id); + MLX5_SET(destroy_mkey_in, din, mkey_index, *obj_id); break; case MLX5_CMD_OP_CREATE_CQ: MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ); diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index ec4b3f6a8222..f5a52a6fae43 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -1078,7 +1078,7 @@ static int _create_kernel_qp(struct mlx5_ib_dev *dev, qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc); MLX5_SET(qpc, qpc, uar_page, uar_index); - MLX5_SET(qpc, qpc, ts_format, MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT); + MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(dev->mdev)); MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); /* Set "fast registration enabled" for all kernel QPs */ @@ -1188,7 +1188,8 @@ static int get_rq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq) } return MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING; } - return MLX5_RQC_TIMESTAMP_FORMAT_DEFAULT; + return fr_supported ? MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING : + MLX5_RQC_TIMESTAMP_FORMAT_DEFAULT; } static int get_sq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq) @@ -1206,7 +1207,8 @@ static int get_sq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq) } return MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING; } - return MLX5_SQC_TIMESTAMP_FORMAT_DEFAULT; + return fr_supported ? 
MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING : + MLX5_SQC_TIMESTAMP_FORMAT_DEFAULT; } static int get_qp_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq, @@ -1217,7 +1219,8 @@ static int get_qp_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq, MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING || MLX5_CAP_ROCE(dev->mdev, qp_ts_format) == MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME; - int ts_format = MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT; + int ts_format = fr_supported ? MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING : + MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT; if (recv_cq && recv_cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION) @@ -1930,6 +1933,7 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, if (qp->flags & IB_QP_CREATE_MANAGED_RECV) MLX5_SET(qpc, qpc, cd_slave_receive, 1); + MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(dev->mdev)); MLX5_SET(qpc, qpc, rq_type, MLX5_SRQ_RQ); MLX5_SET(qpc, qpc, no_sq, 1); MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn); @@ -4873,6 +4877,7 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd, struct mlx5_ib_dev *dev; int has_net_offloads; __be64 *rq_pas0; + int ts_format; void *in; void *rqc; void *wq; @@ -4881,6 +4886,10 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd, dev = to_mdev(pd->device); + ts_format = get_rq_ts_format(dev, to_mcq(init_attr->cq)); + if (ts_format < 0) + return ts_format; + inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas; in = kvzalloc(inlen, GFP_KERNEL); if (!in) @@ -4890,6 +4899,7 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd, rqc = MLX5_ADDR_OF(create_rq_in, in, ctx); MLX5_SET(rqc, rqc, mem_rq_type, MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE); + MLX5_SET(rqc, rqc, ts_format, ts_format); MLX5_SET(rqc, rqc, user_index, rwq->user_index); MLX5_SET(rqc, rqc, cqn, to_mcq(init_attr->cq)->mcq.cqn); MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST); diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 0eb6a7a618e0..9ea542270ed4 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -1244,7 +1244,8 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev, * TGT QP isn't associated with RQ/SQ */ if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created) && - (attrs->qp_type != IB_QPT_XRC_TGT)) { + (attrs->qp_type != IB_QPT_XRC_TGT) && + (attrs->qp_type != IB_QPT_XRC_INI)) { struct qedr_cq *send_cq = get_qedr_cq(attrs->send_cq); struct qedr_cq *recv_cq = get_qedr_cq(attrs->recv_cq); diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c index 0a08b4b742a3..6734329cca33 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c @@ -2720,8 +2720,8 @@ void rtrs_clt_close(struct rtrs_clt *clt) /* Now it is safe to iterate over all paths without locks */ list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) { - rtrs_clt_destroy_sess_files(sess, NULL); rtrs_clt_close_conns(sess, true); + rtrs_clt_destroy_sess_files(sess, NULL); kobject_put(&sess->kobj); } free_clt(clt); diff --git a/drivers/input/joystick/n64joy.c b/drivers/input/joystick/n64joy.c index 8bcc529942bc..9dbca366613e 100644 --- a/drivers/input/joystick/n64joy.c +++ b/drivers/input/joystick/n64joy.c @@ -252,8 +252,8 @@ static int __init n64joy_probe(struct platform_device *pdev) mutex_init(&priv->n64joy_mutex); priv->reg_base = devm_platform_ioremap_resource(pdev, 0); - if 
(!priv->reg_base) { - err = -EINVAL; + if (IS_ERR(priv->reg_base)) { + err = PTR_ERR(priv->reg_base); goto fail; } diff --git a/drivers/input/keyboard/nspire-keypad.c b/drivers/input/keyboard/nspire-keypad.c index 63d5e488137d..e9fa1423f136 100644 --- a/drivers/input/keyboard/nspire-keypad.c +++ b/drivers/input/keyboard/nspire-keypad.c @@ -93,9 +93,15 @@ static irqreturn_t nspire_keypad_irq(int irq, void *dev_id) return IRQ_HANDLED; } -static int nspire_keypad_chip_init(struct nspire_keypad *keypad) +static int nspire_keypad_open(struct input_dev *input) { + struct nspire_keypad *keypad = input_get_drvdata(input); unsigned long val = 0, cycles_per_us, delay_cycles, row_delay_cycles; + int error; + + error = clk_prepare_enable(keypad->clk); + if (error) + return error; cycles_per_us = (clk_get_rate(keypad->clk) / 1000000); if (cycles_per_us == 0) @@ -121,30 +127,6 @@ static int nspire_keypad_chip_init(struct nspire_keypad *keypad) keypad->int_mask = 1 << 1; writel(keypad->int_mask, keypad->reg_base + KEYPAD_INTMSK); - /* Disable GPIO interrupts to prevent hanging on touchpad */ - /* Possibly used to detect touchpad events */ - writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT); - /* Acknowledge existing interrupts */ - writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS); - - return 0; -} - -static int nspire_keypad_open(struct input_dev *input) -{ - struct nspire_keypad *keypad = input_get_drvdata(input); - int error; - - error = clk_prepare_enable(keypad->clk); - if (error) - return error; - - error = nspire_keypad_chip_init(keypad); - if (error) { - clk_disable_unprepare(keypad->clk); - return error; - } - return 0; } @@ -152,6 +134,11 @@ static void nspire_keypad_close(struct input_dev *input) { struct nspire_keypad *keypad = input_get_drvdata(input); + /* Disable interrupts */ + writel(0, keypad->reg_base + KEYPAD_INTMSK); + /* Acknowledge existing interrupts */ + writel(~0, keypad->reg_base + KEYPAD_INT); + clk_disable_unprepare(keypad->clk); } @@ -210,6 +197,25 @@ static int nspire_keypad_probe(struct platform_device *pdev) return -ENOMEM; } + error = clk_prepare_enable(keypad->clk); + if (error) { + dev_err(&pdev->dev, "failed to enable clock\n"); + return error; + } + + /* Disable interrupts */ + writel(0, keypad->reg_base + KEYPAD_INTMSK); + /* Acknowledge existing interrupts */ + writel(~0, keypad->reg_base + KEYPAD_INT); + + /* Disable GPIO interrupts to prevent hanging on touchpad */ + /* Possibly used to detect touchpad events */ + writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT); + /* Acknowledge existing GPIO interrupts */ + writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS); + + clk_disable_unprepare(keypad->clk); + input_set_drvdata(input, keypad); input->id.bustype = BUS_HOST; diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 9119e12a5778..a5a003553646 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -588,6 +588,7 @@ static const struct dmi_system_id i8042_dmi_noselftest_table[] = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */ }, + }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */ diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c index 4c2b579f6c8b..5f7706febcb0 100644 --- a/drivers/input/touchscreen/elants_i2c.c +++ b/drivers/input/touchscreen/elants_i2c.c @@ -1441,7 +1441,7 @@ static int 
elants_i2c_probe(struct i2c_client *client, touchscreen_parse_properties(ts->input, true, &ts->prop); - if (ts->chip_id == EKTF3624) { + if (ts->chip_id == EKTF3624 && ts->phy_x && ts->phy_y) { /* calculate resolution from size */ ts->x_res = DIV_ROUND_CLOSEST(ts->prop.max_x, ts->phy_x); ts->y_res = DIV_ROUND_CLOSEST(ts->prop.max_y, ts->phy_y); @@ -1449,8 +1449,7 @@ static int elants_i2c_probe(struct i2c_client *client, input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->x_res); input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->y_res); - if (ts->major_res > 0) - input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, ts->major_res); + input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, ts->major_res); error = input_mt_init_slots(ts->input, MAX_CONTACT_NUM, INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED); diff --git a/drivers/input/touchscreen/s6sy761.c b/drivers/input/touchscreen/s6sy761.c index b63d7fdf0cd2..85a1f465c097 100644 --- a/drivers/input/touchscreen/s6sy761.c +++ b/drivers/input/touchscreen/s6sy761.c @@ -145,8 +145,8 @@ static void s6sy761_report_coordinates(struct s6sy761_data *sdata, u8 major = event[4]; u8 minor = event[5]; u8 z = event[6] & S6SY761_MASK_Z; - u16 x = (event[1] << 3) | ((event[3] & S6SY761_MASK_X) >> 4); - u16 y = (event[2] << 3) | (event[3] & S6SY761_MASK_Y); + u16 x = (event[1] << 4) | ((event[3] & S6SY761_MASK_X) >> 4); + u16 y = (event[2] << 4) | (event[3] & S6SY761_MASK_Y); input_mt_slot(sdata->input, tid); diff --git a/drivers/interconnect/bulk.c b/drivers/interconnect/bulk.c index 73e2c8d0a412..448cc536aa79 100644 --- a/drivers/interconnect/bulk.c +++ b/drivers/interconnect/bulk.c @@ -53,7 +53,7 @@ void icc_bulk_put(int num_paths, struct icc_bulk_data *paths) EXPORT_SYMBOL_GPL(icc_bulk_put); /** - * icc_bulk_set() - set bandwidth to a set of paths + * icc_bulk_set_bw() - set bandwidth to a set of paths * @num_paths: the number of icc_bulk_data * @paths: the icc_bulk_data table containing the paths and bandwidth * diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c index 5ad519c9f239..8a1e70e00876 100644 --- a/drivers/interconnect/core.c +++ b/drivers/interconnect/core.c @@ -942,6 +942,8 @@ int icc_link_destroy(struct icc_node *src, struct icc_node *dst) GFP_KERNEL); if (new) src->links = new; + else + ret = -ENOMEM; out: mutex_unlock(&icc_lock); diff --git a/drivers/interconnect/qcom/msm8939.c b/drivers/interconnect/qcom/msm8939.c index dfbec30ed149..20f31a1b4192 100644 --- a/drivers/interconnect/qcom/msm8939.c +++ b/drivers/interconnect/qcom/msm8939.c @@ -131,7 +131,7 @@ DEFINE_QNODE(mas_pcnoc_sdcc_1, MSM8939_MASTER_SDCC_1, 8, -1, -1, MSM8939_PNOC_IN DEFINE_QNODE(mas_pcnoc_sdcc_2, MSM8939_MASTER_SDCC_2, 8, -1, -1, MSM8939_PNOC_INT_1); DEFINE_QNODE(mas_qdss_bam, MSM8939_MASTER_QDSS_BAM, 8, -1, -1, MSM8939_SNOC_QDSS_INT); DEFINE_QNODE(mas_qdss_etr, MSM8939_MASTER_QDSS_ETR, 8, -1, -1, MSM8939_SNOC_QDSS_INT); -DEFINE_QNODE(mas_snoc_cfg, MSM8939_MASTER_SNOC_CFG, 4, 20, -1, MSM8939_SLAVE_SRVC_SNOC); +DEFINE_QNODE(mas_snoc_cfg, MSM8939_MASTER_SNOC_CFG, 4, -1, -1, MSM8939_SLAVE_SRVC_SNOC); DEFINE_QNODE(mas_spdm, MSM8939_MASTER_SPDM, 4, -1, -1, MSM8939_PNOC_MAS_0); DEFINE_QNODE(mas_tcu0, MSM8939_MASTER_TCU0, 16, -1, -1, MSM8939_SLAVE_EBI_CH0, MSM8939_BIMC_SNOC_MAS, MSM8939_SLAVE_AMPSS_L2); DEFINE_QNODE(mas_usb_hs1, MSM8939_MASTER_USB_HS1, 4, -1, -1, MSM8939_PNOC_MAS_1); @@ -156,14 +156,14 @@ DEFINE_QNODE(pcnoc_snoc_mas, MSM8939_PNOC_SNOC_MAS, 8, 29, -1, MSM8939_PNOC_SNOC DEFINE_QNODE(pcnoc_snoc_slv, MSM8939_PNOC_SNOC_SLV, 8, -1, 45, MSM8939_SNOC_INT_0, 
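/*
 * Aside on the s6sy761 hunk above: the controller reports 12-bit
 * coordinates, with the high 8 bits in event[1]/event[2] and the low
 * 4 bits packed into the two nibbles of event[3] (assuming MASK_X and
 * MASK_Y select the high and low nibble respectively). The shift must
 * therefore be 4, not 3: e.g. event[1] = 0x12 with an X nibble of 0x3
 * must decode to x = (0x12 << 4) | 0x3 = 0x123, while the old << 3
 * would have produced 0x93.
 */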
MSM8939_SNOC_INT_BIMC, MSM8939_SNOC_INT_1); DEFINE_QNODE(qdss_int, MSM8939_SNOC_QDSS_INT, 8, -1, -1, MSM8939_SNOC_INT_0, MSM8939_SNOC_INT_BIMC); DEFINE_QNODE(slv_apps_l2, MSM8939_SLAVE_AMPSS_L2, 16, -1, -1, 0); -DEFINE_QNODE(slv_apss, MSM8939_SLAVE_APSS, 4, -1, 20, 0); +DEFINE_QNODE(slv_apss, MSM8939_SLAVE_APSS, 4, -1, -1, 0); DEFINE_QNODE(slv_audio, MSM8939_SLAVE_LPASS, 4, -1, -1, 0); DEFINE_QNODE(slv_bimc_cfg, MSM8939_SLAVE_BIMC_CFG, 4, -1, -1, 0); DEFINE_QNODE(slv_blsp_1, MSM8939_SLAVE_BLSP_1, 4, -1, -1, 0); DEFINE_QNODE(slv_boot_rom, MSM8939_SLAVE_BOOT_ROM, 4, -1, -1, 0); DEFINE_QNODE(slv_camera_cfg, MSM8939_SLAVE_CAMERA_CFG, 4, -1, -1, 0); -DEFINE_QNODE(slv_cats_0, MSM8939_SLAVE_CATS_128, 16, -1, 106, 0); -DEFINE_QNODE(slv_cats_1, MSM8939_SLAVE_OCMEM_64, 8, -1, 107, 0); +DEFINE_QNODE(slv_cats_0, MSM8939_SLAVE_CATS_128, 16, -1, -1, 0); +DEFINE_QNODE(slv_cats_1, MSM8939_SLAVE_OCMEM_64, 8, -1, -1, 0); DEFINE_QNODE(slv_clk_ctl, MSM8939_SLAVE_CLK_CTL, 4, -1, -1, 0); DEFINE_QNODE(slv_crypto_0_cfg, MSM8939_SLAVE_CRYPTO_0_CFG, 4, -1, -1, 0); DEFINE_QNODE(slv_dehr_cfg, MSM8939_SLAVE_DEHR_CFG, 4, -1, -1, 0); @@ -187,20 +187,20 @@ DEFINE_QNODE(slv_sdcc_2, MSM8939_SLAVE_SDCC_2, 4, -1, -1, 0); DEFINE_QNODE(slv_security, MSM8939_SLAVE_SECURITY, 4, -1, -1, 0); DEFINE_QNODE(slv_snoc_cfg, MSM8939_SLAVE_SNOC_CFG, 4, -1, -1, 0); DEFINE_QNODE(slv_spdm, MSM8939_SLAVE_SPDM, 4, -1, -1, 0); -DEFINE_QNODE(slv_srvc_snoc, MSM8939_SLAVE_SRVC_SNOC, 8, -1, 29, 0); +DEFINE_QNODE(slv_srvc_snoc, MSM8939_SLAVE_SRVC_SNOC, 8, -1, -1, 0); DEFINE_QNODE(slv_tcsr, MSM8939_SLAVE_TCSR, 4, -1, -1, 0); DEFINE_QNODE(slv_tlmm, MSM8939_SLAVE_TLMM, 4, -1, -1, 0); DEFINE_QNODE(slv_usb_hs1, MSM8939_SLAVE_USB_HS1, 4, -1, -1, 0); DEFINE_QNODE(slv_usb_hs2, MSM8939_SLAVE_USB_HS2, 4, -1, -1, 0); DEFINE_QNODE(slv_venus_cfg, MSM8939_SLAVE_VENUS_CFG, 4, -1, -1, 0); -DEFINE_QNODE(snoc_bimc_0_mas, MSM8939_SNOC_BIMC_0_MAS, 16, 3, -1, MSM8939_SNOC_BIMC_0_SLV); -DEFINE_QNODE(snoc_bimc_0_slv, MSM8939_SNOC_BIMC_0_SLV, 16, -1, 24, MSM8939_SLAVE_EBI_CH0); +DEFINE_QNODE(snoc_bimc_0_mas, MSM8939_SNOC_BIMC_0_MAS, 16, -1, -1, MSM8939_SNOC_BIMC_0_SLV); +DEFINE_QNODE(snoc_bimc_0_slv, MSM8939_SNOC_BIMC_0_SLV, 16, -1, -1, MSM8939_SLAVE_EBI_CH0); DEFINE_QNODE(snoc_bimc_1_mas, MSM8939_SNOC_BIMC_1_MAS, 16, 76, -1, MSM8939_SNOC_BIMC_1_SLV); DEFINE_QNODE(snoc_bimc_1_slv, MSM8939_SNOC_BIMC_1_SLV, 16, -1, 104, MSM8939_SLAVE_EBI_CH0); DEFINE_QNODE(snoc_bimc_2_mas, MSM8939_SNOC_BIMC_2_MAS, 16, -1, -1, MSM8939_SNOC_BIMC_2_SLV); DEFINE_QNODE(snoc_bimc_2_slv, MSM8939_SNOC_BIMC_2_SLV, 16, -1, -1, MSM8939_SLAVE_EBI_CH0); DEFINE_QNODE(snoc_int_0, MSM8939_SNOC_INT_0, 8, 99, 130, MSM8939_SLAVE_QDSS_STM, MSM8939_SLAVE_IMEM, MSM8939_SNOC_PNOC_MAS); -DEFINE_QNODE(snoc_int_1, MSM8939_SNOC_INT_1, 8, 100, 131, MSM8939_SLAVE_APSS, MSM8939_SLAVE_CATS_128, MSM8939_SLAVE_OCMEM_64); +DEFINE_QNODE(snoc_int_1, MSM8939_SNOC_INT_1, 8, -1, -1, MSM8939_SLAVE_APSS, MSM8939_SLAVE_CATS_128, MSM8939_SLAVE_OCMEM_64); DEFINE_QNODE(snoc_int_bimc, MSM8939_SNOC_INT_BIMC, 8, 101, 132, MSM8939_SNOC_BIMC_1_MAS); DEFINE_QNODE(snoc_pcnoc_mas, MSM8939_SNOC_PNOC_MAS, 8, -1, -1, MSM8939_SNOC_PNOC_SLV); DEFINE_QNODE(snoc_pcnoc_slv, MSM8939_SNOC_PNOC_SLV, 8, -1, -1, MSM8939_PNOC_INT_0); diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 15536e321df5..c8f57e3e058d 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -279,8 +279,13 @@ config XTENSA_MX select GENERIC_IRQ_EFFECTIVE_AFF_MASK config XILINX_INTC - bool + bool "Xilinx Interrupt Controller IP" + depends on MICROBLAZE 
|| ARCH_ZYNQ || ARCH_ZYNQMP select IRQ_DOMAIN + help + Support for the Xilinx Interrupt Controller IP core. + This is used as a primary controller with MicroBlaze and can also + be used as a secondary chained controller on other platforms. config IRQ_CROSSBAR bool @@ -577,4 +582,15 @@ config MST_IRQ help Support MStar Interrupt Controller. +config WPCM450_AIC + bool "Nuvoton WPCM450 Advanced Interrupt Controller" + depends on ARCH_WPCM450 + help + Support for the interrupt controller in the Nuvoton WPCM450 BMC SoC. + +config IRQ_IDT3243X + bool + select GENERIC_IRQ_CHIP + select IRQ_DOMAIN + endmenu diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index c59b95a0532c..18573602a939 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -113,3 +113,5 @@ obj-$(CONFIG_LOONGSON_PCH_MSI) += irq-loongson-pch-msi.o obj-$(CONFIG_MST_IRQ) += irq-mst-intc.o obj-$(CONFIG_SL28CPLD_INTC) += irq-sl28cpld.o obj-$(CONFIG_MACH_REALTEK_RTL) += irq-realtek-rtl.o +obj-$(CONFIG_WPCM450_AIC) += irq-wpcm450-aic.o +obj-$(CONFIG_IRQ_IDT3243X) += irq-idt3243x.o diff --git a/drivers/irqchip/irq-aspeed-vic.c b/drivers/irqchip/irq-aspeed-vic.c index 6567ed782f82..58717cd44f99 100644 --- a/drivers/irqchip/irq-aspeed-vic.c +++ b/drivers/irqchip/irq-aspeed-vic.c @@ -71,7 +71,7 @@ static void vic_init_hw(struct aspeed_vic *vic) writel(0, vic->base + AVIC_INT_SELECT); writel(0, vic->base + AVIC_INT_SELECT + 4); - /* Some interrupts have a programable high/low level trigger + /* Some interrupts have a programmable high/low level trigger * (4 GPIO direct inputs), for now we assume this was configured * by firmware. We read which ones are edge now. */ @@ -203,7 +203,7 @@ static int __init avic_of_init(struct device_node *node, } vic->base = regs; - /* Initialize soures, all masked */ + /* Initialize sources, all masked */ vic_init_hw(vic); /* Ready to receive interrupts */ diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c index c7c9e976acbb..ad59656ccc28 100644 --- a/drivers/irqchip/irq-bcm7120-l2.c +++ b/drivers/irqchip/irq-bcm7120-l2.c @@ -309,7 +309,7 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn, if (data->can_wake) { /* This IRQ chip can wake the system, set all - * relevant child interupts in wake_enabled mask + * relevant child interrupts in wake_enabled mask */ gc->wake_enabled = 0xffffffff; gc->wake_enabled &= ~gc->unused; diff --git a/drivers/irqchip/irq-csky-apb-intc.c b/drivers/irqchip/irq-csky-apb-intc.c index 5a2ec43b7ddd..ab91afa86755 100644 --- a/drivers/irqchip/irq-csky-apb-intc.c +++ b/drivers/irqchip/irq-csky-apb-intc.c @@ -176,7 +176,7 @@ gx_intc_init(struct device_node *node, struct device_node *parent) writel(0x0, reg_base + GX_INTC_NEN63_32); /* - * Initial mask reg with all unmasked, because we only use enalbe reg + * Initial mask reg with all unmasked, because we only use enable reg */ writel(0x0, reg_base + GX_INTC_NMASK31_00); writel(0x0, reg_base + GX_INTC_NMASK63_32); diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c index fbec07d634ad..4116b48e60af 100644 --- a/drivers/irqchip/irq-gic-v2m.c +++ b/drivers/irqchip/irq-gic-v2m.c @@ -371,7 +371,7 @@ static int __init gicv2m_init_one(struct fwnode_handle *fwnode, * the MSI data is the absolute value within the range from * spi_start to (spi_start + num_spis). 
* - * Broadom NS2 GICv2m implementation has an erratum where the MSI data + * Broadcom NS2 GICv2m implementation has an erratum where the MSI data * is 'spi_number - 32' * * Reading that register fails on the Graviton implementation diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index ed46e6057e33..c3485b230d70 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -1492,7 +1492,7 @@ static void its_vlpi_set_doorbell(struct irq_data *d, bool enable) * * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI * value or to 1023, depending on the enable bit. But that - * would be issueing a mapping for an /existing/ DevID+EventID + * would be issuing a mapping for an /existing/ DevID+EventID * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI * to the /same/ vPE, using this opportunity to adjust the * doorbell. Mouahahahaha. We loves it, Precious. @@ -3122,7 +3122,7 @@ static void its_cpu_init_lpis(void) /* * It's possible for CPU to receive VLPIs before it is - * sheduled as a vPE, especially for the first CPU, and the + * scheduled as a vPE, especially for the first CPU, and the * VLPI with INTID larger than 2^(IDbits+1) will be considered * as out of range and dropped by GIC. * So we initialize IDbits to known value to avoid VLPI drop. @@ -3616,7 +3616,7 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, /* * If all interrupts have been freed, start mopping the - * floor. This is conditionned on the device not being shared. + * floor. This is conditioned on the device not being shared. */ if (!its_dev->shared && bitmap_empty(its_dev->event_map.lpi_map, @@ -4194,7 +4194,7 @@ static int its_sgi_set_affinity(struct irq_data *d, { /* * There is no notion of affinity for virtual SGIs, at least - * not on the host (since they can only be targetting a vPE). + * not on the host (since they can only be targeting a vPE). * Tell the kernel we've done whatever it asked for. */ irq_data_update_effective_affinity(d, mask_val); @@ -4239,7 +4239,7 @@ static int its_sgi_get_irqchip_state(struct irq_data *d, /* * Locking galore! We can race against two different events: * - * - Concurent vPE affinity change: we must make sure it cannot + * - Concurrent vPE affinity change: we must make sure it cannot * happen, or we'll talk to the wrong redistributor. This is * identical to what happens with vLPIs. 
* diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c index 563a9b366294..e81e89a81cb5 100644 --- a/drivers/irqchip/irq-gic-v3-mbi.c +++ b/drivers/irqchip/irq-gic-v3-mbi.c @@ -303,7 +303,7 @@ int __init mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent) reg = of_get_property(np, "mbi-alias", NULL); if (reg) { mbi_phys_base = of_translate_address(np, reg); - if (mbi_phys_base == OF_BAD_ADDR) { + if (mbi_phys_base == (phys_addr_t)OF_BAD_ADDR) { ret = -ENXIO; goto err_free_mbi; } diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index eb0ee356a629..37a23aa6de37 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -648,6 +648,10 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs irqnr = gic_read_iar(); + /* Check for special IDs first */ + if ((irqnr >= 1020 && irqnr <= 1023)) + return; + if (gic_supports_nmi() && unlikely(gic_read_rpr() == GICD_INT_NMI_PRI)) { gic_handle_nmi(irqnr, regs); @@ -659,10 +663,6 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs gic_arch_enable_irqs(); } - /* Check for special IDs first */ - if ((irqnr >= 1020 && irqnr <= 1023)) - return; - if (static_branch_likely(&supports_deactivate_key)) gic_write_eoir(irqnr); else @@ -1379,7 +1379,7 @@ static int gic_irq_domain_translate(struct irq_domain *d, /* * Make it clear that broken DTs are... broken. - * Partitionned PPIs are an unfortunate exception. + * Partitioned PPIs are an unfortunate exception. */ WARN_ON(*type == IRQ_TYPE_NONE && fwspec->param[0] != GIC_IRQ_TYPE_PARTITION); diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c index 5d1dc9915272..4ea71b28f9f5 100644 --- a/drivers/irqchip/irq-gic-v4.c +++ b/drivers/irqchip/irq-gic-v4.c @@ -87,17 +87,40 @@ static struct irq_domain *gic_domain; static const struct irq_domain_ops *vpe_domain_ops; static const struct irq_domain_ops *sgi_domain_ops; +#ifdef CONFIG_ARM64 +#include <asm/cpufeature.h> + +bool gic_cpuif_has_vsgi(void) +{ + unsigned long fld, reg = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); + + fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64PFR0_GIC_SHIFT); + + return fld >= 0x3; +} +#else +bool gic_cpuif_has_vsgi(void) +{ + return false; +} +#endif + static bool has_v4_1(void) { return !!sgi_domain_ops; } +static bool has_v4_1_sgi(void) +{ + return has_v4_1() && gic_cpuif_has_vsgi(); +} + static int its_alloc_vcpu_sgis(struct its_vpe *vpe, int idx) { char *name; int sgi_base; - if (!has_v4_1()) + if (!has_v4_1_sgi()) return 0; name = kasprintf(GFP_KERNEL, "GICv4-sgi-%d", task_pid_nr(current)); @@ -182,7 +205,7 @@ static void its_free_sgi_irqs(struct its_vm *vm) { int i; - if (!has_v4_1()) + if (!has_v4_1_sgi()) return; for (i = 0; i < vm->nr_vpes; i++) { diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c index a6ed877d9dd3..058ebaebe2c4 100644 --- a/drivers/irqchip/irq-hip04.c +++ b/drivers/irqchip/irq-hip04.c @@ -1,9 +1,9 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Hisilicon HiP04 INTC + * HiSilicon HiP04 INTC * * Copyright (C) 2002-2014 ARM Limited. - * Copyright (c) 2013-2014 Hisilicon Ltd. + * Copyright (c) 2013-2014 HiSilicon Ltd. * Copyright (c) 2013-2014 Linaro Ltd. 
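 * (Aside on the irq-gic-v3-mbi hunk above: OF_BAD_ADDR is ((u64)-1),
 * while mbi_phys_base is a phys_addr_t that may be only 32 bits wide,
 * so the uncast comparison could never be true on 32-bit builds and
 * translation failures went unnoticed. An equivalent fix would keep
 * the value in the wide type until after the check, sketch:
 *
 *	u64 addr = of_translate_address(np, reg);
 *	if (addr == OF_BAD_ADDR)
 *		return -ENXIO;
 *	mbi_phys_base = addr;
 * )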
* * Interrupt architecture for the HIP04 INTC: diff --git a/drivers/irqchip/irq-idt3243x.c b/drivers/irqchip/irq-idt3243x.c new file mode 100644 index 000000000000..f0996820077a --- /dev/null +++ b/drivers/irqchip/irq-idt3243x.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for IDT/Renesas 79RC3243x Interrupt Controller. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> + +#define IDT_PIC_NR_IRQS 32 + +#define IDT_PIC_IRQ_PEND 0x00 +#define IDT_PIC_IRQ_MASK 0x08 + +struct idt_pic_data { + void __iomem *base; + struct irq_domain *irq_domain; + struct irq_chip_generic *gc; +}; + +static void idt_irq_dispatch(struct irq_desc *desc) +{ + struct idt_pic_data *idtpic = irq_desc_get_handler_data(desc); + struct irq_chip *host_chip = irq_desc_get_chip(desc); + u32 pending, hwirq, virq; + + chained_irq_enter(host_chip, desc); + + pending = irq_reg_readl(idtpic->gc, IDT_PIC_IRQ_PEND); + pending &= ~idtpic->gc->mask_cache; + while (pending) { + hwirq = __fls(pending); + virq = irq_linear_revmap(idtpic->irq_domain, hwirq); + if (virq) + generic_handle_irq(virq); + pending &= ~(1 << hwirq); + } + + chained_irq_exit(host_chip, desc); +} + +static int idt_pic_init(struct device_node *of_node, struct device_node *parent) +{ + struct irq_domain *domain; + struct idt_pic_data *idtpic; + struct irq_chip_generic *gc; + struct irq_chip_type *ct; + unsigned int parent_irq; + int ret = 0; + + idtpic = kzalloc(sizeof(*idtpic), GFP_KERNEL); + if (!idtpic) { + ret = -ENOMEM; + goto out_err; + } + + parent_irq = irq_of_parse_and_map(of_node, 0); + if (!parent_irq) { + pr_err("Failed to map parent IRQ!\n"); + ret = -EINVAL; + goto out_free; + } + + idtpic->base = of_iomap(of_node, 0); + if (!idtpic->base) { + pr_err("Failed to map base address!\n"); + ret = -ENOMEM; + goto out_unmap_irq; + } + + domain = irq_domain_add_linear(of_node, IDT_PIC_NR_IRQS, + &irq_generic_chip_ops, NULL); + if (!domain) { + pr_err("Failed to add irqdomain!\n"); + ret = -ENOMEM; + goto out_iounmap; + } + idtpic->irq_domain = domain; + + ret = irq_alloc_domain_generic_chips(domain, 32, 1, "IDTPIC", + handle_level_irq, 0, + IRQ_NOPROBE | IRQ_LEVEL, 0); + if (ret) + goto out_domain_remove; + + gc = irq_get_domain_generic_chip(domain, 0); + gc->reg_base = idtpic->base; + gc->private = idtpic; + + ct = gc->chip_types; + ct->regs.mask = IDT_PIC_IRQ_MASK; + ct->chip.irq_mask = irq_gc_mask_set_bit; + ct->chip.irq_unmask = irq_gc_mask_clr_bit; + idtpic->gc = gc; + + /* Mask interrupts. */ + writel(0xffffffff, idtpic->base + IDT_PIC_IRQ_MASK); + gc->mask_cache = 0xffffffff; + + irq_set_chained_handler_and_data(parent_irq, + idt_irq_dispatch, idtpic); + + return 0; + +out_domain_remove: + irq_domain_remove(domain); +out_iounmap: + iounmap(idtpic->base); +out_unmap_irq: + irq_dispose_mapping(parent_irq); +out_free: + kfree(idtpic); +out_err: + pr_err("Failed to initialize! 
(errno = %d)\n", ret); + return ret; +} + +IRQCHIP_DECLARE(idt_pic, "idt,32434-pic", idt_pic_init); diff --git a/drivers/irqchip/irq-jcore-aic.c b/drivers/irqchip/irq-jcore-aic.c index 033bccb41455..5f47d8ee4ae3 100644 --- a/drivers/irqchip/irq-jcore-aic.c +++ b/drivers/irqchip/irq-jcore-aic.c @@ -100,11 +100,11 @@ static int __init aic_irq_of_init(struct device_node *node, jcore_aic.irq_unmask = noop; jcore_aic.name = "AIC"; - domain = irq_domain_add_linear(node, dom_sz, &jcore_aic_irqdomain_ops, + domain = irq_domain_add_legacy(node, dom_sz - min_irq, min_irq, min_irq, + &jcore_aic_irqdomain_ops, &jcore_aic); if (!domain) return -ENOMEM; - irq_create_strict_mappings(domain, min_irq, min_irq, dom_sz - min_irq); return 0; } diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c index 9bf6b9a5f734..f790ca6d78aa 100644 --- a/drivers/irqchip/irq-loongson-pch-pic.c +++ b/drivers/irqchip/irq-loongson-pch-pic.c @@ -163,7 +163,7 @@ static void pch_pic_reset(struct pch_pic *priv) int i; for (i = 0; i < PIC_COUNT; i++) { - /* Write vectore ID */ + /* Write vectored ID */ writeb(priv->ht_vec_base + i, priv->base + PCH_INT_HTVEC(i)); /* Hardcode route to HT0 Lo */ writeb(1, priv->base + PCH_INT_ROUTE(i)); diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c index ff7627b57772..2cb45c6b8501 100644 --- a/drivers/irqchip/irq-mbigen.c +++ b/drivers/irqchip/irq-mbigen.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2015 Hisilicon Limited, All Rights Reserved. + * Copyright (C) 2015 HiSilicon Limited, All Rights Reserved. * Author: Jun Ma <majun258@huawei.com> * Author: Yun Wu <wuyun.wu@huawei.com> */ @@ -390,4 +390,4 @@ module_platform_driver(mbigen_platform_driver); MODULE_AUTHOR("Jun Ma <majun258@huawei.com>"); MODULE_AUTHOR("Yun Wu <wuyun.wu@huawei.com>"); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Hisilicon MBI Generator driver"); +MODULE_DESCRIPTION("HiSilicon MBI Generator driver"); diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c index bc7aebcc96e9..e50676ce2ec8 100644 --- a/drivers/irqchip/irq-meson-gpio.c +++ b/drivers/irqchip/irq-meson-gpio.c @@ -227,7 +227,7 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl, /* * Get the hwirq number assigned to this channel through - * a pointer the channel_irq table. The added benifit of this + * a pointer the channel_irq table. The added benefit of this * method is that we can also retrieve the channel index with * it, using the table base. 
*/ diff --git a/drivers/irqchip/irq-mst-intc.c b/drivers/irqchip/irq-mst-intc.c index 143657b0cf28..f6133ae28155 100644 --- a/drivers/irqchip/irq-mst-intc.c +++ b/drivers/irqchip/irq-mst-intc.c @@ -13,15 +13,27 @@ #include <linux/of_irq.h> #include <linux/slab.h> #include <linux/spinlock.h> +#include <linux/syscore_ops.h> -#define INTC_MASK 0x0 -#define INTC_EOI 0x20 +#define MST_INTC_MAX_IRQS 64 + +#define INTC_MASK 0x0 +#define INTC_REV_POLARITY 0x10 +#define INTC_EOI 0x20 + +#ifdef CONFIG_PM_SLEEP +static LIST_HEAD(mst_intc_list); +#endif struct mst_intc_chip_data { raw_spinlock_t lock; unsigned int irq_start, nr_irqs; void __iomem *base; bool no_eoi; +#ifdef CONFIG_PM_SLEEP + struct list_head entry; + u16 saved_polarity_conf[DIV_ROUND_UP(MST_INTC_MAX_IRQS, 16)]; +#endif }; static void mst_set_irq(struct irq_data *d, u32 offset) @@ -78,6 +90,24 @@ static void mst_intc_eoi_irq(struct irq_data *d) irq_chip_eoi_parent(d); } +static int mst_irq_chip_set_type(struct irq_data *data, unsigned int type) +{ + switch (type) { + case IRQ_TYPE_LEVEL_LOW: + case IRQ_TYPE_EDGE_FALLING: + mst_set_irq(data, INTC_REV_POLARITY); + break; + case IRQ_TYPE_LEVEL_HIGH: + case IRQ_TYPE_EDGE_RISING: + mst_clear_irq(data, INTC_REV_POLARITY); + break; + default: + return -EINVAL; + } + + return irq_chip_set_type_parent(data, IRQ_TYPE_LEVEL_HIGH); +} + static struct irq_chip mst_intc_chip = { .name = "mst-intc", .irq_mask = mst_intc_mask_irq, @@ -87,13 +117,62 @@ static struct irq_chip mst_intc_chip = { .irq_set_irqchip_state = irq_chip_set_parent_state, .irq_set_affinity = irq_chip_set_affinity_parent, .irq_set_vcpu_affinity = irq_chip_set_vcpu_affinity_parent, - .irq_set_type = irq_chip_set_type_parent, + .irq_set_type = mst_irq_chip_set_type, .irq_retrigger = irq_chip_retrigger_hierarchy, .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND, }; +#ifdef CONFIG_PM_SLEEP +static void mst_intc_polarity_save(struct mst_intc_chip_data *cd) +{ + int i; + void __iomem *addr = cd->base + INTC_REV_POLARITY; + + for (i = 0; i < DIV_ROUND_UP(cd->nr_irqs, 16); i++) + cd->saved_polarity_conf[i] = readw_relaxed(addr + i * 4); +} + +static void mst_intc_polarity_restore(struct mst_intc_chip_data *cd) +{ + int i; + void __iomem *addr = cd->base + INTC_REV_POLARITY; + + for (i = 0; i < DIV_ROUND_UP(cd->nr_irqs, 16); i++) + writew_relaxed(cd->saved_polarity_conf[i], addr + i * 4); +} + +static void mst_irq_resume(void) +{ + struct mst_intc_chip_data *cd; + + list_for_each_entry(cd, &mst_intc_list, entry) + mst_intc_polarity_restore(cd); +} + +static int mst_irq_suspend(void) +{ + struct mst_intc_chip_data *cd; + + list_for_each_entry(cd, &mst_intc_list, entry) + mst_intc_polarity_save(cd); + return 0; +} + +static struct syscore_ops mst_irq_syscore_ops = { + .suspend = mst_irq_suspend, + .resume = mst_irq_resume, +}; + +static int __init mst_irq_pm_init(void) +{ + register_syscore_ops(&mst_irq_syscore_ops); + return 0; +} +late_initcall(mst_irq_pm_init); +#endif + static int mst_intc_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec, unsigned long *hwirq, @@ -145,6 +224,15 @@ static int mst_intc_domain_alloc(struct irq_domain *domain, unsigned int virq, parent_fwspec = *fwspec; parent_fwspec.fwnode = domain->parent->fwnode; parent_fwspec.param[1] = cd->irq_start + hwirq; + + /* + * mst-intc latch the interrupt request if it's edge triggered, + * so the output signal to parent GIC is always level sensitive. 
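 * (Aside on the PM support added above: syscore_ops callbacks run very
 * late in suspend and very early in resume, on a single CPU with
 * interrupts disabled, which is why the polarity registers are saved
 * and restored there rather than in per-device PM callbacks; since
 * syscore has no device argument, the driver keeps its instances on a
 * list. The idiom, in sketch form with hypothetical helpers:
 *
 *	static int example_suspend(void)
 *	{
 *		example_save_state();
 *		return 0;
 *	}
 *
 *	static void example_resume(void)
 *	{
 *		example_restore_state();
 *	}
 *
 *	static struct syscore_ops example_syscore_ops = {
 *		.suspend = example_suspend,
 *		.resume  = example_resume,
 *	};
 *
 *	register_syscore_ops(&example_syscore_ops);
 * )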
+ * And if the irq signal is active low, configure it to active high + * to meet GIC SPI spec in mst_irq_chip_set_type via REV_POLARITY bit. + */ + parent_fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH; + return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &parent_fwspec); } @@ -193,6 +281,10 @@ static int __init mst_intc_of_init(struct device_node *dn, return -ENOMEM; } +#ifdef CONFIG_PM_SLEEP + INIT_LIST_HEAD(&cd->entry); + list_add_tail(&cd->entry, &mst_intc_list); +#endif return 0; } diff --git a/drivers/irqchip/irq-mtk-cirq.c b/drivers/irqchip/irq-mtk-cirq.c index 69ba8ce3c178..9bca0918078e 100644 --- a/drivers/irqchip/irq-mtk-cirq.c +++ b/drivers/irqchip/irq-mtk-cirq.c @@ -217,7 +217,7 @@ static void mtk_cirq_resume(void) { u32 value; - /* flush recored interrupts, will send signals to parent controller */ + /* flush recorded interrupts, will send signals to parent controller */ value = readl_relaxed(cirq_data->base + CIRQ_CONTROL); writel_relaxed(value | CIRQ_FLUSH, cirq_data->base + CIRQ_CONTROL); diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c index a671938fd97f..d1f5740cd575 100644 --- a/drivers/irqchip/irq-mxs.c +++ b/drivers/irqchip/irq-mxs.c @@ -58,7 +58,7 @@ struct icoll_priv { static struct icoll_priv icoll_priv; static struct irq_domain *icoll_domain; -/* calculate bit offset depending on number of intterupt per register */ +/* calculate bit offset depending on number of interrupt per register */ static u32 icoll_intr_bitshift(struct irq_data *d, u32 bit) { /* @@ -68,7 +68,7 @@ static u32 icoll_intr_bitshift(struct irq_data *d, u32 bit) return bit << ((d->hwirq & 3) << 3); } -/* calculate mem offset depending on number of intterupt per register */ +/* calculate mem offset depending on number of interrupt per register */ static void __iomem *icoll_intr_reg(struct irq_data *d) { /* offset = hwirq / intr_per_reg * 0x10 */ diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index 6f432d2a5ceb..97d4d04b0a80 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -77,8 +77,8 @@ struct plic_handler { void __iomem *enable_base; struct plic_priv *priv; }; -static int plic_parent_irq; -static bool plic_cpuhp_setup_done; +static int plic_parent_irq __ro_after_init; +static bool plic_cpuhp_setup_done __ro_after_init; static DEFINE_PER_CPU(struct plic_handler, plic_handlers); static inline void plic_toggle(struct plic_handler *handler, diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c index 8662d7b7b262..b9db90c4aa56 100644 --- a/drivers/irqchip/irq-stm32-exti.c +++ b/drivers/irqchip/irq-stm32-exti.c @@ -193,7 +193,14 @@ static const struct stm32_desc_irq stm32mp1_desc_irq[] = { { .exti = 23, .irq_parent = 72, .chip = &stm32_exti_h_chip_direct }, { .exti = 24, .irq_parent = 95, .chip = &stm32_exti_h_chip_direct }, { .exti = 25, .irq_parent = 107, .chip = &stm32_exti_h_chip_direct }, + { .exti = 26, .irq_parent = 37, .chip = &stm32_exti_h_chip_direct }, + { .exti = 27, .irq_parent = 38, .chip = &stm32_exti_h_chip_direct }, + { .exti = 28, .irq_parent = 39, .chip = &stm32_exti_h_chip_direct }, + { .exti = 29, .irq_parent = 71, .chip = &stm32_exti_h_chip_direct }, { .exti = 30, .irq_parent = 52, .chip = &stm32_exti_h_chip_direct }, + { .exti = 31, .irq_parent = 53, .chip = &stm32_exti_h_chip_direct }, + { .exti = 32, .irq_parent = 82, .chip = &stm32_exti_h_chip_direct }, + { .exti = 33, .irq_parent = 83, .chip = &stm32_exti_h_chip_direct }, { .exti = 47, .irq_parent = 93, .chip 
= &stm32_exti_h_chip_direct }, { .exti = 48, .irq_parent = 138, .chip = &stm32_exti_h_chip_direct }, { .exti = 50, .irq_parent = 139, .chip = &stm32_exti_h_chip_direct }, diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c index fb78d6623556..9ea94456b178 100644 --- a/drivers/irqchip/irq-sun4i.c +++ b/drivers/irqchip/irq-sun4i.c @@ -189,7 +189,7 @@ static void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs) * 3) spurious irq * So if we immediately get a reading of 0, check the irq-pending reg * to differentiate between 2 and 3. We only do this once to avoid - * the extra check in the common case of 1 hapening after having + * the extra check in the common case of 1 happening after having * read the vector-reg once. */ hwirq = readl(irq_ic_data->irq_base + SUN4I_IRQ_VECTOR_REG) >> 2; diff --git a/drivers/irqchip/irq-tb10x.c b/drivers/irqchip/irq-tb10x.c index 9e456497c1c4..9a63b02b8176 100644 --- a/drivers/irqchip/irq-tb10x.c +++ b/drivers/irqchip/irq-tb10x.c @@ -60,6 +60,7 @@ static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type) break; case IRQ_TYPE_NONE: flow_type = IRQ_TYPE_LEVEL_LOW; + fallthrough; case IRQ_TYPE_LEVEL_LOW: mod ^= im; pol ^= im; diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c index 532d0ae172d9..ca1f593f4d13 100644 --- a/drivers/irqchip/irq-ti-sci-inta.c +++ b/drivers/irqchip/irq-ti-sci-inta.c @@ -78,7 +78,7 @@ struct ti_sci_inta_vint_desc { * struct ti_sci_inta_irq_domain - Structure representing a TISCI based * Interrupt Aggregator IRQ domain. * @sci: Pointer to TISCI handle - * @vint: TISCI resource pointer representing IA inerrupts. + * @vint: TISCI resource pointer representing IA interrupts. * @global_event: TISCI resource pointer representing global events. * @vint_list: List of the vints active in the system * @vint_mutex: Mutex to protect vint_list diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c index e46036374227..62f3d29f9042 100644 --- a/drivers/irqchip/irq-vic.c +++ b/drivers/irqchip/irq-vic.c @@ -163,7 +163,7 @@ static struct syscore_ops vic_syscore_ops = { }; /** - * vic_pm_init - initicall to register VIC pm + * vic_pm_init - initcall to register VIC pm * * This is called via late_initcall() to register * the resources for the VICs due to the early @@ -397,7 +397,7 @@ static void __init vic_clear_interrupts(void __iomem *base) /* * The PL190 cell from ARM has been modified by ST to handle 64 interrupts. * The original cell has 32 interrupts, while the modified one has 64, - * replocating two blocks 0x00..0x1f in 0x20..0x3f. In that case + * replicating two blocks 0x00..0x1f in 0x20..0x3f. In that case * the probe function is called twice, with base set to offset 000 * and 020 within the page. We call this "second block". 
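The irq-tb10x hunk above only adds a fallthrough annotation, but the pattern it documents deserves spelling out: IRQ_TYPE_NONE is first rewritten to a default type and then deliberately shares that default's handling. A hypothetical miniature of the same control flow:

	enum mode { MODE_NONE, MODE_LEVEL_LOW, MODE_LEVEL_HIGH };

	static int normalize(enum mode m)
	{
		switch (m) {
		case MODE_NONE:
			m = MODE_LEVEL_LOW;
			/* fallthrough -- NONE is treated exactly like LEVEL_LOW */
		case MODE_LEVEL_LOW:
			return 0;
		case MODE_LEVEL_HIGH:
			return 1;
		}
		return -1;
	}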
*/ diff --git a/drivers/irqchip/irq-wpcm450-aic.c b/drivers/irqchip/irq-wpcm450-aic.c new file mode 100644 index 000000000000..f3ac392d5bc8 --- /dev/null +++ b/drivers/irqchip/irq-wpcm450-aic.c @@ -0,0 +1,161 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright 2021 Jonathan NeuschĂ€fer + +#include <linux/irqchip.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/printk.h> + +#include <asm/exception.h> + +#define AIC_SCR(x) ((x)*4) /* Source control registers */ +#define AIC_GEN 0x84 /* Interrupt group enable control register */ +#define AIC_GRSR 0x88 /* Interrupt group raw status register */ +#define AIC_IRSR 0x100 /* Interrupt raw status register */ +#define AIC_IASR 0x104 /* Interrupt active status register */ +#define AIC_ISR 0x108 /* Interrupt status register */ +#define AIC_IPER 0x10c /* Interrupt priority encoding register */ +#define AIC_ISNR 0x110 /* Interrupt source number register */ +#define AIC_IMR 0x114 /* Interrupt mask register */ +#define AIC_OISR 0x118 /* Output interrupt status register */ +#define AIC_MECR 0x120 /* Mask enable command register */ +#define AIC_MDCR 0x124 /* Mask disable command register */ +#define AIC_SSCR 0x128 /* Source set command register */ +#define AIC_SCCR 0x12c /* Source clear command register */ +#define AIC_EOSCR 0x130 /* End of service command register */ + +#define AIC_SCR_SRCTYPE_LOW_LEVEL (0 << 6) +#define AIC_SCR_SRCTYPE_HIGH_LEVEL (1 << 6) +#define AIC_SCR_SRCTYPE_NEG_EDGE (2 << 6) +#define AIC_SCR_SRCTYPE_POS_EDGE (3 << 6) +#define AIC_SCR_PRIORITY(x) (x) +#define AIC_SCR_PRIORITY_MASK 0x7 + +#define AIC_NUM_IRQS 32 + +struct wpcm450_aic { + void __iomem *regs; + struct irq_domain *domain; +}; + +static struct wpcm450_aic *aic; + +static void wpcm450_aic_init_hw(void) +{ + int i; + + /* Disable (mask) all interrupts */ + writel(0xffffffff, aic->regs + AIC_MDCR); + + /* + * Make sure the interrupt controller is ready to serve new interrupts. + * Reading from IPER indicates that the nIRQ signal may be deasserted, + * and writing to EOSCR indicates that interrupt handling has finished. + */ + readl(aic->regs + AIC_IPER); + writel(0, aic->regs + AIC_EOSCR); + + /* Initialize trigger mode and priority of each interrupt source */ + for (i = 0; i < AIC_NUM_IRQS; i++) + writel(AIC_SCR_SRCTYPE_HIGH_LEVEL | AIC_SCR_PRIORITY(7), + aic->regs + AIC_SCR(i)); +} + +static void __exception_irq_entry wpcm450_aic_handle_irq(struct pt_regs *regs) +{ + int hwirq; + + /* Determine the interrupt source */ + /* Read IPER to signal that nIRQ can be de-asserted */ + hwirq = readl(aic->regs + AIC_IPER) / 4; + + handle_domain_irq(aic->domain, hwirq, regs); +} + +static void wpcm450_aic_eoi(struct irq_data *d) +{ + /* Signal end-of-service */ + writel(0, aic->regs + AIC_EOSCR); +} + +static void wpcm450_aic_mask(struct irq_data *d) +{ + unsigned int mask = BIT(d->hwirq); + + /* Disable (mask) the interrupt */ + writel(mask, aic->regs + AIC_MDCR); +} + +static void wpcm450_aic_unmask(struct irq_data *d) +{ + unsigned int mask = BIT(d->hwirq); + + /* Enable (unmask) the interrupt */ + writel(mask, aic->regs + AIC_MECR); +} + +static int wpcm450_aic_set_type(struct irq_data *d, unsigned int flow_type) +{ + /* + * The hardware supports high/low level, as well as rising/falling edge + * modes, and the DT binding accommodates for that, but as long as + * other modes than high level mode are not used and can't be tested, + * they are rejected in this driver. 
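In the new wpcm450-aic driver above, each line gets a 32-bit source-control register holding the trigger type in bits 7:6 and the priority in bits 2:0. A sketch of composing such a value, with macro names shortened from the driver's:

	#define SCR_SRCTYPE_HIGH_LEVEL	(1u << 6)
	#define SCR_PRIORITY(x)		((x) & 0x7)

	static unsigned int scr_value(unsigned int prio)
	{
		/* High-level trigger, caller-chosen priority 0..7 */
		return SCR_SRCTYPE_HIGH_LEVEL | SCR_PRIORITY(prio);
	}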
+ */ + if ((flow_type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_LEVEL_HIGH) + return -EINVAL; + + return 0; +} + +static struct irq_chip wpcm450_aic_chip = { + .name = "wpcm450-aic", + .irq_eoi = wpcm450_aic_eoi, + .irq_mask = wpcm450_aic_mask, + .irq_unmask = wpcm450_aic_unmask, + .irq_set_type = wpcm450_aic_set_type, +}; + +static int wpcm450_aic_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq) +{ + if (hwirq >= AIC_NUM_IRQS) + return -EPERM; + + irq_set_chip_and_handler(irq, &wpcm450_aic_chip, handle_fasteoi_irq); + irq_set_chip_data(irq, aic); + irq_set_probe(irq); + + return 0; +} + +static const struct irq_domain_ops wpcm450_aic_ops = { + .map = wpcm450_aic_map, + .xlate = irq_domain_xlate_twocell, +}; + +static int __init wpcm450_aic_of_init(struct device_node *node, + struct device_node *parent) +{ + if (parent) + return -EINVAL; + + aic = kzalloc(sizeof(*aic), GFP_KERNEL); + if (!aic) + return -ENOMEM; + + aic->regs = of_iomap(node, 0); + if (!aic->regs) { + pr_err("Failed to map WPCM450 AIC registers\n"); + return -ENOMEM; + } + + wpcm450_aic_init_hw(); + + set_handle_irq(wpcm450_aic_handle_irq); + + aic->domain = irq_domain_add_linear(node, AIC_NUM_IRQS, &wpcm450_aic_ops, aic); + + return 0; +} + +IRQCHIP_DECLARE(wpcm450_aic, "nuvoton,wpcm450-aic", wpcm450_aic_of_init); diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c index 1d3d273309bd..8cd1bfc73057 100644 --- a/drivers/irqchip/irq-xilinx-intc.c +++ b/drivers/irqchip/irq-xilinx-intc.c @@ -210,7 +210,7 @@ static int __init xilinx_intc_of_init(struct device_node *intc, /* * Disable all external interrupts until they are - * explicity requested. + * explicitly requested. */ xintc_write(irqc, IER, 0); diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c index 7168778fbbe1..cb0afe897162 100644 --- a/drivers/isdn/capi/kcapi.c +++ b/drivers/isdn/capi/kcapi.c @@ -721,7 +721,7 @@ u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb) * Return value: CAPI result code */ -u16 capi20_get_manufacturer(u32 contr, u8 *buf) +u16 capi20_get_manufacturer(u32 contr, u8 buf[CAPI_MANUFACTURER_LEN]) { struct capi_ctr *ctr; u16 ret; @@ -787,7 +787,7 @@ u16 capi20_get_version(u32 contr, struct capi_version *verp) * Return value: CAPI result code */ -u16 capi20_get_serial(u32 contr, u8 *serial) +u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN]) { struct capi_ctr *ctr; u16 ret; diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c index ec475087fbf9..39f841b42488 100644 --- a/drivers/isdn/hardware/mISDN/mISDNipac.c +++ b/drivers/isdn/hardware/mISDN/mISDNipac.c @@ -694,7 +694,7 @@ isac_release(struct isac_hw *isac) { if (isac->type & IPAC_TYPE_ISACX) WriteISAC(isac, ISACX_MASK, 0xff); - else + else if (isac->type != 0) WriteISAC(isac, ISAC_MASK, 0xff); if (isac->dch.timer.function != NULL) { del_timer(&isac->dch.timer); diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c index 7b2f4d0ae3fe..2f9a289ab245 100644 --- a/drivers/leds/leds-turris-omnia.c +++ b/drivers/leds/leds-turris-omnia.c @@ -2,7 +2,7 @@ /* * CZ.NIC's Turris Omnia LEDs driver * - * 2020 by Marek Behun <marek.behun@nic.cz> + * 2020 by Marek BehĂșn <kabel@kernel.org> */ #include <linux/i2c.h> @@ -287,6 +287,6 @@ static struct i2c_driver omnia_leds_driver = { module_i2c_driver(omnia_leds_driver); -MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>"); +MODULE_AUTHOR("Marek Behun <kabel@kernel.org>"); MODULE_DESCRIPTION("CZ.NIC's Turris Omnia 
LEDs"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/mailbox/armada-37xx-rwtm-mailbox.c b/drivers/mailbox/armada-37xx-rwtm-mailbox.c index 9f2ce7f03c67..456a117a65fd 100644 --- a/drivers/mailbox/armada-37xx-rwtm-mailbox.c +++ b/drivers/mailbox/armada-37xx-rwtm-mailbox.c @@ -2,7 +2,7 @@ /* * rWTM BIU Mailbox driver for Armada 37xx * - * Author: Marek Behun <marek.behun@nic.cz> + * Author: Marek BehĂșn <kabel@kernel.org> */ #include <linux/device.h> @@ -203,4 +203,4 @@ module_platform_driver(armada_37xx_mbox_driver); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("rWTM BIU Mailbox driver for Armada 37xx"); -MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>"); +MODULE_AUTHOR("Marek Behun <kabel@kernel.org>"); diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 5e306bba4375..1ca65b434f1f 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -529,7 +529,7 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_ * Grab our output buffer. */ nl = orig_nl = get_result_buffer(param, param_size, &len); - if (len < needed) { + if (len < needed || len < sizeof(nl->dev)) { param->flags |= DM_BUFFER_FULL_FLAG; goto out; } diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 95391f78b8d5..e5f0f1703c5d 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1594,6 +1594,13 @@ static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev, return blk_queue_zoned_model(q) != *zoned_model; } +/* + * Check the device zoned model based on the target feature flag. If the target + * has the DM_TARGET_ZONED_HM feature flag set, host-managed zoned devices are + * also accepted but all devices must have the same zoned model. If the target + * has the DM_TARGET_MIXED_ZONED_MODEL feature set, the devices can have any + * zoned model with all zoned devices having the same zone size. + */ static bool dm_table_supports_zoned_model(struct dm_table *t, enum blk_zoned_model zoned_model) { @@ -1603,13 +1610,15 @@ static bool dm_table_supports_zoned_model(struct dm_table *t, for (i = 0; i < dm_table_get_num_targets(t); i++) { ti = dm_table_get_target(t, i); - if (zoned_model == BLK_ZONED_HM && - !dm_target_supports_zoned_hm(ti->type)) - return false; - - if (!ti->type->iterate_devices || - ti->type->iterate_devices(ti, device_not_zoned_model, &zoned_model)) - return false; + if (dm_target_supports_zoned_hm(ti->type)) { + if (!ti->type->iterate_devices || + ti->type->iterate_devices(ti, device_not_zoned_model, + &zoned_model)) + return false; + } else if (!dm_target_supports_mixed_zoned_model(ti->type)) { + if (zoned_model == BLK_ZONED_HM) + return false; + } } return true; @@ -1621,9 +1630,17 @@ static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev * struct request_queue *q = bdev_get_queue(dev->bdev); unsigned int *zone_sectors = data; + if (!blk_queue_is_zoned(q)) + return 0; + return blk_queue_zone_sectors(q) != *zone_sectors; } +/* + * Check consistency of zoned model and zone sectors across all targets. For + * zone sectors, if the destination device is a zoned block device, it shall + * have the specified zone_sectors. 
+ */ static int validate_hardware_zoned_model(struct dm_table *table, enum blk_zoned_model zoned_model, unsigned int zone_sectors) @@ -1642,7 +1659,7 @@ static int validate_hardware_zoned_model(struct dm_table *table, return -EINVAL; if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) { - DMERR("%s: zone sectors is not consistent across all devices", + DMERR("%s: zone sectors is not consistent across all zoned devices", dm_device_name(table->md)); return -EINVAL; } diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c index 66f4c6398f67..cea2b3789736 100644 --- a/drivers/md/dm-verity-fec.c +++ b/drivers/md/dm-verity-fec.c @@ -65,7 +65,7 @@ static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index, u8 *res; position = (index + rsb) * v->fec->roots; - block = div64_u64_rem(position, v->fec->roots << SECTOR_SHIFT, &rem); + block = div64_u64_rem(position, v->fec->io_size, &rem); *offset = (unsigned)rem; res = dm_bufio_read(v->fec->bufio, block, buf); @@ -154,7 +154,7 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio, /* read the next block when we run out of parity bytes */ offset += v->fec->roots; - if (offset >= v->fec->roots << SECTOR_SHIFT) { + if (offset >= v->fec->io_size) { dm_bufio_release(buf); par = fec_read_parity(v, rsb, block_offset, &offset, &buf); @@ -742,8 +742,13 @@ int verity_fec_ctr(struct dm_verity *v) return -E2BIG; } + if ((f->roots << SECTOR_SHIFT) & ((1 << v->data_dev_block_bits) - 1)) + f->io_size = 1 << v->data_dev_block_bits; + else + f->io_size = v->fec->roots << SECTOR_SHIFT; + f->bufio = dm_bufio_client_create(f->dev->bdev, - f->roots << SECTOR_SHIFT, + f->io_size, 1, 0, NULL, NULL); if (IS_ERR(f->bufio)) { ti->error = "Cannot initialize FEC bufio client"; diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h index 42fbd3a7fc9f..3c46c8d61883 100644 --- a/drivers/md/dm-verity-fec.h +++ b/drivers/md/dm-verity-fec.h @@ -36,6 +36,7 @@ struct dm_verity_fec { struct dm_dev *dev; /* parity data device */ struct dm_bufio_client *data_bufio; /* for data dev access */ struct dm_bufio_client *bufio; /* for parity data access */ + size_t io_size; /* IO size for roots */ sector_t start; /* parity data start in blocks */ sector_t blocks; /* number of blocks covered */ sector_t rounds; /* number of interleaving rounds */ diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 6b8e5bdd8526..808a98ef624c 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -34,7 +34,7 @@ #define DM_VERITY_OPT_IGN_ZEROES "ignore_zero_blocks" #define DM_VERITY_OPT_AT_MOST_ONCE "check_at_most_once" -#define DM_VERITY_OPTS_MAX (2 + DM_VERITY_OPTS_FEC + \ +#define DM_VERITY_OPTS_MAX (3 + DM_VERITY_OPTS_FEC + \ DM_VERITY_ROOT_HASH_VERIFICATION_OPTS) static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE; diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index 697f9de37355..7e88df64d197 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -1143,7 +1143,7 @@ static int dmz_message(struct dm_target *ti, unsigned int argc, char **argv, static struct target_type dmz_type = { .name = "zoned", .version = {2, 0, 0}, - .features = DM_TARGET_SINGLETON | DM_TARGET_ZONED_HM, + .features = DM_TARGET_SINGLETON | DM_TARGET_MIXED_ZONED_MODEL, .module = THIS_MODULE, .ctr = dmz_ctr, .dtr = dmz_dtr, diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 50b693d776d6..3f3be9408afa 100644 --- 
a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2036,7 +2036,10 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, if (size != dm_get_size(md)) memset(&md->geometry, 0, sizeof(md->geometry)); - set_capacity_and_notify(md->disk, size); + if (!get_capacity(md->disk)) + set_capacity(md->disk, size); + else + set_capacity_and_notify(md->disk, size); dm_table_event_callback(t, event_callback, md); diff --git a/drivers/mfd/intel_quark_i2c_gpio.c b/drivers/mfd/intel_quark_i2c_gpio.c index fe8ca945f367..b67cb0a3ab05 100644 --- a/drivers/mfd/intel_quark_i2c_gpio.c +++ b/drivers/mfd/intel_quark_i2c_gpio.c @@ -72,7 +72,8 @@ static const struct dmi_system_id dmi_platform_info[] = { {} }; -static const struct resource intel_quark_i2c_res[] = { +/* This is used as a place holder and will be modified at run-time */ +static struct resource intel_quark_i2c_res[] = { [INTEL_QUARK_IORES_MEM] = { .flags = IORESOURCE_MEM, }, @@ -85,7 +86,8 @@ static struct mfd_cell_acpi_match intel_quark_acpi_match_i2c = { .adr = MFD_ACPI_MATCH_I2C, }; -static const struct resource intel_quark_gpio_res[] = { +/* This is used as a place holder and will be modified at run-time */ +static struct resource intel_quark_gpio_res[] = { [INTEL_QUARK_IORES_MEM] = { .flags = IORESOURCE_MEM, }, diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c index 110f5a8538e9..0e8254d0cf0b 100644 --- a/drivers/misc/lkdtm/bugs.c +++ b/drivers/misc/lkdtm/bugs.c @@ -134,6 +134,23 @@ noinline void lkdtm_CORRUPT_STACK_STRONG(void) __lkdtm_CORRUPT_STACK((void *)&data); } +static pid_t stack_pid; +static unsigned long stack_addr; + +void lkdtm_REPORT_STACK(void) +{ + volatile uintptr_t magic; + pid_t pid = task_pid_nr(current); + + if (pid != stack_pid) { + pr_info("Starting stack offset tracking for pid %d\n", pid); + stack_pid = pid; + stack_addr = (uintptr_t)&magic; + } + + pr_info("Stack offset: %d\n", (int)(stack_addr - (uintptr_t)&magic)); +} + void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void) { static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5}; diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c index b2aff4d87c01..8024b6a5cc7f 100644 --- a/drivers/misc/lkdtm/core.c +++ b/drivers/misc/lkdtm/core.c @@ -110,6 +110,7 @@ static const struct crashtype crashtypes[] = { CRASHTYPE(EXHAUST_STACK), CRASHTYPE(CORRUPT_STACK), CRASHTYPE(CORRUPT_STACK_STRONG), + CRASHTYPE(REPORT_STACK), CRASHTYPE(CORRUPT_LIST_ADD), CRASHTYPE(CORRUPT_LIST_DEL), CRASHTYPE(STACK_GUARD_PAGE_LEADING), diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h index 5ae48c64df24..99f90d3e5e9c 100644 --- a/drivers/misc/lkdtm/lkdtm.h +++ b/drivers/misc/lkdtm/lkdtm.h @@ -17,6 +17,7 @@ void lkdtm_LOOP(void); void lkdtm_EXHAUST_STACK(void); void lkdtm_CORRUPT_STACK(void); void lkdtm_CORRUPT_STACK_STRONG(void); +void lkdtm_REPORT_STACK(void); void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void); void lkdtm_SOFTLOCKUP(void); void lkdtm_HARDLOCKUP(void); diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 4378a9b25848..2cc370adb238 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -2286,8 +2286,8 @@ int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp, if (buffer_id == 0) return -EINVAL; - if (!mei_cl_is_connected(cl)) - return -ENODEV; + if (mei_cl_is_connected(cl)) + return -EPROTO; if (cl->dma_mapped) return -EPROTO; @@ -2327,9 +2327,7 @@ int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp, mutex_unlock(&dev->device_lock); 
wait_event_timeout(cl->wait, - cl->dma_mapped || - cl->status || - !mei_cl_is_connected(cl), + cl->dma_mapped || cl->status, mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); mutex_lock(&dev->device_lock); @@ -2376,8 +2374,9 @@ int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp) return -EOPNOTSUPP; } - if (!mei_cl_is_connected(cl)) - return -ENODEV; + /* do not allow unmap for connected client */ + if (mei_cl_is_connected(cl)) + return -EPROTO; if (!cl->dma_mapped) return -EPROTO; @@ -2405,9 +2404,7 @@ int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp) mutex_unlock(&dev->device_lock); wait_event_timeout(cl->wait, - !cl->dma_mapped || - cl->status || - !mei_cl_is_connected(cl), + !cl->dma_mapped || cl->status, mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); mutex_lock(&dev->device_lock); diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index eb6c02bc4a02..b8b771b643cc 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -247,8 +247,9 @@ static void meson_mmc_get_transfer_mode(struct mmc_host *mmc, */ for_each_sg(data->sg, sg, data->sg_len, i) { if (sg->length % data->blksz) { - WARN_ONCE(1, "unaligned sg len %u blksize %u\n", - sg->length, data->blksz); + dev_warn_once(mmc_dev(mmc), + "unaligned sg len %u blksize %u, disabling descriptor DMA for transfer\n", + sg->length, data->blksz); return; } } diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c index 57f1f1708994..5c5c92132287 100644 --- a/drivers/mtd/nand/raw/mtk_nand.c +++ b/drivers/mtd/nand/raw/mtk_nand.c @@ -488,8 +488,8 @@ static int mtk_nfc_exec_instr(struct nand_chip *chip, return 0; case NAND_OP_WAITRDY_INSTR: return readl_poll_timeout(nfc->regs + NFI_STA, status, - status & STA_BUSY, 20, - instr->ctx.waitrdy.timeout_ms); + !(status & STA_BUSY), 20, + instr->ctx.waitrdy.timeout_ms * 1000); default: break; } diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index 8bdc44b7e09a..3c8f665c1558 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -127,6 +127,8 @@ static int com20020pci_probe(struct pci_dev *pdev, int i, ioaddr, ret; struct resource *r; + ret = 0; + if (pci_enable_device(pdev)) return -EIO; @@ -139,6 +141,8 @@ static int com20020pci_probe(struct pci_dev *pdev, priv->ci = ci; mm = &ci->misc_map; + pci_set_drvdata(pdev, priv); + INIT_LIST_HEAD(&priv->list_dev); if (mm->size) { @@ -161,7 +165,7 @@ static int com20020pci_probe(struct pci_dev *pdev, dev = alloc_arcdev(device); if (!dev) { ret = -ENOMEM; - goto out_port; + break; } dev->dev_port = i; @@ -178,7 +182,7 @@ static int com20020pci_probe(struct pci_dev *pdev, pr_err("IO region %xh-%xh already allocated\n", ioaddr, ioaddr + cm->size - 1); ret = -EBUSY; - goto out_port; + goto err_free_arcdev; } /* Dummy access after Reset @@ -216,18 +220,18 @@ static int com20020pci_probe(struct pci_dev *pdev, if (arcnet_inb(ioaddr, COM20020_REG_R_STATUS) == 0xFF) { pr_err("IO address %Xh is empty!\n", ioaddr); ret = -EIO; - goto out_port; + goto err_free_arcdev; } if (com20020_check(dev)) { ret = -EIO; - goto out_port; + goto err_free_arcdev; } card = devm_kzalloc(&pdev->dev, sizeof(struct com20020_dev), GFP_KERNEL); if (!card) { ret = -ENOMEM; - goto out_port; + goto err_free_arcdev; } card->index = i; @@ -253,29 +257,29 @@ static int com20020pci_probe(struct pci_dev *pdev, ret = devm_led_classdev_register(&pdev->dev, &card->tx_led); if (ret) - goto out_port; + goto err_free_arcdev; ret = 
devm_led_classdev_register(&pdev->dev, &card->recon_led); if (ret) - goto out_port; + goto err_free_arcdev; dev_set_drvdata(&dev->dev, card); ret = com20020_found(dev, IRQF_SHARED); if (ret) - goto out_port; + goto err_free_arcdev; devm_arcnet_led_init(dev, dev->dev_id, i); list_add(&card->list, &priv->list_dev); - } + continue; - pci_set_drvdata(pdev, priv); - - return 0; - -out_port: - com20020pci_remove(pdev); +err_free_arcdev: + free_arcdev(dev); + break; + } + if (ret) + com20020pci_remove(pdev); return ret; } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 456315bef3a8..74cbbb22470b 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3978,15 +3978,11 @@ static int bond_neigh_init(struct neighbour *n) rcu_read_lock(); slave = bond_first_slave_rcu(bond); - if (!slave) { - ret = -EINVAL; + if (!slave) goto out; - } slave_ops = slave->dev->netdev_ops; - if (!slave_ops->ndo_neigh_setup) { - ret = -EINVAL; + if (!slave_ops->ndo_neigh_setup) goto out; - } /* TODO: find another way [1] to implement this. * Passing a zeroed structure is fragile, diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index ef474bae47a1..6958830cb983 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -212,18 +212,6 @@ static const struct can_bittiming_const c_can_bittiming_const = { .brp_inc = 1, }; -static inline void c_can_pm_runtime_enable(const struct c_can_priv *priv) -{ - if (priv->device) - pm_runtime_enable(priv->device); -} - -static inline void c_can_pm_runtime_disable(const struct c_can_priv *priv) -{ - if (priv->device) - pm_runtime_disable(priv->device); -} - static inline void c_can_pm_runtime_get_sync(const struct c_can_priv *priv) { if (priv->device) @@ -1335,7 +1323,6 @@ static const struct net_device_ops c_can_netdev_ops = { int register_c_can_dev(struct net_device *dev) { - struct c_can_priv *priv = netdev_priv(dev); int err; /* Deactivate pins to prevent DRA7 DCAN IP from being @@ -1345,28 +1332,19 @@ int register_c_can_dev(struct net_device *dev) */ pinctrl_pm_select_sleep_state(dev->dev.parent); - c_can_pm_runtime_enable(priv); - dev->flags |= IFF_ECHO; /* we support local echo */ dev->netdev_ops = &c_can_netdev_ops; err = register_candev(dev); - if (err) - c_can_pm_runtime_disable(priv); - else + if (!err) devm_can_led_init(dev); - return err; } EXPORT_SYMBOL_GPL(register_c_can_dev); void unregister_c_can_dev(struct net_device *dev) { - struct c_can_priv *priv = netdev_priv(dev); - unregister_candev(dev); - - c_can_pm_runtime_disable(priv); } EXPORT_SYMBOL_GPL(unregister_c_can_dev); diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c index 406b4847e5dc..7efb60b50876 100644 --- a/drivers/net/can/c_can/c_can_pci.c +++ b/drivers/net/can/c_can/c_can_pci.c @@ -239,12 +239,13 @@ static void c_can_pci_remove(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct c_can_priv *priv = netdev_priv(dev); + void __iomem *addr = priv->base; unregister_c_can_dev(dev); free_c_can_dev(dev); - pci_iounmap(pdev, priv->base); + pci_iounmap(pdev, addr); pci_disable_msi(pdev); pci_clear_master(pdev); pci_release_regions(pdev); diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index 05f425ceb53a..47b251b1607c 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c @@ -29,6 +29,7 @@ #include <linux/list.h> #include <linux/io.h> #include 
<linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/clk.h> #include <linux/of.h> #include <linux/of_device.h> @@ -386,6 +387,7 @@ static int c_can_plat_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); + pm_runtime_enable(priv->device); ret = register_c_can_dev(dev); if (ret) { dev_err(&pdev->dev, "registering %s failed (err=%d)\n", @@ -398,6 +400,7 @@ static int c_can_plat_probe(struct platform_device *pdev) return 0; exit_free_device: + pm_runtime_disable(priv->device); free_c_can_dev(dev); exit: dev_err(&pdev->dev, "probe failed\n"); @@ -408,9 +411,10 @@ exit: static int c_can_plat_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); + struct c_can_priv *priv = netdev_priv(dev); unregister_c_can_dev(dev); - + pm_runtime_disable(priv->device); free_c_can_dev(dev); return 0; diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c index 867f6be31230..f5d79e6e5483 100644 --- a/drivers/net/can/dev/netlink.c +++ b/drivers/net/can/dev/netlink.c @@ -355,6 +355,7 @@ static void can_dellink(struct net_device *dev, struct list_head *head) struct rtnl_link_ops can_link_ops __read_mostly = { .kind = "can", + .netns_refund = true, .maxtype = IFLA_CAN_MAX, .policy = can_policy, .setup = can_setup, diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 134c05757a3b..57f3635ad8d7 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -697,9 +697,15 @@ static int flexcan_chip_disable(struct flexcan_priv *priv) static int flexcan_chip_freeze(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; - unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate; + unsigned int timeout; + u32 bitrate = priv->can.bittiming.bitrate; u32 reg; + if (bitrate) + timeout = 1000 * 1000 * 10 / bitrate; + else + timeout = FLEXCAN_TIMEOUT_US / 10; + reg = priv->read(®s->mcr); reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT; priv->write(reg, ®s->mcr); diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c index 37e05010ca91..74d9899fc904 100644 --- a/drivers/net/can/kvaser_pciefd.c +++ b/drivers/net/can/kvaser_pciefd.c @@ -57,6 +57,7 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices"); #define KVASER_PCIEFD_KCAN_STAT_REG 0x418 #define KVASER_PCIEFD_KCAN_MODE_REG 0x41c #define KVASER_PCIEFD_KCAN_BTRN_REG 0x420 +#define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424 #define KVASER_PCIEFD_KCAN_BTRD_REG 0x428 #define KVASER_PCIEFD_KCAN_PWM_REG 0x430 /* Loopback control register */ @@ -949,6 +950,9 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie) timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer, 0); + /* Disable Bus load reporting */ + iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG); + tx_npackets = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG); if (((tx_npackets >> KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT) & diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 3752520a7d4b..0c8d36bc668c 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -501,9 +501,6 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota) } while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) { - if (rxfs & RXFS_RFL) - netdev_warn(dev, "Rx FIFO 0 Message Lost\n"); - m_can_read_fifo(dev, rxfs); quota--; @@ -876,7 +873,7 @@ static int m_can_rx_peripheral(struct net_device *dev) { struct m_can_classdev *cdev = 
netdev_priv(dev); - m_can_rx_handler(dev, 1); + m_can_rx_handler(dev, M_CAN_NAPI_WEIGHT); m_can_enable_all_interrupts(cdev); diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index f69fb4238a65..a57da43680d8 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -314,6 +314,18 @@ static int mcp251x_spi_trans(struct spi_device *spi, int len) return ret; } +static int mcp251x_spi_write(struct spi_device *spi, int len) +{ + struct mcp251x_priv *priv = spi_get_drvdata(spi); + int ret; + + ret = spi_write(spi, priv->spi_tx_buf, len); + if (ret) + dev_err(&spi->dev, "spi write failed: ret = %d\n", ret); + + return ret; +} + static u8 mcp251x_read_reg(struct spi_device *spi, u8 reg) { struct mcp251x_priv *priv = spi_get_drvdata(spi); @@ -361,7 +373,7 @@ static void mcp251x_write_reg(struct spi_device *spi, u8 reg, u8 val) priv->spi_tx_buf[1] = reg; priv->spi_tx_buf[2] = val; - mcp251x_spi_trans(spi, 3); + mcp251x_spi_write(spi, 3); } static void mcp251x_write_2regs(struct spi_device *spi, u8 reg, u8 v1, u8 v2) @@ -373,7 +385,7 @@ static void mcp251x_write_2regs(struct spi_device *spi, u8 reg, u8 v1, u8 v2) priv->spi_tx_buf[2] = v1; priv->spi_tx_buf[3] = v2; - mcp251x_spi_trans(spi, 4); + mcp251x_spi_write(spi, 4); } static void mcp251x_write_bits(struct spi_device *spi, u8 reg, @@ -386,7 +398,7 @@ static void mcp251x_write_bits(struct spi_device *spi, u8 reg, priv->spi_tx_buf[2] = mask; priv->spi_tx_buf[3] = val; - mcp251x_spi_trans(spi, 4); + mcp251x_spi_write(spi, 4); } static u8 mcp251x_read_stat(struct spi_device *spi) @@ -618,7 +630,7 @@ static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf, buf[i]); } else { memcpy(priv->spi_tx_buf, buf, TXBDAT_OFF + len); - mcp251x_spi_trans(spi, TXBDAT_OFF + len); + mcp251x_spi_write(spi, TXBDAT_OFF + len); } } @@ -650,7 +662,7 @@ static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame, /* use INSTRUCTION_RTS, to avoid "repeated frame problem" */ priv->spi_tx_buf[0] = INSTRUCTION_RTS(1 << tx_buf_idx); - mcp251x_spi_trans(priv->spi, 1); + mcp251x_spi_write(priv->spi, 1); } static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf, @@ -888,7 +900,7 @@ static int mcp251x_hw_reset(struct spi_device *spi) mdelay(MCP251X_OST_DELAY_MS); priv->spi_tx_buf[0] = INSTRUCTION_RESET; - ret = mcp251x_spi_trans(spi, 1); + ret = mcp251x_spi_write(spi, 1); if (ret) return ret; diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig index c1e5d5b570b6..538f4d9adb91 100644 --- a/drivers/net/can/usb/Kconfig +++ b/drivers/net/can/usb/Kconfig @@ -73,6 +73,7 @@ config CAN_KVASER_USB - Kvaser Memorator Pro 5xHS - Kvaser USBcan Light 4xHS - Kvaser USBcan Pro 2xHS v2 + - Kvaser USBcan Pro 4xHS - Kvaser USBcan Pro 5xHS - Kvaser U100 - Kvaser U100P diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c index 2b7efd296758..4e97da8434ab 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c @@ -86,8 +86,9 @@ #define USB_U100_PRODUCT_ID 273 #define USB_U100P_PRODUCT_ID 274 #define USB_U100S_PRODUCT_ID 275 +#define USB_USBCAN_PRO_4HS_PRODUCT_ID 276 #define USB_HYDRA_PRODUCT_ID_END \ - USB_U100S_PRODUCT_ID + USB_USBCAN_PRO_4HS_PRODUCT_ID static inline bool kvaser_is_leaf(const struct usb_device_id *id) { @@ -193,6 +194,7 @@ static const struct usb_device_id kvaser_usb_table[] = { { USB_DEVICE(KVASER_VENDOR_ID, USB_U100_PRODUCT_ID) }, { USB_DEVICE(KVASER_VENDOR_ID, 
USB_U100P_PRODUCT_ID) }, { USB_DEVICE(KVASER_VENDOR_ID, USB_U100S_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_4HS_PRODUCT_ID) }, { } }; MODULE_DEVICE_TABLE(usb, kvaser_usb_table); diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index 573b11559d73..28e916a04047 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -857,7 +857,7 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter, if (dev->adapter->dev_set_bus) { err = dev->adapter->dev_set_bus(dev, 0); if (err) - goto lbl_unregister_candev; + goto adap_dev_free; } /* get device number early */ @@ -869,6 +869,10 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter, return 0; +adap_dev_free: + if (dev->adapter->dev_free) + dev->adapter->dev_free(dev); + lbl_unregister_candev: unregister_candev(netdev); diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index a162499bcafc..eb443721c58e 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1105,13 +1105,6 @@ static int b53_setup(struct dsa_switch *ds) b53_disable_port(ds, port); } - /* Let DSA handle the case were multiple bridges span the same switch - * device and different VLAN awareness settings are requested, which - * would be breaking filtering semantics for any of the other bridge - * devices. (not hardware supported) - */ - ds->vlan_filtering_is_global = true; - return b53_setup_devlink_resources(ds); } @@ -2664,6 +2657,13 @@ struct b53_device *b53_switch_alloc(struct device *base, ds->ops = &b53_switch_ops; ds->untag_bridge_pvid = true; dev->vlan_enabled = true; + /* Let DSA handle the case were multiple bridges span the same switch + * device and different VLAN awareness settings are requested, which + * would be breaking filtering semantics for any of the other bridge + * devices. (not hardware supported) + */ + ds->vlan_filtering_is_global = true; + mutex_init(&dev->reg_mutex); mutex_init(&dev->stats_mutex); diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index f277df922fcd..ba5d546d06aa 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -114,7 +114,10 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) /* Force link status for IMP port */ reg = core_readl(priv, offset); reg |= (MII_SW_OR | LINK_STS); - reg &= ~GMII_SPEED_UP_2G; + if (priv->type == BCM4908_DEVICE_ID) + reg |= GMII_SPEED_UP_2G; + else + reg &= ~GMII_SPEED_UP_2G; core_writel(priv, reg, offset); /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */ @@ -585,8 +588,10 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port) * in bits 15:8 and the patch level in bits 7:0 which is exactly what * the REG_PHY_REVISION register layout is. 
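Per the bcm_sf2 comment above, REG_PHY_REVISION carries the GPHY major revision in bits 15:8 and the patch level in bits 7:0, and the hunk now returns it only for ports backed by an internal PHY. Packing that layout looks like this (illustrative only):

	static unsigned short phy_revision(unsigned char major, unsigned char patch)
	{
		return ((unsigned short)major << 8) | patch;
	}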
*/ - - return priv->hw_params.gphy_rev; + if (priv->int_phy_mask & BIT(port)) + return priv->hw_params.gphy_rev; + else + return 0; } static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port, diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index 52e865a3912c..bf5c62e5c0b0 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -93,8 +93,12 @@ /* GSWIP MII Registers */ #define GSWIP_MII_CFGp(p) (0x2 * (p)) +#define GSWIP_MII_CFG_RESET BIT(15) #define GSWIP_MII_CFG_EN BIT(14) +#define GSWIP_MII_CFG_ISOLATE BIT(13) #define GSWIP_MII_CFG_LDCLKDIS BIT(12) +#define GSWIP_MII_CFG_RGMII_IBS BIT(8) +#define GSWIP_MII_CFG_RMII_CLK BIT(7) #define GSWIP_MII_CFG_MODE_MIIP 0x0 #define GSWIP_MII_CFG_MODE_MIIM 0x1 #define GSWIP_MII_CFG_MODE_RMIIP 0x2 @@ -190,6 +194,23 @@ #define GSWIP_PCE_DEFPVID(p) (0x486 + ((p) * 0xA)) #define GSWIP_MAC_FLEN 0x8C5 +#define GSWIP_MAC_CTRL_0p(p) (0x903 + ((p) * 0xC)) +#define GSWIP_MAC_CTRL_0_PADEN BIT(8) +#define GSWIP_MAC_CTRL_0_FCS_EN BIT(7) +#define GSWIP_MAC_CTRL_0_FCON_MASK 0x0070 +#define GSWIP_MAC_CTRL_0_FCON_AUTO 0x0000 +#define GSWIP_MAC_CTRL_0_FCON_RX 0x0010 +#define GSWIP_MAC_CTRL_0_FCON_TX 0x0020 +#define GSWIP_MAC_CTRL_0_FCON_RXTX 0x0030 +#define GSWIP_MAC_CTRL_0_FCON_NONE 0x0040 +#define GSWIP_MAC_CTRL_0_FDUP_MASK 0x000C +#define GSWIP_MAC_CTRL_0_FDUP_AUTO 0x0000 +#define GSWIP_MAC_CTRL_0_FDUP_EN 0x0004 +#define GSWIP_MAC_CTRL_0_FDUP_DIS 0x000C +#define GSWIP_MAC_CTRL_0_GMII_MASK 0x0003 +#define GSWIP_MAC_CTRL_0_GMII_AUTO 0x0000 +#define GSWIP_MAC_CTRL_0_GMII_MII 0x0001 +#define GSWIP_MAC_CTRL_0_GMII_RGMII 0x0002 #define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC)) #define GSWIP_MAC_CTRL_2_MLEN BIT(3) /* Maximum Untagged Frame Lnegth */ @@ -653,16 +674,13 @@ static int gswip_port_enable(struct dsa_switch *ds, int port, GSWIP_SDMA_PCTRLp(port)); if (!dsa_is_cpu_port(ds, port)) { - u32 macconf = GSWIP_MDIO_PHY_LINK_AUTO | - GSWIP_MDIO_PHY_SPEED_AUTO | - GSWIP_MDIO_PHY_FDUP_AUTO | - GSWIP_MDIO_PHY_FCONTX_AUTO | - GSWIP_MDIO_PHY_FCONRX_AUTO | - (phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK); - - gswip_mdio_w(priv, macconf, GSWIP_MDIO_PHYp(port)); - /* Activate MDIO auto polling */ - gswip_mdio_mask(priv, 0, BIT(port), GSWIP_MDIO_MDC_CFG0); + u32 mdio_phy = 0; + + if (phydev) + mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK; + + gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy, + GSWIP_MDIO_PHYp(port)); } return 0; @@ -675,14 +693,6 @@ static void gswip_port_disable(struct dsa_switch *ds, int port) if (!dsa_is_user_port(ds, port)) return; - if (!dsa_is_cpu_port(ds, port)) { - gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_DOWN, - GSWIP_MDIO_PHY_LINK_MASK, - GSWIP_MDIO_PHYp(port)); - /* Deactivate MDIO auto polling */ - gswip_mdio_mask(priv, BIT(port), 0, GSWIP_MDIO_MDC_CFG0); - } - gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0, GSWIP_FDMA_PCTRLp(port)); gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0, @@ -794,14 +804,32 @@ static int gswip_setup(struct dsa_switch *ds) gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP2); gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP3); - /* disable PHY auto polling */ + /* Deactivate MDIO PHY auto polling. Some PHYs as the AR8030 have an + * interoperability problem with this auto polling mechanism because + * their status registers think that the link is in a different state + * than it actually is. For the AR8030 it has the BMSR_ESTATEN bit set + * as well as ESTATUS_1000_TFULL and ESTATUS_1000_XFULL. 
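Several lantiq_gswip hunks above and below funnel through gswip_mii_mask_cfg()-style helpers, i.e. a clear-then-set read-modify-write: every bit in the mask is dropped, then every bit in the set value is applied. The underlying operation in isolation, as a sketch:

	static unsigned int mask_set(unsigned int old, unsigned int clear,
				     unsigned int set)
	{
		return (old & ~clear) | set;
	}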
This makes the + * auto polling state machine consider the link being negotiated with + * 1Gbit/s. Since the PHY itself is a Fast Ethernet RMII PHY this leads + * to the switch port being completely dead (RX and TX are both not + * working). + * Also with various other PHY / port combinations (PHY11G GPHY, PHY22F + * GPHY, external RGMII PEF7071/7072) any traffic would stop. Sometimes + * it would work fine for a few minutes to hours and then stop, on + * other device it would no traffic could be sent or received at all. + * Testing shows that when PHY auto polling is disabled these problems + * go away. + */ gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0); + /* Configure the MDIO Clock 2.5 MHz */ gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1); - /* Disable the xMII link */ + /* Disable the xMII interface and clear it's isolation bit */ for (i = 0; i < priv->hw_info->max_ports; i++) - gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, i); + gswip_mii_mask_cfg(priv, + GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE, + 0, i); /* enable special tag insertion on cpu port */ gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN, @@ -1450,6 +1478,112 @@ unsupported: return; } +static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link) +{ + u32 mdio_phy; + + if (link) + mdio_phy = GSWIP_MDIO_PHY_LINK_UP; + else + mdio_phy = GSWIP_MDIO_PHY_LINK_DOWN; + + gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_MASK, mdio_phy, + GSWIP_MDIO_PHYp(port)); +} + +static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed, + phy_interface_t interface) +{ + u32 mdio_phy = 0, mii_cfg = 0, mac_ctrl_0 = 0; + + switch (speed) { + case SPEED_10: + mdio_phy = GSWIP_MDIO_PHY_SPEED_M10; + + if (interface == PHY_INTERFACE_MODE_RMII) + mii_cfg = GSWIP_MII_CFG_RATE_M50; + else + mii_cfg = GSWIP_MII_CFG_RATE_M2P5; + + mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII; + break; + + case SPEED_100: + mdio_phy = GSWIP_MDIO_PHY_SPEED_M100; + + if (interface == PHY_INTERFACE_MODE_RMII) + mii_cfg = GSWIP_MII_CFG_RATE_M50; + else + mii_cfg = GSWIP_MII_CFG_RATE_M25; + + mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII; + break; + + case SPEED_1000: + mdio_phy = GSWIP_MDIO_PHY_SPEED_G1; + + mii_cfg = GSWIP_MII_CFG_RATE_M125; + + mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_RGMII; + break; + } + + gswip_mdio_mask(priv, GSWIP_MDIO_PHY_SPEED_MASK, mdio_phy, + GSWIP_MDIO_PHYp(port)); + gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_cfg, port); + gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_GMII_MASK, mac_ctrl_0, + GSWIP_MAC_CTRL_0p(port)); +} + +static void gswip_port_set_duplex(struct gswip_priv *priv, int port, int duplex) +{ + u32 mac_ctrl_0, mdio_phy; + + if (duplex == DUPLEX_FULL) { + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN; + mdio_phy = GSWIP_MDIO_PHY_FDUP_EN; + } else { + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS; + mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS; + } + + gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FDUP_MASK, mac_ctrl_0, + GSWIP_MAC_CTRL_0p(port)); + gswip_mdio_mask(priv, GSWIP_MDIO_PHY_FDUP_MASK, mdio_phy, + GSWIP_MDIO_PHYp(port)); +} + +static void gswip_port_set_pause(struct gswip_priv *priv, int port, + bool tx_pause, bool rx_pause) +{ + u32 mac_ctrl_0, mdio_phy; + + if (tx_pause && rx_pause) { + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RXTX; + mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN | + GSWIP_MDIO_PHY_FCONRX_EN; + } else if (tx_pause) { + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_TX; + mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN | + GSWIP_MDIO_PHY_FCONRX_DIS; + } else if (rx_pause) { + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RX; + mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS 
| + GSWIP_MDIO_PHY_FCONRX_EN; + } else { + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_NONE; + mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS | + GSWIP_MDIO_PHY_FCONRX_DIS; + } + + gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FCON_MASK, + mac_ctrl_0, GSWIP_MAC_CTRL_0p(port)); + gswip_mdio_mask(priv, + GSWIP_MDIO_PHY_FCONTX_MASK | + GSWIP_MDIO_PHY_FCONRX_MASK, + mdio_phy, GSWIP_MDIO_PHYp(port)); +} + static void gswip_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, const struct phylink_link_state *state) @@ -1469,6 +1603,9 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port, break; case PHY_INTERFACE_MODE_RMII: miicfg |= GSWIP_MII_CFG_MODE_RMIIM; + + /* Configure the RMII clock as output: */ + miicfg |= GSWIP_MII_CFG_RMII_CLK; break; case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_RGMII_ID: @@ -1481,7 +1618,11 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port, "Unsupported interface: %d\n", state->interface); return; } - gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_MODE_MASK, miicfg, port); + + gswip_mii_mask_cfg(priv, + GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK | + GSWIP_MII_CFG_RGMII_IBS | GSWIP_MII_CFG_LDCLKDIS, + miicfg, port); switch (state->interface) { case PHY_INTERFACE_MODE_RGMII_ID: @@ -1506,6 +1647,9 @@ static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port, struct gswip_priv *priv = ds->priv; gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port); + + if (!dsa_is_cpu_port(ds, port)) + gswip_port_set_link(priv, port, false); } static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port, @@ -1517,6 +1661,13 @@ static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port, { struct gswip_priv *priv = ds->priv; + if (!dsa_is_cpu_port(ds, port)) { + gswip_port_set_link(priv, port, true); + gswip_port_set_speed(priv, port, speed, interface); + gswip_port_set_duplex(priv, port, duplex); + gswip_port_set_pause(priv, port, tx_pause, rx_pause); + } + gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port); } diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index f06f5fa2f898..9871d7cff93a 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -436,34 +436,32 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface) TD_DM_DRVP(8) | TD_DM_DRVN(8)); /* Setup core clock for MT7530 */ - if (!trgint) { - /* Disable MT7530 core clock */ - core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); - - /* Disable PLL, since phy_device has not yet been created - * provided for phy_[read,write]_mmd_indirect is called, we - * provide our own core_write_mmd_indirect to complete this - * function. - */ - core_write_mmd_indirect(priv, - CORE_GSWPLL_GRP1, - MDIO_MMD_VEND2, - 0); - - /* Set core clock into 500Mhz */ - core_write(priv, CORE_GSWPLL_GRP2, - RG_GSWPLL_POSDIV_500M(1) | - RG_GSWPLL_FBKDIV_500M(25)); + /* Disable MT7530 core clock */ + core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); - /* Enable PLL */ - core_write(priv, CORE_GSWPLL_GRP1, - RG_GSWPLL_EN_PRE | - RG_GSWPLL_POSDIV_200M(2) | - RG_GSWPLL_FBKDIV_200M(32)); - - /* Enable MT7530 core clock */ - core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); - } + /* Disable PLL, since phy_device has not yet been created + * provided for phy_[read,write]_mmd_indirect is called, we + * provide our own core_write_mmd_indirect to complete this + * function. 
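The mt7530 comment above mentions core_write_mmd_indirect(), a hand-rolled stand-in for phy_write_mmd_indirect() since no phy_device exists yet. The standard Clause-22 indirect MMD write sequence such a helper follows is sketched below; the write_reg callback is a placeholder for the driver's actual register accessor:

	#define MII_MMD_CTRL		0x0d
	#define MII_MMD_DATA		0x0e
	#define MII_MMD_CTRL_NOINCR	0x4000

	static void mmd_indirect_write(void (*write_reg)(int reg, int val),
				       int devad, int mmd_reg, int val)
	{
		write_reg(MII_MMD_CTRL, devad);		/* select device, address mode */
		write_reg(MII_MMD_DATA, mmd_reg);	/* latch target register */
		write_reg(MII_MMD_CTRL, MII_MMD_CTRL_NOINCR | devad); /* data mode */
		write_reg(MII_MMD_DATA, val);		/* write the value */
	}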
+ */ + core_write_mmd_indirect(priv, + CORE_GSWPLL_GRP1, + MDIO_MMD_VEND2, + 0); + + /* Set core clock into 500Mhz */ + core_write(priv, CORE_GSWPLL_GRP2, + RG_GSWPLL_POSDIV_500M(1) | + RG_GSWPLL_FBKDIV_500M(25)); + + /* Enable PLL */ + core_write(priv, CORE_GSWPLL_GRP1, + RG_GSWPLL_EN_PRE | + RG_GSWPLL_POSDIV_200M(2) | + RG_GSWPLL_FBKDIV_200M(32)); + + /* Enable MT7530 core clock */ + core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); /* Setup the MT7530 TRGMII Tx Clock */ core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 903d619e08ed..e08bf9377140 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3026,10 +3026,17 @@ out_resources: return err; } +/* prod_id for switch families which do not have a PHY model number */ +static const u16 family_prod_id_table[] = { + [MV88E6XXX_FAMILY_6341] = MV88E6XXX_PORT_SWITCH_ID_PROD_6341, + [MV88E6XXX_FAMILY_6390] = MV88E6XXX_PORT_SWITCH_ID_PROD_6390, +}; + static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg) { struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv; struct mv88e6xxx_chip *chip = mdio_bus->chip; + u16 prod_id; u16 val; int err; @@ -3040,23 +3047,12 @@ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg) err = chip->info->ops->phy_read(chip, bus, phy, reg, &val); mv88e6xxx_reg_unlock(chip); - if (reg == MII_PHYSID2) { - /* Some internal PHYs don't have a model number. */ - if (chip->info->family != MV88E6XXX_FAMILY_6165) - /* Then there is the 6165 family. It gets is - * PHYs correct. But it can also have two - * SERDES interfaces in the PHY address - * space. And these don't have a model - * number. But they are not PHYs, so we don't - * want to give them something a PHY driver - * will recognise. - * - * Use the mv88e6390 family model number - * instead, for anything which really could be - * a PHY, - */ - if (!(val & 0x3f0)) - val |= MV88E6XXX_PORT_SWITCH_ID_PROD_6390 >> 4; + /* Some internal PHYs don't have a model number. */ + if (reg == MII_PHYSID2 && !(val & 0x3f0) && + chip->info->family < ARRAY_SIZE(family_prod_id_table)) { + prod_id = family_prod_id_table[chip->info->family]; + if (prod_id) + val |= prod_id >> 4; } return err ? 
err : val; diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index 187b0b9a6e1d..f78daba60b35 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -1534,8 +1534,7 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) } pci_set_master(pdev); - ioaddr = pci_resource_start(pdev, 0); - if (!ioaddr) { + if (!pci_resource_len(pdev, 0)) { if (pcnet32_debug & NETIF_MSG_PROBE) pr_err("card has no PCI IO resources, aborting\n"); err = -ENODEV; @@ -1548,6 +1547,8 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) pr_err("architecture does not support 32bit PCI busmaster DMA\n"); goto err_disable_dev; } + + ioaddr = pci_resource_start(pdev, 0); if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) { if (pcnet32_debug & NETIF_MSG_PROBE) pr_err("io address range already allocated\n"); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index ba8321ec1ee7..3305979a9f7c 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -180,9 +180,9 @@ #define XGBE_DMA_SYS_AWCR 0x30303030 /* DMA cache settings - PCI device */ -#define XGBE_DMA_PCI_ARCR 0x00000003 -#define XGBE_DMA_PCI_AWCR 0x13131313 -#define XGBE_DMA_PCI_AWARCR 0x00000313 +#define XGBE_DMA_PCI_ARCR 0x000f0f0f +#define XGBE_DMA_PCI_AWCR 0x0f0f0f0f +#define XGBE_DMA_PCI_AWARCR 0x00000f0f /* DMA channel interrupt modes */ #define XGBE_IRQ_MODE_EDGE 0 diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index f8a168b73307..cb88ffb8f12f 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -54,7 +54,7 @@ config B44_PCI config BCM4908_ENET tristate "Broadcom BCM4908 internal mac support" depends on ARCH_BCM4908 || COMPILE_TEST - default y + default y if ARCH_BCM4908 help This driver supports Ethernet controller integrated into Broadcom BCM4908 family SoCs. 
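The pcnet32 hunk above reorders probe so that pci_resource_len() validates BAR 0 before pci_resource_start() is consumed, since a zero-length BAR makes the start address meaningless. Reduced to its essentials (plain integers instead of a struct pci_dev):

	static long probe_io_base(unsigned long bar_start, unsigned long bar_len)
	{
		if (!bar_len)
			return -1;	/* stand-in for -ENODEV: BAR 0 is absent */

		/* bar_start is only meaningful once the length check passed */
		return (long)bar_start;
	}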
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c index 98cf82dea3e4..65981931a798 100644 --- a/drivers/net/ethernet/broadcom/bcm4908_enet.c +++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c @@ -172,6 +172,7 @@ static int bcm4908_dma_alloc_buf_descs(struct bcm4908_enet *enet, err_free_buf_descs: dma_free_coherent(dev, size, ring->cpu_addr, ring->dma_addr); + ring->cpu_addr = NULL; return -ENOMEM; } diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 15362d016a87..0f6a6cb7e98d 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -3239,6 +3239,9 @@ static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs) bool cmp_b = false; bool cmp_c = false; + if (!macb_is_gem(bp)) + return; + tp4sp_v = &(fs->h_u.tcp_ip4_spec); tp4sp_m = &(fs->m_u.tcp_ip4_spec); @@ -3607,6 +3610,7 @@ static void macb_restore_features(struct macb *bp) { struct net_device *netdev = bp->dev; netdev_features_t features = netdev->features; + struct ethtool_rx_fs_item *item; /* TX checksum offload */ macb_set_txcsum_feature(bp, features); @@ -3615,6 +3619,9 @@ static void macb_restore_features(struct macb *bp) macb_set_rxcsum_feature(bp, features); /* RX Flow Filters */ + list_for_each_entry(item, &bp->rx_fs_list.list, list) + gem_prog_cmp_regs(bp, &item->fs); + macb_set_rxflow_feature(bp, features); } @@ -3911,6 +3918,7 @@ static int macb_init(struct platform_device *pdev) reg = gem_readl(bp, DCFG8); bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3), GEM_BFEXT(T2SCR, reg)); + INIT_LIST_HEAD(&bp->rx_fs_list.list); if (bp->max_tuples > 0) { /* also needs one ethtype match to check IPv4 */ if (GEM_BFEXT(SCR2ETH, reg) > 0) { @@ -3921,7 +3929,6 @@ static int macb_init(struct platform_device *pdev) /* Filtering is supported in hw but don't enable it in kernel now */ dev->hw_features |= NETIF_F_NTUPLE; /* init Rx flow definitions */ - INIT_LIST_HEAD(&bp->rx_fs_list.list); bp->rx_fs_list.count = 0; spin_lock_init(&bp->rx_fs_lock); } else diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h index b248966837b4..7aad40b2aa73 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h +++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h @@ -412,7 +412,7 @@ | CN6XXX_INTR_M0UNWI_ERR \ | CN6XXX_INTR_M1UPB0_ERR \ | CN6XXX_INTR_M1UPWI_ERR \ - | CN6XXX_INTR_M1UPB0_ERR \ + | CN6XXX_INTR_M1UNB0_ERR \ | CN6XXX_INTR_M1UNWI_ERR \ | CN6XXX_INTR_INSTR_DB_OF_ERR \ | CN6XXX_INTR_SLIST_DB_OF_ERR \ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index 6c85a10f465c..23a2ebdfd503 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -1794,11 +1794,25 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init, struct cudbg_buffer temp_buff = { 0 }; struct sge_qbase_reg_field *sge_qbase; struct ireg_buf *ch_sge_dbg; + u8 padap_running = 0; int i, rc; + u32 size; - rc = cudbg_get_buff(pdbg_init, dbg_buff, - sizeof(*ch_sge_dbg) * 2 + sizeof(*sge_qbase), - &temp_buff); + /* Accessing SGE_QBASE_MAP[0-3] and SGE_QBASE_INDEX regs can + * lead to SGE missing doorbells under heavy traffic. So, only + * collect them when adapter is idle. 
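The macb hunk above hoists INIT_LIST_HEAD(&bp->rx_fs_list.list) out of the max_tuples branch because macb_restore_features() now walks that list unconditionally; iterating a never-initialised list head is undefined behaviour. A miniature illustration with a hypothetical circular-list type:

	struct node { struct node *next; };
	struct list { struct node head; };

	static void list_init(struct list *l)
	{
		l->head.next = &l->head;	/* empty circular list */
	}

	static int list_walk(const struct list *l)
	{
		const struct node *n;
		int count = 0;

		/* safe only if list_init() ran, regardless of feature support */
		for (n = l->head.next; n != &l->head; n = n->next)
			count++;
		return count;
	}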
+ */ + for_each_port(padap, i) { + padap_running = netif_running(padap->port[i]); + if (padap_running) + break; + } + + size = sizeof(*ch_sge_dbg) * 2; + if (!padap_running) + size += sizeof(*sge_qbase); + + rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); if (rc) return rc; @@ -1820,7 +1834,8 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init, ch_sge_dbg++; } - if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) { + if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5 && + !padap_running) { sge_qbase = (struct sge_qbase_reg_field *)ch_sge_dbg; /* 1 addr reg SGE_QBASE_INDEX and 4 data reg * SGE_QBASE_MAP[0-3] diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 98829e482bfa..80882cfc370f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -2090,7 +2090,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x1190, 0x1194, 0x11a0, 0x11a4, 0x11b0, 0x11b4, - 0x11fc, 0x1274, + 0x11fc, 0x123c, + 0x1254, 0x1274, 0x1280, 0x133c, 0x1800, 0x18fc, 0x3000, 0x302c, diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c index 169e10c91378..a3f5b80888e5 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c @@ -350,18 +350,6 @@ static int chcr_set_tcb_field(struct chcr_ktls_info *tx_info, u16 word, } /* - * chcr_ktls_mark_tcb_close: mark tcb state to CLOSE - * @tx_info - driver specific tls info. - * return: NET_TX_OK/NET_XMIT_DROP. - */ -static int chcr_ktls_mark_tcb_close(struct chcr_ktls_info *tx_info) -{ - return chcr_set_tcb_field(tx_info, TCB_T_STATE_W, - TCB_T_STATE_V(TCB_T_STATE_M), - CHCR_TCB_STATE_CLOSED, 1); -} - -/* * chcr_ktls_dev_del: call back for tls_dev_del. * Remove the tid and l2t entry and close the connection. * it per connection basis. @@ -395,8 +383,6 @@ static void chcr_ktls_dev_del(struct net_device *netdev, /* clear tid */ if (tx_info->tid != -1) { - /* clear tcb state and then release tid */ - chcr_ktls_mark_tcb_close(tx_info); cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan, tx_info->tid, tx_info->ip_family); } @@ -574,7 +560,6 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk, return 0; free_tid: - chcr_ktls_mark_tcb_close(tx_info); #if IS_ENABLED(CONFIG_IPV6) /* clear clip entry */ if (tx_info->ip_family == AF_INET6) @@ -672,10 +657,6 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, if (tx_info->pending_close) { spin_unlock(&tx_info->lock); if (!status) { - /* it's a late success, tcb status is established, - * mark it close. - */ - chcr_ktls_mark_tcb_close(tx_info); cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan, tid, tx_info->ip_family); } @@ -722,7 +703,7 @@ static int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input) kvfree(tx_info); return 0; } - tx_info->open_state = false; + tx_info->open_state = CH_KTLS_OPEN_SUCCESS; spin_unlock(&tx_info->lock); complete(&tx_info->completion); @@ -1664,54 +1645,6 @@ static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb, } /* - * chcr_ktls_update_snd_una: Reset the SEND_UNA. It will be done to avoid - * sending the same segment again. It will discard the segment which is before - * the current tx max. - * @tx_info - driver specific tls info. - * @q - TX queue. - * return: NET_TX_OK/NET_XMIT_DROP. 
- */ -static int chcr_ktls_update_snd_una(struct chcr_ktls_info *tx_info, - struct sge_eth_txq *q) -{ - struct fw_ulptx_wr *wr; - unsigned int ndesc; - int credits; - void *pos; - u32 len; - - len = sizeof(*wr) + roundup(CHCR_SET_TCB_FIELD_LEN, 16); - ndesc = DIV_ROUND_UP(len, 64); - - credits = chcr_txq_avail(&q->q) - ndesc; - if (unlikely(credits < 0)) { - chcr_eth_txq_stop(q); - return NETDEV_TX_BUSY; - } - - pos = &q->q.desc[q->q.pidx]; - - wr = pos; - /* ULPTX wr */ - wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR)); - wr->cookie = 0; - /* fill len in wr field */ - wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16))); - - pos += sizeof(*wr); - - pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos, - TCB_SND_UNA_RAW_W, - TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M), - TCB_SND_UNA_RAW_V(0), 0); - - chcr_txq_advance(&q->q, ndesc); - cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc); - - return 0; -} - -/* * chcr_end_part_handler: This handler will handle the record which * is complete or if record's end part is received. T6 adapter has a issue that * it can't send out TAG with partial record so if its an end part then we have @@ -1735,7 +1668,9 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info, struct sge_eth_txq *q, u32 skb_offset, u32 tls_end_offset, bool last_wr) { + bool free_skb_if_tx_fails = false; struct sk_buff *nskb = NULL; + /* check if it is a complete record */ if (tls_end_offset == record->len) { nskb = skb; @@ -1758,6 +1693,8 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info, if (last_wr) dev_kfree_skb_any(skb); + else + free_skb_if_tx_fails = true; last_wr = true; @@ -1769,6 +1706,8 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info, record->num_frags, (last_wr && tcp_push_no_fin), mss)) { + if (free_skb_if_tx_fails) + dev_kfree_skb_any(skb); goto out; } tx_info->prev_seq = record->end_seq; @@ -1905,11 +1844,6 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info, /* reset tcp_seq as per the prior_data_required len */ tcp_seq -= prior_data_len; } - /* reset snd una, so the middle record won't send the already - * sent part. - */ - if (chcr_ktls_update_snd_una(tx_info, q)) - goto out; atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_middle_pkts); } else { atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_start_pkts); @@ -2010,12 +1944,11 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) * we will send the complete record again. */ + spin_lock_irqsave(&tx_ctx->base.lock, flags); + do { - int i; cxgb4_reclaim_completed_tx(adap, &q->q, true); - /* lock taken */ - spin_lock_irqsave(&tx_ctx->base.lock, flags); /* fetch the tls record */ record = tls_get_record(&tx_ctx->base, tcp_seq, &tx_info->record_no); @@ -2074,11 +2007,11 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) tls_end_offset, skb_offset, 0); - spin_unlock_irqrestore(&tx_ctx->base.lock, flags); if (ret) { /* free the refcount taken earlier */ if (tls_end_offset < data_len) dev_kfree_skb_any(skb); + spin_unlock_irqrestore(&tx_ctx->base.lock, flags); goto out; } @@ -2088,16 +2021,6 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) continue; } - /* increase page reference count of the record, so that there - * won't be any chance of page free in middle if in case stack - * receives ACK and try to delete the record. 
- */ - for (i = 0; i < record->num_frags; i++) - __skb_frag_ref(&record->frags[i]); - /* lock cleared */ - spin_unlock_irqrestore(&tx_ctx->base.lock, flags); - - /* if a tls record is finishing in this SKB */ if (tls_end_offset <= data_len) { ret = chcr_end_part_handler(tx_info, skb, record, @@ -2122,13 +2045,9 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) data_len = 0; } - /* clear the frag ref count which increased locally before */ - for (i = 0; i < record->num_frags; i++) { - /* clear the frag ref count */ - __skb_frag_unref(&record->frags[i]); - } /* if any failure, come out from the loop. */ if (ret) { + spin_unlock_irqrestore(&tx_ctx->base.lock, flags); if (th->fin) dev_kfree_skb_any(skb); @@ -2143,6 +2062,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) } while (data_len > 0); + spin_unlock_irqrestore(&tx_ctx->base.lock, flags); atomic64_inc(&port_stats->ktls_tx_encrypted_packets); atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes); diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 252adfa5d837..8a9096aa85cd 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1471,8 +1471,10 @@ dm9000_probe(struct platform_device *pdev) /* Init network device */ ndev = alloc_etherdev(sizeof(struct board_info)); - if (!ndev) - return -ENOMEM; + if (!ndev) { + ret = -ENOMEM; + goto out_regulator_disable; + } SET_NETDEV_DEV(ndev, &pdev->dev); diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c index e3a8858915b3..df0eab479d51 100644 --- a/drivers/net/ethernet/dlink/sundance.c +++ b/drivers/net/ethernet/dlink/sundance.c @@ -963,7 +963,7 @@ static void tx_timeout(struct net_device *dev, unsigned int txqueue) unsigned long flag; netif_stop_queue(dev); - tasklet_disable(&np->tx_tasklet); + tasklet_disable_in_atomic(&np->tx_tasklet); iowrite16(0, ioaddr + IntrEnable); printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x " "TxFrameId %2.2x," diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 88bfe2107938..04421aec2dfd 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -1337,6 +1337,7 @@ static int ftgmac100_poll(struct napi_struct *napi, int budget) */ if (unlikely(priv->need_mac_restart)) { ftgmac100_start_hw(priv); + priv->need_mac_restart = false; /* Re-enable "bad" interrupts */ iowrite32(FTGMAC100_INT_BAD, diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 1cf8ef717453..3ec4d9fddd52 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -363,7 +363,11 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num, static int gfar_set_mac_addr(struct net_device *dev, void *p) { - eth_mac_addr(dev, p); + int ret; + + ret = eth_mac_addr(dev, p); + if (ret) + return ret; gfar_set_mac_for_addr(dev, 0, dev->dev_addr); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index e3f81c7e0ce7..b0dbe6dcaa7b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -3966,7 +3966,6 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) * normalcy is to reset. * 2. 
A new reset request from the stack due to timeout * - * For the first case,error event might not have ae handle available. * check if this is a new reset request and we are not here just because * last reset attempt did not succeed and watchdog hit us again. We will * know this if last reset request did not occur very recently (watchdog @@ -3976,14 +3975,14 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) * want to make sure we throttle the reset request. Therefore, we will * not allow it again before 3*HZ times. */ - if (!handle) - handle = &hdev->vport[0].nic; if (time_before(jiffies, (hdev->last_reset_time + HCLGE_RESET_INTERVAL))) { mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); return; - } else if (hdev->default_reset_request) { + } + + if (hdev->default_reset_request) { hdev->reset_level = hclge_get_reset_level(ae_dev, &hdev->default_reset_request); @@ -11211,7 +11210,7 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, if (ret) return ret; - /* RSS indirection table has been configuared by user */ + /* RSS indirection table has been configured by user */ if (rxfh_configured) goto out; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 700e068764c8..e295d359e912 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -2193,7 +2193,7 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev) if (test_and_clear_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state)) { - /* PF has initmated that it is about to reset the hardware. + /* PF has intimated that it is about to reset the hardware. * We now have to poll & check if hardware has actually * completed the reset sequence. On hardware reset completion, * VF needs to reset the client and ae device. 
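Among the hunks above, the ibmvnic TX-completion change splits skb freeing between dev_consume_skb_irq() and dev_kfree_skb_irq(). Both free the buffer safely from hard-irq context, but only the kfree variant is reported through the kfree_skb tracepoint and drop monitors, so successful completions stay quiet while errored frames show up as drops. A minimal sketch of that split, with a hypothetical handler name and per-frame completion code:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical TX completion handler: rc is the device's per-frame
 * completion code, zero on success. */
static void example_tx_complete(struct net_device *dev,
				struct sk_buff *skb, int rc)
{
	if (rc) {
		netdev_err(dev, "tx error %x\n", rc);
		dev_kfree_skb_irq(skb);		/* counted as a drop */
	} else {
		dev_consume_skb_irq(skb);	/* normal completion */
	}
}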
@@ -2624,14 +2624,14 @@ static int hclgevf_ae_start(struct hnae3_handle *handle) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); + hclgevf_reset_tqp_stats(handle); hclgevf_request_link_info(hdev); hclgevf_update_link_mode(hdev); - clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); - return 0; } @@ -3497,7 +3497,7 @@ static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, if (ret) return ret; - /* RSS indirection table has been configuared by user */ + /* RSS indirection table has been configured by user */ if (rxfh_configured) goto out; diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 9c6438d3b3a5..ffb2a91750c7 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1149,19 +1149,13 @@ static int __ibmvnic_open(struct net_device *netdev) rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP); if (rc) { - for (i = 0; i < adapter->req_rx_queues; i++) - napi_disable(&adapter->napi[i]); + ibmvnic_napi_disable(adapter); release_resources(adapter); return rc; } netif_tx_start_all_queues(netdev); - if (prev_state == VNIC_CLOSED) { - for (i = 0; i < adapter->req_rx_queues; i++) - napi_schedule(&adapter->napi[i]); - } - adapter->state = VNIC_OPEN; return rc; } @@ -1922,7 +1916,7 @@ static int do_reset(struct ibmvnic_adapter *adapter, u64 old_num_rx_queues, old_num_tx_queues; u64 old_num_rx_slots, old_num_tx_slots; struct net_device *netdev = adapter->netdev; - int i, rc; + int rc; netdev_dbg(adapter->netdev, "[S:%d FOP:%d] Reset reason %d, reset_state %d\n", @@ -2111,10 +2105,6 @@ static int do_reset(struct ibmvnic_adapter *adapter, /* refresh device's multicast list */ ibmvnic_set_multi(netdev); - /* kick napi */ - for (i = 0; i < adapter->req_rx_queues; i++) - napi_schedule(&adapter->napi[i]); - if (adapter->reset_reason == VNIC_RESET_FAILOVER || adapter->reset_reason == VNIC_RESET_MOBILITY) __netdev_notify_peers(netdev); @@ -3204,9 +3194,6 @@ restart_loop: next = ibmvnic_next_scrq(adapter, scrq); for (i = 0; i < next->tx_comp.num_comps; i++) { - if (next->tx_comp.rcs[i]) - dev_err(dev, "tx error %x\n", - next->tx_comp.rcs[i]); index = be32_to_cpu(next->tx_comp.correlators[i]); if (index & IBMVNIC_TSO_POOL_MASK) { tx_pool = &adapter->tso_pool[pool]; @@ -3220,7 +3207,13 @@ restart_loop: num_entries += txbuff->num_entries; if (txbuff->skb) { total_bytes += txbuff->skb->len; - dev_consume_skb_irq(txbuff->skb); + if (next->tx_comp.rcs[i]) { + dev_err(dev, "tx error %x\n", + next->tx_comp.rcs[i]); + dev_kfree_skb_irq(txbuff->skb); + } else { + dev_consume_skb_irq(txbuff->skb); + } txbuff->skb = NULL; } else { netdev_warn(adapter->netdev, diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index 88faf05e23ba..0b1e890dd583 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -899,6 +899,8 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active) } else { data &= ~IGP02E1000_PM_D0_LPLU; ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); + if (ret_val) + return ret_val; /* LPLU and SmartSpeed are mutually exclusive. LPLU is used * during Dx states where the power conservation is most * important. 
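The hclge_reset_event() hunk above throttles back-to-back reset requests by comparing jiffies against the time of the last reset. A sketch of that idiom follows, using time_before() so the comparison survives jiffies wrap-around; the helper name is hypothetical, and the interval mirrors the 3*HZ mentioned in the driver's comment.

#include <linux/jiffies.h>
#include <linux/types.h>

#define EXAMPLE_RESET_INTERVAL (3 * HZ)	/* assumed 3s, per the comment */

static bool example_reset_throttled(unsigned long last_reset_time)
{
	/* A plain '<' on raw jiffies values breaks at wrap-around;
	 * time_before() does a subtraction-based compare instead. */
	return time_before(jiffies,
			   last_reset_time + EXAMPLE_RESET_INTERVAL);
}

A caller would simply bail out (or re-arm a timer, as hclge does) when example_reset_throttled(hdev_last_reset_time) returns true.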
During driver activity we should enable diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index 69a2329ea463..db79c4e6413e 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -1,8 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright(c) 1999 - 2018 Intel Corporation. */ -#ifndef _E1000_HW_H_ -#define _E1000_HW_H_ +#ifndef _E1000E_HW_H_ +#define _E1000E_HW_H_ #include "regs.h" #include "defines.h" @@ -714,4 +714,4 @@ struct e1000_hw { #include "80003es2lan.h" #include "ich8lan.h" -#endif +#endif /* _E1000E_HW_H_ */ diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index e9b82c209c2d..a0948002ddf8 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -5974,15 +5974,19 @@ static void e1000_reset_task(struct work_struct *work) struct e1000_adapter *adapter; adapter = container_of(work, struct e1000_adapter, reset_task); + rtnl_lock(); /* don't run the task if already down */ - if (test_bit(__E1000_DOWN, &adapter->state)) + if (test_bit(__E1000_DOWN, &adapter->state)) { + rtnl_unlock(); return; + } if (!(adapter->flags & FLAG_RESTART_NOW)) { e1000e_dump(adapter); e_err("Reset adapter unexpectedly\n"); } e1000e_reinit_locked(adapter); + rtnl_unlock(); } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index cd53981fa5e0..15f93b355099 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -142,6 +142,7 @@ enum i40e_state_t { __I40E_VIRTCHNL_OP_PENDING, __I40E_RECOVERY_MODE, __I40E_VF_RESETS_DISABLED, /* disable resets during i40e_remove */ + __I40E_VFS_RELEASING, /* This must be last as it determines the size of the BITMAP */ __I40E_STATE_SIZE__, }; diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index d7c13ca9be7d..d627b59ad446 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -578,6 +578,9 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, case RING_TYPE_XDP: ring = kmemdup(vsi->xdp_rings[ring_id], sizeof(*ring), GFP_KERNEL); break; + default: + ring = NULL; + break; } if (!ring) return; diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index c70dec65a572..0e92668012e3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -232,6 +232,8 @@ static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[], I40E_STAT(struct i40e_vsi, _name, _stat) #define I40E_VEB_STAT(_name, _stat) \ I40E_STAT(struct i40e_veb, _name, _stat) +#define I40E_VEB_TC_STAT(_name, _stat) \ + I40E_STAT(struct i40e_cp_veb_tc_stats, _name, _stat) #define I40E_PFC_STAT(_name, _stat) \ I40E_STAT(struct i40e_pfc_stats, _name, _stat) #define I40E_QUEUE_STAT(_name, _stat) \ @@ -266,11 +268,18 @@ static const struct i40e_stats i40e_gstrings_veb_stats[] = { I40E_VEB_STAT("veb.rx_unknown_protocol", stats.rx_unknown_protocol), }; +struct i40e_cp_veb_tc_stats { + u64 tc_rx_packets; + u64 tc_rx_bytes; + u64 tc_tx_packets; + u64 tc_tx_bytes; +}; + static const struct i40e_stats i40e_gstrings_veb_tc_stats[] = { - I40E_VEB_STAT("veb.tc_%u_tx_packets", tc_stats.tc_tx_packets), - I40E_VEB_STAT("veb.tc_%u_tx_bytes", tc_stats.tc_tx_bytes), - 
I40E_VEB_STAT("veb.tc_%u_rx_packets", tc_stats.tc_rx_packets), - I40E_VEB_STAT("veb.tc_%u_rx_bytes", tc_stats.tc_rx_bytes), + I40E_VEB_TC_STAT("veb.tc_%u_tx_packets", tc_tx_packets), + I40E_VEB_TC_STAT("veb.tc_%u_tx_bytes", tc_tx_bytes), + I40E_VEB_TC_STAT("veb.tc_%u_rx_packets", tc_rx_packets), + I40E_VEB_TC_STAT("veb.tc_%u_rx_bytes", tc_rx_bytes), }; static const struct i40e_stats i40e_gstrings_misc_stats[] = { @@ -1101,6 +1110,7 @@ static int i40e_get_link_ksettings(struct net_device *netdev, /* Set flow control settings */ ethtool_link_ksettings_add_link_mode(ks, supported, Pause); + ethtool_link_ksettings_add_link_mode(ks, supported, Asym_Pause); switch (hw->fc.requested_mode) { case I40E_FC_FULL: @@ -2217,6 +2227,29 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset) } /** + * i40e_get_veb_tc_stats - copy VEB TC statistics to formatted structure + * @tc: the TC statistics in VEB structure (veb->tc_stats) + * @i: the index of traffic class in (veb->tc_stats) structure to copy + * + * Copy VEB TC statistics from structure of arrays (veb->tc_stats) to + * one dimensional structure i40e_cp_veb_tc_stats. + * Produce formatted i40e_cp_veb_tc_stats structure of the VEB TC + * statistics for the given TC. + **/ +static struct i40e_cp_veb_tc_stats +i40e_get_veb_tc_stats(struct i40e_veb_tc_stats *tc, unsigned int i) +{ + struct i40e_cp_veb_tc_stats veb_tc = { + .tc_rx_packets = tc->tc_rx_packets[i], + .tc_rx_bytes = tc->tc_rx_bytes[i], + .tc_tx_packets = tc->tc_tx_packets[i], + .tc_tx_bytes = tc->tc_tx_bytes[i], + }; + + return veb_tc; +} + +/** * i40e_get_pfc_stats - copy HW PFC statistics to formatted structure * @pf: the PF device structure * @i: the priority value to copy @@ -2300,8 +2333,16 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, i40e_gstrings_veb_stats); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) - i40e_add_ethtool_stats(&data, veb_stats ? veb : NULL, - i40e_gstrings_veb_tc_stats); + if (veb_stats) { + struct i40e_cp_veb_tc_stats veb_tc = + i40e_get_veb_tc_stats(&veb->tc_stats, i); + + i40e_add_ethtool_stats(&data, &veb_tc, + i40e_gstrings_veb_tc_stats); + } else { + i40e_add_ethtool_stats(&data, NULL, + i40e_gstrings_veb_tc_stats); + } i40e_add_ethtool_stats(&data, pf, i40e_gstrings_stats); @@ -5439,7 +5480,7 @@ static int i40e_get_module_eeprom(struct net_device *netdev, status = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, - true, addr, offset, &value, NULL); + addr, true, offset, &value, NULL); if (status) return -EIO; data[i] = value; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 353deae139f9..527023ee4c07 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2560,8 +2560,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) i40e_stat_str(hw, aq_ret), i40e_aq_str(hw, hw->aq.asq_last_status)); } else { - dev_info(&pf->pdev->dev, "%s is %s allmulti mode.\n", - vsi->netdev->name, + dev_info(&pf->pdev->dev, "%s allmulti mode.\n", cur_multipromisc ? "entering" : "leaving"); } } @@ -3259,6 +3258,17 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) } /** + * i40e_rx_offset - Return expected offset into page to access data + * @rx_ring: Ring we are requesting offset of + * + * Returns the offset value for ring into the data buffer. + */ +static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring) +{ + return ring_uses_build_skb(rx_ring) ? 
I40E_SKB_PAD : 0; +} + +/** * i40e_configure_rx_ring - Configure a receive ring context * @ring: The Rx ring to configure * @@ -3369,6 +3379,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) else set_ring_build_skb_enabled(ring); + ring->rx_offset = i40e_rx_offset(ring); + /* cache tail for quicker writes, and clear the reg before use */ ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); writel(0, ring->tail); @@ -6725,9 +6737,9 @@ out: set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); set_bit(__I40E_CLIENT_L2_CHANGE, pf->state); } - /* registers are set, lets apply */ - if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB) - ret = i40e_hw_set_dcb_config(pf, new_cfg); + /* registers are set, lets apply */ + if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB) + ret = i40e_hw_set_dcb_config(pf, new_cfg); } err: @@ -10560,12 +10572,6 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) goto end_core_reset; } - if (!lock_acquired) - rtnl_lock(); - ret = i40e_setup_pf_switch(pf, reinit); - if (ret) - goto end_unlock; - #ifdef CONFIG_I40E_DCB /* Enable FW to write a default DCB config on link-up * unless I40E_FLAG_TC_MQPRIO was enabled or DCB @@ -10580,7 +10586,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) i40e_aq_set_dcb_parameters(hw, false, NULL); dev_warn(&pf->pdev->dev, "DCB is not supported for X710-T*L 2.5/5G speeds\n"); - pf->flags &= ~I40E_FLAG_DCB_CAPABLE; + pf->flags &= ~I40E_FLAG_DCB_CAPABLE; } else { i40e_aq_set_dcb_parameters(hw, true, NULL); ret = i40e_init_pf_dcb(pf); @@ -10594,6 +10600,11 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) } #endif /* CONFIG_I40E_DCB */ + if (!lock_acquired) + rtnl_lock(); + ret = i40e_setup_pf_switch(pf, reinit); + if (ret) + goto end_unlock; /* The driver only wants link up/down and module qualification * reports from firmware. Note the negative logic. @@ -12346,6 +12357,7 @@ static int i40e_sw_init(struct i40e_pf *pf) { int err = 0; int size; + u16 pow; /* Set default capability flags */ pf->flags = I40E_FLAG_RX_CSUM_ENABLED | @@ -12364,6 +12376,11 @@ static int i40e_sw_init(struct i40e_pf *pf) pf->rss_table_size = pf->hw.func_caps.rss_table_size; pf->rss_size_max = min_t(int, pf->rss_size_max, pf->hw.func_caps.num_tx_qp); + + /* find the next higher power-of-2 of num cpus */ + pow = roundup_pow_of_two(num_online_cpus()); + pf->rss_size_max = min_t(int, pf->rss_size_max, pow); + if (pf->hw.func_caps.rss) { pf->flags |= I40E_FLAG_RSS_ENABLED; pf->alloc_rss_size = min_t(int, pf->rss_size_max, @@ -15127,12 +15144,16 @@ static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw) * in order to register the netdev */ v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN); - if (v_idx < 0) + if (v_idx < 0) { + err = v_idx; goto err_switch_setup; + } pf->lan_vsi = v_idx; vsi = pf->vsi[v_idx]; - if (!vsi) + if (!vsi) { + err = -EFAULT; goto err_switch_setup; + } vsi->alloc_queue_pairs = 1; err = i40e_config_netdev(vsi); if (err) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 627794b31e33..06b4271219b1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1570,17 +1570,6 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring) } /** - * i40e_rx_offset - Return expected offset into page to access data - * @rx_ring: Ring we are requesting offset of - * - * Returns the offset value for ring into the data buffer. 
- */ -static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring) -{ - return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0; -} - -/** * i40e_setup_rx_descriptors - Allocate Rx descriptors * @rx_ring: Rx descriptor ring (for a specific queue) to setup * @@ -1608,7 +1597,6 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; - rx_ring->rx_offset = i40e_rx_offset(rx_ring); /* XDP RX-queue info only needed for RX rings exposed to XDP */ if (rx_ring->vsi->type == I40E_VSI_MAIN) { @@ -2307,8 +2295,7 @@ int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring) * @rx_ring: Rx ring being processed * @xdp: XDP buffer containing the frame **/ -static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring, - struct xdp_buff *xdp) +static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp) { int err, result = I40E_XDP_PASS; struct i40e_ring *xdp_ring; @@ -2347,7 +2334,7 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring, } xdp_out: rcu_read_unlock(); - return ERR_PTR(-result); + return result; } /** @@ -2460,6 +2447,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) unsigned int xdp_xmit = 0; bool failure = false; struct xdp_buff xdp; + int xdp_res = 0; #if (PAGE_SIZE < 8192) frame_sz = i40e_rx_frame_truesize(rx_ring, 0); @@ -2525,12 +2513,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) /* At larger PAGE_SIZE, frame_sz depend on len size */ xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size); #endif - skb = i40e_run_xdp(rx_ring, &xdp); + xdp_res = i40e_run_xdp(rx_ring, &xdp); } - if (IS_ERR(skb)) { - unsigned int xdp_res = -PTR_ERR(skb); - + if (xdp_res) { if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) { xdp_xmit |= xdp_res; i40e_rx_buffer_flip(rx_ring, rx_buffer, size); diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 1b6ec9be155a..5d301a466f5c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -137,6 +137,7 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf) **/ static inline void i40e_vc_disable_vf(struct i40e_vf *vf) { + struct i40e_pf *pf = vf->pf; int i; i40e_vc_notify_vf_reset(vf); @@ -147,6 +148,11 @@ static inline void i40e_vc_disable_vf(struct i40e_vf *vf) * ensure a reset. */ for (i = 0; i < 20; i++) { + /* If PF is in VFs releasing state reset VF is impossible, + * so leave it. 
+ */ + if (test_bit(__I40E_VFS_RELEASING, pf->state)) + return; if (i40e_reset_vf(vf, false)) return; usleep_range(10000, 20000); @@ -1574,6 +1580,8 @@ void i40e_free_vfs(struct i40e_pf *pf) if (!pf->vf) return; + + set_bit(__I40E_VFS_RELEASING, pf->state); while (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) usleep_range(1000, 2000); @@ -1631,6 +1639,7 @@ void i40e_free_vfs(struct i40e_pf *pf) } } clear_bit(__I40E_VF_DISABLE, pf->state); + clear_bit(__I40E_VFS_RELEASING, pf->state); } #ifdef CONFIG_PCI_IOV diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index fc32c5019b0f..12ca84113587 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -471,7 +471,7 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget) nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, descs, budget); if (!nb_pkts) - return false; + return true; if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) { nb_processed = xdp_ring->count - xdp_ring->next_to_use; @@ -488,7 +488,7 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget) i40e_update_tx_stats(xdp_ring, nb_pkts, total_bytes); - return true; + return nb_pkts < budget; } /** diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 357706444dd5..17101c45cbcd 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -196,7 +196,6 @@ enum ice_state { __ICE_NEEDS_RESTART, __ICE_PREPARED_FOR_RESET, /* set by driver when prepared */ __ICE_RESET_OICR_RECV, /* set by driver after rcv reset OICR */ - __ICE_DCBNL_DEVRESET, /* set by dcbnl devreset */ __ICE_PFR_REQ, /* set by driver and peers */ __ICE_CORER_REQ, /* set by driver and peers */ __ICE_GLOBR_REQ, /* set by driver and peers */ @@ -624,7 +623,7 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset); void ice_print_link_msg(struct ice_vsi *vsi, bool isup); const char *ice_stat_str(enum ice_status stat_err); const char *ice_aq_str(enum ice_aq_err aq_err); -bool ice_is_wol_supported(struct ice_pf *pf); +bool ice_is_wol_supported(struct ice_hw *hw); int ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add, bool is_tun); @@ -642,6 +641,7 @@ int ice_fdir_create_dflt_rules(struct ice_pf *pf); int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout, struct ice_rq_event_info *event); int ice_open(struct net_device *netdev); +int ice_open_internal(struct net_device *netdev); int ice_stop(struct net_device *netdev); void ice_service_task_schedule(struct ice_pf *pf); diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 3124a3bf519a..1148d768f8ed 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -275,6 +275,22 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) } /** + * ice_rx_offset - Return expected offset into page to access data + * @rx_ring: Ring we are requesting offset of + * + * Returns the offset value for ring into the data buffer. 
+ */ +static unsigned int ice_rx_offset(struct ice_ring *rx_ring) +{ + if (ice_ring_uses_build_skb(rx_ring)) + return ICE_SKB_PAD; + else if (ice_is_xdp_ena_vsi(rx_ring->vsi)) + return XDP_PACKET_HEADROOM; + + return 0; +} + +/** * ice_setup_rx_ctx - Configure a receive ring context * @ring: The Rx ring to configure * @@ -413,11 +429,15 @@ int ice_setup_rx_ctx(struct ice_ring *ring) else ice_set_ring_build_skb_ena(ring); + ring->rx_offset = ice_rx_offset(ring); + /* init queue specific tail register */ ring->tail = hw->hw_addr + QRX_TAIL(pf_q); writel(0, ring->tail); if (ring->xsk_pool) { + bool ok; + if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) { dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n", num_bufs, ring->q_index); @@ -426,8 +446,8 @@ int ice_setup_rx_ctx(struct ice_ring *ring) return 0; } - err = ice_alloc_rx_bufs_zc(ring, num_bufs); - if (err) + ok = ice_alloc_rx_bufs_zc(ring, num_bufs); + if (!ok) dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n", ring->q_index, pf_q); return 0; diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 3d9475e222cd..a20edf1538a0 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -717,8 +717,8 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable) if (!data) { data = devm_kcalloc(ice_hw_to_dev(hw), - sizeof(*data), ICE_AQC_FW_LOG_ID_MAX, + sizeof(*data), GFP_KERNEL); if (!data) return ICE_ERR_NO_MEMORY; diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h index faaa08e8171b..68866f4f0eb0 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.h +++ b/drivers/net/ethernet/intel/ice/ice_controlq.h @@ -31,8 +31,8 @@ enum ice_ctl_q { ICE_CTL_Q_MAILBOX, }; -/* Control Queue timeout settings - max delay 250ms */ -#define ICE_CTL_Q_SQ_CMD_TIMEOUT 2500 /* Count 2500 times */ +/* Control Queue timeout settings - max delay 1s */ +#define ICE_CTL_Q_SQ_CMD_TIMEOUT 10000 /* Count 10000 times */ #define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */ #define ICE_CTL_Q_ADMIN_INIT_TIMEOUT 10 /* Count 10 times */ #define ICE_CTL_Q_ADMIN_INIT_MSEC 100 /* Check every 100msec */ diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c index e42727941ef5..28e834a128c0 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb.c @@ -738,22 +738,27 @@ ice_aq_get_cee_dcb_cfg(struct ice_hw *hw, /** * ice_cee_to_dcb_cfg * @cee_cfg: pointer to CEE configuration struct - * @dcbcfg: DCB configuration struct + * @pi: port information structure * * Convert CEE configuration from firmware to DCB configuration */ static void ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg, - struct ice_dcbx_cfg *dcbcfg) + struct ice_port_info *pi) { u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status); - u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift; - u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio); + u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift, j; u8 i, err, sync, oper, app_index, ice_app_sel_type; + u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio); u16 ice_aqc_cee_app_mask, ice_aqc_cee_app_shift; + struct ice_dcbx_cfg *cmp_dcbcfg, *dcbcfg; u16 ice_app_prot_id_type; - /* CEE PG data to ETS config */ + dcbcfg = &pi->qos_cfg.local_dcbx_cfg; + dcbcfg->dcbx_mode = ICE_DCBX_MODE_CEE; + 
dcbcfg->tlv_status = tlv_status; + + /* CEE PG data */ dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc; /* Note that the FW creates the oper_prio_tc nibbles reversed @@ -780,10 +785,16 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg, } } - /* CEE PFC data to ETS config */ + /* CEE PFC data */ dcbcfg->pfc.pfcena = cee_cfg->oper_pfc_en; dcbcfg->pfc.pfccap = ICE_MAX_TRAFFIC_CLASS; + /* CEE APP TLV data */ + if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING) + cmp_dcbcfg = &pi->qos_cfg.desired_dcbx_cfg; + else + cmp_dcbcfg = &pi->qos_cfg.remote_dcbx_cfg; + app_index = 0; for (i = 0; i < 3; i++) { if (i == 0) { @@ -802,6 +813,18 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg, ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_ISCSI_S; ice_app_sel_type = ICE_APP_SEL_TCPIP; ice_app_prot_id_type = ICE_APP_PROT_ID_ISCSI; + + for (j = 0; j < cmp_dcbcfg->numapps; j++) { + u16 prot_id = cmp_dcbcfg->app[j].prot_id; + u8 sel = cmp_dcbcfg->app[j].selector; + + if (sel == ICE_APP_SEL_TCPIP && + (prot_id == ICE_APP_PROT_ID_ISCSI || + prot_id == ICE_APP_PROT_ID_ISCSI_860)) { + ice_app_prot_id_type = prot_id; + break; + } + } } else { /* FIP APP */ ice_aqc_cee_status_mask = ICE_AQC_CEE_FIP_STATUS_M; @@ -892,11 +915,8 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi) ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL); if (!ret) { /* CEE mode */ - dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg; - dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_CEE; - dcbx_cfg->tlv_status = le32_to_cpu(cee_cfg.tlv_status); - ice_cee_to_dcb_cfg(&cee_cfg, dcbx_cfg); ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_CEE); + ice_cee_to_dcb_cfg(&cee_cfg, pi); } else if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) { /* CEE mode not enabled try querying IEEE data */ dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg; diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c index 468a63f7eff9..4180f1f35fb8 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c @@ -18,12 +18,10 @@ static void ice_dcbnl_devreset(struct net_device *netdev) while (ice_is_reset_in_progress(pf->state)) usleep_range(1000, 2000); - set_bit(__ICE_DCBNL_DEVRESET, pf->state); dev_close(netdev); netdev_state_change(netdev); dev_open(netdev, NULL); netdev_state_change(netdev); - clear_bit(__ICE_DCBNL_DEVRESET, pf->state); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 2dcfa685b763..32ba71a16165 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -3472,7 +3472,7 @@ static void ice_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) netdev_warn(netdev, "Wake on LAN is not supported on this interface!\n"); /* Get WoL settings based on the HW capability */ - if (ice_is_wol_supported(pf)) { + if (ice_is_wol_supported(&pf->hw)) { wol->supported = WAKE_MAGIC; wol->wolopts = pf->wol_ena ? 
WAKE_MAGIC : 0; } else { @@ -3492,7 +3492,7 @@ static int ice_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; - if (vsi->type != ICE_VSI_PF || !ice_is_wol_supported(pf)) + if (vsi->type != ICE_VSI_PF || !ice_is_wol_supported(&pf->hw)) return -EOPNOTSUPP; /* only magic packet is supported */ diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 8d4e2ad4328d..d13c7fc8fb0a 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -2620,7 +2620,7 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked) if (!locked) rtnl_lock(); - err = ice_open(vsi->netdev); + err = ice_open_internal(vsi->netdev); if (!locked) rtnl_unlock(); @@ -2649,7 +2649,7 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked) if (!locked) rtnl_lock(); - ice_stop(vsi->netdev); + ice_vsi_close(vsi); if (!locked) rtnl_unlock(); @@ -3078,7 +3078,6 @@ err_vsi: bool ice_is_reset_in_progress(unsigned long *state) { return test_bit(__ICE_RESET_OICR_RECV, state) || - test_bit(__ICE_DCBNL_DEVRESET, state) || test_bit(__ICE_PFR_REQ, state) || test_bit(__ICE_CORER_REQ, state) || test_bit(__ICE_GLOBR_REQ, state); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 2c23c8f468a5..d821c687f239 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -3537,15 +3537,14 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf) } /** - * ice_is_wol_supported - get NVM state of WoL - * @pf: board private structure + * ice_is_wol_supported - check if WoL is supported + * @hw: pointer to hardware info * * Check if WoL is supported based on the HW configuration. 
* Returns true if NVM supports and enables WoL for this port, false otherwise */ -bool ice_is_wol_supported(struct ice_pf *pf) +bool ice_is_wol_supported(struct ice_hw *hw) { - struct ice_hw *hw = &pf->hw; u16 wol_ctrl; /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control @@ -3554,7 +3553,7 @@ bool ice_is_wol_supported(struct ice_pf *pf) if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl)) return false; - return !(BIT(hw->pf_id) & wol_ctrl); + return !(BIT(hw->port_info->lport) & wol_ctrl); } /** @@ -4192,28 +4191,25 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) goto err_send_version_unroll; } + /* not a fatal error if this fails */ err = ice_init_nvm_phy_type(pf->hw.port_info); - if (err) { + if (err) dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err); - goto err_send_version_unroll; - } + /* not a fatal error if this fails */ err = ice_update_link_info(pf->hw.port_info); - if (err) { + if (err) dev_err(dev, "ice_update_link_info failed: %d\n", err); - goto err_send_version_unroll; - } ice_init_link_dflt_override(pf->hw.port_info); /* if media available, initialize PHY settings */ if (pf->hw.port_info->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { + /* not a fatal error if this fails */ err = ice_init_phy_user_cfg(pf->hw.port_info); - if (err) { + if (err) dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err); - goto err_send_version_unroll; - } if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) { struct ice_vsi *vsi = ice_get_main_vsi(pf); @@ -4568,6 +4564,7 @@ static int __maybe_unused ice_suspend(struct device *dev) continue; ice_vsi_free_q_vectors(pf->vsi[v]); } + ice_free_cpu_rx_rmap(ice_get_main_vsi(pf)); ice_clear_interrupt_scheme(pf); pci_save_state(pdev); @@ -6637,6 +6634,28 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) int ice_open(struct net_device *netdev) { struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_pf *pf = np->vsi->back; + + if (ice_is_reset_in_progress(pf->state)) { + netdev_err(netdev, "can't open net device while reset is in progress"); + return -EBUSY; + } + + return ice_open_internal(netdev); +} + +/** + * ice_open_internal - Called when a network interface becomes active + * @netdev: network interface device structure + * + * Internal ice_open implementation. 
Should not be used directly except for ice_open and reset + * handling routine + * + * Returns 0 on success, negative value on failure + */ +int ice_open_internal(struct net_device *netdev) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct ice_port_info *pi; @@ -6715,6 +6734,12 @@ int ice_stop(struct net_device *netdev) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + + if (ice_is_reset_in_progress(pf->state)) { + netdev_err(netdev, "can't stop net device while reset is in progress"); + return -EBUSY; + } ice_vsi_close(vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 67c965a3f5d2..834cbd3f7b31 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -1238,6 +1238,9 @@ ice_add_update_vsi_list(struct ice_hw *hw, ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2, vsi_list_id); + if (!m_entry->vsi_list_info) + return ICE_ERR_NO_MEMORY; + /* If this entry was large action then the large action needs * to be updated to point to FWD to VSI list */ @@ -2220,6 +2223,7 @@ ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle) return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI && fm_entry->fltr_info.vsi_handle == vsi_handle) || (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST && + fm_entry->vsi_list_info && (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map)))); } @@ -2292,14 +2296,12 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, return ICE_ERR_PARAM; list_for_each_entry(fm_entry, lkup_list_head, list_entry) { - struct ice_fltr_info *fi; - - fi = &fm_entry->fltr_info; - if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle)) + if (!ice_vsi_uses_fltr(fm_entry, vsi_handle)) continue; status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle, - vsi_list_head, fi); + vsi_list_head, + &fm_entry->fltr_info); if (status) return status; } @@ -2622,7 +2624,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle, &remove_list_head); mutex_unlock(rule_lock); if (status) - return; + goto free_fltr_list; switch (lkup) { case ICE_SW_LKUP_MAC: @@ -2645,6 +2647,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle, break; } +free_fltr_list: list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) { list_del(&fm_entry->list_entry); devm_kfree(ice_hw_to_dev(hw), fm_entry); diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index b7dc25da1202..b91dcfd12727 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -444,22 +444,6 @@ void ice_free_rx_ring(struct ice_ring *rx_ring) } /** - * ice_rx_offset - Return expected offset into page to access data - * @rx_ring: Ring we are requesting offset of - * - * Returns the offset value for ring into the data buffer. 
- */ -static unsigned int ice_rx_offset(struct ice_ring *rx_ring) -{ - if (ice_ring_uses_build_skb(rx_ring)) - return ICE_SKB_PAD; - else if (ice_is_xdp_ena_vsi(rx_ring->vsi)) - return XDP_PACKET_HEADROOM; - - return 0; -} - -/** * ice_setup_rx_ring - Allocate the Rx descriptors * @rx_ring: the Rx ring to set up * @@ -493,7 +477,6 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring) rx_ring->next_to_use = 0; rx_ring->next_to_clean = 0; - rx_ring->rx_offset = ice_rx_offset(rx_ring); if (ice_is_xdp_ena_vsi(rx_ring->vsi)) WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog); diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index a6cb0c35748c..266036b7a49a 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -535,6 +535,7 @@ struct ice_dcb_app_priority_table { #define ICE_TLV_STATUS_ERR 0x4 #define ICE_APP_PROT_ID_FCOE 0x8906 #define ICE_APP_PROT_ID_ISCSI 0x0cbc +#define ICE_APP_PROT_ID_ISCSI_860 0x035c #define ICE_APP_PROT_ID_FIP 0x8914 #define ICE_APP_SEL_ETHTYPE 0x1 #define ICE_APP_SEL_TCPIP 0x2 diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 83f3c9574ed1..9f94d9159acd 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -358,18 +358,18 @@ xsk_pool_if_up: * This function allocates a number of Rx buffers from the fill ring * or the internal recycle mechanism and places them on the Rx ring. * - * Returns false if all allocations were successful, true if any fail. + * Returns true if all allocations were successful, false if any fail. */ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count) { union ice_32b_rx_flex_desc *rx_desc; u16 ntu = rx_ring->next_to_use; struct ice_rx_buf *rx_buf; - bool ret = false; + bool ok = true; dma_addr_t dma; if (!count) - return false; + return true; rx_desc = ICE_RX_DESC(rx_ring, ntu); rx_buf = &rx_ring->rx_buf[ntu]; @@ -377,7 +377,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count) do { rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool); if (!rx_buf->xdp) { - ret = true; + ok = false; break; } @@ -402,7 +402,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count) ice_release_rx_desc(rx_ring, ntu); } - return ret; + return ok; } /** diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h index 5d87957b2627..44111f65afc7 100644 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -1,8 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ -#ifndef _E1000_HW_H_ -#define _E1000_HW_H_ +#ifndef _E1000_IGB_HW_H_ +#define _E1000_IGB_HW_H_ #include <linux/types.h> #include <linux/delay.h> @@ -551,4 +551,4 @@ s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); -#endif /* _E1000_HW_H_ */ +#endif /* _E1000_IGB_HW_H_ */ diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index aaa954aae574..7bda8c5edea5 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -748,8 +748,8 @@ void igb_ptp_suspend(struct igb_adapter *adapter); void igb_ptp_rx_hang(struct igb_adapter *adapter); void igb_ptp_tx_hang(struct igb_adapter *adapter); void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb); -void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, - struct sk_buff *skb); +int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, + struct sk_buff *skb); int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); void igb_set_flag_queue_pairs(struct igb_adapter *, const u32); diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 878b31d534ec..a45cd2b416c8 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -8214,7 +8214,8 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring, new_buff->pagecnt_bias = old_buff->pagecnt_bias; } -static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer) +static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, + int rx_buf_pgcnt) { unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; struct page *page = rx_buffer->page; @@ -8225,7 +8226,7 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer) #if (PAGE_SIZE < 8192) /* if we are only owner of page we can reuse it */ - if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) + if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1)) return false; #else #define IGB_LAST_OFFSET \ @@ -8301,9 +8302,10 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring, return NULL; if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) { - igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb); - xdp->data += IGB_TS_HDR_LEN; - size -= IGB_TS_HDR_LEN; + if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb)) { + xdp->data += IGB_TS_HDR_LEN; + size -= IGB_TS_HDR_LEN; + } } /* Determine available headroom for copy */ @@ -8364,8 +8366,8 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring, /* pull timestamp out of packet data */ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { - igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb); - __skb_pull(skb, IGB_TS_HDR_LEN); + if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb)) + __skb_pull(skb, IGB_TS_HDR_LEN); } /* update buffer offset */ @@ -8614,11 +8616,17 @@ static unsigned int igb_rx_offset(struct igb_ring *rx_ring) } static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring, - const unsigned int size) + const unsigned int size, int *rx_buf_pgcnt) { struct igb_rx_buffer *rx_buffer; rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + *rx_buf_pgcnt = +#if (PAGE_SIZE < 8192) + page_count(rx_buffer->page); +#else + 0; +#endif prefetchw(rx_buffer->page); /* we are reusing so sync 
this buffer for CPU use */ @@ -8634,9 +8642,9 @@ static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring, } static void igb_put_rx_buffer(struct igb_ring *rx_ring, - struct igb_rx_buffer *rx_buffer) + struct igb_rx_buffer *rx_buffer, int rx_buf_pgcnt) { - if (igb_can_reuse_rx_page(rx_buffer)) { + if (igb_can_reuse_rx_page(rx_buffer, rx_buf_pgcnt)) { /* hand second half of page back to the ring */ igb_reuse_rx_page(rx_ring, rx_buffer); } else { @@ -8664,6 +8672,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) unsigned int xdp_xmit = 0; struct xdp_buff xdp; u32 frame_sz = 0; + int rx_buf_pgcnt; /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */ #if (PAGE_SIZE < 8192) @@ -8693,7 +8702,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) */ dma_rmb(); - rx_buffer = igb_get_rx_buffer(rx_ring, size); + rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt); /* retrieve a buffer from the ring */ if (!skb) { @@ -8736,7 +8745,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) break; } - igb_put_rx_buffer(rx_ring, rx_buffer); + igb_put_rx_buffer(rx_ring, rx_buffer, rx_buf_pgcnt); cleaned_count++; /* fetch next buffer in frame if non-eop */ diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 7cc5428c3b3d..86a576201f5f 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -856,6 +856,9 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) dev_kfree_skb_any(skb); } +#define IGB_RET_PTP_DISABLED 1 +#define IGB_RET_PTP_INVALID 2 + /** * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp * @q_vector: Pointer to interrupt specific structure @@ -864,19 +867,29 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) * * This function is meant to retrieve a timestamp from the first buffer of an * incoming frame. The value is stored in little endian format starting on - * byte 8. + * byte 8 + * + * Returns: 0 if success, nonzero if failure **/ -void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, - struct sk_buff *skb) +int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, + struct sk_buff *skb) { - __le64 *regval = (__le64 *)va; struct igb_adapter *adapter = q_vector->adapter; + __le64 *regval = (__le64 *)va; int adjust = 0; + if (!(adapter->ptp_flags & IGB_PTP_ENABLED)) + return IGB_RET_PTP_DISABLED; + /* The timestamp is recorded in little endian format. * DWORD: 0 1 2 3 * Field: Reserved Reserved SYSTIML SYSTIMH */ + + /* check reserved dwords are zero, be/le doesn't matter for zero */ + if (regval[0]) + return IGB_RET_PTP_INVALID; + igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), le64_to_cpu(regval[1])); @@ -896,6 +909,8 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, } skb_hwtstamps(skb)->hwtstamp = ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust); + + return 0; } /** @@ -906,13 +921,15 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, * This function is meant to retrieve a timestamp from the internal registers * of the adapter and store it in the skb. 
**/ -void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, - struct sk_buff *skb) +void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb) { struct igb_adapter *adapter = q_vector->adapter; struct e1000_hw *hw = &adapter->hw; - u64 regval; int adjust = 0; + u64 regval; + + if (!(adapter->ptp_flags & IGB_PTP_ENABLED)) + return; /* If this bit is set, then the RX registers contain the time stamp. No * other packet will be time stamped until we read these registers, so diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 5d2809dfd06a..1b08a7dc7bc4 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -547,7 +547,7 @@ void igc_ptp_init(struct igc_adapter *adapter); void igc_ptp_reset(struct igc_adapter *adapter); void igc_ptp_suspend(struct igc_adapter *adapter); void igc_ptp_stop(struct igc_adapter *adapter); -void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va, +void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va, struct sk_buff *skb); int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index 824a6c454bca..8722294ab90c 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -1711,6 +1711,9 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev, Autoneg); } + /* Set pause flow control settings */ + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + switch (hw->fc.requested_mode) { case igc_fc_full: ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); @@ -1725,9 +1728,7 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev, Asym_Pause); break; default: - ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); - ethtool_link_ksettings_add_link_mode(cmd, advertising, - Asym_Pause); + break; } status = pm_runtime_suspended(&adapter->pdev->dev) ? diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 7ac9597ddb84..4d989ebc9713 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -3831,10 +3831,19 @@ static void igc_reset_task(struct work_struct *work) adapter = container_of(work, struct igc_adapter, reset_task); + rtnl_lock(); + /* If we're already down or resetting, just bail */ + if (test_bit(__IGC_DOWN, &adapter->state) || + test_bit(__IGC_RESETTING, &adapter->state)) { + rtnl_unlock(); + return; + } + igc_rings_dump(adapter); igc_regs_dump(adapter); netdev_err(adapter->netdev, "Reset adapter\n"); igc_reinit_locked(adapter); + rtnl_unlock(); } /** diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index ac0b9c85da7c..545f4d0e67cf 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -152,46 +152,54 @@ static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter, } /** - * igc_ptp_rx_pktstamp - retrieve Rx per packet timestamp + * igc_ptp_rx_pktstamp - Retrieve timestamp from Rx packet buffer * @q_vector: Pointer to interrupt specific structure * @va: Pointer to address containing Rx buffer * @skb: Buffer containing timestamp and packet * - * This function is meant to retrieve the first timestamp from the - * first buffer of an incoming frame. 
The value is stored in little - * endian format starting on byte 0. There's a second timestamp - * starting on byte 8. - **/ -void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va, + * This function retrieves the timestamp saved in the beginning of packet + * buffer. While two timestamps are available, one in timer0 reference and the + * other in timer1 reference, this function considers only the timestamp in + * timer0 reference. + */ +void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va, struct sk_buff *skb) { struct igc_adapter *adapter = q_vector->adapter; - __le64 *regval = (__le64 *)va; - int adjust = 0; - - /* The timestamp is recorded in little endian format. - * DWORD: | 0 | 1 | 2 | 3 - * Field: | Timer0 Low | Timer0 High | Timer1 Low | Timer1 High + u64 regval; + int adjust; + + /* Timestamps are saved in little endian at the beginning of the packet + * buffer following the layout: + * + * DWORD: | 0 | 1 | 2 | 3 | + * Field: | Timer1 SYSTIML | Timer1 SYSTIMH | Timer0 SYSTIML | Timer0 SYSTIMH | + * + * SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds + * part of the timestamp. */ - igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), - le64_to_cpu(regval[0])); - - /* adjust timestamp for the RX latency based on link speed */ - if (adapter->hw.mac.type == igc_i225) { - switch (adapter->link_speed) { - case SPEED_10: - adjust = IGC_I225_RX_LATENCY_10; - break; - case SPEED_100: - adjust = IGC_I225_RX_LATENCY_100; - break; - case SPEED_1000: - adjust = IGC_I225_RX_LATENCY_1000; - break; - case SPEED_2500: - adjust = IGC_I225_RX_LATENCY_2500; - break; - } + regval = le32_to_cpu(va[2]); + regval |= (u64)le32_to_cpu(va[3]) << 32; + igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); + + /* Adjust timestamp for the RX latency based on link speed */ + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGC_I225_RX_LATENCY_10; + break; + case SPEED_100: + adjust = IGC_I225_RX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGC_I225_RX_LATENCY_1000; + break; + case SPEED_2500: + adjust = IGC_I225_RX_LATENCY_2500; + break; + default: + adjust = 0; + netdev_warn_once(adapter->netdev, "Imprecise timestamp\n"); + break; } skb_hwtstamps(skb)->hwtstamp = ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 9f3f12e2ccf2..cffb95f8f632 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -4118,6 +4118,8 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, #endif } + ring->rx_offset = ixgbe_rx_offset(ring); + if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) { u32 xsk_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool); @@ -6534,6 +6536,13 @@ err_setup_tx: return err; } +static int ixgbe_rx_napi_id(struct ixgbe_ring *rx_ring) +{ + struct ixgbe_q_vector *q_vector = rx_ring->q_vector; + + return q_vector ? 
q_vector->napi.napi_id : 0; +} + /** * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) * @adapter: pointer to ixgbe_adapter @@ -6578,11 +6587,10 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; - rx_ring->rx_offset = ixgbe_rx_offset(rx_ring); /* XDP RX-queue info */ if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev, - rx_ring->queue_index, rx_ring->q_vector->napi.napi_id) < 0) + rx_ring->queue_index, ixgbe_rx_napi_id(rx_ring)) < 0) goto err; rx_ring->xdp_prog = adapter->xdp_prog; @@ -6891,6 +6899,11 @@ static int __maybe_unused ixgbe_resume(struct device *dev_d) adapter->hw.hw_addr = adapter->io_addr; + err = pci_enable_device_mem(pdev); + if (err) { + e_dev_err("Cannot enable PCI device from suspend\n"); + return err; + } smp_mb__before_atomic(); clear_bit(__IXGBE_DISABLED, &adapter->state); pci_set_master(pdev); diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index e9efe074edc1..f1b9284e0bea 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -1265,9 +1265,9 @@ jme_stop_shutdown_timer(struct jme_adapter *jme) jwrite32f(jme, JME_APMC, apmc); } -static void jme_link_change_tasklet(struct tasklet_struct *t) +static void jme_link_change_work(struct work_struct *work) { - struct jme_adapter *jme = from_tasklet(jme, t, linkch_task); + struct jme_adapter *jme = container_of(work, struct jme_adapter, linkch_task); struct net_device *netdev = jme->dev; int rc; @@ -1510,7 +1510,7 @@ jme_intr_msi(struct jme_adapter *jme, u32 intrstat) * all other events are ignored */ jwrite32(jme, JME_IEVE, intrstat); - tasklet_schedule(&jme->linkch_task); + schedule_work(&jme->linkch_task); goto out_reenable; } @@ -1832,7 +1832,6 @@ jme_open(struct net_device *netdev) jme_clear_pm_disable_wol(jme); JME_NAPI_ENABLE(jme); - tasklet_setup(&jme->linkch_task, jme_link_change_tasklet); tasklet_setup(&jme->txclean_task, jme_tx_clean_tasklet); tasklet_setup(&jme->rxclean_task, jme_rx_clean_tasklet); tasklet_setup(&jme->rxempty_task, jme_rx_empty_tasklet); @@ -1920,7 +1919,7 @@ jme_close(struct net_device *netdev) JME_NAPI_DISABLE(jme); - tasklet_kill(&jme->linkch_task); + cancel_work_sync(&jme->linkch_task); tasklet_kill(&jme->txclean_task); tasklet_kill(&jme->rxclean_task); tasklet_kill(&jme->rxempty_task); @@ -3035,6 +3034,7 @@ jme_init_one(struct pci_dev *pdev, atomic_set(&jme->rx_empty, 1); tasklet_setup(&jme->pcc_task, jme_pcc_tasklet); + INIT_WORK(&jme->linkch_task, jme_link_change_work); jme->dpi.cur = PCC_P1; jme->reg_ghc = 0; diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h index a2c3b00d939d..2af76329b4a2 100644 --- a/drivers/net/ethernet/jme.h +++ b/drivers/net/ethernet/jme.h @@ -411,7 +411,7 @@ struct jme_adapter { struct tasklet_struct rxempty_task; struct tasklet_struct rxclean_task; struct tasklet_struct txclean_task; - struct tasklet_struct linkch_task; + struct work_struct linkch_task; struct tasklet_struct pcc_task; unsigned long flags; u32 reg_txcs; diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index 7fe15a3286f4..fe0989c0fc25 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -6,7 +6,7 @@ config NET_VENDOR_MARVELL bool "Marvell devices" default y - depends on PCI || CPU_PXA168 || MV64X60 || PPC32 || PLAT_ORION || INET || COMPILE_TEST + depends on PCI || CPU_PXA168 || PPC32 || PLAT_ORION || INET || COMPILE_TEST help If you have a network (Ethernet) card belonging 
to this class, say Y. @@ -19,7 +19,7 @@ if NET_VENDOR_MARVELL config MV643XX_ETH tristate "Marvell Discovery (643XX) and Orion ethernet support" - depends on MV64X60 || PPC32 || PLAT_ORION || COMPILE_TEST + depends on PPC32 || PLAT_ORION || COMPILE_TEST depends on INET select PHYLIB select MVMDIO diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 90e6111ce534..3bfb659b5c99 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2684,7 +2684,7 @@ static const struct of_device_id mv643xx_eth_shared_ids[] = { MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids); #endif -#if defined(CONFIG_OF_IRQ) && !defined(CONFIG_MV64X60) +#ifdef CONFIG_OF_IRQ #define mv643xx_eth_property(_np, _name, _v) \ do { \ u32 tmp; \ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h index b192692b4fc4..5c372d2c24a1 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h @@ -13499,8 +13499,6 @@ static struct npc_mcam_kex npc_mkex_default = { [NPC_LT_LC_IP] = { /* SIP+DIP: 8 bytes, KW2[63:0] */ KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10), - /* TOS: 1 byte, KW1[63:56] */ - KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0xf), }, /* Layer C: IPv6 */ [NPC_LT_LC_IP6] = { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index d9a1a71c7ccc..ab24a5e8ee8a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -2462,8 +2462,10 @@ static void rvu_unregister_interrupts(struct rvu *rvu) INTR_MASK(rvu->hw->total_pfs) & ~1ULL); for (irq = 0; irq < rvu->num_vec; irq++) { - if (rvu->irq_allocated[irq]) + if (rvu->irq_allocated[irq]) { free_irq(pci_irq_vector(rvu->pdev, irq), rvu); + rvu->irq_allocated[irq] = false; + } } pci_free_irq_vectors(rvu->pdev); @@ -2975,8 +2977,8 @@ static void rvu_remove(struct pci_dev *pdev) struct rvu *rvu = pci_get_drvdata(pdev); rvu_dbg_exit(rvu); - rvu_unregister_interrupts(rvu); rvu_unregister_dl(rvu); + rvu_unregister_interrupts(rvu); rvu_flr_wq_destroy(rvu); rvu_cgx_exit(rvu); rvu_fwdata_exit(rvu); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index fa6e46e36ae4..76f399229ddb 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -678,6 +678,7 @@ void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, u8 *intf, u8 *ena); bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature); u32 rvu_cgx_get_fifolen(struct rvu *rvu); +void *rvu_first_cgx_pdata(struct rvu *rvu); /* CPT APIs */ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index e668e482383a..6e2bf4fcd29c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -89,6 +89,21 @@ void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu) return rvu->cgx_idmap[cgx_id]; } +/* Return first enabled CGX instance if none are enabled then return NULL */ +void *rvu_first_cgx_pdata(struct rvu *rvu) +{ + int first_enabled_cgx = 0; + void *cgxd = NULL; + + for (; first_enabled_cgx < rvu->cgx_cnt_max; first_enabled_cgx++) { + cgxd = rvu_cgx_pdata(first_enabled_cgx, 
rvu); + if (cgxd) + break; + } + + return cgxd; +} + /* Based on P2X connectivity find mapped NIX block for a PF */ static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf, int cgx_id, int lmac_id) @@ -711,10 +726,9 @@ int rvu_mbox_handler_cgx_features_get(struct rvu *rvu, u32 rvu_cgx_get_fifolen(struct rvu *rvu) { struct mac_ops *mac_ops; - int rvu_def_cgx_id = 0; u32 fifo_len; - mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu)); + mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu)); fifo_len = mac_ops ? mac_ops->fifo_len : 0; return fifo_len; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index aa2ca8780b9c..de3968d2e5ce 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -234,12 +234,14 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { - int index, off = 0, flag = 0, go_back = 0, off_prev; + int index, off = 0, flag = 0, go_back = 0, len = 0; struct rvu *rvu = filp->private_data; int lf, pf, vf, pcifunc; struct rvu_block block; int bytes_not_copied; + int lf_str_size = 12; int buf_size = 2048; + char *lfs; char *buf; /* don't allow partial reads */ @@ -249,12 +251,20 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp, buf = kzalloc(buf_size, GFP_KERNEL); if (!buf) return -ENOSPC; - off += scnprintf(&buf[off], buf_size - 1 - off, "\npcifunc\t\t"); + + lfs = kzalloc(lf_str_size, GFP_KERNEL); + if (!lfs) { + kfree(buf); + return -ENOMEM; + } + off += scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size, + "pcifunc"); for (index = 0; index < BLK_COUNT; index++) - if (strlen(rvu->hw->block[index].name)) - off += scnprintf(&buf[off], buf_size - 1 - off, - "%*s\t", (index - 1) * 2, - rvu->hw->block[index].name); + if (strlen(rvu->hw->block[index].name)) { + off += scnprintf(&buf[off], buf_size - 1 - off, + "%-*s", lf_str_size, + rvu->hw->block[index].name); + } off += scnprintf(&buf[off], buf_size - 1 - off, "\n"); for (pf = 0; pf < rvu->hw->total_pfs; pf++) { for (vf = 0; vf <= rvu->hw->total_vfs; vf++) { @@ -263,14 +273,15 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp, continue; if (vf) { + sprintf(lfs, "PF%d:VF%d", pf, vf - 1); go_back = scnprintf(&buf[off], buf_size - 1 - off, - "PF%d:VF%d\t\t", pf, - vf - 1); + "%-*s", lf_str_size, lfs); } else { + sprintf(lfs, "PF%d", pf); go_back = scnprintf(&buf[off], buf_size - 1 - off, - "PF%d\t\t", pf); + "%-*s", lf_str_size, lfs); } off += go_back; @@ -278,20 +289,22 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp, block = rvu->hw->block[index]; if (!strlen(block.name)) continue; - off_prev = off; + len = 0; + lfs[len] = '\0'; for (lf = 0; lf < block.lf.max; lf++) { if (block.fn_map[lf] != pcifunc) continue; flag = 1; - off += scnprintf(&buf[off], buf_size - 1 - - off, "%3d,", lf); + len += sprintf(&lfs[len], "%d,", lf); } - if (flag && off_prev != off) - off--; - else - go_back++; + + if (flag) + len--; + lfs[len] = '\0'; off += scnprintf(&buf[off], buf_size - 1 - off, - "\t"); + "%-*s", lf_str_size, lfs); + if (!strlen(lfs)) + go_back += lf_str_size; } if (!flag) off -= go_back; @@ -303,6 +316,7 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp, } bytes_not_copied = copy_to_user(buffer, buf, off); + kfree(lfs); kfree(buf); if (bytes_not_copied) @@ -319,7 +333,6 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused) struct rvu 
*rvu = filp->private; struct pci_dev *pdev = NULL; struct mac_ops *mac_ops; - int rvu_def_cgx_id = 0; char cgx[10], lmac[10]; struct rvu_pfvf *pfvf; int pf, domain, blkid; @@ -327,7 +340,10 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused) u16 pcifunc; domain = 2; - mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu)); + mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu)); + /* There can be no CGX devices at all */ + if (!mac_ops) + return 0; seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n", mac_ops->name); for (pf = 0; pf < rvu->hw->total_pfs; pf++) { @@ -1818,7 +1834,6 @@ static void rvu_dbg_cgx_init(struct rvu *rvu) { struct mac_ops *mac_ops; unsigned long lmac_bmap; - int rvu_def_cgx_id = 0; int i, lmac_id; char dname[20]; void *cgx; @@ -1826,7 +1841,7 @@ static void rvu_dbg_cgx_init(struct rvu *rvu) if (!cgx_get_cgxcnt_max()) return; - mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu)); + mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu)); if (!mac_ops) return; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index d3000194e2d3..3d068b7d46bd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -2629,7 +2629,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) struct nix_rx_flowkey_alg *field; struct nix_rx_flowkey_alg tmp; u32 key_type, valid_key; - int l4_key_offset; + int l4_key_offset = 0; if (!alg) return -EINVAL; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 04bb0803a5c5..0bd49c7080a6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -2490,10 +2490,10 @@ int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu, index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry); if (index >= mcam->bmap_entries) break; + entry = index + 1; if (mcam->entry2cntr_map[index] != req->cntr) continue; - entry = index + 1; npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, index, req->cntr); } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c index 0dbbf38e0597..dc1778420978 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c @@ -257,17 +257,19 @@ int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc, int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc, u32 *rule_locs) { + u32 rule_cnt = nfc->rule_cnt; u32 location = 0; int idx = 0; int err = 0; nfc->data = pfvf->flow_cfg->ntuple_max_flows; - while ((!err || err == -ENOENT) && idx < nfc->rule_cnt) { + while ((!err || err == -ENOENT) && idx < rule_cnt) { err = otx2_get_flow(pfvf, nfc, location); if (!err) rule_locs[idx++] = location; location++; } + nfc->rule_cnt = rule_cnt; return err; } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 53ab1814d74b..2fd3d235d292 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -1672,6 +1672,7 @@ int otx2_stop(struct net_device *netdev) struct otx2_nic *pf = netdev_priv(netdev); struct otx2_cq_poll *cq_poll = NULL; struct otx2_qset *qset = &pf->qset; + struct otx2_rss_info *rss; int qidx, vec, wrk; 
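The rvu_npc.c change a few hunks above fixes a classic bitmap-walk bug: the scan cursor was advanced only after the counter-map check, so a set bit that did not match req->cntr made find_next_bit() return the same index forever. A generic sketch of the corrected pattern, with illustrative helper names:

#include <linux/bitops.h>
#include <linux/types.h>

static void example_scan(const unsigned long *bmap, unsigned long nbits,
			 bool (*match)(unsigned long idx),
			 void (*handle)(unsigned long idx))
{
	unsigned long entry = 0, index;

	while (entry < nbits) {
		index = find_next_bit(bmap, nbits, entry);
		if (index >= nbits)
			break;
		entry = index + 1;	/* advance before any continue */
		if (!match(index))
			continue;	/* safe: cursor already moved on */
		handle(index);
	}
}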
netif_carrier_off(netdev); @@ -1684,6 +1685,10 @@ int otx2_stop(struct net_device *netdev) /* First stop packet Rx/Tx */ otx2_rxtx_enable(pf, false); + /* Clear RSS enable flag */ + rss = &pf->hw.rss_info; + rss->enable = false; + /* Cleanup Queue IRQ */ vec = pci_irq_vector(pf->pdev, pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START); diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index d1e4d42e497d..3712e1786091 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1544,8 +1544,8 @@ static int pxa168_eth_remove(struct platform_device *pdev) clk_disable_unprepare(pep->clk); mdiobus_unregister(pep->smi_bus); mdiobus_free(pep->smi_bus); - unregister_netdev(dev); cancel_work_sync(&pep->tx_timeout_task); + unregister_netdev(dev); free_netdev(dev); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index b051417ede67..9153c9bda96f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -191,12 +191,12 @@ static bool is_ib_supported(struct mlx5_core_dev *dev) } enum { - MLX5_INTERFACE_PROTOCOL_ETH_REP, MLX5_INTERFACE_PROTOCOL_ETH, + MLX5_INTERFACE_PROTOCOL_ETH_REP, + MLX5_INTERFACE_PROTOCOL_IB, MLX5_INTERFACE_PROTOCOL_IB_REP, MLX5_INTERFACE_PROTOCOL_MPIB, - MLX5_INTERFACE_PROTOCOL_IB, MLX5_INTERFACE_PROTOCOL_VNET, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index d7d8a68ef23d..d0f9d3cee97d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -246,6 +246,11 @@ static int mlx5_devlink_trap_action_set(struct devlink *devlink, struct mlx5_devlink_trap *dl_trap; int err = 0; + if (is_mdev_switchdev_mode(dev)) { + NL_SET_ERR_MSG_MOD(extack, "Devlink traps can't be set in switchdev mode"); + return -EOPNOTSUPP; + } + dl_trap = mlx5_find_trap_by_id(dev, trap->id); if (!dl_trap) { mlx5_core_err(dev, "Devlink trap: Set action on invalid trap id 0x%x", trap->id); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 7435fe6829b6..bc6f77ea0a31 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -92,14 +92,15 @@ struct page_pool; MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0) #define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER) -#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2) +#define MLX5_ALIGN_MTTS(mtts) (ALIGN(mtts, 8)) +#define MLX5_ALIGNED_MTTS_OCTW(mtts) ((mtts) / 2) +#define MLX5_MTT_OCTW(mtts) (MLX5_ALIGNED_MTTS_OCTW(MLX5_ALIGN_MTTS(mtts))) /* Add another page to MLX5E_REQUIRED_WQE_MTTS as a buffer between * WQEs, This page will absorb write overflow by the hardware, when * receiving packets larger than MTU. These oversize packets are * dropped by the driver at a later stage. 
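The en.h hunk here replaces the shift-based MPWQE offset math with explicit MTT counting. A worked example of the new macros, assuming 4K pages and the default 2^18-byte multi-packet WQE (so 64 pages per WQE plus the overflow page the comment above describes): each WQE consumes ALIGN(64 + 1, 8) = 72 MTT entries, which is 36 octwords and, crucially, not a power of two, so the old MLX5E_LOG_ALIGNED_MPWQE_PPW shift could not represent it. A sketch with illustrative names:

#include <linux/kernel.h>	/* ALIGN() */
#include <linux/types.h>

#define EX_PAGE_SHIFT		12
#define EX_PAGES_PER_WQE	64	/* assumed: 256KB WQE / 4K pages */
#define EX_ALIGN_MTTS(mtts)	ALIGN(mtts, 8)
#define EX_MTTS_OCTW(mtts)	((mtts) / 2)
#define EX_WQE_MTTS		EX_ALIGN_MTTS(EX_PAGES_PER_WQE + 1)	/* 72 */

/* DMA offset of a WQE: multiply, don't shift, since 72 is not 2^n */
static u64 ex_mpwqe_offset(u16 wqe_ix)
{
	return (u64)wqe_ix * EX_WQE_MTTS << EX_PAGE_SHIFT;
}

/* Matching UMR xlt_offset, in octwords (36 per WQE) */
static u16 ex_umr_xlt_offset(u16 wqe_ix)
{
	return EX_MTTS_OCTW(wqe_ix * EX_WQE_MTTS);
}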
*/ -#define MLX5E_REQUIRED_WQE_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE + 1, 8)) -#define MLX5E_LOG_ALIGNED_MPWQE_PPW (ilog2(MLX5E_REQUIRED_WQE_MTTS)) +#define MLX5E_REQUIRED_WQE_MTTS (MLX5_ALIGN_MTTS(MLX5_MPWRQ_PAGES_PER_WQE + 1)) #define MLX5E_REQUIRED_MTTS(wqes) (wqes * MLX5E_REQUIRED_WQE_MTTS) #define MLX5E_MAX_RQ_NUM_MTTS \ ((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */ @@ -515,6 +516,7 @@ struct mlx5e_icosq { struct mlx5_wq_cyc wq; void __iomem *uar_map; u32 sqn; + u16 reserved_room; unsigned long state; /* control path */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c index 308fd279669e..89510cac46c2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c @@ -387,21 +387,6 @@ enum mlx5e_fec_supported_link_mode { *_policy = MLX5_GET(pplm_reg, _buf, fec_override_admin_##link); \ } while (0) -#define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link) \ - do { \ - unsigned long policy_long; \ - u16 *__policy = &(policy); \ - bool _write = (write); \ - \ - policy_long = *__policy; \ - if (_write && *__policy) \ - *__policy = find_first_bit(&policy_long, \ - sizeof(policy_long) * BITS_PER_BYTE);\ - MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link); \ - if (!_write && *__policy) \ - *__policy = 1 << *__policy; \ - } while (0) - /* get/set FEC admin field for a given speed */ static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write, enum mlx5e_fec_supported_link_mode link_mode) @@ -423,16 +408,16 @@ static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write, MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 100g); break; case MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X: - MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 50g_1x); + MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 50g_1x); break; case MLX5E_FEC_SUPPORTED_LINK_MODE_100G_2X: - MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 100g_2x); + MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 100g_2x); break; case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_4X: - MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 200g_4x); + MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 200g_4x); break; case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X: - MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 400g_8x); + MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 400g_8x); break; default: return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index f3f6eb081948..68e54cc1cd16 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -186,6 +186,28 @@ mlx5_tc_ct_entry_has_nat(struct mlx5_ct_entry *entry) } static int +mlx5_get_label_mapping(struct mlx5_tc_ct_priv *ct_priv, + u32 *labels, u32 *id) +{ + if (!memchr_inv(labels, 0, sizeof(u32) * 4)) { + *id = 0; + return 0; + } + + if (mapping_add(ct_priv->labels_mapping, labels, id)) + return -EOPNOTSUPP; + + return 0; +} + +static void +mlx5_put_label_mapping(struct mlx5_tc_ct_priv *ct_priv, u32 id) +{ + if (id) + mapping_remove(ct_priv->labels_mapping, id); +} + +static int mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule) { struct flow_match_control control; @@ -436,7 +458,7 @@ mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv, 
mlx5_tc_rule_delete(netdev_priv(ct_priv->netdev), zone_rule->rule, attr); mlx5e_mod_hdr_detach(ct_priv->dev, ct_priv->mod_hdr_tbl, zone_rule->mh); - mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id); + mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id); kfree(attr); } @@ -639,8 +661,8 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv, if (!meta) return -EOPNOTSUPP; - err = mapping_add(ct_priv->labels_mapping, meta->ct_metadata.labels, - &attr->ct_attr.ct_labels_id); + err = mlx5_get_label_mapping(ct_priv, meta->ct_metadata.labels, + &attr->ct_attr.ct_labels_id); if (err) return -EOPNOTSUPP; if (nat) { @@ -677,7 +699,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv, err_mapping: dealloc_mod_hdr_actions(&mod_acts); - mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id); + mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id); return err; } @@ -745,7 +767,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, err_rule: mlx5e_mod_hdr_detach(ct_priv->dev, ct_priv->mod_hdr_tbl, zone_rule->mh); - mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id); + mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id); err_mod_hdr: kfree(attr); err_attr: @@ -1181,7 +1203,8 @@ int mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec) mlx5e_tc_match_to_reg_get_match(spec, CTSTATE_TO_REG, &ctstate, &ctstate_mask); - if (ctstate_mask) + + if ((ctstate & ctstate_mask) == MLX5_CT_STATE_TRK_BIT) return -EOPNOTSUPP; ctstate_mask |= MLX5_CT_STATE_TRK_BIT; @@ -1196,7 +1219,7 @@ void mlx5_tc_ct_match_del(struct mlx5_tc_ct_priv *priv, struct mlx5_ct_attr *ct_ if (!priv || !ct_attr->ct_labels_id) return; - mapping_remove(priv->labels_mapping, ct_attr->ct_labels_id); + mlx5_put_label_mapping(priv, ct_attr->ct_labels_id); } int @@ -1279,7 +1302,7 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv, ct_labels[1] = key->ct_labels[1] & mask->ct_labels[1]; ct_labels[2] = key->ct_labels[2] & mask->ct_labels[2]; ct_labels[3] = key->ct_labels[3] & mask->ct_labels[3]; - if (mapping_add(priv->labels_mapping, ct_labels, &ct_attr->ct_labels_id)) + if (mlx5_get_label_mapping(priv, ct_labels, &ct_attr->ct_labels_id)) return -EOPNOTSUPP; mlx5e_tc_match_to_reg_match(spec, LABELS_TO_REG, ct_attr->ct_labels_id, MLX5_CT_LABELS_MASK); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index f8075a604605..172e0474f2e6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -685,14 +685,14 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv, u16 vport_num; int err = 0; - if (flow_attr->ip_version == 4) { + if (flow_attr->tun_ip_version == 4) { /* Addresses are swapped for decap */ attr.fl.fl4.saddr = esw_attr->rx_tun_attr->dst_ip.v4; attr.fl.fl4.daddr = esw_attr->rx_tun_attr->src_ip.v4; err = mlx5e_route_lookup_ipv4_get(priv, priv->netdev, &attr); } #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) - else if (flow_attr->ip_version == 6) { + else if (flow_attr->tun_ip_version == 6) { /* Addresses are swapped for decap */ attr.fl.fl6.saddr = esw_attr->rx_tun_attr->dst_ip.v6; attr.fl.fl6.daddr = esw_attr->rx_tun_attr->src_ip.v6; @@ -718,10 +718,10 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv, esw_attr->rx_tun_attr->decap_vport = vport_num; out: - if (flow_attr->ip_version == 4) + if (flow_attr->tun_ip_version == 4) mlx5e_route_lookup_ipv4_put(&attr); #if 
IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) - else if (flow_attr->ip_version == 6) + else if (flow_attr->tun_ip_version == 6) mlx5e_route_lookup_ipv6_put(&attr); #endif return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h index 67de2bf36861..e1271998b937 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h @@ -21,6 +21,11 @@ enum { MLX5E_TC_TUNNEL_TYPE_MPLSOUDP, }; +struct mlx5e_encap_key { + const struct ip_tunnel_key *ip_tun_key; + struct mlx5e_tc_tunnel *tc_tunnel; +}; + struct mlx5e_tc_tunnel { int tunnel_type; enum mlx5_flow_match_level match_level; @@ -44,6 +49,8 @@ struct mlx5e_tc_tunnel { struct flow_cls_offload *f, void *headers_c, void *headers_v); + bool (*encap_info_equal)(struct mlx5e_encap_key *a, + struct mlx5e_encap_key *b); }; extern struct mlx5e_tc_tunnel vxlan_tunnel; @@ -101,6 +108,9 @@ int mlx5e_tc_tun_parse_udp_ports(struct mlx5e_priv *priv, void *headers_c, void *headers_v); +bool mlx5e_tc_tun_encap_info_equal_generic(struct mlx5e_encap_key *a, + struct mlx5e_encap_key *b); + #endif /* CONFIG_MLX5_ESWITCH */ #endif //__MLX5_EN_TC_TUNNEL_H__ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c index 6a116335bb21..9f16ad2c0710 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c @@ -89,6 +89,7 @@ int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow, * required to establish routing. */ flow_flag_set(flow, TUN_RX); + flow->attr->tun_ip_version = ip_version; return 0; } @@ -475,16 +476,11 @@ void mlx5e_detach_decap(struct mlx5e_priv *priv, mlx5e_decap_dealloc(priv, d); } -struct encap_key { - const struct ip_tunnel_key *ip_tun_key; - struct mlx5e_tc_tunnel *tc_tunnel; -}; - -static int cmp_encap_info(struct encap_key *a, - struct encap_key *b) +bool mlx5e_tc_tun_encap_info_equal_generic(struct mlx5e_encap_key *a, + struct mlx5e_encap_key *b) { - return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) || - a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type; + return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) == 0 && + a->tc_tunnel->tunnel_type == b->tc_tunnel->tunnel_type; } static int cmp_decap_info(struct mlx5e_decap_key *a, @@ -493,7 +489,7 @@ static int cmp_decap_info(struct mlx5e_decap_key *a, return memcmp(&a->key, &b->key, sizeof(b->key)); } -static int hash_encap_info(struct encap_key *key) +static int hash_encap_info(struct mlx5e_encap_key *key) { return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key), key->tc_tunnel->tunnel_type); @@ -515,18 +511,18 @@ static bool mlx5e_decap_take(struct mlx5e_decap_entry *e) } static struct mlx5e_encap_entry * -mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key, +mlx5e_encap_get(struct mlx5e_priv *priv, struct mlx5e_encap_key *key, uintptr_t hash_key) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_encap_key e_key; struct mlx5e_encap_entry *e; - struct encap_key e_key; hash_for_each_possible_rcu(esw->offloads.encap_tbl, e, encap_hlist, hash_key) { e_key.ip_tun_key = &e->tun_info->key; e_key.tc_tunnel = e->tunnel; - if (!cmp_encap_info(&e_key, key) && + if (e->tunnel->encap_info_equal(&e_key, key) && mlx5e_encap_take(e)) return e; } @@ -693,8 +689,8 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr = flow->attr; const 
struct ip_tunnel_info *tun_info; unsigned long tbl_time_before = 0; - struct encap_key key; struct mlx5e_encap_entry *e; + struct mlx5e_encap_key key; bool entry_created = false; unsigned short family; uintptr_t hash_key; @@ -1091,7 +1087,7 @@ int mlx5e_attach_decap_route(struct mlx5e_priv *priv, if (err || !esw_attr->rx_tun_attr->decap_vport) goto out; - key.ip_version = attr->ip_version; + key.ip_version = attr->tun_ip_version; if (key.ip_version == 4) key.endpoint_ip.v4 = esw_attr->rx_tun_attr->dst_ip.v4; else diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c index e472ed0eacfb..f5b26f5a7de4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c @@ -227,6 +227,10 @@ static int mlx5e_tc_tun_parse_geneve_options(struct mlx5e_priv *priv, option_key = (struct geneve_opt *)&enc_opts.key->data[0]; option_mask = (struct geneve_opt *)&enc_opts.mask->data[0]; + if (option_mask->opt_class == 0 && option_mask->type == 0 && + !memchr_inv(option_mask->opt_data, 0, option_mask->length * 4)) + return 0; + if (option_key->length > max_tlv_option_data_len) { NL_SET_ERR_MSG_MOD(extack, "Matching on GENEVE options: unsupported option len"); @@ -325,6 +329,34 @@ static int mlx5e_tc_tun_parse_geneve(struct mlx5e_priv *priv, return mlx5e_tc_tun_parse_geneve_options(priv, spec, f); } +static bool mlx5e_tc_tun_encap_info_equal_geneve(struct mlx5e_encap_key *a, + struct mlx5e_encap_key *b) +{ + struct ip_tunnel_info *a_info; + struct ip_tunnel_info *b_info; + bool a_has_opts, b_has_opts; + + if (!mlx5e_tc_tun_encap_info_equal_generic(a, b)) + return false; + + a_has_opts = !!(a->ip_tun_key->tun_flags & TUNNEL_GENEVE_OPT); + b_has_opts = !!(b->ip_tun_key->tun_flags & TUNNEL_GENEVE_OPT); + + /* keys are equal when both don't have any options attached */ + if (!a_has_opts && !b_has_opts) + return true; + + if (a_has_opts != b_has_opts) + return false; + + /* geneve options stored in memory next to ip_tunnel_info struct */ + a_info = container_of(a->ip_tun_key, struct ip_tunnel_info, key); + b_info = container_of(b->ip_tun_key, struct ip_tunnel_info, key); + + return a_info->options_len == b_info->options_len && + memcmp(a_info + 1, b_info + 1, a_info->options_len) == 0; +} + struct mlx5e_tc_tunnel geneve_tunnel = { .tunnel_type = MLX5E_TC_TUNNEL_TYPE_GENEVE, .match_level = MLX5_MATCH_L4, @@ -334,4 +366,5 @@ struct mlx5e_tc_tunnel geneve_tunnel = { .generate_ip_tun_hdr = mlx5e_gen_ip_tunnel_header_geneve, .parse_udp_ports = mlx5e_tc_tun_parse_udp_ports_geneve, .parse_tunnel = mlx5e_tc_tun_parse_geneve, + .encap_info_equal = mlx5e_tc_tun_encap_info_equal_geneve, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c index 2805416c32a3..ada14f0574dc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c @@ -94,4 +94,5 @@ struct mlx5e_tc_tunnel gre_tunnel = { .generate_ip_tun_hdr = mlx5e_gen_ip_tunnel_header_gretap, .parse_udp_ports = NULL, .parse_tunnel = mlx5e_tc_tun_parse_gretap, + .encap_info_equal = mlx5e_tc_tun_encap_info_equal_generic, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c index 3479672e84cf..60952b33b568 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c @@ -131,4 +131,5 @@ struct mlx5e_tc_tunnel mplsoudp_tunnel = { .generate_ip_tun_hdr = generate_ip_tun_hdr, .parse_udp_ports = parse_udp_ports, .parse_tunnel = parse_tunnel, + .encap_info_equal = mlx5e_tc_tun_encap_info_equal_generic, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c index 038a0f1cecec..4267f3a1059e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c @@ -150,4 +150,5 @@ struct mlx5e_tc_tunnel vxlan_tunnel = { .generate_ip_tun_hdr = mlx5e_gen_ip_tunnel_header_vxlan, .parse_udp_ports = mlx5e_tc_tun_parse_udp_ports_vxlan, .parse_tunnel = mlx5e_tc_tun_parse_vxlan, + .encap_info_equal = mlx5e_tc_tun_encap_info_equal_generic, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index 2371b83dad9c..055c3bc23733 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -441,4 +441,10 @@ static inline u16 mlx5e_stop_room_for_wqe(u16 wqe_size) return wqe_size * 2 - 1; } +static inline bool mlx5e_icosq_can_post_wqe(struct mlx5e_icosq *sq, u16 wqe_size) +{ + u16 room = sq->reserved_room + mlx5e_stop_room_for_wqe(wqe_size); + + return mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room); +} #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c index d06532d0baa4..19d22a63313f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c @@ -46,7 +46,8 @@ struct mlx5e_ktls_offload_context_rx { struct tls12_crypto_info_aes_gcm_128 crypto_info; struct accel_rule rule; struct sock *sk; - struct mlx5e_rq_stats *stats; + struct mlx5e_rq_stats *rq_stats; + struct mlx5e_tls_sw_stats *sw_stats; struct completion add_ctx; u32 tirn; u32 key_id; @@ -137,11 +138,10 @@ post_static_params(struct mlx5e_icosq *sq, { struct mlx5e_set_tls_static_params_wqe *wqe; struct mlx5e_icosq_wqe_info wi; - u16 pi, num_wqebbs, room; + u16 pi, num_wqebbs; num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS; - room = mlx5e_stop_room_for_wqe(num_wqebbs); - if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room))) + if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs))) return ERR_PTR(-ENOSPC); pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs); @@ -168,11 +168,10 @@ post_progress_params(struct mlx5e_icosq *sq, { struct mlx5e_set_tls_progress_params_wqe *wqe; struct mlx5e_icosq_wqe_info wi; - u16 pi, num_wqebbs, room; + u16 pi, num_wqebbs; num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS; - room = mlx5e_stop_room_for_wqe(num_wqebbs); - if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room))) + if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs))) return ERR_PTR(-ENOSPC); pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs); @@ -218,7 +217,7 @@ unlock: return err; err_out: - priv_rx->stats->tls_resync_req_skip++; + priv_rx->rq_stats->tls_resync_req_skip++; err = PTR_ERR(cseg); complete(&priv_rx->add_ctx); goto unlock; @@ -277,17 +276,15 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq, buf->priv_rx = priv_rx; - BUILD_BUG_ON(MLX5E_KTLS_GET_PROGRESS_WQEBBS != 1); - spin_lock_bh(&sq->channel->async_icosq_lock); - if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) { + if 
(unlikely(!mlx5e_icosq_can_post_wqe(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS))) { spin_unlock_bh(&sq->channel->async_icosq_lock); err = -ENOSPC; goto err_dma_unmap; } - pi = mlx5e_icosq_get_next_pi(sq, 1); + pi = mlx5e_icosq_get_next_pi(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS); wqe = MLX5E_TLS_FETCH_GET_PROGRESS_PARAMS_WQE(sq, pi); #define GET_PSV_DS_CNT (DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS)) @@ -307,7 +304,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq, wi = (struct mlx5e_icosq_wqe_info) { .wqe_type = MLX5E_ICOSQ_WQE_GET_PSV_TLS, - .num_wqebbs = 1, + .num_wqebbs = MLX5E_KTLS_GET_PROGRESS_WQEBBS, .tls_get_params.buf = buf, }; icosq_fill_wi(sq, pi, &wi); @@ -322,7 +319,7 @@ err_dma_unmap: err_free: kfree(buf); err_out: - priv_rx->stats->tls_resync_req_skip++; + priv_rx->rq_stats->tls_resync_req_skip++; return err; } @@ -378,13 +375,13 @@ static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx cseg = post_static_params(sq, priv_rx); if (IS_ERR(cseg)) { - priv_rx->stats->tls_resync_res_skip++; + priv_rx->rq_stats->tls_resync_res_skip++; err = PTR_ERR(cseg); goto unlock; } /* Do not increment priv_rx refcnt, CQE handling is empty */ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg); - priv_rx->stats->tls_resync_res_ok++; + priv_rx->rq_stats->tls_resync_res_ok++; unlock: spin_unlock_bh(&c->async_icosq_lock); @@ -420,13 +417,13 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi, auth_state = MLX5_GET(tls_progress_params, ctx, auth_state); if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING || auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) { - priv_rx->stats->tls_resync_req_skip++; + priv_rx->rq_stats->tls_resync_req_skip++; goto out; } hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn); tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq)); - priv_rx->stats->tls_resync_req_end++; + priv_rx->rq_stats->tls_resync_req_end++; out: mlx5e_ktls_priv_rx_put(priv_rx); dma_unmap_single(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE); @@ -609,7 +606,8 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, priv_rx->rxq = rxq; priv_rx->sk = sk; - priv_rx->stats = &priv->channel_stats[rxq].rq; + priv_rx->rq_stats = &priv->channel_stats[rxq].rq; + priv_rx->sw_stats = &priv->tls->sw_stats; mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx); rqtn = priv->direct_tir[rxq].rqt.rqtn; @@ -630,7 +628,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, if (err) goto err_post_wqes; - priv_rx->stats->tls_ctx++; + atomic64_inc(&priv_rx->sw_stats->rx_tls_ctx); return 0; @@ -666,7 +664,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx) if (cancel_work_sync(&resync->work)) mlx5e_ktls_priv_rx_put(priv_rx); - priv_rx->stats->tls_del++; + atomic64_inc(&priv_rx->sw_stats->rx_tls_del); if (priv_rx->rule.rule) mlx5e_accel_fs_del_sk(priv_rx->rule.rule); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index d16def68ecff..51bdf71073f3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB // Copyright (c) 2019 Mellanox Technologies. 
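The ktls statistics rework above (continued in the Tx and tls.h hunks that follow) moves the connection-lifetime counters tx_tls_ctx, rx_tls_ctx and rx_tls_del out of the per-ring rq/sq stats and into the driver-global mlx5e_tls_sw_stats as atomic64s, so they can be bumped from add/del paths that hold no ring context and survive channel reconfiguration. A minimal sketch of the pattern, names illustrative:

#include <linux/atomic.h>
#include <linux/types.h>

/* Driver-global counters: updatable without a ring, and preserved when
 * channels are torn down and rebuilt, unlike per-ring u64 stats. */
struct example_tls_sw_stats {
	atomic64_t tx_tls_ctx;
	atomic64_t rx_tls_ctx;
	atomic64_t rx_tls_del;
};

static void example_on_rx_ctx_added(struct example_tls_sw_stats *sw)
{
	atomic64_inc(&sw->rx_tls_ctx);	/* lock-free from any context */
}

static u64 example_read_ctr(atomic64_t *ctr)
{
	return atomic64_read(ctr);	/* same read the ethtool path does */
}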
+#include "en_accel/tls.h" #include "en_accel/ktls_txrx.h" #include "en_accel/ktls_utils.h" @@ -50,6 +51,7 @@ static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn) struct mlx5e_ktls_offload_context_tx { struct tls_offload_context_tx *tx_ctx; struct tls12_crypto_info_aes_gcm_128 crypto_info; + struct mlx5e_tls_sw_stats *sw_stats; u32 expected_seq; u32 tisn; u32 key_id; @@ -99,6 +101,7 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk, if (err) goto err_create_key; + priv_tx->sw_stats = &priv->tls->sw_stats; priv_tx->expected_seq = start_offload_tcp_sn; priv_tx->crypto_info = *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info; @@ -111,6 +114,7 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk, goto err_create_tis; priv_tx->ctx_post_pending = true; + atomic64_inc(&priv_tx->sw_stats->tx_tls_ctx); return 0; @@ -452,7 +456,6 @@ bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *s if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) { mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false); - stats->tls_ctx++; } seq = ntohl(tcp_hdr(skb)->seq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h index bd270a85c804..4c9274d390da 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h @@ -41,10 +41,13 @@ #include "en.h" struct mlx5e_tls_sw_stats { + atomic64_t tx_tls_ctx; atomic64_t tx_tls_drop_metadata; atomic64_t tx_tls_drop_resync_alloc; atomic64_t tx_tls_drop_no_sync_data; atomic64_t tx_tls_drop_bypass_required; + atomic64_t rx_tls_ctx; + atomic64_t rx_tls_del; atomic64_t rx_tls_drop_resync_request; atomic64_t rx_tls_resync_request; atomic64_t rx_tls_resync_reply; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c index b949b9a7538b..29463bdb7715 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c @@ -45,49 +45,60 @@ static const struct counter_desc mlx5e_tls_sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_bypass_required) }, }; +static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = { + { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_ctx) }, + { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_ctx) }, + { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_del) }, +}; + #define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \ atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset)) -#define NUM_TLS_SW_COUNTERS ARRAY_SIZE(mlx5e_tls_sw_stats_desc) - -static bool is_tls_atomic_stats(struct mlx5e_priv *priv) +static const struct counter_desc *get_tls_atomic_stats(struct mlx5e_priv *priv) { - return priv->tls && !mlx5_accel_is_ktls_device(priv->mdev); + if (!priv->tls) + return NULL; + if (mlx5_accel_is_ktls_device(priv->mdev)) + return mlx5e_ktls_sw_stats_desc; + return mlx5e_tls_sw_stats_desc; } int mlx5e_tls_get_count(struct mlx5e_priv *priv) { - if (!is_tls_atomic_stats(priv)) + if (!priv->tls) return 0; - - return NUM_TLS_SW_COUNTERS; + if (mlx5_accel_is_ktls_device(priv->mdev)) + return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc); + return ARRAY_SIZE(mlx5e_tls_sw_stats_desc); } int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data) { - unsigned int i, idx = 0; + const struct counter_desc *stats_desc; + unsigned int i, n, idx = 0; 
- if (!is_tls_atomic_stats(priv)) - return 0; + stats_desc = get_tls_atomic_stats(priv); + n = mlx5e_tls_get_count(priv); - for (i = 0; i < NUM_TLS_SW_COUNTERS; i++) + for (i = 0; i < n; i++) strcpy(data + (idx++) * ETH_GSTRING_LEN, - mlx5e_tls_sw_stats_desc[i].format); + stats_desc[i].format); - return NUM_TLS_SW_COUNTERS; + return n; } int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data) { - int i, idx = 0; + const struct counter_desc *stats_desc; + unsigned int i, n, idx = 0; - if (!is_tls_atomic_stats(priv)) - return 0; + stats_desc = get_tls_atomic_stats(priv); + n = mlx5e_tls_get_count(priv); - for (i = 0; i < NUM_TLS_SW_COUNTERS; i++) + for (i = 0; i < n; i++) data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats, - mlx5e_tls_sw_stats_desc, i); + stats_desc, i); - return NUM_TLS_SW_COUNTERS; + return n; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index abdf721bb264..53802e18af90 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -758,11 +758,11 @@ static int get_fec_supported_advertised(struct mlx5_core_dev *dev, return 0; } -static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings *link_ksettings, - u32 eth_proto_cap, - u8 connector_type, bool ext) +static void ptys2ethtool_supported_advertised_port(struct mlx5_core_dev *mdev, + struct ethtool_link_ksettings *link_ksettings, + u32 eth_proto_cap, u8 connector_type) { - if ((!connector_type && !ext) || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) { + if (!MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type)) { if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR) | MLX5E_PROT_MASK(MLX5E_10GBASE_SR) | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4) @@ -898,9 +898,9 @@ static int ptys2connector_type[MLX5E_CONNECTOR_TYPE_NUMBER] = { [MLX5E_PORT_OTHER] = PORT_OTHER, }; -static u8 get_connector_port(u32 eth_proto, u8 connector_type, bool ext) +static u8 get_connector_port(struct mlx5_core_dev *mdev, u32 eth_proto, u8 connector_type) { - if ((connector_type || ext) && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER) + if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type)) return ptys2connector_type[connector_type]; if (eth_proto & @@ -1001,11 +1001,11 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, data_rate_oper, link_ksettings); eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; - - link_ksettings->base.port = get_connector_port(eth_proto_oper, - connector_type, ext); - ptys2ethtool_supported_advertised_port(link_ksettings, eth_proto_admin, - connector_type, ext); + connector_type = connector_type < MLX5E_CONNECTOR_TYPE_NUMBER ? 
+ connector_type : MLX5E_PORT_UNKNOWN; + link_ksettings->base.port = get_connector_port(mdev, eth_proto_oper, connector_type); + ptys2ethtool_supported_advertised_port(mdev, link_ksettings, eth_proto_admin, + connector_type); get_lp_advertising(mdev, eth_proto_lp, link_ksettings); if (an_status == MLX5_AN_COMPLETE) @@ -1887,6 +1887,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev, { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; + int err; if (!MLX5_CAP_GEN(mdev, cqe_compression)) return -EOPNOTSUPP; @@ -1896,7 +1897,10 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev, return -EINVAL; } - mlx5e_modify_rx_cqe_compression_locked(priv, enable); + err = mlx5e_modify_rx_cqe_compression_locked(priv, enable); + if (err) + return err; + priv->channels.params.rx_cqe_compress_def = enable; return 0; @@ -2014,8 +2018,13 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable) */ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { + struct mlx5e_params old_params; + + old_params = priv->channels.params; priv->channels.params = new_channels.params; err = mlx5e_num_channels_changed(priv); + if (err) + priv->channels.params = old_params; goto out; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index ec2fcb2a2977..5db63b9f3b70 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -334,9 +334,9 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq rq->wqe_overflow.addr); } -static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix) +static u64 mlx5e_get_mpwqe_offset(u16 wqe_ix) { - return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT; + return MLX5E_REQUIRED_MTTS(wqe_ix) << PAGE_SHIFT; } static void mlx5e_init_frags_partition(struct mlx5e_rq *rq) @@ -577,7 +577,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i); u32 byte_count = rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz; - u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i); + u64 dma_offset = mlx5e_get_mpwqe_offset(i); wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom); wqe->data[0].byte_count = cpu_to_be32(byte_count); @@ -1091,6 +1091,7 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c, sq->channel = c; sq->uar_map = mdev->mlx5e_res.bfreg.map; + sq->reserved_room = param->stop_room; param->wq.db_numa_node = cpu_to_node(c->cpu); err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl); @@ -2350,6 +2351,24 @@ void mlx5e_build_icosq_param(struct mlx5e_priv *priv, mlx5e_build_ico_cq_param(priv, log_wq_size, &param->cqp); } +static void mlx5e_build_async_icosq_param(struct mlx5e_priv *priv, + struct mlx5e_params *params, + u8 log_wq_size, + struct mlx5e_sq_param *param) +{ + void *sqc = param->sqc; + void *wq = MLX5_ADDR_OF(sqc, sqc, wq); + + mlx5e_build_sq_param_common(priv, param); + + /* async_icosq is used by XSK only if xdp_prog is active */ + if (params->xdp_prog) + param->stop_room = mlx5e_stop_room_for_wqe(1); /* for XSK NOP */ + MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq)); + MLX5_SET(wq, wq, log_wq_sz, log_wq_size); + mlx5e_build_ico_cq_param(priv, log_wq_size, &param->cqp); +} + void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv, struct mlx5e_params *params, struct mlx5e_sq_param *param) @@ -2368,8 +2387,9 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params, { switch
(params->rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - return order_base_2(MLX5E_UMR_WQEBBS) + - mlx5e_get_rq_log_wq_sz(rqp->rqc); + return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, + order_base_2(MLX5E_UMR_WQEBBS) + + mlx5e_get_rq_log_wq_sz(rqp->rqc)); default: /* MLX5_WQ_TYPE_CYCLIC */ return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; } @@ -2397,7 +2417,7 @@ static void mlx5e_build_channel_param(struct mlx5e_priv *priv, mlx5e_build_sq_param(priv, params, &cparam->txq_sq); mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq); mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq); - mlx5e_build_icosq_param(priv, async_icosq_log_wq_sz, &cparam->async_icosq); + mlx5e_build_async_icosq_param(priv, params, async_icosq_log_wq_sz, &cparam->async_icosq); } int mlx5e_open_channels(struct mlx5e_priv *priv, @@ -2502,8 +2522,10 @@ void mlx5e_close_channels(struct mlx5e_channels *chs) { int i; - if (chs->port_ptp) + if (chs->port_ptp) { mlx5e_port_ptp_close(chs->port_ptp); + chs->port_ptp = NULL; + } for (i = 0; i < chs->num; i++) mlx5e_close_channel(chs->c[i]); @@ -3815,6 +3837,15 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s) s->tx_dropped += sq_stats->dropped; } } + if (priv->port_ptp_opened) { + for (i = 0; i < priv->max_opened_tc; i++) { + struct mlx5e_sq_stats *sq_stats = &priv->port_ptp_stats.sq[i]; + + s->tx_packets += sq_stats->packets; + s->tx_bytes += sq_stats->bytes; + s->tx_dropped += sq_stats->dropped; + } + } } void @@ -3834,10 +3865,17 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) } if (mlx5e_is_uplink_rep(priv)) { + struct mlx5e_vport_stats *vstats = &priv->stats.vport; + stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok); stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok); stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok); stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok); + + /* vport multicast also counts packets that are dropped due to steering + * or rx out of buffer + */ + stats->multicast = VPORT_COUNTER_GET(vstats, received_eth_multicast.packets); } else { mlx5e_fold_sw_stats64(priv, stats); } @@ -4683,8 +4721,10 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) struct mlx5e_channel *c = priv->channels.c[i]; mlx5e_rq_replace_xdp_prog(&c->rq, prog); - if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) + if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) { + bpf_prog_inc(prog); mlx5e_rq_replace_xdp_prog(&c->xskrq, prog); + } } unlock: @@ -4958,6 +4998,11 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 priv->max_nch); params->num_tc = 1; + /* Set an initial non-zero value, so that mlx5e_select_queue won't + * divide by zero if called before first activating channels. + */ + priv->num_tc_x_num_ch = params->num_channels * params->num_tc; + /* SQ */ params->log_sq_size = is_kdump_kernel() ? 
MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE : @@ -5474,8 +5519,6 @@ int mlx5e_priv_init(struct mlx5e_priv *priv, struct net_device *netdev, struct mlx5_core_dev *mdev) { - memset(priv, 0, sizeof(*priv)); - /* priv init */ priv->mdev = mdev; priv->netdev = netdev; @@ -5508,12 +5551,18 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv) { int i; + /* bail if change profile failed and also rollback failed */ + if (!priv->mdev) + return; + destroy_workqueue(priv->wq); free_cpumask_var(priv->scratchpad.cpumask); for (i = 0; i < priv->htb.max_qos_sqs; i++) kfree(priv->htb.qos_sq_stats[i]); kvfree(priv->htb.qos_sq_stats); + + memset(priv, 0, sizeof(*priv)); } struct net_device * @@ -5630,11 +5679,10 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv) } static int -mlx5e_netdev_attach_profile(struct mlx5e_priv *priv, +mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mdev, const struct mlx5e_profile *new_profile, void *new_ppriv) { - struct net_device *netdev = priv->netdev; - struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_priv *priv = netdev_priv(netdev); int err; err = mlx5e_priv_init(priv, netdev, mdev); @@ -5647,10 +5695,16 @@ mlx5e_netdev_attach_profile(struct mlx5e_priv *priv, priv->ppriv = new_ppriv; err = new_profile->init(priv->mdev, priv->netdev); if (err) - return err; + goto priv_cleanup; err = mlx5e_attach_netdev(priv); if (err) - new_profile->cleanup(priv); + goto profile_cleanup; + return err; + +profile_cleanup: + new_profile->cleanup(priv); +priv_cleanup: + mlx5e_priv_cleanup(priv); return err; } @@ -5659,13 +5713,14 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv, { unsigned int new_max_nch = mlx5e_calc_max_nch(priv, new_profile); const struct mlx5e_profile *orig_profile = priv->profile; + struct net_device *netdev = priv->netdev; + struct mlx5_core_dev *mdev = priv->mdev; void *orig_ppriv = priv->ppriv; int err, rollback_err; /* sanity */ if (new_max_nch != priv->max_nch) { - netdev_warn(priv->netdev, - "%s: Replacing profile with different max channels\n", + netdev_warn(netdev, "%s: Replacing profile with different max channels\n", __func__); return -EINVAL; } @@ -5675,22 +5730,19 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv, priv->profile->cleanup(priv); mlx5e_priv_cleanup(priv); - err = mlx5e_netdev_attach_profile(priv, new_profile, new_ppriv); + err = mlx5e_netdev_attach_profile(netdev, mdev, new_profile, new_ppriv); if (err) { /* roll back to original profile */ - netdev_warn(priv->netdev, "%s: new profile init failed, %d\n", - __func__, err); + netdev_warn(netdev, "%s: new profile init failed, %d\n", __func__, err); goto rollback; } return 0; rollback: - rollback_err = mlx5e_netdev_attach_profile(priv, orig_profile, orig_ppriv); - if (rollback_err) { - netdev_err(priv->netdev, - "%s: failed to rollback to orig profile, %d\n", + rollback_err = mlx5e_netdev_attach_profile(netdev, mdev, orig_profile, orig_ppriv); + if (rollback_err) + netdev_err(netdev, "%s: failed to rollback to orig profile, %d\n", __func__, rollback_err); - } return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index a132fff7a980..8d39bfee84a9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -1107,8 +1107,9 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv) mlx5e_rep_tc_enable(priv); - mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK, - 0, 0, MLX5_VPORT_ADMIN_STATE_AUTO); + if 
(MLX5_CAP_GEN(mdev, uplink_follow)) + mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK, + 0, 0, MLX5_VPORT_ADMIN_STATE_AUTO); mlx5_lag_add(mdev, netdev); priv->events_nb.notifier_call = uplink_rep_async_event; mlx5_notifier_register(mdev, &priv->events_nb); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 1b6ad94ebb10..249d8905e644 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -500,7 +500,6 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) struct mlx5e_icosq *sq = rq->icosq; struct mlx5_wq_cyc *wq = &sq->wq; struct mlx5e_umr_wqe *umr_wqe; - u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1); u16 pi; int err; int i; @@ -531,7 +530,8 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) umr_wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_UMR); - umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset); + umr_wqe->uctrl.xlt_offset = + cpu_to_be16(MLX5_ALIGNED_MTTS_OCTW(MLX5E_REQUIRED_MTTS(ix))); sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) { .wqe_type = MLX5E_ICOSQ_WQE_UMR_RX, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 92c5b81427b9..88a01c59ce61 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -116,7 +116,6 @@ static const struct counter_desc sw_stats_desc[] = { #ifdef CONFIG_MLX5_EN_TLS { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) }, @@ -180,8 +179,6 @@ static const struct counter_desc sw_stats_desc[] = { #ifdef CONFIG_MLX5_EN_TLS { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_ctx) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_del) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) }, @@ -342,8 +339,6 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s, #ifdef CONFIG_MLX5_EN_TLS s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets; s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes; - s->rx_tls_ctx += rq_stats->tls_ctx; - s->rx_tls_del += rq_stats->tls_del; s->rx_tls_resync_req_pkt += rq_stats->tls_resync_req_pkt; s->rx_tls_resync_req_start += rq_stats->tls_resync_req_start; s->rx_tls_resync_req_end += rq_stats->tls_resync_req_end; @@ -390,7 +385,6 @@ static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s, #ifdef CONFIG_MLX5_EN_TLS s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets; s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes; - s->tx_tls_ctx += sq_stats->tls_ctx; s->tx_tls_ooo += sq_stats->tls_ooo; s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes; s->tx_tls_dump_packets += sq_stats->tls_dump_packets; @@ -1622,8 +1616,6 @@ static const struct counter_desc 
rq_stats_desc[] = { #ifdef CONFIG_MLX5_EN_TLS { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_ctx) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_del) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) }, @@ -1650,7 +1642,6 @@ static const struct counter_desc sq_stats_desc[] = { #ifdef CONFIG_MLX5_EN_TLS { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) }, - { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) }, @@ -1776,7 +1767,6 @@ static const struct counter_desc qos_sq_stats_desc[] = { #ifdef CONFIG_MLX5_EN_TLS { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) }, { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) }, - { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ctx) }, { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) }, { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) }, { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) }, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 93c41312fb03..adf9b7b8b712 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -191,7 +191,6 @@ struct mlx5e_sw_stats { #ifdef CONFIG_MLX5_EN_TLS u64 tx_tls_encrypted_packets; u64 tx_tls_encrypted_bytes; - u64 tx_tls_ctx; u64 tx_tls_ooo; u64 tx_tls_dump_packets; u64 tx_tls_dump_bytes; @@ -202,8 +201,6 @@ struct mlx5e_sw_stats { u64 rx_tls_decrypted_packets; u64 rx_tls_decrypted_bytes; - u64 rx_tls_ctx; - u64 rx_tls_del; u64 rx_tls_resync_req_pkt; u64 rx_tls_resync_req_start; u64 rx_tls_resync_req_end; @@ -334,8 +331,6 @@ struct mlx5e_rq_stats { #ifdef CONFIG_MLX5_EN_TLS u64 tls_decrypted_packets; u64 tls_decrypted_bytes; - u64 tls_ctx; - u64 tls_del; u64 tls_resync_req_pkt; u64 tls_resync_req_start; u64 tls_resync_req_end; @@ -364,7 +359,6 @@ struct mlx5e_sq_stats { #ifdef CONFIG_MLX5_EN_TLS u64 tls_encrypted_packets; u64 tls_encrypted_bytes; - u64 tls_ctx; u64 tls_ooo; u64 tls_dump_packets; u64 tls_dump_bytes; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 0da69b98f38f..d675107d9eca 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1895,6 +1895,9 @@ static int mlx5e_flower_parse_meta(struct net_device *filter_dev, return 0; flow_rule_match_meta(rule, &match); + if (!match.mask->ingress_ifindex) + return 0; + if (match.mask->ingress_ifindex != 0xFFFFFFFF) { NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask"); return -EOPNOTSUPP; @@ -2296,6 +2299,16 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, *match_level = MLX5_MATCH_L4; } + /* Currently supported only for MPLS over UDP */ + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) && + !netif_is_bareudp(filter_dev)) { + NL_SET_ERR_MSG_MOD(extack, + "Matching on MPLS is
supported only for MPLS over UDP"); + netdev_err(priv->netdev, + "Matching on MPLS is supported only for MPLS over UDP\n"); + return -EOPNOTSUPP; + } + return 0; } @@ -2899,6 +2912,37 @@ static int is_action_keys_supported(const struct flow_action_entry *act, return 0; } +static bool modify_tuple_supported(bool modify_tuple, bool ct_clear, + bool ct_flow, struct netlink_ext_ack *extack, + struct mlx5e_priv *priv, + struct mlx5_flow_spec *spec) +{ + if (!modify_tuple || ct_clear) + return true; + + if (ct_flow) { + NL_SET_ERR_MSG_MOD(extack, + "can't offload tuple modification with non-clear ct()"); + netdev_info(priv->netdev, + "can't offload tuple modification with non-clear ct()"); + return false; + } + + /* Add ct_state=-trk match so it will be offloaded for non ct flows + * (or after clear action), as otherwise, since the tuple is changed, + * we can't restore ct state + */ + if (mlx5_tc_ct_add_no_trk_match(spec)) { + NL_SET_ERR_MSG_MOD(extack, + "can't offload tuple modification with ct matches and no ct(clear) action"); + netdev_info(priv->netdev, + "can't offload tuple modification with ct matches and no ct(clear) action"); + return false; + } + + return true; +} + static bool modify_header_match_supported(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, struct flow_action *flow_action, @@ -2937,18 +2981,9 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv, return err; } - /* Add ct_state=-trk match so it will be offloaded for non ct flows - * (or after clear action), as otherwise, since the tuple is changed, - * we can't restore ct state - */ - if (!ct_clear && modify_tuple && - mlx5_tc_ct_add_no_trk_match(spec)) { - NL_SET_ERR_MSG_MOD(extack, - "can't offload tuple modify header with ct matches"); - netdev_info(priv->netdev, - "can't offload tuple modify header with ct matches"); + if (!modify_tuple_supported(modify_tuple, ct_clear, ct_flow, extack, + priv, spec)) return false; - } ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol); if (modify_ip_header && ip_proto != IPPROTO_TCP && @@ -4445,7 +4480,8 @@ static int apply_police_params(struct mlx5e_priv *priv, u64 rate, */ if (rate) { rate = (rate * BITS_PER_BYTE) + 500000; - rate_mbps = max_t(u64, do_div(rate, 1000000), 1); + do_div(rate, 1000000); + rate_mbps = max_t(u32, rate, 1); } err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index 89003ae7775a..25c091795bcd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -79,6 +79,7 @@ struct mlx5_flow_attr { u8 inner_match_level; u8 outer_match_level; u8 ip_version; + u8 tun_ip_version; u32 flags; union { struct mlx5_esw_flow_attr esw_attr[0]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 174dfbc996c6..1fa9c18563da 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -931,13 +931,24 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev) mutex_unlock(&table->lock); } +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING +#define MLX5_MAX_ASYNC_EQS 4 +#else +#define MLX5_MAX_ASYNC_EQS 3 +#endif + int mlx5_eq_table_create(struct mlx5_core_dev *dev) { struct mlx5_eq_table *eq_table = dev->priv.eq_table; + int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ? 
+ MLX5_CAP_GEN(dev, max_num_eqs) : + 1 << MLX5_CAP_GEN(dev, log_max_eq); int err; eq_table->num_comp_eqs = - mlx5_irq_get_num_comp(eq_table->irq_table); + min_t(int, + mlx5_irq_get_num_comp(eq_table->irq_table), + num_eqs - MLX5_MAX_ASYNC_EQS); err = create_async_eqs(dev); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c index 6f6772bf61a2..3da7becc1069 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c @@ -248,7 +248,7 @@ err_mod_hdr_regc0: err_ethertype: kfree(rule); out: - kfree(rule_spec); + kvfree(rule_spec); return err; } @@ -328,7 +328,7 @@ static int mlx5_create_indir_recirc_group(struct mlx5_eswitch *esw, e->recirc_cnt = 0; out: - kfree(in); + kvfree(in); return err; } @@ -347,7 +347,7 @@ static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw, spec = kvzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) { - kfree(in); + kvfree(in); return -ENOMEM; } @@ -371,8 +371,8 @@ static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw, } err_out: - kfree(spec); - kfree(in); + kvfree(spec); + kvfree(in); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 94cb0217b4f3..d4a2f8d1ee9f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -537,6 +537,14 @@ esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act * return i; } +static bool +esw_src_port_rewrite_supported(struct mlx5_eswitch *esw) +{ + return MLX5_CAP_GEN(esw->dev, reg_c_preserve) && + mlx5_eswitch_vport_match_metadata_enabled(esw) && + MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level); +} + static int esw_setup_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act, @@ -550,8 +558,7 @@ esw_setup_dests(struct mlx5_flow_destination *dest, int err = 0; if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) && - MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve) && - mlx5_eswitch_vport_match_metadata_enabled(esw)) + esw_src_port_rewrite_supported(esw)) attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE; if (attr->dest_ft) { @@ -1715,36 +1722,40 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw) } esw->fdb_table.offloads.send_to_vport_grp = g; - /* meta send to vport */ - memset(flow_group_in, 0, inlen); - MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, - MLX5_MATCH_MISC_PARAMETERS_2); - - match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); + if (esw_src_port_rewrite_supported(esw)) { + /* meta send to vport */ + memset(flow_group_in, 0, inlen); + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, + MLX5_MATCH_MISC_PARAMETERS_2); - MLX5_SET(fte_match_param, match_criteria, - misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask()); - MLX5_SET(fte_match_param, match_criteria, - misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK); + match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); - num_vfs = esw->esw_funcs.num_vfs; - if (num_vfs) { - MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix); - MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + num_vfs - 1); - ix += num_vfs; + MLX5_SET(fte_match_param, match_criteria, + misc_parameters_2.metadata_reg_c_0, + 
mlx5_eswitch_get_vport_metadata_mask()); + MLX5_SET(fte_match_param, match_criteria, + misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK); - g = mlx5_create_flow_group(fdb, flow_group_in); - if (IS_ERR(g)) { - err = PTR_ERR(g); - esw_warn(dev, "Failed to create send-to-vport meta flow group err(%d)\n", - err); - goto send_vport_meta_err; + num_vfs = esw->esw_funcs.num_vfs; + if (num_vfs) { + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix); + MLX5_SET(create_flow_group_in, flow_group_in, + end_flow_index, ix + num_vfs - 1); + ix += num_vfs; + + g = mlx5_create_flow_group(fdb, flow_group_in); + if (IS_ERR(g)) { + err = PTR_ERR(g); + esw_warn(dev, "Failed to create send-to-vport meta flow group err(%d)\n", + err); + goto send_vport_meta_err; + } + esw->fdb_table.offloads.send_to_vport_meta_grp = g; + + err = mlx5_eswitch_add_send_to_vport_meta_rules(esw); + if (err) + goto meta_rule_err; } - esw->fdb_table.offloads.send_to_vport_meta_grp = g; - - err = mlx5_eswitch_add_send_to_vport_meta_rules(esw); - if (err) - goto meta_rule_err; } if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c index 80da50e12915..bd66ab2af5b5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c @@ -575,6 +575,7 @@ static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn, MLX5_SET(qpc, qpc, log_sq_size, ilog2(conn->qp.sq.size)); MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn); MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn); + MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(mdev)); MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma); if (MLX5_CAP_GEN(mdev, cqe_version) == 1) MLX5_SET(qpc, qpc, user_index, 0xFFFFFF); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 1eeca45cfcdf..6f7cef47e04c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -233,6 +233,7 @@ int mlx5i_create_underlay_qp(struct mlx5e_priv *priv) } qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); + MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(priv->mdev)); MLX5_SET(qpc, qpc, st, MLX5_QP_ST_UD); MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, @@ -694,6 +695,7 @@ static int mlx5i_check_required_hca_cap(struct mlx5_core_dev *mdev) static void mlx5_rdma_netdev_free(struct net_device *netdev) { struct mlx5e_priv *priv = mlx5i_epriv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; struct mlx5i_priv *ipriv = priv->ppriv; const struct mlx5e_profile *profile = priv->profile; @@ -702,7 +704,7 @@ static void mlx5_rdma_netdev_free(struct net_device *netdev) if (!ipriv->sub_interface) { mlx5i_pkey_qpn_ht_cleanup(netdev); - mlx5e_destroy_mdev_resources(priv->mdev); + mlx5e_destroy_mdev_resources(mdev); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index b0e129d0f6d8..1e7f26b240de 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -495,15 +495,15 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp, return -EINVAL; field_select = MLX5_MTPPS_FS_ENABLE; + pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index); + if (pin < 0) + return -EBUSY; + if (on) { bool rt_mode = 
mlx5_real_time_mode(mdev); u32 nsec; s64 sec; - pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index); - if (pin < 0) - return -EBUSY; - pin_mode = MLX5_PIN_MODE_OUT; pattern = MLX5_OUT_PATTERN_PERIODIC; ts.tv_sec = rq->perout.period.sec; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c index b265f27b2166..90b524c59f3c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c @@ -181,15 +181,13 @@ static int mlx5_sf_dev_vhca_arm_all(struct mlx5_sf_dev_table *table) u16 max_functions; u16 function_id; int err = 0; - bool ecpu; int i; max_functions = mlx5_sf_max_functions(dev); function_id = MLX5_CAP_GEN(dev, sf_base_id); - ecpu = mlx5_read_embedded_cpu(dev); /* Arm the vhca context as the vhca event notifier */ for (i = 0; i < max_functions; i++) { - err = mlx5_vhca_event_arm(dev, function_id, ecpu); + err = mlx5_vhca_event_arm(dev, function_id); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c index 58b6be0b03d7..a5a0f60bef66 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c @@ -6,7 +6,7 @@ #include "sf.h" #include "mlx5_ifc_vhca_event.h" #include "vhca_event.h" -#include "ecpf.h" +#include "mlx5_core.h" struct mlx5_sf_hw { u32 usr_sfnum; @@ -18,7 +18,6 @@ struct mlx5_sf_hw_table { struct mlx5_core_dev *dev; struct mlx5_sf_hw *sfs; int max_local_functions; - u8 ecpu: 1; struct mutex table_lock; /* Serializes sf deletion and vhca state change handler. */ struct notifier_block vhca_nb; }; @@ -64,7 +63,7 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum) } if (sw_id == -ENOSPC) { err = -ENOSPC; - goto err; + goto exist_err; } hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, sw_id); @@ -72,7 +71,7 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum) if (err) goto err; - err = mlx5_modify_vhca_sw_id(dev, hw_fn_id, table->ecpu, usr_sfnum); + err = mlx5_modify_vhca_sw_id(dev, hw_fn_id, usr_sfnum); if (err) goto vhca_err; @@ -118,7 +117,7 @@ void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id) hw_fn_id = mlx5_sf_sw_to_hw_id(dev, id); mutex_lock(&table->table_lock); - err = mlx5_cmd_query_vhca_state(dev, hw_fn_id, table->ecpu, out, sizeof(out)); + err = mlx5_cmd_query_vhca_state(dev, hw_fn_id, out, sizeof(out)); if (err) goto err; state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state); @@ -164,7 +163,6 @@ int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev) table->dev = dev; table->sfs = sfs; table->max_local_functions = max_functions; - table->ecpu = mlx5_read_embedded_cpu(dev); dev->priv.sf_hw_table = table; mlx5_core_dbg(dev, "SF HW table: max sfs = %d\n", max_functions); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/mlx5_ifc_vhca_event.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/mlx5_ifc_vhca_event.h index 1daf5a122ba3..4fc870140d71 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/mlx5_ifc_vhca_event.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/mlx5_ifc_vhca_event.h @@ -20,7 +20,7 @@ struct mlx5_ifc_vhca_state_context_bits { u8 sw_function_id[0x20]; - u8 reserved_at_40[0x80]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_query_vhca_state_out_bits { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c 
b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c index af2f2dd9db25..28b14b05086f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c @@ -19,52 +19,51 @@ struct mlx5_vhca_event_work { struct mlx5_vhca_state_event event; }; -int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id, - bool ecpu, u32 *out, u32 outlen) +int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id, u32 *out, u32 outlen) { u32 in[MLX5_ST_SZ_DW(query_vhca_state_in)] = {}; MLX5_SET(query_vhca_state_in, in, opcode, MLX5_CMD_OP_QUERY_VHCA_STATE); MLX5_SET(query_vhca_state_in, in, function_id, function_id); - MLX5_SET(query_vhca_state_in, in, embedded_cpu_function, ecpu); + MLX5_SET(query_vhca_state_in, in, embedded_cpu_function, 0); return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); } static int mlx5_cmd_modify_vhca_state(struct mlx5_core_dev *dev, u16 function_id, - bool ecpu, u32 *in, u32 inlen) + u32 *in, u32 inlen) { u32 out[MLX5_ST_SZ_DW(modify_vhca_state_out)] = {}; MLX5_SET(modify_vhca_state_in, in, opcode, MLX5_CMD_OP_MODIFY_VHCA_STATE); MLX5_SET(modify_vhca_state_in, in, function_id, function_id); - MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, ecpu); + MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, 0); return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); } -int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, bool ecpu, u32 sw_fn_id) +int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, u32 sw_fn_id) { u32 out[MLX5_ST_SZ_DW(modify_vhca_state_out)] = {}; u32 in[MLX5_ST_SZ_DW(modify_vhca_state_in)] = {}; MLX5_SET(modify_vhca_state_in, in, opcode, MLX5_CMD_OP_MODIFY_VHCA_STATE); MLX5_SET(modify_vhca_state_in, in, function_id, function_id); - MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, ecpu); + MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, 0); MLX5_SET(modify_vhca_state_in, in, vhca_state_field_select.sw_function_id, 1); MLX5_SET(modify_vhca_state_in, in, vhca_state_context.sw_function_id, sw_fn_id); return mlx5_cmd_exec_inout(dev, modify_vhca_state, in, out); } -int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id, bool ecpu) +int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id) { u32 in[MLX5_ST_SZ_DW(modify_vhca_state_in)] = {}; MLX5_SET(modify_vhca_state_in, in, vhca_state_context.arm_change_event, 1); MLX5_SET(modify_vhca_state_in, in, vhca_state_field_select.arm_change_event, 1); - return mlx5_cmd_modify_vhca_state(dev, function_id, ecpu, in, sizeof(in)); + return mlx5_cmd_modify_vhca_state(dev, function_id, in, sizeof(in)); } static void @@ -73,7 +72,7 @@ mlx5_vhca_event_notify(struct mlx5_core_dev *dev, struct mlx5_vhca_state_event * u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {}; int err; - err = mlx5_cmd_query_vhca_state(dev, event->function_id, event->ecpu, out, sizeof(out)); + err = mlx5_cmd_query_vhca_state(dev, event->function_id, out, sizeof(out)); if (err) return; @@ -82,7 +81,7 @@ mlx5_vhca_event_notify(struct mlx5_core_dev *dev, struct mlx5_vhca_state_event * event->new_vhca_state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state); - mlx5_vhca_event_arm(dev, event->function_id, event->ecpu); + mlx5_vhca_event_arm(dev, event->function_id); blocking_notifier_call_chain(&dev->priv.vhca_state_notifier->n_head, 0, event); } @@ -94,6 +93,7 @@ static void mlx5_vhca_state_work_handler(struct work_struct *_work) struct mlx5_core_dev *dev = 
notifier->dev; mlx5_vhca_event_notify(dev, &work->event); + kfree(work); } static int @@ -110,7 +110,6 @@ mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, v INIT_WORK(&work->work, &mlx5_vhca_state_work_handler); work->notifier = notifier; work->event.function_id = be16_to_cpu(eqe->data.vhca_state.function_id); - work->event.ecpu = be16_to_cpu(eqe->data.vhca_state.ec_function); mlx5_events_work_enqueue(notifier->dev, &work->work); return NOTIFY_OK; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h index 1fe1ec6f4d4b..013cdfe90616 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h @@ -10,7 +10,6 @@ struct mlx5_vhca_state_event { u16 function_id; u16 sw_function_id; u8 new_vhca_state; - bool ecpu; }; static inline bool mlx5_vhca_event_supported(const struct mlx5_core_dev *dev) @@ -25,10 +24,10 @@ void mlx5_vhca_event_start(struct mlx5_core_dev *dev); void mlx5_vhca_event_stop(struct mlx5_core_dev *dev); int mlx5_vhca_event_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb); void mlx5_vhca_event_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb); -int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, bool ecpu, u32 sw_fn_id); -int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id, bool ecpu); +int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, u32 sw_fn_id); +int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id); int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id, - bool ecpu, u32 *out, u32 outlen); + u32 *out, u32 outlen); #else static inline void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c index 83c4c877d558..8a6a56f9dc4e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c @@ -169,6 +169,7 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev, MLX5_SET(qpc, qpc, log_rq_size, ilog2(dr_qp->rq.wqe_cnt)); MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ); MLX5_SET(qpc, qpc, log_sq_size, ilog2(dr_qp->sq.wqe_cnt)); + MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(mdev)); MLX5_SET64(qpc, qpc, dbr_addr, dr_qp->wq_ctrl.db.dma); if (MLX5_CAP_GEN(mdev, cqe_version) == 1) MLX5_SET(qpc, qpc, user_index, 0xFFFFFF); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c index 4088d6e51508..9143ec326ebf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c @@ -264,8 +264,8 @@ static void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr) static u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p) { u64 index = - (MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) | - MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32) << 26); + ((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) | + ((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32)) << 26); return index << 6; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index d9d9e1f488f9..ba28ac7e79bc 100644 --- 
a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -21,6 +21,7 @@ #include <net/red.h> #include <net/vxlan.h> #include <net/flow_offload.h> +#include <net/inet_ecn.h> #include "port.h" #include "core.h" @@ -347,6 +348,20 @@ struct mlxsw_sp_port_type_speed_ops { u32 (*ptys_proto_cap_masked_get)(u32 eth_proto_cap); }; +static inline u8 mlxsw_sp_tunnel_ecn_decap(u8 outer_ecn, u8 inner_ecn, + bool *trap_en) +{ + bool set_ce = false; + + *trap_en = !!__INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce); + if (set_ce) + return INET_ECN_CE; + else if (outer_ecn == INET_ECN_ECT_1 && inner_ecn == INET_ECN_ECT_0) + return INET_ECN_ECT_1; + else + return inner_ecn; +} + static inline struct net_device * mlxsw_sp_bridge_vxlan_dev_find(struct net_device *br_dev) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c index 0bd64169bf81..078601d31cde 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c @@ -1230,16 +1230,22 @@ mlxsw_sp1_from_ptys_link_mode(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, u32 ptys_eth_proto, struct ethtool_link_ksettings *cmd) { + struct mlxsw_sp1_port_link_mode link; int i; - cmd->link_mode = -1; + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + cmd->lanes = 0; if (!carrier_ok) return; for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { - if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) - cmd->link_mode = mlxsw_sp1_port_link_mode[i].mask_ethtool; + if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) { + link = mlxsw_sp1_port_link_mode[i]; + ethtool_params_from_link_mode(cmd, + link.mask_ethtool); + } } } @@ -1672,7 +1678,9 @@ mlxsw_sp2_from_ptys_link_mode(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, struct mlxsw_sp2_port_link_mode link; int i; - cmd->link_mode = -1; + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + cmd->lanes = 0; if (!carrier_ok) return; @@ -1680,7 +1688,8 @@ mlxsw_sp2_from_ptys_link_mode(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) { link = mlxsw_sp2_port_link_mode[i]; - cmd->link_mode = link.mask_ethtool[1]; + ethtool_params_from_link_mode(cmd, + link.mask_ethtool[1]); } } } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c index 6ccca39bae84..64a8f838eb53 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c @@ -335,12 +335,11 @@ static int mlxsw_sp_ipip_ecn_decap_init_one(struct mlxsw_sp *mlxsw_sp, u8 inner_ecn, u8 outer_ecn) { char tidem_pl[MLXSW_REG_TIDEM_LEN]; - bool trap_en, set_ce = false; u8 new_inner_ecn; + bool trap_en; - trap_en = __INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce); - new_inner_ecn = set_ce ? INET_ECN_CE : inner_ecn; - + new_inner_ecn = mlxsw_sp_tunnel_ecn_decap(outer_ecn, inner_ecn, + &trap_en); mlxsw_reg_tidem_pack(tidem_pl, outer_ecn, inner_ecn, new_inner_ecn, trap_en, trap_en ? 
MLXSW_TRAP_ID_DECAP_ECN0 : 0); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tidem), tidem_pl); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c index e5ec595593f4..9eba8fa684ae 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c @@ -909,12 +909,11 @@ static int __mlxsw_sp_nve_ecn_decap_init(struct mlxsw_sp *mlxsw_sp, u8 inner_ecn, u8 outer_ecn) { char tndem_pl[MLXSW_REG_TNDEM_LEN]; - bool trap_en, set_ce = false; u8 new_inner_ecn; + bool trap_en; - trap_en = !!__INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce); - new_inner_ecn = set_ce ? INET_ECN_CE : inner_ecn; - + new_inner_ecn = mlxsw_sp_tunnel_ecn_decap(outer_ecn, inner_ecn, + &trap_en); mlxsw_reg_tndem_pack(tndem_pl, outer_ecn, inner_ecn, new_inner_ecn, trap_en, trap_en ? MLXSW_TRAP_ID_DECAP_ECN0 : 0); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tndem), tndem_pl); diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 1c3e204d727c..7b6794aa8ea9 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -885,8 +885,8 @@ static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu) } mac_rx &= ~(MAC_RX_MAX_SIZE_MASK_); - mac_rx |= (((new_mtu + ETH_HLEN + 4) << MAC_RX_MAX_SIZE_SHIFT_) & - MAC_RX_MAX_SIZE_MASK_); + mac_rx |= (((new_mtu + ETH_HLEN + ETH_FCS_LEN) + << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_); lan743x_csr_write(adapter, MAC_RX, mac_rx); if (enabled) { @@ -1944,7 +1944,7 @@ static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index) struct sk_buff *skb; dma_addr_t dma_ptr; - buffer_length = netdev->mtu + ETH_HLEN + 4 + RX_HEAD_PADDING; + buffer_length = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + RX_HEAD_PADDING; descriptor = &rx->ring_cpu_ptr[index]; buffer_info = &rx->buffer_info[index]; @@ -2040,7 +2040,7 @@ lan743x_rx_trim_skb(struct sk_buff *skb, int frame_length) dev_kfree_skb_irq(skb); return NULL; } - frame_length = max_t(int, 0, frame_length - RX_HEAD_PADDING - 4); + frame_length = max_t(int, 0, frame_length - ETH_FCS_LEN); if (skb->len > frame_length) { skb->tail -= skb->len - frame_length; skb->len = frame_length; diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 1634ca6d4a8f..c84c8bf2bc20 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -2897,7 +2897,7 @@ static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb, dev_kfree_skb_any(curr); if (segs != NULL) { curr = segs; - segs = segs->next; + segs = next; curr->next = NULL; dev_kfree_skb_any(segs); } diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c index 0e2db6ea79e9..2ec62c8d86e1 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c @@ -454,6 +454,7 @@ void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb) dev_consume_skb_any(skb); else dev_kfree_skb_any(skb); + return; } nfp_ccm_rx(&bpf->ccm, skb); diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index caf12eec9945..56833a41f3d2 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -190,6 +190,7 @@ struct nfp_fl_internal_ports { * 
@qos_rate_limiters: Current active qos rate limiters * @qos_stats_lock: Lock on qos stats updates * @pre_tun_rule_cnt: Number of pre-tunnel rules offloaded + * @merge_table: Hash table to store merged flows */ struct nfp_flower_priv { struct nfp_app *app; @@ -223,6 +224,7 @@ struct nfp_flower_priv { unsigned int qos_rate_limiters; spinlock_t qos_stats_lock; /* Protect the qos stats */ int pre_tun_rule_cnt; + struct rhashtable merge_table; }; /** @@ -350,6 +352,12 @@ struct nfp_fl_payload_link { }; extern const struct rhashtable_params nfp_flower_table_params; +extern const struct rhashtable_params merge_table_params; + +struct nfp_merge_info { + u64 parent_ctx; + struct rhash_head ht_node; +}; struct nfp_fl_stats_frame { __be32 stats_con_id; diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index 5defd31d481c..327bb56b3ef5 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -327,8 +327,14 @@ int nfp_compile_flow_metadata(struct nfp_app *app, goto err_free_ctx_entry; } + /* Do not allocate a mask-id for pre_tun_rules. These flows are used to + * configure the pre_tun table and are never actually sent to the + * firmware as an add-flow message. This causes the mask-id allocation + * on the firmware to get out of sync if allocated here. + */ new_mask_id = 0; - if (!nfp_check_mask_add(app, nfp_flow->mask_data, + if (!nfp_flow->pre_tun_rule.dev && + !nfp_check_mask_add(app, nfp_flow->mask_data, nfp_flow->meta.mask_len, &nfp_flow->meta.flags, &new_mask_id)) { NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate a new mask id"); @@ -359,7 +365,8 @@ int nfp_compile_flow_metadata(struct nfp_app *app, goto err_remove_mask; } - if (!nfp_check_mask_remove(app, nfp_flow->mask_data, + if (!nfp_flow->pre_tun_rule.dev && + !nfp_check_mask_remove(app, nfp_flow->mask_data, nfp_flow->meta.mask_len, NULL, &new_mask_id)) { NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release mask id"); @@ -374,8 +381,10 @@ int nfp_compile_flow_metadata(struct nfp_app *app, return 0; err_remove_mask: - nfp_check_mask_remove(app, nfp_flow->mask_data, nfp_flow->meta.mask_len, - NULL, &new_mask_id); + if (!nfp_flow->pre_tun_rule.dev) + nfp_check_mask_remove(app, nfp_flow->mask_data, + nfp_flow->meta.mask_len, + NULL, &new_mask_id); err_remove_rhash: WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table, &ctx_entry->ht_node, @@ -406,9 +415,10 @@ int nfp_modify_flow_metadata(struct nfp_app *app, __nfp_modify_flow_metadata(priv, nfp_flow); - nfp_check_mask_remove(app, nfp_flow->mask_data, - nfp_flow->meta.mask_len, &nfp_flow->meta.flags, - &new_mask_id); + if (!nfp_flow->pre_tun_rule.dev) + nfp_check_mask_remove(app, nfp_flow->mask_data, + nfp_flow->meta.mask_len, &nfp_flow->meta.flags, + &new_mask_id); /* Update flow payload with mask ids.
*/ nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id; @@ -480,6 +490,12 @@ const struct rhashtable_params nfp_flower_table_params = { .automatic_shrinking = true, }; +const struct rhashtable_params merge_table_params = { + .key_offset = offsetof(struct nfp_merge_info, parent_ctx), + .head_offset = offsetof(struct nfp_merge_info, ht_node), + .key_len = sizeof(u64), +}; + int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, unsigned int host_num_mems) { @@ -496,6 +512,10 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, if (err) goto err_free_flow_table; + err = rhashtable_init(&priv->merge_table, &merge_table_params); + if (err) + goto err_free_stats_ctx_table; + get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed)); /* Init ring buffer and unallocated mask_ids. */ @@ -503,7 +523,7 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS, NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL); if (!priv->mask_ids.mask_id_free_list.buf) - goto err_free_stats_ctx_table; + goto err_free_merge_table; priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1; @@ -540,6 +560,8 @@ err_free_last_used: kfree(priv->mask_ids.last_used); err_free_mask_id: kfree(priv->mask_ids.mask_id_free_list.buf); +err_free_merge_table: + rhashtable_destroy(&priv->merge_table); err_free_stats_ctx_table: rhashtable_destroy(&priv->stats_ctx_table); err_free_flow_table: @@ -558,6 +580,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app) nfp_check_rhashtable_empty, NULL); rhashtable_free_and_destroy(&priv->stats_ctx_table, nfp_check_rhashtable_empty, NULL); + rhashtable_free_and_destroy(&priv->merge_table, + nfp_check_rhashtable_empty, NULL); kvfree(priv->stats); kfree(priv->mask_ids.mask_id_free_list.buf); kfree(priv->mask_ids.last_used); diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 1c59aff2163c..e95969c462e4 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -1009,6 +1009,8 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app, struct netlink_ext_ack *extack = NULL; struct nfp_fl_payload *merge_flow; struct nfp_fl_key_ls merge_key_ls; + struct nfp_merge_info *merge_info; + u64 parent_ctx = 0; int err; ASSERT_RTNL(); @@ -1019,6 +1021,15 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app, nfp_flower_is_merge_flow(sub_flow2)) return -EINVAL; + /* check if the two flows are already merged */ + parent_ctx = (u64)(be32_to_cpu(sub_flow1->meta.host_ctx_id)) << 32; + parent_ctx |= (u64)(be32_to_cpu(sub_flow2->meta.host_ctx_id)); + if (rhashtable_lookup_fast(&priv->merge_table, + &parent_ctx, merge_table_params)) { + nfp_flower_cmsg_warn(app, "The two flows are already merged.\n"); + return 0; + } + err = nfp_flower_can_merge(sub_flow1, sub_flow2); if (err) return err; @@ -1060,16 +1071,33 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app, if (err) goto err_release_metadata; + merge_info = kmalloc(sizeof(*merge_info), GFP_KERNEL); + if (!merge_info) { + err = -ENOMEM; + goto err_remove_rhash; + } + merge_info->parent_ctx = parent_ctx; + err = rhashtable_insert_fast(&priv->merge_table, &merge_info->ht_node, + merge_table_params); + if (err) + goto err_destroy_merge_info; + err = nfp_flower_xmit_flow(app, merge_flow, NFP_FLOWER_CMSG_TYPE_FLOW_MOD); if (err) - goto err_remove_rhash; + goto err_remove_merge_info; merge_flow->in_hw = true; 
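The offload.c hunk here deduplicates flow merges by keying each merged pair with a single u64, parent_ctx, built from the two sub-flows' 32-bit host context IDs, and tracking it in the new merge_table rhashtable. A minimal stand-alone sketch of that key packing, kept outside the kernel APIs (plain C; the IDs are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Pack two 32-bit flow context IDs into one 64-bit hash key:
 * high word = first sub-flow, low word = second sub-flow. */
static uint64_t pack_parent_ctx(uint32_t ctx1, uint32_t ctx2)
{
	return ((uint64_t)ctx1 << 32) | (uint64_t)ctx2;
}

int main(void)
{
	uint64_t key = pack_parent_ctx(0x1234, 0x5678);	/* hypothetical IDs */

	printf("key=0x%016llx\n", (unsigned long long)key);
	printf("ctx1=0x%x ctx2=0x%x\n",
	       (uint32_t)(key >> 32), (uint32_t)key);
	return 0;
}

Packing both IDs into one fixed-width key is what lets the rhashtable_lookup_fast() call earlier in this hunk answer "are these two flows already merged?" in a single lookup; the removal path in the next hunk rebuilds the same key while unlinking the sub-flows.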
sub_flow1->in_hw = false; return 0; +err_remove_merge_info: + WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table, + &merge_info->ht_node, + merge_table_params)); +err_destroy_merge_info: + kfree(merge_info); err_remove_rhash: WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table, &merge_flow->fl_node, @@ -1142,6 +1170,12 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app, return -EOPNOTSUPP; } + if (!(key_layer & NFP_FLOWER_LAYER_IPV4) && + !(key_layer & NFP_FLOWER_LAYER_IPV6)) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on ipv4/ipv6 eth_type must be present"); + return -EOPNOTSUPP; + } + /* Skip fields known to exist. */ mask += sizeof(struct nfp_flower_meta_tci); ext += sizeof(struct nfp_flower_meta_tci); @@ -1152,6 +1186,13 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app, mask += sizeof(struct nfp_flower_in_port); ext += sizeof(struct nfp_flower_in_port); + /* Ensure destination MAC address matches pre_tun_dev. */ + mac = (struct nfp_flower_mac_mpls *)ext; + if (memcmp(&mac->mac_dst[0], flow->pre_tun_rule.dev->dev_addr, 6)) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC must match output dev MAC"); + return -EOPNOTSUPP; + } + /* Ensure destination MAC address is fully matched. */ mac = (struct nfp_flower_mac_mpls *)mask; if (!is_broadcast_ether_addr(&mac->mac_dst[0])) { @@ -1159,6 +1200,11 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app, return -EOPNOTSUPP; } + if (mac->mpls_lse) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MPLS not supported"); + return -EOPNOTSUPP; + } + mask += sizeof(struct nfp_flower_mac_mpls); ext += sizeof(struct nfp_flower_mac_mpls); if (key_layer & NFP_FLOWER_LAYER_IPV4 || @@ -1341,7 +1387,9 @@ nfp_flower_remove_merge_flow(struct nfp_app *app, { struct nfp_flower_priv *priv = app->priv; struct nfp_fl_payload_link *link, *temp; + struct nfp_merge_info *merge_info; struct nfp_fl_payload *origin; + u64 parent_ctx = 0; bool mod = false; int err; @@ -1378,8 +1426,22 @@ nfp_flower_remove_merge_flow(struct nfp_app *app, err_free_links: /* Clean any links connected with the merged flow. 
*/ list_for_each_entry_safe(link, temp, &merge_flow->linked_flows, - merge_flow.list) + merge_flow.list) { + u32 ctx_id = be32_to_cpu(link->sub_flow.flow->meta.host_ctx_id); + + parent_ctx = (parent_ctx << 32) | (u64)(ctx_id); nfp_flower_unlink_flow(link); + } + + merge_info = rhashtable_lookup_fast(&priv->merge_table, + &parent_ctx, + merge_table_params); + if (merge_info) { + WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table, + &merge_info->ht_node, + merge_table_params)); + kfree(merge_info); + } kfree(merge_flow->action_data); kfree(merge_flow->mask_data); diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index 7248d248f604..d19c02e99114 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -16,8 +16,9 @@ #define NFP_FL_MAX_ROUTES 32 #define NFP_TUN_PRE_TUN_RULE_LIMIT 32 -#define NFP_TUN_PRE_TUN_RULE_DEL 0x1 -#define NFP_TUN_PRE_TUN_IDX_BIT 0x8 +#define NFP_TUN_PRE_TUN_RULE_DEL BIT(0) +#define NFP_TUN_PRE_TUN_IDX_BIT BIT(3) +#define NFP_TUN_PRE_TUN_IPV6_BIT BIT(7) /** * struct nfp_tun_pre_tun_rule - rule matched before decap @@ -1268,6 +1269,7 @@ int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app, { struct nfp_flower_priv *app_priv = app->priv; struct nfp_tun_offloaded_mac *mac_entry; + struct nfp_flower_meta_tci *key_meta; struct nfp_tun_pre_tun_rule payload; struct net_device *internal_dev; int err; @@ -1290,6 +1292,15 @@ int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app, if (!mac_entry) return -ENOENT; + /* Set/clear IPV6 bit. cpu_to_be16() swap will lead to MSB being + * set/clear for port_idx. + */ + key_meta = (struct nfp_flower_meta_tci *)flow->unmasked_data; + if (key_meta->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV6) + mac_entry->index |= NFP_TUN_PRE_TUN_IPV6_BIT; + else + mac_entry->index &= ~NFP_TUN_PRE_TUN_IPV6_BIT; + payload.port_idx = cpu_to_be16(mac_entry->index); /* Copy mac id and vlan to flow - dev may not exist at delete time.
*/ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index 162a1ff1e9d2..4087311f7082 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -1079,15 +1079,17 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb) { int sg_elems = q->lif->qtype_info[IONIC_QTYPE_TXQ].max_sg_elems; struct ionic_tx_stats *stats = q_to_tx_stats(q); + int ndescs; int err; - /* If TSO, need roundup(skb->len/mss) descs */ + /* Each desc is mss long max, so a descriptor for each gso_seg */ if (skb_is_gso(skb)) - return (skb->len / skb_shinfo(skb)->gso_size) + 1; + ndescs = skb_shinfo(skb)->gso_segs; + else + ndescs = 1; - /* If non-TSO, just need 1 desc and nr_frags sg elems */ if (skb_shinfo(skb)->nr_frags <= sg_elems) - return 1; + return ndescs; /* Too many frags, so linearize */ err = skb_linearize(skb); @@ -1096,8 +1098,7 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb) stats->linearize++; - /* Need 1 desc and zero sg elems */ - return 1; + return ndescs; } static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c index 7760a3394e93..7ecb3dfe30bd 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c @@ -1425,6 +1425,7 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter) if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) { vfree(fw_dump->tmpl_hdr); + fw_dump->tmpl_hdr = NULL; if (qlcnic_83xx_md_check_extended_dump_capability(adapter)) extended = !qlcnic_83xx_extend_md_capab(adapter); @@ -1443,6 +1444,8 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter) struct qlcnic_83xx_dump_template_hdr *hdr; hdr = fw_dump->tmpl_hdr; + if (!hdr) + return; hdr->drv_cap_mask = 0x1f; fw_dump->cap_mask = 0x1f; dev_info(&pdev->dev, diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 7aad0ba53372..1df2c002c9f6 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -2350,6 +2350,13 @@ static void rtl_jumbo_config(struct rtl8169_private *tp) if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii) pcie_set_readrq(tp->pci_dev, readrq); + + /* Chip doesn't support pause in jumbo mode */ + linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, + tp->phydev->advertising, !jumbo); + linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + tp->phydev->advertising, !jumbo); + phy_start_aneg(tp->phydev); } DECLARE_RTL_COND(rtl_chipcmd_cond) @@ -4630,8 +4637,6 @@ static int r8169_phy_connect(struct rtl8169_private *tp) if (!tp->supports_gmii) phy_set_max_speed(phydev, SPEED_100); - phy_support_asym_pause(phydev); - phy_attached_info(phydev); return 0; @@ -4646,6 +4651,9 @@ static void rtl8169_down(struct rtl8169_private *tp) rtl8169_update_counters(tp); + pci_clear_master(tp->pci_dev); + rtl_pci_commit(tp); + rtl8169_cleanup(tp, true); rtl_prepare_power_down(tp); @@ -4653,6 +4661,7 @@ static void rtl8169_down(struct rtl8169_private *tp) static void rtl8169_up(struct rtl8169_private *tp) { + pci_set_master(tp->pci_dev); phy_resume(tp->phydev); rtl8169_init_phy(tp); napi_enable(&tp->napi); @@ -5307,8 +5316,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rtl_hw_reset(tp); - 
pci_set_master(pdev); - rc = rtl_alloc_irq(tp); if (rc < 0) { dev_err(&pdev->dev, "Can't allocate interrupt\n"); diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 3c53051bdacf..200785e703c8 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -1715,14 +1715,17 @@ static int netsec_netdev_init(struct net_device *ndev) goto err1; /* set phy power down */ - data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR) | - BMCR_PDOWN; - netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data); + data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR); + netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, + data | BMCR_PDOWN); ret = netsec_reset_hardware(priv, true); if (ret) goto err2; + /* Restore phy power state */ + netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data); + spin_lock_init(&priv->desc_ring[NETSEC_RING_TX].lock); spin_lock_init(&priv->desc_ring[NETSEC_RING_RX].lock); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 6b75cf2603ff..e62efd166ec8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -1214,6 +1214,8 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) plat_dat->init = sun8i_dwmac_init; plat_dat->exit = sun8i_dwmac_exit; plat_dat->setup = sun8i_dwmac_setup; + plat_dat->tx_fifo_size = 4096; + plat_dat->rx_fifo_size = 16384; ret = sun8i_dwmac_set_syscon(&pdev->dev, plat_dat); if (ret) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 208cae344ffa..4749bd0af160 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1380,88 +1380,6 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i) } /** - * stmmac_reinit_rx_buffers - reinit the RX descriptor buffer. - * @priv: driver private structure - * Description: this function is called to re-allocate a receive buffer, perform - * the DMA mapping and init the descriptor. 
- */ -static void stmmac_reinit_rx_buffers(struct stmmac_priv *priv) -{ - u32 rx_count = priv->plat->rx_queues_to_use; - u32 queue; - int i; - - for (queue = 0; queue < rx_count; queue++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; - - for (i = 0; i < priv->dma_rx_size; i++) { - struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; - - if (buf->page) { - page_pool_recycle_direct(rx_q->page_pool, buf->page); - buf->page = NULL; - } - - if (priv->sph && buf->sec_page) { - page_pool_recycle_direct(rx_q->page_pool, buf->sec_page); - buf->sec_page = NULL; - } - } - } - - for (queue = 0; queue < rx_count; queue++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; - - for (i = 0; i < priv->dma_rx_size; i++) { - struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; - struct dma_desc *p; - - if (priv->extend_desc) - p = &((rx_q->dma_erx + i)->basic); - else - p = rx_q->dma_rx + i; - - if (!buf->page) { - buf->page = page_pool_dev_alloc_pages(rx_q->page_pool); - if (!buf->page) - goto err_reinit_rx_buffers; - - buf->addr = page_pool_get_dma_addr(buf->page); - } - - if (priv->sph && !buf->sec_page) { - buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool); - if (!buf->sec_page) - goto err_reinit_rx_buffers; - - buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); - } - - stmmac_set_desc_addr(priv, p, buf->addr); - if (priv->sph) - stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); - else - stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); - if (priv->dma_buf_sz == BUF_SIZE_16KiB) - stmmac_init_desc3(priv, p); - } - } - - return; - -err_reinit_rx_buffers: - do { - while (--i >= 0) - stmmac_free_rx_buffer(priv, queue, i); - - if (queue == 0) - break; - - i = priv->dma_rx_size; - } while (queue-- > 0); -} - -/** * init_dma_rx_desc_rings - init the RX descriptor rings * @dev: net device structure * @flags: gfp flag. @@ -5428,7 +5346,7 @@ int stmmac_resume(struct device *dev) mutex_lock(&priv->lock); stmmac_reset_queues_param(priv); - stmmac_reinit_rx_buffers(priv); + stmmac_free_tx_skbufs(priv); stmmac_clear_descriptors(priv); diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h index 1e966a39967e..aca7f82f6791 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet.h +++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h @@ -504,6 +504,18 @@ static inline u32 axinet_ior_read_mcr(struct axienet_local *lp) return axienet_ior(lp, XAE_MDIO_MCR_OFFSET); } +static inline void axienet_lock_mii(struct axienet_local *lp) +{ + if (lp->mii_bus) + mutex_lock(&lp->mii_bus->mdio_lock); +} + +static inline void axienet_unlock_mii(struct axienet_local *lp) +{ + if (lp->mii_bus) + mutex_unlock(&lp->mii_bus->mdio_lock); +} + /** * axienet_iow - Memory mapped Axi Ethernet register write * @lp: Pointer to axienet local structure diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 3a8775e0ca55..f8f8654ea728 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1053,9 +1053,9 @@ static int axienet_open(struct net_device *ndev) * including the MDIO. MDIO must be disabled before resetting. * Hold MDIO bus lock to avoid MDIO accesses during the reset. 
*/ - mutex_lock(&lp->mii_bus->mdio_lock); + axienet_lock_mii(lp); ret = axienet_device_reset(ndev); - mutex_unlock(&lp->mii_bus->mdio_lock); + axienet_unlock_mii(lp); ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0); if (ret) { @@ -1148,9 +1148,9 @@ static int axienet_stop(struct net_device *ndev) } /* Do a reset to ensure DMA is really stopped */ - mutex_lock(&lp->mii_bus->mdio_lock); + axienet_lock_mii(lp); __axienet_device_reset(lp); - mutex_unlock(&lp->mii_bus->mdio_lock); + axienet_unlock_mii(lp); cancel_work_sync(&lp->dma_err_task); @@ -1709,9 +1709,9 @@ static void axienet_dma_err_handler(struct work_struct *work) * including the MDIO. MDIO must be disabled before resetting. * Hold MDIO bus lock to avoid MDIO accesses during the reset. */ - mutex_lock(&lp->mii_bus->mdio_lock); + axienet_lock_mii(lp); __axienet_device_reset(lp); - mutex_unlock(&lp->mii_bus->mdio_lock); + axienet_unlock_mii(lp); for (i = 0; i < lp->tx_bd_num; i++) { cur_p = &lp->tx_bd_v[i]; @@ -1880,7 +1880,7 @@ static int axienet_probe(struct platform_device *pdev) if (IS_ERR(lp->regs)) { dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n"); ret = PTR_ERR(lp->regs); - goto free_netdev; + goto cleanup_clk; } lp->regs_start = ethres->start; @@ -1958,18 +1958,18 @@ static int axienet_probe(struct platform_device *pdev) break; default: ret = -EINVAL; - goto free_netdev; + goto cleanup_clk; } } else { ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode); if (ret) - goto free_netdev; + goto cleanup_clk; } if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII && lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) { dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n"); ret = -EINVAL; - goto free_netdev; + goto cleanup_clk; } /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ @@ -1982,7 +1982,7 @@ static int axienet_probe(struct platform_device *pdev) dev_err(&pdev->dev, "unable to get DMA resource\n"); of_node_put(np); - goto free_netdev; + goto cleanup_clk; } lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares); @@ -2002,12 +2002,12 @@ static int axienet_probe(struct platform_device *pdev) if (IS_ERR(lp->dma_regs)) { dev_err(&pdev->dev, "could not map DMA regs\n"); ret = PTR_ERR(lp->dma_regs); - goto free_netdev; + goto cleanup_clk; } if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) { dev_err(&pdev->dev, "could not determine irqs\n"); ret = -ENOMEM; - goto free_netdev; + goto cleanup_clk; } /* Autodetect the need for 64-bit DMA pointers. 
@@ -2037,7 +2037,7 @@ static int axienet_probe(struct platform_device *pdev) ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width)); if (ret) { dev_err(&pdev->dev, "No suitable DMA available\n"); - goto free_netdev; + goto cleanup_clk; } /* Check for Ethernet core IRQ (optional) */ @@ -2068,12 +2068,12 @@ static int axienet_probe(struct platform_device *pdev) if (!lp->phy_node) { dev_err(&pdev->dev, "phy-handle required for 1000BaseX/SGMII\n"); ret = -EINVAL; - goto free_netdev; + goto cleanup_mdio; } lp->pcs_phy = of_mdio_find_device(lp->phy_node); if (!lp->pcs_phy) { ret = -EPROBE_DEFER; - goto free_netdev; + goto cleanup_mdio; } lp->phylink_config.pcs_poll = true; } @@ -2087,17 +2087,30 @@ static int axienet_probe(struct platform_device *pdev) if (IS_ERR(lp->phylink)) { ret = PTR_ERR(lp->phylink); dev_err(&pdev->dev, "phylink_create error (%i)\n", ret); - goto free_netdev; + goto cleanup_mdio; } ret = register_netdev(lp->ndev); if (ret) { dev_err(lp->dev, "register_netdev() error (%i)\n", ret); - goto free_netdev; + goto cleanup_phylink; } return 0; +cleanup_phylink: + phylink_destroy(lp->phylink); + +cleanup_mdio: + if (lp->pcs_phy) + put_device(&lp->pcs_phy->dev); + if (lp->mii_bus) + axienet_mdio_teardown(lp); + of_node_put(lp->phy_node); + +cleanup_clk: + clk_disable_unprepare(lp->clk); + free_netdev: free_netdev(ndev); diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 4ac0373326ef..42f31c681846 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -891,6 +891,9 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 sport; int err; + if (!pskb_network_may_pull(skb, sizeof(struct iphdr))) + return -EINVAL; + sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info, geneve->cfg.info.key.tp_dst, sport); @@ -908,8 +911,16 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, info = skb_tunnel_info(skb); if (info) { - info->key.u.ipv4.dst = fl4.saddr; - info->key.u.ipv4.src = fl4.daddr; + struct ip_tunnel_info *unclone; + + unclone = skb_tunnel_info_unclone(skb); + if (unlikely(!unclone)) { + dst_release(&rt->dst); + return -ENOMEM; + } + + unclone->key.u.ipv4.dst = fl4.saddr; + unclone->key.u.ipv4.src = fl4.daddr; } if (!pskb_may_pull(skb, ETH_HLEN)) { @@ -977,6 +988,9 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 sport; int err; + if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr))) + return -EINVAL; + sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info, geneve->cfg.info.key.tp_dst, sport); @@ -993,8 +1007,16 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct ip_tunnel_info *info = skb_tunnel_info(skb); if (info) { - info->key.u.ipv6.dst = fl6.saddr; - info->key.u.ipv6.src = fl6.daddr; + struct ip_tunnel_info *unclone; + + unclone = skb_tunnel_info_unclone(skb); + if (unlikely(!unclone)) { + dst_release(dst); + return -ENOMEM; + } + + unclone->key.u.ipv6.dst = fl6.saddr; + unclone->key.u.ipv6.src = fl6.daddr; } if (!pskb_may_pull(skb, ETH_HLEN)) { diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c index 0dd0ba915ab9..23ee0b14cbfa 100644 --- a/drivers/net/ieee802154/atusb.c +++ b/drivers/net/ieee802154/atusb.c @@ -365,6 +365,7 @@ static int atusb_alloc_urbs(struct atusb *atusb, int n) return -ENOMEM; } usb_anchor_urb(urb, &atusb->idle_urbs); + usb_free_urb(urb); n--; } return 0; diff 
--git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c index 35e35852c25c..d73b03a80ef8 100644 --- a/drivers/net/ipa/ipa_cmd.c +++ b/drivers/net/ipa/ipa_cmd.c @@ -175,21 +175,23 @@ bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem, : field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK); if (mem->offset > offset_max || ipa->mem_offset > offset_max - mem->offset) { - dev_err(dev, "IPv%c %s%s table region offset too large " - "(0x%04x + 0x%04x > 0x%04x)\n", - ipv6 ? '6' : '4', hashed ? "hashed " : "", - route ? "route" : "filter", - ipa->mem_offset, mem->offset, offset_max); + dev_err(dev, "IPv%c %s%s table region offset too large\n", + ipv6 ? '6' : '4', hashed ? "hashed " : "", + route ? "route" : "filter"); + dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n", + ipa->mem_offset, mem->offset, offset_max); + return false; } if (mem->offset > ipa->mem_size || mem->size > ipa->mem_size - mem->offset) { - dev_err(dev, "IPv%c %s%s table region out of range " - "(0x%04x + 0x%04x > 0x%04x)\n", - ipv6 ? '6' : '4', hashed ? "hashed " : "", - route ? "route" : "filter", - mem->offset, mem->size, ipa->mem_size); + dev_err(dev, "IPv%c %s%s table region out of range\n", + ipv6 ? '6' : '4', hashed ? "hashed " : "", + route ? "route" : "filter"); + dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n", + mem->offset, mem->size, ipa->mem_size); + return false; } @@ -205,22 +207,36 @@ static bool ipa_cmd_header_valid(struct ipa *ipa) u32 size_max; u32 size; + /* In ipa_cmd_hdr_init_local_add() we record the offset and size + * of the header table memory area. Make sure the offset and size + * fit in the fields that need to hold them, and that the entire + * range is within the overall IPA memory range. + */ offset_max = field_max(HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK); if (mem->offset > offset_max || ipa->mem_offset > offset_max - mem->offset) { - dev_err(dev, "header table region offset too large " - "(0x%04x + 0x%04x > 0x%04x)\n", - ipa->mem_offset + mem->offset, offset_max); + dev_err(dev, "header table region offset too large\n"); + dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n", + ipa->mem_offset, mem->offset, offset_max); + return false; } size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK); size = ipa->mem[IPA_MEM_MODEM_HEADER].size; size += ipa->mem[IPA_MEM_AP_HEADER].size; - if (mem->offset > ipa->mem_size || size > ipa->mem_size - mem->offset) { - dev_err(dev, "header table region out of range " - "(0x%04x + 0x%04x > 0x%04x)\n", - mem->offset, size, ipa->mem_size); + + if (size > size_max) { + dev_err(dev, "header table region size too large\n"); + dev_err(dev, " (0x%04x > 0x%08x)\n", size, size_max); + + return false; + } + if (size > ipa->mem_size || mem->offset > ipa->mem_size - size) { + dev_err(dev, "header table region out of range\n"); + dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n", + mem->offset, size, ipa->mem_size); + return false; } diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c index 2fc64483f275..e594bf3b600f 100644 --- a/drivers/net/ipa/ipa_qmi.c +++ b/drivers/net/ipa/ipa_qmi.c @@ -249,6 +249,7 @@ static const struct qmi_msg_handler ipa_server_msg_handlers[] = { .decoded_size = IPA_QMI_DRIVER_INIT_COMPLETE_REQ_SZ, .fn = ipa_server_driver_init_complete, }, + { }, }; /* Handle an INIT_DRIVER response message from the modem. 
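 *
 * For illustration only: the "{ }," entries added to the two handler
 * arrays in this file are empty sentinels. The QMI core walks a handler
 * array until it reaches an entry with no callback, so a missing
 * terminator lets the walk run off the end of the table. A minimal
 * standalone sketch of the same sentinel convention, with invented names:
 *
 *	struct handler { int id; void (*fn)(int); };
 *	static void on_ping(int id) { (void)id; }
 *	static const struct handler handlers[] = {
 *		{ .id = 1, .fn = on_ping },
 *		{ },			// all-zero sentinel ends the walk
 *	};
 *	static void dispatch(int id)
 *	{
 *		const struct handler *h;
 *
 *		for (h = handlers; h->fn; h++)
 *			if (h->id == id)
 *				h->fn(id);
 *	}
 *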
*/ @@ -269,6 +270,7 @@ static const struct qmi_msg_handler ipa_client_msg_handlers[] = { .decoded_size = IPA_QMI_INIT_DRIVER_RSP_SZ, .fn = ipa_client_init_driver, }, + { }, }; /* Return a pointer to an init modem driver request structure, which contains diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c index 53282a6d5928..287cccf8f7f4 100644 --- a/drivers/net/phy/bcm-phy-lib.c +++ b/drivers/net/phy/bcm-phy-lib.c @@ -369,7 +369,7 @@ EXPORT_SYMBOL_GPL(bcm_phy_enable_apd); int bcm_phy_set_eee(struct phy_device *phydev, bool enable) { - int val; + int val, mask = 0; /* Enable EEE at PHY level */ val = phy_read_mmd(phydev, MDIO_MMD_AN, BRCM_CL45VEN_EEE_CONTROL); @@ -388,10 +388,17 @@ int bcm_phy_set_eee(struct phy_device *phydev, bool enable) if (val < 0) return val; + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + phydev->supported)) + mask |= MDIO_EEE_1000T; + if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + phydev->supported)) + mask |= MDIO_EEE_100TX; + if (enable) - val |= (MDIO_EEE_100TX | MDIO_EEE_1000T); + val |= mask; else - val &= ~(MDIO_EEE_100TX | MDIO_EEE_1000T); + val &= ~mask; phy_write_mmd(phydev, MDIO_MMD_AN, BCM_CL45VEN_EEE_ADV, (u32)val); diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index fa0be591ae79..82fe5f43f0e9 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -342,6 +342,10 @@ static int bcm54xx_config_init(struct phy_device *phydev) bcm54xx_adjust_rxrefclk(phydev); switch (BRCM_PHY_MODEL(phydev)) { + case PHY_ID_BCM50610: + case PHY_ID_BCM50610M: + err = bcm54xx_config_clock_delay(phydev); + break; case PHY_ID_BCM54210E: err = bcm54210e_config_init(phydev); break; @@ -399,6 +403,11 @@ static int bcm54xx_resume(struct phy_device *phydev) if (ret < 0) return ret; + /* Upon exiting power down, the PHY remains in an internal reset state + * for 40us + */ + fsleep(40); + return bcm54xx_config_init(phydev); } diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index e26a5d663f8a..8018ddf7f316 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -3021,9 +3021,34 @@ static struct phy_driver marvell_drivers[] = { .get_stats = marvell_get_stats, }, { - .phy_id = MARVELL_PHY_ID_88E6390, + .phy_id = MARVELL_PHY_ID_88E6341_FAMILY, .phy_id_mask = MARVELL_PHY_ID_MASK, - .name = "Marvell 88E6390", + .name = "Marvell 88E6341 Family", + /* PHY_GBIT_FEATURES */ + .flags = PHY_POLL_CABLE_TEST, + .probe = m88e1510_probe, + .config_init = marvell_config_init, + .config_aneg = m88e6390_config_aneg, + .read_status = marvell_read_status, + .config_intr = marvell_config_intr, + .handle_interrupt = marvell_handle_interrupt, + .resume = genphy_resume, + .suspend = genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, + .get_sset_count = marvell_get_sset_count, + .get_strings = marvell_get_strings, + .get_stats = marvell_get_stats, + .get_tunable = m88e1540_get_tunable, + .set_tunable = m88e1540_set_tunable, + .cable_test_start = marvell_vct7_cable_test_start, + .cable_test_tdr_start = marvell_vct5_cable_test_tdr_start, + .cable_test_get_status = marvell_vct7_cable_test_get_status, + }, + { + .phy_id = MARVELL_PHY_ID_88E6390_FAMILY, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88E6390 Family", /* PHY_GBIT_FEATURES */ .flags = PHY_POLL_CABLE_TEST, .probe = m88e6390_probe, @@ -3107,7 +3132,8 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = { { MARVELL_PHY_ID_88E1540, MARVELL_PHY_ID_MASK }, { 
MARVELL_PHY_ID_88E1545, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK }, - { MARVELL_PHY_ID_88E6390, MARVELL_PHY_ID_MASK }, + { MARVELL_PHY_ID_88E6341_FAMILY, MARVELL_PHY_ID_MASK }, + { MARVELL_PHY_ID_88E6390_FAMILY, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1340S, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1548P, MARVELL_PHY_ID_MASK }, { } diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 053c92e02cd8..dc2800beacc3 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -476,7 +476,7 @@ static void phylink_major_config(struct phylink *pl, bool restart, err = pl->mac_ops->mac_finish(pl->config, pl->cur_link_an_mode, state->interface); if (err < 0) - phylink_err(pl, "mac_prepare failed: %pe\n", + phylink_err(pl, "mac_finish failed: %pe\n", ERR_PTR(err)); } } diff --git a/drivers/net/tun.c b/drivers/net/tun.c index fc86da7f1628..4cf38be26dc9 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -69,6 +69,14 @@ #include <linux/bpf.h> #include <linux/bpf_trace.h> #include <linux/mutex.h> +#include <linux/ieee802154.h> +#include <linux/if_ltalk.h> +#include <uapi/linux/if_fddi.h> +#include <uapi/linux/if_hippi.h> +#include <uapi/linux/if_fc.h> +#include <net/ax25.h> +#include <net/rose.h> +#include <net/6lowpan.h> #include <linux/uaccess.h> #include <linux/proc_fs.h> @@ -2919,6 +2927,45 @@ static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p, return __tun_set_ebpf(tun, prog_p, prog); } +/* Return correct value for tun->dev->addr_len based on tun->dev->type. */ +static unsigned char tun_get_addr_len(unsigned short type) +{ + switch (type) { + case ARPHRD_IP6GRE: + case ARPHRD_TUNNEL6: + return sizeof(struct in6_addr); + case ARPHRD_IPGRE: + case ARPHRD_TUNNEL: + case ARPHRD_SIT: + return 4; + case ARPHRD_ETHER: + return ETH_ALEN; + case ARPHRD_IEEE802154: + case ARPHRD_IEEE802154_MONITOR: + return IEEE802154_EXTENDED_ADDR_LEN; + case ARPHRD_PHONET_PIPE: + case ARPHRD_PPP: + case ARPHRD_NONE: + return 0; + case ARPHRD_6LOWPAN: + return EUI64_ADDR_LEN; + case ARPHRD_FDDI: + return FDDI_K_ALEN; + case ARPHRD_HIPPI: + return HIPPI_ALEN; + case ARPHRD_IEEE802: + return FC_ALEN; + case ARPHRD_ROSE: + return ROSE_ADDR_LEN; + case ARPHRD_NETROM: + return AX25_ADDR_LEN; + case ARPHRD_LOCALTLK: + return LTALK_ALEN; + default: + return 0; + } +} + static long __tun_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg, int ifreq_len) { @@ -3082,6 +3129,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, break; } tun->dev->type = (int) arg; + tun->dev->addr_len = tun_get_addr_len(tun->dev->type); netif_info(tun, drv, tun->dev, "linktype set to %d\n", tun->dev->type); call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index 02e6bbb17b15..8d1f69dad603 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -387,6 +387,8 @@ static int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *i err = register_netdev(dev); if (err) { + /* Set disconnected flag so that disconnect() returns early. 
*/ + pnd->disconnected = 1; usb_driver_release_interface(&usbpn_driver, data_intf); goto out; } diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 31d51346786a..9bc58e64b5b7 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -611,7 +611,7 @@ static struct hso_serial *get_serial_by_index(unsigned index) return serial; } -static int get_free_serial_index(void) +static int obtain_minor(struct hso_serial *serial) { int index; unsigned long flags; @@ -619,8 +619,10 @@ static int get_free_serial_index(void) spin_lock_irqsave(&serial_table_lock, flags); for (index = 0; index < HSO_SERIAL_TTY_MINORS; index++) { if (serial_table[index] == NULL) { + serial_table[index] = serial->parent; + serial->minor = index; spin_unlock_irqrestore(&serial_table_lock, flags); - return index; + return 0; } } spin_unlock_irqrestore(&serial_table_lock, flags); @@ -629,15 +631,12 @@ static int get_free_serial_index(void) return -1; } -static void set_serial_by_index(unsigned index, struct hso_serial *serial) +static void release_minor(struct hso_serial *serial) { unsigned long flags; spin_lock_irqsave(&serial_table_lock, flags); - if (serial) - serial_table[index] = serial->parent; - else - serial_table[index] = NULL; + serial_table[serial->minor] = NULL; spin_unlock_irqrestore(&serial_table_lock, flags); } @@ -2230,6 +2229,7 @@ static int hso_stop_serial_device(struct hso_device *hso_dev) static void hso_serial_tty_unregister(struct hso_serial *serial) { tty_unregister_device(tty_drv, serial->minor); + release_minor(serial); } static void hso_serial_common_free(struct hso_serial *serial) @@ -2253,24 +2253,22 @@ static void hso_serial_common_free(struct hso_serial *serial) static int hso_serial_common_create(struct hso_serial *serial, int num_urbs, int rx_size, int tx_size) { - int minor; int i; tty_port_init(&serial->port); - minor = get_free_serial_index(); - if (minor < 0) + if (obtain_minor(serial)) goto exit2; /* register our minor number */ serial->parent->dev = tty_port_register_device_attr(&serial->port, - tty_drv, minor, &serial->parent->interface->dev, + tty_drv, serial->minor, &serial->parent->interface->dev, serial->parent, hso_serial_dev_groups); - if (IS_ERR(serial->parent->dev)) + if (IS_ERR(serial->parent->dev)) { + release_minor(serial); goto exit2; + } - /* fill in specific data for later use */ - serial->minor = minor; serial->magic = HSO_SERIAL_MAGIC; spin_lock_init(&serial->serial_lock); serial->num_rx_urbs = num_urbs; @@ -2667,9 +2665,6 @@ static struct hso_device *hso_create_bulk_serial_device( serial->write_data = hso_std_serial_write_data; - /* and record this serial */ - set_serial_by_index(serial->minor, serial); - /* setup the proc dirs and files if needed */ hso_log_port(hso_dev); @@ -2726,9 +2721,6 @@ struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface, serial->shared_int->ref_count++; mutex_unlock(&serial->shared_int->shared_int_lock); - /* and record this serial */ - set_serial_by_index(serial->minor, serial); - /* setup the proc dirs and files if needed */ hso_log_port(hso_dev); @@ -3113,7 +3105,6 @@ static void hso_free_interface(struct usb_interface *interface) cancel_work_sync(&serial_table[i]->async_get_intf); hso_serial_tty_unregister(serial); kref_put(&serial_table[i]->ref, hso_serial_ref_free); - set_serial_by_index(i, NULL); } } diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 90f1c0200042..20fb5638ac65 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -6553,7 +6553,10 @@ static int 
rtl_ops_init(struct r8152 *tp) ops->in_nway = rtl8153_in_nway; ops->hw_phy_cfg = r8153_hw_phy_cfg; ops->autosuspend_en = rtl8153_runtime_enable; - tp->rx_buf_sz = 32 * 1024; + if (tp->udev->speed < USB_SPEED_SUPER) + tp->rx_buf_sz = 16 * 1024; + else + tp->rx_buf_sz = 32 * 1024; tp->eee_en = true; tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; break; diff --git a/drivers/net/veth.c b/drivers/net/veth.c index aa1a66ad2ce5..34e49c75db42 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -302,8 +302,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) if (rxq < rcv->real_num_rx_queues) { rq = &rcv_priv->rq[rxq]; rcv_xdp = rcu_access_pointer(rq->xdp_prog); - if (rcv_xdp) - skb_record_rx_queue(skb, rxq); + skb_record_rx_queue(skb, rxq); } skb_tx_timestamp(skb); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 82e520d2cb12..0824e6999e49 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -406,9 +406,13 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, offset += hdr_padded_len; p += hdr_padded_len; - copy = len; - if (copy > skb_tailroom(skb)) - copy = skb_tailroom(skb); + /* Copy all frame if it fits skb->head, otherwise + * we let virtio_net_hdr_to_skb() and GRO pull headers as needed. + */ + if (len <= skb_tailroom(skb)) + copy = len; + else + copy = ETH_HLEN + metasize; skb_put_data(skb, p, copy); if (metasize) { diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 6d9130859c55..503e2fd7ce51 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -471,9 +471,8 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb, skb_dst_drop(skb); - /* if dst.dev is loopback or the VRF device again this is locally - * originated traffic destined to a local address. Short circuit - * to Rx path + /* if dst.dev is the VRF device again this is locally originated traffic + * destined to a local address. Short circuit to Rx path. */ if (dst->dev == dev) return vrf_local_xmit(skb, dev, dst); @@ -547,9 +546,8 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb, skb_dst_drop(skb); - /* if dst.dev is loopback or the VRF device again this is locally - * originated traffic destined to a local address. Short circuit - * to Rx path + /* if dst.dev is the VRF device again this is locally originated traffic + * destined to a local address. Short circuit to Rx path. 
*/ if (rt->dst.dev == vrf_dev) return vrf_local_xmit(skb, vrf_dev, &rt->dst); diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 666dd201c3d5..53dbc67e8a34 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2725,12 +2725,17 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, goto tx_error; } else if (err) { if (info) { + struct ip_tunnel_info *unclone; struct in_addr src, dst; + unclone = skb_tunnel_info_unclone(skb); + if (unlikely(!unclone)) + goto tx_error; + src = remote_ip.sin.sin_addr; dst = local_ip.sin.sin_addr; - info->key.u.ipv4.src = src.s_addr; - info->key.u.ipv4.dst = dst.s_addr; + unclone->key.u.ipv4.src = src.s_addr; + unclone->key.u.ipv4.dst = dst.s_addr; } vxlan_encap_bypass(skb, vxlan, vxlan, vni, false); dst_release(ndst); @@ -2781,12 +2786,17 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, goto tx_error; } else if (err) { if (info) { + struct ip_tunnel_info *unclone; struct in6_addr src, dst; + unclone = skb_tunnel_info_unclone(skb); + if (unlikely(!unclone)) + goto tx_error; + src = remote_ip.sin6.sin6_addr; dst = local_ip.sin6.sin6_addr; - info->key.u.ipv6.src = src; - info->key.u.ipv6.dst = dst; + unclone->key.u.ipv6.src = src; + unclone->key.u.ipv6.dst = dst; } vxlan_encap_bypass(skb, vxlan, vxlan, vni, false); diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index 0720f5f92caa..4d9dc7d15908 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c @@ -415,7 +415,7 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev) if (pad > 0) { /* Pad the frame with zeros */ if (__skb_pad(skb, pad, false)) - goto drop; + goto out; skb_put(skb, pad); } } @@ -448,8 +448,9 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; drop: - dev->stats.tx_dropped++; kfree_skb(skb); +out: + dev->stats.tx_dropped++; return NETDEV_TX_OK; } diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c index 4aaa6388b9ee..5a6a945f6c81 100644 --- a/drivers/net/wan/hdlc_x25.c +++ b/drivers/net/wan/hdlc_x25.c @@ -23,6 +23,8 @@ struct x25_state { x25_hdlc_proto settings; + bool up; + spinlock_t up_lock; /* Protects "up" */ }; static int x25_ioctl(struct net_device *dev, struct ifreq *ifr); @@ -104,6 +106,8 @@ static void x25_data_transmit(struct net_device *dev, struct sk_buff *skb) static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev) { + hdlc_device *hdlc = dev_to_hdlc(dev); + struct x25_state *x25st = state(hdlc); int result; /* There should be a pseudo header of 1 byte added by upper layers. 
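The hdlc_x25 hunks around this point gate transmit and receive on an "up" flag taken under up_lock, so frames racing with x25_close() are dropped instead of reaching a LAPB instance that is being torn down. A hedged userspace analogue of that guard, with a pthread mutex standing in for the bottom-half spinlock and all names invented:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct proto {
	bool up;			/* protected by up_lock */
	pthread_mutex_t up_lock;
};

/* Mirrors the shape of the patched xmit path: check the flag under the
 * lock and drop early when the protocol is down. */
static void xmit(struct proto *p, const char *frame)
{
	pthread_mutex_lock(&p->up_lock);
	if (!p->up) {
		pthread_mutex_unlock(&p->up_lock);
		printf("dropped: %s\n", frame);
		return;
	}
	printf("queued: %s\n", frame);
	pthread_mutex_unlock(&p->up_lock);
}

int main(void)
{
	struct proto p = { .up = false,
			   .up_lock = PTHREAD_MUTEX_INITIALIZER };

	xmit(&p, "early frame");	/* dropped: not up yet */

	pthread_mutex_lock(&p.up_lock);	/* the open path flips the flag */
	p.up = true;
	pthread_mutex_unlock(&p.up_lock);

	xmit(&p, "later frame");	/* queued */
	return 0;
}

In the driver the close path clears the flag under the same lock before calling lapb_unregister(), which is the ordering that actually closes the race.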
@@ -114,11 +118,19 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } + spin_lock_bh(&x25st->up_lock); + if (!x25st->up) { + spin_unlock_bh(&x25st->up_lock); + kfree_skb(skb); + return NETDEV_TX_OK; + } + switch (skb->data[0]) { case X25_IFACE_DATA: /* Data to be transmitted */ skb_pull(skb, 1); if ((result = lapb_data_request(dev, skb)) != LAPB_OK) dev_kfree_skb(skb); + spin_unlock_bh(&x25st->up_lock); return NETDEV_TX_OK; case X25_IFACE_CONNECT: @@ -147,6 +159,7 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev) break; } + spin_unlock_bh(&x25st->up_lock); dev_kfree_skb(skb); return NETDEV_TX_OK; } @@ -164,6 +177,7 @@ static int x25_open(struct net_device *dev) .data_transmit = x25_data_transmit, }; hdlc_device *hdlc = dev_to_hdlc(dev); + struct x25_state *x25st = state(hdlc); struct lapb_parms_struct params; int result; @@ -190,6 +204,10 @@ static int x25_open(struct net_device *dev) if (result != LAPB_OK) return -EINVAL; + spin_lock_bh(&x25st->up_lock); + x25st->up = true; + spin_unlock_bh(&x25st->up_lock); + return 0; } @@ -197,6 +215,13 @@ static int x25_open(struct net_device *dev) static void x25_close(struct net_device *dev) { + hdlc_device *hdlc = dev_to_hdlc(dev); + struct x25_state *x25st = state(hdlc); + + spin_lock_bh(&x25st->up_lock); + x25st->up = false; + spin_unlock_bh(&x25st->up_lock); + lapb_unregister(dev); } @@ -205,15 +230,28 @@ static void x25_close(struct net_device *dev) static int x25_rx(struct sk_buff *skb) { struct net_device *dev = skb->dev; + hdlc_device *hdlc = dev_to_hdlc(dev); + struct x25_state *x25st = state(hdlc); if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { dev->stats.rx_dropped++; return NET_RX_DROP; } - if (lapb_data_received(dev, skb) == LAPB_OK) + spin_lock_bh(&x25st->up_lock); + if (!x25st->up) { + spin_unlock_bh(&x25st->up_lock); + kfree_skb(skb); + dev->stats.rx_dropped++; + return NET_RX_DROP; + } + + if (lapb_data_received(dev, skb) == LAPB_OK) { + spin_unlock_bh(&x25st->up_lock); return NET_RX_SUCCESS; + } + spin_unlock_bh(&x25st->up_lock); dev->stats.rx_errors++; dev_kfree_skb_any(skb); return NET_RX_DROP; @@ -298,6 +336,8 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr) return result; memcpy(&state(hdlc)->settings, &new_settings, size); + state(hdlc)->up = false; + spin_lock_init(&state(hdlc)->up_lock); /* There's no header_ops so hard_header_len should be 0. */ dev->hard_header_len = 0; diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c index 71e2ada86793..72e2e71aac0e 100644 --- a/drivers/net/wireless/ath/ath9k/beacon.c +++ b/drivers/net/wireless/ath/ath9k/beacon.c @@ -251,7 +251,7 @@ void ath9k_beacon_ensure_primary_slot(struct ath_softc *sc) int first_slot = ATH_BCBUF; int slot; - tasklet_disable(&sc->bcon_tasklet); + tasklet_disable_in_atomic(&sc->bcon_tasklet); /* Find first taken slot. 
*/ for (slot = 0; slot < ATH_BCBUF; slot++) { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c index 6d30a0fcecea..34cd8a7401fe 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c @@ -2439,7 +2439,7 @@ void brcmf_p2p_ifp_removed(struct brcmf_if *ifp, bool locked) vif = ifp->vif; cfg = wdev_to_cfg(&vif->wdev); cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL; - if (locked) { + if (!locked) { rtnl_lock(); wiphy_lock(cfg->wiphy); cfg80211_unregister_wdev(&vif->wdev); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c index 3dbc6f3f92cc..231d2517f398 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014 Intel Corporation + * Copyright (C) 2005-2014, 2021 Intel Corporation * Copyright (C) 2015-2017 Intel Deutschland GmbH */ #include <linux/sched.h> @@ -26,7 +26,7 @@ bool iwl_notification_wait(struct iwl_notif_wait_data *notif_wait, if (!list_empty(¬if_wait->notif_waits)) { struct iwl_notification_wait *w; - spin_lock(¬if_wait->notif_wait_lock); + spin_lock_bh(¬if_wait->notif_wait_lock); list_for_each_entry(w, ¬if_wait->notif_waits, list) { int i; bool found = false; @@ -59,7 +59,7 @@ bool iwl_notification_wait(struct iwl_notif_wait_data *notif_wait, triggered = true; } } - spin_unlock(¬if_wait->notif_wait_lock); + spin_unlock_bh(¬if_wait->notif_wait_lock); } return triggered; @@ -70,10 +70,10 @@ void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait) { struct iwl_notification_wait *wait_entry; - spin_lock(¬if_wait->notif_wait_lock); + spin_lock_bh(¬if_wait->notif_wait_lock); list_for_each_entry(wait_entry, ¬if_wait->notif_waits, list) wait_entry->aborted = true; - spin_unlock(¬if_wait->notif_wait_lock); + spin_unlock_bh(¬if_wait->notif_wait_lock); wake_up_all(¬if_wait->notif_waitq); } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 75f99ff7f908..c4f5da76f1c0 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -414,6 +414,7 @@ struct iwl_cfg { #define IWL_CFG_MAC_TYPE_QNJ 0x36 #define IWL_CFG_MAC_TYPE_SO 0x37 #define IWL_CFG_MAC_TYPE_SNJ 0x42 +#define IWL_CFG_MAC_TYPE_SOF 0x43 #define IWL_CFG_MAC_TYPE_MA 0x44 #define IWL_CFG_RF_TYPE_TH 0x105 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index af684f80b0cc..c5a1e84dc1ab 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -232,7 +232,7 @@ enum iwl_reg_capa_flags_v2 { REG_CAPA_V2_MCS_9_ALLOWED = BIT(6), REG_CAPA_V2_WEATHER_DISABLED = BIT(7), REG_CAPA_V2_40MHZ_ALLOWED = BIT(8), - REG_CAPA_V2_11AX_DISABLED = BIT(13), + REG_CAPA_V2_11AX_DISABLED = BIT(10), }; /* diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 130760572262..34ddef97b099 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -1786,10 +1786,13 @@ static ssize_t iwl_dbgfs_rfi_freq_table_write(struct iwl_mvm *mvm, char *buf, return -EINVAL; /* value zero triggers re-sending the default 
table to the device */ - if (!op_id) + if (!op_id) { + mutex_lock(&mvm->mutex); ret = iwl_rfi_send_config_cmd(mvm, NULL); - else + mutex_unlock(&mvm->mutex); + } else { ret = -EOPNOTSUPP; /* in the future a new table will be added */ + } return ret ?: count; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c index 873919048143..0b818067067c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2020 Intel Corporation + * Copyright (C) 2020 - 2021 Intel Corporation */ #include "mvm.h" @@ -66,6 +66,8 @@ int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_t if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RFIM_SUPPORT)) return -EOPNOTSUPP; + lockdep_assert_held(&mvm->mutex); + /* in case no table is passed, use the default one */ if (!rfi_table) { memcpy(cmd.table, iwl_rfi_table, sizeof(cmd.table)); @@ -75,9 +77,7 @@ int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_t cmd.oem = 1; } - mutex_lock(&mvm->mutex); ret = iwl_mvm_send_cmd(mvm, &hcmd); - mutex_unlock(&mvm->mutex); if (ret) IWL_ERR(mvm, "Failed to send RFI config cmd %d\n", ret); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index c21736f80c29..af5a6dd81c41 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -272,10 +272,10 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, rx_status->chain_signal[2] = S8_MIN; } -static int iwl_mvm_rx_mgmt_crypto(struct ieee80211_sta *sta, - struct ieee80211_hdr *hdr, - struct iwl_rx_mpdu_desc *desc, - u32 status) +static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta, + struct ieee80211_hdr *hdr, + struct iwl_rx_mpdu_desc *desc, + u32 status) { struct iwl_mvm_sta *mvmsta; struct iwl_mvm_vif *mvmvif; @@ -285,6 +285,9 @@ static int iwl_mvm_rx_mgmt_crypto(struct ieee80211_sta *sta, u32 len = le16_to_cpu(desc->mpdu_len); const u8 *frame = (void *)hdr; + if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) == IWL_RX_MPDU_STATUS_SEC_NONE) + return 0; + /* * For non-beacon, we don't really care. 
But beacons may * be filtered out, and we thus need the firmware's replay @@ -356,6 +359,10 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta, IWL_RX_MPDU_STATUS_SEC_UNKNOWN && !mvm->monitor_on) return -1; + if (unlikely(ieee80211_is_mgmt(hdr->frame_control) && + !ieee80211_has_protected(hdr->frame_control))) + return iwl_mvm_rx_mgmt_prot(sta, hdr, desc, status); + if (!ieee80211_has_protected(hdr->frame_control) || (status & IWL_RX_MPDU_STATUS_SEC_MASK) == IWL_RX_MPDU_STATUS_SEC_NONE) @@ -411,7 +418,7 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta, stats->flag |= RX_FLAG_DECRYPTED; return 0; case RX_MPDU_RES_STATUS_SEC_CMAC_GMAC_ENC: - return iwl_mvm_rx_mgmt_crypto(sta, hdr, desc, status); + break; default: /* * Sometimes we can get frames that were not decrypted diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c index 8fba190e84cf..cecc32e7dbe8 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2021 Intel Corporation */ #include "iwl-trans.h" #include "iwl-fh.h" @@ -75,15 +75,6 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, const struct fw_img *fw) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ | - u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC, - CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) | - u32_encode_bits(250, - CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) | - CSR_LTR_LONG_VAL_AD_SNOOP_REQ | - u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC, - CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) | - u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL); struct iwl_context_info_gen3 *ctxt_info_gen3; struct iwl_prph_scratch *prph_scratch; struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl; @@ -217,26 +208,6 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL, CSR_AUTO_FUNC_BOOT_ENA); - /* - * To workaround hardware latency issues during the boot process, - * initialize the LTR to ~250 usec (see ltr_val above). - * The firmware initializes this again later (to a smaller value). 
- */ - if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 || - trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) && - !trans->trans_cfg->integrated) { - iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val); - } else if (trans->trans_cfg->integrated && - trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) { - iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL); - iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val); - } - - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) - iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1); - else - iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT); - return 0; err_free_ctxt_info: diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c index d1bb273d6b6d..74ce31fdf45e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2021 Intel Corporation */ #include "iwl-trans.h" #include "iwl-fh.h" @@ -240,7 +240,6 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, /* kick FW self load */ iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr); - iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1); /* Context info will be released upon alive or failure to get one */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index ffaf973dae94..558a0b2ef0fc 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -592,6 +592,7 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_DEV_INFO(0x4DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL), IWL_DEV_INFO(0x4DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x4DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0x4DF0, 0x6074, iwl_ax201_cfg_qu_hr, NULL), /* So with HR */ IWL_DEV_INFO(0x2725, 0x0090, iwlax211_2ax_cfg_so_gf_a0, NULL), @@ -1040,7 +1041,31 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_so_a0_hr_a0, iwl_ax201_name) + iwl_cfg_so_a0_hr_a0, iwl_ax201_name), + +/* So-F with Hr */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + iwl_cfg_so_a0_hr_a0, iwl_ax203_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + iwl_cfg_so_a0_hr_a0, iwl_ax101_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + iwl_cfg_so_a0_hr_a0, iwl_ax201_name), + +/* So-F with Gf */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name), #endif /* CONFIG_IWLMVM */ }; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index 497ef3405da3..94ffc1ae484d 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ 
b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -266,6 +266,34 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr) mutex_unlock(&trans_pcie->mutex); } +static void iwl_pcie_set_ltr(struct iwl_trans *trans) +{ + u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ | + u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC, + CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) | + u32_encode_bits(250, + CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) | + CSR_LTR_LONG_VAL_AD_SNOOP_REQ | + u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC, + CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) | + u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL); + + /* + * To workaround hardware latency issues during the boot process, + * initialize the LTR to ~250 usec (see ltr_val above). + * The firmware initializes this again later (to a smaller value). + */ + if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 || + trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) && + !trans->trans_cfg->integrated) { + iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val); + } else if (trans->trans_cfg->integrated && + trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) { + iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL); + iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val); + } +} + int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, const struct fw_img *fw, bool run_in_rfkill) { @@ -332,6 +360,13 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, if (ret) goto out; + iwl_pcie_set_ltr(trans); + + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1); + else + iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1); + /* re-check RF-Kill state since we may have missed the interrupt */ hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); if (hw_rfkill && !run_in_rfkill) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 381e8f90b6f2..7ae32491b5da 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -928,6 +928,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, u32 cmd_pos; const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; + unsigned long flags; if (WARN(!trans->wide_cmd_header && group_id > IWL_ALWAYS_LONG_GROUP, @@ -1011,10 +1012,10 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, goto free_dup_buf; } - spin_lock_bh(&txq->lock); + spin_lock_irqsave(&txq->lock, flags); if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 
2 : 1)) { - spin_unlock_bh(&txq->lock); + spin_unlock_irqrestore(&txq->lock, flags); IWL_ERR(trans, "No space in command queue\n"); iwl_op_mode_cmd_queue_full(trans->op_mode); @@ -1174,7 +1175,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, unlock_reg: spin_unlock(&trans_pcie->reg_lock); out: - spin_unlock_bh(&txq->lock); + spin_unlock_irqrestore(&txq->lock, flags); free_dup_buf: if (idx < 0) kfree(dup_buf); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h index 18980bb32dee..6dad7f6ab09d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h @@ -135,10 +135,10 @@ #define MT_WTBLON_TOP_BASE 0x34000 #define MT_WTBLON_TOP(ofs) (MT_WTBLON_TOP_BASE + (ofs)) -#define MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(0x0) +#define MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(0x200) #define MT_WTBLON_TOP_WDUCR_GROUP GENMASK(2, 0) -#define MT_WTBL_UPDATE MT_WTBLON_TOP(0x030) +#define MT_WTBL_UPDATE MT_WTBLON_TOP(0x230) #define MT_WTBL_UPDATE_WLAN_IDX GENMASK(9, 0) #define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(12) #define MT_WTBL_UPDATE_BUSY BIT(31) diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c index c878097f0dda..1df959532c7d 100644 --- a/drivers/net/wireless/virt_wifi.c +++ b/drivers/net/wireless/virt_wifi.c @@ -12,6 +12,7 @@ #include <net/cfg80211.h> #include <net/rtnetlink.h> #include <linux/etherdevice.h> +#include <linux/math64.h> #include <linux/module.h> static struct wiphy *common_wiphy; @@ -168,11 +169,11 @@ static void virt_wifi_scan_result(struct work_struct *work) scan_result.work); struct wiphy *wiphy = priv_to_wiphy(priv); struct cfg80211_scan_info scan_info = { .aborted = false }; + u64 tsf = div_u64(ktime_get_boottime_ns(), 1000); informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz, CFG80211_BSS_FTYPE_PRESP, - fake_router_bssid, - ktime_get_boottime_ns(), + fake_router_bssid, tsf, WLAN_CAPABILITY_ESS, 0, (void *)&ssid, sizeof(ssid), DBM_TO_MBM(-50), GFP_KERNEL); diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index a5439c130130..d24b7a7993aa 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -824,11 +824,15 @@ static void connect(struct backend_info *be) xenvif_carrier_on(be->vif); unregister_hotplug_status_watch(be); - err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL, - hotplug_status_changed, - "%s/%s", dev->nodename, "hotplug-status"); - if (!err) + if (xenbus_exists(XBT_NIL, dev->nodename, "hotplug-status")) { + err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, + NULL, hotplug_status_changed, + "%s/%s", dev->nodename, + "hotplug-status"); + if (err) + goto err; be->have_hotplug_status_watch = 1; + } netif_tx_wake_all_queues(be->vif->dev); diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index 48f0985ca8a0..3a777d0073b7 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -631,16 +631,14 @@ void nvdimm_check_and_set_ro(struct gendisk *disk) struct nd_region *nd_region = to_nd_region(dev->parent); int disk_ro = get_disk_ro(disk); - /* - * Upgrade to read-only if the region is read-only preserve as - * read-only if the disk is already read-only. 
- */ - if (disk_ro || nd_region->ro == disk_ro) + /* catch the disk up with the region ro state */ + if (disk_ro == nd_region->ro) return; - dev_info(dev, "%s read-only, marking %s read-only\n", - dev_name(&nd_region->dev), disk->disk_name); - set_disk_ro(disk, 1); + dev_info(dev, "%s read-%s, marking %s read-%s\n", + dev_name(&nd_region->dev), nd_region->ro ? "only" : "write", + disk->disk_name, nd_region->ro ? "only" : "write"); + set_disk_ro(disk, nd_region->ro); } EXPORT_SYMBOL(nvdimm_check_and_set_ro); diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index b8a85bfb2e95..7daac795db39 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -26,6 +26,7 @@ #include <linux/mm.h> #include <asm/cacheflush.h> #include "pmem.h" +#include "btt.h" #include "pfn.h" #include "nd.h" @@ -585,7 +586,7 @@ static void nd_pmem_shutdown(struct device *dev) nvdimm_flush(to_nd_region(dev->parent), NULL); } -static void nd_pmem_notify(struct device *dev, enum nvdimm_event event) +static void pmem_revalidate_poison(struct device *dev) { struct nd_region *nd_region; resource_size_t offset = 0, end_trunc = 0; @@ -595,9 +596,6 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event) struct range range; struct kernfs_node *bb_state; - if (event != NVDIMM_REVALIDATE_POISON) - return; - if (is_nd_btt(dev)) { struct nd_btt *nd_btt = to_nd_btt(dev); @@ -635,6 +633,37 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event) sysfs_notify_dirent(bb_state); } +static void pmem_revalidate_region(struct device *dev) +{ + struct pmem_device *pmem; + + if (is_nd_btt(dev)) { + struct nd_btt *nd_btt = to_nd_btt(dev); + struct btt *btt = nd_btt->btt; + + nvdimm_check_and_set_ro(btt->btt_disk); + return; + } + + pmem = dev_get_drvdata(dev); + nvdimm_check_and_set_ro(pmem->disk); +} + +static void nd_pmem_notify(struct device *dev, enum nvdimm_event event) +{ + switch (event) { + case NVDIMM_REVALIDATE_POISON: + pmem_revalidate_poison(dev); + break; + case NVDIMM_REVALIDATE_REGION: + pmem_revalidate_region(dev); + break; + default: + dev_WARN_ONCE(dev, 1, "notify: unknown event: %d\n", event); + break; + } +} + MODULE_ALIAS("pmem"); MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO); MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM); diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index ef23119db574..9ccf3d608799 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -518,6 +518,12 @@ static ssize_t read_only_show(struct device *dev, return sprintf(buf, "%d\n", nd_region->ro); } +static int revalidate_read_only(struct device *dev, void *data) +{ + nd_device_notify(dev, NVDIMM_REVALIDATE_REGION); + return 0; +} + static ssize_t read_only_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { @@ -529,6 +535,7 @@ static ssize_t read_only_store(struct device *dev, return rc; nd_region->ro = ro; + device_for_each_child(dev, NULL, revalidate_read_only); return len; } static DEVICE_ATTR_RW(read_only); @@ -1239,6 +1246,11 @@ int nvdimm_has_flush(struct nd_region *nd_region) || !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API)) return -ENXIO; + /* Test if an explicit flush function is defined */ + if (test_bit(ND_REGION_ASYNC, &nd_region->flags) && nd_region->flush) + return 1; + + /* Test if any flush hints for the region are available */ for (i = 0; i < nd_region->ndr_mappings; i++) { struct nd_mapping *nd_mapping = &nd_region->mapping[i]; struct nvdimm *nvdimm = nd_mapping->nvdimm; @@ -1249,8 +1261,8 @@ int 
nvdimm_has_flush(struct nd_region *nd_region) } /* - * The platform defines dimm devices without hints, assume - * platform persistence mechanism like ADR + * The platform defines dimm devices without hints nor explicit flush, + * assume platform persistence mechanism like ADR */ return 0; } diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index dcc1dd96911a..adb26aff481d 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -205,7 +205,7 @@ static void populate_properties(const void *blob, *pprev = NULL; } -static bool populate_node(const void *blob, +static int populate_node(const void *blob, int offset, void **mem, struct device_node *dad, @@ -214,24 +214,24 @@ static bool populate_node(const void *blob, { struct device_node *np; const char *pathp; - unsigned int l, allocl; + int len; - pathp = fdt_get_name(blob, offset, &l); + pathp = fdt_get_name(blob, offset, &len); if (!pathp) { *pnp = NULL; - return false; + return len; } - allocl = ++l; + len++; - np = unflatten_dt_alloc(mem, sizeof(struct device_node) + allocl, + np = unflatten_dt_alloc(mem, sizeof(struct device_node) + len, __alignof__(struct device_node)); if (!dryrun) { char *fn; of_node_init(np); np->full_name = fn = ((char *)np) + sizeof(*np); - memcpy(fn, pathp, l); + memcpy(fn, pathp, len); if (dad != NULL) { np->parent = dad; @@ -295,6 +295,7 @@ static int unflatten_dt_nodes(const void *blob, struct device_node *nps[FDT_MAX_DEPTH]; void *base = mem; bool dryrun = !base; + int ret; if (nodepp) *nodepp = NULL; @@ -322,9 +323,10 @@ static int unflatten_dt_nodes(const void *blob, !of_fdt_device_is_available(blob, offset)) continue; - if (!populate_node(blob, offset, &mem, nps[depth], - &nps[depth+1], dryrun)) - return mem - base; + ret = populate_node(blob, offset, &mem, nps[depth], + &nps[depth+1], dryrun); + if (ret < 0) + return ret; if (!dryrun && nodepp && !*nodepp) *nodepp = nps[depth+1]; @@ -372,6 +374,10 @@ void *__unflatten_device_tree(const void *blob, { int size; void *mem; + int ret; + + if (mynodes) + *mynodes = NULL; pr_debug(" -> unflatten_device_tree()\n"); @@ -392,7 +398,7 @@ void *__unflatten_device_tree(const void *blob, /* First pass, scan for size */ size = unflatten_dt_nodes(blob, NULL, dad, NULL); - if (size < 0) + if (size <= 0) return NULL; size = ALIGN(size, 4); @@ -410,12 +416,16 @@ void *__unflatten_device_tree(const void *blob, pr_debug(" unflattening %p...\n", mem); /* Second pass, do actual unflattening */ - unflatten_dt_nodes(blob, mem, dad, mynodes); + ret = unflatten_dt_nodes(blob, mem, dad, mynodes); + if (be32_to_cpup(mem + size) != 0xdeadbeef) pr_warn("End of tree marker overwritten: %08x\n", be32_to_cpup(mem + size)); - if (detached && mynodes) { + if (ret <= 0) + return NULL; + + if (detached && mynodes && *mynodes) { of_node_set_flag(*mynodes, OF_DETACHED); pr_debug("unflattened tree is detached\n"); } diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h index d9e6a324de0a..d717efbd637d 100644 --- a/drivers/of/of_private.h +++ b/drivers/of/of_private.h @@ -8,6 +8,8 @@ * Copyright (C) 1996-2005 Paul Mackerras. 
*/ +#define FDT_ALIGN_SIZE 8 + /** * struct alias_prop - Alias property in 'aliases' node * @link: List node to link the structure in aliases_lookup list diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index 50bbe0edf538..23effe5e50ec 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c @@ -57,7 +57,7 @@ struct fragment { * struct overlay_changeset * @id: changeset identifier * @ovcs_list: list on which we are located - * @fdt: FDT that was unflattened to create @overlay_tree + * @fdt: base of memory allocated to hold aligned FDT that was unflattened to create @overlay_tree * @overlay_tree: expanded device tree that contains the fragment nodes * @count: count of fragment structures * @fragments: fragment nodes in the overlay expanded device tree @@ -719,8 +719,8 @@ static struct device_node *find_target(struct device_node *info_node) /** * init_overlay_changeset() - initialize overlay changeset from overlay tree * @ovcs: Overlay changeset to build - * @fdt: the FDT that was unflattened to create @tree - * @tree: Contains all the overlay fragments and overlay fixup nodes + * @fdt: base of memory allocated to hold aligned FDT that was unflattened to create @tree + * @tree: Contains the overlay fragments and overlay fixup nodes * * Initialize @ovcs. Populate @ovcs->fragments with node information from * the top level of @tree. The relevant top level nodes are the fragment @@ -873,7 +873,7 @@ static void free_overlay_changeset(struct overlay_changeset *ovcs) * internal documentation * * of_overlay_apply() - Create and apply an overlay changeset - * @fdt: the FDT that was unflattened to create @tree + * @fdt: base of memory allocated to hold the aligned FDT * @tree: Expanded overlay device tree * @ovcs_id: Pointer to overlay changeset id * @@ -953,7 +953,9 @@ static int of_overlay_apply(const void *fdt, struct device_node *tree, /* * after overlay_notify(), ovcs->overlay_tree related pointers may have * leaked to drivers, so can not kfree() tree, aka ovcs->overlay_tree; - * and can not free fdt, aka ovcs->fdt + * and can not free memory containing aligned fdt. The aligned fdt + * is contained within the memory at ovcs->fdt, possibly at an offset + * from ovcs->fdt. */ ret = overlay_notify(ovcs, OF_OVERLAY_PRE_APPLY); if (ret) { @@ -1014,10 +1016,11 @@ out: int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size, int *ovcs_id) { - const void *new_fdt; + void *new_fdt; + void *new_fdt_align; int ret; u32 size; - struct device_node *overlay_root; + struct device_node *overlay_root = NULL; *ovcs_id = 0; ret = 0; @@ -1036,11 +1039,14 @@ int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size, * Must create permanent copy of FDT because of_fdt_unflatten_tree() * will create pointers to the passed in FDT in the unflattened tree. 
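 *
 * For illustration, the pattern used just below in standalone form, with
 * invented names (assumes <stdint.h>, <stdlib.h>, <string.h>): libfdt
 * expects the blob on an 8-byte boundary, which FDT_ALIGN_SIZE encodes
 * and the allocator does not guarantee here, so the copy is placed at an
 * aligned offset inside an oversized buffer while the raw pointer is the
 * one kept for freeing:
 *
 *	void *fdt_copy_aligned(const void *src, size_t size, void **rawp)
 *	{
 *		void *raw = malloc(size + 8);	// caller frees *rawp
 *		void *dst;
 *
 *		if (!raw)
 *			return NULL;
 *		dst = (void *)(((uintptr_t)raw + 7) & ~(uintptr_t)7);
 *		memcpy(dst, src, size);		// unflatten from 'dst'
 *		*rawp = raw;			// never free the aligned pointer
 *		return dst;
 *	}
 *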
*/ - new_fdt = kmemdup(overlay_fdt, size, GFP_KERNEL); + new_fdt = kmalloc(size + FDT_ALIGN_SIZE, GFP_KERNEL); if (!new_fdt) return -ENOMEM; - of_fdt_unflatten_tree(new_fdt, NULL, &overlay_root); + new_fdt_align = PTR_ALIGN(new_fdt, FDT_ALIGN_SIZE); + memcpy(new_fdt_align, overlay_fdt, size); + + of_fdt_unflatten_tree(new_fdt_align, NULL, &overlay_root); if (!overlay_root) { pr_err("unable to unflatten overlay_fdt\n"); ret = -EINVAL; diff --git a/drivers/of/property.c b/drivers/of/property.c index 5036a362f52e..78427c85bef3 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c @@ -1262,7 +1262,16 @@ DEFINE_SIMPLE_PROP(pinctrl7, "pinctrl-7", NULL) DEFINE_SIMPLE_PROP(pinctrl8, "pinctrl-8", NULL) DEFINE_SUFFIX_PROP(regulators, "-supply", NULL) DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells") -DEFINE_SUFFIX_PROP(gpios, "-gpios", "#gpio-cells") + +static struct device_node *parse_gpios(struct device_node *np, + const char *prop_name, int index) +{ + if (!strcmp_suffix(prop_name, ",nr-gpios")) + return NULL; + + return parse_suffix_prop_cells(np, prop_name, index, "-gpios", + "#gpio-cells"); +} static struct device_node *parse_iommu_maps(struct device_node *np, const char *prop_name, int index) diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index eb100627c186..819a20acaa93 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -22,6 +22,7 @@ #include <linux/slab.h> #include <linux/device.h> #include <linux/platform_device.h> +#include <linux/kernel.h> #include <linux/i2c.h> #include <linux/i2c-mux.h> @@ -1408,7 +1409,8 @@ static void attach_node_and_children(struct device_node *np) static int __init unittest_data_add(void) { void *unittest_data; - struct device_node *unittest_data_node, *np; + void *unittest_data_align; + struct device_node *unittest_data_node = NULL, *np; /* * __dtb_testcases_begin[] and __dtb_testcases_end[] are magically * created by cmd_dt_S_dtb in scripts/Makefile.lib @@ -1417,21 +1419,29 @@ static int __init unittest_data_add(void) extern uint8_t __dtb_testcases_end[]; const int size = __dtb_testcases_end - __dtb_testcases_begin; int rc; + void *ret; if (!size) { - pr_warn("%s: No testcase data to attach; not running tests\n", - __func__); + pr_warn("%s: testcases is empty\n", __func__); return -ENODATA; } /* creating copy */ - unittest_data = kmemdup(__dtb_testcases_begin, size, GFP_KERNEL); + unittest_data = kmalloc(size + FDT_ALIGN_SIZE, GFP_KERNEL); if (!unittest_data) return -ENOMEM; - of_fdt_unflatten_tree(unittest_data, NULL, &unittest_data_node); + unittest_data_align = PTR_ALIGN(unittest_data, FDT_ALIGN_SIZE); + memcpy(unittest_data_align, __dtb_testcases_begin, size); + + ret = of_fdt_unflatten_tree(unittest_data_align, NULL, &unittest_data_node); + if (!ret) { + pr_warn("%s: unflatten testcases tree failed\n", __func__); + kfree(unittest_data); + return -ENODATA; + } if (!unittest_data_node) { - pr_warn("%s: No tree to attach; not running tests\n", __func__); + pr_warn("%s: testcases tree is empty\n", __func__); kfree(unittest_data); return -ENODATA; } diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index 27a17a1e4a7c..a313708bcf75 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -1458,7 +1458,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) * Prevents hv_pci_onchannelcallback() from running concurrently * in the tasklet. 
*/ - tasklet_disable(&channel->callback_event); + tasklet_disable_in_atomic(&channel->callback_event); /* * Since this function is called with IRQ locks held, can't diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index 7d3370289938..6e6825d17a1d 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -1604,8 +1604,8 @@ static int pinctrl_pins_show(struct seq_file *s, void *what) unsigned i, pin; #ifdef CONFIG_GPIOLIB struct pinctrl_gpio_range *range; - unsigned int gpio_num; struct gpio_chip *chip; + int gpio_num; #endif seq_printf(s, "registered pins: %d\n", pctldev->desc->npins); @@ -1625,7 +1625,7 @@ static int pinctrl_pins_show(struct seq_file *s, void *what) seq_printf(s, "pin %d (%s) ", pin, desc->name); #ifdef CONFIG_GPIOLIB - gpio_num = 0; + gpio_num = -1; list_for_each_entry(range, &pctldev->gpio_ranges, node) { if ((pin >= range->pin_base) && (pin < (range->pin_base + range->npins))) { @@ -1633,10 +1633,12 @@ static int pinctrl_pins_show(struct seq_file *s, void *what) break; } } - chip = gpio_to_chip(gpio_num); - if (chip && chip->gpiodev && chip->gpiodev->base) - seq_printf(s, "%u:%s ", gpio_num - - chip->gpiodev->base, chip->label); + if (gpio_num >= 0) + chip = gpio_to_chip(gpio_num); + else + chip = NULL; + if (chip) + seq_printf(s, "%u:%s ", gpio_num - chip->gpiodev->base, chip->label); else seq_puts(s, "0:? "); #endif diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 8085782cd8f9..9f3361c13ded 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -1357,6 +1357,7 @@ static int intel_pinctrl_add_padgroups_by_gpps(struct intel_pinctrl *pctrl, gpps[i].gpio_base = 0; break; case INTEL_GPIO_BASE_NOMAP: + break; default: break; } @@ -1393,6 +1394,7 @@ static int intel_pinctrl_add_padgroups_by_size(struct intel_pinctrl *pctrl, gpps[i].size = min(gpp_size, npins); npins -= gpps[i].size; + gpps[i].gpio_base = gpps[i].base; gpps[i].padown_num = padown_num; /* @@ -1491,8 +1493,13 @@ static int intel_pinctrl_probe(struct platform_device *pdev, if (IS_ERR(regs)) return PTR_ERR(regs); - /* Determine community features based on the revision */ + /* + * Determine community features based on the revision. + * A value of all ones means the device is not present. 
+ */ value = readl(regs + REVID); + if (value == ~0u) + return -ENODEV; if (((value & REVID_MASK) >> REVID_SHIFT) >= 0x94) { community->features |= PINCTRL_FEATURE_DEBOUNCE; community->features |= PINCTRL_FEATURE_1K_PD; diff --git a/drivers/pinctrl/intel/pinctrl-lewisburg.c b/drivers/pinctrl/intel/pinctrl-lewisburg.c index 7fdf4257df1e..ad4b446d588e 100644 --- a/drivers/pinctrl/intel/pinctrl-lewisburg.c +++ b/drivers/pinctrl/intel/pinctrl-lewisburg.c @@ -299,9 +299,9 @@ static const struct pinctrl_pin_desc lbg_pins[] = { static const struct intel_community lbg_communities[] = { LBG_COMMUNITY(0, 0, 71), LBG_COMMUNITY(1, 72, 132), - LBG_COMMUNITY(3, 133, 144), - LBG_COMMUNITY(4, 145, 180), - LBG_COMMUNITY(5, 181, 246), + LBG_COMMUNITY(3, 133, 143), + LBG_COMMUNITY(4, 144, 178), + LBG_COMMUNITY(5, 179, 246), }; static const struct intel_pinctrl_soc_data lbg_soc_data = { diff --git a/drivers/pinctrl/pinctrl-microchip-sgpio.c b/drivers/pinctrl/pinctrl-microchip-sgpio.c index f35edb0eac40..c12fa57ebd12 100644 --- a/drivers/pinctrl/pinctrl-microchip-sgpio.c +++ b/drivers/pinctrl/pinctrl-microchip-sgpio.c @@ -572,7 +572,7 @@ static void microchip_sgpio_irq_settype(struct irq_data *data, /* Type value spread over 2 registers sets: low, high bit */ sgpio_clrsetbits(bank->priv, REG_INT_TRIGGER, addr.bit, BIT(addr.port), (!!(type & 0x1)) << addr.port); - sgpio_clrsetbits(bank->priv, REG_INT_TRIGGER + SGPIO_MAX_BITS, addr.bit, + sgpio_clrsetbits(bank->priv, REG_INT_TRIGGER, SGPIO_MAX_BITS + addr.bit, BIT(addr.port), (!!(type & 0x2)) << addr.port); if (type == SGPIO_INT_TRG_LEVEL) diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index aa1a1c850d05..53a0badc6b03 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c @@ -3727,12 +3727,15 @@ static int __maybe_unused rockchip_pinctrl_suspend(struct device *dev) static int __maybe_unused rockchip_pinctrl_resume(struct device *dev) { struct rockchip_pinctrl *info = dev_get_drvdata(dev); - int ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX, - rk3288_grf_gpio6c_iomux | - GPIO6C6_SEL_WRITE_ENABLE); + int ret; - if (ret) - return ret; + if (info->ctrl->type == RK3288) { + ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX, + rk3288_grf_gpio6c_iomux | + GPIO6C6_SEL_WRITE_ENABLE); + if (ret) + return ret; + } return pinctrl_force_default(info->pctl_dev); } diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c index 369ee20a7ea9..2f19ab4db720 100644 --- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c +++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c @@ -392,7 +392,7 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group, unsigned long *configs, unsigned int nconfs) { struct lpi_pinctrl *pctrl = dev_get_drvdata(pctldev->dev); - unsigned int param, arg, pullup, strength; + unsigned int param, arg, pullup = LPI_GPIO_BIAS_DISABLE, strength = 2; bool value, output_enabled = false; const struct lpi_pingroup *g; unsigned long sval; diff --git a/drivers/pinctrl/qcom/pinctrl-sc7280.c b/drivers/pinctrl/qcom/pinctrl-sc7280.c index 8daccd530285..9d41abfca37e 100644 --- a/drivers/pinctrl/qcom/pinctrl-sc7280.c +++ b/drivers/pinctrl/qcom/pinctrl-sc7280.c @@ -1439,14 +1439,14 @@ static const struct msm_pingroup sc7280_groups[] = { [172] = PINGROUP(172, qdss, _, _, _, _, _, _, _, _), [173] = PINGROUP(173, qdss, _, _, _, _, _, _, _, _), [174] = PINGROUP(174, qdss, _, _, _, _, _, _, _, _), - [175] = UFS_RESET(ufs_reset, 0x1be000), - 
[176] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x1b3000, 15, 0), - [177] = SDC_QDSD_PINGROUP(sdc1_clk, 0x1b3000, 13, 6), - [178] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x1b3000, 11, 3), - [179] = SDC_QDSD_PINGROUP(sdc1_data, 0x1b3000, 9, 0), - [180] = SDC_QDSD_PINGROUP(sdc2_clk, 0x1b4000, 14, 6), - [181] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x1b4000, 11, 3), - [182] = SDC_QDSD_PINGROUP(sdc2_data, 0x1b4000, 9, 0), + [175] = UFS_RESET(ufs_reset, 0xbe000), + [176] = SDC_QDSD_PINGROUP(sdc1_rclk, 0xb3004, 0, 6), + [177] = SDC_QDSD_PINGROUP(sdc1_clk, 0xb3000, 13, 6), + [178] = SDC_QDSD_PINGROUP(sdc1_cmd, 0xb3000, 11, 3), + [179] = SDC_QDSD_PINGROUP(sdc1_data, 0xb3000, 9, 0), + [180] = SDC_QDSD_PINGROUP(sdc2_clk, 0xb4000, 14, 6), + [181] = SDC_QDSD_PINGROUP(sdc2_cmd, 0xb4000, 11, 3), + [182] = SDC_QDSD_PINGROUP(sdc2_data, 0xb4000, 9, 0), }; static const struct msm_pinctrl_soc_data sc7280_pinctrl = { diff --git a/drivers/pinctrl/qcom/pinctrl-sdx55.c b/drivers/pinctrl/qcom/pinctrl-sdx55.c index 2b5b0e2b03ad..5aaf57b40407 100644 --- a/drivers/pinctrl/qcom/pinctrl-sdx55.c +++ b/drivers/pinctrl/qcom/pinctrl-sdx55.c @@ -423,7 +423,7 @@ static const char * const gpio_groups[] = { static const char * const qdss_stm_groups[] = { "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", "gpio12", "gpio13", - "gpio14", "gpio15", "gpio16", "gpio17", "gpio18", "gpio19" "gpio20", "gpio21", "gpio22", + "gpio14", "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22", "gpio23", "gpio44", "gpio45", "gpio52", "gpio53", "gpio56", "gpio57", "gpio61", "gpio62", "gpio63", "gpio64", "gpio65", "gpio66", }; diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index ad4e630e73e2..461ec61530eb 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -1173,15 +1173,20 @@ config INTEL_PMC_CORE depends on PCI help The Intel Platform Controller Hub for Intel Core SoCs provides access - to Power Management Controller registers via a PCI interface. This + to Power Management Controller registers via various interfaces. This driver can utilize debugging capabilities and supported features as - exposed by the Power Management Controller. + exposed by the Power Management Controller. It also may perform some + tasks in the PMC in order to enable transition into the SLPS0 state. + It should be selected on all Intel platforms supported by the driver. 
Supported features: - SLP_S0_RESIDENCY counter - PCH IP Power Gating status - - LTR Ignore + - LTR Ignore / LTR Show - MPHY/PLL gating status (Sunrisepoint PCH only) + - SLPS0 Debug registers (Cannonlake/Icelake PCH) + - Low Power Mode registers (Tigerlake and beyond) + - PMC quirks as needed to enable SLPS0/S0ix config INTEL_PMT_CLASS tristate diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c index 80f4b7785c6c..091e48c217ed 100644 --- a/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c +++ b/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c @@ -185,5 +185,8 @@ void exit_enum_attributes(void) sysfs_remove_group(wmi_priv.enumeration_data[instance_id].attr_name_kobj, &enumeration_attr_group); } + wmi_priv.enumeration_instances_count = 0; + kfree(wmi_priv.enumeration_data); + wmi_priv.enumeration_data = NULL; } diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c index 75aedbb733be..8a49ba6e44f9 100644 --- a/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c +++ b/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c @@ -175,5 +175,8 @@ void exit_int_attributes(void) sysfs_remove_group(wmi_priv.integer_data[instance_id].attr_name_kobj, &integer_attr_group); } + wmi_priv.integer_instances_count = 0; + kfree(wmi_priv.integer_data); + wmi_priv.integer_data = NULL; } diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c index 3abcd95477c0..834b3e82ad9f 100644 --- a/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c +++ b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c @@ -183,5 +183,8 @@ void exit_po_attributes(void) sysfs_remove_group(wmi_priv.po_data[instance_id].attr_name_kobj, &po_attr_group); } + wmi_priv.po_instances_count = 0; + kfree(wmi_priv.po_data); + wmi_priv.po_data = NULL; } diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c index ac75dce88a4c..552537852459 100644 --- a/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c +++ b/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c @@ -155,5 +155,8 @@ void exit_str_attributes(void) sysfs_remove_group(wmi_priv.str_data[instance_id].attr_name_kobj, &str_attr_group); } + wmi_priv.str_instances_count = 0; + kfree(wmi_priv.str_data); + wmi_priv.str_data = NULL; } diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c index cb81010ba1a2..7410ccae650c 100644 --- a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c +++ b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c @@ -210,25 +210,17 @@ static struct kobj_attribute pending_reboot = __ATTR_RO(pending_reboot); */ static int create_attributes_level_sysfs_files(void) { - int ret = sysfs_create_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr); + int ret; - if (ret) { - pr_debug("could not create reset_bios file\n"); + ret = sysfs_create_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr); + if (ret) return ret; - } ret = sysfs_create_file(&wmi_priv.main_dir_kset->kobj, &pending_reboot.attr); - if (ret) { - pr_debug("could not create changing_pending_reboot file\n"); - sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr); - } - return ret; -} + if (ret) + return ret; -static void 
release_reset_bios_data(void) -{ - sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr); - sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &pending_reboot.attr); + return 0; } static ssize_t wmi_sysman_attr_show(struct kobject *kobj, struct attribute *attr, @@ -373,8 +365,6 @@ static void destroy_attribute_objs(struct kset *kset) */ static void release_attributes_data(void) { - release_reset_bios_data(); - mutex_lock(&wmi_priv.mutex); exit_enum_attributes(); exit_int_attributes(); @@ -386,11 +376,13 @@ static void release_attributes_data(void) wmi_priv.authentication_dir_kset = NULL; } if (wmi_priv.main_dir_kset) { + sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr); + sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &pending_reboot.attr); destroy_attribute_objs(wmi_priv.main_dir_kset); kset_unregister(wmi_priv.main_dir_kset); + wmi_priv.main_dir_kset = NULL; } mutex_unlock(&wmi_priv.mutex); - } /** @@ -497,7 +489,6 @@ nextobj: err_attr_init: mutex_unlock(&wmi_priv.mutex); - release_attributes_data(); kfree(obj); return retval; } @@ -513,102 +504,91 @@ static int __init sysman_init(void) } ret = init_bios_attr_set_interface(); - if (ret || !wmi_priv.bios_attr_wdev) { - pr_debug("failed to initialize set interface\n"); - goto fail_set_interface; - } + if (ret) + return ret; ret = init_bios_attr_pass_interface(); - if (ret || !wmi_priv.password_attr_wdev) { - pr_debug("failed to initialize pass interface\n"); - goto fail_pass_interface; + if (ret) + goto err_exit_bios_attr_set_interface; + + if (!wmi_priv.bios_attr_wdev || !wmi_priv.password_attr_wdev) { + pr_debug("failed to find set or pass interface\n"); + ret = -ENODEV; + goto err_exit_bios_attr_pass_interface; } ret = class_register(&firmware_attributes_class); if (ret) - goto fail_class; + goto err_exit_bios_attr_pass_interface; wmi_priv.class_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0), NULL, "%s", DRIVER_NAME); if (IS_ERR(wmi_priv.class_dev)) { ret = PTR_ERR(wmi_priv.class_dev); - goto fail_classdev; + goto err_unregister_class; } wmi_priv.main_dir_kset = kset_create_and_add("attributes", NULL, &wmi_priv.class_dev->kobj); if (!wmi_priv.main_dir_kset) { ret = -ENOMEM; - goto fail_main_kset; + goto err_destroy_classdev; } wmi_priv.authentication_dir_kset = kset_create_and_add("authentication", NULL, &wmi_priv.class_dev->kobj); if (!wmi_priv.authentication_dir_kset) { ret = -ENOMEM; - goto fail_authentication_kset; + goto err_release_attributes_data; } ret = create_attributes_level_sysfs_files(); if (ret) { pr_debug("could not create reset BIOS attribute\n"); - goto fail_reset_bios; + goto err_release_attributes_data; } ret = init_bios_attributes(ENUM, DELL_WMI_BIOS_ENUMERATION_ATTRIBUTE_GUID); if (ret) { pr_debug("failed to populate enumeration type attributes\n"); - goto fail_create_group; + goto err_release_attributes_data; } ret = init_bios_attributes(INT, DELL_WMI_BIOS_INTEGER_ATTRIBUTE_GUID); if (ret) { pr_debug("failed to populate integer type attributes\n"); - goto fail_create_group; + goto err_release_attributes_data; } ret = init_bios_attributes(STR, DELL_WMI_BIOS_STRING_ATTRIBUTE_GUID); if (ret) { pr_debug("failed to populate string type attributes\n"); - goto fail_create_group; + goto err_release_attributes_data; } ret = init_bios_attributes(PO, DELL_WMI_BIOS_PASSOBJ_ATTRIBUTE_GUID); if (ret) { pr_debug("failed to populate pass object type attributes\n"); - goto fail_create_group; + goto err_release_attributes_data; } return 0; -fail_create_group: 
+err_release_attributes_data: release_attributes_data(); -fail_reset_bios: - if (wmi_priv.authentication_dir_kset) { - kset_unregister(wmi_priv.authentication_dir_kset); - wmi_priv.authentication_dir_kset = NULL; - } - -fail_authentication_kset: - if (wmi_priv.main_dir_kset) { - kset_unregister(wmi_priv.main_dir_kset); - wmi_priv.main_dir_kset = NULL; - } - -fail_main_kset: +err_destroy_classdev: device_destroy(&firmware_attributes_class, MKDEV(0, 0)); -fail_classdev: +err_unregister_class: class_unregister(&firmware_attributes_class); -fail_class: +err_exit_bios_attr_pass_interface: exit_bios_attr_pass_interface(); -fail_pass_interface: +err_exit_bios_attr_set_interface: exit_bios_attr_set_interface(); -fail_set_interface: return ret; } diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c index 2f5b8d09143e..078648a9201b 100644 --- a/drivers/platform/x86/intel-hid.c +++ b/drivers/platform/x86/intel-hid.c @@ -90,6 +90,13 @@ static const struct dmi_system_id button_array_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x2 Detachable"), }, }, + { + .ident = "Lenovo ThinkPad X1 Tablet Gen 2", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Tablet Gen 2"), + }, + }, { } }; @@ -476,11 +483,16 @@ static void notify_handler(acpi_handle handle, u32 event, void *context) goto wakeup; /* - * Switch events will wake the device and report the new switch - * position to the input subsystem. + * Some devices send (duplicate) tablet-mode events when moved + * around even though the mode has not changed; and they do this + * even when suspended. + * Update the switch state in case it changed and then return + * without waking up to avoid spurious wakeups. */ - if (priv->switches && (event == 0xcc || event == 0xcd)) - goto wakeup; + if (event == 0xcc || event == 0xcd) { + report_tablet_mode_event(priv->switches, event); + return; + } /* Wake up on 5-button array events only. */ if (event == 0xc0 || !priv->array) @@ -494,9 +506,6 @@ static void notify_handler(acpi_handle handle, u32 event, void *context) wakeup: pm_wakeup_hard_event(&device->dev); - if (report_tablet_mode_event(priv->switches, event)) - return; - return; } diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c index 8a8017f9ca91..3fdf4cbec9ad 100644 --- a/drivers/platform/x86/intel-vbtn.c +++ b/drivers/platform/x86/intel-vbtn.c @@ -48,8 +48,16 @@ static const struct key_entry intel_vbtn_keymap[] = { }; static const struct key_entry intel_vbtn_switchmap[] = { - { KE_SW, 0xCA, { .sw = { SW_DOCK, 1 } } }, /* Docked */ - { KE_SW, 0xCB, { .sw = { SW_DOCK, 0 } } }, /* Undocked */ + /* + * SW_DOCK should only be reported for docking stations, but DSDTs using the + * intel-vbtn code, always seem to use this for 2-in-1s / convertibles and set + * SW_DOCK=1 when in laptop-mode (in tandem with setting SW_TABLET_MODE=0). + * This causes userspace to think the laptop is docked to a port-replicator + * and to disable suspend-on-lid-close, which is undesirable. + * Map the dock events to KEY_IGNORE to avoid this broken SW_DOCK reporting. 
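For background on the keymap change that follows: in a sparse keymap, KE_SW entries are translated into input-switch reports, while KE_IGNORE entries are matched and then silently consumed, so remapping the dock events keeps them from reaching userspace without producing "unknown event" noise. A hedged sketch of the reporting side (illustrative handler, not the actual intel-vbtn code):

	#include <linux/input/sparse-keymap.h>

	/* Sketch: events matched to KE_IGNORE entries are dropped quietly. */
	static void example_notify(struct input_dev *input, unsigned int event)
	{
		if (!sparse_keymap_report_event(input, event, 1, true))
			pr_debug("unmapped event 0x%x\n", event);
	}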
+ */ + { KE_IGNORE, 0xCA, { .sw = { SW_DOCK, 1 } } }, /* Docked */ + { KE_IGNORE, 0xCB, { .sw = { SW_DOCK, 0 } } }, /* Undocked */ { KE_SW, 0xCC, { .sw = { SW_TABLET_MODE, 1 } } }, /* Tablet */ { KE_SW, 0xCD, { .sw = { SW_TABLET_MODE, 0 } } }, /* Laptop */ { KE_END } diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c index ee2f757515b0..b5888aeb4bcf 100644 --- a/drivers/platform/x86/intel_pmc_core.c +++ b/drivers/platform/x86/intel_pmc_core.c @@ -863,34 +863,45 @@ out_unlock: } DEFINE_SHOW_ATTRIBUTE(pmc_core_pll); -static ssize_t pmc_core_ltr_ignore_write(struct file *file, - const char __user *userbuf, - size_t count, loff_t *ppos) +static int pmc_core_send_ltr_ignore(u32 value) { struct pmc_dev *pmcdev = &pmc; const struct pmc_reg_map *map = pmcdev->map; - u32 val, buf_size, fd; - int err; - - buf_size = count < 64 ? count : 64; - - err = kstrtou32_from_user(userbuf, buf_size, 10, &val); - if (err) - return err; + u32 reg; + int err = 0; mutex_lock(&pmcdev->lock); - if (val > map->ltr_ignore_max) { + if (value > map->ltr_ignore_max) { err = -EINVAL; goto out_unlock; } - fd = pmc_core_reg_read(pmcdev, map->ltr_ignore_offset); - fd |= (1U << val); - pmc_core_reg_write(pmcdev, map->ltr_ignore_offset, fd); + reg = pmc_core_reg_read(pmcdev, map->ltr_ignore_offset); + reg |= BIT(value); + pmc_core_reg_write(pmcdev, map->ltr_ignore_offset, reg); out_unlock: mutex_unlock(&pmcdev->lock); + + return err; +} + +static ssize_t pmc_core_ltr_ignore_write(struct file *file, + const char __user *userbuf, + size_t count, loff_t *ppos) +{ + u32 buf_size, value; + int err; + + buf_size = min_t(u32, count, 64); + + err = kstrtou32_from_user(userbuf, buf_size, 10, &value); + if (err) + return err; + + err = pmc_core_send_ltr_ignore(value); + return err == 0 ? count : err; } @@ -1244,6 +1255,15 @@ static int pmc_core_probe(struct platform_device *pdev) pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit(); dmi_check_system(pmc_core_dmi_table); + /* + * On TGL, due to a hardware limitation, the GBE LTR blocks PC10 when + * a cable is attached. Tell the PMC to ignore it. + */ + if (pmcdev->map == &tgl_reg_map) { + dev_dbg(&pdev->dev, "ignoring GBE LTR\n"); + pmc_core_send_ltr_ignore(3); + } + pmc_core_dbgfs_register(pmcdev); device_initialized = true; diff --git a/drivers/platform/x86/intel_pmt_class.c b/drivers/platform/x86/intel_pmt_class.c index c8939fba4509..ee2b3bbeb83d 100644 --- a/drivers/platform/x86/intel_pmt_class.c +++ b/drivers/platform/x86/intel_pmt_class.c @@ -173,7 +173,7 @@ static int intel_pmt_dev_register(struct intel_pmt_entry *entry, struct intel_pmt_namespace *ns, struct device *parent) { - struct resource res; + struct resource res = {0}; struct device *dev; int ret; diff --git a/drivers/platform/x86/intel_pmt_crashlog.c b/drivers/platform/x86/intel_pmt_crashlog.c index 97dd749c8290..92d315a16cfd 100644 --- a/drivers/platform/x86/intel_pmt_crashlog.c +++ b/drivers/platform/x86/intel_pmt_crashlog.c @@ -23,18 +23,17 @@ #define CRASH_TYPE_OOBMSM 1 /* Control Flags */ -#define CRASHLOG_FLAG_DISABLE BIT(27) +#define CRASHLOG_FLAG_DISABLE BIT(28) /* - * Bits 28 and 29 control the state of bit 31. + * Bits 29 and 30 control the state of bit 31. * - * Bit 28 will clear bit 31, if set, allowing a new crashlog to be captured. - * Bit 29 will immediately trigger a crashlog to be generated, setting bit 31. - * Bit 30 is read-only and reserved as 0. + * Bit 29 will clear bit 31, if set, allowing a new crashlog to be captured. 
+ * Bit 30 will immediately trigger a crashlog to be generated, setting bit 31. * Bit 31 is the read-only status with a 1 indicating log is complete. */ -#define CRASHLOG_FLAG_TRIGGER_CLEAR BIT(28) -#define CRASHLOG_FLAG_TRIGGER_EXECUTE BIT(29) +#define CRASHLOG_FLAG_TRIGGER_CLEAR BIT(29) +#define CRASHLOG_FLAG_TRIGGER_EXECUTE BIT(30) #define CRASHLOG_FLAG_TRIGGER_COMPLETE BIT(31) #define CRASHLOG_FLAG_TRIGGER_MASK GENMASK(31, 28) diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index b881044b31b0..0d9e2ddbf904 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -4081,13 +4081,19 @@ static bool hotkey_notify_6xxx(const u32 hkey, case TP_HKEY_EV_KEY_NUMLOCK: case TP_HKEY_EV_KEY_FN: - case TP_HKEY_EV_KEY_FN_ESC: /* key press events, we just ignore them as long as the EC * is still reporting them in the normal keyboard stream */ *send_acpi_ev = false; *ignore_acpi_ev = true; return true; + case TP_HKEY_EV_KEY_FN_ESC: + /* Get the media key status to force the status LED to update */ + acpi_evalf(hkey_handle, NULL, "GMKS", "v"); + *send_acpi_ev = false; + *ignore_acpi_ev = true; + return true; + case TP_HKEY_EV_TABLET_CHANGED: tpacpi_input_send_tabletsw(); hotkey_tablet_mode_notify_change(); @@ -9845,6 +9851,11 @@ static struct ibm_struct lcdshadow_driver_data = { * Thinkpad sensor interfaces */ +#define DYTC_CMD_QUERY 0 /* To get DYTC status - enable/revision */ +#define DYTC_QUERY_ENABLE_BIT 8 /* Bit 8 - 0 = disabled, 1 = enabled */ +#define DYTC_QUERY_SUBREV_BIT 16 /* Bits 16 - 27 - sub revision */ +#define DYTC_QUERY_REV_BIT 28 /* Bits 28 - 31 - revision */ + #define DYTC_CMD_GET 2 /* To get current IC function and mode */ #define DYTC_GET_LAPMODE_BIT 17 /* Set when in lapmode */ @@ -9855,6 +9866,7 @@ static bool has_palmsensor; static bool has_lapsensor; static bool palm_state; static bool lap_state; +static int dytc_version; static int dytc_command(int command, int *output) { @@ -9869,6 +9881,33 @@ static int dytc_command(int command, int *output) return 0; } +static int dytc_get_version(void) +{ + int err, output; + + /* Check if we've been called before - and just return cached value */ + if (dytc_version) + return dytc_version; + + /* Otherwise query DYTC and extract version information */ + err = dytc_command(DYTC_CMD_QUERY, &output); + /* + * If support isn't available (ENODEV) then don't return an error + * and don't create the sysfs group + */ + if (err == -ENODEV) + return 0; + /* For all other errors we can flag the failure */ + if (err) + return err; + + /* Check DYTC is enabled and supports mode setting */ + if (output & BIT(DYTC_QUERY_ENABLE_BIT)) + dytc_version = (output >> DYTC_QUERY_REV_BIT) & 0xF; + + return 0; +} + static int lapsensor_get(bool *present, bool *state) { int output, err; @@ -9974,7 +10013,18 @@ static int tpacpi_proxsensor_init(struct ibm_init_struct *iibm) if (err) return err; } - if (has_lapsensor) { + + /* Check if we know the DYTC version, if we don't then get it */ + if (!dytc_version) { + err = dytc_get_version(); + if (err) + return err; + } + /* + * Platforms before DYTC version 5 claim to have a lap sensor, but it doesn't work, so we + * ignore them + */ + if (has_lapsensor && (dytc_version >= 5)) { err = sysfs_create_file(&tpacpi_pdev->dev.kobj, &dev_attr_dytc_lapmode.attr); if (err) return err; @@ -9999,14 +10049,9 @@ static struct ibm_struct proxsensor_driver_data = { * DYTC Platform Profile interface */ -#define DYTC_CMD_QUERY 0 /* To get DYTC status -
enable/revision */ #define DYTC_CMD_SET 1 /* To enable/disable IC function mode */ #define DYTC_CMD_RESET 0x1ff /* To reset back to default */ -#define DYTC_QUERY_ENABLE_BIT 8 /* Bit 8 - 0 = disabled, 1 = enabled */ -#define DYTC_QUERY_SUBREV_BIT 16 /* Bits 16 - 27 - sub revision */ -#define DYTC_QUERY_REV_BIT 28 /* Bits 28 - 31 - revision */ - #define DYTC_GET_FUNCTION_BIT 8 /* Bits 8-11 - function setting */ #define DYTC_GET_MODE_BIT 12 /* Bits 12-15 - mode setting */ @@ -10142,8 +10187,13 @@ static int dytc_profile_set(struct platform_profile_handler *pprof, return err; if (profile == PLATFORM_PROFILE_BALANCED) { - /* To get back to balanced mode we just issue a reset command */ - err = dytc_command(DYTC_CMD_RESET, &output); + /* + * To get back to balanced mode we need to issue a reset command. + * Note we still need to disable CQL mode beforehand and re-enable + * it afterwards, otherwise dytc_lapmode gets reset to 0 and stays + * stuck at 0 for approx. 30 minutes. + */ + err = dytc_cql_command(DYTC_CMD_RESET, &output); if (err) goto unlock; } else { @@ -10211,28 +10261,28 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm) if (err) return err; + /* Check if we know the DYTC version, if we don't then get it */ + if (!dytc_version) { + err = dytc_get_version(); + if (err) + return err; + } /* Check DYTC is enabled and supports mode setting */ - if (output & BIT(DYTC_QUERY_ENABLE_BIT)) { - /* Only DYTC v5.0 and later has this feature. */ - int dytc_version; - - dytc_version = (output >> DYTC_QUERY_REV_BIT) & 0xF; - if (dytc_version >= 5) { - dbg_printk(TPACPI_DBG_INIT, - "DYTC version %d: thermal mode available\n", dytc_version); - /* Create platform_profile structure and register */ - err = platform_profile_register(&dytc_profile); - /* - * If for some reason platform_profiles aren't enabled - * don't quit terminally. - */ - if (err) - return 0; + if (dytc_version >= 5) { + dbg_printk(TPACPI_DBG_INIT, + "DYTC version %d: thermal mode available\n", dytc_version); + /* Create platform_profile structure and register */ + err = platform_profile_register(&dytc_profile); + /* + * If for some reason platform_profiles aren't enabled + * don't quit terminally. + */ + if (err) + return 0; - dytc_profile_available = true; - /* Ensure initial values are correct */ - dytc_profile_refresh(); - } + dytc_profile_available = true; + /* Ensure initial values are correct */ + dytc_profile_refresh(); } return 0; } diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c index beb5f74944cd..08f4cf0ad9e3 100644 --- a/drivers/ptp/ptp_qoriq.c +++ b/drivers/ptp/ptp_qoriq.c @@ -189,15 +189,16 @@ int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) tmr_add = ptp_qoriq->tmr_add; adj = tmr_add; - /* calculate diff as adj*(scaled_ppm/65536)/1000000 - * and round() to the nearest integer + /* + * Calculate diff and round() to the nearest integer + * + * diff = adj * (ppb / 1000000000) + * = adj * scaled_ppm / 65536000000 */ - adj *= scaled_ppm; - diff = div_u64(adj, 8000000); - diff = (diff >> 13) + ((diff >> 12) & 1); + diff = mul_u64_u64_div_u64(adj, scaled_ppm, 32768000000); + diff = DIV64_U64_ROUND_UP(diff, 2); tmr_add = neg_adj ?
tmr_add - diff : tmr_add + diff; - ptp_qoriq->write(&regs->ctrl_regs->tmr_add, tmr_add); return 0; diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c index ddecf25b5dd4..d7894f178bd4 100644 --- a/drivers/ras/cec.c +++ b/drivers/ras/cec.c @@ -309,11 +309,20 @@ static bool sanity_check(struct ce_array *ca) return ret; } +/** + * cec_add_elem - Add an element to the CEC array. + * @pfn: page frame number to insert + * + * Return values: + * - <0: on error + * - 0: on success + * - >0: when the inserted pfn was offlined + */ static int cec_add_elem(u64 pfn) { struct ce_array *ca = &ce_arr; + int count, err, ret = 0; unsigned int to = 0; - int count, ret = 0; /* * We can be called very early on the identify_cpu() path where we are @@ -330,8 +339,8 @@ static int cec_add_elem(u64 pfn) if (ca->n == MAX_ELEMS) WARN_ON(!del_lru_elem_unlocked(ca)); - ret = find_elem(ca, pfn, &to); - if (ret < 0) { + err = find_elem(ca, pfn, &to); + if (err < 0) { /* * Shift range [to-end] to make room for one more element. */ diff --git a/drivers/regulator/bd9571mwv-regulator.c b/drivers/regulator/bd9571mwv-regulator.c index 7b0cd08db446..ba020a45f238 100644 --- a/drivers/regulator/bd9571mwv-regulator.c +++ b/drivers/regulator/bd9571mwv-regulator.c @@ -125,7 +125,7 @@ static const struct regulator_ops vid_ops = { static const struct regulator_desc regulators[] = { BD9571MWV_REG("VD09", "vd09", VD09, avs_ops, 0, 0x7f, - 0x80, 600000, 10000, 0x3c), + 0x6f, 600000, 10000, 0x3c), BD9571MWV_REG("VD18", "vd18", VD18, vid_ops, BD9571MWV_VD18_VID, 0xf, 16, 1625000, 25000, 0), BD9571MWV_REG("VD25", "vd25", VD25, vid_ops, BD9571MWV_VD25_VID, 0xf, @@ -134,7 +134,7 @@ static const struct regulator_desc regulators[] = { 11, 2800000, 100000, 0), BD9571MWV_REG("DVFS", "dvfs", DVFS, reg_ops, BD9571MWV_DVFS_MONIVDAC, 0x7f, - 0x80, 600000, 10000, 0x3c), + 0x6f, 600000, 10000, 0x3c), }; #ifdef CONFIG_PM_SLEEP @@ -174,7 +174,7 @@ static ssize_t backup_mode_show(struct device *dev, { struct bd9571mwv_reg *bdreg = dev_get_drvdata(dev); - return sprintf(buf, "%s\n", bdreg->bkup_mode_enabled ? "on" : "off"); + return sysfs_emit(buf, "%s\n", bdreg->bkup_mode_enabled ? "on" : "off"); } static ssize_t backup_mode_store(struct device *dev, @@ -301,7 +301,7 @@ static int bd9571mwv_regulator_probe(struct platform_device *pdev) &config); if (IS_ERR(rdev)) { dev_err(&pdev->dev, "failed to register %s regulator\n", - pdev->name); + regulators[i].name); return PTR_ERR(rdev); } } diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c index 2667919d76b3..dcb380e868df 100644 --- a/drivers/remoteproc/pru_rproc.c +++ b/drivers/remoteproc/pru_rproc.c @@ -450,6 +450,24 @@ static void *pru_i_da_to_va(struct pru_rproc *pru, u32 da, size_t len) if (len == 0) return NULL; + /* + * GNU binutils do not support multiple address spaces. The GNU + * linker's default linker script places IRAM at an arbitrary high + * offset, in order to differentiate it from DRAM. Hence we need to + * strip the artificial offset in the IRAM addresses coming from the + * ELF file. + * + * The TI proprietary linker would never set those higher IRAM address + * bits anyway. PRU architecture limits the program counter to 16-bit + * word-address range. This in turn corresponds to 18-bit IRAM + * byte-address range for ELF. + * + * Two more bits are added just in case to make the final 20-bit mask. + * Idea is to have a safeguard in case TI decides to add banking + * in future SoCs.
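Making the arithmetic of that comment explicit: a 16-bit word-address program counter over 4-byte instruction words covers 16 + 2 = 18 bits of IRAM byte addresses, and the two extra guard bits bring the mask to 20 bits, i.e. (1 << 20) - 1 = 0xfffff. A hypothetical spelled-out form of the constant (the patch itself just writes 0xfffff):

	#define PRU_PC_ADDR_BITS	16	/* 16-bit word-address PC */
	#define PRU_INSN_WORD_SHIFT	2	/* 4-byte instruction words */
	#define PRU_BANKING_GUARD_BITS	2	/* headroom for future banking */
	/* GENMASK(19, 0) == 0xfffff */
	#define PRU_IRAM_ADDR_MASK \
		GENMASK(PRU_PC_ADDR_BITS + PRU_INSN_WORD_SHIFT + PRU_BANKING_GUARD_BITS - 1, 0)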
+ */ + da &= 0xfffff; + if (da >= PRU_IRAM_DA && da + len <= PRU_IRAM_DA + pru->mem_regions[PRU_IOMEM_IRAM].size) { offset = da - PRU_IRAM_DA; @@ -585,7 +603,7 @@ pru_rproc_load_elf_segments(struct rproc *rproc, const struct firmware *fw) break; } - if (pru->data->is_k3 && is_iram) { + if (pru->data->is_k3) { ret = pru_rproc_memcpy(ptr, elf_data + phdr->p_offset, filesz); if (ret) { diff --git a/drivers/remoteproc/qcom_pil_info.c b/drivers/remoteproc/qcom_pil_info.c index 5521c4437ffa..7c007dd7b200 100644 --- a/drivers/remoteproc/qcom_pil_info.c +++ b/drivers/remoteproc/qcom_pil_info.c @@ -56,7 +56,7 @@ static int qcom_pil_info_init(void) memset_io(base, 0, resource_size(&imem)); _reloc.base = base; - _reloc.num_entries = resource_size(&imem) / PIL_RELOC_ENTRY_SIZE; + _reloc.num_entries = (u32)resource_size(&imem) / PIL_RELOC_ENTRY_SIZE; return 0; } diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h index d126bb877250..ba6a3aa8d954 100644 --- a/drivers/scsi/hpsa_cmd.h +++ b/drivers/scsi/hpsa_cmd.h @@ -20,6 +20,11 @@ #ifndef HPSA_CMD_H #define HPSA_CMD_H +#include <linux/compiler.h> + +#include <linux/build_bug.h> /* static_assert */ +#include <linux/stddef.h> /* offsetof */ + /* general boundary defintions */ #define SENSEINFOBYTES 32 /* may vary between hbas */ #define SG_ENTRIES_IN_CMD 32 /* Max SG entries excluding chain blocks */ @@ -200,12 +205,10 @@ union u64bit { MAX_EXT_TARGETS + 1) /* + 1 is for the controller itself */ /* SCSI-3 Commands */ -#pragma pack(1) - #define HPSA_INQUIRY 0x12 struct InquiryData { u8 data_byte[36]; -}; +} __packed; #define HPSA_REPORT_LOG 0xc2 /* Report Logical LUNs */ #define HPSA_REPORT_PHYS 0xc3 /* Report Physical LUNs */ @@ -221,7 +224,7 @@ struct raid_map_disk_data { u8 xor_mult[2]; /**< XOR multipliers for this position, * valid for data disks only */ u8 reserved[2]; -}; +} __packed; struct raid_map_data { __le32 structure_size; /* Size of entire structure in bytes */ @@ -247,14 +250,14 @@ struct raid_map_data { __le16 dekindex; /* Data encryption key index. 
*/ u8 reserved[16]; struct raid_map_disk_data data[RAID_MAP_MAX_ENTRIES]; -}; +} __packed; struct ReportLUNdata { u8 LUNListLength[4]; u8 extended_response_flag; u8 reserved[3]; u8 LUN[HPSA_MAX_LUN][8]; -}; +} __packed; struct ext_report_lun_entry { u8 lunid[8]; @@ -269,20 +272,20 @@ struct ext_report_lun_entry { u8 lun_count; /* multi-lun device, how many luns */ u8 redundant_paths; u32 ioaccel_handle; /* ioaccel1 only uses lower 16 bits */ -}; +} __packed; struct ReportExtendedLUNdata { u8 LUNListLength[4]; u8 extended_response_flag; u8 reserved[3]; struct ext_report_lun_entry LUN[HPSA_MAX_PHYS_LUN]; -}; +} __packed; struct SenseSubsystem_info { u8 reserved[36]; u8 portname[8]; u8 reserved1[1108]; -}; +} __packed; /* BMIC commands */ #define BMIC_READ 0x26 @@ -317,7 +320,7 @@ union SCSI3Addr { u8 Targ:6; u8 Mode:2; /* b10 */ } LogUnit; -}; +} __packed; struct PhysDevAddr { u32 TargetId:24; @@ -325,20 +328,20 @@ struct PhysDevAddr { u32 Mode:2; /* 2 level target device addr */ union SCSI3Addr Target[2]; -}; +} __packed; struct LogDevAddr { u32 VolId:30; u32 Mode:2; u8 reserved[4]; -}; +} __packed; union LUNAddr { u8 LunAddrBytes[8]; union SCSI3Addr SCSI3Lun[4]; struct PhysDevAddr PhysDev; struct LogDevAddr LogDev; -}; +} __packed; struct CommandListHeader { u8 ReplyQueue; @@ -346,7 +349,7 @@ struct CommandListHeader { __le16 SGTotal; __le64 tag; union LUNAddr LUN; -}; +} __packed; struct RequestBlock { u8 CDBLen; @@ -365,18 +368,18 @@ struct RequestBlock { #define GET_DIR(tad) (((tad) >> 6) & 0x03) u16 Timeout; u8 CDB[16]; -}; +} __packed; struct ErrDescriptor { __le64 Addr; __le32 Len; -}; +} __packed; struct SGDescriptor { __le64 Addr; __le32 Len; __le32 Ext; -}; +} __packed; union MoreErrInfo { struct { @@ -390,7 +393,8 @@ union MoreErrInfo { u8 offense_num; /* byte # of offense 0-base */ u32 offense_value; } Invalid_Cmd; -}; +} __packed; + struct ErrorInfo { u8 ScsiStatus; u8 SenseLen; @@ -398,7 +402,7 @@ struct ErrorInfo { u32 ResidualCnt; union MoreErrInfo MoreErrInfo; u8 SenseInfo[SENSEINFOBYTES]; -}; +} __packed; /* Command types */ #define CMD_IOCTL_PEND 0x01 #define CMD_SCSI 0x03 @@ -453,6 +457,15 @@ struct CommandList { atomic_t refcount; /* Must be last to avoid memset in hpsa_cmd_init() */ } __aligned(COMMANDLIST_ALIGNMENT); +/* + * Make sure our embedded atomic variable is aligned. Otherwise we break atomic + * operations on architectures that don't support unaligned atomics like IA64. + * + * The assert guards against reintroducing unwanted __packed to + * the struct CommandList.
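The hpsa_cmd.h conversion above trades a header-wide #pragma pack(1) for per-struct __packed annotations: the pragma packed every structure in its scope whether or not it described wire data, which is exactly how an unaligned atomic_t could end up inside struct CommandList. A generic illustration of the two forms (illustrative struct names, not hpsa code):

	/* Old: everything between the pragmas is byte-packed, wanted or not. */
	#pragma pack(1)
	struct wire_hdr_old { u8 op; u32 len; };
	#pragma pack()

	/* New: packing is opt-in per struct, so alignment-sensitive
	 * structures elsewhere in the header are left alone. */
	struct wire_hdr_new { u8 op; u32 len; } __packed;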
+ */ +static_assert(offsetof(struct CommandList, refcount) % __alignof__(atomic_t) == 0); + /* Max S/G elements in I/O accelerator command */ #define IOACCEL1_MAXSGENTRIES 24 #define IOACCEL2_MAXSGENTRIES 28 @@ -489,7 +502,7 @@ struct io_accel1_cmd { __le64 host_addr; /* 0x70 - 0x77 */ u8 CISS_LUN[8]; /* 0x78 - 0x7F */ struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES]; -} __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT); +} __packed __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT); #define IOACCEL1_FUNCTION_SCSIIO 0x00 #define IOACCEL1_SGLOFFSET 32 @@ -519,7 +532,7 @@ struct ioaccel2_sg_element { u8 chain_indicator; #define IOACCEL2_CHAIN 0x80 #define IOACCEL2_LAST_SG 0x40 -}; +} __packed; /* * SCSI Response Format structure for IO Accelerator Mode 2 @@ -559,7 +572,7 @@ struct io_accel2_scsi_response { u8 sense_data_len; /* sense/response data length */ u8 resid_cnt[4]; /* residual count */ u8 sense_data_buff[32]; /* sense/response data buffer */ -}; +} __packed; /* * Structure for I/O accelerator (mode 2 or m2) commands. @@ -592,7 +605,7 @@ struct io_accel2_cmd { __le32 tweak_upper; /* Encryption tweak, upper 4 bytes */ struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES]; struct io_accel2_scsi_response error_data; -} __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT); +} __packed __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT); /* * defines for Mode 2 command struct @@ -618,7 +631,7 @@ struct hpsa_tmf_struct { __le64 abort_tag; /* cciss tag of SCSI cmd or TMF to abort */ __le64 error_ptr; /* Error Pointer */ __le32 error_len; /* Error Length */ -} __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT); +} __packed __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT); /* Configuration Table Structure */ struct HostWrite { @@ -626,7 +639,7 @@ struct HostWrite { __le32 command_pool_addr_hi; __le32 CoalIntDelay; __le32 CoalIntCount; -}; +} __packed; #define SIMPLE_MODE 0x02 #define PERFORMANT_MODE 0x04 @@ -675,7 +688,7 @@ struct CfgTable { #define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE (1 << 30) #define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE (1 << 31) __le32 clear_event_notify; -}; +} __packed; #define NUM_BLOCKFETCH_ENTRIES 8 struct TransTable_struct { @@ -686,14 +699,14 @@ struct TransTable_struct { __le32 RepQCtrAddrHigh32; #define MAX_REPLY_QUEUES 64 struct vals32 RepQAddr[MAX_REPLY_QUEUES]; -}; +} __packed; struct hpsa_pci_info { unsigned char bus; unsigned char dev_fn; unsigned short domain; u32 board_id; -}; +} __packed; struct bmic_identify_controller { u8 configured_logical_drive_count; /* offset 0 */ @@ -702,7 +715,7 @@ struct bmic_identify_controller { u8 pad2[136]; u8 controller_mode; /* offset 292 */ u8 pad3[32]; -}; +} __packed; struct bmic_identify_physical_device { @@ -845,7 +858,7 @@ struct bmic_identify_physical_device { u8 max_link_rate[256]; u8 neg_phys_link_rate[256]; u8 box_conn_name[8]; -} __attribute((aligned(512))); +} __packed __attribute((aligned(512))); struct bmic_sense_subsystem_info { u8 primary_slot_number; @@ -858,7 +871,7 @@ struct bmic_sense_subsystem_info { u8 secondary_array_serial_number[32]; u8 secondary_cache_serial_number[32]; u8 pad[332]; -}; +} __packed; struct bmic_sense_storage_box_params { u8 reserved[36]; @@ -870,7 +883,6 @@ struct bmic_sense_storage_box_params { u8 reserver_3[84]; u8 phys_connector[2]; u8 reserved_4[296]; -}; +} __packed; -#pragma pack() #endif /* HPSA_CMD_H */ diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index f140eafc0d6a..61831f2fdb30 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -2372,6 +2372,24 @@ 
static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device) } /** + * ibmvfc_event_is_free - Check if event is free or not + * @evt: ibmvfc event struct + * + * Returns: + * true / false + **/ +static bool ibmvfc_event_is_free(struct ibmvfc_event *evt) +{ + struct ibmvfc_event *loop_evt; + + list_for_each_entry(loop_evt, &evt->queue->free, queue_list) + if (loop_evt == evt) + return true; + + return false; +} + +/** * ibmvfc_wait_for_ops - Wait for ops to complete * @vhost: ibmvfc host struct * @device: device to match (starget or sdev) @@ -2385,35 +2403,58 @@ static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device, { struct ibmvfc_event *evt; DECLARE_COMPLETION_ONSTACK(comp); - int wait; + int wait, i, q_index, q_size; unsigned long flags; signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ; + struct ibmvfc_queue *queues; ENTER; + if (vhost->mq_enabled && vhost->using_channels) { + queues = vhost->scsi_scrqs.scrqs; + q_size = vhost->scsi_scrqs.active_queues; + } else { + queues = &vhost->crq; + q_size = 1; + } + do { wait = 0; - spin_lock_irqsave(&vhost->crq.l_lock, flags); - list_for_each_entry(evt, &vhost->crq.sent, queue_list) { - if (match(evt, device)) { - evt->eh_comp = &comp; - wait++; + spin_lock_irqsave(vhost->host->host_lock, flags); + for (q_index = 0; q_index < q_size; q_index++) { + spin_lock(&queues[q_index].l_lock); + for (i = 0; i < queues[q_index].evt_pool.size; i++) { + evt = &queues[q_index].evt_pool.events[i]; + if (!ibmvfc_event_is_free(evt)) { + if (match(evt, device)) { + evt->eh_comp = &comp; + wait++; + } + } } + spin_unlock(&queues[q_index].l_lock); } - spin_unlock_irqrestore(&vhost->crq.l_lock, flags); + spin_unlock_irqrestore(vhost->host->host_lock, flags); if (wait) { timeout = wait_for_completion_timeout(&comp, timeout); if (!timeout) { wait = 0; - spin_lock_irqsave(&vhost->crq.l_lock, flags); - list_for_each_entry(evt, &vhost->crq.sent, queue_list) { - if (match(evt, device)) { - evt->eh_comp = NULL; - wait++; + spin_lock_irqsave(vhost->host->host_lock, flags); + for (q_index = 0; q_index < q_size; q_index++) { + spin_lock(&queues[q_index].l_lock); + for (i = 0; i < queues[q_index].evt_pool.size; i++) { + evt = &queues[q_index].evt_pool.events[i]; + if (!ibmvfc_event_is_free(evt)) { + if (match(evt, device)) { + evt->eh_comp = NULL; + wait++; + } + } } + spin_unlock(&queues[q_index].l_lock); } - spin_unlock_irqrestore(&vhost->crq.l_lock, flags); + spin_unlock_irqrestore(vhost->host->host_lock, flags); if (wait) dev_err(vhost->dev, "Timed out waiting for aborted commands\n"); LEAVE; diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 04633e5157e9..4834219497ee 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -3179,9 +3179,10 @@ fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn) } } -static void iscsi_start_session_recovery(struct iscsi_session *session, - struct iscsi_conn *conn, int flag) +void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) { + struct iscsi_conn *conn = cls_conn->dd_data; + struct iscsi_session *session = conn->session; int old_stop_stage; mutex_lock(&session->eh_mutex); @@ -3239,27 +3240,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, spin_unlock_bh(&session->frwd_lock); mutex_unlock(&session->eh_mutex); } - -void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) -{ - struct iscsi_conn *conn = cls_conn->dd_data; - struct iscsi_session *session = conn->session; - - switch (flag) { - case STOP_CONN_RECOVER: -
cls_conn->state = ISCSI_CONN_FAILED; - break; - case STOP_CONN_TERM: - cls_conn->state = ISCSI_CONN_DOWN; - break; - default: - iscsi_conn_printk(KERN_ERR, conn, - "invalid stop flag %d\n", flag); - return; - } - - iscsi_start_session_recovery(session, conn, flag); -} EXPORT_SYMBOL_GPL(iscsi_conn_stop); int iscsi_conn_bind(struct iscsi_cls_session *cls_session, diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 024e5a550759..8b9a39077dba 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -201,18 +201,17 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len); task->total_xfer_len = qc->nbytes; task->num_scatter = qc->n_elem; + task->data_dir = qc->dma_dir; + } else if (qc->tf.protocol == ATA_PROT_NODATA) { + task->data_dir = DMA_NONE; } else { for_each_sg(qc->sg, sg, qc->n_elem, si) xfer += sg_dma_len(sg); task->total_xfer_len = xfer; task->num_scatter = si; - } - - if (qc->tf.protocol == ATA_PROT_NODATA) - task->data_dir = DMA_NONE; - else task->data_dir = qc->dma_dir; + } task->scatter = qc->sg; task->ata_task.retry_count = 1; task->task_state_flags = SAS_TASK_STATE_PENDING; diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index ac066f86bb14..ac0eef975f17 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -7806,14 +7806,18 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) ioc->pend_os_device_add_sz++; ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz, GFP_KERNEL); - if (!ioc->pend_os_device_add) + if (!ioc->pend_os_device_add) { + r = -ENOMEM; goto out_free_resources; + } ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz; ioc->device_remove_in_progress = kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL); - if (!ioc->device_remove_in_progress) + if (!ioc->device_remove_in_progress) { + r = -ENOMEM; goto out_free_resources; + } ioc->fwfault_debug = mpt3sas_fwfault_debug; diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index 49bf2f70a470..31e5455d280c 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -223,7 +223,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) PM8001_EVENT_LOG_SIZE; pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option = 0x01; pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt = 0x01; - for (i = 0; i < PM8001_MAX_INB_NUM; i++) { + for (i = 0; i < pm8001_ha->max_q_num; i++) { pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30); pm8001_ha->inbnd_q_tbl[i].upper_base_addr = @@ -249,7 +249,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) pm8001_ha->inbnd_q_tbl[i].producer_idx = 0; pm8001_ha->inbnd_q_tbl[i].consumer_index = 0; } - for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) { + for (i = 0; i < pm8001_ha->max_q_num; i++) { pm8001_ha->outbnd_q_tbl[i].element_size_cnt = PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30); pm8001_ha->outbnd_q_tbl[i].upper_base_addr = @@ -671,9 +671,9 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha) read_outbnd_queue_table(pm8001_ha); /* update main config table ,inbound table and outbound table */ update_main_config_table(pm8001_ha); - for (i = 0; i < PM8001_MAX_INB_NUM; i++) + for (i = 0; i < pm8001_ha->max_q_num; i++) update_inbnd_queue_table(pm8001_ha, i); - for (i = 0; i < 
PM8001_MAX_OUTB_NUM; i++) + for (i = 0; i < pm8001_ha->max_q_num; i++) update_outbnd_queue_table(pm8001_ha, i); /* 8081 controller donot require these operations */ if (deviceid != 0x8081 && deviceid != 0x0042) { diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index 47ad64b06623..69c5b5ee2169 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -1675,6 +1675,7 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi) if (!qedi->global_queues[i]) { QEDI_ERR(&qedi->dbg_ctx, "Unable to allocation global queue %d.\n", i); + status = -ENOMEM; goto mem_alloc_failure; } diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index c48daf52725d..480e7d2dcf3e 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -3222,8 +3222,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) || (cmd->sess && cmd->sess->deleted)) { cmd->state = QLA_TGT_STATE_PROCESSED; - res = 0; - goto free; + return 0; } ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018, @@ -3234,8 +3233,9 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, &full_req_cnt); - if (unlikely(res != 0)) - goto free; + if (unlikely(res != 0)) { + return res; + } spin_lock_irqsave(qpair->qp_lock_ptr, flags); @@ -3255,8 +3255,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, vha->flags.online, qla2x00_reset_active(vha), cmd->reset_count, qpair->chip_reset); spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); - res = 0; - goto free; + return 0; } /* Does F/W have an IOCBs for this request */ @@ -3359,8 +3358,6 @@ out_unmap_unlock: qlt_unmap_sg(vha, cmd); spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); -free: - vha->hw->tgt.tgt_ops->free_cmd(cmd); return res; } EXPORT_SYMBOL(qlt_xmit_response); diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index b55fc768a2a7..8b4890cdd4ca 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -644,7 +644,6 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) { struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); - struct scsi_qla_host *vha = cmd->vha; if (cmd->aborted) { /* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task @@ -657,7 +656,6 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) cmd->se_cmd.transport_state, cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags); - vha->hw->tgt.tgt_ops->free_cmd(cmd); return 0; } @@ -685,7 +683,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd) { struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); - struct scsi_qla_host *vha = cmd->vha; int xmit_type = QLA_TGT_XMIT_STATUS; if (cmd->aborted) { @@ -699,7 +696,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd) cmd, kref_read(&cmd->se_cmd.cmd_kref), cmd->se_cmd.transport_state, cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags); - vha->hw->tgt.tgt_ops->free_cmd(cmd); return 0; } cmd->bufflen = se_cmd->data_length; diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 91074fd97f64..441f0152193f 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -2474,9 +2474,22 @@ static void iscsi_if_stop_conn(struct iscsi_cls_conn *conn, int flag) * it works. 
*/ mutex_lock(&conn_mutex); + switch (flag) { + case STOP_CONN_RECOVER: + conn->state = ISCSI_CONN_FAILED; + break; + case STOP_CONN_TERM: + conn->state = ISCSI_CONN_DOWN; + break; + default: + iscsi_cls_conn_printk(KERN_ERR, conn, + "invalid stop flag %d\n", flag); + goto unlock; + } + conn->transport->stop_conn(conn, flag); +unlock: mutex_unlock(&conn_mutex); - } static void stop_conn_work_fn(struct work_struct *work) @@ -2901,6 +2914,13 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev) default: err = transport->set_param(conn, ev->u.set_param.param, data, ev->u.set_param.len); + if ((conn->state == ISCSI_CONN_BOUND) || + (conn->state == ISCSI_CONN_UP)) { + err = transport->set_param(conn, ev->u.set_param.param, + data, ev->u.set_param.len); + } else { + return -ENOTCONN; + } } return err; @@ -2960,6 +2980,7 @@ static int iscsi_if_ep_disconnect(struct iscsi_transport *transport, mutex_lock(&conn->ep_mutex); conn->ep = NULL; mutex_unlock(&conn->ep_mutex); + conn->state = ISCSI_CONN_FAILED; } transport->ep_disconnect(ep); @@ -3727,6 +3748,8 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) ev->r.retcode = transport->bind_conn(session, conn, ev->u.b_conn.transport_eph, ev->u.b_conn.is_leading); + if (!ev->r.retcode) + conn->state = ISCSI_CONN_BOUND; mutex_unlock(&conn_mutex); if (ev->r.retcode || !transport->ep_connect) @@ -3966,7 +3989,8 @@ iscsi_conn_attr(local_ipaddr, ISCSI_PARAM_LOCAL_IPADDR); static const char *const connection_state_names[] = { [ISCSI_CONN_UP] = "up", [ISCSI_CONN_DOWN] = "down", - [ISCSI_CONN_FAILED] = "failed" + [ISCSI_CONN_FAILED] = "failed", + [ISCSI_CONN_BOUND] = "bound" }; static ssize_t show_conn_state(struct device *dev, diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c index 1e939a2a387f..98a34ed10f1a 100644 --- a/drivers/scsi/scsi_transport_srp.c +++ b/drivers/scsi/scsi_transport_srp.c @@ -541,7 +541,7 @@ int srp_reconnect_rport(struct srp_rport *rport) res = mutex_lock_interruptible(&rport->mutex); if (res) goto out; - if (rport->state != SRP_RPORT_FAIL_FAST) + if (rport->state != SRP_RPORT_FAIL_FAST && rport->state != SRP_RPORT_LOST) /* * sdev state must be SDEV_TRANSPORT_OFFLINE, transition * to SDEV_BLOCK is illegal. Calling scsi_target_unblock() diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index c86760788c72..d3d05e997c13 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -6386,37 +6386,34 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, DECLARE_COMPLETION_ONSTACK(wait); struct request *req; unsigned long flags; - int free_slot, task_tag, err; + int task_tag, err; /* - * Get free slot, sleep if slots are unavailable. - * Even though we use wait_event() which sleeps indefinitely, - * the maximum wait time is bounded by %TM_CMD_TIMEOUT. + * blk_get_request() is used here only to get a free tag. 
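A note on the design choice in the ufshcd hunk that follows: instead of tracking task-management slots with a driver-private free_slot variable and wait_event(), the driver lets blk-mq hand out tags, on the assumption that the TM request queue's depth matches the number of hardware slots. A stripped-down sketch of borrowing a tag this way (hypothetical helper, same API calls as the hunk):

	static int example_with_borrowed_tag(struct request_queue *q)
	{
		struct request *req;
		int tag;

		req = blk_get_request(q, REQ_OP_DRV_OUT, 0);	/* may sleep for a free tag */
		if (IS_ERR(req))
			return PTR_ERR(req);

		blk_mq_start_request(req);	/* mark the tag in flight */
		tag = req->tag;			/* use as the hardware slot index */

		/* ... program the controller slot "tag" and wait for completion ... */

		blk_put_request(req);		/* hand the tag back to blk-mq */
		return 0;
	}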
*/ req = blk_get_request(q, REQ_OP_DRV_OUT, 0); if (IS_ERR(req)) return PTR_ERR(req); req->end_io_data = &wait; - free_slot = req->tag; - WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs); ufshcd_hold(hba, false); spin_lock_irqsave(host->host_lock, flags); - task_tag = hba->nutrs + free_slot; + blk_mq_start_request(req); + task_tag = req->tag; treq->req_header.dword_0 |= cpu_to_be32(task_tag); - memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq)); - ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function); + memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq)); + ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function); /* send command to the controller */ - __set_bit(free_slot, &hba->outstanding_tasks); + __set_bit(task_tag, &hba->outstanding_tasks); /* Make sure descriptors are ready before ringing the task doorbell */ wmb(); - ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL); + ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL); /* Make sure that doorbell is committed immediately */ wmb(); @@ -6436,24 +6433,24 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR); dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n", __func__, tm_function); - if (ufshcd_clear_tm_cmd(hba, free_slot)) - dev_WARN(hba->dev, "%s: unable clear tm cmd (slot %d) after timeout\n", - __func__, free_slot); + if (ufshcd_clear_tm_cmd(hba, task_tag)) + dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n", + __func__, task_tag); err = -ETIMEDOUT; } else { err = 0; - memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq)); + memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq)); ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP); } spin_lock_irqsave(hba->host->host_lock, flags); - __clear_bit(free_slot, &hba->outstanding_tasks); + __clear_bit(task_tag, &hba->outstanding_tasks); spin_unlock_irqrestore(hba->host->host_lock, flags); + ufshcd_release(hba); blk_put_request(req); - ufshcd_release(hba); return err; } diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c index a14684ffe4c1..ca4f4ca413f1 100644 --- a/drivers/sh/intc/core.c +++ b/drivers/sh/intc/core.c @@ -179,6 +179,21 @@ static unsigned int __init save_reg(struct intc_desc_int *d, return 0; } +static bool __init intc_map(struct irq_domain *domain, int irq) +{ + if (!irq_to_desc(irq) && irq_alloc_desc_at(irq, NUMA_NO_NODE) != irq) { + pr_err("unable to allocate IRQ %d\n", irq); + return false; + } + + if (irq_domain_associate(domain, irq, irq)) { + pr_err("domain association failure\n"); + return false; + } + + return true; +} + int __init register_intc_controller(struct intc_desc *desc) { unsigned int i, k, smp; @@ -311,24 +326,12 @@ int __init register_intc_controller(struct intc_desc *desc) for (i = 0; i < hw->nr_vectors; i++) { struct intc_vect *vect = hw->vectors + i; unsigned int irq = evt2irq(vect->vect); - int res; if (!vect->enum_id) continue; - res = irq_create_identity_mapping(d->domain, irq); - if (unlikely(res)) { - if (res == -EEXIST) { - res = irq_domain_associate(d->domain, irq, irq); - if (unlikely(res)) { - pr_err("domain association failure\n"); - continue; - } - } else { - pr_err("can't identity map IRQ %d\n", irq); - continue; - } - } + if (!intc_map(d->domain, irq)) + continue; intc_irq_xlate_set(irq, vect->enum_id, d); intc_register_irq(desc, d, vect->enum_id, irq); @@ -345,22 +348,8 @@ int __init register_intc_controller(struct intc_desc *desc) * IRQ support, each vector still
needs to have * its own backing irq_desc. */ - res = irq_create_identity_mapping(d->domain, irq2); - if (unlikely(res)) { - if (res == -EEXIST) { - res = irq_domain_associate(d->domain, - irq2, irq2); - if (unlikely(res)) { - pr_err("domain association " - "failure\n"); - continue; - } - } else { - pr_err("can't identity map IRQ %d\n", - irq); - continue; - } - } + if (!intc_map(d->domain, irq2)) + continue; vect2->enum_id = 0; diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index a1b9be1d105a..fde4edd83c14 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c @@ -186,7 +186,7 @@ struct qm_eqcr_entry { __be32 tag; struct qm_fd fd; u8 __reserved3[32]; -} __packed; +} __packed __aligned(8); #define QM_EQCR_VERB_VBIT 0x80 #define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value; */ #define QM_EQCR_VERB_CMD_ENQUEUE 0x01 diff --git a/drivers/soc/litex/litex_soc_ctrl.c b/drivers/soc/litex/litex_soc_ctrl.c index 6268bfa7f0d6..c3e379a990f2 100644 --- a/drivers/soc/litex/litex_soc_ctrl.c +++ b/drivers/soc/litex/litex_soc_ctrl.c @@ -13,7 +13,6 @@ #include <linux/platform_device.h> #include <linux/printk.h> #include <linux/module.h> -#include <linux/errno.h> #include <linux/io.h> #include <linux/reboot.h> diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c index f42954e2c98e..5bdfb1565c14 100644 --- a/drivers/soc/qcom/qcom-geni-se.c +++ b/drivers/soc/qcom/qcom-geni-se.c @@ -3,7 +3,6 @@ #include <linux/acpi.h> #include <linux/clk.h> -#include <linux/console.h> #include <linux/slab.h> #include <linux/dma-mapping.h> #include <linux/io.h> @@ -92,14 +91,11 @@ struct geni_wrapper { struct device *dev; void __iomem *base; struct clk_bulk_data ahb_clks[NUM_AHB_CLKS]; - struct geni_icc_path to_core; }; static const char * const icc_path_names[] = {"qup-core", "qup-config", "qup-memory"}; -static struct geni_wrapper *earlycon_wrapper; - #define QUP_HW_VER_REG 0x4 /* Common SE registers */ @@ -760,6 +756,9 @@ int geni_icc_get(struct geni_se *se, const char *icc_ddr) int i, err; const char *icc_names[] = {"qup-core", "qup-config", icc_ddr}; + if (has_acpi_companion(se->dev)) + return 0; + for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) { if (!icc_names[i]) continue; @@ -843,44 +842,11 @@ int geni_icc_disable(struct geni_se *se) } EXPORT_SYMBOL(geni_icc_disable); -void geni_remove_earlycon_icc_vote(void) -{ - struct platform_device *pdev; - struct geni_wrapper *wrapper; - struct device_node *parent; - struct device_node *child; - - if (!earlycon_wrapper) - return; - - wrapper = earlycon_wrapper; - parent = of_get_next_parent(wrapper->dev->of_node); - for_each_child_of_node(parent, child) { - if (!of_device_is_compatible(child, "qcom,geni-se-qup")) - continue; - - pdev = of_find_device_by_node(child); - if (!pdev) - continue; - - wrapper = platform_get_drvdata(pdev); - icc_put(wrapper->to_core.path); - wrapper->to_core.path = NULL; - - } - of_node_put(parent); - - earlycon_wrapper = NULL; -} -EXPORT_SYMBOL(geni_remove_earlycon_icc_vote); - static int geni_se_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct resource *res; struct geni_wrapper *wrapper; - struct console __maybe_unused *bcon; - bool __maybe_unused has_earlycon = false; int ret; wrapper = devm_kzalloc(dev, sizeof(*wrapper), GFP_KERNEL); @@ -903,43 +869,6 @@ static int geni_se_probe(struct platform_device *pdev) } } -#ifdef CONFIG_SERIAL_EARLYCON - for_each_console(bcon) { - if (!strcmp(bcon->name, "qcom_geni")) { - has_earlycon = true; - break; - } - 
} - if (!has_earlycon) - goto exit; - - wrapper->to_core.path = devm_of_icc_get(dev, "qup-core"); - if (IS_ERR(wrapper->to_core.path)) - return PTR_ERR(wrapper->to_core.path); - /* - * Put minmal BW request on core clocks on behalf of early console. - * The vote will be removed earlycon exit function. - * - * Note: We are putting vote on each QUP wrapper instead only to which - * earlycon is connected because QUP core clock of different wrapper - * share same voltage domain. If core1 is put to 0, then core2 will - * also run at 0, if not voted. Default ICC vote will be removed ASA - * we touch any of the core clock. - * core1 = core2 = max(core1, core2) - */ - ret = icc_set_bw(wrapper->to_core.path, GENI_DEFAULT_BW, - GENI_DEFAULT_BW); - if (ret) { - dev_err(&pdev->dev, "%s: ICC BW voting failed for core: %d\n", - __func__, ret); - return ret; - } - - if (of_get_compatible_child(pdev->dev.of_node, "qcom,geni-debug-uart")) - earlycon_wrapper = wrapper; - of_node_put(pdev->dev.of_node); -exit: -#endif dev_set_drvdata(dev, wrapper); dev_dbg(dev, "GENI SE Driver probed\n"); return devm_of_platform_populate(dev); diff --git a/drivers/soc/ti/omap_prm.c b/drivers/soc/ti/omap_prm.c index bf1468e5bccb..51143a68a889 100644 --- a/drivers/soc/ti/omap_prm.c +++ b/drivers/soc/ti/omap_prm.c @@ -332,7 +332,7 @@ static const struct omap_prm_data dra7_prm_data[] = { { .name = "l3init", .base = 0x4ae07300, .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon, - .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012, + .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01, .clkdm_name = "pcie" }, { @@ -830,8 +830,12 @@ static int omap_reset_deassert(struct reset_controller_dev *rcdev, reset->prm->data->name, id); exit: - if (reset->clkdm) + if (reset->clkdm) { + /* At least dra7 iva needs a delay before clkdm idle */ + if (has_rstst) + udelay(1); pdata->clkdm_allow_idle(reset->clkdm); + } return ret; } diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h index b84f00b8d18b..4cabaf21c1ca 100644 --- a/drivers/staging/rtl8192e/rtllib.h +++ b/drivers/staging/rtl8192e/rtllib.h @@ -1105,7 +1105,7 @@ struct rtllib_network { bool bWithAironetIE; bool bCkipSupported; bool bCcxRmEnable; - u16 CcxRmState[2]; + u8 CcxRmState[2]; bool bMBssidValid; u8 MBssidMask; u8 MBssid[ETH_ALEN]; diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c index 66c135321da4..15bbb63ca130 100644 --- a/drivers/staging/rtl8192e/rtllib_rx.c +++ b/drivers/staging/rtl8192e/rtllib_rx.c @@ -1967,7 +1967,7 @@ static void rtllib_parse_mife_generic(struct rtllib_device *ieee, info_element->data[2] == 0x96 && info_element->data[3] == 0x01) { if (info_element->len == 6) { - memcpy(network->CcxRmState, &info_element[4], 2); + memcpy(network->CcxRmState, &info_element->data[4], 2); if (network->CcxRmState[0] != 0) network->bCcxRmEnable = true; else diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index d0e7ed8f28cc..e5c443bfbdf9 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -1166,6 +1166,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, target_get_sess_cmd(&cmd->se_cmd, true); + cmd->se_cmd.tag = (__force u32)cmd->init_task_tag; cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, hdr->cdb); if (cmd->sense_reason) { if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) { @@ -1180,8 +1181,6 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, if 
(cmd->sense_reason) goto attach_cmd; - /* only used for printks or comparing with ->ref_task_tag */ - cmd->se_cmd.tag = (__force u32)cmd->init_task_tag; cmd->sense_reason = target_cmd_parse_cdb(&cmd->se_cmd); if (cmd->sense_reason) goto attach_cmd; diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 3cbc074992bc..9ee797b8cb7e 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -882,7 +882,6 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, if (!bio) { new_bio: nr_vecs = bio_max_segs(nr_pages); - nr_pages -= nr_vecs; /* * Calls bio_kmalloc() and sets bio->bi_end_io() */ @@ -939,6 +938,14 @@ new_bio: return 0; fail: + if (bio) + bio_put(bio); + while (req->bio) { + bio = req->bio; + req->bio = bio->bi_next; + bio_put(bio); + } + req->biotail = NULL; return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c index 620bcf586ee2..c44fad2b9fbb 100644 --- a/drivers/thunderbolt/retimer.c +++ b/drivers/thunderbolt/retimer.c @@ -347,7 +347,7 @@ static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status) ret = tb_retimer_nvm_add(rt); if (ret) { dev_err(&rt->dev, "failed to add NVM devices: %d\n", ret); - device_del(&rt->dev); + device_unregister(&rt->dev); return ret; } @@ -406,7 +406,7 @@ static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index) */ int tb_retimer_scan(struct tb_port *port) { - u32 status[TB_MAX_RETIMER_INDEX] = {}; + u32 status[TB_MAX_RETIMER_INDEX + 1] = {}; int ret, i, last_idx = 0; if (!port->cap_usb4) diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index 291649f02821..0d85b55ea823 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -1177,12 +1177,6 @@ static inline void qcom_geni_serial_enable_early_read(struct geni_se *se, struct console *con) { } #endif -static int qcom_geni_serial_earlycon_exit(struct console *con) -{ - geni_remove_earlycon_icc_vote(); - return 0; -} - static struct qcom_geni_private_data earlycon_private_data; static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev, @@ -1233,7 +1227,6 @@ static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev, writel(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN); dev->con->write = qcom_geni_serial_earlycon_write; - dev->con->exit = qcom_geni_serial_earlycon_exit; dev->con->setup = NULL; qcom_geni_serial_enable_early_read(&se, dev->con); diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c index f2ebbacd932e..d7d4bdd57f46 100644 --- a/drivers/usb/cdns3/cdnsp-gadget.c +++ b/drivers/usb/cdns3/cdnsp-gadget.c @@ -1128,6 +1128,10 @@ static int cdnsp_gadget_ep_dequeue(struct usb_ep *ep, return -ESHUTDOWN; } + /* Requests have been dequeued while disabling the endpoint.
*/ + if (!(pep->ep_state & EP_ENABLED)) + return 0; + spin_lock_irqsave(&pdev->lock, flags); ret = cdnsp_ep_dequeue(pep, to_cdnsp_request(request)); spin_unlock_irqrestore(&pdev->lock, flags); diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 39ddb5585ded..3fda1ec961d7 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -147,17 +147,29 @@ static inline int acm_set_control(struct acm *acm, int control) #define acm_send_break(acm, ms) \ acm_ctrl_msg(acm, USB_CDC_REQ_SEND_BREAK, ms, NULL, 0) -static void acm_kill_urbs(struct acm *acm) +static void acm_poison_urbs(struct acm *acm) { int i; - usb_kill_urb(acm->ctrlurb); + usb_poison_urb(acm->ctrlurb); for (i = 0; i < ACM_NW; i++) - usb_kill_urb(acm->wb[i].urb); + usb_poison_urb(acm->wb[i].urb); for (i = 0; i < acm->rx_buflimit; i++) - usb_kill_urb(acm->read_urbs[i]); + usb_poison_urb(acm->read_urbs[i]); +} + +static void acm_unpoison_urbs(struct acm *acm) +{ + int i; + + for (i = 0; i < acm->rx_buflimit; i++) + usb_unpoison_urb(acm->read_urbs[i]); + for (i = 0; i < ACM_NW; i++) + usb_unpoison_urb(acm->wb[i].urb); + usb_unpoison_urb(acm->ctrlurb); } + /* * Write buffer management. * All of these assume proper locks taken by the caller. @@ -226,9 +238,10 @@ static int acm_start_wb(struct acm *acm, struct acm_wb *wb) rc = usb_submit_urb(wb->urb, GFP_ATOMIC); if (rc < 0) { - dev_err(&acm->data->dev, - "%s - usb_submit_urb(write bulk) failed: %d\n", - __func__, rc); + if (rc != -EPERM) + dev_err(&acm->data->dev, + "%s - usb_submit_urb(write bulk) failed: %d\n", + __func__, rc); acm_write_done(acm, wb); } return rc; @@ -313,8 +326,10 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf) acm->iocount.dsr++; if (difference & ACM_CTRL_DCD) acm->iocount.dcd++; - if (newctrl & ACM_CTRL_BRK) + if (newctrl & ACM_CTRL_BRK) { acm->iocount.brk++; + tty_insert_flip_char(&acm->port, 0, TTY_BREAK); + } if (newctrl & ACM_CTRL_RI) acm->iocount.rng++; if (newctrl & ACM_CTRL_FRAMING) @@ -480,11 +495,6 @@ static void acm_read_bulk_callback(struct urb *urb) dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n", rb->index, urb->actual_length, status); - if (!acm->dev) { - dev_dbg(&acm->data->dev, "%s - disconnected\n", __func__); - return; - } - switch (status) { case 0: usb_mark_last_busy(acm->dev); @@ -649,7 +659,8 @@ static void acm_port_dtr_rts(struct tty_port *port, int raise) res = acm_set_control(acm, val); if (res && (acm->ctrl_caps & USB_CDC_CAP_LINE)) - dev_err(&acm->control->dev, "failed to set dtr/rts\n"); + /* This is broken in too many devices to spam the logs */ + dev_dbg(&acm->control->dev, "failed to set dtr/rts\n"); } static int acm_port_activate(struct tty_port *port, struct tty_struct *tty) @@ -731,6 +742,7 @@ static void acm_port_shutdown(struct tty_port *port) * Need to grab write_lock to prevent race with resume, but no need to * hold it due to the tty-port initialised flag. 
*/ + acm_poison_urbs(acm); spin_lock_irq(&acm->write_lock); spin_unlock_irq(&acm->write_lock); @@ -747,7 +759,8 @@ static void acm_port_shutdown(struct tty_port *port) usb_autopm_put_interface_async(acm->control); } - acm_kill_urbs(acm); + acm_unpoison_urbs(acm); + } static void acm_tty_cleanup(struct tty_struct *tty) @@ -1296,13 +1309,6 @@ skip_normal_probe: if (!combined_interfaces && intf != control_interface) return -ENODEV; - if (!combined_interfaces && usb_interface_claimed(data_interface)) { - /* valid in this context */ - dev_dbg(&intf->dev, "The data interface isn't available\n"); - return -EBUSY; - } - - if (data_interface->cur_altsetting->desc.bNumEndpoints < 2 || control_interface->cur_altsetting->desc.bNumEndpoints == 0) return -EINVAL; @@ -1323,8 +1329,8 @@ made_compressed_probe: dev_dbg(&intf->dev, "interfaces are valid\n"); acm = kzalloc(sizeof(struct acm), GFP_KERNEL); - if (acm == NULL) - goto alloc_fail; + if (!acm) + return -ENOMEM; tty_port_init(&acm->port); acm->port.ops = &acm_port_ops; @@ -1341,7 +1347,7 @@ made_compressed_probe: minor = acm_alloc_minor(acm); if (minor < 0) - goto alloc_fail1; + goto err_put_port; acm->minor = minor; acm->dev = usb_dev; @@ -1372,15 +1378,15 @@ made_compressed_probe: buf = usb_alloc_coherent(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma); if (!buf) - goto alloc_fail1; + goto err_put_port; acm->ctrl_buffer = buf; if (acm_write_buffers_alloc(acm) < 0) - goto alloc_fail2; + goto err_free_ctrl_buffer; acm->ctrlurb = usb_alloc_urb(0, GFP_KERNEL); if (!acm->ctrlurb) - goto alloc_fail3; + goto err_free_write_buffers; for (i = 0; i < num_rx_buf; i++) { struct acm_rb *rb = &(acm->read_buffers[i]); @@ -1389,13 +1395,13 @@ made_compressed_probe: rb->base = usb_alloc_coherent(acm->dev, readsize, GFP_KERNEL, &rb->dma); if (!rb->base) - goto alloc_fail4; + goto err_free_read_urbs; rb->index = i; rb->instance = acm; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) - goto alloc_fail4; + goto err_free_read_urbs; urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; urb->transfer_dma = rb->dma; @@ -1416,8 +1422,8 @@ made_compressed_probe: struct acm_wb *snd = &(acm->wb[i]); snd->urb = usb_alloc_urb(0, GFP_KERNEL); - if (snd->urb == NULL) - goto alloc_fail5; + if (!snd->urb) + goto err_free_write_urbs; if (usb_endpoint_xfer_int(epwrite)) usb_fill_int_urb(snd->urb, usb_dev, acm->out, @@ -1435,7 +1441,7 @@ made_compressed_probe: i = device_create_file(&intf->dev, &dev_attr_bmCapabilities); if (i < 0) - goto alloc_fail5; + goto err_free_write_urbs; if (h.usb_cdc_country_functional_desc) { /* export the country data */ struct usb_cdc_country_functional_desc * cfd = @@ -1480,20 +1486,21 @@ skip_countries: acm->nb_index = 0; acm->nb_size = 0; - dev_info(&intf->dev, "ttyACM%d: USB ACM device\n", minor); - acm->line.dwDTERate = cpu_to_le32(9600); acm->line.bDataBits = 8; acm_set_line(acm, &acm->line); - usb_driver_claim_interface(&acm_driver, data_interface, acm); - usb_set_intfdata(data_interface, acm); + if (!acm->combined_interfaces) { + rv = usb_driver_claim_interface(&acm_driver, data_interface, acm); + if (rv) + goto err_remove_files; + } tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor, &control_interface->dev); if (IS_ERR(tty_dev)) { rv = PTR_ERR(tty_dev); - goto alloc_fail6; + goto err_release_data_interface; } if (quirks & CLEAR_HALT_CONDITIONS) { @@ -1501,32 +1508,39 @@ skip_countries: usb_clear_halt(usb_dev, acm->out); } + dev_info(&intf->dev, "ttyACM%d: USB ACM device\n", minor); + return 0; -alloc_fail6: + +err_release_data_interface: 
+ if (!acm->combined_interfaces) { + /* Clear driver data so that disconnect() returns early. */ + usb_set_intfdata(data_interface, NULL); + usb_driver_release_interface(&acm_driver, data_interface); + } +err_remove_files: if (acm->country_codes) { device_remove_file(&acm->control->dev, &dev_attr_wCountryCodes); device_remove_file(&acm->control->dev, &dev_attr_iCountryCodeRelDate); - kfree(acm->country_codes); } device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities); -alloc_fail5: - usb_set_intfdata(intf, NULL); +err_free_write_urbs: for (i = 0; i < ACM_NW; i++) usb_free_urb(acm->wb[i].urb); -alloc_fail4: +err_free_read_urbs: for (i = 0; i < num_rx_buf; i++) usb_free_urb(acm->read_urbs[i]); acm_read_buffers_free(acm); usb_free_urb(acm->ctrlurb); -alloc_fail3: +err_free_write_buffers: acm_write_buffers_free(acm); -alloc_fail2: +err_free_ctrl_buffer: usb_free_coherent(usb_dev, ctrlsize, acm->ctrl_buffer, acm->ctrl_dma); -alloc_fail1: +err_put_port: tty_port_put(&acm->port); -alloc_fail: + return rv; } @@ -1540,8 +1554,14 @@ static void acm_disconnect(struct usb_interface *intf) if (!acm) return; - mutex_lock(&acm->mutex); acm->disconnected = true; + /* + * there is a circular dependency. acm_softint() can resubmit + * the URBs in error handling so we need to block any + * submission right away + */ + acm_poison_urbs(acm); + mutex_lock(&acm->mutex); if (acm->country_codes) { device_remove_file(&acm->control->dev, &dev_attr_wCountryCodes); @@ -1560,7 +1580,6 @@ static void acm_disconnect(struct usb_interface *intf) tty_kref_put(tty); } - acm_kill_urbs(acm); cancel_delayed_work_sync(&acm->dwork); tty_unregister_device(acm_tty_driver, acm->minor); @@ -1602,7 +1621,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message) if (cnt) return 0; - acm_kill_urbs(acm); + acm_poison_urbs(acm); cancel_delayed_work_sync(&acm->dwork); acm->urbs_in_error_delay = 0; @@ -1615,6 +1634,7 @@ static int acm_resume(struct usb_interface *intf) struct urb *urb; int rv = 0; + acm_unpoison_urbs(acm); spin_lock_irq(&acm->write_lock); if (--acm->susp_count) diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 6ade3daf7858..76ac5d6555ae 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -498,6 +498,10 @@ static const struct usb_device_id usb_quirk_list[] = { /* DJI CineSSD */ { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM }, + /* Fibocom L850-GL LTE Modem */ + { USB_DEVICE(0x2cb7, 0x0007), .driver_info = + USB_QUIRK_IGNORE_REMOTE_WAKEUP }, + /* INTEL VALUE SSD */ { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index fc3269f5faf1..1a9789ec5847 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -4322,7 +4322,8 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd) if (hsotg->op_state == OTG_STATE_B_PERIPHERAL) goto unlock; - if (hsotg->params.power_down > DWC2_POWER_DOWN_PARAM_PARTIAL) + if (hsotg->params.power_down != DWC2_POWER_DOWN_PARAM_PARTIAL || + hsotg->flags.b.port_connect_status == 0) goto skip_power_saving; /* @@ -5398,7 +5399,7 @@ int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg) dwc2_writel(hsotg, hprt0, HPRT0); /* Wait for the HPRT0.PrtSusp register field to be set */ - if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 3000)) + if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 5000)) dev_warn(hsotg->dev, "Suspend wasn't generated\n"); /* diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c 
b/drivers/usb/dwc3/dwc3-pci.c index 3d3918a8d5fb..4c5c6972124a 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -120,6 +120,8 @@ static const struct property_entry dwc3_pci_intel_properties[] = { static const struct property_entry dwc3_pci_mrfld_properties[] = { PROPERTY_ENTRY_STRING("dr_mode", "otg"), PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"), + PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"), + PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"), PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"), {} }; diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c index fcaf04483ad0..3de291ab951a 100644 --- a/drivers/usb/dwc3/dwc3-qcom.c +++ b/drivers/usb/dwc3/dwc3-qcom.c @@ -244,6 +244,9 @@ static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom) struct device *dev = qcom->dev; int ret; + if (has_acpi_companion(dev)) + return 0; + qcom->icc_path_ddr = of_icc_get(dev, "usb-ddr"); if (IS_ERR(qcom->icc_path_ddr)) { dev_err(dev, "failed to get usb-ddr path: %ld\n", diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 4a337f348651..c7ef218e7a8c 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -791,10 +791,6 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) reg &= ~DWC3_DALEPENA_EP(dep->number); dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); - dep->stream_capable = false; - dep->type = 0; - dep->flags = 0; - /* Clear out the ep descriptors for non-ep0 */ if (dep->number > 1) { dep->endpoint.comp_desc = NULL; @@ -803,6 +799,10 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) dwc3_remove_requests(dwc, dep); + dep->stream_capable = false; + dep->type = 0; + dep->flags = 0; + return 0; } @@ -2083,7 +2083,7 @@ static void __dwc3_gadget_set_speed(struct dwc3 *dwc) u32 reg; speed = dwc->gadget_max_speed; - if (speed > dwc->maximum_speed) + if (speed == USB_SPEED_UNKNOWN || speed > dwc->maximum_speed) speed = dwc->maximum_speed; if (speed == USB_SPEED_SUPER_PLUS && @@ -2523,6 +2523,7 @@ static void dwc3_gadget_set_ssp_rate(struct usb_gadget *g, unsigned long flags; spin_lock_irqsave(&dwc->lock, flags); + dwc->gadget_max_speed = USB_SPEED_SUPER_PLUS; dwc->gadget_ssp_rate = rate; spin_unlock_irqrestore(&dwc->lock, flags); } diff --git a/drivers/usb/gadget/udc/amd5536udc_pci.c b/drivers/usb/gadget/udc/amd5536udc_pci.c index 8d387e0e4d91..c80f9bd51b75 100644 --- a/drivers/usb/gadget/udc/amd5536udc_pci.c +++ b/drivers/usb/gadget/udc/amd5536udc_pci.c @@ -153,6 +153,11 @@ static int udc_pci_probe( pci_set_master(pdev); pci_try_set_mwi(pdev); + dev->phys_addr = resource; + dev->irq = pdev->irq; + dev->pdev = pdev; + dev->dev = &pdev->dev; + /* init dma pools */ if (use_dma) { retval = init_dma_pools(dev); @@ -160,11 +165,6 @@ static int udc_pci_probe( goto err_dma; } - dev->phys_addr = resource; - dev->irq = pdev->irq; - dev->pdev = pdev; - dev->dev = &pdev->dev; - /* general probing */ if (udc_probe(dev)) { retval = -ENODEV; diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c index fe010cc61f19..2f27dc0d9c6b 100644 --- a/drivers/usb/host/xhci-mtk.c +++ b/drivers/usb/host/xhci-mtk.c @@ -397,6 +397,13 @@ static void xhci_mtk_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_SPURIOUS_SUCCESS; if (mtk->lpm_support) xhci->quirks |= XHCI_LPM_SUPPORT; + + /* + * MTK xHCI 0.96: PSA is 1 by default even if streams are not supported, + * and it is 3 when they are.
+ */ + if (xhci->hci_version < 0x100 && HCC_MAX_PSA(xhci->hcc_params) == 4) + xhci->quirks |= XHCI_BROKEN_STREAMS; } /* called during probe() after chip reset completes */ @@ -548,7 +555,8 @@ static int xhci_mtk_probe(struct platform_device *pdev) if (ret) goto put_usb3_hcd; - if (HCC_MAX_PSA(xhci->hcc_params) >= 4) + if (HCC_MAX_PSA(xhci->hcc_params) >= 4 && + !(xhci->quirks & XHCI_BROKEN_STREAMS)) xhci->shared_hcd->can_do_streams = 1; ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED); diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 1cd87729ba60..fc0457db62e1 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -2004,10 +2004,14 @@ static void musb_pm_runtime_check_session(struct musb *musb) MUSB_DEVCTL_HR; switch (devctl & ~s) { case MUSB_QUIRK_B_DISCONNECT_99: - musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n"); - schedule_delayed_work(&musb->irq_work, - msecs_to_jiffies(1000)); - break; + if (musb->quirk_retries && !musb->flush_irq_work) { + musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n"); + schedule_delayed_work(&musb->irq_work, + msecs_to_jiffies(1000)); + musb->quirk_retries--; + break; + } + fallthrough; case MUSB_QUIRK_B_INVALID_VBUS_91: if (musb->quirk_retries && !musb->flush_irq_work) { musb_dbg(musb, diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c index 8f1de1fbbeed..d8d3892e5a69 100644 --- a/drivers/usb/usbip/stub_dev.c +++ b/drivers/usb/usbip/stub_dev.c @@ -63,6 +63,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a dev_info(dev, "stub up\n"); + mutex_lock(&sdev->ud.sysfs_lock); spin_lock_irq(&sdev->ud.lock); if (sdev->ud.status != SDEV_ST_AVAILABLE) { @@ -87,13 +88,13 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a tcp_rx = kthread_create(stub_rx_loop, &sdev->ud, "stub_rx"); if (IS_ERR(tcp_rx)) { sockfd_put(socket); - return -EINVAL; + goto unlock_mutex; } tcp_tx = kthread_create(stub_tx_loop, &sdev->ud, "stub_tx"); if (IS_ERR(tcp_tx)) { kthread_stop(tcp_rx); sockfd_put(socket); - return -EINVAL; + goto unlock_mutex; } /* get task structs now */ @@ -112,6 +113,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a wake_up_process(sdev->ud.tcp_rx); wake_up_process(sdev->ud.tcp_tx); + mutex_unlock(&sdev->ud.sysfs_lock); + } else { dev_info(dev, "stub down\n"); @@ -122,6 +125,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a spin_unlock_irq(&sdev->ud.lock); usbip_event_add(&sdev->ud, SDEV_EVENT_DOWN); + mutex_unlock(&sdev->ud.sysfs_lock); } return count; @@ -130,6 +134,8 @@ sock_err: sockfd_put(socket); err: spin_unlock_irq(&sdev->ud.lock); +unlock_mutex: + mutex_unlock(&sdev->ud.sysfs_lock); return -EINVAL; } static DEVICE_ATTR_WO(usbip_sockfd); @@ -270,6 +276,7 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev) sdev->ud.side = USBIP_STUB; sdev->ud.status = SDEV_ST_AVAILABLE; spin_lock_init(&sdev->ud.lock); + mutex_init(&sdev->ud.sysfs_lock); sdev->ud.tcp_socket = NULL; sdev->ud.sockfd = -1; diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h index d60ce17d3dd2..ea2a20e6d27d 100644 --- a/drivers/usb/usbip/usbip_common.h +++ b/drivers/usb/usbip/usbip_common.h @@ -263,6 +263,9 @@ struct usbip_device { /* lock for status */ spinlock_t lock; + /* mutex for synchronizing sysfs store paths */ + struct mutex sysfs_lock; + int sockfd; struct socket *tcp_socket; diff --git 
a/drivers/usb/usbip/usbip_event.c b/drivers/usb/usbip/usbip_event.c index 5d88917c9631..086ca76dd053 100644 --- a/drivers/usb/usbip/usbip_event.c +++ b/drivers/usb/usbip/usbip_event.c @@ -70,6 +70,7 @@ static void event_handler(struct work_struct *work) while ((ud = get_event()) != NULL) { usbip_dbg_eh("pending event %lx\n", ud->event); + mutex_lock(&ud->sysfs_lock); /* * NOTE: shutdown must come first. * Shutdown the device. @@ -90,6 +91,7 @@ static void event_handler(struct work_struct *work) ud->eh_ops.unusable(ud); unset_event(ud, USBIP_EH_UNUSABLE); } + mutex_unlock(&ud->sysfs_lock); wake_up(&ud->eh_waitq); } diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index 3209b5ddd30c..4ba6bcdaa8e9 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c @@ -594,6 +594,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, pr_err("invalid port number %d\n", wIndex); goto error; } + if (wValue >= 32) + goto error; if (hcd->speed == HCD_USB3) { if ((vhci_hcd->port_status[rhport] & USB_SS_PORT_STAT_POWER) != 0) { @@ -1099,6 +1101,7 @@ static void vhci_device_init(struct vhci_device *vdev) vdev->ud.side = USBIP_VHCI; vdev->ud.status = VDEV_ST_NULL; spin_lock_init(&vdev->ud.lock); + mutex_init(&vdev->ud.sysfs_lock); INIT_LIST_HEAD(&vdev->priv_rx); INIT_LIST_HEAD(&vdev->priv_tx); diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c index c4b4256e5dad..e2847cd3e6e3 100644 --- a/drivers/usb/usbip/vhci_sysfs.c +++ b/drivers/usb/usbip/vhci_sysfs.c @@ -185,6 +185,8 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport) usbip_dbg_vhci_sysfs("enter\n"); + mutex_lock(&vdev->ud.sysfs_lock); + /* lock */ spin_lock_irqsave(&vhci->lock, flags); spin_lock(&vdev->ud.lock); @@ -195,6 +197,7 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport) /* unlock */ spin_unlock(&vdev->ud.lock); spin_unlock_irqrestore(&vhci->lock, flags); + mutex_unlock(&vdev->ud.sysfs_lock); return -EINVAL; } @@ -205,6 +208,8 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport) usbip_event_add(&vdev->ud, VDEV_EVENT_DOWN); + mutex_unlock(&vdev->ud.sysfs_lock); + return 0; } @@ -349,30 +354,36 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr, else vdev = &vhci->vhci_hcd_hs->vdev[rhport]; + mutex_lock(&vdev->ud.sysfs_lock); + /* Extract socket from fd. */ socket = sockfd_lookup(sockfd, &err); if (!socket) { dev_err(dev, "failed to lookup sock"); - return -EINVAL; + err = -EINVAL; + goto unlock_mutex; } if (socket->type != SOCK_STREAM) { dev_err(dev, "Expecting SOCK_STREAM - found %d", socket->type); sockfd_put(socket); - return -EINVAL; + err = -EINVAL; + goto unlock_mutex; } /* create threads before locking */ tcp_rx = kthread_create(vhci_rx_loop, &vdev->ud, "vhci_rx"); if (IS_ERR(tcp_rx)) { sockfd_put(socket); - return -EINVAL; + err = -EINVAL; + goto unlock_mutex; } tcp_tx = kthread_create(vhci_tx_loop, &vdev->ud, "vhci_tx"); if (IS_ERR(tcp_tx)) { kthread_stop(tcp_rx); sockfd_put(socket); - return -EINVAL; + err = -EINVAL; + goto unlock_mutex; } /* get task structs now */ @@ -397,7 +408,8 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr, * Will be retried from userspace * if there's another free port. 
*/ - return -EBUSY; + err = -EBUSY; + goto unlock_mutex; } dev_info(dev, "pdev(%u) rhport(%u) sockfd(%d)\n", @@ -423,7 +435,15 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr, rh_port_connect(vdev, speed); + dev_info(dev, "Device attached\n"); + + mutex_unlock(&vdev->ud.sysfs_lock); + return count; + +unlock_mutex: + mutex_unlock(&vdev->ud.sysfs_lock); + return err; } static DEVICE_ATTR_WO(attach); diff --git a/drivers/usb/usbip/vudc_dev.c b/drivers/usb/usbip/vudc_dev.c index c8eeabdd9b56..2bc428f2e261 100644 --- a/drivers/usb/usbip/vudc_dev.c +++ b/drivers/usb/usbip/vudc_dev.c @@ -572,6 +572,7 @@ static int init_vudc_hw(struct vudc *udc) init_waitqueue_head(&udc->tx_waitq); spin_lock_init(&ud->lock); + mutex_init(&ud->sysfs_lock); ud->status = SDEV_ST_AVAILABLE; ud->side = USBIP_VUDC; diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c index 7383a543c6d1..f7633ee655a1 100644 --- a/drivers/usb/usbip/vudc_sysfs.c +++ b/drivers/usb/usbip/vudc_sysfs.c @@ -112,6 +112,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, dev_err(dev, "no device"); return -ENODEV; } + mutex_lock(&udc->ud.sysfs_lock); spin_lock_irqsave(&udc->lock, flags); /* Don't export what we don't have */ if (!udc->driver || !udc->pullup) { @@ -187,6 +188,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, wake_up_process(udc->ud.tcp_rx); wake_up_process(udc->ud.tcp_tx); + + mutex_unlock(&udc->ud.sysfs_lock); return count; } else { @@ -207,6 +210,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, } spin_unlock_irqrestore(&udc->lock, flags); + mutex_unlock(&udc->ud.sysfs_lock); return count; @@ -216,6 +220,7 @@ unlock_ud: spin_unlock_irq(&udc->ud.lock); unlock: spin_unlock_irqrestore(&udc->lock, flags); + mutex_unlock(&udc->ud.sysfs_lock); return ret; } diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h index 08f742fd2409..b6cc53ba980c 100644 --- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h +++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h @@ -4,9 +4,13 @@ #ifndef __MLX5_VDPA_H__ #define __MLX5_VDPA_H__ +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> #include <linux/vdpa.h> #include <linux/mlx5/driver.h> +#define MLX5V_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN) + struct mlx5_vdpa_direct_mr { u64 start; u64 end; diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c index d300f799efcd..800cfd1967ad 100644 --- a/drivers/vdpa/mlx5/core/mr.c +++ b/drivers/vdpa/mlx5/core/mr.c @@ -219,6 +219,11 @@ static void destroy_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_m mlx5_vdpa_destroy_mkey(mvdev, &mkey->mkey); } +static struct device *get_dma_device(struct mlx5_vdpa_dev *mvdev) +{ + return &mvdev->mdev->pdev->dev; +} + static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr, struct vhost_iotlb *iotlb) { @@ -234,7 +239,7 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr u64 pa; u64 paend; struct scatterlist *sg; - struct device *dma = mvdev->mdev->device; + struct device *dma = get_dma_device(mvdev); for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1); map; map = vhost_iotlb_itree_next(map, start, mr->end - 1)) { @@ -273,8 +278,10 @@ done: mr->log_size = log_entity_size; mr->nsg = nsg; mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0); - if (!mr->nent) + if (!mr->nent) { + err = -ENOMEM; goto err_map; + } err = create_direct_mr(mvdev, mr); if (err) @@ -291,7 +298,7 @@ err_map: static void 
unmap_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr) { - struct device *dma = mvdev->mdev->device; + struct device *dma = get_dma_device(mvdev); destroy_direct_mr(mvdev, mr); dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0); diff --git a/drivers/vdpa/mlx5/core/resources.c b/drivers/vdpa/mlx5/core/resources.c index 96e6421c5d1c..6521cbd0f5c2 100644 --- a/drivers/vdpa/mlx5/core/resources.c +++ b/drivers/vdpa/mlx5/core/resources.c @@ -246,7 +246,8 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev) if (err) goto err_key; - kick_addr = pci_resource_start(mdev->pdev, 0) + offset; + kick_addr = mdev->bar_addr + offset; + res->kick_addr = ioremap(kick_addr, PAGE_SIZE); if (!res->kick_addr) { err = -ENOMEM; diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index 71397fdafa6a..4d2809c7d4e3 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -820,7 +820,7 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->fwqp.mqp.qpn); MLX5_SET(virtio_q, vq_ctx, queue_size, mvq->num_ent); MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0, - !!(ndev->mvdev.actual_features & VIRTIO_F_VERSION_1)); + !!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_F_VERSION_1))); MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr); MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr); MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr); @@ -1169,6 +1169,7 @@ static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *m return; } mvq->avail_idx = attr.available_index; + mvq->used_idx = attr.used_index; } static void suspend_vqs(struct mlx5_vdpa_net *ndev) @@ -1426,6 +1427,7 @@ static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx, return -EINVAL; } + mvq->used_idx = state->avail_index; mvq->avail_idx = state->avail_index; return 0; } @@ -1443,7 +1445,11 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa * that cares about emulating the index after vq is stopped. */ if (!mvq->initialized) { - state->avail_index = mvq->avail_idx; + /* Firmware returns a wrong value for the available index. + * Since both values should be identical, we take the value of + * used_idx which is reported correctly. 
+ */ + state->avail_index = mvq->used_idx; return 0; } @@ -1452,7 +1458,7 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n"); return err; } - state->avail_index = attr.available_index; + state->avail_index = attr.used_index; return 0; } @@ -1540,21 +1546,11 @@ static void teardown_virtqueues(struct mlx5_vdpa_net *ndev) } } -static void clear_virtqueues(struct mlx5_vdpa_net *ndev) -{ - int i; - - for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) { - ndev->vqs[i].avail_idx = 0; - ndev->vqs[i].used_idx = 0; - } -} - /* TODO: cross-endian support */ static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev) { return virtio_legacy_is_little_endian() || - (mvdev->actual_features & (1ULL << VIRTIO_F_VERSION_1)); + (mvdev->actual_features & BIT_ULL(VIRTIO_F_VERSION_1)); } static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val) @@ -1785,7 +1781,6 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status) if (!status) { mlx5_vdpa_info(mvdev, "performing device reset\n"); teardown_driver(ndev); - clear_virtqueues(ndev); mlx5_vdpa_destroy_mr(&ndev->mvdev); ndev->mvdev.status = 0; ndev->mvdev.mlx_features = 0; @@ -1907,6 +1902,19 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = { .free = mlx5_vdpa_free, }; +static int query_mtu(struct mlx5_core_dev *mdev, u16 *mtu) +{ + u16 hw_mtu; + int err; + + err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu); + if (err) + return err; + + *mtu = hw_mtu - MLX5V_ETH_HARD_MTU; + return 0; +} + static int alloc_resources(struct mlx5_vdpa_net *ndev) { struct mlx5_vdpa_net_resources *res = &ndev->res; @@ -1992,7 +2000,7 @@ static int mlx5v_probe(struct auxiliary_device *adev, init_mvqs(ndev); mutex_init(&ndev->reslock); config = &ndev->config; - err = mlx5_query_nic_vport_mtu(mdev, &ndev->mtu); + err = query_mtu(mdev, &ndev->mtu); if (err) goto err_mtu; diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig index ac3c1dd3edef..4abddbebd4b2 100644 --- a/drivers/vfio/pci/Kconfig +++ b/drivers/vfio/pci/Kconfig @@ -42,6 +42,6 @@ config VFIO_PCI_IGD config VFIO_PCI_NVLINK2 def_bool y - depends on VFIO_PCI && PPC_POWERNV + depends on VFIO_PCI && PPC_POWERNV && SPAPR_TCE_IOMMU help VFIO PCI support for P9 Witherspoon machine with NVIDIA V100 GPUs diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 65e7e6b44578..5023e23db3bc 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -1656,6 +1656,8 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma) index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT); + if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) + return -EINVAL; if (vma->vm_end < vma->vm_start) return -EINVAL; if ((vma->vm_flags & VM_SHARED) == 0) @@ -1664,7 +1666,7 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma) int regnum = index - VFIO_PCI_NUM_REGIONS; struct vfio_pci_region *region = vdev->region + regnum; - if (region && region->ops && region->ops->mmap && + if (region->ops && region->ops->mmap && (region->flags & VFIO_REGION_INFO_FLAG_MMAP)) return region->ops->mmap(vdev, region, vma); return -EINVAL; diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index be444407664a..45cbfd4879a5 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -739,6 +739,12 @@ out: ret = vfio_lock_acct(dma, lock_acct, false); unpin_out: + if (batch->size == 1 && !batch->offset) { + 
/* May be a VM_PFNMAP pfn, which the batch can't remember. */ + put_pfn(pfn, dma->prot); + batch->size = 0; + } + if (ret < 0) { if (pinned && !rsvd) { for (pfn = *pfn_base ; pinned ; pfn++, pinned--) diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index e0a27e336293..bfa4c6ef554e 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -745,9 +745,11 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, const struct vdpa_config_ops *ops = vdpa->config; int r = 0; + mutex_lock(&dev->mutex); + r = vhost_dev_check_owner(dev); if (r) - return r; + goto unlock; switch (msg->type) { case VHOST_IOTLB_UPDATE: @@ -768,6 +770,8 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, r = -EINVAL; break; } +unlock: + mutex_unlock(&dev->mutex); return r; } diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 44a5cd2f54cc..3406067985b1 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -1333,6 +1333,9 @@ static void fbcon_cursor(struct vc_data *vc, int mode) ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1; + if (!ops->cursor) + return; + ops->cursor(vc, info, mode, get_color(vc, info, c, 1), get_color(vc, info, c, 0)); } diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c index c8b0ae676809..4dc9077dd2ac 100644 --- a/drivers/video/fbdev/hyperv_fb.c +++ b/drivers/video/fbdev/hyperv_fb.c @@ -1031,7 +1031,6 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info) PCI_DEVICE_ID_HYPERV_VIDEO, NULL); if (!pdev) { pr_err("Unable to find PCI Hyper-V video\n"); - kfree(info->apertures); return -ENODEV; } @@ -1129,7 +1128,6 @@ getmem_done: } else { pci_dev_put(pdev); } - kfree(info->apertures); return 0; @@ -1141,7 +1139,6 @@ err2: err1: if (!gen2vm) pci_dev_put(pdev); - kfree(info->apertures); return -ENOMEM; } diff --git a/drivers/watchdog/armada_37xx_wdt.c b/drivers/watchdog/armada_37xx_wdt.c index e5dcb26d85f0..1635f421ef2c 100644 --- a/drivers/watchdog/armada_37xx_wdt.c +++ b/drivers/watchdog/armada_37xx_wdt.c @@ -2,7 +2,7 @@ /* * Watchdog driver for Marvell Armada 37xx SoCs * - * Author: Marek Behun <marek.behun@nic.cz> + * Author: Marek Behún <kabel@kernel.org> */ #include <linux/clk.h> @@ -366,7 +366,7 @@ static struct platform_driver armada_37xx_wdt_driver = { module_platform_driver(armada_37xx_wdt_driver); -MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>"); +MODULE_AUTHOR("Marek Behun <kabel@kernel.org>"); MODULE_DESCRIPTION("Armada 37xx CPU Watchdog"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index 41645fe6ad48..ea0efd290c37 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -50,11 +50,11 @@ config XEN_BALLOON_MEMORY_HOTPLUG SUBSYSTEM=="memory", ACTION=="add", RUN+="/bin/sh -c '[ -f /sys$devpath/state ] && echo online > /sys$devpath/state'" -config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT +config XEN_MEMORY_HOTPLUG_LIMIT int "Hotplugged memory limit (in GiB) for a PV guest" default 512 depends on XEN_HAVE_PVMMU - depends on XEN_BALLOON_MEMORY_HOTPLUG + depends on MEMORY_HOTPLUG help Maximum amount of memory (in GiB) that a PV guest can be expanded to when using memory hotplug.
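[Editor's note: the vhost-vdpa hunk above, like the usbip sysfs_lock hunks earlier, follows one pattern: take a mutex at the top of the handler, convert every early return into a goto to a single unlock label, and release the mutex on that one exit path so no return leaks the lock. A compilable userspace sketch of that shape, with a pthread mutex standing in for the kernel mutex and all names invented:

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;
static int owner_ok = 1;

/* Stand-in for vhost_dev_check_owner(): fails if another task owns the device. */
static int check_owner(void)
{
	return owner_ok ? 0 : -EPERM;
}

static int process_msg(int type)
{
	int r;

	pthread_mutex_lock(&dev_mutex);

	r = check_owner();
	if (r)
		goto unlock;	/* a bare return here would leak the lock */

	switch (type) {
	case 0:
		r = 0;		/* handle the update under the lock */
		break;
	default:
		r = -EINVAL;
		break;
	}
unlock:
	pthread_mutex_unlock(&dev_mutex);
	return r;
}
]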
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 8236e2364eeb..7bbfd58958bc 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -110,7 +110,7 @@ struct irq_info { unsigned short eoi_cpu; /* EOI must happen on this cpu-1 */ unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */ u64 eoi_time; /* Time in jiffies when to EOI. */ - spinlock_t lock; + raw_spinlock_t lock; union { unsigned short virq; @@ -312,7 +312,7 @@ static int xen_irq_info_common_setup(struct irq_info *info, info->evtchn = evtchn; info->cpu = cpu; info->mask_reason = EVT_MASK_REASON_EXPLICIT; - spin_lock_init(&info->lock); + raw_spin_lock_init(&info->lock); ret = set_evtchn_to_irq(evtchn, irq); if (ret < 0) @@ -472,28 +472,28 @@ static void do_mask(struct irq_info *info, u8 reason) { unsigned long flags; - spin_lock_irqsave(&info->lock, flags); + raw_spin_lock_irqsave(&info->lock, flags); if (!info->mask_reason) mask_evtchn(info->evtchn); info->mask_reason |= reason; - spin_unlock_irqrestore(&info->lock, flags); + raw_spin_unlock_irqrestore(&info->lock, flags); } static void do_unmask(struct irq_info *info, u8 reason) { unsigned long flags; - spin_lock_irqsave(&info->lock, flags); + raw_spin_lock_irqsave(&info->lock, flags); info->mask_reason &= ~reason; if (!info->mask_reason) unmask_evtchn(info->evtchn); - spin_unlock_irqrestore(&info->lock, flags); + raw_spin_unlock_irqrestore(&info->lock, flags); } #ifdef CONFIG_X86 diff --git a/drivers/xen/time.c b/drivers/xen/time.c index 108edbcbc040..152dd33bb223 100644 --- a/drivers/xen/time.c +++ b/drivers/xen/time.c @@ -7,6 +7,7 @@ #include <linux/math64.h> #include <linux/gfp.h> #include <linux/slab.h> +#include <linux/static_call.h> #include <asm/paravirt.h> #include <asm/xen/hypervisor.h> @@ -175,7 +176,7 @@ void __init xen_time_setup_guest(void) xen_runstate_remote = !HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_runstate_update_flag); - pv_ops.time.steal_clock = xen_steal_clock; + static_call_update(pv_steal_clock, xen_steal_clock); static_key_slow_inc(&paravirt_steal_enabled); if (xen_runstate_remote)
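[Editor's note: the final xen/time.c hunk replaces an indirect branch through pv_ops with a static call, which lets the kernel patch the call site into a direct branch. A kernel-style sketch of the API shape; DEFINE_STATIC_CALL, static_call() and static_call_update() are the real <linux/static_call.h> interfaces, but every other name here is invented for illustration and the snippet is not tied to any file in this series:

#include <linux/static_call.h>
#include <linux/types.h>

static u64 native_steal(int cpu)
{
	return 0;	/* default implementation: no steal-time accounting */
}

/* Defines the call-site key together with its default target. */
DEFINE_STATIC_CALL(demo_steal_clock, native_steal);

static u64 xen_steal(int cpu)
{
	return 42;	/* a paravirt backend would read the runstate area here */
}

static void demo_setup(void)
{
	/* Re-points every static_call(demo_steal_clock)() site at xen_steal. */
	static_call_update(demo_steal_clock, xen_steal);
}

static u64 demo_read(int cpu)
{
	/* Emitted as a direct call, unlike ops->steal_clock(cpu). */
	return static_call(demo_steal_clock)(cpu);
}
]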