| author | Mauro Carvalho Chehab <mchehab+samsung@kernel.org> | 2018-05-10 13:19:23 +0200 |
|---|---|---|
| committer | Mauro Carvalho Chehab <mchehab+samsung@kernel.org> | 2018-05-10 13:19:23 +0200 |
| commit | 71db1cd7ff4ea3b525ae7d9c97633ea281b7d981 (patch) | |
| tree | 53ff3b8ecfc2869ec919e3423d69e07ddfaddd9a /drivers | |
| parent | media: dvbsky: use just one mutex for serializing device R/W ops (diff) | |
| parent | Linux 4.17-rc4 (diff) | |
Merge tag 'v4.17-rc4' into patchwork
Linux 4.17-rc4
* tag 'v4.17-rc4': (920 commits)
Linux 4.17-rc4
KVM: x86: remove APIC Timer periodic/oneshot spikes
genksyms: fix typo in parse.tab.{c,h} generation rules
kbuild: replace hardcoded bison in cmd_bison_h with $(YACC)
gcc-plugins: fix build condition of SANCOV plugin
MAINTAINERS: Update Kbuild entry with a few paths
Revert "usb: host: ehci: Use dma_pool_zalloc()"
platform/x86: Kconfig: Fix dell-laptop dependency chain.
platform/x86: asus-wireless: Fix NULL pointer dereference
arm64: vgic-v2: Fix proxying of cpuif access
KVM: arm/arm64: vgic_init: Cleanup reference to process_maintenance
KVM: arm64: Fix order of vcpu_write_sys_reg() arguments
MAINTAINERS & files: Canonize the e-mails I use at files
media: imx-media-csi: Fix inconsistent IS_ERR and PTR_ERR
tools: power/acpi, revert to LD = gcc
bdi: Fix oops in wb_workfn()
RDMA/cma: Do not query GID during QP state transition to RTR
IB/mlx4: Fix integer overflow when calculating optimal MTT size
IB/hfi1: Fix memory leak in exception path in get_irq_affinity()
IB/{hfi1, rdmavt}: Fix memory leak in hfi1_alloc_devdata() upon failure
...
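
The shortlog above is truncated by git. Assuming the merge commit is available in a local clone of the mainline tree (an assumption, not part of this page), the full list of commits pulled in by the merge can be listed as the range between its two parents, for example:

```
# Illustrative only; requires a local clone containing this commit.
# Commits reachable from the second parent (the v4.17-rc4 tag) but not
# from the first parent (the patchwork branch), i.e. what the merge
# brought in.
git log --oneline \
    71db1cd7ff4ea3b525ae7d9c97633ea281b7d981^1..71db1cd7ff4ea3b525ae7d9c97633ea281b7d981^2
```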
Diffstat (limited to 'drivers')
423 files changed, 4743 insertions, 2544 deletions
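
The diffstat shown by cgit compares the merge against its first parent, restricted to drivers/. A roughly equivalent summary (exact numbers may differ slightly depending on rename-detection settings) can be reproduced locally with:

```
# Illustrative only; assumes the same local clone as above.
git diff --stat 71db1cd7ff4ea3b525ae7d9c97633ea281b7d981^1 \
    71db1cd7ff4ea3b525ae7d9c97633ea281b7d981 -- drivers/
```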
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index 76fb96966f7b..2f2e737be0f8 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -2123,6 +2123,25 @@ static int __init intel_opregion_present(void) return opregion; } +static bool dmi_is_desktop(void) +{ + const char *chassis_type; + + chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE); + if (!chassis_type) + return false; + + if (!strcmp(chassis_type, "3") || /* 3: Desktop */ + !strcmp(chassis_type, "4") || /* 4: Low Profile Desktop */ + !strcmp(chassis_type, "5") || /* 5: Pizza Box */ + !strcmp(chassis_type, "6") || /* 6: Mini Tower */ + !strcmp(chassis_type, "7") || /* 7: Tower */ + !strcmp(chassis_type, "11")) /* 11: Main Server Chassis */ + return true; + + return false; +} + int acpi_video_register(void) { int ret = 0; @@ -2143,8 +2162,12 @@ int acpi_video_register(void) * win8 ready (where we also prefer the native backlight driver, so * normally the acpi_video code should not register there anyways). */ - if (only_lcd == -1) - only_lcd = acpi_osi_is_win8(); + if (only_lcd == -1) { + if (dmi_is_desktop() && acpi_osi_is_win8()) + only_lcd = true; + else + only_lcd = false; + } dmi_check_system(video_dmi_table); diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c index ebb626ffb5fa..4bde16fb97d8 100644 --- a/drivers/acpi/acpi_watchdog.c +++ b/drivers/acpi/acpi_watchdog.c @@ -12,23 +12,64 @@ #define pr_fmt(fmt) "ACPI: watchdog: " fmt #include <linux/acpi.h> +#include <linux/dmi.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include "internal.h" +static const struct dmi_system_id acpi_watchdog_skip[] = { + { + /* + * On Lenovo Z50-70 there are two issues with the WDAT + * table. First some of the instructions use RTC SRAM + * to store persistent information. This does not work well + * with Linux RTC driver. Second, more important thing is + * that the instructions do not actually reset the system. + * + * On this particular system iTCO_wdt seems to work just + * fine so we prefer that over WDAT for now. + * + * See also https://bugzilla.kernel.org/show_bug.cgi?id=199033. + */ + .ident = "Lenovo Z50-70", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "20354"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Z50-70"), + }, + }, + {} +}; + +static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void) +{ + const struct acpi_table_wdat *wdat = NULL; + acpi_status status; + + if (acpi_disabled) + return NULL; + + if (dmi_check_system(acpi_watchdog_skip)) + return NULL; + + status = acpi_get_table(ACPI_SIG_WDAT, 0, + (struct acpi_table_header **)&wdat); + if (ACPI_FAILURE(status)) { + /* It is fine if there is no WDAT */ + return NULL; + } + + return wdat; +} + /** * Returns true if this system should prefer ACPI based watchdog instead of * the native one (which are typically the same hardware). 
*/ bool acpi_has_watchdog(void) { - struct acpi_table_header hdr; - - if (acpi_disabled) - return false; - - return ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_WDAT, 0, &hdr)); + return !!acpi_watchdog_get_wdat(); } EXPORT_SYMBOL_GPL(acpi_has_watchdog); @@ -41,12 +82,10 @@ void __init acpi_watchdog_init(void) struct platform_device *pdev; struct resource *resources; size_t nresources = 0; - acpi_status status; int i; - status = acpi_get_table(ACPI_SIG_WDAT, 0, - (struct acpi_table_header **)&wdat); - if (ACPI_FAILURE(status)) { + wdat = acpi_watchdog_get_wdat(); + if (!wdat) { /* It is fine if there is no WDAT */ return; } diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index e1eee7a60fad..f1cc4f9d31cd 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c @@ -635,4 +635,26 @@ module_param_call(lid_init_state, NULL, 0644); MODULE_PARM_DESC(lid_init_state, "Behavior for reporting LID initial state"); -module_acpi_driver(acpi_button_driver); +static int acpi_button_register_driver(struct acpi_driver *driver) +{ + /* + * Modules such as nouveau.ko and i915.ko have a link time dependency + * on acpi_lid_open(), and would therefore not be loadable on ACPI + * capable kernels booted in non-ACPI mode if the return value of + * acpi_bus_register_driver() is returned from here with ACPI disabled + * when this driver is built as a module. + */ + if (acpi_disabled) + return 0; + + return acpi_bus_register_driver(driver); +} + +static void acpi_button_unregister_driver(struct acpi_driver *driver) +{ + if (!acpi_disabled) + acpi_bus_unregister_driver(driver); +} + +module_driver(acpi_button_driver, acpi_button_register_driver, + acpi_button_unregister_driver); diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index cc234e6a6297..970dd87d347c 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -2166,10 +2166,10 @@ int __init acpi_scan_init(void) acpi_cmos_rtc_init(); acpi_container_init(); acpi_memory_hotplug_init(); + acpi_watchdog_init(); acpi_pnp_init(); acpi_int340x_thermal_init(); acpi_amba_init(); - acpi_watchdog_init(); acpi_init_lpit(); acpi_scan_add_handler(&generic_device_handler); diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 99a1a650326d..974e58457697 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -364,6 +364,19 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"), }, }, + /* + * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using + * the Low Power S0 Idle firmware interface (see + * https://bugzilla.kernel.org/show_bug.cgi?id=199057). 
+ */ + { + .callback = init_no_lps0, + .ident = "ThinkPad X1 Tablet(2016)", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"), + }, + }, {}, }; diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index 594c228d2f02..4a3ac31c07d0 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -69,11 +69,12 @@ static ssize_t driver_override_show(struct device *_dev, struct device_attribute *attr, char *buf) { struct amba_device *dev = to_amba_device(_dev); + ssize_t len; - if (!dev->driver_override) - return 0; - - return sprintf(buf, "%s\n", dev->driver_override); + device_lock(_dev); + len = sprintf(buf, "%s\n", dev->driver_override); + device_unlock(_dev); + return len; } static ssize_t driver_override_store(struct device *_dev, @@ -81,9 +82,10 @@ static ssize_t driver_override_store(struct device *_dev, const char *buf, size_t count) { struct amba_device *dev = to_amba_device(_dev); - char *driver_override, *old = dev->driver_override, *cp; + char *driver_override, *old, *cp; - if (count > PATH_MAX) + /* We need to keep extra room for a newline */ + if (count >= (PAGE_SIZE - 1)) return -EINVAL; driver_override = kstrndup(buf, count, GFP_KERNEL); @@ -94,12 +96,15 @@ static ssize_t driver_override_store(struct device *_dev, if (cp) *cp = '\0'; + device_lock(_dev); + old = dev->driver_override; if (strlen(driver_override)) { dev->driver_override = driver_override; } else { kfree(driver_override); dev->driver_override = NULL; } + device_unlock(_dev); kfree(old); diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 764b63a5aade..e578eee31589 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -2839,6 +2839,14 @@ static void binder_transaction(struct binder_proc *proc, else return_error = BR_DEAD_REPLY; mutex_unlock(&context->context_mgr_node_lock); + if (target_node && target_proc == proc) { + binder_user_error("%d:%d got transaction to context manager from process owning it\n", + proc->pid, thread->pid); + return_error = BR_FAILED_REPLY; + return_error_param = -EINVAL; + return_error_line = __LINE__; + goto err_invalid_target_handle; + } } if (!target_node) { /* diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index 44abb8a0a5e5..be076606d30e 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c @@ -671,7 +671,7 @@ static void ia_tx_poll (IADEV *iadev) { if ((vcc->pop) && (skb1->len != 0)) { vcc->pop(vcc, skb1); - IF_EVENT(printk("Tansmit Done - skb 0x%lx return\n", + IF_EVENT(printk("Transmit Done - skb 0x%lx return\n", (long)skb1);) } else @@ -1665,7 +1665,7 @@ static void tx_intr(struct atm_dev *dev) status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG); if (status & TRANSMIT_DONE){ - IF_EVENT(printk("Tansmit Done Intr logic run\n");) + IF_EVENT(printk("Transmit Done Intr logic run\n");) spin_lock_irqsave(&iadev->tx_lock, flags); ia_tx_poll(iadev); spin_unlock_irqrestore(&iadev->tx_lock, flags); diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c index 1e6396bb807b..597d40893862 100644 --- a/drivers/base/dma-coherent.c +++ b/drivers/base/dma-coherent.c @@ -312,8 +312,9 @@ static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem, * This checks whether the memory was allocated from the per-device * coherent memory pool and if so, maps that memory to the provided vma. * - * Returns 1 if we correctly mapped the memory, or 0 if the caller should - * proceed with mapping memory from generic pools. 
+ * Returns 1 if @vaddr belongs to the device coherent pool and the caller + * should return @ret, or 0 if they should proceed with mapping memory from + * generic areas. */ int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma, void *vaddr, size_t size, int *ret) diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c index 3b118353ea17..d82566d6e237 100644 --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c @@ -226,7 +226,6 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, #ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP unsigned long user_count = vma_pages(vma); unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; - unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr)); unsigned long off = vma->vm_pgoff; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); @@ -234,12 +233,11 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) return ret; - if (off < count && user_count <= (count - off)) { + if (off < count && user_count <= (count - off)) ret = remap_pfn_range(vma, vma->vm_start, - pfn + off, + page_to_pfn(virt_to_page(cpu_addr)) + off, user_count << PAGE_SHIFT, vma->vm_page_prot); - } #endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */ return ret; diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c index 31b5015b59fe..358354148dec 100644 --- a/drivers/base/firmware_loader/fallback.c +++ b/drivers/base/firmware_loader/fallback.c @@ -537,8 +537,8 @@ exit: } /** - * fw_load_sysfs_fallback - load a firmware via the syfs fallback mechanism - * @fw_sysfs: firmware syfs information for the firmware to load + * fw_load_sysfs_fallback - load a firmware via the sysfs fallback mechanism + * @fw_sysfs: firmware sysfs information for the firmware to load * @opt_flags: flags of options, FW_OPT_* * @timeout: timeout to wait for the load * diff --git a/drivers/base/firmware_loader/fallback.h b/drivers/base/firmware_loader/fallback.h index dfebc644ed35..f8255670a663 100644 --- a/drivers/base/firmware_loader/fallback.h +++ b/drivers/base/firmware_loader/fallback.h @@ -6,7 +6,7 @@ #include <linux/device.h> /** - * struct firmware_fallback_config - firmware fallback configuratioon settings + * struct firmware_fallback_config - firmware fallback configuration settings * * Helps describe and fine tune the fallback mechanism. * diff --git a/drivers/block/loop.c b/drivers/block/loop.c index c9d04497a415..5d4e31655d96 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -451,25 +451,47 @@ static int lo_req_flush(struct loop_device *lo, struct request *rq) static void lo_complete_rq(struct request *rq) { struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq); + blk_status_t ret = BLK_STS_OK; - if (unlikely(req_op(cmd->rq) == REQ_OP_READ && cmd->use_aio && - cmd->ret >= 0 && cmd->ret < blk_rq_bytes(cmd->rq))) { - struct bio *bio = cmd->rq->bio; - - bio_advance(bio, cmd->ret); - zero_fill_bio(bio); + if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) || + req_op(rq) != REQ_OP_READ) { + if (cmd->ret < 0) + ret = BLK_STS_IOERR; + goto end_io; } - blk_mq_end_request(rq, cmd->ret < 0 ? BLK_STS_IOERR : BLK_STS_OK); + /* + * Short READ - if we got some data, advance our request and + * retry it. If we got no data, end the rest with EIO. 
+ */ + if (cmd->ret) { + blk_update_request(rq, BLK_STS_OK, cmd->ret); + cmd->ret = 0; + blk_mq_requeue_request(rq, true); + } else { + if (cmd->use_aio) { + struct bio *bio = rq->bio; + + while (bio) { + zero_fill_bio(bio); + bio = bio->bi_next; + } + } + ret = BLK_STS_IOERR; +end_io: + blk_mq_end_request(rq, ret); + } } static void lo_rw_aio_do_completion(struct loop_cmd *cmd) { + struct request *rq = blk_mq_rq_from_pdu(cmd); + if (!atomic_dec_and_test(&cmd->ref)) return; kfree(cmd->bvec); cmd->bvec = NULL; - blk_mq_complete_request(cmd->rq); + blk_mq_complete_request(rq); } static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2) @@ -487,7 +509,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, { struct iov_iter iter; struct bio_vec *bvec; - struct request *rq = cmd->rq; + struct request *rq = blk_mq_rq_from_pdu(cmd); struct bio *bio = rq->bio; struct file *file = lo->lo_backing_file; unsigned int offset; @@ -1702,15 +1724,16 @@ EXPORT_SYMBOL(loop_unregister_transfer); static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { - struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); - struct loop_device *lo = cmd->rq->q->queuedata; + struct request *rq = bd->rq; + struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq); + struct loop_device *lo = rq->q->queuedata; - blk_mq_start_request(bd->rq); + blk_mq_start_request(rq); if (lo->lo_state != Lo_bound) return BLK_STS_IOERR; - switch (req_op(cmd->rq)) { + switch (req_op(rq)) { case REQ_OP_FLUSH: case REQ_OP_DISCARD: case REQ_OP_WRITE_ZEROES: @@ -1723,8 +1746,8 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx, /* always use the first bio's css */ #ifdef CONFIG_BLK_CGROUP - if (cmd->use_aio && cmd->rq->bio && cmd->rq->bio->bi_css) { - cmd->css = cmd->rq->bio->bi_css; + if (cmd->use_aio && rq->bio && rq->bio->bi_css) { + cmd->css = rq->bio->bi_css; css_get(cmd->css); } else #endif @@ -1736,8 +1759,9 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx, static void loop_handle_cmd(struct loop_cmd *cmd) { - const bool write = op_is_write(req_op(cmd->rq)); - struct loop_device *lo = cmd->rq->q->queuedata; + struct request *rq = blk_mq_rq_from_pdu(cmd); + const bool write = op_is_write(req_op(rq)); + struct loop_device *lo = rq->q->queuedata; int ret = 0; if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) { @@ -1745,12 +1769,12 @@ static void loop_handle_cmd(struct loop_cmd *cmd) goto failed; } - ret = do_req_filebacked(lo, cmd->rq); + ret = do_req_filebacked(lo, rq); failed: /* complete non-aio request */ if (!cmd->use_aio || ret) { cmd->ret = ret ? 
-EIO : 0; - blk_mq_complete_request(cmd->rq); + blk_mq_complete_request(rq); } } @@ -1767,9 +1791,7 @@ static int loop_init_request(struct blk_mq_tag_set *set, struct request *rq, { struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq); - cmd->rq = rq; kthread_init_work(&cmd->work, loop_queue_work); - return 0; } diff --git a/drivers/block/loop.h b/drivers/block/loop.h index 0f45416e4fcf..b78de9879f4f 100644 --- a/drivers/block/loop.h +++ b/drivers/block/loop.h @@ -66,7 +66,6 @@ struct loop_device { struct loop_cmd { struct kthread_work work; - struct request *rq; bool use_aio; /* use AIO interface to handle I/O */ atomic_t ref; /* only for aio */ long ret; diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 07dc5419bd63..8e8b04cc569a 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -732,6 +732,7 @@ static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts) */ enum { Opt_queue_depth, + Opt_lock_timeout, Opt_last_int, /* int args above */ Opt_last_string, @@ -740,11 +741,13 @@ enum { Opt_read_write, Opt_lock_on_read, Opt_exclusive, + Opt_notrim, Opt_err }; static match_table_t rbd_opts_tokens = { {Opt_queue_depth, "queue_depth=%d"}, + {Opt_lock_timeout, "lock_timeout=%d"}, /* int args above */ /* string args above */ {Opt_read_only, "read_only"}, @@ -753,20 +756,25 @@ static match_table_t rbd_opts_tokens = { {Opt_read_write, "rw"}, /* Alternate spelling */ {Opt_lock_on_read, "lock_on_read"}, {Opt_exclusive, "exclusive"}, + {Opt_notrim, "notrim"}, {Opt_err, NULL} }; struct rbd_options { int queue_depth; + unsigned long lock_timeout; bool read_only; bool lock_on_read; bool exclusive; + bool trim; }; #define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ +#define RBD_LOCK_TIMEOUT_DEFAULT 0 /* no timeout */ #define RBD_READ_ONLY_DEFAULT false #define RBD_LOCK_ON_READ_DEFAULT false #define RBD_EXCLUSIVE_DEFAULT false +#define RBD_TRIM_DEFAULT true static int parse_rbd_opts_token(char *c, void *private) { @@ -796,6 +804,14 @@ static int parse_rbd_opts_token(char *c, void *private) } rbd_opts->queue_depth = intval; break; + case Opt_lock_timeout: + /* 0 is "wait forever" (i.e. 
infinite timeout) */ + if (intval < 0 || intval > INT_MAX / 1000) { + pr_err("lock_timeout out of range\n"); + return -EINVAL; + } + rbd_opts->lock_timeout = msecs_to_jiffies(intval * 1000); + break; case Opt_read_only: rbd_opts->read_only = true; break; @@ -808,6 +824,9 @@ static int parse_rbd_opts_token(char *c, void *private) case Opt_exclusive: rbd_opts->exclusive = true; break; + case Opt_notrim: + rbd_opts->trim = false; + break; default: /* libceph prints "bad option" msg */ return -EINVAL; @@ -1392,7 +1411,7 @@ static bool rbd_img_is_write(struct rbd_img_request *img_req) case OBJ_OP_DISCARD: return true; default: - rbd_assert(0); + BUG(); } } @@ -2466,7 +2485,7 @@ again: } return false; default: - rbd_assert(0); + BUG(); } } @@ -2494,7 +2513,7 @@ static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req) } return false; default: - rbd_assert(0); + BUG(); } } @@ -3533,9 +3552,22 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, /* * lock_rwsem must be held for read */ -static void rbd_wait_state_locked(struct rbd_device *rbd_dev) +static int rbd_wait_state_locked(struct rbd_device *rbd_dev, bool may_acquire) { DEFINE_WAIT(wait); + unsigned long timeout; + int ret = 0; + + if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) + return -EBLACKLISTED; + + if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) + return 0; + + if (!may_acquire) { + rbd_warn(rbd_dev, "exclusive lock required"); + return -EROFS; + } do { /* @@ -3547,12 +3579,22 @@ static void rbd_wait_state_locked(struct rbd_device *rbd_dev) prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait, TASK_UNINTERRUPTIBLE); up_read(&rbd_dev->lock_rwsem); - schedule(); + timeout = schedule_timeout(ceph_timeout_jiffies( + rbd_dev->opts->lock_timeout)); down_read(&rbd_dev->lock_rwsem); - } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED && - !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)); + if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { + ret = -EBLACKLISTED; + break; + } + if (!timeout) { + rbd_warn(rbd_dev, "timed out waiting for lock"); + ret = -ETIMEDOUT; + break; + } + } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED); finish_wait(&rbd_dev->lock_waitq, &wait); + return ret; } static void rbd_queue_workfn(struct work_struct *work) @@ -3638,19 +3680,10 @@ static void rbd_queue_workfn(struct work_struct *work) (op_type != OBJ_OP_READ || rbd_dev->opts->lock_on_read); if (must_be_locked) { down_read(&rbd_dev->lock_rwsem); - if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED && - !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { - if (rbd_dev->opts->exclusive) { - rbd_warn(rbd_dev, "exclusive lock required"); - result = -EROFS; - goto err_unlock; - } - rbd_wait_state_locked(rbd_dev); - } - if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { - result = -EBLACKLISTED; + result = rbd_wait_state_locked(rbd_dev, + !rbd_dev->opts->exclusive); + if (result) goto err_unlock; - } } img_request = rbd_img_request_create(rbd_dev, op_type, snapc); @@ -3902,7 +3935,8 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) { struct gendisk *disk; struct request_queue *q; - u64 segment_size; + unsigned int objset_bytes = + rbd_dev->layout.object_size * rbd_dev->layout.stripe_count; int err; /* create gendisk info */ @@ -3942,20 +3976,19 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) blk_queue_flag_set(QUEUE_FLAG_NONROT, q); /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */ - /* set io sizes to object size */ - segment_size = rbd_obj_bytes(&rbd_dev->header); - 
blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE); + blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT); q->limits.max_sectors = queue_max_hw_sectors(q); blk_queue_max_segments(q, USHRT_MAX); blk_queue_max_segment_size(q, UINT_MAX); - blk_queue_io_min(q, segment_size); - blk_queue_io_opt(q, segment_size); + blk_queue_io_min(q, objset_bytes); + blk_queue_io_opt(q, objset_bytes); - /* enable the discard support */ - blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); - q->limits.discard_granularity = segment_size; - blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE); - blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE); + if (rbd_dev->opts->trim) { + blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); + q->limits.discard_granularity = objset_bytes; + blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT); + blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT); + } if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC)) q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; @@ -5179,8 +5212,10 @@ static int rbd_add_parse_args(const char *buf, rbd_opts->read_only = RBD_READ_ONLY_DEFAULT; rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT; + rbd_opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT; rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT; rbd_opts->exclusive = RBD_EXCLUSIVE_DEFAULT; + rbd_opts->trim = RBD_TRIM_DEFAULT; copts = ceph_parse_options(options, mon_addrs, mon_addrs + mon_addrs_size - 1, @@ -5216,6 +5251,8 @@ static void rbd_dev_image_unlock(struct rbd_device *rbd_dev) static int rbd_add_acquire_lock(struct rbd_device *rbd_dev) { + int ret; + if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) { rbd_warn(rbd_dev, "exclusive-lock feature is not enabled"); return -EINVAL; @@ -5223,9 +5260,9 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev) /* FIXME: "rbd map --exclusive" should be in interruptible */ down_read(&rbd_dev->lock_rwsem); - rbd_wait_state_locked(rbd_dev); + ret = rbd_wait_state_locked(rbd_dev, true); up_read(&rbd_dev->lock_rwsem); - if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { + if (ret) { rbd_warn(rbd_dev, "failed to acquire exclusive lock"); return -EROFS; } diff --git a/drivers/block/swim.c b/drivers/block/swim.c index 64e066eba72e..0e31884a9519 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c @@ -110,7 +110,7 @@ struct iwm { /* Select values for swim_select and swim_readbit */ #define READ_DATA_0 0x074 -#define TWOMEG_DRIVE 0x075 +#define ONEMEG_DRIVE 0x075 #define SINGLE_SIDED 0x076 #define DRIVE_PRESENT 0x077 #define DISK_IN 0x170 @@ -118,9 +118,9 @@ struct iwm { #define TRACK_ZERO 0x172 #define TACHO 0x173 #define READ_DATA_1 0x174 -#define MFM_MODE 0x175 +#define GCR_MODE 0x175 #define SEEK_COMPLETE 0x176 -#define ONEMEG_MEDIA 0x177 +#define TWOMEG_MEDIA 0x177 /* Bits in handshake register */ @@ -612,7 +612,6 @@ static void setup_medium(struct floppy_state *fs) struct floppy_struct *g; fs->disk_in = 1; fs->write_protected = swim_readbit(base, WRITE_PROT); - fs->type = swim_readbit(base, ONEMEG_MEDIA); if (swim_track00(base)) printk(KERN_ERR @@ -620,6 +619,9 @@ static void setup_medium(struct floppy_state *fs) swim_track00(base); + fs->type = swim_readbit(base, TWOMEG_MEDIA) ? + HD_MEDIA : DD_MEDIA; + fs->head_number = swim_readbit(base, SINGLE_SIDED) ? 
1 : 2; get_floppy_geometry(fs, 0, &g); fs->total_secs = g->size; fs->secpercyl = g->head * g->sect; @@ -646,7 +648,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) swim_write(base, setup, S_IBM_DRIVE | S_FCLK_DIV2); udelay(10); - swim_drive(base, INTERNAL_DRIVE); + swim_drive(base, fs->location); swim_motor(base, ON); swim_action(base, SETMFM); if (fs->ejected) @@ -656,6 +658,8 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) goto out; } + set_capacity(fs->disk, fs->total_secs); + if (mode & FMODE_NDELAY) return 0; @@ -727,14 +731,9 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode, if (copy_to_user((void __user *) param, (void *) &floppy_type, sizeof(struct floppy_struct))) return -EFAULT; - break; - - default: - printk(KERN_DEBUG "SWIM floppy_ioctl: unknown cmd %d\n", - cmd); - return -ENOSYS; + return 0; } - return 0; + return -ENOTTY; } static int floppy_getgeo(struct block_device *bdev, struct hd_geometry *geo) @@ -795,7 +794,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data) struct swim_priv *swd = data; int drive = (*part & 3); - if (drive > swd->floppy_count) + if (drive >= swd->floppy_count) return NULL; *part = 0; @@ -813,10 +812,9 @@ static int swim_add_floppy(struct swim_priv *swd, enum drive_location location) swim_motor(base, OFF); - if (swim_readbit(base, SINGLE_SIDED)) - fs->head_number = 1; - else - fs->head_number = 2; + fs->type = HD_MEDIA; + fs->head_number = 2; + fs->ref_count = 0; fs->ejected = 1; @@ -834,10 +832,12 @@ static int swim_floppy_init(struct swim_priv *swd) /* scan floppy drives */ swim_drive(base, INTERNAL_DRIVE); - if (swim_readbit(base, DRIVE_PRESENT)) + if (swim_readbit(base, DRIVE_PRESENT) && + !swim_readbit(base, ONEMEG_DRIVE)) swim_add_floppy(swd, INTERNAL_DRIVE); swim_drive(base, EXTERNAL_DRIVE); - if (swim_readbit(base, DRIVE_PRESENT)) + if (swim_readbit(base, DRIVE_PRESENT) && + !swim_readbit(base, ONEMEG_DRIVE)) swim_add_floppy(swd, EXTERNAL_DRIVE); /* register floppy drives */ @@ -861,7 +861,6 @@ static int swim_floppy_init(struct swim_priv *swd) &swd->lock); if (!swd->unit[drive].disk->queue) { err = -ENOMEM; - put_disk(swd->unit[drive].disk); goto exit_put_disks; } blk_queue_bounce_limit(swd->unit[drive].disk->queue, @@ -911,7 +910,7 @@ static int swim_probe(struct platform_device *dev) goto out; } - swim_base = ioremap(res->start, resource_size(res)); + swim_base = (struct swim __iomem *)res->start; if (!swim_base) { ret = -ENOMEM; goto out_release_io; @@ -923,7 +922,7 @@ static int swim_probe(struct platform_device *dev) if (!get_swim_mode(swim_base)) { printk(KERN_INFO "SWIM device not found !\n"); ret = -ENODEV; - goto out_iounmap; + goto out_release_io; } /* set platform driver data */ @@ -931,7 +930,7 @@ static int swim_probe(struct platform_device *dev) swd = kzalloc(sizeof(struct swim_priv), GFP_KERNEL); if (!swd) { ret = -ENOMEM; - goto out_iounmap; + goto out_release_io; } platform_set_drvdata(dev, swd); @@ -945,8 +944,6 @@ static int swim_probe(struct platform_device *dev) out_kfree: kfree(swd); -out_iounmap: - iounmap(swim_base); out_release_io: release_mem_region(res->start, resource_size(res)); out: @@ -974,8 +971,6 @@ static int swim_remove(struct platform_device *dev) for (drive = 0; drive < swd->floppy_count; drive++) floppy_eject(&swd->unit[drive]); - iounmap(swd->base); - res = platform_get_resource(dev, IORESOURCE_MEM, 0); if (res) release_mem_region(res->start, resource_size(res)); diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index 
af51015d056e..469541c1e51e 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c @@ -148,7 +148,7 @@ struct swim3 { #define MOTOR_ON 2 #define RELAX 3 /* also eject in progress */ #define READ_DATA_0 4 -#define TWOMEG_DRIVE 5 +#define ONEMEG_DRIVE 5 #define SINGLE_SIDED 6 /* drive or diskette is 4MB type? */ #define DRIVE_PRESENT 7 #define DISK_IN 8 @@ -156,9 +156,9 @@ struct swim3 { #define TRACK_ZERO 10 #define TACHO 11 #define READ_DATA_1 12 -#define MFM_MODE 13 +#define GCR_MODE 13 #define SEEK_COMPLETE 14 -#define ONEMEG_MEDIA 15 +#define TWOMEG_MEDIA 15 /* Definitions of values used in writing and formatting */ #define DATA_ESCAPE 0x99 diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index d1c0b60e9326..6dc177bf4c42 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig @@ -33,6 +33,7 @@ config HISILICON_LPC bool "Support for ISA I/O space on HiSilicon Hip06/7" depends on ARM64 && (ARCH_HISI || COMPILE_TEST) select INDIRECT_PIO + select MFD_CORE if ACPI help Driver to enable I/O access to devices attached to the Low Pin Count bus on the HiSilicon Hip06/7 SoC. diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 8327478effd0..bfc566d3f31a 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -2371,7 +2371,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi, if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT) return media_changed(cdi, 1); - if ((unsigned int)arg >= cdi->capacity) + if (arg >= cdi->capacity) return -EINVAL; info = kmalloc(sizeof(*info), GFP_KERNEL); diff --git a/drivers/char/random.c b/drivers/char/random.c index e027e7fa1472..cd888d4ee605 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -261,6 +261,7 @@ #include <linux/ptrace.h> #include <linux/workqueue.h> #include <linux/irq.h> +#include <linux/ratelimit.h> #include <linux/syscalls.h> #include <linux/completion.h> #include <linux/uuid.h> @@ -427,8 +428,9 @@ struct crng_state primary_crng = { * its value (from 0->1->2). */ static int crng_init = 0; -#define crng_ready() (likely(crng_init > 0)) +#define crng_ready() (likely(crng_init > 1)) static int crng_init_cnt = 0; +static unsigned long crng_global_init_time = 0; #define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE) static void _extract_crng(struct crng_state *crng, __u32 out[CHACHA20_BLOCK_WORDS]); @@ -437,6 +439,16 @@ static void _crng_backtrack_protect(struct crng_state *crng, static void process_random_ready_list(void); static void _get_random_bytes(void *buf, int nbytes); +static struct ratelimit_state unseeded_warning = + RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3); +static struct ratelimit_state urandom_warning = + RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3); + +static int ratelimit_disable __read_mostly; + +module_param_named(ratelimit_disable, ratelimit_disable, int, 0644); +MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression"); + /********************************************************************** * * OS independent entropy store. 
Here are the functions which handle @@ -787,6 +799,43 @@ static void crng_initialize(struct crng_state *crng) crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; } +#ifdef CONFIG_NUMA +static void do_numa_crng_init(struct work_struct *work) +{ + int i; + struct crng_state *crng; + struct crng_state **pool; + + pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL); + for_each_online_node(i) { + crng = kmalloc_node(sizeof(struct crng_state), + GFP_KERNEL | __GFP_NOFAIL, i); + spin_lock_init(&crng->lock); + crng_initialize(crng); + pool[i] = crng; + } + mb(); + if (cmpxchg(&crng_node_pool, NULL, pool)) { + for_each_node(i) + kfree(pool[i]); + kfree(pool); + } +} + +static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init); + +static void numa_crng_init(void) +{ + schedule_work(&numa_crng_init_work); +} +#else +static void numa_crng_init(void) {} +#endif + +/* + * crng_fast_load() can be called by code in the interrupt service + * path. So we can't afford to dilly-dally. + */ static int crng_fast_load(const char *cp, size_t len) { unsigned long flags; @@ -794,7 +843,7 @@ static int crng_fast_load(const char *cp, size_t len) if (!spin_trylock_irqsave(&primary_crng.lock, flags)) return 0; - if (crng_ready()) { + if (crng_init != 0) { spin_unlock_irqrestore(&primary_crng.lock, flags); return 0; } @@ -813,6 +862,51 @@ static int crng_fast_load(const char *cp, size_t len) return 1; } +/* + * crng_slow_load() is called by add_device_randomness, which has two + * attributes. (1) We can't trust the buffer passed to it is + * guaranteed to be unpredictable (so it might not have any entropy at + * all), and (2) it doesn't have the performance constraints of + * crng_fast_load(). + * + * So we do something more comprehensive which is guaranteed to touch + * all of the primary_crng's state, and which uses a LFSR with a + * period of 255 as part of the mixing algorithm. Finally, we do + * *not* advance crng_init_cnt since buffer we may get may be something + * like a fixed DMI table (for example), which might very well be + * unique to the machine, but is otherwise unvarying. 
+ */ +static int crng_slow_load(const char *cp, size_t len) +{ + unsigned long flags; + static unsigned char lfsr = 1; + unsigned char tmp; + unsigned i, max = CHACHA20_KEY_SIZE; + const char * src_buf = cp; + char * dest_buf = (char *) &primary_crng.state[4]; + + if (!spin_trylock_irqsave(&primary_crng.lock, flags)) + return 0; + if (crng_init != 0) { + spin_unlock_irqrestore(&primary_crng.lock, flags); + return 0; + } + if (len > max) + max = len; + + for (i = 0; i < max ; i++) { + tmp = lfsr; + lfsr >>= 1; + if (tmp & 1) + lfsr ^= 0xE1; + tmp = dest_buf[i % CHACHA20_KEY_SIZE]; + dest_buf[i % CHACHA20_KEY_SIZE] ^= src_buf[i % len] ^ lfsr; + lfsr += (tmp << 3) | (tmp >> 5); + } + spin_unlock_irqrestore(&primary_crng.lock, flags); + return 1; +} + static void crng_reseed(struct crng_state *crng, struct entropy_store *r) { unsigned long flags; @@ -831,7 +925,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) _crng_backtrack_protect(&primary_crng, buf.block, CHACHA20_KEY_SIZE); } - spin_lock_irqsave(&primary_crng.lock, flags); + spin_lock_irqsave(&crng->lock, flags); for (i = 0; i < 8; i++) { unsigned long rv; if (!arch_get_random_seed_long(&rv) && @@ -841,13 +935,26 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) } memzero_explicit(&buf, sizeof(buf)); crng->init_time = jiffies; - spin_unlock_irqrestore(&primary_crng.lock, flags); + spin_unlock_irqrestore(&crng->lock, flags); if (crng == &primary_crng && crng_init < 2) { invalidate_batched_entropy(); + numa_crng_init(); crng_init = 2; process_random_ready_list(); wake_up_interruptible(&crng_init_wait); pr_notice("random: crng init done\n"); + if (unseeded_warning.missed) { + pr_notice("random: %d get_random_xx warning(s) missed " + "due to ratelimiting\n", + unseeded_warning.missed); + unseeded_warning.missed = 0; + } + if (urandom_warning.missed) { + pr_notice("random: %d urandom warning(s) missed " + "due to ratelimiting\n", + urandom_warning.missed); + urandom_warning.missed = 0; + } } } @@ -856,8 +963,9 @@ static void _extract_crng(struct crng_state *crng, { unsigned long v, flags; - if (crng_init > 1 && - time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)) + if (crng_ready() && + (time_after(crng_global_init_time, crng->init_time) || + time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL))) crng_reseed(crng, crng == &primary_crng ? 
&input_pool : NULL); spin_lock_irqsave(&crng->lock, flags); if (arch_get_random_long(&v)) @@ -981,10 +1089,8 @@ void add_device_randomness(const void *buf, unsigned int size) unsigned long time = random_get_entropy() ^ jiffies; unsigned long flags; - if (!crng_ready()) { - crng_fast_load(buf, size); - return; - } + if (!crng_ready() && size) + crng_slow_load(buf, size); trace_add_device_randomness(size, _RET_IP_); spin_lock_irqsave(&input_pool.lock, flags); @@ -1139,7 +1245,7 @@ void add_interrupt_randomness(int irq, int irq_flags) fast_mix(fast_pool); add_interrupt_bench(cycles); - if (!crng_ready()) { + if (unlikely(crng_init == 0)) { if ((fast_pool->count >= 64) && crng_fast_load((char *) fast_pool->pool, sizeof(fast_pool->pool))) { @@ -1489,8 +1595,9 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller, #ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM print_once = true; #endif - pr_notice("random: %s called from %pS with crng_init=%d\n", - func_name, caller, crng_init); + if (__ratelimit(&unseeded_warning)) + pr_notice("random: %s called from %pS with crng_init=%d\n", + func_name, caller, crng_init); } /* @@ -1680,28 +1787,14 @@ static void init_std_data(struct entropy_store *r) */ static int rand_initialize(void) { -#ifdef CONFIG_NUMA - int i; - struct crng_state *crng; - struct crng_state **pool; -#endif - init_std_data(&input_pool); init_std_data(&blocking_pool); crng_initialize(&primary_crng); - -#ifdef CONFIG_NUMA - pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL); - for_each_online_node(i) { - crng = kmalloc_node(sizeof(struct crng_state), - GFP_KERNEL | __GFP_NOFAIL, i); - spin_lock_init(&crng->lock); - crng_initialize(crng); - pool[i] = crng; + crng_global_init_time = jiffies; + if (ratelimit_disable) { + urandom_warning.interval = 0; + unseeded_warning.interval = 0; } - mb(); - crng_node_pool = pool; -#endif return 0; } early_initcall(rand_initialize); @@ -1769,9 +1862,10 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) if (!crng_ready() && maxwarn > 0) { maxwarn--; - printk(KERN_NOTICE "random: %s: uninitialized urandom read " - "(%zd bytes read)\n", - current->comm, nbytes); + if (__ratelimit(&urandom_warning)) + printk(KERN_NOTICE "random: %s: uninitialized " + "urandom read (%zd bytes read)\n", + current->comm, nbytes); spin_lock_irqsave(&primary_crng.lock, flags); crng_init_cnt = 0; spin_unlock_irqrestore(&primary_crng.lock, flags); @@ -1875,6 +1969,14 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) input_pool.entropy_count = 0; blocking_pool.entropy_count = 0; return 0; + case RNDRESEEDCRNG: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (crng_init < 2) + return -ENODATA; + crng_reseed(&primary_crng, NULL); + crng_global_init_time = jiffies - 1; + return 0; default: return -EINVAL; } @@ -2212,7 +2314,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count, { struct entropy_store *poolp = &input_pool; - if (!crng_ready()) { + if (unlikely(crng_init == 0)) { crng_fast_load(buffer, count); return; } diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 468f06134012..21085515814f 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -422,7 +422,7 @@ static void reclaim_dma_bufs(void) } } -static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size, +static struct port_buffer *alloc_buf(struct virtio_device *vdev, size_t buf_size, int pages) { struct port_buffer *buf; @@ -445,16 +445,16 @@ 
static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size, return buf; } - if (is_rproc_serial(vq->vdev)) { + if (is_rproc_serial(vdev)) { /* * Allocate DMA memory from ancestor. When a virtio * device is created by remoteproc, the DMA memory is * associated with the grandparent device: * vdev => rproc => platform-dev. */ - if (!vq->vdev->dev.parent || !vq->vdev->dev.parent->parent) + if (!vdev->dev.parent || !vdev->dev.parent->parent) goto free_buf; - buf->dev = vq->vdev->dev.parent->parent; + buf->dev = vdev->dev.parent->parent; /* Increase device refcnt to avoid freeing it */ get_device(buf->dev); @@ -838,7 +838,7 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, count = min((size_t)(32 * 1024), count); - buf = alloc_buf(port->out_vq, count, 0); + buf = alloc_buf(port->portdev->vdev, count, 0); if (!buf) return -ENOMEM; @@ -957,7 +957,7 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe, if (ret < 0) goto error_out; - buf = alloc_buf(port->out_vq, 0, pipe->nrbufs); + buf = alloc_buf(port->portdev->vdev, 0, pipe->nrbufs); if (!buf) { ret = -ENOMEM; goto error_out; @@ -1374,7 +1374,7 @@ static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock) nr_added_bufs = 0; do { - buf = alloc_buf(vq, PAGE_SIZE, 0); + buf = alloc_buf(vq->vdev, PAGE_SIZE, 0); if (!buf) break; @@ -1402,7 +1402,6 @@ static int add_port(struct ports_device *portdev, u32 id) { char debugfs_name[16]; struct port *port; - struct port_buffer *buf; dev_t devt; unsigned int nr_added_bufs; int err; @@ -1513,8 +1512,6 @@ static int add_port(struct ports_device *portdev, u32 id) return 0; free_inbufs: - while ((buf = virtqueue_detach_unused_buf(port->in_vq))) - free_buf(buf, true); free_device: device_destroy(pdrvdata.class, port->dev->devt); free_cdev: @@ -1539,34 +1536,14 @@ static void remove_port(struct kref *kref) static void remove_port_data(struct port *port) { - struct port_buffer *buf; - spin_lock_irq(&port->inbuf_lock); /* Remove unused data this port might have received. */ discard_port_data(port); spin_unlock_irq(&port->inbuf_lock); - /* Remove buffers we queued up for the Host to send us data in. */ - do { - spin_lock_irq(&port->inbuf_lock); - buf = virtqueue_detach_unused_buf(port->in_vq); - spin_unlock_irq(&port->inbuf_lock); - if (buf) - free_buf(buf, true); - } while (buf); - spin_lock_irq(&port->outvq_lock); reclaim_consumed_buffers(port); spin_unlock_irq(&port->outvq_lock); - - /* Free pending buffers from the out-queue. 
*/ - do { - spin_lock_irq(&port->outvq_lock); - buf = virtqueue_detach_unused_buf(port->out_vq); - spin_unlock_irq(&port->outvq_lock); - if (buf) - free_buf(buf, true); - } while (buf); } /* @@ -1791,13 +1768,24 @@ static void control_work_handler(struct work_struct *work) spin_unlock(&portdev->c_ivq_lock); } +static void flush_bufs(struct virtqueue *vq, bool can_sleep) +{ + struct port_buffer *buf; + unsigned int len; + + while ((buf = virtqueue_get_buf(vq, &len))) + free_buf(buf, can_sleep); +} + static void out_intr(struct virtqueue *vq) { struct port *port; port = find_port_by_vq(vq->vdev->priv, vq); - if (!port) + if (!port) { + flush_bufs(vq, false); return; + } wake_up_interruptible(&port->waitqueue); } @@ -1808,8 +1796,10 @@ static void in_intr(struct virtqueue *vq) unsigned long flags; port = find_port_by_vq(vq->vdev->priv, vq); - if (!port) + if (!port) { + flush_bufs(vq, false); return; + } spin_lock_irqsave(&port->inbuf_lock, flags); port->inbuf = get_inbuf(port); @@ -1984,24 +1974,54 @@ static const struct file_operations portdev_fops = { static void remove_vqs(struct ports_device *portdev) { + struct virtqueue *vq; + + virtio_device_for_each_vq(portdev->vdev, vq) { + struct port_buffer *buf; + + flush_bufs(vq, true); + while ((buf = virtqueue_detach_unused_buf(vq))) + free_buf(buf, true); + } portdev->vdev->config->del_vqs(portdev->vdev); kfree(portdev->in_vqs); kfree(portdev->out_vqs); } -static void remove_controlq_data(struct ports_device *portdev) +static void virtcons_remove(struct virtio_device *vdev) { - struct port_buffer *buf; - unsigned int len; + struct ports_device *portdev; + struct port *port, *port2; - if (!use_multiport(portdev)) - return; + portdev = vdev->priv; - while ((buf = virtqueue_get_buf(portdev->c_ivq, &len))) - free_buf(buf, true); + spin_lock_irq(&pdrvdata_lock); + list_del(&portdev->list); + spin_unlock_irq(&pdrvdata_lock); - while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq))) - free_buf(buf, true); + /* Disable interrupts for vqs */ + vdev->config->reset(vdev); + /* Finish up work that's lined up */ + if (use_multiport(portdev)) + cancel_work_sync(&portdev->control_work); + else + cancel_work_sync(&portdev->config_work); + + list_for_each_entry_safe(port, port2, &portdev->ports, list) + unplug_port(port); + + unregister_chrdev(portdev->chr_major, "virtio-portsdev"); + + /* + * When yanking out a device, we immediately lose the + * (device-side) queues. So there's no point in keeping the + * guest side around till we drop our final reference. This + * also means that any ports which are in an open state will + * have to just stop using the port, as the vqs are going + * away. + */ + remove_vqs(portdev); + kfree(portdev); } /* @@ -2070,6 +2090,7 @@ static int virtcons_probe(struct virtio_device *vdev) spin_lock_init(&portdev->ports_lock); INIT_LIST_HEAD(&portdev->ports); + INIT_LIST_HEAD(&portdev->list); virtio_device_ready(portdev->vdev); @@ -2087,8 +2108,15 @@ static int virtcons_probe(struct virtio_device *vdev) if (!nr_added_bufs) { dev_err(&vdev->dev, "Error allocating buffers for control queue\n"); - err = -ENOMEM; - goto free_vqs; + /* + * The host might want to notify mgmt sw about device + * add failure. + */ + __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, + VIRTIO_CONSOLE_DEVICE_READY, 0); + /* Device was functional: we need full cleanup. 
*/ + virtcons_remove(vdev); + return -ENOMEM; } } else { /* @@ -2119,11 +2147,6 @@ static int virtcons_probe(struct virtio_device *vdev) return 0; -free_vqs: - /* The host might want to notify mgmt sw about device add failure */ - __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, - VIRTIO_CONSOLE_DEVICE_READY, 0); - remove_vqs(portdev); free_chrdev: unregister_chrdev(portdev->chr_major, "virtio-portsdev"); free: @@ -2132,43 +2155,6 @@ fail: return err; } -static void virtcons_remove(struct virtio_device *vdev) -{ - struct ports_device *portdev; - struct port *port, *port2; - - portdev = vdev->priv; - - spin_lock_irq(&pdrvdata_lock); - list_del(&portdev->list); - spin_unlock_irq(&pdrvdata_lock); - - /* Disable interrupts for vqs */ - vdev->config->reset(vdev); - /* Finish up work that's lined up */ - if (use_multiport(portdev)) - cancel_work_sync(&portdev->control_work); - else - cancel_work_sync(&portdev->config_work); - - list_for_each_entry_safe(port, port2, &portdev->ports, list) - unplug_port(port); - - unregister_chrdev(portdev->chr_major, "virtio-portsdev"); - - /* - * When yanking out a device, we immediately lose the - * (device-side) queues. So there's no point in keeping the - * guest side around till we drop our final reference. This - * also means that any ports which are in an open state will - * have to just stop using the port, as the vqs are going - * away. - */ - remove_controlq_data(portdev); - remove_vqs(portdev); - kfree(portdev); -} - static struct virtio_device_id id_table[] = { { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID }, { 0 }, @@ -2209,7 +2195,6 @@ static int virtcons_freeze(struct virtio_device *vdev) */ if (use_multiport(portdev)) virtqueue_disable_cb(portdev->c_ivq); - remove_controlq_data(portdev); list_for_each_entry(port, &portdev->ports, list) { virtqueue_disable_cb(port->in_vq); diff --git a/drivers/clk/clk-cs2000-cp.c b/drivers/clk/clk-cs2000-cp.c index c58019750b7e..a2f8c42e527a 100644 --- a/drivers/clk/clk-cs2000-cp.c +++ b/drivers/clk/clk-cs2000-cp.c @@ -541,7 +541,7 @@ probe_err: return ret; } -static int cs2000_resume(struct device *dev) +static int __maybe_unused cs2000_resume(struct device *dev) { struct cs2000_priv *priv = dev_get_drvdata(dev); diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c index ac4a042f8658..1628b93655ed 100644 --- a/drivers/clk/clk-mux.c +++ b/drivers/clk/clk-mux.c @@ -112,10 +112,18 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index) return 0; } +static int clk_mux_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct clk_mux *mux = to_clk_mux(hw); + + return clk_mux_determine_rate_flags(hw, req, mux->flags); +} + const struct clk_ops clk_mux_ops = { .get_parent = clk_mux_get_parent, .set_parent = clk_mux_set_parent, - .determine_rate = __clk_mux_determine_rate, + .determine_rate = clk_mux_determine_rate, }; EXPORT_SYMBOL_GPL(clk_mux_ops); diff --git a/drivers/clk/clk-stm32mp1.c b/drivers/clk/clk-stm32mp1.c index f1d5967b4b39..edd3cf451401 100644 --- a/drivers/clk/clk-stm32mp1.c +++ b/drivers/clk/clk-stm32mp1.c @@ -216,7 +216,7 @@ static const char * const usart1_src[] = { "pclk5", "pll3_q", "ck_hsi", "ck_csi", "pll4_q", "ck_hse" }; -const char * const usart234578_src[] = { +static const char * const usart234578_src[] = { "pclk1", "pll4_q", "ck_hsi", "ck_csi", "ck_hse" }; @@ -224,10 +224,6 @@ static const char * const usart6_src[] = { "pclk2", "pll4_q", "ck_hsi", "ck_csi", "ck_hse" }; -static const char * const dfsdm_src[] = { - "pclk2", "ck_mcu" -}; - static const char * const 
fdcan_src[] = { "ck_hse", "pll3_q", "pll4_q" }; @@ -316,10 +312,8 @@ struct stm32_clk_mgate { struct clock_config { u32 id; const char *name; - union { - const char *parent_name; - const char * const *parent_names; - }; + const char *parent_name; + const char * const *parent_names; int num_parents; unsigned long flags; void *cfg; @@ -469,7 +463,7 @@ static void mp1_gate_clk_disable(struct clk_hw *hw) } } -const struct clk_ops mp1_gate_clk_ops = { +static const struct clk_ops mp1_gate_clk_ops = { .enable = mp1_gate_clk_enable, .disable = mp1_gate_clk_disable, .is_enabled = clk_gate_is_enabled, @@ -698,7 +692,7 @@ static void mp1_mgate_clk_disable(struct clk_hw *hw) mp1_gate_clk_disable(hw); } -const struct clk_ops mp1_mgate_clk_ops = { +static const struct clk_ops mp1_mgate_clk_ops = { .enable = mp1_mgate_clk_enable, .disable = mp1_mgate_clk_disable, .is_enabled = clk_gate_is_enabled, @@ -732,7 +726,7 @@ static int clk_mmux_set_parent(struct clk_hw *hw, u8 index) return 0; } -const struct clk_ops clk_mmux_ops = { +static const struct clk_ops clk_mmux_ops = { .get_parent = clk_mmux_get_parent, .set_parent = clk_mmux_set_parent, .determine_rate = __clk_mux_determine_rate, @@ -1048,10 +1042,10 @@ struct stm32_pll_cfg { u32 offset; }; -struct clk_hw *_clk_register_pll(struct device *dev, - struct clk_hw_onecell_data *clk_data, - void __iomem *base, spinlock_t *lock, - const struct clock_config *cfg) +static struct clk_hw *_clk_register_pll(struct device *dev, + struct clk_hw_onecell_data *clk_data, + void __iomem *base, spinlock_t *lock, + const struct clock_config *cfg) { struct stm32_pll_cfg *stm_pll_cfg = cfg->cfg; @@ -1405,7 +1399,8 @@ enum { G_USBH, G_ETHSTP, G_RTCAPB, - G_TZC, + G_TZC1, + G_TZC2, G_TZPC, G_IWDG1, G_BSEC, @@ -1417,7 +1412,7 @@ enum { G_LAST }; -struct stm32_mgate mp1_mgate[G_LAST]; +static struct stm32_mgate mp1_mgate[G_LAST]; #define _K_GATE(_id, _gate_offset, _gate_bit_idx, _gate_flags,\ _mgate, _ops)\ @@ -1440,7 +1435,7 @@ struct stm32_mgate mp1_mgate[G_LAST]; &mp1_mgate[_id], &mp1_mgate_clk_ops) /* Peripheral gates */ -struct stm32_gate_cfg per_gate_cfg[G_LAST] = { +static struct stm32_gate_cfg per_gate_cfg[G_LAST] = { /* Multi gates */ K_GATE(G_MDIO, RCC_APB1ENSETR, 31, 0), K_MGATE(G_DAC12, RCC_APB1ENSETR, 29, 0), @@ -1506,7 +1501,8 @@ struct stm32_gate_cfg per_gate_cfg[G_LAST] = { K_GATE(G_BSEC, RCC_APB5ENSETR, 16, 0), K_GATE(G_IWDG1, RCC_APB5ENSETR, 15, 0), K_GATE(G_TZPC, RCC_APB5ENSETR, 13, 0), - K_GATE(G_TZC, RCC_APB5ENSETR, 12, 0), + K_GATE(G_TZC2, RCC_APB5ENSETR, 12, 0), + K_GATE(G_TZC1, RCC_APB5ENSETR, 11, 0), K_GATE(G_RTCAPB, RCC_APB5ENSETR, 8, 0), K_MGATE(G_USART1, RCC_APB5ENSETR, 4, 0), K_MGATE(G_I2C6, RCC_APB5ENSETR, 3, 0), @@ -1600,7 +1596,7 @@ enum { M_LAST }; -struct stm32_mmux ker_mux[M_LAST]; +static struct stm32_mmux ker_mux[M_LAST]; #define _K_MUX(_id, _offset, _shift, _width, _mux_flags, _mmux, _ops)\ [_id] = {\ @@ -1623,7 +1619,7 @@ struct stm32_mmux ker_mux[M_LAST]; _K_MUX(_id, _offset, _shift, _width, _mux_flags,\ &ker_mux[_id], &clk_mmux_ops) -const struct stm32_mux_cfg ker_mux_cfg[M_LAST] = { +static const struct stm32_mux_cfg ker_mux_cfg[M_LAST] = { /* Kernel multi mux */ K_MMUX(M_SDMMC12, RCC_SDMMC12CKSELR, 0, 3, 0), K_MMUX(M_SPI23, RCC_SPI2S23CKSELR, 0, 3, 0), @@ -1860,7 +1856,8 @@ static const struct clock_config stm32mp1_clock_cfg[] = { PCLK(USART1, "usart1", "pclk5", 0, G_USART1), PCLK(RTCAPB, "rtcapb", "pclk5", CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, G_RTCAPB), - PCLK(TZC, "tzc", "pclk5", CLK_IGNORE_UNUSED, G_TZC), + PCLK(TZC1, "tzc1", 
"ck_axi", CLK_IGNORE_UNUSED, G_TZC1), + PCLK(TZC2, "tzc2", "ck_axi", CLK_IGNORE_UNUSED, G_TZC2), PCLK(TZPC, "tzpc", "pclk5", CLK_IGNORE_UNUSED, G_TZPC), PCLK(IWDG1, "iwdg1", "pclk5", 0, G_IWDG1), PCLK(BSEC, "bsec", "pclk5", CLK_IGNORE_UNUSED, G_BSEC), @@ -1916,8 +1913,7 @@ static const struct clock_config stm32mp1_clock_cfg[] = { KCLK(RNG1_K, "rng1_k", rng_src, 0, G_RNG1, M_RNG1), KCLK(RNG2_K, "rng2_k", rng_src, 0, G_RNG2, M_RNG2), KCLK(USBPHY_K, "usbphy_k", usbphy_src, 0, G_USBPHY, M_USBPHY), - KCLK(STGEN_K, "stgen_k", stgen_src, CLK_IGNORE_UNUSED, - G_STGEN, M_STGEN), + KCLK(STGEN_K, "stgen_k", stgen_src, CLK_IS_CRITICAL, G_STGEN, M_STGEN), KCLK(SPDIF_K, "spdif_k", spdif_src, 0, G_SPDIF, M_SPDIF), KCLK(SPI1_K, "spi1_k", spi123_src, 0, G_SPI1, M_SPI1), KCLK(SPI2_K, "spi2_k", spi123_src, 0, G_SPI2, M_SPI23), @@ -1948,8 +1944,8 @@ static const struct clock_config stm32mp1_clock_cfg[] = { KCLK(FDCAN_K, "fdcan_k", fdcan_src, 0, G_FDCAN, M_FDCAN), KCLK(SAI1_K, "sai1_k", sai_src, 0, G_SAI1, M_SAI1), KCLK(SAI2_K, "sai2_k", sai2_src, 0, G_SAI2, M_SAI2), - KCLK(SAI3_K, "sai3_k", sai_src, 0, G_SAI2, M_SAI3), - KCLK(SAI4_K, "sai4_k", sai_src, 0, G_SAI2, M_SAI4), + KCLK(SAI3_K, "sai3_k", sai_src, 0, G_SAI3, M_SAI3), + KCLK(SAI4_K, "sai4_k", sai_src, 0, G_SAI4, M_SAI4), KCLK(ADC12_K, "adc12_k", adc12_src, 0, G_ADC12, M_ADC12), KCLK(DSI_K, "dsi_k", dsi_src, 0, G_DSI, M_DSI), KCLK(ADFSDM_K, "adfsdm_k", sai_src, 0, G_ADFSDM, M_SAI1), @@ -1992,10 +1988,6 @@ static const struct clock_config stm32mp1_clock_cfg[] = { _DIV(RCC_MCO2CFGR, 4, 4, 0, NULL)), /* Debug clocks */ - FIXED_FACTOR(NO_ID, "ck_axi_div2", "ck_axi", 0, 1, 2), - - GATE(DBG, "ck_apb_dbg", "ck_axi_div2", 0, RCC_DBGCFGR, 8, 0), - GATE(CK_DBG, "ck_sys_dbg", "ck_axi", 0, RCC_DBGCFGR, 8, 0), COMPOSITE(CK_TRACE, "ck_trace", ck_trace_src, CLK_OPS_PARENT_ENABLE, diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index ea67ac81c6f9..7af555f0e60c 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -426,9 +426,9 @@ static bool mux_is_better_rate(unsigned long rate, unsigned long now, return now <= rate && now > best; } -static int -clk_mux_determine_rate_flags(struct clk_hw *hw, struct clk_rate_request *req, - unsigned long flags) +int clk_mux_determine_rate_flags(struct clk_hw *hw, + struct clk_rate_request *req, + unsigned long flags) { struct clk_core *core = hw->core, *parent, *best_parent = NULL; int i, num_parents, ret; @@ -488,6 +488,7 @@ out: return 0; } +EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags); struct clk *__clk_lookup(const char *name) { diff --git a/drivers/clk/meson/clk-regmap.c b/drivers/clk/meson/clk-regmap.c index 3645fdb62343..ab7a3556f5b2 100644 --- a/drivers/clk/meson/clk-regmap.c +++ b/drivers/clk/meson/clk-regmap.c @@ -153,10 +153,19 @@ static int clk_regmap_mux_set_parent(struct clk_hw *hw, u8 index) val << mux->shift); } +static int clk_regmap_mux_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct clk_regmap *clk = to_clk_regmap(hw); + struct clk_regmap_mux_data *mux = clk_get_regmap_mux_data(clk); + + return clk_mux_determine_rate_flags(hw, req, mux->flags); +} + const struct clk_ops clk_regmap_mux_ops = { .get_parent = clk_regmap_mux_get_parent, .set_parent = clk_regmap_mux_set_parent, - .determine_rate = __clk_mux_determine_rate, + .determine_rate = clk_regmap_mux_determine_rate, }; EXPORT_SYMBOL_GPL(clk_regmap_mux_ops); diff --git a/drivers/clk/meson/gxbb-aoclk.h b/drivers/clk/meson/gxbb-aoclk.h index 0be78383f257..badc4c22b4ee 100644 --- a/drivers/clk/meson/gxbb-aoclk.h +++ 
b/drivers/clk/meson/gxbb-aoclk.h @@ -17,8 +17,6 @@ #define AO_RTC_ALT_CLK_CNTL0 0x94 #define AO_RTC_ALT_CLK_CNTL1 0x98 -extern const struct clk_ops meson_aoclk_gate_regmap_ops; - struct aoclk_cec_32k { struct clk_hw hw; struct regmap *regmap; diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c index cc2992493e0b..d0524ec71aad 100644 --- a/drivers/clk/meson/meson8b.c +++ b/drivers/clk/meson/meson8b.c @@ -253,7 +253,7 @@ static struct clk_fixed_factor meson8b_fclk_div3_div = { .mult = 1, .div = 3, .hw.init = &(struct clk_init_data){ - .name = "fclk_div_div3", + .name = "fclk_div3_div", .ops = &clk_fixed_factor_ops, .parent_names = (const char *[]){ "fixed_pll" }, .num_parents = 1, @@ -632,7 +632,8 @@ static struct clk_regmap meson8b_cpu_clk = { .hw.init = &(struct clk_init_data){ .name = "cpu_clk", .ops = &clk_regmap_mux_ro_ops, - .parent_names = (const char *[]){ "xtal", "cpu_out_sel" }, + .parent_names = (const char *[]){ "xtal", + "cpu_scale_out_sel" }, .num_parents = 2, .flags = (CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT), diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 9ee2888275c1..8e8a09755d10 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -133,6 +133,14 @@ config VT8500_TIMER help Enables support for the VT8500 driver. +config NPCM7XX_TIMER + bool "NPCM7xx timer driver" if COMPILE_TEST + depends on HAS_IOMEM + select CLKSRC_MMIO + help + Enable 24-bit TIMER0 and TIMER1 counters in the NPCM7xx architecture, + While TIMER0 serves as clockevent and TIMER1 serves as clocksource. + config CADENCE_TTC_TIMER bool "Cadence TTC timer driver" if COMPILE_TEST depends on COMMON_CLK diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index e8e76dfef00b..00caf37e52f9 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -56,6 +56,7 @@ obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o obj-$(CONFIG_OWL_TIMER) += owl-timer.o obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o +obj-$(CONFIG_NPCM7XX_TIMER) += timer-npcm7xx.o obj-$(CONFIG_ARC_TIMERS) += arc_timer.o obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c index 21bffdcb2f20..6c8318470b48 100644 --- a/drivers/clocksource/timer-imx-tpm.c +++ b/drivers/clocksource/timer-imx-tpm.c @@ -17,9 +17,14 @@ #include <linux/of_irq.h> #include <linux/sched_clock.h> +#define TPM_PARAM 0x4 +#define TPM_PARAM_WIDTH_SHIFT 16 +#define TPM_PARAM_WIDTH_MASK (0xff << 16) #define TPM_SC 0x10 #define TPM_SC_CMOD_INC_PER_CNT (0x1 << 3) #define TPM_SC_CMOD_DIV_DEFAULT 0x3 +#define TPM_SC_CMOD_DIV_MAX 0x7 +#define TPM_SC_TOF_MASK (0x1 << 7) #define TPM_CNT 0x14 #define TPM_MOD 0x18 #define TPM_STATUS 0x1c @@ -29,8 +34,11 @@ #define TPM_C0SC_MODE_SHIFT 2 #define TPM_C0SC_MODE_MASK 0x3c #define TPM_C0SC_MODE_SW_COMPARE 0x4 +#define TPM_C0SC_CHF_MASK (0x1 << 7) #define TPM_C0V 0x24 +static int counter_width; +static int rating; static void __iomem *timer_base; static struct clock_event_device clockevent_tpm; @@ -83,10 +91,11 @@ static int __init tpm_clocksource_init(unsigned long rate) tpm_delay_timer.freq = rate; register_current_timer_delay(&tpm_delay_timer); - sched_clock_register(tpm_read_sched_clock, 32, rate); + sched_clock_register(tpm_read_sched_clock, counter_width, rate); return clocksource_mmio_init(timer_base + TPM_CNT, "imx-tpm", - rate, 200, 32, clocksource_mmio_readl_up); + rate, rating, counter_width, + 
clocksource_mmio_readl_up); } static int tpm_set_next_event(unsigned long delta, @@ -105,7 +114,7 @@ static int tpm_set_next_event(unsigned long delta, * of writing CNT registers which may cause the min_delta event got * missed, so we need add a ETIME check here in case it happened. */ - return (int)((next - now) <= 0) ? -ETIME : 0; + return (int)(next - now) <= 0 ? -ETIME : 0; } static int tpm_set_state_oneshot(struct clock_event_device *evt) @@ -139,7 +148,6 @@ static struct clock_event_device clockevent_tpm = { .set_state_oneshot = tpm_set_state_oneshot, .set_next_event = tpm_set_next_event, .set_state_shutdown = tpm_set_state_shutdown, - .rating = 200, }; static int __init tpm_clockevent_init(unsigned long rate, int irq) @@ -149,10 +157,11 @@ static int __init tpm_clockevent_init(unsigned long rate, int irq) ret = request_irq(irq, tpm_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL, "i.MX7ULP TPM Timer", &clockevent_tpm); + clockevent_tpm.rating = rating; clockevent_tpm.cpumask = cpumask_of(0); clockevent_tpm.irq = irq; - clockevents_config_and_register(&clockevent_tpm, - rate, 300, 0xfffffffe); + clockevents_config_and_register(&clockevent_tpm, rate, 300, + GENMASK(counter_width - 1, 1)); return ret; } @@ -179,7 +188,7 @@ static int __init tpm_timer_init(struct device_node *np) ipg = of_clk_get_by_name(np, "ipg"); per = of_clk_get_by_name(np, "per"); if (IS_ERR(ipg) || IS_ERR(per)) { - pr_err("tpm: failed to get igp or per clk\n"); + pr_err("tpm: failed to get ipg or per clk\n"); ret = -ENODEV; goto err_clk_get; } @@ -197,6 +206,11 @@ static int __init tpm_timer_init(struct device_node *np) goto err_per_clk_enable; } + counter_width = (readl(timer_base + TPM_PARAM) & TPM_PARAM_WIDTH_MASK) + >> TPM_PARAM_WIDTH_SHIFT; + /* use rating 200 for 32-bit counter and 150 for 16-bit counter */ + rating = counter_width == 0x20 ? 200 : 150; + /* * Initialize tpm module to a known state * 1) Counter disabled @@ -205,16 +219,25 @@ static int __init tpm_timer_init(struct device_node *np) * 4) Channel0 disabled * 5) DMA transfers disabled */ + /* make sure counter is disabled */ writel(0, timer_base + TPM_SC); + /* TOF is W1C */ + writel(TPM_SC_TOF_MASK, timer_base + TPM_SC); writel(0, timer_base + TPM_CNT); - writel(0, timer_base + TPM_C0SC); + /* CHF is W1C */ + writel(TPM_C0SC_CHF_MASK, timer_base + TPM_C0SC); - /* increase per cnt, div 8 by default */ - writel(TPM_SC_CMOD_INC_PER_CNT | TPM_SC_CMOD_DIV_DEFAULT, + /* + * increase per cnt, + * div 8 for 32-bit counter and div 128 for 16-bit counter + */ + writel(TPM_SC_CMOD_INC_PER_CNT | + (counter_width == 0x20 ? + TPM_SC_CMOD_DIV_DEFAULT : TPM_SC_CMOD_DIV_MAX), timer_base + TPM_SC); /* set MOD register to maximum for free running mode */ - writel(0xffffffff, timer_base + TPM_MOD); + writel(GENMASK(counter_width - 1, 0), timer_base + TPM_MOD); rate = clk_get_rate(per) >> 3; ret = tpm_clocksource_init(rate); diff --git a/drivers/clocksource/timer-npcm7xx.c b/drivers/clocksource/timer-npcm7xx.c new file mode 100644 index 000000000000..7a9bb5532d99 --- /dev/null +++ b/drivers/clocksource/timer-npcm7xx.c @@ -0,0 +1,215 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2014-2018 Nuvoton Technologies tomer.maimon@nuvoton.com + * All rights reserved. + * + * Copyright 2017 Google, Inc. 
+ */ + +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/err.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/clockchips.h> +#include <linux/of_irq.h> +#include <linux/of_address.h> +#include "timer-of.h" + +/* Timers registers */ +#define NPCM7XX_REG_TCSR0 0x0 /* Timer 0 Control and Status Register */ +#define NPCM7XX_REG_TICR0 0x8 /* Timer 0 Initial Count Register */ +#define NPCM7XX_REG_TCSR1 0x4 /* Timer 1 Control and Status Register */ +#define NPCM7XX_REG_TICR1 0xc /* Timer 1 Initial Count Register */ +#define NPCM7XX_REG_TDR1 0x14 /* Timer 1 Data Register */ +#define NPCM7XX_REG_TISR 0x18 /* Timer Interrupt Status Register */ + +/* Timers control */ +#define NPCM7XX_Tx_RESETINT 0x1f +#define NPCM7XX_Tx_PERIOD BIT(27) +#define NPCM7XX_Tx_INTEN BIT(29) +#define NPCM7XX_Tx_COUNTEN BIT(30) +#define NPCM7XX_Tx_ONESHOT 0x0 +#define NPCM7XX_Tx_OPER GENMASK(3, 27) +#define NPCM7XX_Tx_MIN_PRESCALE 0x1 +#define NPCM7XX_Tx_TDR_MASK_BITS 24 +#define NPCM7XX_Tx_MAX_CNT 0xFFFFFF +#define NPCM7XX_T0_CLR_INT 0x1 +#define NPCM7XX_Tx_CLR_CSR 0x0 + +/* Timers operating mode */ +#define NPCM7XX_START_PERIODIC_Tx (NPCM7XX_Tx_PERIOD | NPCM7XX_Tx_COUNTEN | \ + NPCM7XX_Tx_INTEN | \ + NPCM7XX_Tx_MIN_PRESCALE) + +#define NPCM7XX_START_ONESHOT_Tx (NPCM7XX_Tx_ONESHOT | NPCM7XX_Tx_COUNTEN | \ + NPCM7XX_Tx_INTEN | \ + NPCM7XX_Tx_MIN_PRESCALE) + +#define NPCM7XX_START_Tx (NPCM7XX_Tx_COUNTEN | NPCM7XX_Tx_PERIOD | \ + NPCM7XX_Tx_MIN_PRESCALE) + +#define NPCM7XX_DEFAULT_CSR (NPCM7XX_Tx_CLR_CSR | NPCM7XX_Tx_MIN_PRESCALE) + +static int npcm7xx_timer_resume(struct clock_event_device *evt) +{ + struct timer_of *to = to_timer_of(evt); + u32 val; + + val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0); + val |= NPCM7XX_Tx_COUNTEN; + writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0); + + return 0; +} + +static int npcm7xx_timer_shutdown(struct clock_event_device *evt) +{ + struct timer_of *to = to_timer_of(evt); + u32 val; + + val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0); + val &= ~NPCM7XX_Tx_COUNTEN; + writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0); + + return 0; +} + +static int npcm7xx_timer_oneshot(struct clock_event_device *evt) +{ + struct timer_of *to = to_timer_of(evt); + u32 val; + + val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0); + val &= ~NPCM7XX_Tx_OPER; + + val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0); + val |= NPCM7XX_START_ONESHOT_Tx; + writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0); + + return 0; +} + +static int npcm7xx_timer_periodic(struct clock_event_device *evt) +{ + struct timer_of *to = to_timer_of(evt); + u32 val; + + val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0); + val &= ~NPCM7XX_Tx_OPER; + + writel(timer_of_period(to), timer_of_base(to) + NPCM7XX_REG_TICR0); + val |= NPCM7XX_START_PERIODIC_Tx; + + writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0); + + return 0; +} + +static int npcm7xx_clockevent_set_next_event(unsigned long evt, + struct clock_event_device *clk) +{ + struct timer_of *to = to_timer_of(clk); + u32 val; + + writel(evt, timer_of_base(to) + NPCM7XX_REG_TICR0); + val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0); + val |= NPCM7XX_START_Tx; + writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0); + + return 0; +} + +static irqreturn_t npcm7xx_timer0_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *evt = (struct clock_event_device *)dev_id; + struct timer_of *to = to_timer_of(evt); + + writel(NPCM7XX_T0_CLR_INT, timer_of_base(to) + NPCM7XX_REG_TISR); + 
+ evt->event_handler(evt); + + return IRQ_HANDLED; +} + +static struct timer_of npcm7xx_to = { + .flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK, + + .clkevt = { + .name = "npcm7xx-timer0", + .features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT, + .set_next_event = npcm7xx_clockevent_set_next_event, + .set_state_shutdown = npcm7xx_timer_shutdown, + .set_state_periodic = npcm7xx_timer_periodic, + .set_state_oneshot = npcm7xx_timer_oneshot, + .tick_resume = npcm7xx_timer_resume, + .rating = 300, + }, + + .of_irq = { + .handler = npcm7xx_timer0_interrupt, + .flags = IRQF_TIMER | IRQF_IRQPOLL, + }, +}; + +static void __init npcm7xx_clockevents_init(void) +{ + writel(NPCM7XX_DEFAULT_CSR, + timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR0); + + writel(NPCM7XX_Tx_RESETINT, + timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TISR); + + npcm7xx_to.clkevt.cpumask = cpumask_of(0); + clockevents_config_and_register(&npcm7xx_to.clkevt, + timer_of_rate(&npcm7xx_to), + 0x1, NPCM7XX_Tx_MAX_CNT); +} + +static void __init npcm7xx_clocksource_init(void) +{ + u32 val; + + writel(NPCM7XX_DEFAULT_CSR, + timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR1); + writel(NPCM7XX_Tx_MAX_CNT, + timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TICR1); + + val = readl(timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR1); + val |= NPCM7XX_START_Tx; + writel(val, timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR1); + + clocksource_mmio_init(timer_of_base(&npcm7xx_to) + + NPCM7XX_REG_TDR1, + "npcm7xx-timer1", timer_of_rate(&npcm7xx_to), + 200, (unsigned int)NPCM7XX_Tx_TDR_MASK_BITS, + clocksource_mmio_readl_down); +} + +static int __init npcm7xx_timer_init(struct device_node *np) +{ + int ret; + + ret = timer_of_init(np, &npcm7xx_to); + if (ret) + return ret; + + /* Clock input is divided by PRESCALE + 1 before it is fed */ + /* to the counter */ + npcm7xx_to.of_clk.rate = npcm7xx_to.of_clk.rate / + (NPCM7XX_Tx_MIN_PRESCALE + 1); + + npcm7xx_clocksource_init(); + npcm7xx_clockevents_init(); + + pr_info("Enabling NPCM7xx clocksource timer base: %px, IRQ: %d ", + timer_of_base(&npcm7xx_to), timer_of_irq(&npcm7xx_to)); + + return 0; +} + +TIMER_OF_DECLARE(npcm7xx, "nuvoton,npcm750-timer", npcm7xx_timer_init); + diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 7f56fe5183f2..de55c7d57438 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -71,16 +71,6 @@ config ARM_BRCMSTB_AVS_CPUFREQ Say Y, if you have a Broadcom SoC with AVS support for DFS or DVFS. -config ARM_BRCMSTB_AVS_CPUFREQ_DEBUG - bool "Broadcom STB AVS CPUfreq driver sysfs debug capability" - depends on ARM_BRCMSTB_AVS_CPUFREQ - help - Enabling this option turns on debug support via sysfs under - /sys/kernel/debug/brcmstb-avs-cpufreq. It is possible to read all and - write some AVS mailbox registers through sysfs entries. - - If in doubt, say N. 
- config ARM_EXYNOS5440_CPUFREQ tristate "SAMSUNG EXYNOS5440" depends on SOC_EXYNOS5440 diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c index 6cdac1aaf23c..b07559b9ed99 100644 --- a/drivers/cpufreq/brcmstb-avs-cpufreq.c +++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c @@ -49,13 +49,6 @@ #include <linux/platform_device.h> #include <linux/semaphore.h> -#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG -#include <linux/ctype.h> -#include <linux/debugfs.h> -#include <linux/slab.h> -#include <linux/uaccess.h> -#endif - /* Max number of arguments AVS calls take */ #define AVS_MAX_CMD_ARGS 4 /* @@ -182,88 +175,11 @@ struct private_data { void __iomem *base; void __iomem *avs_intr_base; struct device *dev; -#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG - struct dentry *debugfs; -#endif struct completion done; struct semaphore sem; struct pmap pmap; }; -#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG - -enum debugfs_format { - DEBUGFS_NORMAL, - DEBUGFS_FLOAT, - DEBUGFS_REV, -}; - -struct debugfs_data { - struct debugfs_entry *entry; - struct private_data *priv; -}; - -struct debugfs_entry { - char *name; - u32 offset; - fmode_t mode; - enum debugfs_format format; -}; - -#define DEBUGFS_ENTRY(name, mode, format) { \ - #name, AVS_MBOX_##name, mode, format \ -} - -/* - * These are used for debugfs only. Otherwise we use AVS_MBOX_PARAM() directly. - */ -#define AVS_MBOX_PARAM1 AVS_MBOX_PARAM(0) -#define AVS_MBOX_PARAM2 AVS_MBOX_PARAM(1) -#define AVS_MBOX_PARAM3 AVS_MBOX_PARAM(2) -#define AVS_MBOX_PARAM4 AVS_MBOX_PARAM(3) - -/* - * This table stores the name, access permissions and offset for each hardware - * register and is used to generate debugfs entries. - */ -static struct debugfs_entry debugfs_entries[] = { - DEBUGFS_ENTRY(COMMAND, S_IWUSR, DEBUGFS_NORMAL), - DEBUGFS_ENTRY(STATUS, S_IWUSR, DEBUGFS_NORMAL), - DEBUGFS_ENTRY(VOLTAGE0, 0, DEBUGFS_FLOAT), - DEBUGFS_ENTRY(TEMP0, 0, DEBUGFS_FLOAT), - DEBUGFS_ENTRY(PV0, 0, DEBUGFS_FLOAT), - DEBUGFS_ENTRY(MV0, 0, DEBUGFS_FLOAT), - DEBUGFS_ENTRY(PARAM1, S_IWUSR, DEBUGFS_NORMAL), - DEBUGFS_ENTRY(PARAM2, S_IWUSR, DEBUGFS_NORMAL), - DEBUGFS_ENTRY(PARAM3, S_IWUSR, DEBUGFS_NORMAL), - DEBUGFS_ENTRY(PARAM4, S_IWUSR, DEBUGFS_NORMAL), - DEBUGFS_ENTRY(REVISION, 0, DEBUGFS_REV), - DEBUGFS_ENTRY(PSTATE, 0, DEBUGFS_NORMAL), - DEBUGFS_ENTRY(HEARTBEAT, 0, DEBUGFS_NORMAL), - DEBUGFS_ENTRY(MAGIC, S_IWUSR, DEBUGFS_NORMAL), - DEBUGFS_ENTRY(SIGMA_HVT, 0, DEBUGFS_NORMAL), - DEBUGFS_ENTRY(SIGMA_SVT, 0, DEBUGFS_NORMAL), - DEBUGFS_ENTRY(VOLTAGE1, 0, DEBUGFS_FLOAT), - DEBUGFS_ENTRY(TEMP1, 0, DEBUGFS_FLOAT), - DEBUGFS_ENTRY(PV1, 0, DEBUGFS_FLOAT), - DEBUGFS_ENTRY(MV1, 0, DEBUGFS_FLOAT), - DEBUGFS_ENTRY(FREQUENCY, 0, DEBUGFS_NORMAL), -}; - -static int brcm_avs_target_index(struct cpufreq_policy *, unsigned int); - -static char *__strtolower(char *s) -{ - char *p; - - for (p = s; *p; p++) - *p = tolower(*p); - - return s; -} - -#endif /* CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG */ - static void __iomem *__map_region(const char *name) { struct device_node *np; @@ -516,238 +432,6 @@ brcm_avs_get_freq_table(struct device *dev, struct private_data *priv) return table; } -#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG - -#define MANT(x) (unsigned int)(abs((x)) / 1000) -#define FRAC(x) (unsigned int)(abs((x)) - abs((x)) / 1000 * 1000) - -static int brcm_avs_debug_show(struct seq_file *s, void *data) -{ - struct debugfs_data *dbgfs = s->private; - void __iomem *base; - u32 val, offset; - - if (!dbgfs) { - seq_puts(s, "No device pointer\n"); - return 0; - } - - base = 
dbgfs->priv->base; - offset = dbgfs->entry->offset; - val = readl(base + offset); - switch (dbgfs->entry->format) { - case DEBUGFS_NORMAL: - seq_printf(s, "%u\n", val); - break; - case DEBUGFS_FLOAT: - seq_printf(s, "%d.%03d\n", MANT(val), FRAC(val)); - break; - case DEBUGFS_REV: - seq_printf(s, "%c.%c.%c.%c\n", (val >> 24 & 0xff), - (val >> 16 & 0xff), (val >> 8 & 0xff), - val & 0xff); - break; - } - seq_printf(s, "0x%08x\n", val); - - return 0; -} - -#undef MANT -#undef FRAC - -static ssize_t brcm_avs_seq_write(struct file *file, const char __user *buf, - size_t size, loff_t *ppos) -{ - struct seq_file *s = file->private_data; - struct debugfs_data *dbgfs = s->private; - struct private_data *priv = dbgfs->priv; - void __iomem *base, *avs_intr_base; - bool use_issue_command = false; - unsigned long val, offset; - char str[128]; - int ret; - char *str_ptr = str; - - if (size >= sizeof(str)) - return -E2BIG; - - memset(str, 0, sizeof(str)); - ret = copy_from_user(str, buf, size); - if (ret) - return ret; - - base = priv->base; - avs_intr_base = priv->avs_intr_base; - offset = dbgfs->entry->offset; - /* - * Special case writing to "command" entry only: if the string starts - * with a 'c', we use the driver's __issue_avs_command() function. - * Otherwise, we perform a raw write. This should allow testing of raw - * access as well as using the higher level function. (Raw access - * doesn't clear the firmware return status after issuing the command.) - */ - if (str_ptr[0] == 'c' && offset == AVS_MBOX_COMMAND) { - use_issue_command = true; - str_ptr++; - } - if (kstrtoul(str_ptr, 0, &val) != 0) - return -EINVAL; - - /* - * Setting the P-state is a special case. We need to update the CPU - * frequency we report. - */ - if (val == AVS_CMD_SET_PSTATE) { - struct cpufreq_policy *policy; - unsigned int pstate; - - policy = cpufreq_cpu_get(smp_processor_id()); - /* Read back the P-state we are about to set */ - pstate = readl(base + AVS_MBOX_PARAM(0)); - if (use_issue_command) { - ret = brcm_avs_target_index(policy, pstate); - return ret ? ret : size; - } - policy->cur = policy->freq_table[pstate].frequency; - } - - if (use_issue_command) { - ret = __issue_avs_command(priv, val, false, NULL); - } else { - /* Locking here is not perfect, but is only for debug. */ - ret = down_interruptible(&priv->sem); - if (ret) - return ret; - - writel(val, base + offset); - /* We have to wake up the firmware to process a command. */ - if (offset == AVS_MBOX_COMMAND) - writel(AVS_CPU_L2_INT_MASK, - avs_intr_base + AVS_CPU_L2_SET0); - up(&priv->sem); - } - - return ret ? ret : size; -} - -static struct debugfs_entry *__find_debugfs_entry(const char *name) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(debugfs_entries); i++) - if (strcasecmp(debugfs_entries[i].name, name) == 0) - return &debugfs_entries[i]; - - return NULL; -} - -static int brcm_avs_debug_open(struct inode *inode, struct file *file) -{ - struct debugfs_data *data; - fmode_t fmode; - int ret; - - /* - * seq_open(), which is called by single_open(), clears "write" access. - * We need write access to some files, so we preserve our access mode - * and restore it. - */ - fmode = file->f_mode; - /* - * Check access permissions even for root. We don't want to be writing - * to read-only registers. Access for regular users has already been - * checked by the VFS layer. 
- */ - if ((fmode & FMODE_WRITER) && !(inode->i_mode & S_IWUSR)) - return -EACCES; - - data = kmalloc(sizeof(*data), GFP_KERNEL); - if (!data) - return -ENOMEM; - /* - * We use the same file system operations for all our debug files. To - * produce specific output, we look up the file name upon opening a - * debugfs entry and map it to a memory offset. This offset is then used - * in the generic "show" function to read a specific register. - */ - data->entry = __find_debugfs_entry(file->f_path.dentry->d_iname); - data->priv = inode->i_private; - - ret = single_open(file, brcm_avs_debug_show, data); - if (ret) - kfree(data); - file->f_mode = fmode; - - return ret; -} - -static int brcm_avs_debug_release(struct inode *inode, struct file *file) -{ - struct seq_file *seq_priv = file->private_data; - struct debugfs_data *data = seq_priv->private; - - kfree(data); - return single_release(inode, file); -} - -static const struct file_operations brcm_avs_debug_ops = { - .open = brcm_avs_debug_open, - .read = seq_read, - .write = brcm_avs_seq_write, - .llseek = seq_lseek, - .release = brcm_avs_debug_release, -}; - -static void brcm_avs_cpufreq_debug_init(struct platform_device *pdev) -{ - struct private_data *priv = platform_get_drvdata(pdev); - struct dentry *dir; - int i; - - if (!priv) - return; - - dir = debugfs_create_dir(BRCM_AVS_CPUFREQ_NAME, NULL); - if (IS_ERR_OR_NULL(dir)) - return; - priv->debugfs = dir; - - for (i = 0; i < ARRAY_SIZE(debugfs_entries); i++) { - /* - * The DEBUGFS_ENTRY macro generates uppercase strings. We - * convert them to lowercase before creating the debugfs - * entries. - */ - char *entry = __strtolower(debugfs_entries[i].name); - fmode_t mode = debugfs_entries[i].mode; - - if (!debugfs_create_file(entry, S_IFREG | S_IRUGO | mode, - dir, priv, &brcm_avs_debug_ops)) { - priv->debugfs = NULL; - debugfs_remove_recursive(dir); - break; - } - } -} - -static void brcm_avs_cpufreq_debug_exit(struct platform_device *pdev) -{ - struct private_data *priv = platform_get_drvdata(pdev); - - if (priv && priv->debugfs) { - debugfs_remove_recursive(priv->debugfs); - priv->debugfs = NULL; - } -} - -#else - -static void brcm_avs_cpufreq_debug_init(struct platform_device *pdev) {} -static void brcm_avs_cpufreq_debug_exit(struct platform_device *pdev) {} - -#endif /* CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG */ - /* * To ensure the right firmware is running we need to * - check the MAGIC matches what we expect @@ -1016,11 +700,8 @@ static int brcm_avs_cpufreq_probe(struct platform_device *pdev) return ret; brcm_avs_driver.driver_data = pdev; - ret = cpufreq_register_driver(&brcm_avs_driver); - if (!ret) - brcm_avs_cpufreq_debug_init(pdev); - return ret; + return cpufreq_register_driver(&brcm_avs_driver); } static int brcm_avs_cpufreq_remove(struct platform_device *pdev) @@ -1032,8 +713,6 @@ static int brcm_avs_cpufreq_remove(struct platform_device *pdev) if (ret) return ret; - brcm_avs_cpufreq_debug_exit(pdev); - priv = platform_get_drvdata(pdev); iounmap(priv->base); iounmap(priv->avs_intr_base); diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index bc5fc1630876..b15115a48775 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -126,6 +126,49 @@ static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy) cpu->perf_caps.lowest_perf, cpu_num, ret); } +/* + * The PCC subspace describes the rate at which platform can accept commands + * on the shared PCC channel (including READs which do not count towards freq + * trasition 
requests), so ideally we need to use the PCC values as a fallback + * if we don't have a platform specific transition_delay_us + */ +#ifdef CONFIG_ARM64 +#include <asm/cputype.h> + +static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu) +{ + unsigned long implementor = read_cpuid_implementor(); + unsigned long part_num = read_cpuid_part_number(); + unsigned int delay_us = 0; + + switch (implementor) { + case ARM_CPU_IMP_QCOM: + switch (part_num) { + case QCOM_CPU_PART_FALKOR_V1: + case QCOM_CPU_PART_FALKOR: + delay_us = 10000; + break; + default: + delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC; + break; + } + break; + default: + delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC; + break; + } + + return delay_us; +} + +#else + +static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu) +{ + return cppc_get_transition_latency(cpu) / NSEC_PER_USEC; +} +#endif + static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) { struct cppc_cpudata *cpu; @@ -162,8 +205,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) cpu->perf_caps.highest_perf; policy->cpuinfo.max_freq = cppc_dmi_max_khz; - policy->transition_delay_us = cppc_get_transition_latency(cpu_num) / - NSEC_PER_USEC; + policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu_num); policy->shared_type = cpu->shared_type; if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c index 0591874856d3..54edaec1e608 100644 --- a/drivers/cpufreq/powernv-cpufreq.c +++ b/drivers/cpufreq/powernv-cpufreq.c @@ -679,6 +679,16 @@ void gpstate_timer_handler(struct timer_list *t) if (!spin_trylock(&gpstates->gpstate_lock)) return; + /* + * If the timer has migrated to the different cpu then bring + * it back to one of the policy->cpus + */ + if (!cpumask_test_cpu(raw_smp_processor_id(), policy->cpus)) { + gpstates->timer.expires = jiffies + msecs_to_jiffies(1); + add_timer_on(&gpstates->timer, cpumask_first(policy->cpus)); + spin_unlock(&gpstates->gpstate_lock); + return; + } /* * If PMCR was last updated was using fast_swtich then @@ -718,10 +728,8 @@ void gpstate_timer_handler(struct timer_list *t) if (gpstate_idx != gpstates->last_lpstate_idx) queue_gpstate_timer(gpstates); + set_pstate(&freq_data); spin_unlock(&gpstates->gpstate_lock); - - /* Timer may get migrated to a different cpu on cpu hot unplug */ - smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1); } /* diff --git a/drivers/dax/device.c b/drivers/dax/device.c index be8606457f27..aff2c1594220 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -19,6 +19,7 @@ #include <linux/dax.h> #include <linux/fs.h> #include <linux/mm.h> +#include <linux/mman.h> #include "dax-private.h" #include "dax.h" @@ -540,6 +541,7 @@ static const struct file_operations dax_fops = { .release = dax_release, .get_unmapped_area = dax_get_unmapped_area, .mmap = dax_mmap, + .mmap_supported_flags = MAP_SYNC, }; static void dev_dax_release(struct device *dev) diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c index e6f17825db79..2b90606452a2 100644 --- a/drivers/firmware/arm_scmi/clock.c +++ b/drivers/firmware/arm_scmi/clock.c @@ -284,7 +284,7 @@ scmi_clock_info_get(const struct scmi_handle *handle, u32 clk_id) struct clock_info *ci = handle->clk_priv; struct scmi_clock_info *clk = ci->clk + clk_id; - if (!clk->name || !clk->name[0]) + if (!clk->name[0]) return NULL; return clk; diff --git 
a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c index 14f14efdf0d5..06d212a3d49d 100644 --- a/drivers/fpga/altera-ps-spi.c +++ b/drivers/fpga/altera-ps-spi.c @@ -249,7 +249,7 @@ static int altera_ps_probe(struct spi_device *spi) conf->data = of_id->data; conf->spi = spi; - conf->config = devm_gpiod_get(&spi->dev, "nconfig", GPIOD_OUT_HIGH); + conf->config = devm_gpiod_get(&spi->dev, "nconfig", GPIOD_OUT_LOW); if (IS_ERR(conf->config)) { dev_err(&spi->dev, "Failed to get config gpio: %ld\n", PTR_ERR(conf->config)); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index b0e591eaa71a..e14263fca1c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1459,10 +1459,11 @@ static const u32 sgpr_init_compute_shader[] = static const u32 vgpr_init_regs[] = { mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff, - mmCOMPUTE_RESOURCE_LIMITS, 0, + mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */ mmCOMPUTE_NUM_THREAD_X, 256*4, mmCOMPUTE_NUM_THREAD_Y, 1, mmCOMPUTE_NUM_THREAD_Z, 1, + mmCOMPUTE_PGM_RSRC1, 0x100004f, /* VGPRS=15 (64 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */ mmCOMPUTE_PGM_RSRC2, 20, mmCOMPUTE_USER_DATA_0, 0xedcedc00, mmCOMPUTE_USER_DATA_1, 0xedcedc01, @@ -1479,10 +1480,11 @@ static const u32 vgpr_init_regs[] = static const u32 sgpr1_init_regs[] = { mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f, - mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, + mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */ mmCOMPUTE_NUM_THREAD_X, 256*5, mmCOMPUTE_NUM_THREAD_Y, 1, mmCOMPUTE_NUM_THREAD_Z, 1, + mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */ mmCOMPUTE_PGM_RSRC2, 20, mmCOMPUTE_USER_DATA_0, 0xedcedc00, mmCOMPUTE_USER_DATA_1, 0xedcedc01, @@ -1503,6 +1505,7 @@ static const u32 sgpr2_init_regs[] = mmCOMPUTE_NUM_THREAD_X, 256*5, mmCOMPUTE_NUM_THREAD_Y, 1, mmCOMPUTE_NUM_THREAD_Z, 1, + mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */ mmCOMPUTE_PGM_RSRC2, 20, mmCOMPUTE_USER_DATA_0, 0xedcedc00, mmCOMPUTE_USER_DATA_1, 0xedcedc01, diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig index ed2f06c9f346..3858820a0055 100644 --- a/drivers/gpu/drm/amd/amdkfd/Kconfig +++ b/drivers/gpu/drm/amd/amdkfd/Kconfig @@ -6,5 +6,6 @@ config HSA_AMD tristate "HSA kernel driver for AMD GPU devices" depends on DRM_AMDGPU && X86_64 imply AMD_IOMMU_V2 + select MMU_NOTIFIER help Enable this if you want to use HSA features on AMD GPU devices. diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index cd679cf1fd30..59808a39ecf4 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -749,12 +749,13 @@ static int kfd_ioctl_get_clock_counters(struct file *filep, struct timespec64 time; dev = kfd_device_by_id(args->gpu_id); - if (dev == NULL) - return -EINVAL; - - /* Reading GPU clock counter from KGD */ - args->gpu_clock_counter = - dev->kfd2kgd->get_gpu_clock_counter(dev->kgd); + if (dev) + /* Reading GPU clock counter from KGD */ + args->gpu_clock_counter = + dev->kfd2kgd->get_gpu_clock_counter(dev->kgd); + else + /* Node without GPU resource */ + args->gpu_clock_counter = 0; /* No access to rdtsc. 
Using raw monotonic time */ getrawmonotonic64(&time); @@ -1147,7 +1148,7 @@ err_unlock: return ret; } -bool kfd_dev_is_large_bar(struct kfd_dev *dev) +static bool kfd_dev_is_large_bar(struct kfd_dev *dev) { struct kfd_local_mem_info mem_info; @@ -1421,7 +1422,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep, pdd = kfd_get_process_device_data(dev, p); if (!pdd) { - err = PTR_ERR(pdd); + err = -EINVAL; goto bind_process_to_device_failed; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 4e2f379ce217..1dd1142246c2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -4557,6 +4557,7 @@ static int dm_update_crtcs_state(struct dc *dc, struct amdgpu_dm_connector *aconnector = NULL; struct drm_connector_state *new_con_state = NULL; struct dm_connector_state *dm_conn_state = NULL; + struct drm_plane_state *new_plane_state = NULL; new_stream = NULL; @@ -4564,6 +4565,13 @@ static int dm_update_crtcs_state(struct dc *dc, dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); acrtc = to_amdgpu_crtc(crtc); + new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary); + + if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) { + ret = -EINVAL; + goto fail; + } + aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); /* TODO This hack should go away */ @@ -4760,7 +4768,7 @@ static int dm_update_planes_state(struct dc *dc, if (!dm_old_crtc_state->stream) continue; - DRM_DEBUG_DRIVER("Disabling DRM plane: %d on DRM crtc %d\n", + DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n", plane->base.id, old_plane_crtc->base.id); if (!dc_remove_plane_from_context( diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index f6cb502c303f..25f064c01038 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c @@ -138,13 +138,6 @@ int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc) lut = (struct drm_color_lut *)blob->data; lut_size = blob->length / sizeof(struct drm_color_lut); - if (__is_lut_linear(lut, lut_size)) { - /* Set to bypass if lut is set to linear */ - stream->out_transfer_func->type = TF_TYPE_BYPASS; - stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR; - return 0; - } - gamma = dc_create_gamma(); if (!gamma) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index 490017df371d..4be21bf54749 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -329,14 +329,15 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev) { int src; struct irq_list_head *lh; + unsigned long irq_table_flags; DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n"); - for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) { - + DM_IRQ_TABLE_LOCK(adev, irq_table_flags); /* The handler was removed from the table, * it means it is safe to flush all the 'work' * (because no code can schedule a new one). 
*/ lh = &adev->dm.irq_handler_list_low_tab[src]; + DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); flush_work(&lh->work); } } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 8291d74f26bc..ace9ad578ca0 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -161,6 +161,11 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector) struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder; + if (amdgpu_dm_connector->edid) { + kfree(amdgpu_dm_connector->edid); + amdgpu_dm_connector->edid = NULL; + } + drm_encoder_cleanup(&amdgpu_encoder->base); kfree(amdgpu_encoder); drm_connector_cleanup(connector); @@ -181,28 +186,22 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = { void dm_dp_mst_dc_sink_create(struct drm_connector *connector) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - struct edid *edid; struct dc_sink *dc_sink; struct dc_sink_init_data init_params = { .link = aconnector->dc_link, .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; + /* FIXME none of this is safe. we shouldn't touch aconnector here in + * atomic_check + */ + /* * TODO: Need to further figure out why ddc.algo is NULL while MST port exists */ if (!aconnector->port || !aconnector->port->aux.ddc.algo) return; - edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port); - - if (!edid) { - drm_mode_connector_update_edid_property( - &aconnector->base, - NULL); - return; - } - - aconnector->edid = edid; + ASSERT(aconnector->edid); dc_sink = dc_link_add_remote_sink( aconnector->dc_link, @@ -215,9 +214,6 @@ void dm_dp_mst_dc_sink_create(struct drm_connector *connector) amdgpu_dm_add_sink_to_freesync_module( connector, aconnector->edid); - - drm_mode_connector_update_edid_property( - &aconnector->base, aconnector->edid); } static int dm_dp_mst_get_modes(struct drm_connector *connector) @@ -230,10 +226,6 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) if (!aconnector->edid) { struct edid *edid; - struct dc_sink *dc_sink; - struct dc_sink_init_data init_params = { - .link = aconnector->dc_link, - .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port); if (!edid) { @@ -244,11 +236,17 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) } aconnector->edid = edid; + } + if (!aconnector->dc_sink) { + struct dc_sink *dc_sink; + struct dc_sink_init_data init_params = { + .link = aconnector->dc_link, + .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; dc_sink = dc_link_add_remote_sink( aconnector->dc_link, - (uint8_t *)edid, - (edid->extensions + 1) * EDID_LENGTH, + (uint8_t *)aconnector->edid, + (aconnector->edid->extensions + 1) * EDID_LENGTH, &init_params); dc_sink->priv = aconnector; @@ -256,12 +254,12 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) if (aconnector->dc_sink) amdgpu_dm_add_sink_to_freesync_module( - connector, edid); - - drm_mode_connector_update_edid_property( - &aconnector->base, edid); + connector, aconnector->edid); } + drm_mode_connector_update_edid_property( + &aconnector->base, aconnector->edid); + ret = drm_add_edid_modes(connector, aconnector->edid); return ret; @@ -424,14 +422,6 @@ static void dm_dp_destroy_mst_connector(struct 
drm_dp_mst_topology_mgr *mgr, dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; } - if (aconnector->edid) { - kfree(aconnector->edid); - aconnector->edid = NULL; - } - - drm_mode_connector_update_edid_property( - &aconnector->base, - NULL); aconnector->mst_connected = false; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index add90675fd2a..26fbeafc3c96 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -4743,23 +4743,27 @@ static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr) for (i=0; i < dep_table->count; i++) { if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; - break; + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK; + return; } } - if (i == dep_table->count) + if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC; + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; + } dep_table = table_info->vdd_dep_on_sclk; odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk); for (i=0; i < dep_table->count; i++) { if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; - break; + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK; + return; } } - if (i == dep_table->count) + if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC; + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; + } } static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h index fb696e3d06cf..2f8a3b983cce 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h +++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h @@ -412,8 +412,10 @@ typedef struct { QuadraticInt_t ReservedEquation2; QuadraticInt_t ReservedEquation3; + uint16_t MinVoltageUlvGfx; + uint16_t MinVoltageUlvSoc; - uint32_t Reserved[15]; + uint32_t Reserved[14]; diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c index 498d5948d1a8..9837c8d69e69 100644 --- a/drivers/gpu/drm/bridge/dumb-vga-dac.c +++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c @@ -56,7 +56,9 @@ static int dumb_vga_get_modes(struct drm_connector *connector) } drm_mode_connector_update_edid_property(connector, edid); - return drm_add_edid_modes(connector, edid); + ret = drm_add_edid_modes(connector, edid); + kfree(edid); + return ret; fallback: /* diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c index 02a50929af67..e7f4fe2848a5 100644 --- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c +++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c @@ -350,19 +350,44 @@ int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type, { uint8_t tmds_oen = enable ? 
0 : DP_DUAL_MODE_TMDS_DISABLE; ssize_t ret; + int retry; if (type < DRM_DP_DUAL_MODE_TYPE2_DVI) return 0; - ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN, - &tmds_oen, sizeof(tmds_oen)); - if (ret) { - DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n", - enable ? "enable" : "disable"); - return ret; + /* + * LSPCON adapters in low-power state may ignore the first write, so + * read back and verify the written value a few times. + */ + for (retry = 0; retry < 3; retry++) { + uint8_t tmp; + + ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN, + &tmds_oen, sizeof(tmds_oen)); + if (ret) { + DRM_DEBUG_KMS("Failed to %s TMDS output buffers (%d attempts)\n", + enable ? "enable" : "disable", + retry + 1); + return ret; + } + + ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN, + &tmp, sizeof(tmp)); + if (ret) { + DRM_DEBUG_KMS("I2C read failed during TMDS output buffer %s (%d attempts)\n", + enable ? "enabling" : "disabling", + retry + 1); + return ret; + } + + if (tmp == tmds_oen) + return 0; } - return 0; + DRM_DEBUG_KMS("I2C write value mismatch during TMDS output buffer %s\n", + enable ? "enabling" : "disabling"); + + return -EIO; } EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 134069f36482..39f1db4acda4 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -4451,6 +4451,7 @@ drm_reset_display_info(struct drm_connector *connector) info->max_tmds_clock = 0; info->dvi_dual = false; info->has_hdmi_infoframe = false; + memset(&info->hdmi, 0, sizeof(info->hdmi)); info->non_desktop = 0; } @@ -4462,17 +4463,11 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi u32 quirks = edid_get_quirks(edid); + drm_reset_display_info(connector); + info->width_mm = edid->width_cm * 10; info->height_mm = edid->height_cm * 10; - /* driver figures it out in this case */ - info->bpc = 0; - info->color_formats = 0; - info->cea_rev = 0; - info->max_tmds_clock = 0; - info->dvi_dual = false; - info->has_hdmi_infoframe = false; - info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP); DRM_DEBUG_KMS("non_desktop set to %d\n", info->non_desktop); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index 0faaf829f5bf..f0e79178bde6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c @@ -18,6 +18,7 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> #include <uapi/drm/exynos_drm.h> #include "exynos_drm_drv.h" @@ -26,20 +27,6 @@ #include "exynos_drm_iommu.h" #include "exynos_drm_crtc.h" -#define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb) - -/* - * exynos specific framebuffer structure. - * - * @fb: drm framebuffer obejct. - * @exynos_gem: array of exynos specific gem object containing a gem object. 
- */ -struct exynos_drm_fb { - struct drm_framebuffer fb; - struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER]; - dma_addr_t dma_addr[MAX_FB_BUFFER]; -}; - static int check_fb_gem_memory_type(struct drm_device *drm_dev, struct exynos_drm_gem *exynos_gem) { @@ -66,40 +53,9 @@ static int check_fb_gem_memory_type(struct drm_device *drm_dev, return 0; } -static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) -{ - struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); - unsigned int i; - - drm_framebuffer_cleanup(fb); - - for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem); i++) { - struct drm_gem_object *obj; - - if (exynos_fb->exynos_gem[i] == NULL) - continue; - - obj = &exynos_fb->exynos_gem[i]->base; - drm_gem_object_unreference_unlocked(obj); - } - - kfree(exynos_fb); - exynos_fb = NULL; -} - -static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb, - struct drm_file *file_priv, - unsigned int *handle) -{ - struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); - - return drm_gem_handle_create(file_priv, - &exynos_fb->exynos_gem[0]->base, handle); -} - static const struct drm_framebuffer_funcs exynos_drm_fb_funcs = { - .destroy = exynos_drm_fb_destroy, - .create_handle = exynos_drm_fb_create_handle, + .destroy = drm_gem_fb_destroy, + .create_handle = drm_gem_fb_create_handle, }; struct drm_framebuffer * @@ -108,12 +64,12 @@ exynos_drm_framebuffer_init(struct drm_device *dev, struct exynos_drm_gem **exynos_gem, int count) { - struct exynos_drm_fb *exynos_fb; + struct drm_framebuffer *fb; int i; int ret; - exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL); - if (!exynos_fb) + fb = kzalloc(sizeof(*fb), GFP_KERNEL); + if (!fb) return ERR_PTR(-ENOMEM); for (i = 0; i < count; i++) { @@ -121,23 +77,21 @@ exynos_drm_framebuffer_init(struct drm_device *dev, if (ret < 0) goto err; - exynos_fb->exynos_gem[i] = exynos_gem[i]; - exynos_fb->dma_addr[i] = exynos_gem[i]->dma_addr - + mode_cmd->offsets[i]; + fb->obj[i] = &exynos_gem[i]->base; } - drm_helper_mode_fill_fb_struct(dev, &exynos_fb->fb, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd); - ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs); + ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs); if (ret < 0) { DRM_ERROR("failed to initialize framebuffer\n"); goto err; } - return &exynos_fb->fb; + return fb; err: - kfree(exynos_fb); + kfree(fb); return ERR_PTR(ret); } @@ -191,12 +145,13 @@ err: dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index) { - struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); + struct exynos_drm_gem *exynos_gem; if (WARN_ON_ONCE(index >= MAX_FB_BUFFER)) return 0; - return exynos_fb->dma_addr[index]; + exynos_gem = to_exynos_gem(fb->obj[index]); + return exynos_gem->dma_addr + fb->offsets[index]; } static struct drm_mode_config_helper_funcs exynos_drm_mode_config_helpers = { diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index db6b94dda5df..d85939bd7b47 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -1080,6 +1080,7 @@ static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s) { set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt, s->workload->pending_events); + patch_value(s, cmd_ptr(s, 0), MI_NOOP); return 0; } diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index dd96ffc878ac..6d8180e8d1e2 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -169,6 +169,8 @@ static u8 
dpcd_fix_data[DPCD_HEADER_SIZE] = { static void emulate_monitor_status_change(struct intel_vgpu *vgpu) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + int pipe; + vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT | SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT); @@ -267,6 +269,14 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) if (IS_BROADWELL(dev_priv)) vgpu_vreg_t(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK; + /* Disable Primary/Sprite/Cursor plane */ + for_each_pipe(dev_priv, pipe) { + vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE; + vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE; + vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~CURSOR_MODE; + vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= CURSOR_MODE_DISABLE; + } + vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE; } diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index b555eb26f9ce..6f4f8e941fc2 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -323,6 +323,7 @@ static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf, struct intel_vgpu_fb_info *fb_info) { gvt_dmabuf->drm_format = fb_info->drm_format; + gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod; gvt_dmabuf->width = fb_info->width; gvt_dmabuf->height = fb_info->height; gvt_dmabuf->stride = fb_info->stride; diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index 6b50fe78dc1b..1c120683e958 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c @@ -245,16 +245,13 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, plane->hw_format = fmt; plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK; - if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) { - gvt_vgpu_err("invalid gma address: %lx\n", - (unsigned long)plane->base); + if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) return -EINVAL; - } plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) { - gvt_vgpu_err("invalid gma address: %lx\n", - (unsigned long)plane->base); + gvt_vgpu_err("Translate primary plane gma 0x%x to gpa fail\n", + plane->base); return -EINVAL; } @@ -371,16 +368,13 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu, alpha_plane, alpha_force); plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK; - if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) { - gvt_vgpu_err("invalid gma address: %lx\n", - (unsigned long)plane->base); + if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) return -EINVAL; - } plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) { - gvt_vgpu_err("invalid gma address: %lx\n", - (unsigned long)plane->base); + gvt_vgpu_err("Translate cursor plane gma 0x%x to gpa fail\n", + plane->base); return -EINVAL; } @@ -476,16 +470,13 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu, plane->drm_format = drm_format; plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK; - if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) { - gvt_vgpu_err("invalid gma address: %lx\n", - (unsigned long)plane->base); + if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) return -EINVAL; - } plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) { - gvt_vgpu_err("invalid gma address: %lx\n", - 
(unsigned long)plane->base); + gvt_vgpu_err("Translate sprite plane gma 0x%x to gpa fail\n", + plane->base); return -EINVAL; } diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index d29281231507..78e55aafc8bc 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -530,6 +530,16 @@ static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm, false, 0, mm->vgpu); } +static void ggtt_get_host_entry(struct intel_vgpu_mm *mm, + struct intel_gvt_gtt_entry *entry, unsigned long index) +{ + struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; + + GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); + + pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu); +} + static void ggtt_set_host_entry(struct intel_vgpu_mm *mm, struct intel_gvt_gtt_entry *entry, unsigned long index) { @@ -1818,6 +1828,18 @@ int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off, return ret; } +static void ggtt_invalidate_pte(struct intel_vgpu *vgpu, + struct intel_gvt_gtt_entry *entry) +{ + struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; + unsigned long pfn; + + pfn = pte_ops->get_pfn(entry); + if (pfn != vgpu->gvt->gtt.scratch_mfn) + intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, + pfn << PAGE_SHIFT); +} + static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, void *p_data, unsigned int bytes) { @@ -1844,10 +1866,10 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data, bytes); - m = e; if (ops->test_present(&e)) { gfn = ops->get_pfn(&e); + m = e; /* one PTE update may be issued in multiple writes and the * first write may not construct a valid gfn @@ -1868,8 +1890,12 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, ops->set_pfn(&m, gvt->gtt.scratch_mfn); } else ops->set_pfn(&m, dma_addr >> PAGE_SHIFT); - } else + } else { + ggtt_get_host_entry(ggtt_mm, &m, g_gtt_index); + ggtt_invalidate_pte(vgpu, &m); ops->set_pfn(&m, gvt->gtt.scratch_mfn); + ops->clear_present(&m); + } out: ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index); @@ -2030,7 +2056,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu) return PTR_ERR(gtt->ggtt_mm); } - intel_vgpu_reset_ggtt(vgpu); + intel_vgpu_reset_ggtt(vgpu, false); return create_scratch_page_tree(vgpu); } @@ -2315,17 +2341,19 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu) /** * intel_vgpu_reset_ggtt - reset the GGTT entry * @vgpu: a vGPU + * @invalidate_old: invalidate old entries * * This function is called at the vGPU create stage * to reset all the GGTT entries. 
* */ -void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu) +void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old) { struct intel_gvt *gvt = vgpu->gvt; struct drm_i915_private *dev_priv = gvt->dev_priv; struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE}; + struct intel_gvt_gtt_entry old_entry; u32 index; u32 num_entries; @@ -2334,13 +2362,23 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu) index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT; num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT; - while (num_entries--) + while (num_entries--) { + if (invalidate_old) { + ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index); + ggtt_invalidate_pte(vgpu, &old_entry); + } ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++); + } index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT; num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT; - while (num_entries--) + while (num_entries--) { + if (invalidate_old) { + ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index); + ggtt_invalidate_pte(vgpu, &old_entry); + } ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++); + } ggtt_invalidate(dev_priv); } @@ -2360,5 +2398,5 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu) * removing the shadow pages. */ intel_vgpu_destroy_all_ppgtt_mm(vgpu); - intel_vgpu_reset_ggtt(vgpu); + intel_vgpu_reset_ggtt(vgpu, true); } diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index a8b369cd352b..3792f2b7f4ff 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -193,7 +193,7 @@ struct intel_vgpu_gtt { extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu); extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu); -void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu); +void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old); void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu); extern int intel_gvt_init_gtt(struct intel_gvt *gvt); diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 8c5d5d005854..a33c1c3e4a21 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -1150,6 +1150,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification) switch (notification) { case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE: root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY; + /* fall through */ case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE: mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps); return PTR_ERR_OR_ZERO(mm); diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index c16a492449d7..1466d8769ec9 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1301,7 +1301,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, } - return 0; + return -ENOTTY; } static ssize_t diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 84ca369f15a5..3b4daafebdcb 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1105,30 +1105,32 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) ret = i915_ggtt_probe_hw(dev_priv); if (ret) - return ret; + goto err_perf; - /* WARNING: Apparently we must kick fbdev drivers before vgacon, - * otherwise the vga fbdev driver falls over. */ + /* + * WARNING: Apparently we must kick fbdev drivers before vgacon, + * otherwise the vga fbdev driver falls over. 
+ */ ret = i915_kick_out_firmware_fb(dev_priv); if (ret) { DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); - goto out_ggtt; + goto err_ggtt; } ret = i915_kick_out_vgacon(dev_priv); if (ret) { DRM_ERROR("failed to remove conflicting VGA console\n"); - goto out_ggtt; + goto err_ggtt; } ret = i915_ggtt_init_hw(dev_priv); if (ret) - return ret; + goto err_ggtt; ret = i915_ggtt_enable_hw(dev_priv); if (ret) { DRM_ERROR("failed to enable GGTT\n"); - goto out_ggtt; + goto err_ggtt; } pci_set_master(pdev); @@ -1139,7 +1141,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) if (ret) { DRM_ERROR("failed to set DMA mask\n"); - goto out_ggtt; + goto err_ggtt; } } @@ -1157,7 +1159,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) if (ret) { DRM_ERROR("failed to set DMA mask\n"); - goto out_ggtt; + goto err_ggtt; } } @@ -1190,13 +1192,14 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) ret = intel_gvt_init(dev_priv); if (ret) - goto out_ggtt; + goto err_ggtt; return 0; -out_ggtt: +err_ggtt: i915_ggtt_cleanup_hw(dev_priv); - +err_perf: + i915_perf_fini(dev_priv); return ret; } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 8c170db8495d..0414228cd2b5 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -728,7 +728,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) err = radix_tree_insert(handles_vma, handle, vma); if (unlikely(err)) { - kfree(lut); + kmem_cache_free(eb->i915->luts, lut); goto err_obj; } diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index d8feb9053e0c..f0519e31543a 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -473,20 +473,37 @@ static u64 get_rc6(struct drm_i915_private *i915) spin_lock_irqsave(&i915->pmu.lock, flags); spin_lock(&kdev->power.lock); - if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) - i915->pmu.suspended_jiffies_last = - kdev->power.suspended_jiffies; + /* + * After the above branch intel_runtime_pm_get_if_in_use failed + * to get the runtime PM reference we cannot assume we are in + * runtime suspend since we can either: a) race with coming out + * of it before we took the power.lock, or b) there are other + * states than suspended which can bring us here. + * + * We need to double-check that we are indeed currently runtime + * suspended and if not we cannot do better than report the last + * known RC6 value. 
+ */ + if (kdev->power.runtime_status == RPM_SUSPENDED) { + if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) + i915->pmu.suspended_jiffies_last = + kdev->power.suspended_jiffies; - val = kdev->power.suspended_jiffies - - i915->pmu.suspended_jiffies_last; - val += jiffies - kdev->power.accounting_timestamp; + val = kdev->power.suspended_jiffies - + i915->pmu.suspended_jiffies_last; + val += jiffies - kdev->power.accounting_timestamp; - spin_unlock(&kdev->power.lock); + val = jiffies_to_nsecs(val); + val += i915->pmu.sample[__I915_SAMPLE_RC6].cur; - val = jiffies_to_nsecs(val); - val += i915->pmu.sample[__I915_SAMPLE_RC6].cur; - i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; + i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; + } else if (i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) { + val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur; + } else { + val = i915->pmu.sample[__I915_SAMPLE_RC6].cur; + } + spin_unlock(&kdev->power.lock); spin_unlock_irqrestore(&i915->pmu.lock, flags); } diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 709d6ca68074..3ea566f99450 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -729,7 +729,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev, struct drm_i915_private *dev_priv = kdev_to_i915(kdev); u32 tmp; - if (!IS_GEN9_BC(dev_priv)) + if (!IS_GEN9(dev_priv)) return; i915_audio_component_get_power(kdev); diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index c5c7530ba157..447b721c3be9 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -1256,7 +1256,6 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, return; aux_channel = child->aux_channel; - ddc_pin = child->ddc_pin; is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING; is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT; @@ -1303,9 +1302,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port)); if (is_dvi) { - info->alternate_ddc_pin = map_ddc_pin(dev_priv, ddc_pin); - - sanitize_ddc_pin(dev_priv, port); + ddc_pin = map_ddc_pin(dev_priv, child->ddc_pin); + if (intel_gmbus_is_valid_pin(dev_priv, ddc_pin)) { + info->alternate_ddc_pin = ddc_pin; + sanitize_ddc_pin(dev_priv, port); + } else { + DRM_DEBUG_KMS("Port %c has invalid DDC pin %d, " + "sticking to defaults\n", + port_name(port), ddc_pin); + } } if (is_dp) { diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index fc8b2c6e3508..32d24c69da3c 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c @@ -2140,10 +2140,22 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) } } - /* According to BSpec, "The CD clock frequency must be at least twice + /* + * According to BSpec, "The CD clock frequency must be at least twice * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default. + * + * FIXME: Check the actual, not default, BCLK being used. + * + * FIXME: This does not depend on ->has_audio because the higher CDCLK + * is required for audio probe, also when there are no audio capable + * displays connected at probe time. This leads to unnecessarily high + * CDCLK when audio is not required. + * + * FIXME: This limit is only applied when there are displays connected + * at probe time. 
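The get_rc6() rework above distinguishes three cases once the runtime-PM reference could not be taken: the device really is runtime suspended (extend the last sample by the time spent asleep and remember the estimate), a previous estimate exists (return it), or neither (fall back to the last real sample). The decision ladder, reduced to a self-contained sketch with illustrative names:

	#include <linux/types.h>

	static u64 pick_rc6_value(bool runtime_suspended, u64 asleep_ns,
				  u64 last_estimate_ns, u64 last_sample_ns)
	{
		if (runtime_suspended)
			return last_sample_ns + asleep_ns;	/* extend by time asleep */
		if (last_estimate_ns)
			return last_estimate_ns;	/* raced with resume: reuse the estimate */
		return last_sample_ns;			/* no estimate yet: report the last sample */
	}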
If we probe without displays, we'll still end up using + * the platform minimum CDCLK, failing audio probe. */ - if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9) + if (INTEL_GEN(dev_priv) >= 9) min_cdclk = max(2 * 96000, min_cdclk); /* diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index 41e6c75a7f3c..f9550ea46c26 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -35,6 +35,7 @@ */ #define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin" +MODULE_FIRMWARE(I915_CSR_GLK); #define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4) #define I915_CSR_CNL "i915/cnl_dmc_ver1_07.bin" diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index d4368589b355..a80fbad9be0f 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -49,12 +49,12 @@ * check the condition before the timeout. */ #define __wait_for(OP, COND, US, Wmin, Wmax) ({ \ - unsigned long timeout__ = jiffies + usecs_to_jiffies(US) + 1; \ + const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \ long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \ int ret__; \ might_sleep(); \ for (;;) { \ - bool expired__ = time_after(jiffies, timeout__); \ + const bool expired__ = ktime_after(ktime_get_raw(), end__); \ OP; \ if (COND) { \ ret__ = 0; \ diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 6f12adc06365..6467a5cc2ca3 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -806,7 +806,7 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev) return; intel_fbdev_sync(ifbdev); - if (ifbdev->vma) + if (ifbdev->vma || ifbdev->helper.deferred_setup) drm_fb_helper_hotplug_event(&ifbdev->helper); } diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 697af5add78b..e3a5f673ff67 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -577,6 +577,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine) * know the next preemption status we see corresponds * to this ELSP update. */ + GEM_BUG_ON(!execlists_is_active(execlists, + EXECLISTS_ACTIVE_USER)); GEM_BUG_ON(!port_count(&port[0])); if (port_count(&port[0]) > 1) goto unlock; @@ -738,6 +740,8 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists) memset(port, 0, sizeof(*port)); port++; } + + execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER); } static void execlists_cancel_requests(struct intel_engine_cs *engine) @@ -1001,6 +1005,11 @@ static void execlists_submission_tasklet(unsigned long data) if (fw) intel_uncore_forcewake_put(dev_priv, execlists->fw_domains); + + /* If the engine is now idle, so should be the flag; and vice versa. 
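The __wait_for() change above replaces a jiffies deadline with a raw monotonic ktime, so microsecond-scale waits are no longer rounded up to whole scheduler ticks, and the condition is still checked one final time after the deadline expires. The same structure as a standalone helper (a sketch, not the macro itself):

	#include <linux/ktime.h>
	#include <linux/delay.h>
	#include <linux/errno.h>

	static int poll_until(bool (*cond)(void *data), void *data, unsigned int timeout_us)
	{
		const ktime_t end = ktime_add_ns(ktime_get_raw(), 1000ull * timeout_us);

		for (;;) {
			const bool expired = ktime_after(ktime_get_raw(), end);

			if (cond(data))
				return 0;
			if (expired)
				return -ETIMEDOUT;	/* condition rechecked once after expiry */
			usleep_range(10, 20);		/* back off between polls */
		}
	}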
*/ + GEM_BUG_ON(execlists_is_active(&engine->execlists, + EXECLISTS_ACTIVE_USER) == + !port_isset(engine->execlists.port)); } static void queue_request(struct intel_engine_cs *engine, diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 53ea564f971e..66de4b2dc8b7 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -641,19 +641,18 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv) DRM_DEBUG_KMS("Enabling DC6\n"); - gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); + /* Wa Display #1183: skl,kbl,cfl */ + if (IS_GEN9_BC(dev_priv)) + I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) | + SKL_SELECT_ALTERNATE_DC_EXIT); + gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); } void skl_disable_dc6(struct drm_i915_private *dev_priv) { DRM_DEBUG_KMS("Disabling DC6\n"); - /* Wa Display #1183: skl,kbl,cfl */ - if (IS_GEN9_BC(dev_priv)) - I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) | - SKL_SELECT_ALTERNATE_DC_EXIT); - gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); } diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c index 6e5e1aa54ce1..b001699297c4 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c @@ -351,6 +351,7 @@ static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc, spin_lock_irqsave(&dev->event_lock, flags); mdp4_crtc->event = crtc->state->event; + crtc->state->event = NULL; spin_unlock_irqrestore(&dev->event_lock, flags); blend_setup(crtc); diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c index 9893e43ba6c5..76b96081916f 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c @@ -708,6 +708,7 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc, spin_lock_irqsave(&dev->event_lock, flags); mdp5_crtc->event = crtc->state->event; + crtc->state->event = NULL; spin_unlock_irqrestore(&dev->event_lock, flags); /* diff --git a/drivers/gpu/drm/msm/disp/mdp_format.c b/drivers/gpu/drm/msm/disp/mdp_format.c index b4a8aa4490ee..005760bee708 100644 --- a/drivers/gpu/drm/msm/disp/mdp_format.c +++ b/drivers/gpu/drm/msm/disp/mdp_format.c @@ -171,7 +171,8 @@ uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats, return i; } -const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format) +const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format, + uint64_t modifier) { int i; for (i = 0; i < ARRAY_SIZE(formats); i++) { diff --git a/drivers/gpu/drm/msm/disp/mdp_kms.h b/drivers/gpu/drm/msm/disp/mdp_kms.h index 1185487e7e5e..4fa8dbe4e165 100644 --- a/drivers/gpu/drm/msm/disp/mdp_kms.h +++ b/drivers/gpu/drm/msm/disp/mdp_kms.h @@ -98,7 +98,7 @@ struct mdp_format { #define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->is_yuv) uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only); -const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format); +const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format, uint64_t modifier); /* MDP capabilities */ #define MDP_CAP_SMP BIT(0) /* Shared Memory Pool */ diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 7a03a9489708..8baba30d6c65 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -173,6 +173,7 @@ struct msm_dsi_host { bool registered; bool power_on; + 
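Both MDP flush hunks above take the pending vblank event under event_lock and then clear crtc->state->event, so the same event cannot be completed twice (once by the driver's vblank path and once by the atomic helpers). Condensed from those hunks, with comments added:

	spin_lock_irqsave(&dev->event_lock, flags);
	mdp5_crtc->event = crtc->state->event;	/* the driver now owns the event */
	crtc->state->event = NULL;		/* helpers must not complete it as well */
	spin_unlock_irqrestore(&dev->event_lock, flags);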
bool enabled; int irq; }; @@ -775,7 +776,7 @@ static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt( switch (mipi_fmt) { case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888; case MIPI_DSI_FMT_RGB666_PACKED: - case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666; + case MIPI_DSI_FMT_RGB666: return CMD_DST_FORMAT_RGB666; case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565; default: return CMD_DST_FORMAT_RGB888; } @@ -986,13 +987,19 @@ static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host) static void dsi_wait4video_done(struct msm_dsi_host *msm_host) { + u32 ret = 0; + struct device *dev = &msm_host->pdev->dev; + dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1); reinit_completion(&msm_host->video_comp); - wait_for_completion_timeout(&msm_host->video_comp, + ret = wait_for_completion_timeout(&msm_host->video_comp, msecs_to_jiffies(70)); + if (ret <= 0) + dev_err(dev, "wait for video done timed out\n"); + dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0); } @@ -1001,7 +1008,7 @@ static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host) if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO)) return; - if (msm_host->power_on) { + if (msm_host->power_on && msm_host->enabled) { dsi_wait4video_done(msm_host); /* delay 4 ms to skip BLLP */ usleep_range(2000, 4000); @@ -2203,7 +2210,7 @@ int msm_dsi_host_enable(struct mipi_dsi_host *host) * pm_runtime_put_autosuspend(&msm_host->pdev->dev); * } */ - + msm_host->enabled = true; return 0; } @@ -2211,6 +2218,7 @@ int msm_dsi_host_disable(struct mipi_dsi_host *host) { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + msm_host->enabled = false; dsi_op_mode_config(msm_host, !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false); diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index 8e9d5c255820..9a9fa0c75a13 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -265,6 +265,115 @@ int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing, return 0; } +int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing, + struct msm_dsi_phy_clk_request *clk_req) +{ + const unsigned long bit_rate = clk_req->bitclk_rate; + const unsigned long esc_rate = clk_req->escclk_rate; + s32 ui, ui_x8, lpx; + s32 tmax, tmin; + s32 pcnt0 = 50; + s32 pcnt1 = 50; + s32 pcnt2 = 10; + s32 pcnt3 = 30; + s32 pcnt4 = 10; + s32 pcnt5 = 2; + s32 coeff = 1000; /* Precision, should avoid overflow */ + s32 hb_en, hb_en_ckln; + s32 temp; + + if (!bit_rate || !esc_rate) + return -EINVAL; + + timing->hs_halfbyte_en = 0; + hb_en = 0; + timing->hs_halfbyte_en_ckln = 0; + hb_en_ckln = 0; + + ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000); + ui_x8 = ui << 3; + lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000); + + temp = S_DIV_ROUND_UP(38 * coeff, ui_x8); + tmin = max_t(s32, temp, 0); + temp = (95 * coeff) / ui_x8; + tmax = max_t(s32, temp, 0); + timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false); + + temp = 300 * coeff - (timing->clk_prepare << 3) * ui; + tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1; + tmax = (tmin > 255) ? 
511 : 255; + timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false); + + tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8); + temp = 105 * coeff + 12 * ui - 20 * coeff; + tmax = (temp + 3 * ui) / ui_x8; + timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false); + + temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8); + tmin = max_t(s32, temp, 0); + temp = (85 * coeff + 6 * ui) / ui_x8; + tmax = max_t(s32, temp, 0); + timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false); + + temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui; + tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1; + tmax = 255; + timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false); + + tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1; + temp = 105 * coeff + 12 * ui - 20 * coeff; + tmax = (temp / ui_x8) - 1; + timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false); + + temp = 50 * coeff + ((hb_en << 2) - 8) * ui; + timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8); + + tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1; + tmax = 255; + timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false); + + temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui; + timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8); + + temp = 60 * coeff + 52 * ui - 43 * ui; + tmin = DIV_ROUND_UP(temp, ui_x8) - 1; + tmax = 63; + timing->shared_timings.clk_post = + linear_inter(tmax, tmin, pcnt2, 0, false); + + temp = 8 * ui + (timing->clk_prepare << 3) * ui; + temp += (((timing->clk_zero + 3) << 3) + 11) * ui; + temp += hb_en_ckln ? (((timing->hs_rqst_ckln << 3) + 4) * ui) : + (((timing->hs_rqst_ckln << 3) + 8) * ui); + tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1; + tmax = 63; + if (tmin > tmax) { + temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false); + timing->shared_timings.clk_pre = temp >> 1; + timing->shared_timings.clk_pre_inc_by_2 = 1; + } else { + timing->shared_timings.clk_pre = + linear_inter(tmax, tmin, pcnt2, 0, false); + timing->shared_timings.clk_pre_inc_by_2 = 0; + } + + timing->ta_go = 3; + timing->ta_sure = 0; + timing->ta_get = 4; + + DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d", + timing->shared_timings.clk_pre, timing->shared_timings.clk_post, + timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero, + timing->clk_trail, timing->clk_prepare, timing->hs_exit, + timing->hs_zero, timing->hs_prepare, timing->hs_trail, + timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en, + timing->hs_halfbyte_en_ckln, timing->hs_prep_dly, + timing->hs_prep_dly_ckln); + + return 0; +} + void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg, u32 bit_mask) { diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h index c56268cbdb3d..a24ab80994a3 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h @@ -101,6 +101,8 @@ int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing, struct msm_dsi_phy_clk_request *clk_req); int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing, struct msm_dsi_phy_clk_request *clk_req); +int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing, + struct msm_dsi_phy_clk_request *clk_req); void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg, u32 bit_mask); int msm_dsi_phy_init_common(struct msm_dsi_phy *phy); diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c index 0af951aaeea1..b3fffc8dbb2a 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c +++ 
b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c @@ -79,34 +79,6 @@ static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy) dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04); } -static int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing, - struct msm_dsi_phy_clk_request *clk_req) -{ - /* - * TODO: These params need to be computed, they're currently hardcoded - * for a 1440x2560@60Hz panel with a byteclk of 100.618 Mhz, and a - * default escape clock of 19.2 Mhz. - */ - - timing->hs_halfbyte_en = 0; - timing->clk_zero = 0x1c; - timing->clk_prepare = 0x07; - timing->clk_trail = 0x07; - timing->hs_exit = 0x23; - timing->hs_zero = 0x21; - timing->hs_prepare = 0x07; - timing->hs_trail = 0x07; - timing->hs_rqst = 0x05; - timing->ta_sure = 0x00; - timing->ta_go = 0x03; - timing->ta_get = 0x04; - - timing->shared_timings.clk_pre = 0x2d; - timing->shared_timings.clk_post = 0x0d; - - return 0; -} - static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, struct msm_dsi_phy_clk_request *clk_req) { diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c index 0e0c87252ab0..7a16242bf8bf 100644 --- a/drivers/gpu/drm/msm/msm_fb.c +++ b/drivers/gpu/drm/msm/msm_fb.c @@ -183,7 +183,8 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format); vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format); - format = kms->funcs->get_format(kms, mode_cmd->pixel_format); + format = kms->funcs->get_format(kms, mode_cmd->pixel_format, + mode_cmd->modifier[0]); if (!format) { dev_err(dev->dev, "unsupported pixel format: %4.4s\n", (char *)&mode_cmd->pixel_format); diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index c178563fcd4d..456622b46335 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -92,8 +92,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, if (IS_ERR(fb)) { dev_err(dev->dev, "failed to allocate fb\n"); - ret = PTR_ERR(fb); - goto fail; + return PTR_ERR(fb); } bo = msm_framebuffer_bo(fb, 0); @@ -151,13 +150,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, fail_unlock: mutex_unlock(&dev->struct_mutex); -fail: - - if (ret) { - if (fb) - drm_framebuffer_remove(fb); - } - + drm_framebuffer_remove(fb); return ret; } diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 95196479f651..f583bb4222f9 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -132,17 +132,19 @@ static void put_pages(struct drm_gem_object *obj) struct msm_gem_object *msm_obj = to_msm_bo(obj); if (msm_obj->pages) { - /* For non-cached buffers, ensure the new pages are clean - * because display controller, GPU, etc. are not coherent: - */ - if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) - dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl, - msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + if (msm_obj->sgt) { + /* For non-cached buffers, ensure the new + * pages are clean because display controller, + * GPU, etc. 
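The msm_gem put_pages() hunk below only unmaps and frees the scatter/gather table when it was actually allocated, avoiding a NULL dereference when object setup failed part-way. With the diff markers stripped, the cleanup converges on:

	if (msm_obj->sgt) {
		/* non-cached buffers must be unmapped before the pages go away */
		if (msm_obj->flags & (MSM_BO_WC | MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
				     msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);
	}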
are not coherent: + */ + if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) + dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl, + msm_obj->sgt->nents, + DMA_BIDIRECTIONAL); - if (msm_obj->sgt) sg_free_table(msm_obj->sgt); - - kfree(msm_obj->sgt); + kfree(msm_obj->sgt); + } if (use_pages(obj)) drm_gem_put_pages(obj, msm_obj->pages, true, false); diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index 17d5824417ad..aaa329dc020e 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -48,8 +48,11 @@ struct msm_kms_funcs { /* functions to wait for atomic commit completed on each CRTC */ void (*wait_for_crtc_commit_done)(struct msm_kms *kms, struct drm_crtc *crtc); + /* get msm_format w/ optional format modifiers from drm_mode_fb_cmd2 */ + const struct msm_format *(*get_format)(struct msm_kms *kms, + const uint32_t format, + const uint64_t modifiers); /* misc: */ - const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format); long (*round_pixclk)(struct msm_kms *kms, unsigned long rate, struct drm_encoder *encoder); int (*set_split_display)(struct msm_kms *kms, diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c index c0fb52c6d4ca..01665b98c57e 100644 --- a/drivers/gpu/drm/qxl/qxl_cmd.c +++ b/drivers/gpu/drm/qxl/qxl_cmd.c @@ -179,10 +179,9 @@ qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *relea uint32_t type, bool interruptible) { struct qxl_command cmd; - struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); cmd.type = type; - cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset); + cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset); return qxl_ring_push(qdev->command_ring, &cmd, interruptible); } @@ -192,10 +191,9 @@ qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *releas uint32_t type, bool interruptible) { struct qxl_command cmd; - struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); cmd.type = type; - cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset); + cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset); return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible); } diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 00a1a66b052a..864b456080c4 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -167,6 +167,7 @@ struct qxl_release { int id; int type; + struct qxl_bo *release_bo; uint32_t release_offset; uint32_t surface_release_id; struct ww_acquire_ctx ticket; diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c index e238a1a2eca1..6cc9f3367fa0 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c @@ -182,9 +182,9 @@ static int qxl_process_single_command(struct qxl_device *qdev, goto out_free_reloc; /* TODO copy slow path code from i915 */ - fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE)); + fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_MASK)); unwritten = __copy_from_user_inatomic_nocache - (fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), + (fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_MASK), u64_to_user_ptr(cmd->command), cmd->command_size); { diff --git 
a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index 5d84a66fed36..7cb214577275 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -173,6 +173,7 @@ qxl_release_free_list(struct qxl_release *release) list_del(&entry->tv.head); kfree(entry); } + release->release_bo = NULL; } void @@ -296,7 +297,6 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, { if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) { int idr_ret; - struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head); struct qxl_bo *bo; union qxl_release_info *info; @@ -304,8 +304,9 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release); if (idr_ret < 0) return idr_ret; - bo = to_qxl_bo(entry->tv.bo); + bo = create_rel->release_bo; + (*release)->release_bo = bo; (*release)->release_offset = create_rel->release_offset + 64; qxl_release_list_add(*release, bo); @@ -365,6 +366,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]); + (*release)->release_bo = bo; (*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx]; qdev->current_release_bo_offset[cur_idx]++; @@ -408,13 +410,12 @@ union qxl_release_info *qxl_release_map(struct qxl_device *qdev, { void *ptr; union qxl_release_info *info; - struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); - struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); + struct qxl_bo *bo = release->release_bo; - ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE); + ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK); if (!ptr) return NULL; - info = ptr + (release->release_offset & ~PAGE_SIZE); + info = ptr + (release->release_offset & ~PAGE_MASK); return info; } @@ -422,11 +423,10 @@ void qxl_release_unmap(struct qxl_device *qdev, struct qxl_release *release, union qxl_release_info *info) { - struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); - struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); + struct qxl_bo *bo = release->release_bo; void *ptr; - ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE); + ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK); qxl_bo_kunmap_atomic_page(qdev, bo, ptr); } diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c index bffff4c9fbf5..be3f14d7746d 100644 --- a/drivers/gpu/drm/sun4i/sun4i_lvds.c +++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c @@ -94,64 +94,9 @@ static void sun4i_lvds_encoder_disable(struct drm_encoder *encoder) } } -static enum drm_mode_status sun4i_lvds_encoder_mode_valid(struct drm_encoder *crtc, - const struct drm_display_mode *mode) -{ - struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(crtc); - struct sun4i_tcon *tcon = lvds->tcon; - u32 hsync = mode->hsync_end - mode->hsync_start; - u32 vsync = mode->vsync_end - mode->vsync_start; - unsigned long rate = mode->clock * 1000; - long rounded_rate; - - DRM_DEBUG_DRIVER("Validating modes...\n"); - - if (hsync < 1) - return MODE_HSYNC_NARROW; - - if (hsync > 0x3ff) - return MODE_HSYNC_WIDE; - - if ((mode->hdisplay < 1) || (mode->htotal < 1)) - return MODE_H_ILLEGAL; - - if ((mode->hdisplay > 0x7ff) || (mode->htotal > 0xfff)) - return MODE_BAD_HVALUE; - - DRM_DEBUG_DRIVER("Horizontal parameters OK\n"); - - if (vsync < 1) - return 
MODE_VSYNC_NARROW; - - if (vsync > 0x3ff) - return MODE_VSYNC_WIDE; - - if ((mode->vdisplay < 1) || (mode->vtotal < 1)) - return MODE_V_ILLEGAL; - - if ((mode->vdisplay > 0x7ff) || (mode->vtotal > 0xfff)) - return MODE_BAD_VVALUE; - - DRM_DEBUG_DRIVER("Vertical parameters OK\n"); - - tcon->dclk_min_div = 7; - tcon->dclk_max_div = 7; - rounded_rate = clk_round_rate(tcon->dclk, rate); - if (rounded_rate < rate) - return MODE_CLOCK_LOW; - - if (rounded_rate > rate) - return MODE_CLOCK_HIGH; - - DRM_DEBUG_DRIVER("Clock rate OK\n"); - - return MODE_OK; -} - static const struct drm_encoder_helper_funcs sun4i_lvds_enc_helper_funcs = { .disable = sun4i_lvds_encoder_disable, .enable = sun4i_lvds_encoder_enable, - .mode_valid = sun4i_lvds_encoder_mode_valid, }; static const struct drm_encoder_funcs sun4i_lvds_enc_funcs = { diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c index 2decc8e2c79f..add9cc97a3b6 100644 --- a/drivers/gpu/drm/vc4/vc4_bo.c +++ b/drivers/gpu/drm/vc4/vc4_bo.c @@ -195,6 +195,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo) vc4_bo_set_label(obj, -1); if (bo->validated_shader) { + kfree(bo->validated_shader->uniform_addr_offsets); kfree(bo->validated_shader->texture_samples); kfree(bo->validated_shader); bo->validated_shader = NULL; @@ -591,6 +592,7 @@ void vc4_free_object(struct drm_gem_object *gem_bo) } if (bo->validated_shader) { + kfree(bo->validated_shader->uniform_addr_offsets); kfree(bo->validated_shader->texture_samples); kfree(bo->validated_shader); bo->validated_shader = NULL; diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index bf4667481935..c61dff594195 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -760,6 +760,7 @@ static irqreturn_t vc4_crtc_irq_handler(int irq, void *data) struct vc4_async_flip_state { struct drm_crtc *crtc; struct drm_framebuffer *fb; + struct drm_framebuffer *old_fb; struct drm_pending_vblank_event *event; struct vc4_seqno_cb cb; @@ -789,6 +790,23 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb) drm_crtc_vblank_put(crtc); drm_framebuffer_put(flip_state->fb); + + /* Decrement the BO usecnt in order to keep the inc/dec calls balanced + * when the planes are updated through the async update path. + * FIXME: we should move to generic async-page-flip when it's + * available, so that we can get rid of this hand-made cleanup_fb() + * logic. + */ + if (flip_state->old_fb) { + struct drm_gem_cma_object *cma_bo; + struct vc4_bo *bo; + + cma_bo = drm_fb_cma_get_gem_obj(flip_state->old_fb, 0); + bo = to_vc4_bo(&cma_bo->base); + vc4_bo_dec_usecnt(bo); + drm_framebuffer_put(flip_state->old_fb); + } + kfree(flip_state); up(&vc4->async_modeset); @@ -813,9 +831,22 @@ static int vc4_async_page_flip(struct drm_crtc *crtc, struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0); struct vc4_bo *bo = to_vc4_bo(&cma_bo->base); + /* Increment the BO usecnt here, so that we never end up with an + * unbalanced number of vc4_bo_{dec,inc}_usecnt() calls when the + * plane is later updated through the non-async path. + * FIXME: we should move to generic async-page-flip when it's + * available, so that we can get rid of this hand-made prepare_fb() + * logic. 
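The vc4 async page-flip changes around here keep the BO usecnt balanced across the whole asynchronous operation: the reference is taken before any allocation, dropped again on every early-exit path, and otherwise released only in the completion callback. Stripped down from the hunks:

	ret = vc4_bo_inc_usecnt(bo);	/* reference lives for the whole async flip */
	if (ret)
		return ret;

	flip_state = kzalloc(sizeof(*flip_state), GFP_KERNEL);
	if (!flip_state) {
		vc4_bo_dec_usecnt(bo);	/* every failure path must drop it again */
		return -ENOMEM;
	}
	/* on success, vc4_async_page_flip_complete() drops it instead */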
+ */ + ret = vc4_bo_inc_usecnt(bo); + if (ret) + return ret; + flip_state = kzalloc(sizeof(*flip_state), GFP_KERNEL); - if (!flip_state) + if (!flip_state) { + vc4_bo_dec_usecnt(bo); return -ENOMEM; + } drm_framebuffer_get(fb); flip_state->fb = fb; @@ -826,10 +857,23 @@ static int vc4_async_page_flip(struct drm_crtc *crtc, ret = down_interruptible(&vc4->async_modeset); if (ret) { drm_framebuffer_put(fb); + vc4_bo_dec_usecnt(bo); kfree(flip_state); return ret; } + /* Save the current FB before it's replaced by the new one in + * drm_atomic_set_fb_for_plane(). We'll need the old FB in + * vc4_async_page_flip_complete() to decrement the BO usecnt and keep + * it consistent. + * FIXME: we should move to generic async-page-flip when it's + * available, so that we can get rid of this hand-made cleanup_fb() + * logic. + */ + flip_state->old_fb = plane->state->fb; + if (flip_state->old_fb) + drm_framebuffer_get(flip_state->old_fb); + WARN_ON(drm_crtc_vblank_get(crtc) != 0); /* Immediately update the plane's legacy fb pointer, so that later diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c index d3f15bf60900..7cf82b071de2 100644 --- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c +++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c @@ -942,6 +942,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj) fail: kfree(validation_state.branch_targets); if (validated_shader) { + kfree(validated_shader->uniform_addr_offsets); kfree(validated_shader->texture_samples); kfree(validated_shader); } diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index 48e4f1df6e5d..020070d483d3 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -293,7 +293,7 @@ retry: ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC); if (ret == -ENOSPC) { spin_unlock(&vgdev->ctrlq.qlock); - wait_event(vgdev->ctrlq.ack_queue, vq->num_free); + wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt); spin_lock(&vgdev->ctrlq.qlock); goto retry; } else { @@ -368,7 +368,7 @@ retry: ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC); if (ret == -ENOSPC) { spin_unlock(&vgdev->cursorq.qlock); - wait_event(vgdev->cursorq.ack_queue, vq->num_free); + wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt); spin_lock(&vgdev->cursorq.qlock); goto retry; } else { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 2582ffd36bb5..ba0cdb743c3e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -441,11 +441,11 @@ static int vmwgfx_set_config_internal(struct drm_mode_set *set) struct drm_crtc *crtc = set->crtc; struct drm_framebuffer *fb; struct drm_crtc *tmp; - struct drm_modeset_acquire_ctx *ctx; struct drm_device *dev = set->crtc->dev; + struct drm_modeset_acquire_ctx ctx; int ret; - ctx = dev->mode_config.acquire_ctx; + drm_modeset_acquire_init(&ctx, 0); restart: /* @@ -458,7 +458,7 @@ restart: fb = set->fb; - ret = crtc->funcs->set_config(set, ctx); + ret = crtc->funcs->set_config(set, &ctx); if (ret == 0) { crtc->primary->crtc = crtc; crtc->primary->fb = fb; @@ -473,20 +473,13 @@ restart: } if (ret == -EDEADLK) { - dev->mode_config.acquire_ctx = NULL; - -retry_locking: - drm_modeset_backoff(ctx); - - ret = drm_modeset_lock_all_ctx(dev, ctx); - if (ret) - goto retry_locking; - - dev->mode_config.acquire_ctx = ctx; - + drm_modeset_backoff(&ctx); goto restart; } + drm_modeset_drop_locks(&ctx); + 
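vmwgfx_set_config_internal() now owns a local acquire context instead of borrowing dev->mode_config.acquire_ctx, which is the standard DRM pattern: initialise the context, retry the locked operation after drm_modeset_backoff() whenever it returns -EDEADLK, then drop the locks and finish the context. As a standalone sketch (do_set_config() stands in for the locked work):

	#include <drm/drm_crtc.h>
	#include <drm/drm_modeset_lock.h>

	static int set_config_with_ctx(struct drm_mode_set *set)
	{
		struct drm_modeset_acquire_ctx ctx;
		int ret;

		drm_modeset_acquire_init(&ctx, 0);
	restart:
		ret = do_set_config(set, &ctx);		/* illustrative: takes locks via ctx */
		if (ret == -EDEADLK) {
			drm_modeset_backoff(&ctx);	/* drop contended locks, then retry */
			goto restart;
		}

		drm_modeset_drop_locks(&ctx);
		drm_modeset_acquire_fini(&ctx);
		return ret;
	}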
drm_modeset_acquire_fini(&ctx); + return ret; } @@ -624,7 +617,6 @@ static int vmw_fb_set_par(struct fb_info *info) } mutex_lock(&par->bo_mutex); - drm_modeset_lock_all(vmw_priv->dev); ret = vmw_fb_kms_framebuffer(info); if (ret) goto out_unlock; @@ -657,7 +649,6 @@ out_unlock: drm_mode_destroy(vmw_priv->dev, old_mode); par->set_mode = mode; - drm_modeset_unlock_all(vmw_priv->dev); mutex_unlock(&par->bo_mutex); return ret; @@ -713,18 +704,14 @@ int vmw_fb_init(struct vmw_private *vmw_priv) par->max_width = fb_width; par->max_height = fb_height; - drm_modeset_lock_all(vmw_priv->dev); ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width, par->max_height, &par->con, &par->crtc, &init_mode); - if (ret) { - drm_modeset_unlock_all(vmw_priv->dev); + if (ret) goto err_kms; - } info->var.xres = init_mode->hdisplay; info->var.yres = init_mode->vdisplay; - drm_modeset_unlock_all(vmw_priv->dev); /* * Create buffers and alloc memory @@ -832,7 +819,9 @@ int vmw_fb_close(struct vmw_private *vmw_priv) cancel_delayed_work_sync(&par->local_work); unregister_framebuffer(info); + mutex_lock(&par->bo_mutex); (void) vmw_fb_kms_detach(par, true, true); + mutex_unlock(&par->bo_mutex); vfree(par->vmalloc); framebuffer_release(info); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index f11601b6fd74..96fd7a03d2f8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -2595,6 +2595,7 @@ void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx, vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf, out_fence, NULL); + vmw_dmabuf_unreference(&ctx->buf); vmw_resource_unreserve(res, false, NULL, 0); mutex_unlock(&res->dev_priv->cmdbuf_mutex); } @@ -2680,7 +2681,9 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, struct vmw_display_unit *du; struct drm_display_mode *mode; int i = 0; + int ret = 0; + mutex_lock(&dev_priv->dev->mode_config.mutex); list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list, head) { if (i == unit) @@ -2691,7 +2694,8 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, if (i != unit) { DRM_ERROR("Could not find initial display unit.\n"); - return -EINVAL; + ret = -EINVAL; + goto out_unlock; } if (list_empty(&con->modes)) @@ -2699,7 +2703,8 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, if (list_empty(&con->modes)) { DRM_ERROR("Could not find initial display mode.\n"); - return -EINVAL; + ret = -EINVAL; + goto out_unlock; } du = vmw_connector_to_du(con); @@ -2720,7 +2725,10 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, head); } - return 0; + out_unlock: + mutex_unlock(&dev_priv->dev->mode_config.mutex); + + return ret; } /** diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 5a3a7ead3012..0b5cc910f62e 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -525,6 +525,9 @@ #define I2C_VENDOR_ID_HANTICK 0x0911 #define I2C_PRODUCT_ID_HANTICK_5288 0x5288 +#define I2C_VENDOR_ID_RAYD 0x2386 +#define I2C_PRODUCT_ID_RAYD_3118 0x3118 + #define USB_VENDOR_ID_HANWANG 0x0b57 #define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000 #define USB_DEVICE_ID_HANWANG_TABLET_LAST 0x8fff diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 6836a856c243..930652c25120 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -387,7 +387,8 @@ static int hidinput_get_battery_property(struct power_supply *psy, break; case POWER_SUPPLY_PROP_CAPACITY: - if (dev->battery_report_type == HID_FEATURE_REPORT) { + if 
(dev->battery_status != HID_BATTERY_REPORTED && + !dev->battery_avoid_query) { value = hidinput_query_battery_capacity(dev); if (value < 0) return value; @@ -403,17 +404,17 @@ static int hidinput_get_battery_property(struct power_supply *psy, break; case POWER_SUPPLY_PROP_STATUS: - if (!dev->battery_reported && - dev->battery_report_type == HID_FEATURE_REPORT) { + if (dev->battery_status != HID_BATTERY_REPORTED && + !dev->battery_avoid_query) { value = hidinput_query_battery_capacity(dev); if (value < 0) return value; dev->battery_capacity = value; - dev->battery_reported = true; + dev->battery_status = HID_BATTERY_QUERIED; } - if (!dev->battery_reported) + if (dev->battery_status == HID_BATTERY_UNKNOWN) val->intval = POWER_SUPPLY_STATUS_UNKNOWN; else if (dev->battery_capacity == 100) val->intval = POWER_SUPPLY_STATUS_FULL; @@ -486,6 +487,14 @@ static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type, dev->battery_report_type = report_type; dev->battery_report_id = field->report->id; + /* + * Stylus is normally not connected to the device and thus we + * can't query the device and get meaningful battery strength. + * We have to wait for the device to report it on its own. + */ + dev->battery_avoid_query = report_type == HID_INPUT_REPORT && + field->physical == HID_DG_STYLUS; + dev->battery = power_supply_register(&dev->dev, psy_desc, &psy_cfg); if (IS_ERR(dev->battery)) { error = PTR_ERR(dev->battery); @@ -530,9 +539,10 @@ static void hidinput_update_battery(struct hid_device *dev, int value) capacity = hidinput_scale_battery_capacity(dev, value); - if (!dev->battery_reported || capacity != dev->battery_capacity) { + if (dev->battery_status != HID_BATTERY_REPORTED || + capacity != dev->battery_capacity) { dev->battery_capacity = capacity; - dev->battery_reported = true; + dev->battery_status = HID_BATTERY_REPORTED; power_supply_changed(dev->battery); } } diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index fbfcc8009432..b39844adea47 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c @@ -192,6 +192,11 @@ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t int ret = 0, len; unsigned char report_number; + if (!hidraw_table[minor] || !hidraw_table[minor]->exist) { + ret = -ENODEV; + goto out; + } + dev = hidraw_table[minor]->hid; if (!dev->ll_driver->raw_request) { diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index 97689e98e53f..963328674e93 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c @@ -47,6 +47,7 @@ /* quirks to control the device */ #define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0) #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1) +#define I2C_HID_QUIRK_RESEND_REPORT_DESCR BIT(2) /* flags */ #define I2C_HID_STARTED 0 @@ -171,6 +172,8 @@ static const struct i2c_hid_quirks { I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV }, { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288, I2C_HID_QUIRK_NO_IRQ_AFTER_RESET }, + { I2C_VENDOR_ID_RAYD, I2C_PRODUCT_ID_RAYD_3118, + I2C_HID_QUIRK_RESEND_REPORT_DESCR }, { 0, 0 } }; @@ -1220,6 +1223,16 @@ static int i2c_hid_resume(struct device *dev) if (ret) return ret; + /* RAYDIUM device (2386:3118) need to re-send report descr cmd + * after resume, after this it will be back normal. + * otherwise it issues too many incomplete reports. 
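The i2c-hid change above follows the usual quirk-table pattern: match the device by vendor/product ID once at probe time, OR the table entry's flags into ihid->quirks, and test a single bit at the point where the workaround (re-sending the report descriptor after resume) applies. A reduced sketch of the lookup side (the flag value mirrors I2C_HID_QUIRK_RESEND_REPORT_DESCR; the helper itself is illustrative):

	#include <linux/types.h>
	#include <linux/bitops.h>

	#define QUIRK_RESEND_REPORT_DESCR	BIT(2)

	static const struct { u16 vendor, product; u32 quirks; } quirk_table[] = {
		{ 0x2386, 0x3118, QUIRK_RESEND_REPORT_DESCR },	/* RAYD 3118 */
		{ 0, 0, 0 }					/* terminator */
	};

	static u32 lookup_quirks(u16 vendor, u16 product)
	{
		int i;

		for (i = 0; quirk_table[i].vendor; i++)
			if (quirk_table[i].vendor == vendor &&
			    quirk_table[i].product == product)
				return quirk_table[i].quirks;
		return 0;
	}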
+ */ + if (ihid->quirks & I2C_HID_QUIRK_RESEND_REPORT_DESCR) { + ret = i2c_hid_command(client, &hid_report_descr_cmd, NULL, 0); + if (ret) + return ret; + } + if (hid->driver && hid->driver->reset_resume) { ret = hid->driver->reset_resume(hid); return ret; diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 6da16a879c9f..5f947ec20dcb 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -689,6 +689,45 @@ static int wacom_intuos_get_tool_type(int tool_id) return tool_type; } +static void wacom_exit_report(struct wacom_wac *wacom) +{ + struct input_dev *input = wacom->pen_input; + struct wacom_features *features = &wacom->features; + unsigned char *data = wacom->data; + int idx = (features->type == INTUOS) ? (data[1] & 0x01) : 0; + + /* + * Reset all states otherwise we lose the initial states + * when in-prox next time + */ + input_report_abs(input, ABS_X, 0); + input_report_abs(input, ABS_Y, 0); + input_report_abs(input, ABS_DISTANCE, 0); + input_report_abs(input, ABS_TILT_X, 0); + input_report_abs(input, ABS_TILT_Y, 0); + if (wacom->tool[idx] >= BTN_TOOL_MOUSE) { + input_report_key(input, BTN_LEFT, 0); + input_report_key(input, BTN_MIDDLE, 0); + input_report_key(input, BTN_RIGHT, 0); + input_report_key(input, BTN_SIDE, 0); + input_report_key(input, BTN_EXTRA, 0); + input_report_abs(input, ABS_THROTTLE, 0); + input_report_abs(input, ABS_RZ, 0); + } else { + input_report_abs(input, ABS_PRESSURE, 0); + input_report_key(input, BTN_STYLUS, 0); + input_report_key(input, BTN_STYLUS2, 0); + input_report_key(input, BTN_TOUCH, 0); + input_report_abs(input, ABS_WHEEL, 0); + if (features->type >= INTUOS3S) + input_report_abs(input, ABS_Z, 0); + } + input_report_key(input, wacom->tool[idx], 0); + input_report_abs(input, ABS_MISC, 0); /* reset tool id */ + input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]); + wacom->id[idx] = 0; +} + static int wacom_intuos_inout(struct wacom_wac *wacom) { struct wacom_features *features = &wacom->features; @@ -741,36 +780,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) if (!wacom->id[idx]) return 1; - /* - * Reset all states otherwise we lose the initial states - * when in-prox next time - */ - input_report_abs(input, ABS_X, 0); - input_report_abs(input, ABS_Y, 0); - input_report_abs(input, ABS_DISTANCE, 0); - input_report_abs(input, ABS_TILT_X, 0); - input_report_abs(input, ABS_TILT_Y, 0); - if (wacom->tool[idx] >= BTN_TOOL_MOUSE) { - input_report_key(input, BTN_LEFT, 0); - input_report_key(input, BTN_MIDDLE, 0); - input_report_key(input, BTN_RIGHT, 0); - input_report_key(input, BTN_SIDE, 0); - input_report_key(input, BTN_EXTRA, 0); - input_report_abs(input, ABS_THROTTLE, 0); - input_report_abs(input, ABS_RZ, 0); - } else { - input_report_abs(input, ABS_PRESSURE, 0); - input_report_key(input, BTN_STYLUS, 0); - input_report_key(input, BTN_STYLUS2, 0); - input_report_key(input, BTN_TOUCH, 0); - input_report_abs(input, ABS_WHEEL, 0); - if (features->type >= INTUOS3S) - input_report_abs(input, ABS_Z, 0); - } - input_report_key(input, wacom->tool[idx], 0); - input_report_abs(input, ABS_MISC, 0); /* reset tool id */ - input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]); - wacom->id[idx] = 0; + wacom_exit_report(wacom); return 2; } @@ -1235,6 +1245,12 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom) if (!valid) continue; + if (!prox) { + wacom->shared->stylus_in_proximity = false; + wacom_exit_report(wacom); + input_sync(pen_input); + return; + } if (range) { input_report_abs(pen_input, ABS_X, 
get_unaligned_le16(&frame[1])); input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3])); diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index 051a72eecb24..d2cc55e21374 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -40,6 +40,10 @@ static DEFINE_MUTEX(nb_smu_ind_mutex); #define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 #endif +#ifndef PCI_DEVICE_ID_AMD_17H_RR_NB +#define PCI_DEVICE_ID_AMD_17H_RR_NB 0x15d0 +#endif + /* CPUID function 0x80000001, ebx */ #define CPUID_PKGTYPE_MASK 0xf0000000 #define CPUID_PKGTYPE_F 0x00000000 @@ -72,6 +76,7 @@ struct k10temp_data { struct pci_dev *pdev; void (*read_tempreg)(struct pci_dev *pdev, u32 *regval); int temp_offset; + u32 temp_adjust_mask; }; struct tctl_offset { @@ -84,6 +89,7 @@ static const struct tctl_offset tctl_offset_table[] = { { 0x17, "AMD Ryzen 5 1600X", 20000 }, { 0x17, "AMD Ryzen 7 1700X", 20000 }, { 0x17, "AMD Ryzen 7 1800X", 20000 }, + { 0x17, "AMD Ryzen 7 2700X", 10000 }, { 0x17, "AMD Ryzen Threadripper 1950X", 27000 }, { 0x17, "AMD Ryzen Threadripper 1920X", 27000 }, { 0x17, "AMD Ryzen Threadripper 1900X", 27000 }, @@ -129,6 +135,8 @@ static ssize_t temp1_input_show(struct device *dev, data->read_tempreg(data->pdev, ®val); temp = (regval >> 21) * 125; + if (regval & data->temp_adjust_mask) + temp -= 49000; if (temp > data->temp_offset) temp -= data->temp_offset; else @@ -259,12 +267,14 @@ static int k10temp_probe(struct pci_dev *pdev, data->pdev = pdev; if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 || - boot_cpu_data.x86_model == 0x70)) + boot_cpu_data.x86_model == 0x70)) { data->read_tempreg = read_tempreg_nb_f15; - else if (boot_cpu_data.x86 == 0x17) + } else if (boot_cpu_data.x86 == 0x17) { + data->temp_adjust_mask = 0x80000; data->read_tempreg = read_tempreg_nb_f17; - else + } else { data->read_tempreg = read_tempreg_pci; + } for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) { const struct tctl_offset *entry = &tctl_offset_table[i]; @@ -292,6 +302,7 @@ static const struct pci_device_id k10temp_id_table[] = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_RR_NB) }, {} }; MODULE_DEVICE_TABLE(pci, k10temp_id_table); diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c index 8b0bc4fc06e8..b0bc77bf2cd9 100644 --- a/drivers/hwmon/nct6683.c +++ b/drivers/hwmon/nct6683.c @@ -1380,8 +1380,8 @@ static int __init nct6683_find(int sioaddr, struct nct6683_sio_data *sio_data) /* Activate logical device if needed */ val = superio_inb(sioaddr, SIO_REG_ENABLE); if (!(val & 0x01)) { - pr_err("EC is disabled\n"); - goto fail; + pr_warn("Forcibly enabling EC access. 
Data may be unusable.\n"); + superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01); } superio_exit(sioaddr); diff --git a/drivers/hwmon/scmi-hwmon.c b/drivers/hwmon/scmi-hwmon.c index 363bf56eb0f2..91976b6ca300 100644 --- a/drivers/hwmon/scmi-hwmon.c +++ b/drivers/hwmon/scmi-hwmon.c @@ -170,7 +170,10 @@ static int scmi_hwmon_probe(struct scmi_device *sdev) scmi_chip_info.info = ptr_scmi_ci; chip_info = &scmi_chip_info; - for (type = 0; type < hwmon_max && nr_count[type]; type++) { + for (type = 0; type < hwmon_max; type++) { + if (!nr_count[type]) + continue; + scmi_hwmon_add_chan_info(scmi_hwmon_chan, dev, nr_count[type], type, hwmon_attributes[type]); *ptr_scmi_ci++ = scmi_hwmon_chan++; diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index c4865b08d7fb..8d21b9825d71 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -707,7 +707,6 @@ config I2C_MPC config I2C_MT65XX tristate "MediaTek I2C adapter" depends on ARCH_MEDIATEK || COMPILE_TEST - depends on HAS_DMA help This selects the MediaTek(R) Integrated Inter Circuit bus driver for MT65xx and MT81xx. @@ -885,7 +884,6 @@ config I2C_SH7760 config I2C_SH_MOBILE tristate "SuperH Mobile I2C Controller" - depends on HAS_DMA depends on ARCH_SHMOBILE || ARCH_RENESAS || COMPILE_TEST help If you say yes to this option, support will be included for the @@ -1098,7 +1096,6 @@ config I2C_XLP9XX config I2C_RCAR tristate "Renesas R-Car I2C Controller" - depends on HAS_DMA depends on ARCH_RENESAS || COMPILE_TEST select I2C_SLAVE help diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c index 25fcc3c1e32b..4053259bccb8 100644 --- a/drivers/i2c/busses/i2c-sprd.c +++ b/drivers/i2c/busses/i2c-sprd.c @@ -86,6 +86,7 @@ struct sprd_i2c { u32 count; int irq; int err; + bool is_suspended; }; static void sprd_i2c_set_count(struct sprd_i2c *i2c_dev, u32 count) @@ -283,6 +284,9 @@ static int sprd_i2c_master_xfer(struct i2c_adapter *i2c_adap, struct sprd_i2c *i2c_dev = i2c_adap->algo_data; int im, ret; + if (i2c_dev->is_suspended) + return -EBUSY; + ret = pm_runtime_get_sync(i2c_dev->dev); if (ret < 0) return ret; @@ -364,13 +368,12 @@ static irqreturn_t sprd_i2c_isr_thread(int irq, void *dev_id) struct sprd_i2c *i2c_dev = dev_id; struct i2c_msg *msg = i2c_dev->msg; bool ack = !(readl(i2c_dev->base + I2C_STATUS) & I2C_RX_ACK); - u32 i2c_count = readl(i2c_dev->base + I2C_COUNT); u32 i2c_tran; if (msg->flags & I2C_M_RD) i2c_tran = i2c_dev->count >= I2C_FIFO_FULL_THLD; else - i2c_tran = i2c_count; + i2c_tran = i2c_dev->count; /* * If we got one ACK from slave when writing data, and we did not @@ -408,14 +411,13 @@ static irqreturn_t sprd_i2c_isr(int irq, void *dev_id) { struct sprd_i2c *i2c_dev = dev_id; struct i2c_msg *msg = i2c_dev->msg; - u32 i2c_count = readl(i2c_dev->base + I2C_COUNT); bool ack = !(readl(i2c_dev->base + I2C_STATUS) & I2C_RX_ACK); u32 i2c_tran; if (msg->flags & I2C_M_RD) i2c_tran = i2c_dev->count >= I2C_FIFO_FULL_THLD; else - i2c_tran = i2c_count; + i2c_tran = i2c_dev->count; /* * If we did not get one ACK from slave when writing data, then we @@ -586,11 +588,23 @@ static int sprd_i2c_remove(struct platform_device *pdev) static int __maybe_unused sprd_i2c_suspend_noirq(struct device *pdev) { + struct sprd_i2c *i2c_dev = dev_get_drvdata(pdev); + + i2c_lock_adapter(&i2c_dev->adap); + i2c_dev->is_suspended = true; + i2c_unlock_adapter(&i2c_dev->adap); + return pm_runtime_force_suspend(pdev); } static int __maybe_unused sprd_i2c_resume_noirq(struct device *pdev) { + struct sprd_i2c *i2c_dev = 
dev_get_drvdata(pdev); + + i2c_lock_adapter(&i2c_dev->adap); + i2c_dev->is_suspended = false; + i2c_unlock_adapter(&i2c_dev->adap); + return pm_runtime_force_resume(pdev); } diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c index 036a03f0d0a6..1667b6e7674f 100644 --- a/drivers/i2c/i2c-dev.c +++ b/drivers/i2c/i2c-dev.c @@ -280,7 +280,7 @@ static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client, */ if (msgs[i].flags & I2C_M_RECV_LEN) { if (!(msgs[i].flags & I2C_M_RD) || - msgs[i].buf[0] < 1 || + msgs[i].len < 1 || msgs[i].buf[0] < 1 || msgs[i].len < msgs[i].buf[0] + I2C_SMBUS_BLOCK_MAX) { res = -EINVAL; diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig index ee270e065ba9..2a972ed6851b 100644 --- a/drivers/infiniband/Kconfig +++ b/drivers/infiniband/Kconfig @@ -61,9 +61,12 @@ config INFINIBAND_ON_DEMAND_PAGING pages on demand instead. config INFINIBAND_ADDR_TRANS - bool + bool "RDMA/CM" depends on INFINIBAND default y + ---help--- + Support for RDMA communication manager (CM). + This allows for a generic connection abstraction over RDMA. config INFINIBAND_ADDR_TRANS_CONFIGFS bool diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index e337b08de2ff..fb2d347f760f 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -291,14 +291,18 @@ static int find_gid(struct ib_gid_table *table, const union ib_gid *gid, * so lookup free slot only if requested. */ if (pempty && empty < 0) { - if (data->props & GID_TABLE_ENTRY_INVALID) { - /* Found an invalid (free) entry; allocate it */ - if (data->props & GID_TABLE_ENTRY_DEFAULT) { - if (default_gid) - empty = curr_index; - } else { - empty = curr_index; - } + if (data->props & GID_TABLE_ENTRY_INVALID && + (default_gid == + !!(data->props & GID_TABLE_ENTRY_DEFAULT))) { + /* + * Found an invalid (free) entry; allocate it. + * If default GID is requested, then our + * found slot must be one of the DEFAULT + * reserved slots or we fail. + * This ensures that only DEFAULT reserved + * slots are used for default property GIDs. 
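The sprd-i2c suspend hooks above set is_suspended while holding the adapter lock, and master_xfer checks the flag first, so a transfer requested after suspend_noirq fails cleanly with -EBUSY instead of touching a powered-down controller. The two halves of the pattern, condensed:

	/* in the noirq suspend (and, inverted, resume) callback */
	i2c_lock_adapter(&i2c_dev->adap);
	i2c_dev->is_suspended = true;
	i2c_unlock_adapter(&i2c_dev->adap);

	/* at the top of master_xfer, which the core calls with the adapter locked */
	if (i2c_dev->is_suspended)
		return -EBUSY;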
+ */ + empty = curr_index; } } @@ -420,8 +424,10 @@ int ib_cache_gid_add(struct ib_device *ib_dev, u8 port, return ret; } -int ib_cache_gid_del(struct ib_device *ib_dev, u8 port, - union ib_gid *gid, struct ib_gid_attr *attr) +static int +_ib_cache_gid_del(struct ib_device *ib_dev, u8 port, + union ib_gid *gid, struct ib_gid_attr *attr, + unsigned long mask, bool default_gid) { struct ib_gid_table *table; int ret = 0; @@ -431,11 +437,7 @@ int ib_cache_gid_del(struct ib_device *ib_dev, u8 port, mutex_lock(&table->lock); - ix = find_gid(table, gid, attr, false, - GID_ATTR_FIND_MASK_GID | - GID_ATTR_FIND_MASK_GID_TYPE | - GID_ATTR_FIND_MASK_NETDEV, - NULL); + ix = find_gid(table, gid, attr, default_gid, mask, NULL); if (ix < 0) { ret = -EINVAL; goto out_unlock; @@ -452,6 +454,17 @@ out_unlock: return ret; } +int ib_cache_gid_del(struct ib_device *ib_dev, u8 port, + union ib_gid *gid, struct ib_gid_attr *attr) +{ + unsigned long mask = GID_ATTR_FIND_MASK_GID | + GID_ATTR_FIND_MASK_GID_TYPE | + GID_ATTR_FIND_MASK_DEFAULT | + GID_ATTR_FIND_MASK_NETDEV; + + return _ib_cache_gid_del(ib_dev, port, gid, attr, mask, false); +} + int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port, struct net_device *ndev) { @@ -728,7 +741,7 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port, unsigned long gid_type_mask, enum ib_cache_gid_default_mode mode) { - union ib_gid gid; + union ib_gid gid = { }; struct ib_gid_attr gid_attr; struct ib_gid_table *table; unsigned int gid_type; @@ -736,7 +749,9 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port, table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid; - make_default_gid(ndev, &gid); + mask = GID_ATTR_FIND_MASK_GID_TYPE | + GID_ATTR_FIND_MASK_DEFAULT | + GID_ATTR_FIND_MASK_NETDEV; memset(&gid_attr, 0, sizeof(gid_attr)); gid_attr.ndev = ndev; @@ -747,12 +762,12 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port, gid_attr.gid_type = gid_type; if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) { - mask = GID_ATTR_FIND_MASK_GID_TYPE | - GID_ATTR_FIND_MASK_DEFAULT; + make_default_gid(ndev, &gid); __ib_cache_gid_add(ib_dev, port, &gid, &gid_attr, mask, true); } else if (mode == IB_CACHE_GID_DEFAULT_MODE_DELETE) { - ib_cache_gid_del(ib_dev, port, &gid, &gid_attr); + _ib_cache_gid_del(ib_dev, port, &gid, + &gid_attr, mask, true); } } } diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 51a641002e10..a693fcd4c513 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -382,6 +382,8 @@ struct cma_hdr { #define CMA_VERSION 0x00 struct cma_req_info { + struct sockaddr_storage listen_addr_storage; + struct sockaddr_storage src_addr_storage; struct ib_device *device; int port; union ib_gid local_gid; @@ -866,7 +868,6 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv, { struct ib_qp_attr qp_attr; int qp_attr_mask, ret; - union ib_gid sgid; mutex_lock(&id_priv->qp_mutex); if (!id_priv->id.qp) { @@ -889,12 +890,6 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv, if (ret) goto out; - ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num, - rdma_ah_read_grh(&qp_attr.ah_attr)->sgid_index, - &sgid, NULL); - if (ret) - goto out; - BUG_ON(id_priv->cma_dev->device != id_priv->id.device); if (conn_param) @@ -1340,11 +1335,11 @@ static bool validate_net_dev(struct net_device *net_dev, } static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event, - const struct cma_req_info *req) + struct cma_req_info *req) { - 
struct sockaddr_storage listen_addr_storage, src_addr_storage; - struct sockaddr *listen_addr = (struct sockaddr *)&listen_addr_storage, - *src_addr = (struct sockaddr *)&src_addr_storage; + struct sockaddr *listen_addr = + (struct sockaddr *)&req->listen_addr_storage; + struct sockaddr *src_addr = (struct sockaddr *)&req->src_addr_storage; struct net_device *net_dev; const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL; int err; @@ -1359,11 +1354,6 @@ static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event, if (!net_dev) return ERR_PTR(-ENODEV); - if (!validate_net_dev(net_dev, listen_addr, src_addr)) { - dev_put(net_dev); - return ERR_PTR(-EHOSTUNREACH); - } - return net_dev; } @@ -1490,15 +1480,51 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id, } } + /* + * Net namespace might be getting deleted while route lookup, + * cm_id lookup is in progress. Therefore, perform netdevice + * validation, cm_id lookup under rcu lock. + * RCU lock along with netdevice state check, synchronizes with + * netdevice migrating to different net namespace and also avoids + * case where net namespace doesn't get deleted while lookup is in + * progress. + * If the device state is not IFF_UP, its properties such as ifindex + * and nd_net cannot be trusted to remain valid without rcu lock. + * net/core/dev.c change_net_namespace() ensures to synchronize with + * ongoing operations on net device after device is closed using + * synchronize_net(). + */ + rcu_read_lock(); + if (*net_dev) { + /* + * If netdevice is down, it is likely that it is administratively + * down or it might be migrating to different namespace. + * In that case avoid further processing, as the net namespace + * or ifindex may change. + */ + if (((*net_dev)->flags & IFF_UP) == 0) { + id_priv = ERR_PTR(-EHOSTUNREACH); + goto err; + } + + if (!validate_net_dev(*net_dev, + (struct sockaddr *)&req.listen_addr_storage, + (struct sockaddr *)&req.src_addr_storage)) { + id_priv = ERR_PTR(-EHOSTUNREACH); + goto err; + } + } + bind_list = cma_ps_find(*net_dev ? 
dev_net(*net_dev) : &init_net, rdma_ps_from_service_id(req.service_id), cma_port_from_service_id(req.service_id)); id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev); +err: + rcu_read_unlock(); if (IS_ERR(id_priv) && *net_dev) { dev_put(*net_dev); *net_dev = NULL; } - return id_priv; } diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c index 9821ae900f6d..da12da1c36f6 100644 --- a/drivers/infiniband/core/iwpm_util.c +++ b/drivers/infiniband/core/iwpm_util.c @@ -114,7 +114,7 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr, struct sockaddr_storage *mapped_sockaddr, u8 nl_client) { - struct hlist_head *hash_bucket_head; + struct hlist_head *hash_bucket_head = NULL; struct iwpm_mapping_info *map_info; unsigned long flags; int ret = -EINVAL; @@ -142,6 +142,9 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr, } } spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags); + + if (!hash_bucket_head) + kfree(map_info); return ret; } diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index c50596f7f98a..b28452a55a08 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -59,7 +59,7 @@ module_param_named(recv_queue_size, mad_recvq_size, int, 0444); MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests"); static struct list_head ib_mad_port_list; -static u32 ib_mad_client_id = 0; +static atomic_t ib_mad_client_id = ATOMIC_INIT(0); /* Port list lock */ static DEFINE_SPINLOCK(ib_mad_port_list_lock); @@ -377,7 +377,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, } spin_lock_irqsave(&port_priv->reg_lock, flags); - mad_agent_priv->agent.hi_tid = ++ib_mad_client_id; + mad_agent_priv->agent.hi_tid = atomic_inc_return(&ib_mad_client_id); /* * Make sure MAD registration (if supplied) diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c index cc2966380c0c..c0e4fd55e2cc 100644 --- a/drivers/infiniband/core/roce_gid_mgmt.c +++ b/drivers/infiniband/core/roce_gid_mgmt.c @@ -255,6 +255,7 @@ static void bond_delete_netdev_default_gids(struct ib_device *ib_dev, struct net_device *rdma_ndev) { struct net_device *real_dev = rdma_vlan_dev_real_dev(event_ndev); + unsigned long gid_type_mask; if (!rdma_ndev) return; @@ -264,21 +265,22 @@ static void bond_delete_netdev_default_gids(struct ib_device *ib_dev, rcu_read_lock(); - if (rdma_is_upper_dev_rcu(rdma_ndev, event_ndev) && - is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) == - BONDING_SLAVE_STATE_INACTIVE) { - unsigned long gid_type_mask; - + if (((rdma_ndev != event_ndev && + !rdma_is_upper_dev_rcu(rdma_ndev, event_ndev)) || + is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) + == + BONDING_SLAVE_STATE_INACTIVE)) { rcu_read_unlock(); + return; + } - gid_type_mask = roce_gid_type_mask_support(ib_dev, port); + rcu_read_unlock(); - ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev, - gid_type_mask, - IB_CACHE_GID_DEFAULT_MODE_DELETE); - } else { - rcu_read_unlock(); - } + gid_type_mask = roce_gid_type_mask_support(ib_dev, port); + + ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev, + gid_type_mask, + IB_CACHE_GID_DEFAULT_MODE_DELETE); } static void enum_netdev_ipv4_ips(struct ib_device *ib_dev, diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 74329483af6d..eab43b17e9cf 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -159,6 +159,23 @@ static void 
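The mad.c hunk above turns the global ib_mad_client_id counter into an atomic_t and hands out hi_tid values with atomic_inc_return(), so every registration gets a distinct ID without depending on any surrounding lock. A minimal userspace C sketch of that pattern (illustrative names only, not the kernel API):

/* C11-atomics version of "give each caller a unique, increasing id":
 * a plain "++shared" is not atomic, so two concurrent callers could
 * observe the same value. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint next_client_id;      /* plays the role of ib_mad_client_id */

static unsigned int register_agent(void)
{
        /* atomic_fetch_add() returns the old value; +1 matches the
         * semantics of the kernel's atomic_inc_return(). */
        return atomic_fetch_add(&next_client_id, 1) + 1;
}

int main(void)
{
        printf("first  agent hi_tid: %u\n", register_agent());
        printf("second agent hi_tid: %u\n", register_agent());
        return 0;
}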
ucma_put_ctx(struct ucma_context *ctx) complete(&ctx->comp); } +/* + * Same as ucm_get_ctx but requires that ->cm_id->device is valid, eg that the + * CM_ID is bound. + */ +static struct ucma_context *ucma_get_ctx_dev(struct ucma_file *file, int id) +{ + struct ucma_context *ctx = ucma_get_ctx(file, id); + + if (IS_ERR(ctx)) + return ctx; + if (!ctx->cm_id->device) { + ucma_put_ctx(ctx); + return ERR_PTR(-EINVAL); + } + return ctx; +} + static void ucma_close_event_id(struct work_struct *work) { struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work); @@ -683,7 +700,7 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; - if (!rdma_addr_size_in6(&cmd.src_addr) || + if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) || !rdma_addr_size_in6(&cmd.dst_addr)) return -EINVAL; @@ -734,7 +751,7 @@ static ssize_t ucma_resolve_route(struct ucma_file *file, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; - ctx = ucma_get_ctx(file, cmd.id); + ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); @@ -1050,7 +1067,7 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf, if (!cmd.conn_param.valid) return -EINVAL; - ctx = ucma_get_ctx(file, cmd.id); + ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); @@ -1092,7 +1109,7 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; - ctx = ucma_get_ctx(file, cmd.id); + ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); @@ -1120,7 +1137,7 @@ static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; - ctx = ucma_get_ctx(file, cmd.id); + ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); @@ -1139,7 +1156,7 @@ static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; - ctx = ucma_get_ctx(file, cmd.id); + ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); @@ -1167,15 +1184,10 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file, if (cmd.qp_state > IB_QPS_ERR) return -EINVAL; - ctx = ucma_get_ctx(file, cmd.id); + ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); - if (!ctx->cm_id->device) { - ret = -EINVAL; - goto out; - } - resp.qp_attr_mask = 0; memset(&qp_attr, 0, sizeof qp_attr); qp_attr.qp_state = cmd.qp_state; @@ -1316,13 +1328,13 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; + if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE)) + return -EINVAL; + ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); - if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE)) - return -EINVAL; - optval = memdup_user(u64_to_user_ptr(cmd.optval), cmd.optlen); if (IS_ERR(optval)) { @@ -1384,7 +1396,7 @@ static ssize_t ucma_process_join(struct ucma_file *file, else return -EINVAL; - ctx = ucma_get_ctx(file, cmd->id); + ctx = ucma_get_ctx_dev(file, cmd->id); if (IS_ERR(ctx)) return PTR_ERR(ctx); diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 13cb5e4deb86..21a887c9523b 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -691,6 +691,7 @@ ssize_t 
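The new ucma_get_ctx_dev() helper above centralizes a check that several ucma handlers need: the looked-up context is only usable if its cm_id is already bound to a device, and the reference must be dropped again when it is not. A small userspace sketch of that shape (hypothetical structures, not the rdma_ucm API):

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct ctx {
        const char *device;     /* stands in for ctx->cm_id->device */
        int refs;
};

static void put_ctx(struct ctx *c) { c->refs--; }

static struct ctx *get_ctx_dev(struct ctx *c, int *err)
{
        c->refs++;                      /* the "get" itself succeeded */
        if (!c->device) {               /* cm_id not bound yet: reject */
                put_ctx(c);
                *err = -EINVAL;
                return NULL;
        }
        *err = 0;
        return c;
}

int main(void)
{
        struct ctx unbound = { .device = NULL, .refs = 0 };
        int err;

        if (!get_ctx_dev(&unbound, &err))
                printf("rejected unbound ctx: %d (refcount back to %d)\n",
                       err, unbound.refs);
        return 0;
}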
ib_uverbs_reg_mr(struct ib_uverbs_file *file, mr->device = pd->device; mr->pd = pd; + mr->dm = NULL; mr->uobject = uobj; atomic_inc(&pd->usecnt); mr->res.type = RDMA_RESTRACK_MR; @@ -765,6 +766,11 @@ ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file, mr = uobj->object; + if (mr->dm) { + ret = -EINVAL; + goto put_uobjs; + } + if (cmd.flags & IB_MR_REREG_ACCESS) { ret = ib_check_mr_access(cmd.access_flags); if (ret) diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c index 8c93970dc8f1..8d32c4ae368c 100644 --- a/drivers/infiniband/core/uverbs_ioctl.c +++ b/drivers/infiniband/core/uverbs_ioctl.c @@ -234,6 +234,15 @@ static int uverbs_validate_kernel_mandatory(const struct uverbs_method_spec *met return -EINVAL; } + for (; i < method_spec->num_buckets; i++) { + struct uverbs_attr_spec_hash *attr_spec_bucket = + method_spec->attr_buckets[i]; + + if (!bitmap_empty(attr_spec_bucket->mandatory_attrs_bitmask, + attr_spec_bucket->num_attrs)) + return -EINVAL; + } + return 0; } diff --git a/drivers/infiniband/core/uverbs_std_types_flow_action.c b/drivers/infiniband/core/uverbs_std_types_flow_action.c index cbcec3da12f6..b4f016dfa23d 100644 --- a/drivers/infiniband/core/uverbs_std_types_flow_action.c +++ b/drivers/infiniband/core/uverbs_std_types_flow_action.c @@ -363,28 +363,28 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY)(struct ib_device static const struct uverbs_attr_spec uverbs_flow_action_esp_keymat[] = { [IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM] = { - .ptr = { + { .ptr = { .type = UVERBS_ATTR_TYPE_PTR_IN, UVERBS_ATTR_TYPE(struct ib_uverbs_flow_action_esp_keymat_aes_gcm), .flags = UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO, - }, + } }, }, }; static const struct uverbs_attr_spec uverbs_flow_action_esp_replay[] = { [IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE] = { - .ptr = { + { .ptr = { .type = UVERBS_ATTR_TYPE_PTR_IN, /* No need to specify any data */ .len = 0, - } + } } }, [IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP] = { - .ptr = { + { .ptr = { .type = UVERBS_ATTR_TYPE_PTR_IN, UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp_replay_bmp, size), .flags = UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO, - } + } } }, }; diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 7eff3aeffe01..6ddfb1fade79 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -1656,6 +1656,7 @@ struct ib_mr *ib_alloc_mr(struct ib_pd *pd, if (!IS_ERR(mr)) { mr->device = pd->device; mr->pd = pd; + mr->dm = NULL; mr->uobject = NULL; atomic_inc(&pd->usecnt); mr->need_inval = false; diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index 6f2b26126c64..2be2e1ac1b5f 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -315,7 +315,7 @@ static void advance_oldest_read(struct t4_wq *wq) * Deal with out-of-order and/or completions that complete * prior unsignalled WRs. 
*/ -void c4iw_flush_hw_cq(struct c4iw_cq *chp) +void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp) { struct t4_cqe *hw_cqe, *swcqe, read_cqe; struct c4iw_qp *qhp; @@ -339,6 +339,13 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp) if (qhp == NULL) goto next_cqe; + if (flush_qhp != qhp) { + spin_lock(&qhp->lock); + + if (qhp->wq.flushed == 1) + goto next_cqe; + } + if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) goto next_cqe; @@ -390,6 +397,8 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp) next_cqe: t4_hwcq_consume(&chp->cq); ret = t4_next_hw_cqe(&chp->cq, &hw_cqe); + if (qhp && flush_qhp != qhp) + spin_unlock(&qhp->lock); } } diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index feeb8ee6f4a2..44161ca4d2a8 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c @@ -875,6 +875,11 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) rdev->status_page->db_off = 0; + init_completion(&rdev->rqt_compl); + init_completion(&rdev->pbl_compl); + kref_init(&rdev->rqt_kref); + kref_init(&rdev->pbl_kref); + return 0; err_free_status_page_and_wr_log: if (c4iw_wr_log && rdev->wr_log) @@ -893,13 +898,15 @@ destroy_resource: static void c4iw_rdev_close(struct c4iw_rdev *rdev) { - destroy_workqueue(rdev->free_workq); kfree(rdev->wr_log); c4iw_release_dev_ucontext(rdev, &rdev->uctx); free_page((unsigned long)rdev->status_page); c4iw_pblpool_destroy(rdev); c4iw_rqtpool_destroy(rdev); + wait_for_completion(&rdev->pbl_compl); + wait_for_completion(&rdev->rqt_compl); c4iw_ocqp_pool_destroy(rdev); + destroy_workqueue(rdev->free_workq); c4iw_destroy_resource(&rdev->resource); } diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index cc929002c05e..831027717121 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -185,6 +185,10 @@ struct c4iw_rdev { struct wr_log_entry *wr_log; int wr_log_size; struct workqueue_struct *free_workq; + struct completion rqt_compl; + struct completion pbl_compl; + struct kref rqt_kref; + struct kref pbl_kref; }; static inline int c4iw_fatal_error(struct c4iw_rdev *rdev) @@ -1049,7 +1053,7 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size); void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size); u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size); void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size); -void c4iw_flush_hw_cq(struct c4iw_cq *chp); +void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp); void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count); int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp); int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count); diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index de77b6027d69..ae167b686608 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -1343,12 +1343,12 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp, qhp->wq.flushed = 1; t4_set_wq_in_error(&qhp->wq); - c4iw_flush_hw_cq(rchp); + c4iw_flush_hw_cq(rchp, qhp); c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count); rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count); if (schp != rchp) - c4iw_flush_hw_cq(schp); + c4iw_flush_hw_cq(schp, qhp); sq_flushed = c4iw_flush_sq(qhp); spin_unlock(&qhp->lock); diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c index 3cf25997ed2b..0ef25ae05e6f 100644 --- 
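In the cxgb4 cq.c hunk above, c4iw_flush_hw_cq() now takes the QP that is driving the flush, so it can lock every other QP it finds in the shared CQ (and skip ones already flushed) without re-taking the lock the caller already holds. A pthread sketch of that locking rule, with made-up types rather than the cxgb4 structures:

#include <pthread.h>
#include <stdio.h>

struct qp {
        pthread_mutex_t lock;
        int flushed;
        int id;
};

static void flush_cq_entry(struct qp *flush_qp, struct qp *found)
{
        if (found != flush_qp) {
                pthread_mutex_lock(&found->lock);
                if (found->flushed) {           /* peer already flushed: skip */
                        pthread_mutex_unlock(&found->lock);
                        return;
                }
        }
        printf("flushing CQE for qp %d\n", found->id);
        if (found != flush_qp)
                pthread_mutex_unlock(&found->lock);
}

int main(void)
{
        struct qp self = { PTHREAD_MUTEX_INITIALIZER, 0, 1 };
        struct qp peer = { PTHREAD_MUTEX_INITIALIZER, 1, 2 };

        flush_cq_entry(&self, &self);   /* own QP: caller holds its lock */
        flush_cq_entry(&self, &peer);   /* peer QP, already flushed: skipped */
        return 0;
}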
a/drivers/infiniband/hw/cxgb4/resource.c +++ b/drivers/infiniband/hw/cxgb4/resource.c @@ -260,12 +260,22 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size) rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT); if (rdev->stats.pbl.cur > rdev->stats.pbl.max) rdev->stats.pbl.max = rdev->stats.pbl.cur; + kref_get(&rdev->pbl_kref); } else rdev->stats.pbl.fail++; mutex_unlock(&rdev->stats.lock); return (u32)addr; } +static void destroy_pblpool(struct kref *kref) +{ + struct c4iw_rdev *rdev; + + rdev = container_of(kref, struct c4iw_rdev, pbl_kref); + gen_pool_destroy(rdev->pbl_pool); + complete(&rdev->pbl_compl); +} + void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size) { pr_debug("addr 0x%x size %d\n", addr, size); @@ -273,6 +283,7 @@ void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size) rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT); mutex_unlock(&rdev->stats.lock); gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size); + kref_put(&rdev->pbl_kref, destroy_pblpool); } int c4iw_pblpool_create(struct c4iw_rdev *rdev) @@ -310,7 +321,7 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev) void c4iw_pblpool_destroy(struct c4iw_rdev *rdev) { - gen_pool_destroy(rdev->pbl_pool); + kref_put(&rdev->pbl_kref, destroy_pblpool); } /* @@ -331,12 +342,22 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size) rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT); if (rdev->stats.rqt.cur > rdev->stats.rqt.max) rdev->stats.rqt.max = rdev->stats.rqt.cur; + kref_get(&rdev->rqt_kref); } else rdev->stats.rqt.fail++; mutex_unlock(&rdev->stats.lock); return (u32)addr; } +static void destroy_rqtpool(struct kref *kref) +{ + struct c4iw_rdev *rdev; + + rdev = container_of(kref, struct c4iw_rdev, rqt_kref); + gen_pool_destroy(rdev->rqt_pool); + complete(&rdev->rqt_compl); +} + void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size) { pr_debug("addr 0x%x size %d\n", addr, size << 6); @@ -344,6 +365,7 @@ void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size) rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT); mutex_unlock(&rdev->stats.lock); gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6); + kref_put(&rdev->rqt_kref, destroy_rqtpool); } int c4iw_rqtpool_create(struct c4iw_rdev *rdev) @@ -380,7 +402,7 @@ int c4iw_rqtpool_create(struct c4iw_rdev *rdev) void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev) { - gen_pool_destroy(rdev->rqt_pool); + kref_put(&rdev->rqt_kref, destroy_rqtpool); } /* diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c index a97055dd4fbd..b5fab55cc275 100644 --- a/drivers/infiniband/hw/hfi1/affinity.c +++ b/drivers/infiniband/hw/hfi1/affinity.c @@ -412,7 +412,6 @@ static void hfi1_cleanup_sdma_notifier(struct hfi1_msix_entry *msix) static int get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix) { - int ret; cpumask_var_t diff; struct hfi1_affinity_node *entry; struct cpu_mask_set *set = NULL; @@ -424,10 +423,6 @@ static int get_irq_affinity(struct hfi1_devdata *dd, extra[0] = '\0'; cpumask_clear(&msix->mask); - ret = zalloc_cpumask_var(&diff, GFP_KERNEL); - if (!ret) - return -ENOMEM; - entry = node_affinity_lookup(dd->node); switch (msix->type) { @@ -458,6 +453,9 @@ static int get_irq_affinity(struct hfi1_devdata *dd, * finds its CPU here. 
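The device.c and resource.c hunks above add a kref plus completion around the PBL and RQT gen_pools: the rdev holds one reference, every allocation takes another, every free drops one, and c4iw_rdev_close() waits on the completion so gen_pool_destroy() only runs once the last outstanding allocation has been returned. A compact userspace sketch of that lifetime rule (illustrative names, single-threaded for brevity):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct pool {
        atomic_int refs;        /* plays the role of rdev->pbl_kref */
        bool destroyed;         /* stands in for gen_pool_destroy() */
};

static void pool_put(struct pool *p)
{
        if (atomic_fetch_sub(&p->refs, 1) == 1) {
                p->destroyed = true;                         /* destroy_pblpool() */
                printf("pool destroyed, waiter released\n"); /* complete()        */
        }
}

int main(void)
{
        struct pool p = { .refs = 1, .destroyed = false };

        atomic_fetch_add(&p.refs, 1);   /* an allocation takes a reference  */
        pool_put(&p);                   /* close path drops the owner's ref */
        printf("after close: destroyed=%d (one allocation still live)\n",
               p.destroyed);
        pool_put(&p);                   /* last free finally destroys pool  */
        return 0;
}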
*/ if (cpu == -1 && set) { + if (!zalloc_cpumask_var(&diff, GFP_KERNEL)) + return -ENOMEM; + if (cpumask_equal(&set->mask, &set->used)) { /* * We've used up all the CPUs, bump up the generation @@ -469,6 +467,8 @@ static int get_irq_affinity(struct hfi1_devdata *dd, cpumask_andnot(diff, &set->mask, &set->used); cpu = cpumask_first(diff); cpumask_set_cpu(cpu, &set->used); + + free_cpumask_var(diff); } cpumask_set_cpu(cpu, &msix->mask); @@ -482,7 +482,6 @@ static int get_irq_affinity(struct hfi1_devdata *dd, hfi1_setup_sdma_notifier(msix); } - free_cpumask_var(diff); return 0; } diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c index 46d1475b2154..bd837a048bf4 100644 --- a/drivers/infiniband/hw/hfi1/driver.c +++ b/drivers/infiniband/hw/hfi1/driver.c @@ -433,31 +433,43 @@ void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt, bool do_cnp) { struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); + struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); struct ib_other_headers *ohdr = pkt->ohdr; struct ib_grh *grh = pkt->grh; u32 rqpn = 0, bth1; - u16 pkey, rlid, dlid = ib_get_dlid(pkt->hdr); + u16 pkey; + u32 rlid, slid, dlid = 0; u8 hdr_type, sc, svc_type; bool is_mcast = false; + /* can be called from prescan */ if (pkt->etype == RHF_RCV_TYPE_BYPASS) { is_mcast = hfi1_is_16B_mcast(dlid); pkey = hfi1_16B_get_pkey(pkt->hdr); sc = hfi1_16B_get_sc(pkt->hdr); + dlid = hfi1_16B_get_dlid(pkt->hdr); + slid = hfi1_16B_get_slid(pkt->hdr); hdr_type = HFI1_PKT_TYPE_16B; } else { is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) && (dlid != be16_to_cpu(IB_LID_PERMISSIVE)); pkey = ib_bth_get_pkey(ohdr); sc = hfi1_9B_get_sc5(pkt->hdr, pkt->rhf); + dlid = ib_get_dlid(pkt->hdr); + slid = ib_get_slid(pkt->hdr); hdr_type = HFI1_PKT_TYPE_9B; } switch (qp->ibqp.qp_type) { + case IB_QPT_UD: + dlid = ppd->lid; + rlid = slid; + rqpn = ib_get_sqpn(pkt->ohdr); + svc_type = IB_CC_SVCTYPE_UD; + break; case IB_QPT_SMI: case IB_QPT_GSI: - case IB_QPT_UD: - rlid = ib_get_slid(pkt->hdr); + rlid = slid; rqpn = ib_get_sqpn(pkt->ohdr); svc_type = IB_CC_SVCTYPE_UD; break; @@ -482,7 +494,6 @@ void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt, dlid, rlid, sc, grh); if (!is_mcast && (bth1 & IB_BECN_SMASK)) { - struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); u32 lqpn = bth1 & RVT_QPN_MASK; u8 sl = ibp->sc_to_sl[sc]; diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index 32c48265405e..cac2c62bc42d 100644 --- a/drivers/infiniband/hw/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h @@ -1537,13 +1537,13 @@ void set_link_ipg(struct hfi1_pportdata *ppd); void process_becn(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn, u32 rqpn, u8 svc_type); void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn, - u32 pkey, u32 slid, u32 dlid, u8 sc5, + u16 pkey, u32 slid, u32 dlid, u8 sc5, const struct ib_grh *old_grh); void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp, - u32 remote_qpn, u32 pkey, u32 slid, u32 dlid, + u32 remote_qpn, u16 pkey, u32 slid, u32 dlid, u8 sc5, const struct ib_grh *old_grh); typedef void (*hfi1_handle_cnp)(struct hfi1_ibport *ibp, struct rvt_qp *qp, - u32 remote_qpn, u32 pkey, u32 slid, u32 dlid, + u32 remote_qpn, u16 pkey, u32 slid, u32 dlid, u8 sc5, const struct ib_grh *old_grh); #define PKEY_CHECK_INVALID -1 @@ -2437,7 +2437,7 @@ static inline void hfi1_make_16b_hdr(struct hfi1_16b_header *hdr, ((slid >> OPA_16B_SLID_SHIFT) << OPA_16B_SLID_HIGH_SHIFT); lrh2 = 
(lrh2 & ~OPA_16B_DLID_MASK) | ((dlid >> OPA_16B_DLID_SHIFT) << OPA_16B_DLID_HIGH_SHIFT); - lrh2 = (lrh2 & ~OPA_16B_PKEY_MASK) | (pkey << OPA_16B_PKEY_SHIFT); + lrh2 = (lrh2 & ~OPA_16B_PKEY_MASK) | ((u32)pkey << OPA_16B_PKEY_SHIFT); lrh2 = (lrh2 & ~OPA_16B_L4_MASK) | l4; hdr->lrh[0] = lrh0; diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index 33eba2356742..6309edf811df 100644 --- a/drivers/infiniband/hw/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c @@ -88,9 +88,9 @@ * pio buffers per ctxt, etc.) Zero means use one user context per CPU. */ int num_user_contexts = -1; -module_param_named(num_user_contexts, num_user_contexts, uint, S_IRUGO); +module_param_named(num_user_contexts, num_user_contexts, int, 0444); MODULE_PARM_DESC( - num_user_contexts, "Set max number of user contexts to use"); + num_user_contexts, "Set max number of user contexts to use (default: -1 will use the real (non-HT) CPU count)"); uint krcvqs[RXE_NUM_DATA_VL]; int krcvqsset; @@ -1209,30 +1209,49 @@ static void finalize_asic_data(struct hfi1_devdata *dd, kfree(ad); } -static void __hfi1_free_devdata(struct kobject *kobj) +/** + * hfi1_clean_devdata - cleans up per-unit data structure + * @dd: pointer to a valid devdata structure + * + * It cleans up all data structures set up by + * by hfi1_alloc_devdata(). + */ +static void hfi1_clean_devdata(struct hfi1_devdata *dd) { - struct hfi1_devdata *dd = - container_of(kobj, struct hfi1_devdata, kobj); struct hfi1_asic_data *ad; unsigned long flags; spin_lock_irqsave(&hfi1_devs_lock, flags); - idr_remove(&hfi1_unit_table, dd->unit); - list_del(&dd->list); + if (!list_empty(&dd->list)) { + idr_remove(&hfi1_unit_table, dd->unit); + list_del_init(&dd->list); + } ad = release_asic_data(dd); spin_unlock_irqrestore(&hfi1_devs_lock, flags); - if (ad) - finalize_asic_data(dd, ad); + + finalize_asic_data(dd, ad); free_platform_config(dd); rcu_barrier(); /* wait for rcu callbacks to complete */ free_percpu(dd->int_counter); free_percpu(dd->rcv_limit); free_percpu(dd->send_schedule); free_percpu(dd->tx_opstats); + dd->int_counter = NULL; + dd->rcv_limit = NULL; + dd->send_schedule = NULL; + dd->tx_opstats = NULL; sdma_clean(dd, dd->num_sdma); rvt_dealloc_device(&dd->verbs_dev.rdi); } +static void __hfi1_free_devdata(struct kobject *kobj) +{ + struct hfi1_devdata *dd = + container_of(kobj, struct hfi1_devdata, kobj); + + hfi1_clean_devdata(dd); +} + static struct kobj_type hfi1_devdata_type = { .release = __hfi1_free_devdata, }; @@ -1265,6 +1284,8 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra) return ERR_PTR(-ENOMEM); dd->num_pports = nports; dd->pport = (struct hfi1_pportdata *)(dd + 1); + dd->pcidev = pdev; + pci_set_drvdata(pdev, dd); INIT_LIST_HEAD(&dd->list); idr_preload(GFP_KERNEL); @@ -1331,9 +1352,7 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra) return dd; bail: - if (!list_empty(&dd->list)) - list_del_init(&dd->list); - rvt_dealloc_device(&dd->verbs_dev.rdi); + hfi1_clean_devdata(dd); return ERR_PTR(ret); } diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index 83d66e862207..c1c982908b4b 100644 --- a/drivers/infiniband/hw/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c @@ -163,9 +163,6 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev) resource_size_t addr; int ret = 0; - dd->pcidev = pdev; - pci_set_drvdata(pdev, dd); - addr = pci_resource_start(pdev, 0); len = pci_resource_len(pdev, 0); diff --git 
a/drivers/infiniband/hw/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c index d486355880cb..cbf7faa5038c 100644 --- a/drivers/infiniband/hw/hfi1/platform.c +++ b/drivers/infiniband/hw/hfi1/platform.c @@ -199,6 +199,7 @@ void free_platform_config(struct hfi1_devdata *dd) { /* Release memory allocated for eprom or fallback file read. */ kfree(dd->platform_config.data); + dd->platform_config.data = NULL; } void get_port_type(struct hfi1_pportdata *ppd) diff --git a/drivers/infiniband/hw/hfi1/qsfp.c b/drivers/infiniband/hw/hfi1/qsfp.c index 1869f639c3ae..b5966991d647 100644 --- a/drivers/infiniband/hw/hfi1/qsfp.c +++ b/drivers/infiniband/hw/hfi1/qsfp.c @@ -204,6 +204,8 @@ static void clean_i2c_bus(struct hfi1_i2c_bus *bus) void clean_up_i2c(struct hfi1_devdata *dd, struct hfi1_asic_data *ad) { + if (!ad) + return; clean_i2c_bus(ad->i2c_bus0); ad->i2c_bus0 = NULL; clean_i2c_bus(ad->i2c_bus1); diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c index 3daa94bdae3a..c0071ca4147a 100644 --- a/drivers/infiniband/hw/hfi1/ruc.c +++ b/drivers/infiniband/hw/hfi1/ruc.c @@ -733,6 +733,20 @@ static inline void hfi1_make_ruc_bth(struct rvt_qp *qp, ohdr->bth[2] = cpu_to_be32(bth2); } +/** + * hfi1_make_ruc_header_16B - build a 16B header + * @qp: the queue pair + * @ohdr: a pointer to the destination header memory + * @bth0: bth0 passed in from the RC/UC builder + * @bth2: bth2 passed in from the RC/UC builder + * @middle: non zero implies indicates ahg "could" be used + * @ps: the current packet state + * + * This routine may disarm ahg under these situations: + * - packet needs a GRH + * - BECN needed + * - migration state not IB_MIG_MIGRATED + */ static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp, struct ib_other_headers *ohdr, u32 bth0, u32 bth2, int middle, @@ -777,6 +791,12 @@ static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp, else middle = 0; + if (qp->s_flags & RVT_S_ECN) { + qp->s_flags &= ~RVT_S_ECN; + /* we recently received a FECN, so return a BECN */ + becn = true; + middle = 0; + } if (middle) build_ahg(qp, bth2); else @@ -784,11 +804,6 @@ static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp, bth0 |= pkey; bth0 |= extra_bytes << 20; - if (qp->s_flags & RVT_S_ECN) { - qp->s_flags &= ~RVT_S_ECN; - /* we recently received a FECN, so return a BECN */ - becn = true; - } hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2); if (!ppd->lid) @@ -806,6 +821,20 @@ static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp, pkey, becn, 0, l4, priv->s_sc); } +/** + * hfi1_make_ruc_header_9B - build a 9B header + * @qp: the queue pair + * @ohdr: a pointer to the destination header memory + * @bth0: bth0 passed in from the RC/UC builder + * @bth2: bth2 passed in from the RC/UC builder + * @middle: non zero implies indicates ahg "could" be used + * @ps: the current packet state + * + * This routine may disarm ahg under these situations: + * - packet needs a GRH + * - BECN needed + * - migration state not IB_MIG_MIGRATED + */ static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp, struct ib_other_headers *ohdr, u32 bth0, u32 bth2, int middle, @@ -839,6 +868,12 @@ static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp, else middle = 0; + if (qp->s_flags & RVT_S_ECN) { + qp->s_flags &= ~RVT_S_ECN; + /* we recently received a FECN, so return a BECN */ + bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT); + middle = 0; + } if (middle) build_ahg(qp, bth2); else @@ -846,11 +881,6 @@ static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp, bth0 |= 
pkey; bth0 |= extra_bytes << 20; - if (qp->s_flags & RVT_S_ECN) { - qp->s_flags &= ~RVT_S_ECN; - /* we recently received a FECN, so return a BECN */ - bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT); - } hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2); hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh, lrh0, diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c index bcf3b0bebac8..69c17a5ef038 100644 --- a/drivers/infiniband/hw/hfi1/ud.c +++ b/drivers/infiniband/hw/hfi1/ud.c @@ -628,7 +628,7 @@ int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey) } void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp, - u32 remote_qpn, u32 pkey, u32 slid, u32 dlid, + u32 remote_qpn, u16 pkey, u32 slid, u32 dlid, u8 sc5, const struct ib_grh *old_grh) { u64 pbc, pbc_flags = 0; @@ -687,7 +687,7 @@ void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp, } void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn, - u32 pkey, u32 slid, u32 dlid, u8 sc5, + u16 pkey, u32 slid, u32 dlid, u8 sc5, const struct ib_grh *old_grh) { u64 pbc, pbc_flags = 0; diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index 0eeabfbee192..63b5b3edabcb 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c @@ -912,7 +912,7 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev, obj_per_chunk = buf_chunk_size / obj_size; num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk; bt_chunk_num = bt_chunk_size / 8; - if (table->type >= HEM_TYPE_MTT) + if (type >= HEM_TYPE_MTT) num_bt_l0 = bt_chunk_num; table->hem = kcalloc(num_hem, sizeof(*table->hem), @@ -920,7 +920,7 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev, if (!table->hem) goto err_kcalloc_hem_buf; - if (check_whether_bt_num_3(table->type, hop_num)) { + if (check_whether_bt_num_3(type, hop_num)) { unsigned long num_bt_l1; num_bt_l1 = (num_hem + bt_chunk_num - 1) / @@ -939,8 +939,8 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev, goto err_kcalloc_l1_dma; } - if (check_whether_bt_num_2(table->type, hop_num) || - check_whether_bt_num_3(table->type, hop_num)) { + if (check_whether_bt_num_2(type, hop_num) || + check_whether_bt_num_3(type, hop_num)) { table->bt_l0 = kcalloc(num_bt_l0, sizeof(*table->bt_l0), GFP_KERNEL); if (!table->bt_l0) @@ -1039,14 +1039,14 @@ void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev, void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev) { hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table); - hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table); if (hr_dev->caps.trrl_entry_sz) hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.trrl_table); + hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table); hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table); hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table); - hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table); if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_cqe_table); + hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table); } diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 8b84ab7800d8..25916e8522ed 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -71,6 +71,11 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr, return -EINVAL; } + if (wr->opcode == 
IB_WR_RDMA_READ) { + dev_err(hr_dev->dev, "Not support inline data!\n"); + return -EINVAL; + } + for (i = 0; i < wr->num_sge; i++) { memcpy(wqe, ((void *)wr->sg_list[i].addr), wr->sg_list[i].length); @@ -148,7 +153,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ibqp->qp_type != IB_QPT_GSI && ibqp->qp_type != IB_QPT_UD)) { dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type); - *bad_wr = NULL; + *bad_wr = wr; return -EOPNOTSUPP; } @@ -182,7 +187,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id; - owner_bit = ~(qp->sq.head >> ilog2(qp->sq.wqe_cnt)) & 0x1; + owner_bit = + ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1); /* Corresponding to the QP type, wqe process separately */ if (ibqp->qp_type == IB_QPT_GSI) { @@ -456,6 +462,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, } else { dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type); spin_unlock_irqrestore(&qp->sq.lock, flags); + *bad_wr = wr; return -EOPNOTSUPP; } } @@ -2592,10 +2599,12 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp, roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M, V2_QPC_BYTE_4_SQPN_S, 0); - roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M, - V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn); - roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M, - V2_QPC_BYTE_56_DQPN_S, 0); + if (attr_mask & IB_QP_DEST_QPN) { + roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M, + V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn); + roce_set_field(qpc_mask->byte_56_dqpn_err, + V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0); + } roce_set_field(context->byte_168_irrl_idx, V2_QPC_BYTE_168_SQ_SHIFT_BAK_M, V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, @@ -2650,8 +2659,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, return -EINVAL; } - if ((attr_mask & IB_QP_ALT_PATH) || (attr_mask & IB_QP_ACCESS_FLAGS) || - (attr_mask & IB_QP_PKEY_INDEX) || (attr_mask & IB_QP_QKEY)) { + if (attr_mask & IB_QP_ALT_PATH) { dev_err(dev, "INIT2RTR attr_mask (0x%x) error\n", attr_mask); return -EINVAL; } @@ -2800,10 +2808,12 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, V2_QPC_BYTE_140_RR_MAX_S, 0); } - roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M, - V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num); - roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M, - V2_QPC_BYTE_56_DQPN_S, 0); + if (attr_mask & IB_QP_DEST_QPN) { + roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M, + V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num); + roce_set_field(qpc_mask->byte_56_dqpn_err, + V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0); + } /* Configure GID index */ port_num = rdma_ah_get_port_num(&attr->ah_attr); @@ -2845,7 +2855,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD) roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M, V2_QPC_BYTE_24_MTU_S, IB_MTU_4096); - else + else if (attr_mask & IB_QP_PATH_MTU) roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M, V2_QPC_BYTE_24_MTU_S, attr->path_mtu); @@ -2922,11 +2932,9 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, return -EINVAL; } - /* If exist optional param, return error */ - if ((attr_mask & IB_QP_ALT_PATH) || (attr_mask & IB_QP_ACCESS_FLAGS) || - (attr_mask & IB_QP_QKEY) || (attr_mask & IB_QP_PATH_MIG_STATE) || - (attr_mask & IB_QP_CUR_STATE) || - (attr_mask & 
IB_QP_MIN_RNR_TIMER)) { + /* Not support alternate path and path migration */ + if ((attr_mask & IB_QP_ALT_PATH) || + (attr_mask & IB_QP_PATH_MIG_STATE)) { dev_err(dev, "RTR2RTS attr_mask (0x%x)error\n", attr_mask); return -EINVAL; } @@ -3161,7 +3169,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) || (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) || (cur_state == IB_QPS_SQD && new_state == IB_QPS_ERR) || - (cur_state == IB_QPS_SQE && new_state == IB_QPS_ERR)) { + (cur_state == IB_QPS_SQE && new_state == IB_QPS_ERR) || + (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR)) { /* Nothing */ ; } else { @@ -4478,7 +4487,7 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev, ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0, eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS); if (ret) { - dev_err(dev, "[mailbox cmd] creat eqc failed.\n"); + dev_err(dev, "[mailbox cmd] create eqc failed.\n"); goto err_cmd_mbox; } diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index e289a924e789..d4aad34c21e2 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -620,7 +620,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, to_hr_ucontext(ib_pd->uobject->context), ucmd.db_addr, &hr_qp->rdb); if (ret) { - dev_err(dev, "rp record doorbell map failed!\n"); + dev_err(dev, "rq record doorbell map failed!\n"); goto err_mtt; } } diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index 17f4f151a97f..61d8b06375bb 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c @@ -346,7 +346,7 @@ int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va, /* Add to the first block the misalignment that it suffers from. */ total_len += (first_block_start & ((1ULL << block_shift) - 1ULL)); last_block_end = current_block_start + current_block_len; - last_block_aligned_end = round_up(last_block_end, 1 << block_shift); + last_block_aligned_end = round_up(last_block_end, 1ULL << block_shift); total_len += (last_block_aligned_end - last_block_end); if (total_len & ((1ULL << block_shift) - 1ULL)) diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 50af8915e7ec..199648adac74 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -673,7 +673,8 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx, MLX4_IB_RX_HASH_SRC_PORT_TCP | MLX4_IB_RX_HASH_DST_PORT_TCP | MLX4_IB_RX_HASH_SRC_PORT_UDP | - MLX4_IB_RX_HASH_DST_PORT_UDP)) { + MLX4_IB_RX_HASH_DST_PORT_UDP | + MLX4_IB_RX_HASH_INNER)) { pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n", ucmd->rx_hash_fields_mask); return (-EOPNOTSUPP); diff --git a/drivers/infiniband/hw/mlx5/Kconfig b/drivers/infiniband/hw/mlx5/Kconfig index bce263b92821..fb4d77be019b 100644 --- a/drivers/infiniband/hw/mlx5/Kconfig +++ b/drivers/infiniband/hw/mlx5/Kconfig @@ -1,6 +1,7 @@ config MLX5_INFINIBAND tristate "Mellanox Connect-IB HCA support" depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE + depends on INFINIBAND_USER_ACCESS || INFINIBAND_USER_ACCESS=n ---help--- This driver provides low-level InfiniBand support for Mellanox Connect-IB PCI Express host channel adapters (HCAs). 
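The one-line mlx5 Kconfig change above uses the usual "depends on FOO || FOO=n" tristate idiom: with n=0, m=1, y=2 it forbids exactly the combination where INFINIBAND_USER_ACCESS is a module while MLX5_INFINIBAND is built in, which would otherwise leave the built-in driver referencing symbols that only exist in the module. A short C truth table of the constraint:

#include <stdio.h>

enum tristate { N, M, Y };

static int allowed(enum tristate drv, enum tristate dep)
{
        /* "dep || dep=n": if the dependency is off entirely, anything goes;
         * otherwise the driver may not exceed the dependency's value. */
        return dep == N || drv <= dep;
}

int main(void)
{
        static const char * const name[] = { "n", "m", "y" };

        for (int drv = N; drv <= Y; drv++)
                for (int dep = N; dep <= Y; dep++)
                        printf("MLX5_INFINIBAND=%s INFINIBAND_USER_ACCESS=%s -> %s\n",
                               name[drv], name[dep],
                               allowed(drv, dep) ? "ok" : "rejected");
        return 0;
}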
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index daa919e5a442..b4d8ff8ab807 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -52,7 +52,6 @@ #include <linux/mlx5/port.h> #include <linux/mlx5/vport.h> #include <linux/mlx5/fs.h> -#include <linux/mlx5/fs_helpers.h> #include <linux/list.h> #include <rdma/ib_smi.h> #include <rdma/ib_umem.h> @@ -180,7 +179,7 @@ static int mlx5_netdev_event(struct notifier_block *this, if (rep_ndev == ndev) roce->netdev = (event == NETDEV_UNREGISTER) ? NULL : ndev; - } else if (ndev->dev.parent == &ibdev->mdev->pdev->dev) { + } else if (ndev->dev.parent == &mdev->pdev->dev) { roce->netdev = (event == NETDEV_UNREGISTER) ? NULL : ndev; } @@ -4757,7 +4756,7 @@ mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector) { struct mlx5_ib_dev *dev = to_mdev(ibdev); - return mlx5_get_vector_affinity(dev->mdev, comp_vector); + return mlx5_get_vector_affinity_hint(dev->mdev, comp_vector); } /* The mlx5_ib_multiport_mutex should be held when calling this function */ @@ -5427,9 +5426,7 @@ static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev) static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev) { dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev); - if (!dev->mdev->priv.uar) - return -ENOMEM; - return 0; + return PTR_ERR_OR_ZERO(dev->mdev->priv.uar); } static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev) diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 1520a2f20f98..90a9c461cedc 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -866,25 +866,28 @@ static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length, int *order) { struct mlx5_ib_dev *dev = to_mdev(pd->device); + struct ib_umem *u; int err; - *umem = ib_umem_get(pd->uobject->context, start, length, - access_flags, 0); - err = PTR_ERR_OR_ZERO(*umem); + *umem = NULL; + + u = ib_umem_get(pd->uobject->context, start, length, access_flags, 0); + err = PTR_ERR_OR_ZERO(u); if (err) { - *umem = NULL; - mlx5_ib_err(dev, "umem get failed (%d)\n", err); + mlx5_ib_dbg(dev, "umem get failed (%d)\n", err); return err; } - mlx5_ib_cont_pages(*umem, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages, + mlx5_ib_cont_pages(u, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages, page_shift, ncont, order); if (!*npages) { mlx5_ib_warn(dev, "avoid zero region\n"); - ib_umem_release(*umem); + ib_umem_release(u); return -EINVAL; } + *umem = u; + mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n", *npages, *ncont, *order, *page_shift); @@ -1458,13 +1461,12 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, int access_flags = flags & IB_MR_REREG_ACCESS ? new_access_flags : mr->access_flags; - u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address; - u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length; int page_shift = 0; int upd_flags = 0; int npages = 0; int ncont = 0; int order = 0; + u64 addr, len; int err; mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n", @@ -1472,6 +1474,17 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, atomic_sub(mr->npages, &dev->mdev->priv.reg_pages); + if (!mr->umem) + return -EINVAL; + + if (flags & IB_MR_REREG_TRANS) { + addr = virt_addr; + len = length; + } else { + addr = mr->umem->address; + len = mr->umem->length; + } + if (flags != IB_MR_REREG_PD) { /* * Replace umem. 
This needs to be done whether or not UMR is @@ -1479,6 +1492,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, */ flags |= IB_MR_REREG_TRANS; ib_umem_release(mr->umem); + mr->umem = NULL; err = mr_umem_get(pd, addr, len, access_flags, &mr->umem, &npages, &page_shift, &ncont, &order); if (err) diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 7ed4b70f6447..87b7c1be2a11 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -259,7 +259,11 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap, } else { if (ucmd) { qp->rq.wqe_cnt = ucmd->rq_wqe_count; + if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift)) + return -EINVAL; qp->rq.wqe_shift = ucmd->rq_wqe_shift; + if ((1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) < qp->wq_sig) + return -EINVAL; qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig; qp->rq.max_post = qp->rq.wqe_cnt; } else { @@ -2451,18 +2455,18 @@ enum { static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate) { - if (rate == IB_RATE_PORT_CURRENT) { + if (rate == IB_RATE_PORT_CURRENT) return 0; - } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) { + + if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) return -EINVAL; - } else { - while (rate != IB_RATE_2_5_GBPS && - !(1 << (rate + MLX5_STAT_RATE_OFFSET) & - MLX5_CAP_GEN(dev->mdev, stat_rate_support))) - --rate; - } - return rate + MLX5_STAT_RATE_OFFSET; + while (rate != IB_RATE_PORT_CURRENT && + !(1 << (rate + MLX5_STAT_RATE_OFFSET) & + MLX5_CAP_GEN(dev->mdev, stat_rate_support))) + --rate; + + return rate ? rate + MLX5_STAT_RATE_OFFSET : rate; } static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev, diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index 0a75164cedea..007d5e8a0121 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c @@ -461,7 +461,7 @@ static bool nes_nic_send(struct sk_buff *skb, struct net_device *netdev) /** * nes_netdev_start_xmit */ -static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct nes_vnic *nesvnic = netdev_priv(netdev); struct nes_device *nesdev = nesvnic->nesdev; diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.c b/drivers/infiniband/sw/rxe/rxe_opcode.c index 61927c165b59..4cf11063e0b5 100644 --- a/drivers/infiniband/sw/rxe/rxe_opcode.c +++ b/drivers/infiniband/sw/rxe/rxe_opcode.c @@ -390,7 +390,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { .name = "IB_OPCODE_RC_SEND_ONLY_INV", .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK - | RXE_END_MASK, + | RXE_END_MASK | RXE_START_MASK, .length = RXE_BTH_BYTES + RXE_IETH_BYTES, .offset = { [RXE_BTH] = 0, diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index 7bdaf71b8221..785199990457 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c @@ -728,7 +728,6 @@ next_wqe: rollback_state(wqe, qp, &rollback_wqe, rollback_psn); if (ret == -EAGAIN) { - kfree_skb(skb); rxe_run_task(&qp->req.task, 1); goto exit; } diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index a65c9969f7fc..955ff3b6da9c 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ 
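The mlx5 qp.c hunk above validates the user-supplied rq_wqe_shift before using it: an oversized shift makes "1 << shift" undefined, and a too-small result can underflow the max_gs computation. A userspace sketch of the same idea; the bound of 32 is just for the 32-bit shift in this sketch, while the driver bounds the value by the width of the uABI field:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define SEG_SIZE 16u    /* stands in for sizeof(struct mlx5_wqe_data_seg) */

static int rq_max_gs(uint8_t user_shift, unsigned int reserved)
{
        if (user_shift >= 32)                   /* would overflow 1u << shift */
                return -EINVAL;
        if ((1u << user_shift) / SEG_SIZE < reserved)
                return -EINVAL;                 /* would underflow below      */
        return (int)((1u << user_shift) / SEG_SIZE - reserved);
}

int main(void)
{
        printf("shift 6, reserved 1 -> %d entries\n", rq_max_gs(6, 1));
        printf("shift 40, reserved 1 -> %d\n", rq_max_gs(40, 1));
        printf("shift 2, reserved 1 -> %d\n", rq_max_gs(2, 1));
        return 0;
}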
-742,7 +742,6 @@ static enum resp_states read_reply(struct rxe_qp *qp, err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb); if (err) { pr_err("Failed sending RDMA reply.\n"); - kfree_skb(skb); return RESPST_ERR_RNR; } @@ -954,10 +953,8 @@ static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, } err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb); - if (err) { + if (err) pr_err_ratelimited("Failed sending ack\n"); - kfree_skb(skb); - } err1: return err; @@ -1141,7 +1138,6 @@ static enum resp_states duplicate_request(struct rxe_qp *qp, if (rc) { pr_err("Failed resending result. This flow is not handled - skb ignored\n"); rxe_drop_ref(qp); - kfree_skb(skb_copy); rc = RESPST_CLEANUP; goto out; } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 161ba8c76285..cf291f90b58f 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -1094,7 +1094,7 @@ drop_and_unlock: spin_unlock_irqrestore(&priv->lock, flags); } -static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ipoib_dev_priv *priv = ipoib_priv(dev); struct rdma_netdev *rn = netdev_priv(dev); diff --git a/drivers/infiniband/ulp/srp/Kconfig b/drivers/infiniband/ulp/srp/Kconfig index c74ee9633041..99db8fe5173a 100644 --- a/drivers/infiniband/ulp/srp/Kconfig +++ b/drivers/infiniband/ulp/srp/Kconfig @@ -1,6 +1,6 @@ config INFINIBAND_SRP tristate "InfiniBand SCSI RDMA Protocol" - depends on SCSI + depends on SCSI && INFINIBAND_ADDR_TRANS select SCSI_SRP_ATTRS ---help--- Support for the SCSI RDMA Protocol over InfiniBand. This diff --git a/drivers/infiniband/ulp/srpt/Kconfig b/drivers/infiniband/ulp/srpt/Kconfig index 31ee83d528d9..fb8b7182f05e 100644 --- a/drivers/infiniband/ulp/srpt/Kconfig +++ b/drivers/infiniband/ulp/srpt/Kconfig @@ -1,6 +1,6 @@ config INFINIBAND_SRPT tristate "InfiniBand SCSI RDMA Protocol target support" - depends on INFINIBAND && TARGET_CORE + depends on INFINIBAND && INFINIBAND_ADDR_TRANS && TARGET_CORE ---help--- Support for the SCSI RDMA Protocol (SRP) Target driver. 
The diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index 46115a392098..c81c79d01d93 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c @@ -31,6 +31,7 @@ enum evdev_clock_type { EV_CLK_REAL = 0, EV_CLK_MONO, + EV_CLK_BOOT, EV_CLK_MAX }; @@ -197,10 +198,12 @@ static int evdev_set_clk_type(struct evdev_client *client, unsigned int clkid) case CLOCK_REALTIME: clk_type = EV_CLK_REAL; break; - case CLOCK_BOOTTIME: case CLOCK_MONOTONIC: clk_type = EV_CLK_MONO; break; + case CLOCK_BOOTTIME: + clk_type = EV_CLK_BOOT; + break; default: return -EINVAL; } @@ -311,6 +314,8 @@ static void evdev_events(struct input_handle *handle, ev_time[EV_CLK_MONO] = ktime_get(); ev_time[EV_CLK_REAL] = ktime_mono_to_real(ev_time[EV_CLK_MONO]); + ev_time[EV_CLK_BOOT] = ktime_mono_to_any(ev_time[EV_CLK_MONO], + TK_OFFS_BOOT); rcu_read_lock(); diff --git a/drivers/input/input-leds.c b/drivers/input/input-leds.c index 766bf2660116..5f04b2d94635 100644 --- a/drivers/input/input-leds.c +++ b/drivers/input/input-leds.c @@ -88,6 +88,7 @@ static int input_leds_connect(struct input_handler *handler, const struct input_device_id *id) { struct input_leds *leds; + struct input_led *led; unsigned int num_leds; unsigned int led_code; int led_no; @@ -119,14 +120,13 @@ static int input_leds_connect(struct input_handler *handler, led_no = 0; for_each_set_bit(led_code, dev->ledbit, LED_CNT) { - struct input_led *led = &leds->leds[led_no]; + if (!input_led_info[led_code].name) + continue; + led = &leds->leds[led_no]; led->handle = &leds->handle; led->code = led_code; - if (!input_led_info[led_code].name) - continue; - led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s", dev_name(&dev->dev), input_led_info[led_code].name); diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 0a67f235ba88..38f9501acdf0 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -583,7 +583,7 @@ static void alps_process_trackstick_packet_v3(struct psmouse *psmouse) x = (s8)(((packet[0] & 0x20) << 2) | (packet[1] & 0x7f)); y = (s8)(((packet[0] & 0x10) << 3) | (packet[2] & 0x7f)); - z = packet[4] & 0x7c; + z = packet[4] & 0x7f; /* * The x and y values tend to be quite large, and when used diff --git a/drivers/input/rmi4/rmi_spi.c b/drivers/input/rmi4/rmi_spi.c index 76edbf2c1bce..082defc329a8 100644 --- a/drivers/input/rmi4/rmi_spi.c +++ b/drivers/input/rmi4/rmi_spi.c @@ -147,8 +147,11 @@ static int rmi_spi_xfer(struct rmi_spi_xport *rmi_spi, if (len > RMI_SPI_XFER_SIZE_LIMIT) return -EINVAL; - if (rmi_spi->xfer_buf_size < len) - rmi_spi_manage_pools(rmi_spi, len); + if (rmi_spi->xfer_buf_size < len) { + ret = rmi_spi_manage_pools(rmi_spi, len); + if (ret < 0) + return ret; + } if (addr == 0) /* diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index 4f15496fec8b..3e613afa10b4 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig @@ -362,7 +362,7 @@ config TOUCHSCREEN_HIDEEP If unsure, say N. - To compile this driver as a moudle, choose M here : the + To compile this driver as a module, choose M here : the module will be called hideep_ts. 
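The evdev hunk above gives CLOCK_BOOTTIME its own EV_CLK_BOOT slot instead of silently mapping it to the monotonic clock: boottime keeps advancing while the system is suspended, monotonic does not, so the two timestamps can legitimately differ. A small userspace check of that difference (Linux-specific clock IDs; the numbers depend on how long the machine has been suspended):

#define _GNU_SOURCE
#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec mono, boot;

        clock_gettime(CLOCK_MONOTONIC, &mono);
        clock_gettime(CLOCK_BOOTTIME, &boot);

        printf("monotonic: %lld.%09ld s\n", (long long)mono.tv_sec, mono.tv_nsec);
        printf("boottime:  %lld.%09ld s\n", (long long)boot.tv_sec, boot.tv_nsec);
        printf("difference ~= %lld s spent suspended\n",
               (long long)(boot.tv_sec - mono.tv_sec));
        return 0;
}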
config TOUCHSCREEN_ILI210X diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c index 5d9699fe1b55..09194721aed2 100644 --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c @@ -280,7 +280,8 @@ struct mxt_data { struct input_dev *input_dev; char phys[64]; /* device physical location */ struct mxt_object *object_table; - struct mxt_info info; + struct mxt_info *info; + void *raw_info_block; unsigned int irq; unsigned int max_x; unsigned int max_y; @@ -460,12 +461,13 @@ static int mxt_lookup_bootloader_address(struct mxt_data *data, bool retry) { u8 appmode = data->client->addr; u8 bootloader; + u8 family_id = data->info ? data->info->family_id : 0; switch (appmode) { case 0x4a: case 0x4b: /* Chips after 1664S use different scheme */ - if (retry || data->info.family_id >= 0xa2) { + if (retry || family_id >= 0xa2) { bootloader = appmode - 0x24; break; } @@ -692,7 +694,7 @@ mxt_get_object(struct mxt_data *data, u8 type) struct mxt_object *object; int i; - for (i = 0; i < data->info.object_num; i++) { + for (i = 0; i < data->info->object_num; i++) { object = data->object_table + i; if (object->type == type) return object; @@ -1462,12 +1464,12 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg) data_pos += offset; } - if (cfg_info.family_id != data->info.family_id) { + if (cfg_info.family_id != data->info->family_id) { dev_err(dev, "Family ID mismatch!\n"); return -EINVAL; } - if (cfg_info.variant_id != data->info.variant_id) { + if (cfg_info.variant_id != data->info->variant_id) { dev_err(dev, "Variant ID mismatch!\n"); return -EINVAL; } @@ -1512,7 +1514,7 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg) /* Malloc memory to store configuration */ cfg_start_ofs = MXT_OBJECT_START + - data->info.object_num * sizeof(struct mxt_object) + + data->info->object_num * sizeof(struct mxt_object) + MXT_INFO_CHECKSUM_SIZE; config_mem_size = data->mem_size - cfg_start_ofs; config_mem = kzalloc(config_mem_size, GFP_KERNEL); @@ -1563,20 +1565,6 @@ release_mem: return ret; } -static int mxt_get_info(struct mxt_data *data) -{ - struct i2c_client *client = data->client; - struct mxt_info *info = &data->info; - int error; - - /* Read 7-byte info block starting at address 0 */ - error = __mxt_read_reg(client, 0, sizeof(*info), info); - if (error) - return error; - - return 0; -} - static void mxt_free_input_device(struct mxt_data *data) { if (data->input_dev) { @@ -1591,9 +1579,10 @@ static void mxt_free_object_table(struct mxt_data *data) video_unregister_device(&data->dbg.vdev); v4l2_device_unregister(&data->dbg.v4l2); #endif - - kfree(data->object_table); data->object_table = NULL; + data->info = NULL; + kfree(data->raw_info_block); + data->raw_info_block = NULL; kfree(data->msg_buf); data->msg_buf = NULL; data->T5_address = 0; @@ -1609,34 +1598,18 @@ static void mxt_free_object_table(struct mxt_data *data) data->max_reportid = 0; } -static int mxt_get_object_table(struct mxt_data *data) +static int mxt_parse_object_table(struct mxt_data *data, + struct mxt_object *object_table) { struct i2c_client *client = data->client; - size_t table_size; - struct mxt_object *object_table; - int error; int i; u8 reportid; u16 end_address; - table_size = data->info.object_num * sizeof(struct mxt_object); - object_table = kzalloc(table_size, GFP_KERNEL); - if (!object_table) { - dev_err(&data->client->dev, "Failed to allocate memory\n"); - return -ENOMEM; - } - - error = 
__mxt_read_reg(client, MXT_OBJECT_START, table_size, - object_table); - if (error) { - kfree(object_table); - return error; - } - /* Valid Report IDs start counting from 1 */ reportid = 1; data->mem_size = 0; - for (i = 0; i < data->info.object_num; i++) { + for (i = 0; i < data->info->object_num; i++) { struct mxt_object *object = object_table + i; u8 min_id, max_id; @@ -1660,8 +1633,8 @@ static int mxt_get_object_table(struct mxt_data *data) switch (object->type) { case MXT_GEN_MESSAGE_T5: - if (data->info.family_id == 0x80 && - data->info.version < 0x20) { + if (data->info->family_id == 0x80 && + data->info->version < 0x20) { /* * On mXT224 firmware versions prior to V2.0 * read and discard unused CRC byte otherwise @@ -1716,24 +1689,102 @@ static int mxt_get_object_table(struct mxt_data *data) /* If T44 exists, T5 position has to be directly after */ if (data->T44_address && (data->T5_address != data->T44_address + 1)) { dev_err(&client->dev, "Invalid T44 position\n"); - error = -EINVAL; - goto free_object_table; + return -EINVAL; } data->msg_buf = kcalloc(data->max_reportid, data->T5_msg_size, GFP_KERNEL); - if (!data->msg_buf) { - dev_err(&client->dev, "Failed to allocate message buffer\n"); + if (!data->msg_buf) + return -ENOMEM; + + return 0; +} + +static int mxt_read_info_block(struct mxt_data *data) +{ + struct i2c_client *client = data->client; + int error; + size_t size; + void *id_buf, *buf; + uint8_t num_objects; + u32 calculated_crc; + u8 *crc_ptr; + + /* If info block already allocated, free it */ + if (data->raw_info_block) + mxt_free_object_table(data); + + /* Read 7-byte ID information block starting at address 0 */ + size = sizeof(struct mxt_info); + id_buf = kzalloc(size, GFP_KERNEL); + if (!id_buf) + return -ENOMEM; + + error = __mxt_read_reg(client, 0, size, id_buf); + if (error) + goto err_free_mem; + + /* Resize buffer to give space for rest of info block */ + num_objects = ((struct mxt_info *)id_buf)->object_num; + size += (num_objects * sizeof(struct mxt_object)) + + MXT_INFO_CHECKSUM_SIZE; + + buf = krealloc(id_buf, size, GFP_KERNEL); + if (!buf) { error = -ENOMEM; - goto free_object_table; + goto err_free_mem; + } + id_buf = buf; + + /* Read rest of info block */ + error = __mxt_read_reg(client, MXT_OBJECT_START, + size - MXT_OBJECT_START, + id_buf + MXT_OBJECT_START); + if (error) + goto err_free_mem; + + /* Extract & calculate checksum */ + crc_ptr = id_buf + size - MXT_INFO_CHECKSUM_SIZE; + data->info_crc = crc_ptr[0] | (crc_ptr[1] << 8) | (crc_ptr[2] << 16); + + calculated_crc = mxt_calculate_crc(id_buf, 0, + size - MXT_INFO_CHECKSUM_SIZE); + + /* + * CRC mismatch can be caused by data corruption due to I2C comms + * issue or else device is not using Object Based Protocol (eg i2c-hid) + */ + if ((data->info_crc == 0) || (data->info_crc != calculated_crc)) { + dev_err(&client->dev, + "Info Block CRC error calculated=0x%06X read=0x%06X\n", + calculated_crc, data->info_crc); + error = -EIO; + goto err_free_mem; + } + + data->raw_info_block = id_buf; + data->info = (struct mxt_info *)id_buf; + + dev_info(&client->dev, + "Family: %u Variant: %u Firmware V%u.%u.%02X Objects: %u\n", + data->info->family_id, data->info->variant_id, + data->info->version >> 4, data->info->version & 0xf, + data->info->build, data->info->object_num); + + /* Parse object table information */ + error = mxt_parse_object_table(data, id_buf + MXT_OBJECT_START); + if (error) { + dev_err(&client->dev, "Error %d parsing object table\n", error); + mxt_free_object_table(data); + goto 
err_free_mem; } - data->object_table = object_table; + data->object_table = (struct mxt_object *)(id_buf + MXT_OBJECT_START); return 0; -free_object_table: - mxt_free_object_table(data); +err_free_mem: + kfree(id_buf); return error; } @@ -2046,7 +2097,7 @@ static int mxt_initialize(struct mxt_data *data) int error; while (1) { - error = mxt_get_info(data); + error = mxt_read_info_block(data); if (!error) break; @@ -2077,16 +2128,9 @@ static int mxt_initialize(struct mxt_data *data) msleep(MXT_FW_RESET_TIME); } - /* Get object table information */ - error = mxt_get_object_table(data); - if (error) { - dev_err(&client->dev, "Error %d reading object table\n", error); - return error; - } - error = mxt_acquire_irq(data); if (error) - goto err_free_object_table; + return error; error = request_firmware_nowait(THIS_MODULE, true, MXT_CFG_NAME, &client->dev, GFP_KERNEL, data, @@ -2094,14 +2138,10 @@ static int mxt_initialize(struct mxt_data *data) if (error) { dev_err(&client->dev, "Failed to invoke firmware loader: %d\n", error); - goto err_free_object_table; + return error; } return 0; - -err_free_object_table: - mxt_free_object_table(data); - return error; } static int mxt_set_t7_power_cfg(struct mxt_data *data, u8 sleep) @@ -2162,7 +2202,7 @@ recheck: static u16 mxt_get_debug_value(struct mxt_data *data, unsigned int x, unsigned int y) { - struct mxt_info *info = &data->info; + struct mxt_info *info = data->info; struct mxt_dbg *dbg = &data->dbg; unsigned int ofs, page; unsigned int col = 0; @@ -2490,7 +2530,7 @@ static const struct video_device mxt_video_device = { static void mxt_debug_init(struct mxt_data *data) { - struct mxt_info *info = &data->info; + struct mxt_info *info = data->info; struct mxt_dbg *dbg = &data->dbg; struct mxt_object *object; int error; @@ -2576,7 +2616,6 @@ static int mxt_configure_objects(struct mxt_data *data, const struct firmware *cfg) { struct device *dev = &data->client->dev; - struct mxt_info *info = &data->info; int error; error = mxt_init_t7_power_cfg(data); @@ -2601,11 +2640,6 @@ static int mxt_configure_objects(struct mxt_data *data, mxt_debug_init(data); - dev_info(dev, - "Family: %u Variant: %u Firmware V%u.%u.%02X Objects: %u\n", - info->family_id, info->variant_id, info->version >> 4, - info->version & 0xf, info->build, info->object_num); - return 0; } @@ -2614,7 +2648,7 @@ static ssize_t mxt_fw_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mxt_data *data = dev_get_drvdata(dev); - struct mxt_info *info = &data->info; + struct mxt_info *info = data->info; return scnprintf(buf, PAGE_SIZE, "%u.%u.%02X\n", info->version >> 4, info->version & 0xf, info->build); } @@ -2624,7 +2658,7 @@ static ssize_t mxt_hw_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mxt_data *data = dev_get_drvdata(dev); - struct mxt_info *info = &data->info; + struct mxt_info *info = data->info; return scnprintf(buf, PAGE_SIZE, "%u.%u\n", info->family_id, info->variant_id); } @@ -2663,7 +2697,7 @@ static ssize_t mxt_object_show(struct device *dev, return -ENOMEM; error = 0; - for (i = 0; i < data->info.object_num; i++) { + for (i = 0; i < data->info->object_num; i++) { object = data->object_table + i; if (!mxt_object_readable(object->type)) @@ -3035,6 +3069,15 @@ static const struct dmi_system_id mxt_dmi_table[] = { .driver_data = samus_platform_data, }, { + /* Samsung Chromebook Pro */ + .ident = "Samsung Chromebook Pro", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Google"), + DMI_MATCH(DMI_PRODUCT_NAME, 
"Caroline"), + }, + .driver_data = samus_platform_data, + }, + { /* Other Google Chromebooks */ .ident = "Chromebook", .matches = { @@ -3254,6 +3297,11 @@ static SIMPLE_DEV_PM_OPS(mxt_pm_ops, mxt_suspend, mxt_resume); static const struct of_device_id mxt_of_match[] = { { .compatible = "atmel,maxtouch", }, + /* Compatibles listed below are deprecated */ + { .compatible = "atmel,qt602240_ts", }, + { .compatible = "atmel,atmel_mxt_ts", }, + { .compatible = "atmel,atmel_mxt_tp", }, + { .compatible = "atmel,mXT224", }, {}, }; MODULE_DEVICE_TABLE(of, mxt_of_match); diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 2a99f0f14795..8fb8c737fffe 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -83,7 +83,6 @@ static DEFINE_SPINLOCK(amd_iommu_devtable_lock); static DEFINE_SPINLOCK(pd_bitmap_lock); -static DEFINE_SPINLOCK(iommu_table_lock); /* List of all available dev_data structures */ static LLIST_HEAD(dev_data_list); @@ -3562,6 +3561,7 @@ EXPORT_SYMBOL(amd_iommu_device_info); *****************************************************************************/ static struct irq_chip amd_ir_chip; +static DEFINE_SPINLOCK(iommu_table_lock); static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table) { diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index f05f3cf90756..ddcbbdb5d658 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -167,40 +167,16 @@ EXPORT_SYMBOL(iommu_put_dma_cookie); * @list: Reserved region list from iommu_get_resv_regions() * * IOMMU drivers can use this to implement their .get_resv_regions callback - * for general non-IOMMU-specific reservations. Currently, this covers host - * bridge windows for PCI devices and GICv3 ITS region reservation on ACPI - * based ARM platforms that may require HW MSI reservation. + * for general non-IOMMU-specific reservations. Currently, this covers GICv3 + * ITS region reservation on ACPI based ARM platforms that may require HW MSI + * reservation. 
*/ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list) { - struct pci_host_bridge *bridge; - struct resource_entry *window; - - if (!is_of_node(dev->iommu_fwspec->iommu_fwnode) && - iort_iommu_msi_get_resv_regions(dev, list) < 0) - return; - - if (!dev_is_pci(dev)) - return; - - bridge = pci_find_host_bridge(to_pci_dev(dev)->bus); - resource_list_for_each_entry(window, &bridge->windows) { - struct iommu_resv_region *region; - phys_addr_t start; - size_t length; - - if (resource_type(window->res) != IORESOURCE_MEM) - continue; - start = window->res->start - window->offset; - length = window->res->end - window->res->start + 1; - region = iommu_alloc_resv_region(start, length, 0, - IOMMU_RESV_RESERVED); - if (!region) - return; + if (!is_of_node(dev->iommu_fwspec->iommu_fwnode)) + iort_iommu_msi_get_resv_regions(dev, list); - list_add_tail(®ion->list, list); - } } EXPORT_SYMBOL(iommu_dma_get_resv_regions); @@ -229,6 +205,23 @@ static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie, return 0; } +static void iova_reserve_pci_windows(struct pci_dev *dev, + struct iova_domain *iovad) +{ + struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus); + struct resource_entry *window; + unsigned long lo, hi; + + resource_list_for_each_entry(window, &bridge->windows) { + if (resource_type(window->res) != IORESOURCE_MEM) + continue; + + lo = iova_pfn(iovad, window->res->start - window->offset); + hi = iova_pfn(iovad, window->res->end - window->offset); + reserve_iova(iovad, lo, hi); + } +} + static int iova_reserve_iommu_regions(struct device *dev, struct iommu_domain *domain) { @@ -238,6 +231,9 @@ static int iova_reserve_iommu_regions(struct device *dev, LIST_HEAD(resv_regions); int ret = 0; + if (dev_is_pci(dev)) + iova_reserve_pci_windows(to_pci_dev(dev), iovad); + iommu_get_resv_regions(dev, &resv_regions); list_for_each_entry(region, &resv_regions, list) { unsigned long lo, hi; diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index accf58388bdb..460bed4fc5b1 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -1345,7 +1345,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep, struct qi_desc desc; if (mask) { - BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1)); + WARN_ON_ONCE(addr & ((1ULL << (VTD_PAGE_SHIFT + mask)) - 1)); addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1; desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE; } else diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index 66f69af2c219..3062a154a9fb 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c @@ -1136,7 +1136,7 @@ static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force) irte->dest_id = IRTE_DEST(cfg->dest_apicid); /* Update the hardware only if the interrupt is in remapped mode. 
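Two things change in the qi_flush_dev_iotlb() hunk above: the check is demoted from BUG_ON() to WARN_ON_ONCE(), and the alignment mask is built with 1ULL rather than a plain 1. The second part is what keeps the check meaningful for large invalidation sizes: with a page shift of 12 (the usual VTD_PAGE_SHIFT value, assumed here) and a big enough mask, the shift no longer fits in a 32-bit int at all, so the mask has to be computed in 64 bits. A stand-alone illustration of that arithmetic, with no claim to be the kernel code:

/* Demonstrates why the mask in the hunk above must be built as 1ULL << n. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT_ASSUMED 12           /* VTD_PAGE_SHIFT assumed to be 12 */

int main(void)
{
        uint64_t addr = 0x180000000ULL; /* example address: 6 GiB */
        unsigned int mask;

        for (mask = 0; mask <= 26; mask += 13) {
                unsigned int shift = PAGE_SHIFT_ASSUMED + mask;
                /* For shift >= 32 a 32-bit "1 << shift" is undefined; 1ULL is fine. */
                uint64_t align = (1ULL << shift) - 1;

                printf("mask=%2u shift=%2u align=0x%016" PRIx64 " addr %s aligned\n",
                       mask, shift, align,
                       (addr & align) ? "is NOT" : "is");
        }
        return 0;
}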
*/ - if (!force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING) + if (force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING) modify_irte(&ir_data->irq_2_iommu, irte); } diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 5fc8656c60f9..0468acfa131f 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -1098,7 +1098,7 @@ static int rk_iommu_of_xlate(struct device *dev, data->iommu = platform_get_drvdata(iommu_dev); dev->archdata.iommu = data; - of_dev_put(iommu_dev); + platform_device_put(iommu_dev); return 0; } @@ -1175,8 +1175,15 @@ static int rk_iommu_probe(struct platform_device *pdev) for (i = 0; i < iommu->num_clocks; ++i) iommu->clocks[i].id = rk_iommu_clocks[i]; + /* + * iommu clocks should be present for all new devices and devicetrees + * but there are older devicetrees without clocks out in the wild. + * So clocks as optional for the time being. + */ err = devm_clk_bulk_get(iommu->dev, iommu->num_clocks, iommu->clocks); - if (err) + if (err == -ENOENT) + iommu->num_clocks = 0; + else if (err) return err; err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks); diff --git a/drivers/irqchip/qcom-irq-combiner.c b/drivers/irqchip/qcom-irq-combiner.c index f31265937439..7f0c0be322e0 100644 --- a/drivers/irqchip/qcom-irq-combiner.c +++ b/drivers/irqchip/qcom-irq-combiner.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -68,7 +68,7 @@ static void combiner_handle_irq(struct irq_desc *desc) bit = readl_relaxed(combiner->regs[reg].addr); status = bit & combiner->regs[reg].enabled; - if (!status) + if (bit && !status) pr_warn_ratelimited("Unexpected IRQ on CPU%d: (%08x %08lx %p)\n", smp_processor_id(), bit, combiner->regs[reg].enabled, diff --git a/drivers/isdn/mISDN/dsp_hwec.c b/drivers/isdn/mISDN/dsp_hwec.c index a6e87076acc2..5336bbdbfdc5 100644 --- a/drivers/isdn/mISDN/dsp_hwec.c +++ b/drivers/isdn/mISDN/dsp_hwec.c @@ -68,12 +68,12 @@ void dsp_hwec_enable(struct dsp *dsp, const char *arg) goto _do; { - char _dup[len + 1]; char *dup, *tok, *name, *val; int tmp; - strcpy(_dup, arg); - dup = _dup; + dup = kstrdup(arg, GFP_ATOMIC); + if (!dup) + return; while ((tok = strsep(&dup, ","))) { if (!strlen(tok)) @@ -89,6 +89,8 @@ void dsp_hwec_enable(struct dsp *dsp, const char *arg) deftaps = tmp; } } + + kfree(dup); } _do: diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c index 21d50e4cc5e1..b05022f94f18 100644 --- a/drivers/isdn/mISDN/l1oip_core.c +++ b/drivers/isdn/mISDN/l1oip_core.c @@ -279,7 +279,7 @@ l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask, u16 timebase, u8 *buf, int len) { u8 *p; - u8 frame[len + 32]; + u8 frame[MAX_DFRAME_LEN_L1 + 32]; struct socket *socket = NULL; if (debug & DEBUG_L1OIP_MSG) @@ -902,7 +902,11 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb) p = skb->data; l = skb->len; while (l) { - ll = (l < L1OIP_MAX_PERFRAME) ? l : L1OIP_MAX_PERFRAME; + /* + * This is technically bounded by L1OIP_MAX_PERFRAME but + * MAX_DFRAME_LEN_L1 < L1OIP_MAX_PERFRAME + */ + ll = (l < MAX_DFRAME_LEN_L1) ? 
l : MAX_DFRAME_LEN_L1; l1oip_socket_send(hc, 0, dch->slot, 0, hc->chan[dch->slot].tx_counter++, p, ll); p += ll; @@ -1140,7 +1144,11 @@ handle_bmsg(struct mISDNchannel *ch, struct sk_buff *skb) p = skb->data; l = skb->len; while (l) { - ll = (l < L1OIP_MAX_PERFRAME) ? l : L1OIP_MAX_PERFRAME; + /* + * This is technically bounded by L1OIP_MAX_PERFRAME but + * MAX_DFRAME_LEN_L1 < L1OIP_MAX_PERFRAME + */ + ll = (l < MAX_DFRAME_LEN_L1) ? l : MAX_DFRAME_LEN_L1; l1oip_socket_send(hc, hc->codec, bch->slot, 0, hc->chan[bch->slot].tx_counter, p, ll); hc->chan[bch->slot].tx_counter += ll; diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index 004cc3cc6123..7fa2631b422c 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -290,7 +290,7 @@ do { \ if (kthread_should_stop() || \ test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)) { \ set_current_state(TASK_RUNNING); \ - return 0; \ + goto out; \ } \ \ schedule(); \ @@ -378,6 +378,9 @@ retry_invalidate: bch_prio_write(ca); } } +out: + wait_for_kthread_stop(); + return 0; } /* Allocation */ diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index d338b7086013..3a0cfb237af9 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -392,6 +392,8 @@ struct cached_dev { #define DEFAULT_CACHED_DEV_ERROR_LIMIT 64 atomic_t io_errors; unsigned error_limit; + + char backing_dev_name[BDEVNAME_SIZE]; }; enum alloc_reserve { @@ -464,6 +466,8 @@ struct cache { atomic_long_t meta_sectors_written; atomic_long_t btree_sectors_written; atomic_long_t sectors_written; + + char cache_dev_name[BDEVNAME_SIZE]; }; struct gc_stat { diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c index 028f7b386e01..4e63c6f6c04d 100644 --- a/drivers/md/bcache/debug.c +++ b/drivers/md/bcache/debug.c @@ -106,7 +106,6 @@ void bch_btree_verify(struct btree *b) void bch_data_verify(struct cached_dev *dc, struct bio *bio) { - char name[BDEVNAME_SIZE]; struct bio *check; struct bio_vec bv, cbv; struct bvec_iter iter, citer = { 0 }; @@ -134,7 +133,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio) bv.bv_len), dc->disk.c, "verify failed at dev %s sector %llu", - bdevname(dc->bdev, name), + dc->backing_dev_name, (uint64_t) bio->bi_iter.bi_sector); kunmap_atomic(p1); diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c index 7fac97ae036e..2ddf8515e6a5 100644 --- a/drivers/md/bcache/io.c +++ b/drivers/md/bcache/io.c @@ -52,7 +52,6 @@ void bch_submit_bbio(struct bio *bio, struct cache_set *c, /* IO errors */ void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio) { - char buf[BDEVNAME_SIZE]; unsigned errors; WARN_ONCE(!dc, "NULL pointer of struct cached_dev"); @@ -60,7 +59,7 @@ void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio) errors = atomic_add_return(1, &dc->io_errors); if (errors < dc->error_limit) pr_err("%s: IO error on backing device, unrecoverable", - bio_devname(bio, buf)); + dc->backing_dev_name); else bch_cached_dev_error(dc); } @@ -105,19 +104,18 @@ void bch_count_io_errors(struct cache *ca, } if (error) { - char buf[BDEVNAME_SIZE]; unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT, &ca->io_errors); errors >>= IO_ERROR_SHIFT; if (errors < ca->set->error_limit) pr_err("%s: IO error on %s%s", - bdevname(ca->bdev, buf), m, + ca->cache_dev_name, m, is_read ? ", recovering." 
: "."); else bch_cache_set_error(ca->set, "%s: too many IO errors %s", - bdevname(ca->bdev, buf), m); + ca->cache_dev_name, m); } } diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index a65e3365eeb9..8e3e8655ed63 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -649,11 +649,8 @@ static void backing_request_endio(struct bio *bio) */ if (unlikely(s->iop.writeback && bio->bi_opf & REQ_PREFLUSH)) { - char buf[BDEVNAME_SIZE]; - - bio_devname(bio, buf); pr_err("Can't flush %s: returned bi_status %i", - buf, bio->bi_status); + dc->backing_dev_name, bio->bi_status); } else { /* set to orig_bio->bi_status in bio_complete() */ s->iop.status = bio->bi_status; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index d90d9e59ca00..3dea06b41d43 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -936,7 +936,6 @@ static void cancel_writeback_rate_update_dwork(struct cached_dev *dc) static void cached_dev_detach_finish(struct work_struct *w) { struct cached_dev *dc = container_of(w, struct cached_dev, detach); - char buf[BDEVNAME_SIZE]; struct closure cl; closure_init_stack(&cl); @@ -967,7 +966,7 @@ static void cached_dev_detach_finish(struct work_struct *w) mutex_unlock(&bch_register_lock); - pr_info("Caching disabled for %s", bdevname(dc->bdev, buf)); + pr_info("Caching disabled for %s", dc->backing_dev_name); /* Drop ref we took in cached_dev_detach() */ closure_put(&dc->disk.cl); @@ -999,29 +998,28 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, { uint32_t rtime = cpu_to_le32(get_seconds()); struct uuid_entry *u; - char buf[BDEVNAME_SIZE]; struct cached_dev *exist_dc, *t; - bdevname(dc->bdev, buf); - if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) || (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))) return -ENOENT; if (dc->disk.c) { - pr_err("Can't attach %s: already attached", buf); + pr_err("Can't attach %s: already attached", + dc->backing_dev_name); return -EINVAL; } if (test_bit(CACHE_SET_STOPPING, &c->flags)) { - pr_err("Can't attach %s: shutting down", buf); + pr_err("Can't attach %s: shutting down", + dc->backing_dev_name); return -EINVAL; } if (dc->sb.block_size < c->sb.block_size) { /* Will die */ pr_err("Couldn't attach %s: block size less than set's block size", - buf); + dc->backing_dev_name); return -EINVAL; } @@ -1029,7 +1027,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) { if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) { pr_err("Tried to attach %s but duplicate UUID already attached", - buf); + dc->backing_dev_name); return -EINVAL; } @@ -1047,13 +1045,15 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, if (!u) { if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { - pr_err("Couldn't find uuid for %s in set", buf); + pr_err("Couldn't find uuid for %s in set", + dc->backing_dev_name); return -ENOENT; } u = uuid_find_empty(c); if (!u) { - pr_err("Not caching %s, no room for UUID", buf); + pr_err("Not caching %s, no room for UUID", + dc->backing_dev_name); return -EINVAL; } } @@ -1112,7 +1112,8 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, up_write(&dc->writeback_lock); pr_info("Caching %s as %s on set %pU", - bdevname(dc->bdev, buf), dc->disk.disk->disk_name, + dc->backing_dev_name, + dc->disk.disk->disk_name, dc->disk.c->sb.set_uuid); return 0; } @@ -1225,10 +1226,10 @@ static void register_bdev(struct cache_sb *sb, struct 
page *sb_page, struct block_device *bdev, struct cached_dev *dc) { - char name[BDEVNAME_SIZE]; const char *err = "cannot allocate memory"; struct cache_set *c; + bdevname(bdev, dc->backing_dev_name); memcpy(&dc->sb, sb, sizeof(struct cache_sb)); dc->bdev = bdev; dc->bdev->bd_holder = dc; @@ -1237,6 +1238,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page, bio_first_bvec_all(&dc->sb_bio)->bv_page = sb_page; get_page(sb_page); + if (cached_dev_init(dc, sb->block_size << 9)) goto err; @@ -1247,7 +1249,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page, if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj)) goto err; - pr_info("registered backing device %s", bdevname(bdev, name)); + pr_info("registered backing device %s", dc->backing_dev_name); list_add(&dc->list, &uncached_devices); list_for_each_entry(c, &bch_cache_sets, list) @@ -1259,7 +1261,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page, return; err: - pr_notice("error %s: %s", bdevname(bdev, name), err); + pr_notice("error %s: %s", dc->backing_dev_name, err); bcache_device_stop(&dc->disk); } @@ -1367,7 +1369,7 @@ int bch_flash_dev_create(struct cache_set *c, uint64_t size) bool bch_cached_dev_error(struct cached_dev *dc) { - char name[BDEVNAME_SIZE]; + struct cache_set *c; if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags)) return false; @@ -1377,7 +1379,22 @@ bool bch_cached_dev_error(struct cached_dev *dc) smp_mb(); pr_err("stop %s: too many IO errors on backing device %s\n", - dc->disk.disk->disk_name, bdevname(dc->bdev, name)); + dc->disk.disk->disk_name, dc->backing_dev_name); + + /* + * If the cached device is still attached to a cache set, + * even dc->io_disable is true and no more I/O requests + * accepted, cache device internal I/O (writeback scan or + * garbage collection) may still prevent bcache device from + * being stopped. So here CACHE_SET_IO_DISABLE should be + * set to c->flags too, to make the internal I/O to cache + * device rejected and stopped immediately. + * If c is NULL, that means the bcache device is not attached + * to any cache set, then no CACHE_SET_IO_DISABLE bit to set. + */ + c = dc->disk.c; + if (c && test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags)) + pr_info("CACHE_SET_IO_DISABLE already set"); bcache_device_stop(&dc->disk); return true; @@ -1395,7 +1412,7 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...) return false; if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags)) - pr_warn("CACHE_SET_IO_DISABLE already set"); + pr_info("CACHE_SET_IO_DISABLE already set"); /* XXX: we can be called from atomic context acquire_console_sem(); @@ -1539,6 +1556,20 @@ static void conditional_stop_bcache_device(struct cache_set *c, */ pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.", d->disk->disk_name); + /* + * There might be a small time gap that cache set is + * released but bcache device is not. Inside this time + * gap, regular I/O requests will directly go into + * backing device as no cache set attached to. This + * behavior may also introduce potential inconsistence + * data in writeback mode while cache is dirty. + * Therefore before calling bcache_device_stop() due + * to a broken cache device, dc->io_disable should be + * explicitly set to true. 
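The comment above, and the two lines the hunk adds right after it, spell out a simple ordering: mark the backing device as rejecting I/O first, make that store visible to other CPUs, and only then start tearing the device down. The user-space sketch below shows the same shape with C11 atomics as a rough analogue of the kernel's flag-plus-smp_mb() code; all names here are invented for the sketch and none of it is part of the patch.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct cached_dev_sketch {
        atomic_bool io_disable;         /* stands in for dc->io_disable */
        /* ... backing device state would live here ... */
};

/* Error path: publish the flag before any teardown step runs. */
static void stop_on_cache_failure(struct cached_dev_sketch *dc)
{
        atomic_store(&dc->io_disable, true);    /* seq_cst store, globally visible */
        printf("stopping bcache device; new I/O now rejected\n");
        /* ... the bcache_device_stop() equivalent would follow here ... */
}

/* Submission path: check the flag before touching the backing device. */
static int submit_io(struct cached_dev_sketch *dc, long sector)
{
        if (atomic_load(&dc->io_disable))
                return -5;                      /* like -EIO */
        printf("I/O to sector %ld accepted\n", sector);
        return 0;
}

int main(void)
{
        struct cached_dev_sketch dc;

        atomic_init(&dc.io_disable, false);
        submit_io(&dc, 8);
        stop_on_cache_failure(&dc);
        return submit_io(&dc, 16) ? 0 : 1;      /* second I/O must be refused */
}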
+ */ + dc->io_disable = true; + /* make others know io_disable is true earlier */ + smp_mb(); bcache_device_stop(d); } else { /* @@ -2003,12 +2034,10 @@ static int cache_alloc(struct cache *ca) static int register_cache(struct cache_sb *sb, struct page *sb_page, struct block_device *bdev, struct cache *ca) { - char name[BDEVNAME_SIZE]; const char *err = NULL; /* must be set for any error case */ int ret = 0; - bdevname(bdev, name); - + bdevname(bdev, ca->cache_dev_name); memcpy(&ca->sb, sb, sizeof(struct cache_sb)); ca->bdev = bdev; ca->bdev->bd_holder = ca; @@ -2045,14 +2074,14 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, goto out; } - pr_info("registered cache device %s", name); + pr_info("registered cache device %s", ca->cache_dev_name); out: kobject_put(&ca->kobj); err: if (err) - pr_notice("error %s: %s", name, err); + pr_notice("error %s: %s", ca->cache_dev_name, err); return ret; } diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 4a9547cdcdc5..ad45ebe1a74b 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -244,8 +244,10 @@ static void dirty_endio(struct bio *bio) struct keybuf_key *w = bio->bi_private; struct dirty_io *io = w->private; - if (bio->bi_status) + if (bio->bi_status) { SET_KEY_DIRTY(&w->key, false); + bch_count_backing_io_errors(io->dc, bio); + } closure_put(&io->cl); } diff --git a/drivers/md/md.c b/drivers/md/md.c index 3bea45e8ccff..c208c01f63a5 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -9256,8 +9256,10 @@ void md_reload_sb(struct mddev *mddev, int nr) check_sb_changes(mddev, rdev); /* Read all rdev's to update recovery_offset */ - rdev_for_each_rcu(rdev, mddev) - read_rdev(mddev, rdev); + rdev_for_each_rcu(rdev, mddev) { + if (!test_bit(Faulty, &rdev->flags)) + read_rdev(mddev, rdev); + } } EXPORT_SYMBOL(md_reload_sb); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index e2943fb74056..e9e3308cb0a7 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -854,7 +854,7 @@ static void flush_pending_writes(struct r1conf *conf) * there is no normal IO happeing. It must arrange to call * lower_barrier when the particular background IO completes. */ -static void raise_barrier(struct r1conf *conf, sector_t sector_nr) +static sector_t raise_barrier(struct r1conf *conf, sector_t sector_nr) { int idx = sector_to_idx(sector_nr); @@ -885,13 +885,23 @@ static void raise_barrier(struct r1conf *conf, sector_t sector_nr) * max resync count which allowed on current I/O barrier bucket. 
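The raise_barrier() hunk that continues just below turns an unconditional wait into an interruptible one: the resync thread is also woken when MD_RECOVERY_INTR is set and then backs out with -EINTR instead of blocking forever on a barrier that will never clear. The pthread sketch below shows the same wait-for-condition-or-abort shape; it is not the kernel's wait_event_lock_irq() API, and the names are made up for the example.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

struct barrier_sketch {
        pthread_mutex_t lock;
        pthread_cond_t wait;
        int nr_pending;                 /* in-flight regular I/O */
        bool recovery_interrupted;      /* MD_RECOVERY_INTR analogue */
};

/* Returns 0 once the barrier is raised, -EINTR if recovery was aborted. */
int raise_barrier_sketch(struct barrier_sketch *b)
{
        int ret = 0;

        pthread_mutex_lock(&b->lock);
        while (b->nr_pending > 0 && !b->recovery_interrupted)
                pthread_cond_wait(&b->wait, &b->lock);

        if (b->recovery_interrupted)
                ret = -EINTR;           /* caller skips this resync chunk */
        pthread_mutex_unlock(&b->lock);

        return ret;
}

/* Called by whoever decides the resync must stop. */
void interrupt_recovery(struct barrier_sketch *b)
{
        pthread_mutex_lock(&b->lock);
        b->recovery_interrupted = true;
        pthread_cond_broadcast(&b->wait);
        pthread_mutex_unlock(&b->lock);
}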
*/ wait_event_lock_irq(conf->wait_barrier, - !conf->array_frozen && + (!conf->array_frozen && !atomic_read(&conf->nr_pending[idx]) && - atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH, + atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) || + test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery), conf->resync_lock); + if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { + atomic_dec(&conf->barrier[idx]); + spin_unlock_irq(&conf->resync_lock); + wake_up(&conf->wait_barrier); + return -EINTR; + } + atomic_inc(&conf->nr_sync_pending); spin_unlock_irq(&conf->resync_lock); + + return 0; } static void lower_barrier(struct r1conf *conf, sector_t sector_nr) @@ -1092,6 +1102,8 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio, goto skip_copy; } + behind_bio->bi_write_hint = bio->bi_write_hint; + while (i < vcnt && size) { struct page *page; int len = min_t(int, PAGE_SIZE, size); @@ -2662,9 +2674,12 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, bitmap_cond_end_sync(mddev->bitmap, sector_nr, mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); - r1_bio = raid1_alloc_init_r1buf(conf); - raise_barrier(conf, sector_nr); + + if (raise_barrier(conf, sector_nr)) + return 0; + + r1_bio = raid1_alloc_init_r1buf(conf); rcu_read_lock(); /* diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c index e216cd768409..b07114b5efb2 100644 --- a/drivers/media/i2c/saa7115.c +++ b/drivers/media/i2c/saa7115.c @@ -20,7 +20,7 @@ // // VBI support (2004) and cleanups (2005) by Hans Verkuil <hverkuil@xs4all.nl> // -// Copyright (c) 2005-2006 Mauro Carvalho Chehab <mchehab@infradead.org> +// Copyright (c) 2005-2006 Mauro Carvalho Chehab <mchehab@kernel.org> // SAA7111, SAA7113 and SAA7118 support #include "saa711x_regs.h" diff --git a/drivers/media/i2c/saa711x_regs.h b/drivers/media/i2c/saa711x_regs.h index a50d480e101a..44fabe08234d 100644 --- a/drivers/media/i2c/saa711x_regs.h +++ b/drivers/media/i2c/saa711x_regs.h @@ -2,7 +2,7 @@ * SPDX-License-Identifier: GPL-2.0+ * saa711x - Philips SAA711x video decoder register specifications * - * Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@infradead.org> + * Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@kernel.org> */ #define R_00_CHIP_VERSION 0x00 diff --git a/drivers/media/i2c/tda7432.c b/drivers/media/i2c/tda7432.c index 1c5c61d829d6..9b4f21237810 100644 --- a/drivers/media/i2c/tda7432.c +++ b/drivers/media/i2c/tda7432.c @@ -8,7 +8,7 @@ * Muting and tone control by Jonathan Isom <jisom@ematic.com> * * Copyright (c) 2000 Eric Sandeen <eric_sandeen@bigfoot.com> - * Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@infradead.org> + * Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@kernel.org> * This code is placed under the terms of the GNU General Public License * Based on tda9855.c by Steve VanDeBogart (vandebo@uclink.berkeley.edu) * Which was based on tda8425.c by Greg Alexander (c) 1998 diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c index d528fddbea16..b162c2fe62c3 100644 --- a/drivers/media/i2c/tvp5150.c +++ b/drivers/media/i2c/tvp5150.c @@ -2,7 +2,7 @@ // // tvp5150 - Texas Instruments TVP5150A/AM1 and TVP5151 video decoder driver // -// Copyright (c) 2005,2006 Mauro Carvalho Chehab <mchehab@infradead.org> +// Copyright (c) 2005,2006 Mauro Carvalho Chehab <mchehab@kernel.org> #include <dt-bindings/media/tvp5150.h> #include <linux/i2c.h> diff --git a/drivers/media/i2c/tvp5150_reg.h b/drivers/media/i2c/tvp5150_reg.h index c43b7b844021..d3a764cae1a0 
100644 --- a/drivers/media/i2c/tvp5150_reg.h +++ b/drivers/media/i2c/tvp5150_reg.h @@ -3,7 +3,7 @@ * * tvp5150 - Texas Instruments TVP5150A/AM1 video decoder registers * - * Copyright (c) 2005,2006 Mauro Carvalho Chehab <mchehab@infradead.org> + * Copyright (c) 2005,2006 Mauro Carvalho Chehab <mchehab@kernel.org> */ #define TVP5150_VD_IN_SRC_SEL_1 0x00 /* Video input source selection #1 */ diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c index a26c1a3f7183..4599b7e28a8d 100644 --- a/drivers/media/i2c/tvp7002.c +++ b/drivers/media/i2c/tvp7002.c @@ -5,7 +5,7 @@ * Author: Santiago Nunez-Corrales <santiago.nunez@ridgerun.com> * * This code is partially based upon the TVP5150 driver - * written by Mauro Carvalho Chehab (mchehab@infradead.org), + * written by Mauro Carvalho Chehab <mchehab@kernel.org>, * the TVP514x driver written by Vaibhav Hiremath <hvaibhav@ti.com> * and the TVP7002 driver in the TI LSP 2.10.00.14. Revisions by * Muralidharan Karicheri and Snehaprabha Narnakaje (TI). diff --git a/drivers/media/i2c/tvp7002_reg.h b/drivers/media/i2c/tvp7002_reg.h index 3c8c8b0a6a4c..7f56ba689dfe 100644 --- a/drivers/media/i2c/tvp7002_reg.h +++ b/drivers/media/i2c/tvp7002_reg.h @@ -5,7 +5,7 @@ * Author: Santiago Nunez-Corrales <santiago.nunez@ridgerun.com> * * This code is partially based upon the TVP5150 driver - * written by Mauro Carvalho Chehab (mchehab@infradead.org), + * written by Mauro Carvalho Chehab <mchehab@kernel.org>, * the TVP514x driver written by Vaibhav Hiremath <hvaibhav@ti.com> * and the TVP7002 driver in the TI LSP 2.10.00.14 * diff --git a/drivers/media/media-devnode.c b/drivers/media/media-devnode.c index 67ac51eff15c..6b87a721dc49 100644 --- a/drivers/media/media-devnode.c +++ b/drivers/media/media-devnode.c @@ -4,7 +4,7 @@ * Copyright (C) 2010 Nokia Corporation * * Based on drivers/media/video/v4l2_dev.c code authored by - * Mauro Carvalho Chehab <mchehab@infradead.org> (version 2) + * Mauro Carvalho Chehab <mchehab@kernel.org> (version 2) * Alan Cox, <alan@lxorguk.ukuu.org.uk> (version 1) * * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com> diff --git a/drivers/media/pci/bt8xx/bttv-audio-hook.c b/drivers/media/pci/bt8xx/bttv-audio-hook.c index 9f1f9169fb5b..346fc7f58839 100644 --- a/drivers/media/pci/bt8xx/bttv-audio-hook.c +++ b/drivers/media/pci/bt8xx/bttv-audio-hook.c @@ -1,7 +1,7 @@ /* * Handlers for board audio hooks, splitted from bttv-cards * - * Copyright (c) 2006 Mauro Carvalho Chehab (mchehab@infradead.org) + * Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@kernel.org> * This code is placed under the terms of the GNU General Public License */ diff --git a/drivers/media/pci/bt8xx/bttv-audio-hook.h b/drivers/media/pci/bt8xx/bttv-audio-hook.h index 159d07adeff8..be16a537a03a 100644 --- a/drivers/media/pci/bt8xx/bttv-audio-hook.h +++ b/drivers/media/pci/bt8xx/bttv-audio-hook.h @@ -1,7 +1,7 @@ /* * Handlers for board audio hooks, splitted from bttv-cards * - * Copyright (c) 2006 Mauro Carvalho Chehab (mchehab@infradead.org) + * Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@kernel.org> * This code is placed under the terms of the GNU General Public License */ diff --git a/drivers/media/pci/bt8xx/bttv-cards.c b/drivers/media/pci/bt8xx/bttv-cards.c index 1902732f90e1..2616243b2c49 100644 --- a/drivers/media/pci/bt8xx/bttv-cards.c +++ b/drivers/media/pci/bt8xx/bttv-cards.c @@ -2447,7 +2447,7 @@ struct tvcard bttv_tvcards[] = { }, /* ---- card 0x88---------------------------------- */ [BTTV_BOARD_ACORP_Y878F] = { - /* Mauro 
Carvalho Chehab <mchehab@infradead.org> */ + /* Mauro Carvalho Chehab <mchehab@kernel.org> */ .name = "Acorp Y878F", .video_inputs = 3, /* .audio_inputs= 1, */ @@ -2688,7 +2688,7 @@ struct tvcard bttv_tvcards[] = { }, [BTTV_BOARD_ENLTV_FM_2] = { /* Encore TV Tuner Pro ENL TV-FM-2 - Mauro Carvalho Chehab <mchehab@infradead.org */ + Mauro Carvalho Chehab <mchehab@kernel.org> */ .name = "Encore ENL TV-FM-2", .video_inputs = 3, /* .audio_inputs= 1, */ diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c index 707f57a9f940..de3f44b8dec6 100644 --- a/drivers/media/pci/bt8xx/bttv-driver.c +++ b/drivers/media/pci/bt8xx/bttv-driver.c @@ -13,7 +13,7 @@ (c) 2005-2006 Nickolay V. Shmyrev <nshmyrev@yandex.ru> Fixes to be fully V4L2 compliant by - (c) 2006 Mauro Carvalho Chehab <mchehab@infradead.org> + (c) 2006 Mauro Carvalho Chehab <mchehab@kernel.org> Cropping and overscan support Copyright (C) 2005, 2006 Michael H. Schimek <mschimek@gmx.at> diff --git a/drivers/media/pci/bt8xx/bttv-i2c.c b/drivers/media/pci/bt8xx/bttv-i2c.c index eccd1e3d717a..c76823eb399d 100644 --- a/drivers/media/pci/bt8xx/bttv-i2c.c +++ b/drivers/media/pci/bt8xx/bttv-i2c.c @@ -8,7 +8,7 @@ & Marcus Metzler (mocm@thp.uni-koeln.de) (c) 1999-2003 Gerd Knorr <kraxel@bytesex.org> - (c) 2005 Mauro Carvalho Chehab <mchehab@infradead.org> + (c) 2005 Mauro Carvalho Chehab <mchehab@kernel.org> - Multituner support and i2c address binding This program is free software; you can redistribute it and/or modify diff --git a/drivers/media/pci/cx23885/cx23885-input.c b/drivers/media/pci/cx23885/cx23885-input.c index be49589a61d2..395ff9bba759 100644 --- a/drivers/media/pci/cx23885/cx23885-input.c +++ b/drivers/media/pci/cx23885/cx23885-input.c @@ -13,7 +13,7 @@ * Copyright (C) 2008 <srinivasa.deevi at conexant dot com> * Copyright (C) 2005 Ludovico Cavedon <cavedon@sssup.it> * Markus Rechberger <mrechberger@gmail.com> - * Mauro Carvalho Chehab <mchehab@infradead.org> + * Mauro Carvalho Chehab <mchehab@kernel.org> * Sascha Sommer <saschasommer@freenet.de> * Copyright (C) 2004, 2005 Chris Pascoe * Copyright (C) 2003, 2004 Gerd Knorr diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c index ab09bb55cf45..8a28fda703a2 100644 --- a/drivers/media/pci/cx88/cx88-alsa.c +++ b/drivers/media/pci/cx88/cx88-alsa.c @@ -4,7 +4,7 @@ * * (c) 2007 Trent Piepho <xyzzy@speakeasy.org> * (c) 2005,2006 Ricardo Cerqueira <v4l@cerqueira.org> - * (c) 2005 Mauro Carvalho Chehab <mchehab@infradead.org> + * (c) 2005 Mauro Carvalho Chehab <mchehab@kernel.org> * Based on a dummy cx88 module by Gerd Knorr <kraxel@bytesex.org> * Based on dummy.c by Jaroslav Kysela <perex@perex.cz> * @@ -103,7 +103,7 @@ MODULE_PARM_DESC(index, "Index value for cx88x capture interface(s)."); MODULE_DESCRIPTION("ALSA driver module for cx2388x based TV cards"); MODULE_AUTHOR("Ricardo Cerqueira"); -MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>"); +MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>"); MODULE_LICENSE("GPL"); MODULE_VERSION(CX88_VERSION); diff --git a/drivers/media/pci/cx88/cx88-blackbird.c b/drivers/media/pci/cx88/cx88-blackbird.c index 0e0952e60795..7a4876cf9f08 100644 --- a/drivers/media/pci/cx88/cx88-blackbird.c +++ b/drivers/media/pci/cx88/cx88-blackbird.c @@ -5,7 +5,7 @@ * (c) 2004 Jelle Foks <jelle@foks.us> * (c) 2004 Gerd Knorr <kraxel@bytesex.org> * - * (c) 2005-2006 Mauro Carvalho Chehab <mchehab@infradead.org> + * (c) 2005-2006 Mauro Carvalho Chehab <mchehab@kernel.org> * - 
video_ioctl2 conversion * * Includes parts from the ivtv driver <http://sourceforge.net/projects/ivtv/> diff --git a/drivers/media/pci/cx88/cx88-core.c b/drivers/media/pci/cx88/cx88-core.c index 8bfa5b7ed91b..60988e95b637 100644 --- a/drivers/media/pci/cx88/cx88-core.c +++ b/drivers/media/pci/cx88/cx88-core.c @@ -4,7 +4,7 @@ * * (c) 2003 Gerd Knorr <kraxel@bytesex.org> [SuSE Labs] * - * (c) 2005-2006 Mauro Carvalho Chehab <mchehab@infradead.org> + * (c) 2005-2006 Mauro Carvalho Chehab <mchehab@kernel.org> * - Multituner support * - video_ioctl2 conversion * - PAL/M fixes diff --git a/drivers/media/pci/cx88/cx88-i2c.c b/drivers/media/pci/cx88/cx88-i2c.c index f7692775fb5a..99f88a05a7c9 100644 --- a/drivers/media/pci/cx88/cx88-i2c.c +++ b/drivers/media/pci/cx88/cx88-i2c.c @@ -8,7 +8,7 @@ * (c) 2002 Yurij Sysoev <yurij@naturesoft.net> * (c) 1999-2003 Gerd Knorr <kraxel@bytesex.org> * - * (c) 2005 Mauro Carvalho Chehab <mchehab@infradead.org> + * (c) 2005 Mauro Carvalho Chehab <mchehab@kernel.org> * - Multituner support and i2c address binding * * This program is free software; you can redistribute it and/or modify diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c index 9be682cdb644..7b113bad70d2 100644 --- a/drivers/media/pci/cx88/cx88-video.c +++ b/drivers/media/pci/cx88/cx88-video.c @@ -5,7 +5,7 @@ * * (c) 2003-04 Gerd Knorr <kraxel@bytesex.org> [SuSE Labs] * - * (c) 2005-2006 Mauro Carvalho Chehab <mchehab@infradead.org> + * (c) 2005-2006 Mauro Carvalho Chehab <mchehab@kernel.org> * - Multituner support * - video_ioctl2 conversion * - PAL/M fixes diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c index 5ef635e72e10..4c52ac6d8bc5 100644 --- a/drivers/media/radio/radio-aimslab.c +++ b/drivers/media/radio/radio-aimslab.c @@ -4,7 +4,7 @@ * Copyright 1997 M. Kirkwood * * Converted to the radio-isa framework by Hans Verkuil <hans.verkuil@cisco.com> - * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org> + * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org> * Converted to new API by Alan Cox <alan@lxorguk.ukuu.org.uk> * Various bugfixes and enhancements by Russell Kroll <rkroll@exploits.org> * diff --git a/drivers/media/radio/radio-aztech.c b/drivers/media/radio/radio-aztech.c index 9e12c6027359..840b7d60462b 100644 --- a/drivers/media/radio/radio-aztech.c +++ b/drivers/media/radio/radio-aztech.c @@ -2,7 +2,7 @@ * radio-aztech.c - Aztech radio card driver * * Converted to the radio-isa framework by Hans Verkuil <hans.verkuil@xs4all.nl> - * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org> + * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org> * Adapted to support the Video for Linux API by * Russell Kroll <rkroll@exploits.org>. Based on original tuner code by: * diff --git a/drivers/media/radio/radio-gemtek.c b/drivers/media/radio/radio-gemtek.c index 3ff4c4e1435f..f051f8694ab9 100644 --- a/drivers/media/radio/radio-gemtek.c +++ b/drivers/media/radio/radio-gemtek.c @@ -15,7 +15,7 @@ * Various bugfixes and enhancements by Russell Kroll <rkroll@exploits.org> * * Converted to the radio-isa framework by Hans Verkuil <hans.verkuil@cisco.com> - * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org> + * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org> * * Note: this card seems to swap the left and right audio channels! 
* diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c index 95f06f3b35dc..e4e758739246 100644 --- a/drivers/media/radio/radio-maxiradio.c +++ b/drivers/media/radio/radio-maxiradio.c @@ -27,7 +27,7 @@ * BUGS: * - card unmutes if you change frequency * - * (c) 2006, 2007 by Mauro Carvalho Chehab <mchehab@infradead.org>: + * (c) 2006, 2007 by Mauro Carvalho Chehab <mchehab@kernel.org>: * - Conversion to V4L2 API * - Uses video_ioctl2 for parsing and to add debug support */ diff --git a/drivers/media/radio/radio-rtrack2.c b/drivers/media/radio/radio-rtrack2.c index abeaedd8d437..5a1470eb753e 100644 --- a/drivers/media/radio/radio-rtrack2.c +++ b/drivers/media/radio/radio-rtrack2.c @@ -7,7 +7,7 @@ * Various bugfixes and enhancements by Russell Kroll <rkroll@exploits.org> * * Converted to the radio-isa framework by Hans Verkuil <hans.verkuil@cisco.com> - * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org> + * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org> * * Fully tested with actual hardware and the v4l2-compliance tool. */ diff --git a/drivers/media/radio/radio-sf16fmi.c b/drivers/media/radio/radio-sf16fmi.c index fc4e63d36e4c..4f9b97edd9eb 100644 --- a/drivers/media/radio/radio-sf16fmi.c +++ b/drivers/media/radio/radio-sf16fmi.c @@ -13,7 +13,7 @@ * No volume control - only mute/unmute - you have to use line volume * control on SB-part of SF16-FMI/SF16-FMP/SF16-FMD * - * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org> + * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org> */ #include <linux/kernel.h> /* __setup */ diff --git a/drivers/media/radio/radio-terratec.c b/drivers/media/radio/radio-terratec.c index 4f116ea294fb..1af8f29cc7d1 100644 --- a/drivers/media/radio/radio-terratec.c +++ b/drivers/media/radio/radio-terratec.c @@ -17,7 +17,7 @@ * Volume Control is done digitally * * Converted to the radio-isa framework by Hans Verkuil <hans.verkuil@cisco.com> - * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org> + * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org> */ #include <linux/module.h> /* Modules */ diff --git a/drivers/media/radio/radio-trust.c b/drivers/media/radio/radio-trust.c index 26a8c6002121..a4bad322ffff 100644 --- a/drivers/media/radio/radio-trust.c +++ b/drivers/media/radio/radio-trust.c @@ -12,7 +12,7 @@ * Scott McGrath (smcgrath@twilight.vtc.vsc.edu) * William McGrath (wmcgrath@twilight.vtc.vsc.edu) * - * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org> + * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org> */ #include <stdarg.h> diff --git a/drivers/media/radio/radio-typhoon.c b/drivers/media/radio/radio-typhoon.c index eb72a4d13758..d0d67ad85b8f 100644 --- a/drivers/media/radio/radio-typhoon.c +++ b/drivers/media/radio/radio-typhoon.c @@ -25,7 +25,7 @@ * The frequency change is necessary since the card never seems to be * completely silent. 
* - * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org> + * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org> */ #include <linux/module.h> /* Modules */ diff --git a/drivers/media/radio/radio-zoltrix.c b/drivers/media/radio/radio-zoltrix.c index 026e88eef29c..6007cd09b328 100644 --- a/drivers/media/radio/radio-zoltrix.c +++ b/drivers/media/radio/radio-zoltrix.c @@ -27,7 +27,7 @@ * 2002-07-15 - Fix Stereo typo * * 2006-07-24 - Converted to V4L2 API - * by Mauro Carvalho Chehab <mchehab@infradead.org> + * by Mauro Carvalho Chehab <mchehab@kernel.org> * * Converted to the radio-isa framework by Hans Verkuil <hans.verkuil@cisco.com> * diff --git a/drivers/media/rc/keymaps/rc-avermedia-m135a.c b/drivers/media/rc/keymaps/rc-avermedia-m135a.c index f6977df1a75b..d275d98d066a 100644 --- a/drivers/media/rc/keymaps/rc-avermedia-m135a.c +++ b/drivers/media/rc/keymaps/rc-avermedia-m135a.c @@ -12,7 +12,7 @@ * * On Avermedia M135A with IR model RM-JX, the same codes exist on both * Positivo (BR) and original IR, initial version and remote control codes - * added by Mauro Carvalho Chehab <mchehab@infradead.org> + * added by Mauro Carvalho Chehab <mchehab@kernel.org> * * Positivo also ships Avermedia M135A with model RM-K6, extra control * codes added by Herton Ronaldo Krzesinski <herton@mandriva.com.br> diff --git a/drivers/media/rc/keymaps/rc-encore-enltv-fm53.c b/drivers/media/rc/keymaps/rc-encore-enltv-fm53.c index e4e78c1f4123..057c13b765ef 100644 --- a/drivers/media/rc/keymaps/rc-encore-enltv-fm53.c +++ b/drivers/media/rc/keymaps/rc-encore-enltv-fm53.c @@ -9,7 +9,7 @@ #include <linux/module.h> /* Encore ENLTV-FM v5.3 - Mauro Carvalho Chehab <mchehab@infradead.org> + Mauro Carvalho Chehab <mchehab@kernel.org> */ static struct rc_map_table encore_enltv_fm53[] = { diff --git a/drivers/media/rc/keymaps/rc-encore-enltv2.c b/drivers/media/rc/keymaps/rc-encore-enltv2.c index c3d4437a6fda..cd0555924456 100644 --- a/drivers/media/rc/keymaps/rc-encore-enltv2.c +++ b/drivers/media/rc/keymaps/rc-encore-enltv2.c @@ -9,7 +9,7 @@ #include <linux/module.h> /* Encore ENLTV2-FM - silver plastic - "Wand Media" written at the botton - Mauro Carvalho Chehab <mchehab@infradead.org> */ + Mauro Carvalho Chehab <mchehab@kernel.org> */ static struct rc_map_table encore_enltv2[] = { { 0x4c, KEY_POWER2 }, diff --git a/drivers/media/rc/keymaps/rc-kaiomy.c b/drivers/media/rc/keymaps/rc-kaiomy.c index f0f88df18606..a00051339842 100644 --- a/drivers/media/rc/keymaps/rc-kaiomy.c +++ b/drivers/media/rc/keymaps/rc-kaiomy.c @@ -9,7 +9,7 @@ #include <linux/module.h> /* Kaiomy TVnPC U2 - Mauro Carvalho Chehab <mchehab@infradead.org> + Mauro Carvalho Chehab <mchehab@kernel.org> */ static struct rc_map_table kaiomy[] = { diff --git a/drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c b/drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c index 453e04377de7..db5edde3eeb1 100644 --- a/drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c +++ b/drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c @@ -9,7 +9,7 @@ #include <linux/module.h> /* Kworld Plus TV Analog Lite PCI IR - Mauro Carvalho Chehab <mchehab@infradead.org> + Mauro Carvalho Chehab <mchehab@kernel.org> */ static struct rc_map_table kworld_plus_tv_analog[] = { diff --git a/drivers/media/rc/keymaps/rc-pixelview-new.c b/drivers/media/rc/keymaps/rc-pixelview-new.c index 791130f108ff..e4e34f2ccf74 100644 --- a/drivers/media/rc/keymaps/rc-pixelview-new.c +++ b/drivers/media/rc/keymaps/rc-pixelview-new.c @@ -9,7 +9,7 @@ #include <linux/module.h> /* - 
Mauro Carvalho Chehab <mchehab@infradead.org> + Mauro Carvalho Chehab <mchehab@kernel.org> present on PV MPEG 8000GT */ diff --git a/drivers/media/tuners/tea5761.c b/drivers/media/tuners/tea5761.c index 88b3e80c38ad..d78a2bdb3e36 100644 --- a/drivers/media/tuners/tea5761.c +++ b/drivers/media/tuners/tea5761.c @@ -2,7 +2,7 @@ // For Philips TEA5761 FM Chip // I2C address is always 0x20 (0x10 at 7-bit mode). // -// Copyright (c) 2005-2007 Mauro Carvalho Chehab (mchehab@infradead.org) +// Copyright (c) 2005-2007 Mauro Carvalho Chehab <mchehab@kernel.org> #include <linux/i2c.h> #include <linux/slab.h> @@ -337,5 +337,5 @@ EXPORT_SYMBOL_GPL(tea5761_attach); EXPORT_SYMBOL_GPL(tea5761_autodetection); MODULE_DESCRIPTION("Philips TEA5761 FM tuner driver"); -MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>"); +MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/tuners/tea5767.c b/drivers/media/tuners/tea5767.c index 2b2c064d7dc3..016d0d5ec50b 100644 --- a/drivers/media/tuners/tea5767.c +++ b/drivers/media/tuners/tea5767.c @@ -2,7 +2,7 @@ // For Philips TEA5767 FM Chip used on some TV Cards like Prolink Pixelview // I2C address is always 0xC0. // -// Copyright (c) 2005 Mauro Carvalho Chehab (mchehab@infradead.org) +// Copyright (c) 2005 Mauro Carvalho Chehab <mchehab@kernel.org> // // tea5767 autodetection thanks to Torsten Seeboth and Atsushi Nakagawa // from their contributions on DScaler. @@ -469,5 +469,5 @@ EXPORT_SYMBOL_GPL(tea5767_attach); EXPORT_SYMBOL_GPL(tea5767_autodetection); MODULE_DESCRIPTION("Philips TEA5767 FM tuner driver"); -MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>"); +MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/tuners/tuner-xc2028-types.h b/drivers/media/tuners/tuner-xc2028-types.h index bb0437c36c03..50d017a4822a 100644 --- a/drivers/media/tuners/tuner-xc2028-types.h +++ b/drivers/media/tuners/tuner-xc2028-types.h @@ -5,7 +5,7 @@ * This file includes internal tipes to be used inside tuner-xc2028. 
* Shouldn't be included outside tuner-xc2028 * - * Copyright (c) 2007-2008 Mauro Carvalho Chehab (mchehab@infradead.org) + * Copyright (c) 2007-2008 Mauro Carvalho Chehab <mchehab@kernel.org> */ /* xc3028 firmware types */ diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c index fca85e08ebd7..84744e138982 100644 --- a/drivers/media/tuners/tuner-xc2028.c +++ b/drivers/media/tuners/tuner-xc2028.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // tuner-xc2028 // -// Copyright (c) 2007-2008 Mauro Carvalho Chehab (mchehab@infradead.org) +// Copyright (c) 2007-2008 Mauro Carvalho Chehab <mchehab@kernel.org> // // Copyright (c) 2007 Michel Ludwig (michel.ludwig@gmail.com) // - frontend interface @@ -1518,7 +1518,7 @@ EXPORT_SYMBOL(xc2028_attach); MODULE_DESCRIPTION("Xceive xc2028/xc3028 tuner driver"); MODULE_AUTHOR("Michel Ludwig <michel.ludwig@gmail.com>"); -MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>"); +MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>"); MODULE_LICENSE("GPL v2"); MODULE_FIRMWARE(XC2028_DEFAULT_FIRMWARE); MODULE_FIRMWARE(XC3028L_DEFAULT_FIRMWARE); diff --git a/drivers/media/tuners/tuner-xc2028.h b/drivers/media/tuners/tuner-xc2028.h index 03fd6d4233a4..7b58bc06e35c 100644 --- a/drivers/media/tuners/tuner-xc2028.h +++ b/drivers/media/tuners/tuner-xc2028.h @@ -2,7 +2,7 @@ * SPDX-License-Identifier: GPL-2.0 * tuner-xc2028 * - * Copyright (c) 2007-2008 Mauro Carvalho Chehab (mchehab@infradead.org) + * Copyright (c) 2007-2008 Mauro Carvalho Chehab <mchehab@kernel.org> */ #ifndef __TUNER_XC2028_H__ diff --git a/drivers/media/usb/em28xx/em28xx-camera.c b/drivers/media/usb/em28xx/em28xx-camera.c index 3c2694a16ed1..d1e66b503f4d 100644 --- a/drivers/media/usb/em28xx/em28xx-camera.c +++ b/drivers/media/usb/em28xx/em28xx-camera.c @@ -2,7 +2,7 @@ // // em28xx-camera.c - driver for Empia EM25xx/27xx/28xx USB video capture devices // -// Copyright (C) 2009 Mauro Carvalho Chehab <mchehab@infradead.org> +// Copyright (C) 2009 Mauro Carvalho Chehab <mchehab@kernel.org> // Copyright (C) 2013 Frank Schäfer <fschaefer.oss@googlemail.com> // // This program is free software; you can redistribute it and/or modify diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c index 9a68bdfeb967..e88e239904eb 100644 --- a/drivers/media/usb/em28xx/em28xx-cards.c +++ b/drivers/media/usb/em28xx/em28xx-cards.c @@ -5,7 +5,7 @@ // // Copyright (C) 2005 Ludovico Cavedon <cavedon@sssup.it> // Markus Rechberger <mrechberger@gmail.com> -// Mauro Carvalho Chehab <mchehab@infradead.org> +// Mauro Carvalho Chehab <mchehab@kernel.org> // Sascha Sommer <saschasommer@freenet.de> // Copyright (C) 2012 Frank Schäfer <fschaefer.oss@googlemail.com> // diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c index 0cefd29184f6..f70845e7d8c6 100644 --- a/drivers/media/usb/em28xx/em28xx-core.c +++ b/drivers/media/usb/em28xx/em28xx-core.c @@ -4,7 +4,7 @@ // // Copyright (C) 2005 Ludovico Cavedon <cavedon@sssup.it> // Markus Rechberger <mrechberger@gmail.com> -// Mauro Carvalho Chehab <mchehab@infradead.org> +// Mauro Carvalho Chehab <mchehab@kernel.org> // Sascha Sommer <saschasommer@freenet.de> // Copyright (C) 2012 Frank Schäfer <fschaefer.oss@googlemail.com> // @@ -32,7 +32,7 @@ #define DRIVER_AUTHOR "Ludovico Cavedon <cavedon@sssup.it>, " \ "Markus Rechberger <mrechberger@gmail.com>, " \ - "Mauro Carvalho Chehab <mchehab@infradead.org>, " \ + "Mauro Carvalho Chehab <mchehab@kernel.org>, 
" \ "Sascha Sommer <saschasommer@freenet.de>" MODULE_AUTHOR(DRIVER_AUTHOR); diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c index cb85639036f6..b778d8a1983e 100644 --- a/drivers/media/usb/em28xx/em28xx-dvb.c +++ b/drivers/media/usb/em28xx/em28xx-dvb.c @@ -2,7 +2,7 @@ // // DVB device driver for em28xx // -// (c) 2008-2011 Mauro Carvalho Chehab <mchehab@infradead.org> +// (c) 2008-2011 Mauro Carvalho Chehab <mchehab@kernel.org> // // (c) 2008 Devin Heitmueller <devin.heitmueller@gmail.com> // - Fixes for the driver to properly work with HVR-950 @@ -63,7 +63,7 @@ #include "tc90522.h" #include "qm1d1c0042.h" -MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>"); +MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION(DRIVER_DESC " - digital TV interface"); MODULE_VERSION(EM28XX_VERSION); diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c index 9151bccd859a..6458682bc6e2 100644 --- a/drivers/media/usb/em28xx/em28xx-i2c.c +++ b/drivers/media/usb/em28xx/em28xx-i2c.c @@ -4,7 +4,7 @@ // // Copyright (C) 2005 Ludovico Cavedon <cavedon@sssup.it> // Markus Rechberger <mrechberger@gmail.com> -// Mauro Carvalho Chehab <mchehab@infradead.org> +// Mauro Carvalho Chehab <mchehab@kernel.org> // Sascha Sommer <saschasommer@freenet.de> // Copyright (C) 2013 Frank Schäfer <fschaefer.oss@googlemail.com> // diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c index 2dc1be00b8b8..f84a1208d5d3 100644 --- a/drivers/media/usb/em28xx/em28xx-input.c +++ b/drivers/media/usb/em28xx/em28xx-input.c @@ -4,7 +4,7 @@ // // Copyright (C) 2005 Ludovico Cavedon <cavedon@sssup.it> // Markus Rechberger <mrechberger@gmail.com> -// Mauro Carvalho Chehab <mchehab@infradead.org> +// Mauro Carvalho Chehab <mchehab@kernel.org> // Sascha Sommer <saschasommer@freenet.de> // // This program is free software; you can redistribute it and/or modify diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c index d70ee13cc52e..68571bf36d28 100644 --- a/drivers/media/usb/em28xx/em28xx-video.c +++ b/drivers/media/usb/em28xx/em28xx-video.c @@ -5,7 +5,7 @@ // // Copyright (C) 2005 Ludovico Cavedon <cavedon@sssup.it> // Markus Rechberger <mrechberger@gmail.com> -// Mauro Carvalho Chehab <mchehab@infradead.org> +// Mauro Carvalho Chehab <mchehab@kernel.org> // Sascha Sommer <saschasommer@freenet.de> // Copyright (C) 2012 Frank Schäfer <fschaefer.oss@googlemail.com> // @@ -44,7 +44,7 @@ #define DRIVER_AUTHOR "Ludovico Cavedon <cavedon@sssup.it>, " \ "Markus Rechberger <mrechberger@gmail.com>, " \ - "Mauro Carvalho Chehab <mchehab@infradead.org>, " \ + "Mauro Carvalho Chehab <mchehab@kernel.org>, " \ "Sascha Sommer <saschasommer@freenet.de>" static unsigned int isoc_debug; diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h index 6e44edd2216c..953caac025f2 100644 --- a/drivers/media/usb/em28xx/em28xx.h +++ b/drivers/media/usb/em28xx/em28xx.h @@ -4,7 +4,7 @@ * * Copyright (C) 2005 Markus Rechberger <mrechberger@gmail.com> * Ludovico Cavedon <cavedon@sssup.it> - * Mauro Carvalho Chehab <mchehab@infradead.org> + * Mauro Carvalho Chehab <mchehab@kernel.org> * Copyright (C) 2012 Frank Schäfer <fschaefer.oss@googlemail.com> * * Based on the em2800 driver from Sascha Sommer <saschasommer@freenet.de> diff --git a/drivers/media/usb/gspca/zc3xx-reg.h b/drivers/media/usb/gspca/zc3xx-reg.h index 
a1bd94e8ce52..71fda38e85e0 100644 --- a/drivers/media/usb/gspca/zc3xx-reg.h +++ b/drivers/media/usb/gspca/zc3xx-reg.h @@ -1,7 +1,7 @@ /* * zc030x registers * - * Copyright (c) 2008 Mauro Carvalho Chehab <mchehab@infradead.org> + * Copyright (c) 2008 Mauro Carvalho Chehab <mchehab@kernel.org> * * The register aliases used here came from this driver: * http://zc0302.sourceforge.net/zc0302.php diff --git a/drivers/media/usb/tm6000/tm6000-cards.c b/drivers/media/usb/tm6000/tm6000-cards.c index 70939e96b856..23df50aa0a4a 100644 --- a/drivers/media/usb/tm6000/tm6000-cards.c +++ b/drivers/media/usb/tm6000/tm6000-cards.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // tm6000-cards.c - driver for TM5600/TM6000/TM6010 USB video capture devices // -// Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org> +// Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org> #include <linux/init.h> #include <linux/module.h> diff --git a/drivers/media/usb/tm6000/tm6000-core.c b/drivers/media/usb/tm6000/tm6000-core.c index 23a1332d98e6..d3229aa45fcb 100644 --- a/drivers/media/usb/tm6000/tm6000-core.c +++ b/drivers/media/usb/tm6000/tm6000-core.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // tm6000-core.c - driver for TM5600/TM6000/TM6010 USB video capture devices // -// Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org> +// Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org> // // Copyright (c) 2007 Michel Ludwig <michel.ludwig@gmail.com> // - DVB-T support diff --git a/drivers/media/usb/tm6000/tm6000-i2c.c b/drivers/media/usb/tm6000/tm6000-i2c.c index c9a62bbff27a..659b63febf85 100644 --- a/drivers/media/usb/tm6000/tm6000-i2c.c +++ b/drivers/media/usb/tm6000/tm6000-i2c.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // tm6000-i2c.c - driver for TM5600/TM6000/TM6010 USB video capture devices // -// Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org> +// Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org> // // Copyright (c) 2007 Michel Ludwig <michel.ludwig@gmail.com> // - Fix SMBus Read Byte command diff --git a/drivers/media/usb/tm6000/tm6000-regs.h b/drivers/media/usb/tm6000/tm6000-regs.h index 21587fcf11e3..d10424673db9 100644 --- a/drivers/media/usb/tm6000/tm6000-regs.h +++ b/drivers/media/usb/tm6000/tm6000-regs.h @@ -2,7 +2,7 @@ * SPDX-License-Identifier: GPL-2.0 * tm6000-regs.h - driver for TM5600/TM6000/TM6010 USB video capture devices * - * Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org> + * Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org> */ /* diff --git a/drivers/media/usb/tm6000/tm6000-usb-isoc.h b/drivers/media/usb/tm6000/tm6000-usb-isoc.h index 5c615b0a7a46..b275dbce3a1b 100644 --- a/drivers/media/usb/tm6000/tm6000-usb-isoc.h +++ b/drivers/media/usb/tm6000/tm6000-usb-isoc.h @@ -2,7 +2,7 @@ * SPDX-License-Identifier: GPL-2.0 * tm6000-buf.c - driver for TM5600/TM6000/TM6010 USB video capture devices * - * Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org> + * Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org> */ #include <linux/videodev2.h> diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c index b2399d4266da..aa85fe31c835 100644 --- a/drivers/media/usb/tm6000/tm6000-video.c +++ b/drivers/media/usb/tm6000/tm6000-video.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // tm6000-video.c - driver for TM5600/TM6000/TM6010 USB video capture devices // -// Copyright (c) 
2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org> +// Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org> // // Copyright (c) 2007 Michel Ludwig <michel.ludwig@gmail.com> // - Fixed module load/unload diff --git a/drivers/media/usb/tm6000/tm6000.h b/drivers/media/usb/tm6000/tm6000.h index e1e45770e28d..0864ed7314eb 100644 --- a/drivers/media/usb/tm6000/tm6000.h +++ b/drivers/media/usb/tm6000/tm6000.h @@ -2,7 +2,7 @@ * SPDX-License-Identifier: GPL-2.0 * tm6000.h - driver for TM5600/TM6000/TM6010 USB video capture devices * - * Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org> + * Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org> * * Copyright (c) 2007 Michel Ludwig <michel.ludwig@gmail.com> * - DVB-T support diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c index b3bc839ee6ad..c4f4357e9ca4 100644 --- a/drivers/media/v4l2-core/v4l2-dev.c +++ b/drivers/media/v4l2-core/v4l2-dev.c @@ -10,7 +10,7 @@ * 2 of the License, or (at your option) any later version. * * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk> (version 1) - * Mauro Carvalho Chehab <mchehab@infradead.org> (version 2) + * Mauro Carvalho Chehab <mchehab@kernel.org> (version 2) * * Fixes: 20000516 Claudio Matsuoka <claudio@conectiva.com> * - Added procfs support @@ -1079,7 +1079,7 @@ static void __exit videodev_exit(void) subsys_initcall(videodev_init); module_exit(videodev_exit) -MODULE_AUTHOR("Alan Cox, Mauro Carvalho Chehab <mchehab@infradead.org>"); +MODULE_AUTHOR("Alan Cox, Mauro Carvalho Chehab <mchehab@kernel.org>"); MODULE_DESCRIPTION("Device registrar for Video4Linux drivers v2"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CHARDEV_MAJOR(VIDEO_MAJOR); diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index f48c505550e0..de5d96dbe69e 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -9,7 +9,7 @@ * 2 of the License, or (at your option) any later version. * * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk> (version 1) - * Mauro Carvalho Chehab <mchehab@infradead.org> (version 2) + * Mauro Carvalho Chehab <mchehab@kernel.org> (version 2) */ #include <linux/mm.h> diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c index 2b3981842b4b..7491b337002c 100644 --- a/drivers/media/v4l2-core/videobuf-core.c +++ b/drivers/media/v4l2-core/videobuf-core.c @@ -1,11 +1,11 @@ /* * generic helper functions for handling video4linux capture buffers * - * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org> + * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org> * * Highly based on video-buf written originally by: * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org> - * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org> + * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org> * (c) 2006 Ted Walther and John Sokol * * This program is free software; you can redistribute it and/or modify @@ -38,7 +38,7 @@ static int debug; module_param(debug, int, 0644); MODULE_DESCRIPTION("helper module to manage video4linux buffers"); -MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>"); +MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>"); MODULE_LICENSE("GPL"); #define dprintk(level, fmt, arg...) 
\ diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c index e02353e340dd..f46132504d88 100644 --- a/drivers/media/v4l2-core/videobuf-dma-contig.c +++ b/drivers/media/v4l2-core/videobuf-dma-contig.c @@ -7,7 +7,7 @@ * Copyright (c) 2008 Magnus Damm * * Based on videobuf-vmalloc.c, - * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org> + * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c index 87b16773f326..2e5c346f9c30 100644 --- a/drivers/media/v4l2-core/videobuf-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf-dma-sg.c @@ -6,11 +6,11 @@ * into PAGE_SIZE chunks). They also assume the driver does not need * to touch the video data. * - * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org> + * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org> * * Highly based on video-buf written originally by: * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org> - * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org> + * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org> * (c) 2006 Ted Walther and John Sokol * * This program is free software; you can redistribute it and/or modify @@ -48,7 +48,7 @@ static int debug; module_param(debug, int, 0644); MODULE_DESCRIPTION("helper module to manage video4linux dma sg buffers"); -MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>"); +MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>"); MODULE_LICENSE("GPL"); #define dprintk(level, fmt, arg...) \ diff --git a/drivers/media/v4l2-core/videobuf-vmalloc.c b/drivers/media/v4l2-core/videobuf-vmalloc.c index 2ff7fcc77b11..45fe781aeeec 100644 --- a/drivers/media/v4l2-core/videobuf-vmalloc.c +++ b/drivers/media/v4l2-core/videobuf-vmalloc.c @@ -6,7 +6,7 @@ * into PAGE_SIZE chunks). They also assume the driver does not need * to touch the video data. * - * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org> + * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -41,7 +41,7 @@ static int debug; module_param(debug, int, 0644); MODULE_DESCRIPTION("helper module to manage video4linux vmalloc buffers"); -MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>"); +MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>"); MODULE_LICENSE("GPL"); #define dprintk(level, fmt, arg...) 
\ diff --git a/drivers/memory/emif-asm-offsets.c b/drivers/memory/emif-asm-offsets.c index 71a89d5d3efd..db8043019ec6 100644 --- a/drivers/memory/emif-asm-offsets.c +++ b/drivers/memory/emif-asm-offsets.c @@ -16,77 +16,7 @@ int main(void) { - DEFINE(EMIF_SDCFG_VAL_OFFSET, - offsetof(struct emif_regs_amx3, emif_sdcfg_val)); - DEFINE(EMIF_TIMING1_VAL_OFFSET, - offsetof(struct emif_regs_amx3, emif_timing1_val)); - DEFINE(EMIF_TIMING2_VAL_OFFSET, - offsetof(struct emif_regs_amx3, emif_timing2_val)); - DEFINE(EMIF_TIMING3_VAL_OFFSET, - offsetof(struct emif_regs_amx3, emif_timing3_val)); - DEFINE(EMIF_REF_CTRL_VAL_OFFSET, - offsetof(struct emif_regs_amx3, emif_ref_ctrl_val)); - DEFINE(EMIF_ZQCFG_VAL_OFFSET, - offsetof(struct emif_regs_amx3, emif_zqcfg_val)); - DEFINE(EMIF_PMCR_VAL_OFFSET, - offsetof(struct emif_regs_amx3, emif_pmcr_val)); - DEFINE(EMIF_PMCR_SHDW_VAL_OFFSET, - offsetof(struct emif_regs_amx3, emif_pmcr_shdw_val)); - DEFINE(EMIF_RD_WR_LEVEL_RAMP_CTRL_OFFSET, - offsetof(struct emif_regs_amx3, emif_rd_wr_level_ramp_ctrl)); - DEFINE(EMIF_RD_WR_EXEC_THRESH_OFFSET, - offsetof(struct emif_regs_amx3, emif_rd_wr_exec_thresh)); - DEFINE(EMIF_COS_CONFIG_OFFSET, - offsetof(struct emif_regs_amx3, emif_cos_config)); - DEFINE(EMIF_PRIORITY_TO_COS_MAPPING_OFFSET, - offsetof(struct emif_regs_amx3, emif_priority_to_cos_mapping)); - DEFINE(EMIF_CONNECT_ID_SERV_1_MAP_OFFSET, - offsetof(struct emif_regs_amx3, emif_connect_id_serv_1_map)); - DEFINE(EMIF_CONNECT_ID_SERV_2_MAP_OFFSET, - offsetof(struct emif_regs_amx3, emif_connect_id_serv_2_map)); - DEFINE(EMIF_OCP_CONFIG_VAL_OFFSET, - offsetof(struct emif_regs_amx3, emif_ocp_config_val)); - DEFINE(EMIF_LPDDR2_NVM_TIM_OFFSET, - offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim)); - DEFINE(EMIF_LPDDR2_NVM_TIM_SHDW_OFFSET, - offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim_shdw)); - DEFINE(EMIF_DLL_CALIB_CTRL_VAL_OFFSET, - offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val)); - DEFINE(EMIF_DLL_CALIB_CTRL_VAL_SHDW_OFFSET, - offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val_shdw)); - DEFINE(EMIF_DDR_PHY_CTLR_1_OFFSET, - offsetof(struct emif_regs_amx3, emif_ddr_phy_ctlr_1)); - DEFINE(EMIF_EXT_PHY_CTRL_VALS_OFFSET, - offsetof(struct emif_regs_amx3, emif_ext_phy_ctrl_vals)); - DEFINE(EMIF_REGS_AMX3_SIZE, sizeof(struct emif_regs_amx3)); - - BLANK(); - - DEFINE(EMIF_PM_BASE_ADDR_VIRT_OFFSET, - offsetof(struct ti_emif_pm_data, ti_emif_base_addr_virt)); - DEFINE(EMIF_PM_BASE_ADDR_PHYS_OFFSET, - offsetof(struct ti_emif_pm_data, ti_emif_base_addr_phys)); - DEFINE(EMIF_PM_CONFIG_OFFSET, - offsetof(struct ti_emif_pm_data, ti_emif_sram_config)); - DEFINE(EMIF_PM_REGS_VIRT_OFFSET, - offsetof(struct ti_emif_pm_data, regs_virt)); - DEFINE(EMIF_PM_REGS_PHYS_OFFSET, - offsetof(struct ti_emif_pm_data, regs_phys)); - DEFINE(EMIF_PM_DATA_SIZE, sizeof(struct ti_emif_pm_data)); - - BLANK(); - - DEFINE(EMIF_PM_SAVE_CONTEXT_OFFSET, - offsetof(struct ti_emif_pm_functions, save_context)); - DEFINE(EMIF_PM_RESTORE_CONTEXT_OFFSET, - offsetof(struct ti_emif_pm_functions, restore_context)); - DEFINE(EMIF_PM_ENTER_SR_OFFSET, - offsetof(struct ti_emif_pm_functions, enter_sr)); - DEFINE(EMIF_PM_EXIT_SR_OFFSET, - offsetof(struct ti_emif_pm_functions, exit_sr)); - DEFINE(EMIF_PM_ABORT_SR_OFFSET, - offsetof(struct ti_emif_pm_functions, abort_sr)); - DEFINE(EMIF_PM_FUNCTIONS_SIZE, sizeof(struct ti_emif_pm_functions)); + ti_emif_asm_offsets(); return 0; } diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 231f3a1e27bf..86503f60468f 100644 --- 
a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -1994,6 +1994,7 @@ static struct scsi_host_template mptsas_driver_template = { .cmd_per_lun = 7, .use_clustering = ENABLE_CLUSTERING, .shost_attrs = mptscsih_host_attrs, + .no_write_same = 1, }; static int mptsas_get_linkerrors(struct sas_phy *phy) diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index 8e0acd197c43..6af946d16d24 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c @@ -9,6 +9,7 @@ * published by the Free Software Foundation. */ +#include <linux/bitops.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/io-64-nonatomic-hi-lo.h> @@ -62,6 +63,17 @@ * need a custom accessor. */ +static unsigned long global_flags; +/* + * Workaround for avoiding to use RX DMAC by multiple channels. + * On R-Car H3 ES1.* and M3-W ES1.0, when multiple SDHI channels use + * RX DMAC simultaneously, sometimes hundreds of bytes data are not + * stored into the system memory even if the DMAC interrupt happened. + * So, this driver then uses one RX DMAC channel only. + */ +#define SDHI_INTERNAL_DMAC_ONE_RX_ONLY 0 +#define SDHI_INTERNAL_DMAC_RX_IN_USE 1 + /* Definitions for sampling clocks */ static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = { { @@ -126,6 +138,9 @@ renesas_sdhi_internal_dmac_abort_dma(struct tmio_mmc_host *host) { renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST, RST_RESERVED_BITS | val); + if (host->data && host->data->flags & MMC_DATA_READ) + clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags); + renesas_sdhi_internal_dmac_enable_dma(host, true); } @@ -155,6 +170,9 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host, if (data->flags & MMC_DATA_READ) { dtran_mode |= DTRAN_MODE_CH_NUM_CH1; dir = DMA_FROM_DEVICE; + if (test_bit(SDHI_INTERNAL_DMAC_ONE_RX_ONLY, &global_flags) && + test_and_set_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags)) + goto force_pio; } else { dtran_mode |= DTRAN_MODE_CH_NUM_CH0; dir = DMA_TO_DEVICE; @@ -208,6 +226,9 @@ static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg) renesas_sdhi_internal_dmac_enable_dma(host, false); dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->sg_len, dir); + if (dir == DMA_FROM_DEVICE) + clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags); + tmio_mmc_do_data_irq(host); out: spin_unlock_irq(&host->lock); @@ -251,18 +272,24 @@ static const struct tmio_mmc_dma_ops renesas_sdhi_internal_dmac_dma_ops = { * implementation as others may use a different implementation. 
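/*
 * The renesas_sdhi_internal_dmac change in this merge serializes RX DMAC use
 * with atomic bit flags: a channel claims the RX DMAC with test_and_set_bit()
 * and falls back to PIO if it is already taken, clearing the bit again on
 * completion or abort. Below is a minimal userspace sketch of the same
 * claim/release pattern using C11 atomics instead of the kernel bitops; the
 * function names are illustrative, not the driver's API.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define RX_IN_USE_BIT (1UL << 0)   /* bit 0: "RX DMAC already in use" */

static atomic_ulong global_flags;  /* zero-initialized shared flag word */

/* Try to claim the RX DMAC; return false to tell the caller to use PIO. */
static bool rx_dmac_claim(void)
{
    unsigned long old = atomic_fetch_or(&global_flags, RX_IN_USE_BIT);
    return !(old & RX_IN_USE_BIT);      /* true only for the first claimer */
}

static void rx_dmac_release(void)
{
    atomic_fetch_and(&global_flags, ~RX_IN_USE_BIT);
}

int main(void)
{
    /* First reader gets DMA; a concurrent second reader falls back to PIO. */
    printf("chan0 uses %s\n", rx_dmac_claim() ? "DMA" : "PIO");
    printf("chan1 uses %s\n", rx_dmac_claim() ? "DMA" : "PIO");
    rx_dmac_release();                  /* e.g. from the completion tasklet */
    printf("chan2 uses %s\n", rx_dmac_claim() ? "DMA" : "PIO");
    return 0;
}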
*/ static const struct soc_device_attribute gen3_soc_whitelist[] = { - { .soc_id = "r8a7795", .revision = "ES1.*" }, - { .soc_id = "r8a7795", .revision = "ES2.0" }, - { .soc_id = "r8a7796", .revision = "ES1.0" }, - { .soc_id = "r8a77995", .revision = "ES1.0" }, - { /* sentinel */ } + { .soc_id = "r8a7795", .revision = "ES1.*", + .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) }, + { .soc_id = "r8a7795", .revision = "ES2.0" }, + { .soc_id = "r8a7796", .revision = "ES1.0", + .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) }, + { .soc_id = "r8a77995", .revision = "ES1.0" }, + { /* sentinel */ } }; static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev) { - if (!soc_device_match(gen3_soc_whitelist)) + const struct soc_device_attribute *soc = soc_device_match(gen3_soc_whitelist); + + if (!soc) return -ENODEV; + global_flags |= (unsigned long)soc->data; + return renesas_sdhi_probe(pdev, &renesas_sdhi_internal_dmac_dma_ops); } diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 787434e5589d..78c25ad35fd2 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -1312,7 +1312,7 @@ static void amd_enable_manual_tuning(struct pci_dev *pdev) pci_write_config_dword(pdev, AMD_SD_MISC_CONTROL, val); } -static int amd_execute_tuning(struct sdhci_host *host, u32 opcode) +static int amd_execute_tuning_hs200(struct sdhci_host *host, u32 opcode) { struct sdhci_pci_slot *slot = sdhci_priv(host); struct pci_dev *pdev = slot->chip->pdev; @@ -1351,6 +1351,27 @@ static int amd_execute_tuning(struct sdhci_host *host, u32 opcode) return 0; } +static int amd_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + struct sdhci_host *host = mmc_priv(mmc); + + /* AMD requires custom HS200 tuning */ + if (host->timing == MMC_TIMING_MMC_HS200) + return amd_execute_tuning_hs200(host, opcode); + + /* Otherwise perform standard SDHCI tuning */ + return sdhci_execute_tuning(mmc, opcode); +} + +static int amd_probe_slot(struct sdhci_pci_slot *slot) +{ + struct mmc_host_ops *ops = &slot->host->mmc_host_ops; + + ops->execute_tuning = amd_execute_tuning; + + return 0; +} + static int amd_probe(struct sdhci_pci_chip *chip) { struct pci_dev *smbus_dev; @@ -1385,12 +1406,12 @@ static const struct sdhci_ops amd_sdhci_pci_ops = { .set_bus_width = sdhci_set_bus_width, .reset = sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, - .platform_execute_tuning = amd_execute_tuning, }; static const struct sdhci_pci_fixes sdhci_amd = { .probe = amd_probe, .ops = &amd_sdhci_pci_ops, + .probe_slot = amd_probe_slot, }; static const struct pci_device_id pci_ids[] = { diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c index d4c07b85f18e..f5695be14499 100644 --- a/drivers/mtd/chips/cfi_cmdset_0001.c +++ b/drivers/mtd/chips/cfi_cmdset_0001.c @@ -45,6 +45,7 @@ #define I82802AB 0x00ad #define I82802AC 0x00ac #define PF38F4476 0x881c +#define M28F00AP30 0x8963 /* STMicroelectronics chips */ #define M50LPW080 0x002F #define M50FLW080A 0x0080 @@ -375,6 +376,17 @@ static void cfi_fixup_major_minor(struct cfi_private *cfi, extp->MinorVersion = '1'; } +static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip) +{ + /* + * Micron(was Numonyx) 1Gbit bottom boot are buggy w.r.t + * Erase Supend for their small Erase Blocks(0x8000) + */ + if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30) + return 1; + return 0; +} + static inline struct cfi_pri_intelext * read_pri_intelext(struct map_info *map, __u16 
adr) { @@ -831,21 +843,30 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1)))) goto sleep; + /* Do not allow suspend iff read/write to EB address */ + if ((adr & chip->in_progress_block_mask) == + chip->in_progress_block_addr) + goto sleep; + + /* do not suspend small EBs, buggy Micron Chips */ + if (cfi_is_micron_28F00AP30(cfi, chip) && + (chip->in_progress_block_mask == ~(0x8000-1))) + goto sleep; /* Erase suspend */ - map_write(map, CMD(0xB0), adr); + map_write(map, CMD(0xB0), chip->in_progress_block_addr); /* If the flash has finished erasing, then 'erase suspend' * appears to make some (28F320) flash devices switch to * 'read' mode. Make sure that we switch to 'read status' * mode so we get the right data. --rmk */ - map_write(map, CMD(0x70), adr); + map_write(map, CMD(0x70), chip->in_progress_block_addr); chip->oldstate = FL_ERASING; chip->state = FL_ERASE_SUSPENDING; chip->erase_suspended = 1; for (;;) { - status = map_read(map, adr); + status = map_read(map, chip->in_progress_block_addr); if (map_word_andequal(map, status, status_OK, status_OK)) break; @@ -1041,8 +1062,8 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad sending the 0x70 (Read Status) command to an erasing chip and expecting it to be ignored, that's what we do. */ - map_write(map, CMD(0xd0), adr); - map_write(map, CMD(0x70), adr); + map_write(map, CMD(0xd0), chip->in_progress_block_addr); + map_write(map, CMD(0x70), chip->in_progress_block_addr); chip->oldstate = FL_READY; chip->state = FL_ERASING; break; @@ -1933,6 +1954,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, map_write(map, CMD(0xD0), adr); chip->state = FL_ERASING; chip->erase_suspended = 0; + chip->in_progress_block_addr = adr; + chip->in_progress_block_mask = ~(len - 1); ret = INVAL_CACHE_AND_WAIT(map, chip, adr, adr, len, diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index 668e2cbc155b..692902df2598 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c @@ -816,9 +816,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr (mode == FL_WRITING && (cfip->EraseSuspend & 0x2)))) goto sleep; - /* We could check to see if we're trying to access the sector - * that is currently being erased. However, no user will try - * anything like that so we just wait for the timeout. 
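/*
 * The cfi_cmdset_0001/0002 erase-suspend changes here record the erasing
 * block as an address plus a mask of ~(len - 1), and refuse to suspend the
 * erase when a new read/write falls inside that block. A small standalone
 * sketch of that mask arithmetic, assuming the block length is a power of
 * two (as CFI erase regions are); struct and function names are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

struct erase_state {
    unsigned long block_addr;   /* start of the block being erased */
    unsigned long block_mask;   /* ~(block_len - 1), block_len a power of 2 */
};

static void erase_start(struct erase_state *st, unsigned long adr,
                        unsigned long len)
{
    st->block_mask = ~(len - 1);
    st->block_addr = adr & st->block_mask;
}

/* True if an access at adr would touch the block currently being erased. */
static bool hits_erasing_block(const struct erase_state *st, unsigned long adr)
{
    return (adr & st->block_mask) == st->block_addr;
}

int main(void)
{
    struct erase_state st;

    erase_start(&st, 0x20000, 0x8000);  /* erase a 32 KiB block */
    printf("0x21000 in block: %d\n", hits_erasing_block(&st, 0x21000)); /* 1 */
    printf("0x30000 in block: %d\n", hits_erasing_block(&st, 0x30000)); /* 0 */
    return 0;
}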
*/ + /* Do not allow suspend iff read/write to EB address */ + if ((adr & chip->in_progress_block_mask) == + chip->in_progress_block_addr) + goto sleep; /* Erase suspend */ /* It's harmless to issue the Erase-Suspend and Erase-Resume @@ -2267,6 +2268,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) chip->state = FL_ERASING; chip->erase_suspended = 0; chip->in_progress_block_addr = adr; + chip->in_progress_block_mask = ~(map->size - 1); INVALIDATE_CACHE_UDELAY(map, chip, adr, map->size, @@ -2356,6 +2358,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, chip->state = FL_ERASING; chip->erase_suspended = 0; chip->in_progress_block_addr = adr; + chip->in_progress_block_mask = ~(len - 1); INVALIDATE_CACHE_UDELAY(map, chip, adr, len, diff --git a/drivers/mtd/nand/core.c b/drivers/mtd/nand/core.c index d0cd6f8635d7..9c9f8936b63b 100644 --- a/drivers/mtd/nand/core.c +++ b/drivers/mtd/nand/core.c @@ -162,7 +162,6 @@ int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo) ret = nanddev_erase(nand, &pos); if (ret) { einfo->fail_addr = nanddev_pos_to_offs(nand, &pos); - einfo->state = MTD_ERASE_FAILED; return ret; } @@ -170,8 +169,6 @@ int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo) nanddev_pos_next_eraseblock(nand, &pos); } - einfo->state = MTD_ERASE_DONE; - return 0; } EXPORT_SYMBOL_GPL(nanddev_mtd_erase); diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index 10e953218948..1d779a35ac8e 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -2299,29 +2299,20 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc, /* * The legacy "num-cs" property indicates the number of CS on the only * chip connected to the controller (legacy bindings does not support - * more than one chip). CS are only incremented one by one while the RB - * pin is always the #0. + * more than one chip). The CS and RB pins are always the #0. * * When not using legacy bindings, a couple of "reg" and "nand-rb" * properties must be filled. For each chip, expressed as a subnode, * "reg" points to the CS lines and "nand-rb" to the RB line. 
*/ - if (pdata) { + if (pdata || nfc->caps->legacy_of_bindings) { nsels = 1; - } else if (nfc->caps->legacy_of_bindings && - !of_get_property(np, "num-cs", &nsels)) { - dev_err(dev, "missing num-cs property\n"); - return -EINVAL; - } else if (!of_get_property(np, "reg", &nsels)) { - dev_err(dev, "missing reg property\n"); - return -EINVAL; - } - - if (!pdata) - nsels /= sizeof(u32); - if (!nsels) { - dev_err(dev, "invalid reg property size\n"); - return -EINVAL; + } else { + nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32)); + if (nsels <= 0) { + dev_err(dev, "missing/invalid reg property\n"); + return -EINVAL; + } } /* Alloc the nand chip structure */ diff --git a/drivers/mtd/nand/raw/tango_nand.c b/drivers/mtd/nand/raw/tango_nand.c index f54518ffb36a..f2052fae21c7 100644 --- a/drivers/mtd/nand/raw/tango_nand.c +++ b/drivers/mtd/nand/raw/tango_nand.c @@ -645,7 +645,7 @@ static int tango_nand_probe(struct platform_device *pdev) writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE); - clk = clk_get(&pdev->dev, NULL); + clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) return PTR_ERR(clk); diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c index 4b8e9183489a..5872f31eaa60 100644 --- a/drivers/mtd/spi-nor/cadence-quadspi.c +++ b/drivers/mtd/spi-nor/cadence-quadspi.c @@ -501,7 +501,9 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf, void __iomem *reg_base = cqspi->iobase; void __iomem *ahb_base = cqspi->ahb_base; unsigned int remaining = n_rx; + unsigned int mod_bytes = n_rx % 4; unsigned int bytes_to_read = 0; + u8 *rxbuf_end = rxbuf + n_rx; int ret = 0; writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR); @@ -530,11 +532,24 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf, } while (bytes_to_read != 0) { + unsigned int word_remain = round_down(remaining, 4); + bytes_to_read *= cqspi->fifo_width; bytes_to_read = bytes_to_read > remaining ? 
remaining : bytes_to_read; - ioread32_rep(ahb_base, rxbuf, - DIV_ROUND_UP(bytes_to_read, 4)); + bytes_to_read = round_down(bytes_to_read, 4); + /* Read 4 byte word chunks then single bytes */ + if (bytes_to_read) { + ioread32_rep(ahb_base, rxbuf, + (bytes_to_read / 4)); + } else if (!word_remain && mod_bytes) { + unsigned int temp = ioread32(ahb_base); + + bytes_to_read = mod_bytes; + memcpy(rxbuf, &temp, min((unsigned int) + (rxbuf_end - rxbuf), + bytes_to_read)); + } rxbuf += bytes_to_read; remaining -= bytes_to_read; bytes_to_read = cqspi_get_rd_sram_level(cqspi); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b7b113018853..718e4914e3a0 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1660,8 +1660,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, } /* switch(bond_mode) */ #ifdef CONFIG_NET_POLL_CONTROLLER - slave_dev->npinfo = bond->dev->npinfo; - if (slave_dev->npinfo) { + if (bond->dev->npinfo) { if (slave_enable_netpoll(new_slave)) { netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n"); res = -EBUSY; diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c index ac7694c71266..a036c490b7ce 100644 --- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c +++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c @@ -285,10 +285,18 @@ static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip, struct sk_buff_head *rxq) { u16 buf[4] = { 0 }, status, seq_id; - u64 ns, timelo, timehi; struct skb_shared_hwtstamps *shwt; + struct sk_buff_head received; + u64 ns, timelo, timehi; + unsigned long flags; int err; + /* The latched timestamp belongs to one of the received frames. */ + __skb_queue_head_init(&received); + spin_lock_irqsave(&rxq->lock, flags); + skb_queue_splice_tail_init(rxq, &received); + spin_unlock_irqrestore(&rxq->lock, flags); + mutex_lock(&chip->reg_lock); err = mv88e6xxx_port_ptp_read(chip, ps->port_id, reg, buf, ARRAY_SIZE(buf)); @@ -311,7 +319,7 @@ static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip, /* Since the device can only handle one time stamp at a time, * we purge any extra frames from the queue. 
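/*
 * The cadence-quadspi change in this merge reads the SRAM FIFO in 4-byte
 * words and, once fewer than four bytes remain, does one final 32-bit read
 * and copies only the needed tail bytes with memcpy(). A host-side sketch of
 * the same chunking, with the FIFO register replaced by a plain buffer padded
 * to a word boundary; the names here are illustrative.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for a 32-bit FIFO register read. */
static uint32_t fifo_read32(const uint8_t **src)
{
    uint32_t w;

    memcpy(&w, *src, 4);
    *src += 4;
    return w;
}

static void fifo_read(uint8_t *dst, size_t n, const uint8_t *src)
{
    size_t words = n / 4, tail = n % 4;

    while (words--) {
        uint32_t w = fifo_read32(&src);
        memcpy(dst, &w, 4);
        dst += 4;
    }
    if (tail) {                 /* last partial word: copy only n % 4 bytes */
        uint32_t w = fifo_read32(&src);
        memcpy(dst, &w, tail);
    }
}

int main(void)
{
    const char flash[12] = "ABCDEFGHI"; /* 9 data bytes, padded to a word */
    uint8_t out[10] = { 0 };

    fifo_read(out, 9, (const uint8_t *)flash);
    printf("%s\n", (char *)out);        /* prints ABCDEFGHI */
    return 0;
}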
*/ - for ( ; skb; skb = skb_dequeue(rxq)) { + for ( ; skb; skb = __skb_dequeue(&received)) { if (mv88e6xxx_ts_valid(status) && seq_match(skb, seq_id)) { ns = timehi << 16 | timelo; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index 7ea72ef11a55..d272dc6984ac 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -1321,6 +1321,10 @@ #define MDIO_VEND2_AN_STAT 0x8002 #endif +#ifndef MDIO_VEND2_PMA_CDR_CONTROL +#define MDIO_VEND2_PMA_CDR_CONTROL 0x8056 +#endif + #ifndef MDIO_CTRL1_SPEED1G #define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100) #endif @@ -1369,6 +1373,10 @@ #define XGBE_AN_CL37_TX_CONFIG_MASK 0x08 #define XGBE_AN_CL37_MII_CTRL_8BIT 0x0100 +#define XGBE_PMA_CDR_TRACK_EN_MASK 0x01 +#define XGBE_PMA_CDR_TRACK_EN_OFF 0x00 +#define XGBE_PMA_CDR_TRACK_EN_ON 0x01 + /* Bit setting and getting macros * The get macro will extract the current bit field value from within * the variable diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c index 7d128be61310..b91143947ed2 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c @@ -519,6 +519,22 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata) "debugfs_create_file failed\n"); } + if (pdata->vdata->an_cdr_workaround) { + pfile = debugfs_create_bool("an_cdr_workaround", 0600, + pdata->xgbe_debugfs, + &pdata->debugfs_an_cdr_workaround); + if (!pfile) + netdev_err(pdata->netdev, + "debugfs_create_bool failed\n"); + + pfile = debugfs_create_bool("an_cdr_track_early", 0600, + pdata->xgbe_debugfs, + &pdata->debugfs_an_cdr_track_early); + if (!pfile) + netdev_err(pdata->netdev, + "debugfs_create_bool failed\n"); + } + kfree(buf); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index 795e556d4a3f..441d0973957b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -349,6 +349,7 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata) XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1); /* Call MDIO/PHY initialization routine */ + pdata->debugfs_an_cdr_workaround = pdata->vdata->an_cdr_workaround; ret = pdata->phy_if.phy_init(pdata); if (ret) return ret; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 072b9f664597..1b45cd73a258 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -432,11 +432,16 @@ static void xgbe_an73_disable(struct xgbe_prv_data *pdata) xgbe_an73_set(pdata, false, false); xgbe_an73_disable_interrupts(pdata); + pdata->an_start = 0; + netif_dbg(pdata, link, pdata->netdev, "CL73 AN disabled\n"); } static void xgbe_an_restart(struct xgbe_prv_data *pdata) { + if (pdata->phy_if.phy_impl.an_pre) + pdata->phy_if.phy_impl.an_pre(pdata); + switch (pdata->an_mode) { case XGBE_AN_MODE_CL73: case XGBE_AN_MODE_CL73_REDRV: @@ -453,6 +458,9 @@ static void xgbe_an_restart(struct xgbe_prv_data *pdata) static void xgbe_an_disable(struct xgbe_prv_data *pdata) { + if (pdata->phy_if.phy_impl.an_post) + pdata->phy_if.phy_impl.an_post(pdata); + switch (pdata->an_mode) { case XGBE_AN_MODE_CL73: case XGBE_AN_MODE_CL73_REDRV: @@ -505,11 +513,11 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata, XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); - if 
(pdata->phy_if.phy_impl.kr_training_post) - pdata->phy_if.phy_impl.kr_training_post(pdata); - netif_dbg(pdata, link, pdata->netdev, "KR training initiated\n"); + + if (pdata->phy_if.phy_impl.kr_training_post) + pdata->phy_if.phy_impl.kr_training_post(pdata); } return XGBE_AN_PAGE_RECEIVED; @@ -637,11 +645,11 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata) return XGBE_AN_NO_LINK; } - xgbe_an73_disable(pdata); + xgbe_an_disable(pdata); xgbe_switch_mode(pdata); - xgbe_an73_restart(pdata); + xgbe_an_restart(pdata); return XGBE_AN_INCOMPAT_LINK; } @@ -820,6 +828,9 @@ static void xgbe_an37_state_machine(struct xgbe_prv_data *pdata) pdata->an_result = pdata->an_state; pdata->an_state = XGBE_AN_READY; + if (pdata->phy_if.phy_impl.an_post) + pdata->phy_if.phy_impl.an_post(pdata); + netif_dbg(pdata, link, pdata->netdev, "CL37 AN result: %s\n", xgbe_state_as_string(pdata->an_result)); } @@ -903,6 +914,9 @@ again: pdata->kx_state = XGBE_RX_BPA; pdata->an_start = 0; + if (pdata->phy_if.phy_impl.an_post) + pdata->phy_if.phy_impl.an_post(pdata); + netif_dbg(pdata, link, pdata->netdev, "CL73 AN result: %s\n", xgbe_state_as_string(pdata->an_result)); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c index eb23f9ba1a9a..82d1f416ee2a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c @@ -456,6 +456,7 @@ static const struct xgbe_version_data xgbe_v2a = { .irq_reissue_support = 1, .tx_desc_prefetch = 5, .rx_desc_prefetch = 5, + .an_cdr_workaround = 1, }; static const struct xgbe_version_data xgbe_v2b = { @@ -470,6 +471,7 @@ static const struct xgbe_version_data xgbe_v2b = { .irq_reissue_support = 1, .tx_desc_prefetch = 5, .rx_desc_prefetch = 5, + .an_cdr_workaround = 1, }; static const struct pci_device_id xgbe_pci_table[] = { diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index 3304a291aa96..aac884314000 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -147,6 +147,14 @@ /* Rate-change complete wait/retry count */ #define XGBE_RATECHANGE_COUNT 500 +/* CDR delay values for KR support (in usec) */ +#define XGBE_CDR_DELAY_INIT 10000 +#define XGBE_CDR_DELAY_INC 10000 +#define XGBE_CDR_DELAY_MAX 100000 + +/* RRC frequency during link status check */ +#define XGBE_RRC_FREQUENCY 10 + enum xgbe_port_mode { XGBE_PORT_MODE_RSVD = 0, XGBE_PORT_MODE_BACKPLANE, @@ -245,6 +253,10 @@ enum xgbe_sfp_speed { #define XGBE_SFP_BASE_VENDOR_SN 4 #define XGBE_SFP_BASE_VENDOR_SN_LEN 16 +#define XGBE_SFP_EXTD_OPT1 1 +#define XGBE_SFP_EXTD_OPT1_RX_LOS BIT(1) +#define XGBE_SFP_EXTD_OPT1_TX_FAULT BIT(3) + #define XGBE_SFP_EXTD_DIAG 28 #define XGBE_SFP_EXTD_DIAG_ADDR_CHANGE BIT(2) @@ -324,6 +336,7 @@ struct xgbe_phy_data { unsigned int sfp_gpio_address; unsigned int sfp_gpio_mask; + unsigned int sfp_gpio_inputs; unsigned int sfp_gpio_rx_los; unsigned int sfp_gpio_tx_fault; unsigned int sfp_gpio_mod_absent; @@ -355,6 +368,10 @@ struct xgbe_phy_data { unsigned int redrv_addr; unsigned int redrv_lane; unsigned int redrv_model; + + /* KR AN support */ + unsigned int phy_cdr_notrack; + unsigned int phy_cdr_delay; }; /* I2C, MDIO and GPIO lines are muxed, so only one device at a time */ @@ -974,6 +991,49 @@ static void xgbe_phy_sfp_external_phy(struct xgbe_prv_data *pdata) phy_data->sfp_phy_avail = 1; } +static bool xgbe_phy_check_sfp_rx_los(struct xgbe_phy_data *phy_data) +{ + u8 *sfp_extd = 
phy_data->sfp_eeprom.extd; + + if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_RX_LOS)) + return false; + + if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS) + return false; + + if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_rx_los)) + return true; + + return false; +} + +static bool xgbe_phy_check_sfp_tx_fault(struct xgbe_phy_data *phy_data) +{ + u8 *sfp_extd = phy_data->sfp_eeprom.extd; + + if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_TX_FAULT)) + return false; + + if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT) + return false; + + if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_tx_fault)) + return true; + + return false; +} + +static bool xgbe_phy_check_sfp_mod_absent(struct xgbe_phy_data *phy_data) +{ + if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT) + return false; + + if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_mod_absent)) + return true; + + return false; +} + static bool xgbe_phy_belfuse_parse_quirks(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; @@ -1019,6 +1079,10 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata) if (sfp_base[XGBE_SFP_BASE_EXT_ID] != XGBE_SFP_EXT_ID_SFP) return; + /* Update transceiver signals (eeprom extd/options) */ + phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data); + phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data); + if (xgbe_phy_sfp_parse_quirks(pdata)) return; @@ -1184,7 +1248,6 @@ put: static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; - unsigned int gpio_input; u8 gpio_reg, gpio_ports[2]; int ret; @@ -1199,23 +1262,9 @@ static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata) return; } - gpio_input = (gpio_ports[1] << 8) | gpio_ports[0]; - - if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT) { - /* No GPIO, just assume the module is present for now */ - phy_data->sfp_mod_absent = 0; - } else { - if (!(gpio_input & (1 << phy_data->sfp_gpio_mod_absent))) - phy_data->sfp_mod_absent = 0; - } - - if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS) && - (gpio_input & (1 << phy_data->sfp_gpio_rx_los))) - phy_data->sfp_rx_los = 1; + phy_data->sfp_gpio_inputs = (gpio_ports[1] << 8) | gpio_ports[0]; - if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT) && - (gpio_input & (1 << phy_data->sfp_gpio_tx_fault))) - phy_data->sfp_tx_fault = 1; + phy_data->sfp_mod_absent = xgbe_phy_check_sfp_mod_absent(phy_data); } static void xgbe_phy_sfp_mod_absent(struct xgbe_prv_data *pdata) @@ -2361,7 +2410,7 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart) return 1; /* No link, attempt a receiver reset cycle */ - if (phy_data->rrc_count++) { + if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) { phy_data->rrc_count = 0; xgbe_phy_rrc(pdata); } @@ -2669,6 +2718,103 @@ static bool xgbe_phy_port_enabled(struct xgbe_prv_data *pdata) return true; } +static void xgbe_phy_cdr_track(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + if (!pdata->debugfs_an_cdr_workaround) + return; + + if (!phy_data->phy_cdr_notrack) + return; + + usleep_range(phy_data->phy_cdr_delay, + phy_data->phy_cdr_delay + 500); + + XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL, + XGBE_PMA_CDR_TRACK_EN_MASK, + XGBE_PMA_CDR_TRACK_EN_ON); + + phy_data->phy_cdr_notrack = 0; +} + +static void xgbe_phy_cdr_notrack(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + if 
(!pdata->debugfs_an_cdr_workaround) + return; + + if (phy_data->phy_cdr_notrack) + return; + + XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL, + XGBE_PMA_CDR_TRACK_EN_MASK, + XGBE_PMA_CDR_TRACK_EN_OFF); + + xgbe_phy_rrc(pdata); + + phy_data->phy_cdr_notrack = 1; +} + +static void xgbe_phy_kr_training_post(struct xgbe_prv_data *pdata) +{ + if (!pdata->debugfs_an_cdr_track_early) + xgbe_phy_cdr_track(pdata); +} + +static void xgbe_phy_kr_training_pre(struct xgbe_prv_data *pdata) +{ + if (pdata->debugfs_an_cdr_track_early) + xgbe_phy_cdr_track(pdata); +} + +static void xgbe_phy_an_post(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + switch (pdata->an_mode) { + case XGBE_AN_MODE_CL73: + case XGBE_AN_MODE_CL73_REDRV: + if (phy_data->cur_mode != XGBE_MODE_KR) + break; + + xgbe_phy_cdr_track(pdata); + + switch (pdata->an_result) { + case XGBE_AN_READY: + case XGBE_AN_COMPLETE: + break; + default: + if (phy_data->phy_cdr_delay < XGBE_CDR_DELAY_MAX) + phy_data->phy_cdr_delay += XGBE_CDR_DELAY_INC; + else + phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT; + break; + } + break; + default: + break; + } +} + +static void xgbe_phy_an_pre(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + switch (pdata->an_mode) { + case XGBE_AN_MODE_CL73: + case XGBE_AN_MODE_CL73_REDRV: + if (phy_data->cur_mode != XGBE_MODE_KR) + break; + + xgbe_phy_cdr_notrack(pdata); + break; + default: + break; + } +} + static void xgbe_phy_stop(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; @@ -2680,6 +2826,9 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata) xgbe_phy_sfp_reset(phy_data); xgbe_phy_sfp_mod_absent(pdata); + /* Reset CDR support */ + xgbe_phy_cdr_track(pdata); + /* Power off the PHY */ xgbe_phy_power_off(pdata); @@ -2712,6 +2861,9 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata) /* Start in highest supported mode */ xgbe_phy_set_mode(pdata, phy_data->start_mode); + /* Reset CDR support */ + xgbe_phy_cdr_track(pdata); + /* After starting the I2C controller, we can check for an SFP */ switch (phy_data->port_mode) { case XGBE_PORT_MODE_SFP: @@ -3019,6 +3171,8 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) } } + phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT; + /* Register for driving external PHYs */ mii = devm_mdiobus_alloc(pdata->dev); if (!mii) { @@ -3071,4 +3225,10 @@ void xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *phy_if) phy_impl->an_advertising = xgbe_phy_an_advertising; phy_impl->an_outcome = xgbe_phy_an_outcome; + + phy_impl->an_pre = xgbe_phy_an_pre; + phy_impl->an_post = xgbe_phy_an_post; + + phy_impl->kr_training_pre = xgbe_phy_kr_training_pre; + phy_impl->kr_training_post = xgbe_phy_kr_training_post; } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index ad102c8bac7b..95d4b56448c6 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -833,6 +833,7 @@ struct xgbe_hw_if { /* This structure represents implementation specific routines for an * implementation of a PHY. All routines are required unless noted below. 
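/*
 * The xgbe changes here add optional an_pre()/an_post() callbacks to the PHY
 * implementation interface and only invoke them when the implementation
 * filled them in. Below is a minimal sketch of that optional-hook pattern
 * with an ops struct and NULL checks; the struct and function names are made
 * up for illustration and are not the driver's API.
 */
#include <stdio.h>

struct phy_ops {
    /* required */
    int (*start)(void);
    /* optional: may be left NULL by an implementation */
    void (*an_pre)(void);
    void (*an_post)(void);
};

static int demo_start(void) { puts("start"); return 0; }
static void demo_an_pre(void) { puts("an_pre: stop CDR tracking"); }

static void an_restart(const struct phy_ops *ops)
{
    if (ops->an_pre)            /* call the hook only if it was provided */
        ops->an_pre();
    puts("restart auto-negotiation");
    if (ops->an_post)
        ops->an_post();
}

int main(void)
{
    /* This implementation provides an_pre but not an_post. */
    struct phy_ops ops = { .start = demo_start, .an_pre = demo_an_pre };

    ops.start();
    an_restart(&ops);
    return 0;
}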
* Optional routines: + * an_pre, an_post * kr_training_pre, kr_training_post */ struct xgbe_phy_impl_if { @@ -875,6 +876,10 @@ struct xgbe_phy_impl_if { /* Process results of auto-negotiation */ enum xgbe_mode (*an_outcome)(struct xgbe_prv_data *); + /* Pre/Post auto-negotiation support */ + void (*an_pre)(struct xgbe_prv_data *); + void (*an_post)(struct xgbe_prv_data *); + /* Pre/Post KR training enablement support */ void (*kr_training_pre)(struct xgbe_prv_data *); void (*kr_training_post)(struct xgbe_prv_data *); @@ -989,6 +994,7 @@ struct xgbe_version_data { unsigned int irq_reissue_support; unsigned int tx_desc_prefetch; unsigned int rx_desc_prefetch; + unsigned int an_cdr_workaround; }; struct xgbe_vxlan_data { @@ -1257,6 +1263,9 @@ struct xgbe_prv_data { unsigned int debugfs_xprop_reg; unsigned int debugfs_xi2c_reg; + + bool debugfs_an_cdr_workaround; + bool debugfs_an_cdr_track_early; }; /* Function prototypes*/ diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index f9a3c1a76d5d..f33b25fbca63 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -2144,14 +2144,21 @@ static const struct net_device_ops bcm_sysport_netdev_ops = { .ndo_select_queue = bcm_sysport_select_queue, }; -static int bcm_sysport_map_queues(struct net_device *dev, +static int bcm_sysport_map_queues(struct notifier_block *nb, struct dsa_notifier_register_info *info) { - struct bcm_sysport_priv *priv = netdev_priv(dev); struct bcm_sysport_tx_ring *ring; + struct bcm_sysport_priv *priv; struct net_device *slave_dev; unsigned int num_tx_queues; unsigned int q, start, port; + struct net_device *dev; + + priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier); + if (priv->netdev != info->master) + return 0; + + dev = info->master; /* We can't be setting up queue inspection for non directly attached * switches @@ -2174,11 +2181,12 @@ static int bcm_sysport_map_queues(struct net_device *dev, if (priv->is_lite) netif_set_real_num_tx_queues(slave_dev, slave_dev->num_tx_queues / 2); + num_tx_queues = slave_dev->real_num_tx_queues; if (priv->per_port_num_tx_queues && priv->per_port_num_tx_queues != num_tx_queues) - netdev_warn(slave_dev, "asymetric number of per-port queues\n"); + netdev_warn(slave_dev, "asymmetric number of per-port queues\n"); priv->per_port_num_tx_queues = num_tx_queues; @@ -2201,7 +2209,7 @@ static int bcm_sysport_map_queues(struct net_device *dev, return 0; } -static int bcm_sysport_dsa_notifier(struct notifier_block *unused, +static int bcm_sysport_dsa_notifier(struct notifier_block *nb, unsigned long event, void *ptr) { struct dsa_notifier_register_info *info; @@ -2211,7 +2219,7 @@ static int bcm_sysport_dsa_notifier(struct notifier_block *unused, info = ptr; - return notifier_from_errno(bcm_sysport_map_queues(info->master, info)); + return notifier_from_errno(bcm_sysport_map_queues(nb, info)); } #define REV_FMT "v%2x.%02x" diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 1f622ca2a64f..8ba14ae00e8f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1927,22 +1927,39 @@ static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen) return retval; } -static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen) +static void bnxt_get_pkgver(struct net_device *dev) { + struct bnxt *bp = netdev_priv(dev); u16 index = 0; - 
u32 datalen; + char *pkgver; + u32 pkglen; + u8 *pkgbuf; + int len; if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG, BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, - &index, NULL, &datalen) != 0) - return NULL; + &index, NULL, &pkglen) != 0) + return; - memset(buf, 0, buflen); - if (bnxt_get_nvram_item(dev, index, 0, datalen, buf) != 0) - return NULL; + pkgbuf = kzalloc(pkglen, GFP_KERNEL); + if (!pkgbuf) { + dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n", + pkglen); + return; + } + + if (bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf)) + goto err; - return bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, buf, - datalen); + pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf, + pkglen); + if (pkgver && *pkgver != 0 && isdigit(*pkgver)) { + len = strlen(bp->fw_ver_str); + snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1, + "/pkg %s", pkgver); + } +err: + kfree(pkgbuf); } static int bnxt_get_eeprom(struct net_device *dev, @@ -2615,22 +2632,10 @@ void bnxt_ethtool_init(struct bnxt *bp) struct hwrm_selftest_qlist_input req = {0}; struct bnxt_test_info *test_info; struct net_device *dev = bp->dev; - char *pkglog; int i, rc; - pkglog = kzalloc(BNX_PKG_LOG_MAX_LENGTH, GFP_KERNEL); - if (pkglog) { - char *pkgver; - int len; + bnxt_get_pkgver(dev); - pkgver = bnxt_get_pkgver(dev, pkglog, BNX_PKG_LOG_MAX_LENGTH); - if (pkgver && *pkgver != 0 && isdigit(*pkgver)) { - len = strlen(bp->fw_ver_str); - snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1, - "/pkg %s", pkgver); - } - kfree(pkglog); - } if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp)) return; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h index 73f2249555b5..83444811d3c6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h @@ -59,8 +59,6 @@ enum bnxt_nvm_directory_type { #define BNX_DIR_ATTR_NO_CHKSUM (1 << 0) #define BNX_DIR_ATTR_PROP_STREAM (1 << 1) -#define BNX_PKG_LOG_MAX_LENGTH 4096 - enum bnxnvm_pkglog_field_index { BNX_PKG_LOG_FIELD_IDX_INSTALLED_TIMESTAMP = 0, BNX_PKG_LOG_FIELD_IDX_PKG_DESCRIPTION = 1, diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c index 4df282ed22c7..0beee2cc2ddd 100644 --- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c +++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c @@ -61,7 +61,7 @@ static const char hw_stat_gstrings[][ETH_GSTRING_LEN] = { static const char tx_fw_stat_gstrings[][ETH_GSTRING_LEN] = { "tx-single-collision", "tx-multiple-collision", - "tx-late-collsion", + "tx-late-collision", "tx-aborted-frames", "tx-lost-frames", "tx-carrier-sense-errors", diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index 3e62692af011..fa5b30f547f6 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h @@ -87,7 +87,7 @@ do { \ #define HNAE_AE_REGISTER 0x1 -#define RCB_RING_NAME_LEN 16 +#define RCB_RING_NAME_LEN (IFNAMSIZ + 4) #define HNAE_LOWEST_LATENCY_COAL_PARAM 30 #define HNAE_LOW_LATENCY_COAL_PARAM 80 diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index aad5658d79d5..6e8d6a6f6aaf 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -794,46 +794,61 @@ static int ibmvnic_login(struct net_device *netdev) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); 
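/*
 * The bnxt_get_pkgver() rework above sizes its scratch buffer from the NVRAM
 * item length and then appends "/pkg <version>" to the existing firmware
 * string with snprintf(), bounded by the space left. A standalone sketch of
 * that bounded append; the buffer size and version strings are illustrative.
 */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

#define FW_VER_STR_LEN 32

/* Append "/pkg <pkgver>" to fw_ver_str without overrunning it. */
static void append_pkgver(char *fw_ver_str, const char *pkgver)
{
    size_t len = strlen(fw_ver_str);

    if (!pkgver || !isdigit((unsigned char)pkgver[0]))
        return;                 /* nothing usable to append */

    snprintf(fw_ver_str + len, FW_VER_STR_LEN - len, "/pkg %s", pkgver);
}

int main(void)
{
    char fw_ver_str[FW_VER_STR_LEN] = "218.0.152.0";

    append_pkgver(fw_ver_str, "218.1.186.0");
    printf("%s\n", fw_ver_str); /* 218.0.152.0/pkg 218.1.186.0 */
    return 0;
}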
unsigned long timeout = msecs_to_jiffies(30000); - struct device *dev = &adapter->vdev->dev; + int retry_count = 0; int rc; do { - if (adapter->renegotiate) { - adapter->renegotiate = false; + if (retry_count > IBMVNIC_MAX_QUEUES) { + netdev_warn(netdev, "Login attempts exceeded\n"); + return -1; + } + + adapter->init_done_rc = 0; + reinit_completion(&adapter->init_done); + rc = send_login(adapter); + if (rc) { + netdev_warn(netdev, "Unable to login\n"); + return rc; + } + + if (!wait_for_completion_timeout(&adapter->init_done, + timeout)) { + netdev_warn(netdev, "Login timed out\n"); + return -1; + } + + if (adapter->init_done_rc == PARTIALSUCCESS) { + retry_count++; release_sub_crqs(adapter, 1); + adapter->init_done_rc = 0; reinit_completion(&adapter->init_done); send_cap_queries(adapter); if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { - dev_err(dev, "Capabilities query timeout\n"); + netdev_warn(netdev, + "Capabilities query timed out\n"); return -1; } + rc = init_sub_crqs(adapter); if (rc) { - dev_err(dev, - "Initialization of SCRQ's failed\n"); + netdev_warn(netdev, + "SCRQ initialization failed\n"); return -1; } + rc = init_sub_crq_irqs(adapter); if (rc) { - dev_err(dev, - "Initialization of SCRQ's irqs failed\n"); + netdev_warn(netdev, + "SCRQ irq initialization failed\n"); return -1; } - } - - reinit_completion(&adapter->init_done); - rc = send_login(adapter); - if (rc) { - dev_err(dev, "Unable to attempt device login\n"); - return rc; - } else if (!wait_for_completion_timeout(&adapter->init_done, - timeout)) { - dev_err(dev, "Login timeout\n"); + } else if (adapter->init_done_rc) { + netdev_warn(netdev, "Adapter login failed\n"); return -1; } - } while (adapter->renegotiate); + } while (adapter->init_done_rc == PARTIALSUCCESS); /* handle pending MAC address changes after successful login */ if (adapter->mac_change_pending) { @@ -1034,16 +1049,14 @@ static int __ibmvnic_open(struct net_device *netdev) netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i); if (prev_state == VNIC_CLOSED) enable_irq(adapter->rx_scrq[i]->irq); - else - enable_scrq_irq(adapter, adapter->rx_scrq[i]); + enable_scrq_irq(adapter, adapter->rx_scrq[i]); } for (i = 0; i < adapter->req_tx_queues; i++) { netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i); if (prev_state == VNIC_CLOSED) enable_irq(adapter->tx_scrq[i]->irq); - else - enable_scrq_irq(adapter, adapter->tx_scrq[i]); + enable_scrq_irq(adapter, adapter->tx_scrq[i]); } rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP); @@ -1115,7 +1128,7 @@ static void clean_rx_pools(struct ibmvnic_adapter *adapter) if (!adapter->rx_pool) return; - rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); + rx_scrqs = adapter->num_active_rx_pools; rx_entries = adapter->req_rx_add_entries_per_subcrq; /* Free any remaining skbs in the rx buffer pools */ @@ -1164,7 +1177,7 @@ static void clean_tx_pools(struct ibmvnic_adapter *adapter) if (!adapter->tx_pool || !adapter->tso_pool) return; - tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); + tx_scrqs = adapter->num_active_tx_pools; /* Free any remaining skbs in the tx buffer pools */ for (i = 0; i < tx_scrqs; i++) { @@ -1184,6 +1197,7 @@ static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter) if (adapter->tx_scrq[i]->irq) { netdev_dbg(netdev, "Disabling tx_scrq[%d] irq\n", i); + disable_scrq_irq(adapter, adapter->tx_scrq[i]); disable_irq(adapter->tx_scrq[i]->irq); } } @@ -1193,6 +1207,7 @@ static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter) if 
(adapter->rx_scrq[i]->irq) { netdev_dbg(netdev, "Disabling rx_scrq[%d] irq\n", i); + disable_scrq_irq(adapter, adapter->rx_scrq[i]); disable_irq(adapter->rx_scrq[i]->irq); } } @@ -1828,7 +1843,8 @@ static int do_reset(struct ibmvnic_adapter *adapter, for (i = 0; i < adapter->req_rx_queues; i++) napi_schedule(&adapter->napi[i]); - if (adapter->reset_reason != VNIC_RESET_FAILOVER) + if (adapter->reset_reason != VNIC_RESET_FAILOVER && + adapter->reset_reason != VNIC_RESET_CHANGE_PARAM) netdev_notify_peers(netdev); netif_carrier_on(netdev); @@ -2601,12 +2617,19 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter, { struct device *dev = &adapter->vdev->dev; unsigned long rc; + u64 val; if (scrq->hw_irq > 0x100000000ULL) { dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); return 1; } + val = (0xff000000) | scrq->hw_irq; + rc = plpar_hcall_norets(H_EOI, val); + if (rc) + dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", + val, rc); + rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); if (rc) @@ -3170,7 +3193,7 @@ static int send_version_xchg(struct ibmvnic_adapter *adapter) struct vnic_login_client_data { u8 type; __be16 len; - char name; + char name[]; } __packed; static int vnic_client_data_len(struct ibmvnic_adapter *adapter) @@ -3199,21 +3222,21 @@ static void vnic_add_client_data(struct ibmvnic_adapter *adapter, vlcd->type = 1; len = strlen(os_name) + 1; vlcd->len = cpu_to_be16(len); - strncpy(&vlcd->name, os_name, len); - vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len); + strncpy(vlcd->name, os_name, len); + vlcd = (struct vnic_login_client_data *)(vlcd->name + len); /* Type 2 - LPAR name */ vlcd->type = 2; len = strlen(utsname()->nodename) + 1; vlcd->len = cpu_to_be16(len); - strncpy(&vlcd->name, utsname()->nodename, len); - vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len); + strncpy(vlcd->name, utsname()->nodename, len); + vlcd = (struct vnic_login_client_data *)(vlcd->name + len); /* Type 3 - device name */ vlcd->type = 3; len = strlen(adapter->netdev->name) + 1; vlcd->len = cpu_to_be16(len); - strncpy(&vlcd->name, adapter->netdev->name, len); + strncpy(vlcd->name, adapter->netdev->name, len); } static int send_login(struct ibmvnic_adapter *adapter) @@ -3942,7 +3965,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, * to resend the login buffer with fewer queues requested. 
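/*
 * The ibmvnic_login() rework here replaces the open-ended renegotiate loop
 * with a bounded retry: each attempt waits on a completion with a timeout, a
 * partial-success reply releases the sub-CRQs and retries, and the loop gives
 * up after a fixed number of attempts. A simplified sketch of that control
 * flow with the device interaction stubbed out; the retry limit and return
 * codes below are illustrative, not the driver's values.
 */
#include <stdio.h>

#define MAX_LOGIN_RETRIES 5

enum login_rc { LOGIN_OK, LOGIN_PARTIAL, LOGIN_TIMEOUT };

/* Stub for "send login and wait for the completion": succeeds on try 3. */
static enum login_rc send_login_and_wait(int attempt)
{
    return attempt < 2 ? LOGIN_PARTIAL : LOGIN_OK;
}

static int do_login(void)
{
    int retries = 0;
    enum login_rc rc;

    do {
        if (retries > MAX_LOGIN_RETRIES) {
            fprintf(stderr, "login attempts exceeded\n");
            return -1;
        }

        rc = send_login_and_wait(retries);
        if (rc == LOGIN_TIMEOUT) {
            fprintf(stderr, "login timed out\n");
            return -1;
        }
        if (rc == LOGIN_PARTIAL) {
            /* renegotiate capabilities, then try again */
            retries++;
        }
    } while (rc == LOGIN_PARTIAL);

    return 0;
}

int main(void)
{
    printf("login %s\n", do_login() ? "failed" : "succeeded");
    return 0;
}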
*/ if (login_rsp_crq->generic.rc.code) { - adapter->renegotiate = true; + adapter->init_done_rc = login_rsp_crq->generic.rc.code; complete(&adapter->init_done); return 0; } diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 99c0b58c2c39..22391e8805f6 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -1035,7 +1035,6 @@ struct ibmvnic_adapter { struct ibmvnic_sub_crq_queue **tx_scrq; struct ibmvnic_sub_crq_queue **rx_scrq; - bool renegotiate; /* rx structs */ struct napi_struct *napi; diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 5b13ca1bd85f..7dc5f045e969 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -586,7 +586,7 @@ struct ice_sw_rule_lg_act { #define ICE_LG_ACT_MIRROR_VSI_ID_S 3 #define ICE_LG_ACT_MIRROR_VSI_ID_M (0x3FF << ICE_LG_ACT_MIRROR_VSI_ID_S) - /* Action type = 5 - Large Action */ + /* Action type = 5 - Generic Value */ #define ICE_LG_ACT_GENERIC 0x5 #define ICE_LG_ACT_GENERIC_VALUE_S 3 #define ICE_LG_ACT_GENERIC_VALUE_M (0xFFFF << ICE_LG_ACT_GENERIC_VALUE_S) diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 21977ec984c4..71d032cc5fa7 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -78,6 +78,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, struct ice_aq_desc desc; enum ice_status status; u16 flags; + u8 i; cmd = &desc.params.mac_read; @@ -98,8 +99,16 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, return ICE_ERR_CFG; } - ether_addr_copy(hw->port_info->mac.lan_addr, resp->mac_addr); - ether_addr_copy(hw->port_info->mac.perm_addr, resp->mac_addr); + /* A single port can report up to two (LAN and WoL) addresses */ + for (i = 0; i < cmd->num_addr; i++) + if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) { + ether_addr_copy(hw->port_info->mac.lan_addr, + resp[i].mac_addr); + ether_addr_copy(hw->port_info->mac.perm_addr, + resp[i].mac_addr); + break; + } + return 0; } @@ -464,9 +473,12 @@ enum ice_status ice_init_hw(struct ice_hw *hw) if (status) goto err_unroll_sched; - /* Get port MAC information */ - mac_buf_len = sizeof(struct ice_aqc_manage_mac_read_resp); - mac_buf = devm_kzalloc(ice_hw_to_dev(hw), mac_buf_len, GFP_KERNEL); + /* Get MAC information */ + /* A single port can report up to two (LAN and WoL) addresses */ + mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2, + sizeof(struct ice_aqc_manage_mac_read_resp), + GFP_KERNEL); + mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp); if (!mac_buf) { status = ICE_ERR_NO_MEMORY; diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 1b9e2ef48a9d..499904874b3f 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -121,8 +121,6 @@ #define PFINT_FW_CTL_CAUSE_ENA_S 30 #define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S) #define PFINT_OICR 0x0016CA00 -#define PFINT_OICR_INTEVENT_S 0 -#define PFINT_OICR_INTEVENT_M BIT(PFINT_OICR_INTEVENT_S) #define PFINT_OICR_HLP_RDY_S 14 #define PFINT_OICR_HLP_RDY_M BIT(PFINT_OICR_HLP_RDY_S) #define PFINT_OICR_CPM_RDY_S 15 diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 210b7910f1cd..5299caf55a7f 100644 --- 
a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -1722,9 +1722,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) oicr = rd32(hw, PFINT_OICR); ena_mask = rd32(hw, PFINT_OICR_ENA); - if (!(oicr & PFINT_OICR_INTEVENT_M)) - goto ena_intr; - if (oicr & PFINT_OICR_GRST_M) { u32 reset; /* we have a reset warning */ @@ -1782,7 +1779,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) } ret = IRQ_HANDLED; -ena_intr: /* re-enable interrupt causes that are not handled during this pass */ wr32(hw, PFINT_OICR_ENA, ena_mask); if (!test_bit(__ICE_DOWN, pf->state)) { diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index f16ff3e4a840..2e6c1d92cc88 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -751,14 +751,14 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi, u16 num_added = 0; u32 temp; + *num_nodes_added = 0; + if (!num_nodes) return status; if (!parent || layer < hw->sw_entry_point_layer) return ICE_ERR_PARAM; - *num_nodes_added = 0; - /* max children per node per layer */ max_child_nodes = le16_to_cpu(hw->layer_info[parent->tx_sched_layer].max_children); diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index c1c0bc30a16d..cce7ada89255 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1700,7 +1700,22 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue, WARN_ON(hw->mac.type != e1000_i210); WARN_ON(queue < 0 || queue > 1); - if (enable) { + if (enable || queue == 0) { + /* i210 does not allow the queue 0 to be in the Strict + * Priority mode while the Qav mode is enabled, so, + * instead of disabling strict priority mode, we give + * queue 0 the maximum of credits possible. + * + * See section 8.12.19 of the i210 datasheet, "Note: + * Queue0 QueueMode must be set to 1b when + * TransmitMode is set to Qav." 
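/*
 * The igb change here keeps queue 0 in stream-reservation mode even when CBS
 * is being disabled, because the i210 does not allow queue 0 in strict
 * priority while Qav is active; instead it programs that queue with
 * effectively unlimited credits. A stripped-down sketch of that parameter
 * selection: only the branch structure mirrors the hunk, the numbers are the
 * ones visible in it, and the function and struct names are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

#define ETH_FRAME_LEN 1514

struct cbs_params {
    int idleslope;  /* kbps */
    int hicredit;   /* bytes */
};

/*
 * Queue 0 cannot fall back to strict priority while Qav is enabled, so a
 * "disable" on queue 0 becomes "enable with maximum credits" instead.
 */
static bool cbs_select(int queue, bool enable, int req_idleslope,
                       int req_hicredit, struct cbs_params *out)
{
    if (!enable && queue != 0)
        return false;                   /* really disable the shaper */

    if (queue == 0 && !enable) {
        out->idleslope = 1000000;       /* full 1 Gb/s link speed, in kbps */
        out->hicredit = ETH_FRAME_LEN;
    } else {
        out->idleslope = req_idleslope;
        out->hicredit = req_hicredit;
    }
    return true;                        /* keep stream-reservation mode */
}

int main(void)
{
    struct cbs_params p;

    if (cbs_select(0, false, 0, 0, &p))
        printf("queue 0: idleslope=%d kbps hicredit=%d\n",
               p.idleslope, p.hicredit);
    return 0;
}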
+ */ + if (queue == 0 && !enable) { + /* max "linkspeed" idleslope in kbps */ + idleslope = 1000000; + hicredit = ETH_FRAME_LEN; + } + set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH); set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 3d9033f26eff..e3d04f226d57 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -3420,7 +3420,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) if (!err) continue; hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j); - break; + goto err_setup_tx; } return 0; diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 54a038943c06..6f410235987c 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -663,7 +663,7 @@ enum mvpp2_tag_type { #define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31) #define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \ MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1) -#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_VID_FILT_RANGE_START - 1) +#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_MAC_RANGE_START - 1) #define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30) #define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 29) #define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28) @@ -916,6 +916,8 @@ static struct { #define MVPP2_MIB_COUNTERS_STATS_DELAY (1 * HZ) +#define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40) + /* Definitions */ /* Shared Packet Processor resources */ @@ -940,6 +942,7 @@ struct mvpp2 { struct clk *pp_clk; struct clk *gop_clk; struct clk *mg_clk; + struct clk *mg_core_clk; struct clk *axi_clk; /* List of pointers to port structures */ @@ -1429,7 +1432,7 @@ static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port, if (port->priv->hw_version == MVPP21) return tx_desc->pp21.buf_dma_addr; else - return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0); + return tx_desc->pp22.buf_dma_addr_ptp & MVPP2_DESC_DMA_MASK; } static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, @@ -1447,7 +1450,7 @@ static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, } else { u64 val = (u64)addr; - tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0); + tx_desc->pp22.buf_dma_addr_ptp &= ~MVPP2_DESC_DMA_MASK; tx_desc->pp22.buf_dma_addr_ptp |= val; tx_desc->pp22.packet_offset = offset; } @@ -1507,7 +1510,7 @@ static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port, if (port->priv->hw_version == MVPP21) return rx_desc->pp21.buf_dma_addr; else - return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0); + return rx_desc->pp22.buf_dma_addr_key_hash & MVPP2_DESC_DMA_MASK; } static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port, @@ -1516,7 +1519,7 @@ static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port, if (port->priv->hw_version == MVPP21) return rx_desc->pp21.buf_cookie; else - return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0); + return rx_desc->pp22.buf_cookie_misc & MVPP2_DESC_DMA_MASK; } static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port, @@ -8766,18 +8769,27 @@ static int mvpp2_probe(struct platform_device *pdev) err = clk_prepare_enable(priv->mg_clk); if (err < 0) goto err_gop_clk; + + priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk"); + if (IS_ERR(priv->mg_core_clk)) { + priv->mg_core_clk = NULL; + } else { + 
err = clk_prepare_enable(priv->mg_core_clk); + if (err < 0) + goto err_mg_clk; + } } priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk"); if (IS_ERR(priv->axi_clk)) { err = PTR_ERR(priv->axi_clk); if (err == -EPROBE_DEFER) - goto err_gop_clk; + goto err_mg_core_clk; priv->axi_clk = NULL; } else { err = clk_prepare_enable(priv->axi_clk); if (err < 0) - goto err_gop_clk; + goto err_mg_core_clk; } /* Get system's tclk rate */ @@ -8789,9 +8801,9 @@ static int mvpp2_probe(struct platform_device *pdev) } if (priv->hw_version == MVPP22) { - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)); + err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK); if (err) - goto err_mg_clk; + goto err_axi_clk; /* Sadly, the BM pools all share the same register to * store the high 32 bits of their address. So they * must all have the same high 32 bits, which forces @@ -8799,14 +8811,14 @@ static int mvpp2_probe(struct platform_device *pdev) */ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) - goto err_mg_clk; + goto err_axi_clk; } /* Initialize network controller */ err = mvpp2_init(pdev, priv); if (err < 0) { dev_err(&pdev->dev, "failed to initialize controller\n"); - goto err_mg_clk; + goto err_axi_clk; } /* Initialize ports */ @@ -8819,7 +8831,7 @@ static int mvpp2_probe(struct platform_device *pdev) if (priv->port_count == 0) { dev_err(&pdev->dev, "no ports enabled\n"); err = -ENODEV; - goto err_mg_clk; + goto err_axi_clk; } /* Statistics must be gathered regularly because some of them (like @@ -8847,8 +8859,13 @@ err_port_probe: mvpp2_port_remove(priv->port_list[i]); i++; } -err_mg_clk: +err_axi_clk: clk_disable_unprepare(priv->axi_clk); + +err_mg_core_clk: + if (priv->hw_version == MVPP22) + clk_disable_unprepare(priv->mg_core_clk); +err_mg_clk: if (priv->hw_version == MVPP22) clk_disable_unprepare(priv->mg_clk); err_gop_clk: @@ -8895,6 +8912,7 @@ static int mvpp2_remove(struct platform_device *pdev) return 0; clk_disable_unprepare(priv->axi_clk); + clk_disable_unprepare(priv->mg_core_clk); clk_disable_unprepare(priv->mg_clk); clk_disable_unprepare(priv->pp_clk); clk_disable_unprepare(priv->gop_clk); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index bfef69235d71..211578ffc70d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -1317,7 +1317,7 @@ static int mlx4_mf_unbond(struct mlx4_dev *dev) ret = mlx4_unbond_fs_rules(dev); if (ret) - mlx4_warn(dev, "multifunction unbond for flow rules failedi (%d)\n", ret); + mlx4_warn(dev, "multifunction unbond for flow rules failed (%d)\n", ret); ret1 = mlx4_unbond_mac_table(dev); if (ret1) { mlx4_warn(dev, "multifunction unbond for MAC table failed (%d)\n", ret1); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index 3d46ef48d5b8..c641d5656b2d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -1007,12 +1007,14 @@ static void mlx5e_trust_update_sq_inline_mode(struct mlx5e_priv *priv) mutex_lock(&priv->state_lock); - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) - goto out; - new_channels.params = priv->channels.params; mlx5e_trust_update_tx_min_inline_mode(priv, &new_channels.params); + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { + priv->channels.params = new_channels.params; + goto out; + } + /* Skip if tx_min_inline is the same */ if (new_channels.params.tx_min_inline_mode == 
priv->channels.params.tx_min_inline_mode) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index d8f68e4d1018..876c3e4c6193 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -877,13 +877,14 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = { }; static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev, - struct mlx5e_params *params) + struct mlx5e_params *params, u16 mtu) { u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? MLX5_CQ_PERIOD_MODE_START_FROM_CQE : MLX5_CQ_PERIOD_MODE_START_FROM_EQE; params->hard_mtu = MLX5E_ETH_HARD_MTU; + params->sw_mtu = mtu; params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE; params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST; params->log_rq_mtu_frames = MLX5E_REP_PARAMS_LOG_RQ_SIZE; @@ -931,7 +932,7 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev, priv->channels.params.num_channels = profile->max_nch(mdev); - mlx5e_build_rep_params(mdev, &priv->channels.params); + mlx5e_build_rep_params(mdev, &priv->channels.params, netdev->mtu); mlx5e_build_rep_netdev(netdev); mlx5e_timestamp_init(priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index 707976482c09..027f54ac1ca2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c @@ -290,7 +290,7 @@ static int mlx5e_test_loopback(struct mlx5e_priv *priv) if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { netdev_err(priv->netdev, - "\tCan't perform loobpack test while device is down\n"); + "\tCan't perform loopback test while device is down\n"); return -ENODEV; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 4197001f9801..3c534fc43400 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1864,7 +1864,8 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec, } ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol); - if (modify_ip_header && ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) { + if (modify_ip_header && ip_proto != IPPROTO_TCP && + ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) { pr_info("can't offload re-write of ip proto %d\n", ip_proto); return false; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 20297108528a..5532aa3675c7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -255,7 +255,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb, dma_addr = dma_map_single(sq->pdev, skb_data, headlen, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(sq->pdev, dma_addr))) - return -ENOMEM; + goto dma_unmap_wqe_err; dseg->addr = cpu_to_be64(dma_addr); dseg->lkey = sq->mkey_be; @@ -273,7 +273,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb, dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(sq->pdev, dma_addr))) - return -ENOMEM; + goto dma_unmap_wqe_err; dseg->addr = cpu_to_be64(dma_addr); dseg->lkey = sq->mkey_be; @@ -285,6 +285,10 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb, } return num_dma; + +dma_unmap_wqe_err: + mlx5e_dma_unmap_wqe_err(sq, num_dma); + return -ENOMEM; } 
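The en_tx.c hunk above makes mlx5e_txwqe_build_dsegs() release any DMA mappings it has already created before failing, instead of leaking them with a bare return -ENOMEM. A minimal generic sketch of that map-then-unwind pattern follows; struct tx_dma_info and map_skb_segments() are illustrative names, not mlx5e's real bookkeeping.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Hypothetical bookkeeping: one entry per mapped segment. */
struct tx_dma_info {
	dma_addr_t addr;
	u32 len;
	bool is_page;	/* frag (page) mapping vs. linear (single) mapping */
};

static int map_skb_segments(struct device *dev, struct sk_buff *skb,
			    struct tx_dma_info *di, int max)
{
	unsigned int headlen = skb_headlen(skb);
	int i, n = 0;

	/* Assumes max >= 1 so the linear part always has a slot. */
	di[n].addr = dma_map_single(dev, skb->data, headlen, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, di[n].addr))
		return -ENOMEM;
	di[n].len = headlen;
	di[n].is_page = false;
	n++;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 fsz = skb_frag_size(frag);

		if (n == max)
			goto unwind;
		di[n].addr = skb_frag_dma_map(dev, frag, 0, fsz, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, di[n].addr))
			goto unwind;
		di[n].len = fsz;
		di[n].is_page = true;
		n++;
	}
	return n;

unwind:
	/* Release everything mapped so far, in reverse order. */
	while (--n >= 0) {
		if (di[n].is_page)
			dma_unmap_page(dev, di[n].addr, di[n].len, DMA_TO_DEVICE);
		else
			dma_unmap_single(dev, di[n].addr, di[n].len,
					 DMA_TO_DEVICE);
	}
	return -ENOMEM;
}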
static inline void @@ -380,17 +384,15 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, (struct mlx5_wqe_data_seg *)cseg + ds_cnt); if (unlikely(num_dma < 0)) - goto dma_unmap_wqe_err; + goto err_drop; mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma, num_bytes, num_dma, wi, cseg); return NETDEV_TX_OK; -dma_unmap_wqe_err: +err_drop: sq->stats.dropped++; - mlx5e_dma_unmap_wqe_err(sq, wi->num_dma); - dev_kfree_skb_any(skb); return NETDEV_TX_OK; @@ -645,17 +647,15 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, (struct mlx5_wqe_data_seg *)cseg + ds_cnt); if (unlikely(num_dma < 0)) - goto dma_unmap_wqe_err; + goto err_drop; mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma, num_bytes, num_dma, wi, cseg); return NETDEV_TX_OK; -dma_unmap_wqe_err: +err_drop: sq->stats.dropped++; - mlx5e_dma_unmap_wqe_err(sq, wi->num_dma); - dev_kfree_skb_any(skb); return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index de51e7c39bc8..c39c1692e674 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -187,6 +187,7 @@ static void del_sw_ns(struct fs_node *node); static void del_sw_hw_rule(struct fs_node *node); static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1, struct mlx5_flow_destination *d2); +static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns); static struct mlx5_flow_rule * find_flow_rule(struct fs_fte *fte, struct mlx5_flow_destination *dest); @@ -481,7 +482,8 @@ static void del_sw_hw_rule(struct fs_node *node) if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER && --fte->dests_size) { - modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION); + modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) | + BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS); fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT; update_fte = true; goto out; @@ -2351,23 +2353,27 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering) static int init_root_ns(struct mlx5_flow_steering *steering) { + int err; + steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX); if (!steering->root_ns) - goto cleanup; + return -ENOMEM; - if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node)) - goto cleanup; + err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node); + if (err) + goto out_err; set_prio_attrs(steering->root_ns); - - if (create_anchor_flow_table(steering)) - goto cleanup; + err = create_anchor_flow_table(steering); + if (err) + goto out_err; return 0; -cleanup: - mlx5_cleanup_fs(steering->dev); - return -ENOMEM; +out_err: + cleanup_root_ns(steering->root_ns); + steering->root_ns = NULL; + return err; } static void clean_tree(struct fs_node *node) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index c11c9a635866..4ed01182a82c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -1718,13 +1718,11 @@ __mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, struct net_device *dev = mlxsw_sp_port->dev; int err; - if (bridge_port->bridge_device->multicast_enabled) { - if (bridge_port->bridge_device->multicast_enabled) { - err = 
mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, - false); - if (err) - netdev_err(dev, "Unable to remove port from SMID\n"); - } + if (bridge_port->bridge_device->multicast_enabled && + !bridge_port->mrouter) { + err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false); + if (err) + netdev_err(dev, "Unable to remove port from SMID\n"); } err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid); diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index b3567a596fc1..80df9a5d4217 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -183,17 +183,21 @@ static int nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun, const struct tc_action *action, struct nfp_fl_pre_tunnel *pre_tun, - enum nfp_flower_tun_type tun_type) + enum nfp_flower_tun_type tun_type, + struct net_device *netdev) { size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun); struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action); u32 tmp_set_ip_tun_type_index = 0; /* Currently support one pre-tunnel so index is always 0. */ int pretun_idx = 0; + struct net *net; if (ip_tun->options_len) return -EOPNOTSUPP; + net = dev_net(netdev); + set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL; set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ; @@ -204,6 +208,7 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun, set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index); set_tun->tun_id = ip_tun->key.tun_id; + set_tun->ttl = net->ipv4.sysctl_ip_default_ttl; /* Complete pre_tunnel action. */ pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst; @@ -511,7 +516,8 @@ nfp_flower_loop_action(const struct tc_action *a, *a_len += sizeof(struct nfp_fl_pre_tunnel); set_tun = (void *)&nfp_fl->action_data[*a_len]; - err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type); + err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type, + netdev); if (err) return err; *a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun); diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index 3735c09d2112..577659f332e4 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c @@ -258,9 +258,6 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb) case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS: nfp_tunnel_keep_alive(app, skb); break; - case NFP_FLOWER_CMSG_TYPE_TUN_NEIGH: - /* Acks from the NFP that the route is added - ignore. 
*/ - break; default: nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n", type); @@ -275,18 +272,49 @@ out: void nfp_flower_cmsg_process_rx(struct work_struct *work) { + struct sk_buff_head cmsg_joined; struct nfp_flower_priv *priv; struct sk_buff *skb; priv = container_of(work, struct nfp_flower_priv, cmsg_work); + skb_queue_head_init(&cmsg_joined); + + spin_lock_bh(&priv->cmsg_skbs_high.lock); + skb_queue_splice_tail_init(&priv->cmsg_skbs_high, &cmsg_joined); + spin_unlock_bh(&priv->cmsg_skbs_high.lock); - while ((skb = skb_dequeue(&priv->cmsg_skbs))) + spin_lock_bh(&priv->cmsg_skbs_low.lock); + skb_queue_splice_tail_init(&priv->cmsg_skbs_low, &cmsg_joined); + spin_unlock_bh(&priv->cmsg_skbs_low.lock); + + while ((skb = __skb_dequeue(&cmsg_joined))) nfp_flower_cmsg_process_one_rx(priv->app, skb); } -void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb) +static void +nfp_flower_queue_ctl_msg(struct nfp_app *app, struct sk_buff *skb, int type) { struct nfp_flower_priv *priv = app->priv; + struct sk_buff_head *skb_head; + + if (type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY || + type == NFP_FLOWER_CMSG_TYPE_PORT_MOD) + skb_head = &priv->cmsg_skbs_high; + else + skb_head = &priv->cmsg_skbs_low; + + if (skb_queue_len(skb_head) >= NFP_FLOWER_WORKQ_MAX_SKBS) { + nfp_flower_cmsg_warn(app, "Dropping queued control messages\n"); + dev_kfree_skb_any(skb); + return; + } + + skb_queue_tail(skb_head, skb); + schedule_work(&priv->cmsg_work); +} + +void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb) +{ struct nfp_flower_cmsg_hdr *cmsg_hdr; cmsg_hdr = nfp_flower_cmsg_get_hdr(skb); @@ -306,8 +334,10 @@ void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb) nfp_flower_process_mtu_ack(app, skb)) { /* Handle MTU acks outside wq to prevent RTNL conflict. */ dev_consume_skb_any(skb); + } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) { + /* Acks from the NFP that the route is added - ignore. */ + dev_consume_skb_any(skb); } else { - skb_queue_tail(&priv->cmsg_skbs, skb); - schedule_work(&priv->cmsg_work); + nfp_flower_queue_ctl_msg(app, skb, cmsg_hdr->type); } } diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 96bc0e33980c..bee4367a2c38 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -108,6 +108,8 @@ #define NFP_FL_IPV4_TUNNEL_TYPE GENMASK(7, 4) #define NFP_FL_IPV4_PRE_TUN_INDEX GENMASK(2, 0) +#define NFP_FLOWER_WORKQ_MAX_SKBS 30000 + #define nfp_flower_cmsg_warn(app, fmt, args...) 
\ do { \ if (net_ratelimit()) \ @@ -188,7 +190,10 @@ struct nfp_fl_set_ipv4_udp_tun { __be16 reserved; __be64 tun_id __packed; __be32 tun_type_index; - __be32 extra[3]; + __be16 reserved2; + u8 ttl; + u8 reserved3; + __be32 extra[2]; }; /* Metadata with L2 (1W/4B) diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index 6357e0720f43..a997e34bcec2 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -360,7 +360,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) } SET_NETDEV_DEV(repr, &priv->nn->pdev->dev); - nfp_net_get_mac_addr(app->pf, port); + nfp_net_get_mac_addr(app->pf, repr, port); cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port); err = nfp_repr_init(app, repr, @@ -519,7 +519,8 @@ static int nfp_flower_init(struct nfp_app *app) app->priv = app_priv; app_priv->app = app; - skb_queue_head_init(&app_priv->cmsg_skbs); + skb_queue_head_init(&app_priv->cmsg_skbs_high); + skb_queue_head_init(&app_priv->cmsg_skbs_low); INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx); init_waitqueue_head(&app_priv->reify_wait_queue); @@ -549,7 +550,8 @@ static void nfp_flower_clean(struct nfp_app *app) { struct nfp_flower_priv *app_priv = app->priv; - skb_queue_purge(&app_priv->cmsg_skbs); + skb_queue_purge(&app_priv->cmsg_skbs_high); + skb_queue_purge(&app_priv->cmsg_skbs_low); flush_work(&app_priv->cmsg_work); nfp_flower_metadata_cleanup(app); diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index e030b3ce4510..c67e1b54c614 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -107,7 +107,10 @@ struct nfp_mtu_conf { * @mask_table: Hash table used to store masks * @flow_table: Hash table used to store flower rules * @cmsg_work: Workqueue for control messages processing - * @cmsg_skbs: List of skbs for control message processing + * @cmsg_skbs_high: List of higher priority skbs for control message + * processing + * @cmsg_skbs_low: List of lower priority skbs for control message + * processing * @nfp_mac_off_list: List of MAC addresses to offload * @nfp_mac_index_list: List of unique 8-bit indexes for non NFP netdevs * @nfp_ipv4_off_list: List of IPv4 addresses to offload @@ -136,7 +139,8 @@ struct nfp_flower_priv { DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS); DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS); struct work_struct cmsg_work; - struct sk_buff_head cmsg_skbs; + struct sk_buff_head cmsg_skbs_high; + struct sk_buff_head cmsg_skbs_low; struct list_head nfp_mac_off_list; struct list_head nfp_mac_index_list; struct list_head nfp_ipv4_off_list; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c index 2a2f2fbc8850..b9618c37403f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c @@ -69,7 +69,7 @@ int nfp_app_nic_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, if (err) return err < 0 ? 
err : 0; - nfp_net_get_mac_addr(app->pf, nn->port); + nfp_net_get_mac_addr(app->pf, nn->dp.netdev, nn->port); return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h index add46e28212b..42211083b51f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h @@ -171,7 +171,9 @@ void nfp_net_pci_remove(struct nfp_pf *pf); int nfp_hwmon_register(struct nfp_pf *pf); void nfp_hwmon_unregister(struct nfp_pf *pf); -void nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_port *port); +void +nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev, + struct nfp_port *port); bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index 15fa47f622aa..45cd2092e498 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -67,23 +67,26 @@ /** * nfp_net_get_mac_addr() - Get the MAC address. * @pf: NFP PF handle + * @netdev: net_device to set MAC address on * @port: NFP port structure * * First try to get the MAC address from NSP ETH table. If that * fails generate a random address. */ -void nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_port *port) +void +nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev, + struct nfp_port *port) { struct nfp_eth_table_port *eth_port; eth_port = __nfp_port_get_eth_port(port); if (!eth_port) { - eth_hw_addr_random(port->netdev); + eth_hw_addr_random(netdev); return; } - ether_addr_copy(port->netdev->dev_addr, eth_port->mac_addr); - ether_addr_copy(port->netdev->perm_addr, eth_port->mac_addr); + ether_addr_copy(netdev->dev_addr, eth_port->mac_addr); + ether_addr_copy(netdev->perm_addr, eth_port->mac_addr); } static struct nfp_eth_table_port * @@ -511,16 +514,18 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf) return PTR_ERR(mem); } - min_size = NFP_MAC_STATS_SIZE * (pf->eth_tbl->max_index + 1); - pf->mac_stats_mem = nfp_rtsym_map(pf->rtbl, "_mac_stats", - "net.macstats", min_size, - &pf->mac_stats_bar); - if (IS_ERR(pf->mac_stats_mem)) { - if (PTR_ERR(pf->mac_stats_mem) != -ENOENT) { - err = PTR_ERR(pf->mac_stats_mem); - goto err_unmap_ctrl; + if (pf->eth_tbl) { + min_size = NFP_MAC_STATS_SIZE * (pf->eth_tbl->max_index + 1); + pf->mac_stats_mem = nfp_rtsym_map(pf->rtbl, "_mac_stats", + "net.macstats", min_size, + &pf->mac_stats_bar); + if (IS_ERR(pf->mac_stats_mem)) { + if (PTR_ERR(pf->mac_stats_mem) != -ENOENT) { + err = PTR_ERR(pf->mac_stats_mem); + goto err_unmap_ctrl; + } + pf->mac_stats_mem = NULL; } - pf->mac_stats_mem = NULL; } pf->vf_cfg_mem = nfp_net_pf_map_rtsym(pf, "net.vfcfg", diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c index f7b958181126..cb28ac03e4ca 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c @@ -211,8 +211,11 @@ int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex) break; err = msleep_interruptible(timeout_ms); - if (err != 0) + if (err != 0) { + nfp_info(mutex->cpp, + "interrupted waiting for NFP mutex\n"); return -ERESTARTSYS; + } if (time_is_before_eq_jiffies(warn_at)) { warn_at = jiffies + NFP_MUTEX_WAIT_NEXT_WARN * HZ; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c index 
99bb679a9801..2abee0fe3a7c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c @@ -281,8 +281,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, u32 nsp_cpp, u64 addr, if ((*reg & mask) == val) return 0; - if (msleep_interruptible(25)) - return -ERESTARTSYS; + msleep(25); if (time_after(start_time, wait_until)) return -ETIMEDOUT; diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 74fc626b1ec1..38502815d681 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -2370,7 +2370,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb) u8 flags = 0; if (unlikely(skb->ip_summed != CHECKSUM_NONE)) { - DP_INFO(cdev, "Cannot transmit a checksumed packet\n"); + DP_INFO(cdev, "Cannot transmit a checksummed packet\n"); return -EINVAL; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index fb7c2d1562ae..6acfd43c1a4f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -848,7 +848,7 @@ int qed_roce_query_qp(struct qed_hwfn *p_hwfn, if (!(qp->resp_offloaded)) { DP_NOTICE(p_hwfn, - "The responder's qp should be offloded before requester's\n"); + "The responder's qp should be offloaded before requester's\n"); return -EINVAL; } diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index d33988570217..5f4e447c5dce 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -350,15 +350,16 @@ static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev) real_dev = priv->real_dev; - if (!rmnet_is_real_dev_registered(real_dev)) - return -ENODEV; - if (nla_put_u16(skb, IFLA_RMNET_MUX_ID, priv->mux_id)) goto nla_put_failure; - port = rmnet_get_port_rtnl(real_dev); + if (rmnet_is_real_dev_registered(real_dev)) { + port = rmnet_get_port_rtnl(real_dev); + f.flags = port->data_format; + } else { + f.flags = 0; + } - f.flags = port->data_format; f.mask = ~0; if (nla_put(skb, IFLA_RMNET_FLAGS, sizeof(f), &f)) diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index d24b47b8e0b2..d118da5a10a2 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -2224,7 +2224,7 @@ static void rtl8139_poll_controller(struct net_device *dev) struct rtl8139_private *tp = netdev_priv(dev); const int irq = tp->pci_dev->irq; - disable_irq(irq); + disable_irq_nosync(irq); rtl8139_interrupt(irq, dev); enable_irq(irq); } diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 50daad0a1482..d90a7b1f4088 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -3999,29 +3999,6 @@ static void efx_ef10_prepare_flr(struct efx_nic *efx) atomic_set(&efx->active_queues, 0); } -static bool efx_ef10_filter_equal(const struct efx_filter_spec *left, - const struct efx_filter_spec *right) -{ - if ((left->match_flags ^ right->match_flags) | - ((left->flags ^ right->flags) & - (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX))) - return false; - - return memcmp(&left->outer_vid, &right->outer_vid, - sizeof(struct efx_filter_spec) - - offsetof(struct efx_filter_spec, outer_vid)) == 0; -} - -static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec) -{ - 
BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3); - return jhash2((const u32 *)&spec->outer_vid, - (sizeof(struct efx_filter_spec) - - offsetof(struct efx_filter_spec, outer_vid)) / 4, - 0); - /* XXX should we randomise the initval? */ -} - /* Decide whether a filter should be exclusive or else should allow * delivery to additional recipients. Currently we decide that * filters for specific local unicast MAC and IP addresses are @@ -4346,7 +4323,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx, goto out_unlock; match_pri = rc; - hash = efx_ef10_filter_hash(spec); + hash = efx_filter_spec_hash(spec); is_mc_recip = efx_filter_is_mc_recipient(spec); if (is_mc_recip) bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); @@ -4378,7 +4355,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx, if (!saved_spec) { if (ins_index < 0) ins_index = i; - } else if (efx_ef10_filter_equal(spec, saved_spec)) { + } else if (efx_filter_spec_equal(spec, saved_spec)) { if (spec->priority < saved_spec->priority && spec->priority != EFX_FILTER_PRI_AUTO) { rc = -EPERM; @@ -4762,28 +4739,63 @@ static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx, static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, unsigned int filter_idx) { + struct efx_filter_spec *spec, saved_spec; struct efx_ef10_filter_table *table; - struct efx_filter_spec *spec; - bool ret; + struct efx_arfs_rule *rule = NULL; + bool ret = true, force = false; + u16 arfs_id; down_read(&efx->filter_sem); table = efx->filter_state; down_write(&table->lock); spec = efx_ef10_filter_entry_spec(table, filter_idx); - if (!spec || spec->priority != EFX_FILTER_PRI_HINT) { - ret = true; + if (!spec || spec->priority != EFX_FILTER_PRI_HINT) goto out_unlock; - } - if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, - flow_id, filter_idx)) { + spin_lock_bh(&efx->rps_hash_lock); + if (!efx->rps_hash_table) { + /* In the absence of the table, we always return 0 to ARFS. */ + arfs_id = 0; + } else { + rule = efx_rps_hash_find(efx, spec); + if (!rule) + /* ARFS table doesn't know of this filter, so remove it */ + goto expire; + arfs_id = rule->arfs_id; + ret = efx_rps_check_rule(rule, filter_idx, &force); + if (force) + goto expire; + if (!ret) { + spin_unlock_bh(&efx->rps_hash_lock); + goto out_unlock; + } + } + if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, arfs_id)) ret = false; - goto out_unlock; + else if (rule) + rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING; +expire: + saved_spec = *spec; /* remove operation will kfree spec */ + spin_unlock_bh(&efx->rps_hash_lock); + /* At this point (since we dropped the lock), another thread might queue + * up a fresh insertion request (but the actual insertion will be held + * up by our possession of the filter table lock). In that case, it + * will set rule->filter_id to EFX_ARFS_FILTER_ID_PENDING, meaning that + * the rule is not removed by efx_rps_hash_del() below. + */ + if (ret) + ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority, + filter_idx, true) == 0; + /* While we can't safely dereference rule (we dropped the lock), we can + * still test it for NULL. 
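The ef10 expiry rework above hinges on the contract between ndo_rx_flow_steer() and rps_may_expire_flow(): whatever ID the driver reported back for a steering request must be the filter_id it later hands to rps_may_expire_flow(), together with the flow_id the stack originally supplied. A minimal sketch of that contract, assuming hypothetical driver bookkeeping (struct my_arfs_entry is made up):

#include <linux/netdevice.h>

struct my_arfs_entry {
	u32 flow_id;	/* supplied by the stack to ndo_rx_flow_steer() */
	u16 filter_id;	/* value the driver returned to the stack */
	u16 rxq_index;
	bool in_use;
};

static bool my_arfs_try_expire(struct net_device *dev,
			       struct my_arfs_entry *e)
{
#ifdef CONFIG_RFS_ACCEL
	if (!e->in_use)
		return false;
	/* The stack answers false while the flow is still being touched. */
	if (!rps_may_expire_flow(dev, e->rxq_index, e->flow_id, e->filter_id))
		return false;
#endif
	/* Safe to tear down the hardware filter here. */
	e->in_use = false;
	return true;
}

The sfc patch adds the rps_hash_table precisely so that the arfs_id it reports and the filter index it stores can diverge without breaking this contract.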
+ */ + if (ret && rule) { + /* Expiring, so remove entry from ARFS table */ + spin_lock_bh(&efx->rps_hash_lock); + efx_rps_hash_del(efx, &saved_spec); + spin_unlock_bh(&efx->rps_hash_lock); } - - ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority, - filter_idx, true) == 0; out_unlock: up_write(&table->lock); up_read(&efx->filter_sem); @@ -5265,7 +5277,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, ids = vlan->uc; } - filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; + filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0; /* Insert/renew filters */ for (i = 0; i < addr_count; i++) { @@ -5334,7 +5346,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, int rc; u16 *id; - filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; + filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0; efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 692dd729ee2a..a4ebd8715494 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -3027,6 +3027,10 @@ static int efx_init_struct(struct efx_nic *efx, mutex_init(&efx->mac_lock); #ifdef CONFIG_RFS_ACCEL mutex_init(&efx->rps_mutex); + spin_lock_init(&efx->rps_hash_lock); + /* Failure to allocate is not fatal, but may degrade ARFS performance */ + efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE, + sizeof(*efx->rps_hash_table), GFP_KERNEL); #endif efx->phy_op = &efx_dummy_phy_operations; efx->mdio.dev = net_dev; @@ -3070,6 +3074,10 @@ static void efx_fini_struct(struct efx_nic *efx) { int i; +#ifdef CONFIG_RFS_ACCEL + kfree(efx->rps_hash_table); +#endif + for (i = 0; i < EFX_MAX_CHANNELS; i++) kfree(efx->channel[i]); @@ -3092,6 +3100,141 @@ void efx_update_sw_stats(struct efx_nic *efx, u64 *stats) stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops); } +bool efx_filter_spec_equal(const struct efx_filter_spec *left, + const struct efx_filter_spec *right) +{ + if ((left->match_flags ^ right->match_flags) | + ((left->flags ^ right->flags) & + (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX))) + return false; + + return memcmp(&left->outer_vid, &right->outer_vid, + sizeof(struct efx_filter_spec) - + offsetof(struct efx_filter_spec, outer_vid)) == 0; +} + +u32 efx_filter_spec_hash(const struct efx_filter_spec *spec) +{ + BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3); + return jhash2((const u32 *)&spec->outer_vid, + (sizeof(struct efx_filter_spec) - + offsetof(struct efx_filter_spec, outer_vid)) / 4, + 0); +} + +#ifdef CONFIG_RFS_ACCEL +bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx, + bool *force) +{ + if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) { + /* ARFS is currently updating this entry, leave it */ + return false; + } + if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) { + /* ARFS tried and failed to update this, so it's probably out + * of date. Remove the filter and the ARFS rule entry. + */ + rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING; + *force = true; + return true; + } else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */ + /* ARFS has moved on, so old filter is not needed. Since we did + * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will + * not be removed by efx_rps_hash_del() subsequently. + */ + *force = true; + return true; + } + /* Remove it iff ARFS wants to. 
*/ + return true; +} + +struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx, + const struct efx_filter_spec *spec) +{ + u32 hash = efx_filter_spec_hash(spec); + + WARN_ON(!spin_is_locked(&efx->rps_hash_lock)); + if (!efx->rps_hash_table) + return NULL; + return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE]; +} + +struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx, + const struct efx_filter_spec *spec) +{ + struct efx_arfs_rule *rule; + struct hlist_head *head; + struct hlist_node *node; + + head = efx_rps_hash_bucket(efx, spec); + if (!head) + return NULL; + hlist_for_each(node, head) { + rule = container_of(node, struct efx_arfs_rule, node); + if (efx_filter_spec_equal(spec, &rule->spec)) + return rule; + } + return NULL; +} + +struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx, + const struct efx_filter_spec *spec, + bool *new) +{ + struct efx_arfs_rule *rule; + struct hlist_head *head; + struct hlist_node *node; + + head = efx_rps_hash_bucket(efx, spec); + if (!head) + return NULL; + hlist_for_each(node, head) { + rule = container_of(node, struct efx_arfs_rule, node); + if (efx_filter_spec_equal(spec, &rule->spec)) { + *new = false; + return rule; + } + } + rule = kmalloc(sizeof(*rule), GFP_ATOMIC); + *new = true; + if (rule) { + memcpy(&rule->spec, spec, sizeof(rule->spec)); + hlist_add_head(&rule->node, head); + } + return rule; +} + +void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec) +{ + struct efx_arfs_rule *rule; + struct hlist_head *head; + struct hlist_node *node; + + head = efx_rps_hash_bucket(efx, spec); + if (WARN_ON(!head)) + return; + hlist_for_each(node, head) { + rule = container_of(node, struct efx_arfs_rule, node); + if (efx_filter_spec_equal(spec, &rule->spec)) { + /* Someone already reused the entry. We know that if + * this check doesn't fire (i.e. filter_id == REMOVING) + * then the REMOVING mark was put there by our caller, + * because caller is holding a lock on filter table and + * only holders of that lock set REMOVING. + */ + if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING) + return; + hlist_del(node); + kfree(rule); + return; + } + } + /* We didn't find it. */ + WARN_ON(1); +} +#endif + /* RSS contexts. We're using linked lists and crappy O(n) algorithms, because * (a) this is an infrequent control-plane operation and (b) n is small (max 64) */ diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index a3140e16fcef..3f759ebdcf10 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -186,6 +186,27 @@ static inline void efx_filter_rfs_expire(struct work_struct *data) {} #endif bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec); +bool efx_filter_spec_equal(const struct efx_filter_spec *left, + const struct efx_filter_spec *right); +u32 efx_filter_spec_hash(const struct efx_filter_spec *spec); + +#ifdef CONFIG_RFS_ACCEL +bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx, + bool *force); + +struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx, + const struct efx_filter_spec *spec); + +/* @new is written to indicate if entry was newly added (true) or if an old + * entry was found and returned (false). 
+ */ +struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx, + const struct efx_filter_spec *spec, + bool *new); + +void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec); +#endif + /* RSS contexts */ struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx); struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id); diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 4a19c7efdf8d..c72adf8b52ea 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -2905,18 +2905,45 @@ bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, { struct efx_farch_filter_state *state = efx->filter_state; struct efx_farch_filter_table *table; - bool ret = false; + bool ret = false, force = false; + u16 arfs_id; down_write(&state->lock); + spin_lock_bh(&efx->rps_hash_lock); table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; if (test_bit(index, table->used_bitmap) && - table->spec[index].priority == EFX_FILTER_PRI_HINT && - rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id, - flow_id, index)) { - efx_farch_filter_table_clear_entry(efx, table, index); - ret = true; + table->spec[index].priority == EFX_FILTER_PRI_HINT) { + struct efx_arfs_rule *rule = NULL; + struct efx_filter_spec spec; + + efx_farch_filter_to_gen_spec(&spec, &table->spec[index]); + if (!efx->rps_hash_table) { + /* In the absence of the table, we always returned 0 to + * ARFS, so use the same to query it. + */ + arfs_id = 0; + } else { + rule = efx_rps_hash_find(efx, &spec); + if (!rule) { + /* ARFS table doesn't know of this filter, remove it */ + force = true; + } else { + arfs_id = rule->arfs_id; + if (!efx_rps_check_rule(rule, index, &force)) + goto out_unlock; + } + } + if (force || rps_may_expire_flow(efx->net_dev, spec.dmaq_id, + flow_id, arfs_id)) { + if (rule) + rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING; + efx_rps_hash_del(efx, &spec); + efx_farch_filter_table_clear_entry(efx, table, index); + ret = true; + } } - +out_unlock: + spin_unlock_bh(&efx->rps_hash_lock); up_write(&state->lock); return ret; } diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 5e379a83c729..65568925c3ef 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -733,6 +733,56 @@ struct efx_rss_context { u32 rx_indir_table[128]; }; +#ifdef CONFIG_RFS_ACCEL +/* Order of these is important, since filter_id >= %EFX_ARFS_FILTER_ID_PENDING + * is used to test if filter does or will exist. + */ +#define EFX_ARFS_FILTER_ID_PENDING -1 +#define EFX_ARFS_FILTER_ID_ERROR -2 +#define EFX_ARFS_FILTER_ID_REMOVING -3 +/** + * struct efx_arfs_rule - record of an ARFS filter and its IDs + * @node: linkage into hash table + * @spec: details of the filter (used as key for hash table). Use efx->type to + * determine which member to use. + * @rxq_index: channel to which the filter will steer traffic. + * @arfs_id: filter ID which was returned to ARFS + * @filter_id: index in software filter table. May be + * %EFX_ARFS_FILTER_ID_PENDING if filter was not inserted yet, + * %EFX_ARFS_FILTER_ID_ERROR if filter insertion failed, or + * %EFX_ARFS_FILTER_ID_REMOVING if expiry is currently removing the filter. 
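efx_filter_spec_hash(), which the hunks above promote from ef10.c into shared efx code, hashes every field of the spec from outer_vid to the end of the structure in a single jhash2() call. The same struct-tail hashing idiom looks like this with a hypothetical key structure (flow_key and its fields are invented for illustration):

#include <linux/jhash.h>
#include <linux/kernel.h>

/* Hypothetical key: everything from ->proto onward identifies the flow. */
struct flow_key {
	u32 cookie;	/* not part of the hash */
	u16 proto;	/* hashed region starts here */
	u16 vid;
	u32 saddr;
	u32 daddr;
};

static u32 flow_key_hash(const struct flow_key *key)
{
	/* jhash2() walks u32 words, so the hashed tail must be 4-byte sized
	 * and aligned, as the BUILD_BUG_ON in the patch also enforces.
	 */
	BUILD_BUG_ON(offsetof(struct flow_key, proto) & 3);
	BUILD_BUG_ON((sizeof(struct flow_key) -
		      offsetof(struct flow_key, proto)) & 3);

	return jhash2((const u32 *)&key->proto,
		      (sizeof(struct flow_key) -
		       offsetof(struct flow_key, proto)) / 4, 0);
}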
+ */ +struct efx_arfs_rule { + struct hlist_node node; + struct efx_filter_spec spec; + u16 rxq_index; + u16 arfs_id; + s32 filter_id; +}; + +/* Size chosen so that the table is one page (4kB) */ +#define EFX_ARFS_HASH_TABLE_SIZE 512 + +/** + * struct efx_async_filter_insertion - Request to asynchronously insert a filter + * @net_dev: Reference to the netdevice + * @spec: The filter to insert + * @work: Workitem for this request + * @rxq_index: Identifies the channel for which this request was made + * @flow_id: Identifies the kernel-side flow for which this request was made + */ +struct efx_async_filter_insertion { + struct net_device *net_dev; + struct efx_filter_spec spec; + struct work_struct work; + u16 rxq_index; + u32 flow_id; +}; + +/* Maximum number of ARFS workitems that may be in flight on an efx_nic */ +#define EFX_RPS_MAX_IN_FLIGHT 8 +#endif /* CONFIG_RFS_ACCEL */ + /** * struct efx_nic - an Efx NIC * @name: Device name (net device name or bus id before net device registered) @@ -850,6 +900,12 @@ struct efx_rss_context { * @rps_expire_channel: Next channel to check for expiry * @rps_expire_index: Next index to check for expiry in * @rps_expire_channel's @rps_flow_id + * @rps_slot_map: bitmap of in-flight entries in @rps_slot + * @rps_slot: array of ARFS insertion requests for efx_filter_rfs_work() + * @rps_hash_lock: Protects ARFS filter mapping state (@rps_hash_table and + * @rps_next_id). + * @rps_hash_table: Mapping between ARFS filters and their various IDs + * @rps_next_id: next arfs_id for an ARFS filter * @active_queues: Count of RX and TX queues that haven't been flushed and drained. * @rxq_flush_pending: Count of number of receive queues that need to be flushed. * Decremented when the efx_flush_rx_queue() is called. @@ -1004,6 +1060,11 @@ struct efx_nic { struct mutex rps_mutex; unsigned int rps_expire_channel; unsigned int rps_expire_index; + unsigned long rps_slot_map; + struct efx_async_filter_insertion rps_slot[EFX_RPS_MAX_IN_FLIGHT]; + spinlock_t rps_hash_lock; + struct hlist_head *rps_hash_table; + u32 rps_next_id; #endif atomic_t active_queues; diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 95682831484e..d2e254f2f72b 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -827,31 +827,38 @@ MODULE_PARM_DESC(rx_refill_threshold, #ifdef CONFIG_RFS_ACCEL -/** - * struct efx_async_filter_insertion - Request to asynchronously insert a filter - * @net_dev: Reference to the netdevice - * @spec: The filter to insert - * @work: Workitem for this request - * @rxq_index: Identifies the channel for which this request was made - * @flow_id: Identifies the kernel-side flow for which this request was made - */ -struct efx_async_filter_insertion { - struct net_device *net_dev; - struct efx_filter_spec spec; - struct work_struct work; - u16 rxq_index; - u32 flow_id; -}; - static void efx_filter_rfs_work(struct work_struct *data) { struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion, work); struct efx_nic *efx = netdev_priv(req->net_dev); struct efx_channel *channel = efx_get_channel(efx, req->rxq_index); + int slot_idx = req - efx->rps_slot; + struct efx_arfs_rule *rule; + u16 arfs_id = 0; int rc; - rc = efx->type->filter_insert(efx, &req->spec, false); + rc = efx->type->filter_insert(efx, &req->spec, true); + if (rc >= 0) + rc %= efx->type->max_rx_ip_filters; + if (efx->rps_hash_table) { + spin_lock_bh(&efx->rps_hash_lock); + rule = efx_rps_hash_find(efx, &req->spec); + /* The 
rule might have already gone, if someone else's request + * for the same spec was already worked and then expired before + * we got around to our work. In that case we have nothing + * tying us to an arfs_id, meaning that as soon as the filter + * is considered for expiry it will be removed. + */ + if (rule) { + if (rc < 0) + rule->filter_id = EFX_ARFS_FILTER_ID_ERROR; + else + rule->filter_id = rc; + arfs_id = rule->arfs_id; + } + spin_unlock_bh(&efx->rps_hash_lock); + } if (rc >= 0) { /* Remember this so we can check whether to expire the filter * later. @@ -863,23 +870,23 @@ static void efx_filter_rfs_work(struct work_struct *data) if (req->spec.ether_type == htons(ETH_P_IP)) netif_info(efx, rx_status, efx->net_dev, - "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n", + "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n", (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", req->spec.rem_host, ntohs(req->spec.rem_port), req->spec.loc_host, ntohs(req->spec.loc_port), - req->rxq_index, req->flow_id, rc); + req->rxq_index, req->flow_id, rc, arfs_id); else netif_info(efx, rx_status, efx->net_dev, - "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n", + "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n", (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", req->spec.rem_host, ntohs(req->spec.rem_port), req->spec.loc_host, ntohs(req->spec.loc_port), - req->rxq_index, req->flow_id, rc); + req->rxq_index, req->flow_id, rc, arfs_id); } /* Release references */ + clear_bit(slot_idx, &efx->rps_slot_map); dev_put(req->net_dev); - kfree(req); } int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, @@ -887,23 +894,39 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, { struct efx_nic *efx = netdev_priv(net_dev); struct efx_async_filter_insertion *req; + struct efx_arfs_rule *rule; struct flow_keys fk; + int slot_idx; + bool new; + int rc; - if (flow_id == RPS_FLOW_ID_INVALID) - return -EINVAL; + /* find a free slot */ + for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++) + if (!test_and_set_bit(slot_idx, &efx->rps_slot_map)) + break; + if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT) + return -EBUSY; - if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) - return -EPROTONOSUPPORT; + if (flow_id == RPS_FLOW_ID_INVALID) { + rc = -EINVAL; + goto out_clear; + } - if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) - return -EPROTONOSUPPORT; - if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) - return -EPROTONOSUPPORT; + if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) { + rc = -EPROTONOSUPPORT; + goto out_clear; + } - req = kmalloc(sizeof(*req), GFP_ATOMIC); - if (!req) - return -ENOMEM; + if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) { + rc = -EPROTONOSUPPORT; + goto out_clear; + } + if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) { + rc = -EPROTONOSUPPORT; + goto out_clear; + } + req = efx->rps_slot + slot_idx; efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT, efx->rx_scatter ? 
EFX_FILTER_FLAG_RX_SCATTER : 0, rxq_index); @@ -927,12 +950,45 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, req->spec.rem_port = fk.ports.src; req->spec.loc_port = fk.ports.dst; + if (efx->rps_hash_table) { + /* Add it to ARFS hash table */ + spin_lock(&efx->rps_hash_lock); + rule = efx_rps_hash_add(efx, &req->spec, &new); + if (!rule) { + rc = -ENOMEM; + goto out_unlock; + } + if (new) + rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER; + rc = rule->arfs_id; + /* Skip if existing or pending filter already does the right thing */ + if (!new && rule->rxq_index == rxq_index && + rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING) + goto out_unlock; + rule->rxq_index = rxq_index; + rule->filter_id = EFX_ARFS_FILTER_ID_PENDING; + spin_unlock(&efx->rps_hash_lock); + } else { + /* Without an ARFS hash table, we just use arfs_id 0 for all + * filters. This means if multiple flows hash to the same + * flow_id, all but the most recently touched will be eligible + * for expiry. + */ + rc = 0; + } + + /* Queue the request */ dev_hold(req->net_dev = net_dev); INIT_WORK(&req->work, efx_filter_rfs_work); req->rxq_index = rxq_index; req->flow_id = flow_id; schedule_work(&req->work); - return 0; + return rc; +out_unlock: + spin_unlock(&efx->rps_hash_lock); +out_clear: + clear_bit(slot_idx, &efx->rps_slot_map); + return rc; } bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index c7bff596c665..dedd40613090 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -347,7 +347,7 @@ enum power_event { #define MTL_RX_OVERFLOW_INT BIT(16) /* Default operating mode of the MAC */ -#define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | GMAC_CONFIG_ACS | \ +#define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | \ GMAC_CONFIG_BE | GMAC_CONFIG_DCRS) /* To dump the core regs excluding the Address Registers */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index a3af92ebbca8..517b1f6736a8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -31,13 +31,6 @@ static void dwmac4_core_init(struct mac_device_info *hw, value |= GMAC_CORE_INIT; - /* Clear ACS bit because Ethernet switch tagging formats such as - * Broadcom tags can look like invalid LLC/SNAP packets and cause the - * hardware to truncate packets on reception. - */ - if (netdev_uses_dsa(dev)) - value &= ~GMAC_CONFIG_ACS; - if (mtu > 1500) value |= GMAC_CONFIG_2K; if (mtu > 2000) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 9a16931ce39d..b65e2d144698 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3495,8 +3495,13 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 * Type frames (LLC/LLC-SNAP) + * + * llc_snap is never checked in GMAC >= 4, so this ACS + * feature is always disabled and packets need to be + * stripped manually. 
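Back in the efx_filter_rfs() changes above (before the stmmac hunk), the driver stops kmalloc()ing a request per ARFS callback and instead claims one of EFX_RPS_MAX_IN_FLIGHT preallocated slots with test_and_set_bit(), returning -EBUSY when all slots are busy. A generic sketch of that slot-bitmap idiom, with made-up names:

#include <linux/bitops.h>
#include <linux/workqueue.h>

#define MY_MAX_IN_FLIGHT 8	/* must fit in one unsigned long bitmap */

struct my_async_req {
	struct work_struct work;
	/* request parameters would live here */
};

static unsigned long my_slot_map;
static struct my_async_req my_slots[MY_MAX_IN_FLIGHT];

static struct my_async_req *my_claim_slot(void)
{
	int i;

	/* Atomic claim: usable from atomic context, no allocation needed. */
	for (i = 0; i < MY_MAX_IN_FLIGHT; i++)
		if (!test_and_set_bit(i, &my_slot_map))
			return &my_slots[i];
	return NULL;	/* caller reports -EBUSY */
}

static void my_release_slot(struct my_async_req *req)
{
	clear_bit(req - my_slots, &my_slot_map);
}

Besides avoiding GFP_ATOMIC allocations, the fixed slot array bounds how much deferred work a burst of steering requests can queue up.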
*/ - if (unlikely(status != llc_snap)) + if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) || + unlikely(status != llc_snap)) frame_len -= ETH_FCS_LEN; if (netif_msg_rx_status(priv)) { diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 30371274409d..28d893b93d30 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -129,7 +129,7 @@ do { \ #define RX_PRIORITY_MAPPING 0x76543210 #define TX_PRIORITY_MAPPING 0x33221100 -#define CPDMA_TX_PRIORITY_MAP 0x01234567 +#define CPDMA_TX_PRIORITY_MAP 0x76543210 #define CPSW_VLAN_AWARE BIT(1) #define CPSW_RX_VLAN_ENCAP BIT(2) @@ -1340,6 +1340,8 @@ static inline void cpsw_add_dual_emac_def_ale_entries( cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM, ALE_VLAN | ALE_SECURE, slave->port_vlan); + cpsw_ale_control_set(cpsw->ale, slave_port, + ALE_PORT_DROP_UNKNOWN_VLAN, 1); } static void soft_reset_slave(struct cpsw_slave *slave) diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 9cbb0c8a896a..7de88b33d5b9 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -3277,7 +3277,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev, err = netdev_upper_dev_link(real_dev, dev, extack); if (err < 0) - goto put_dev; + goto unregister; /* need to be already registered so that ->init has run and * the MAC addr is set @@ -3316,8 +3316,7 @@ del_dev: macsec_del_dev(macsec); unlink: netdev_upper_dev_unlink(real_dev, dev); -put_dev: - dev_put(real_dev); +unregister: unregister_netdevice(dev); return err; } diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index c22e8e383247..25e2a099b71c 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -1393,6 +1393,15 @@ static int m88e1318_set_wol(struct phy_device *phydev, if (err < 0) goto error; + /* If WOL event happened once, the LED[2] interrupt pin + * will not be cleared unless we reading the interrupt status + * register. If interrupts are in use, the normal interrupt + * handling will clear the WOL event. 
Clear the WOL event + * before enabling it if !phy_interrupt_is_valid() + */ + if (!phy_interrupt_is_valid(phydev)) + phy_read(phydev, MII_M1011_IEVENT); + /* Enable the WOL interrupt */ err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0, MII_88E1318S_PHY_CSIER_WOL_EIE); diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c index 0f293ef28935..a97ac8c12c4c 100644 --- a/drivers/net/phy/microchip.c +++ b/drivers/net/phy/microchip.c @@ -20,6 +20,7 @@ #include <linux/ethtool.h> #include <linux/phy.h> #include <linux/microchipphy.h> +#include <linux/delay.h> #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>" #define DRIVER_DESC "Microchip LAN88XX PHY driver" @@ -30,6 +31,16 @@ struct lan88xx_priv { __u32 wolopts; }; +static int lan88xx_read_page(struct phy_device *phydev) +{ + return __phy_read(phydev, LAN88XX_EXT_PAGE_ACCESS); +} + +static int lan88xx_write_page(struct phy_device *phydev, int page) +{ + return __phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, page); +} + static int lan88xx_phy_config_intr(struct phy_device *phydev) { int rc; @@ -66,6 +77,150 @@ static int lan88xx_suspend(struct phy_device *phydev) return 0; } +static int lan88xx_TR_reg_set(struct phy_device *phydev, u16 regaddr, + u32 data) +{ + int val, save_page, ret = 0; + u16 buf; + + /* Save current page */ + save_page = phy_save_page(phydev); + if (save_page < 0) { + pr_warn("Failed to get current page\n"); + goto err; + } + + /* Switch to TR page */ + lan88xx_write_page(phydev, LAN88XX_EXT_PAGE_ACCESS_TR); + + ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_LOW_DATA, + (data & 0xFFFF)); + if (ret < 0) { + pr_warn("Failed to write TR low data\n"); + goto err; + } + + ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_HIGH_DATA, + (data & 0x00FF0000) >> 16); + if (ret < 0) { + pr_warn("Failed to write TR high data\n"); + goto err; + } + + /* Config control bits [15:13] of register */ + buf = (regaddr & ~(0x3 << 13));/* Clr [14:13] to write data in reg */ + buf |= 0x8000; /* Set [15] to Packet transmit */ + + ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_CR, buf); + if (ret < 0) { + pr_warn("Failed to write data in reg\n"); + goto err; + } + + usleep_range(1000, 2000);/* Wait for Data to be written */ + val = __phy_read(phydev, LAN88XX_EXT_PAGE_TR_CR); + if (!(val & 0x8000)) + pr_warn("TR Register[0x%X] configuration failed\n", regaddr); +err: + return phy_restore_page(phydev, save_page, ret); +} + +static void lan88xx_config_TR_regs(struct phy_device *phydev) +{ + int err; + + /* Get access to Channel 0x1, Node 0xF , Register 0x01. + * Write 24-bit value 0x12B00A to register. Setting MrvlTrFix1000Kf, + * MrvlTrFix1000Kp, MasterEnableTR bits. + */ + err = lan88xx_TR_reg_set(phydev, 0x0F82, 0x12B00A); + if (err < 0) + pr_warn("Failed to Set Register[0x0F82]\n"); + + /* Get access to Channel b'10, Node b'1101, Register 0x06. + * Write 24-bit value 0xD2C46F to register. Setting SSTrKf1000Slv, + * SSTrKp1000Mas bits. + */ + err = lan88xx_TR_reg_set(phydev, 0x168C, 0xD2C46F); + if (err < 0) + pr_warn("Failed to Set Register[0x168C]\n"); + + /* Get access to Channel b'10, Node b'1111, Register 0x11. + * Write 24-bit value 0x620 to register. Setting rem_upd_done_thresh + * bits + */ + err = lan88xx_TR_reg_set(phydev, 0x17A2, 0x620); + if (err < 0) + pr_warn("Failed to Set Register[0x17A2]\n"); + + /* Get access to Channel b'10, Node b'1101, Register 0x10. + * Write 24-bit value 0xEEFFDD to register. 
Setting + * eee_TrKp1Long_1000, eee_TrKp2Long_1000, eee_TrKp3Long_1000, + * eee_TrKp1Short_1000,eee_TrKp2Short_1000, eee_TrKp3Short_1000 bits. + */ + err = lan88xx_TR_reg_set(phydev, 0x16A0, 0xEEFFDD); + if (err < 0) + pr_warn("Failed to Set Register[0x16A0]\n"); + + /* Get access to Channel b'10, Node b'1101, Register 0x13. + * Write 24-bit value 0x071448 to register. Setting + * slv_lpi_tr_tmr_val1, slv_lpi_tr_tmr_val2 bits. + */ + err = lan88xx_TR_reg_set(phydev, 0x16A6, 0x071448); + if (err < 0) + pr_warn("Failed to Set Register[0x16A6]\n"); + + /* Get access to Channel b'10, Node b'1101, Register 0x12. + * Write 24-bit value 0x13132F to register. Setting + * slv_sigdet_timer_val1, slv_sigdet_timer_val2 bits. + */ + err = lan88xx_TR_reg_set(phydev, 0x16A4, 0x13132F); + if (err < 0) + pr_warn("Failed to Set Register[0x16A4]\n"); + + /* Get access to Channel b'10, Node b'1101, Register 0x14. + * Write 24-bit value 0x0 to register. Setting eee_3level_delay, + * eee_TrKf_freeze_delay bits. + */ + err = lan88xx_TR_reg_set(phydev, 0x16A8, 0x0); + if (err < 0) + pr_warn("Failed to Set Register[0x16A8]\n"); + + /* Get access to Channel b'01, Node b'1111, Register 0x34. + * Write 24-bit value 0x91B06C to register. Setting + * FastMseSearchThreshLong1000, FastMseSearchThreshShort1000, + * FastMseSearchUpdGain1000 bits. + */ + err = lan88xx_TR_reg_set(phydev, 0x0FE8, 0x91B06C); + if (err < 0) + pr_warn("Failed to Set Register[0x0FE8]\n"); + + /* Get access to Channel b'01, Node b'1111, Register 0x3E. + * Write 24-bit value 0xC0A028 to register. Setting + * FastMseKp2ThreshLong1000, FastMseKp2ThreshShort1000, + * FastMseKp2UpdGain1000, FastMseKp2ExitEn1000 bits. + */ + err = lan88xx_TR_reg_set(phydev, 0x0FFC, 0xC0A028); + if (err < 0) + pr_warn("Failed to Set Register[0x0FFC]\n"); + + /* Get access to Channel b'01, Node b'1111, Register 0x35. + * Write 24-bit value 0x041600 to register. Setting + * FastMseSearchPhShNum1000, FastMseSearchClksPerPh1000, + * FastMsePhChangeDelay1000 bits. + */ + err = lan88xx_TR_reg_set(phydev, 0x0FEA, 0x041600); + if (err < 0) + pr_warn("Failed to Set Register[0x0FEA]\n"); + + /* Get access to Channel b'10, Node b'1101, Register 0x03. + * Write 24-bit value 0x000004 to register. Setting TrFreeze bits. 
+ */ + err = lan88xx_TR_reg_set(phydev, 0x1686, 0x000004); + if (err < 0) + pr_warn("Failed to Set Register[0x1686]\n"); +} + static int lan88xx_probe(struct phy_device *phydev) { struct device *dev = &phydev->mdio.dev; @@ -132,6 +287,25 @@ static void lan88xx_set_mdix(struct phy_device *phydev) phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0); } +static int lan88xx_config_init(struct phy_device *phydev) +{ + int val; + + genphy_config_init(phydev); + /*Zerodetect delay enable */ + val = phy_read_mmd(phydev, MDIO_MMD_PCS, + PHY_ARDENNES_MMD_DEV_3_PHY_CFG); + val |= PHY_ARDENNES_MMD_DEV_3_PHY_CFG_ZD_DLY_EN_; + + phy_write_mmd(phydev, MDIO_MMD_PCS, PHY_ARDENNES_MMD_DEV_3_PHY_CFG, + val); + + /* Config DSP registers */ + lan88xx_config_TR_regs(phydev); + + return 0; +} + static int lan88xx_config_aneg(struct phy_device *phydev) { lan88xx_set_mdix(phydev); @@ -151,7 +325,7 @@ static struct phy_driver microchip_phy_driver[] = { .probe = lan88xx_probe, .remove = lan88xx_remove, - .config_init = genphy_config_init, + .config_init = lan88xx_config_init, .config_aneg = lan88xx_config_aneg, .ack_interrupt = lan88xx_phy_ack_interrupt, @@ -160,6 +334,8 @@ static struct phy_driver microchip_phy_driver[] = { .suspend = lan88xx_suspend, .resume = genphy_resume, .set_wol = lan88xx_set_wol, + .read_page = lan88xx_read_page, + .write_page = lan88xx_write_page, } }; module_phy_driver(microchip_phy_driver); diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index ac23322a32e1..9e4ba8e80a18 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -535,8 +535,17 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id, /* Grab the bits from PHYIR1, and put them in the upper half */ phy_reg = mdiobus_read(bus, addr, MII_PHYSID1); - if (phy_reg < 0) + if (phy_reg < 0) { + /* if there is no device, return without an error so scanning + * the bus works properly + */ + if (phy_reg == -EIO || phy_reg == -ENODEV) { + *phy_id = 0xffffffff; + return 0; + } + return -EIO; + } *phy_id = (phy_reg & 0xffff) << 16; diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 1483bc7b01e1..7df07337d69c 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -620,6 +620,10 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, lock_sock(sk); error = -EINVAL; + + if (sockaddr_len != sizeof(struct sockaddr_pppox)) + goto end; + if (sp->sa_protocol != PX_PROTO_OE) goto end; diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index a6c6ce19eeee..ddb6bf85a59c 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -261,6 +261,17 @@ static void __team_option_inst_mark_removed_port(struct team *team, } } +static bool __team_option_inst_tmp_find(const struct list_head *opts, + const struct team_option_inst *needle) +{ + struct team_option_inst *opt_inst; + + list_for_each_entry(opt_inst, opts, tmp_list) + if (opt_inst == needle) + return true; + return false; +} + static int __team_options_register(struct team *team, const struct team_option *option, size_t option_count) @@ -1061,14 +1072,11 @@ static void team_port_leave(struct team *team, struct team_port *port) } #ifdef CONFIG_NET_POLL_CONTROLLER -static int team_port_enable_netpoll(struct team *team, struct team_port *port) +static int __team_port_enable_netpoll(struct team_port *port) { struct netpoll *np; int err; - if (!team->dev->npinfo) - return 0; - np = kzalloc(sizeof(*np), GFP_KERNEL); if (!np) return -ENOMEM; @@ -1082,6 +1090,14 
@@ static int team_port_enable_netpoll(struct team *team, struct team_port *port) return err; } +static int team_port_enable_netpoll(struct team_port *port) +{ + if (!port->team->dev->npinfo) + return 0; + + return __team_port_enable_netpoll(port); +} + static void team_port_disable_netpoll(struct team_port *port) { struct netpoll *np = port->np; @@ -1096,7 +1112,7 @@ static void team_port_disable_netpoll(struct team_port *port) kfree(np); } #else -static int team_port_enable_netpoll(struct team *team, struct team_port *port) +static int team_port_enable_netpoll(struct team_port *port) { return 0; } @@ -1210,7 +1226,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev, goto err_vids_add; } - err = team_port_enable_netpoll(team, port); + err = team_port_enable_netpoll(port); if (err) { netdev_err(dev, "Failed to enable netpoll on device %s\n", portname); @@ -1907,7 +1923,7 @@ static int team_netpoll_setup(struct net_device *dev, mutex_lock(&team->lock); list_for_each_entry(port, &team->port_list, list) { - err = team_port_enable_netpoll(team, port); + err = __team_port_enable_netpoll(port); if (err) { __team_netpoll_cleanup(team); break; @@ -2568,6 +2584,14 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) if (err) goto team_put; opt_inst->changed = true; + + /* dumb/evil user-space can send us duplicate opt, + * keep only the last one + */ + if (__team_option_inst_tmp_find(&opt_inst_list, + opt_inst)) + continue; + list_add(&opt_inst->tmp_list, &opt_inst_list); } if (!opt_found) { diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 28583aa0c17d..ef33950a45d9 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1102,12 +1102,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) goto drop; len = run_ebpf_filter(tun, skb, len); - - /* Trim extra bytes since we may insert vlan proto & TCI - * in tun_put_user(). - */ - len -= skb_vlan_tag_present(skb) ? sizeof(struct veth) : 0; - if (len <= 0 || pskb_trim(skb, len)) + if (len == 0 || pskb_trim(skb, len)) goto drop; if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index ca066b785e9f..42565dd33aa6 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1098,6 +1098,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x05c6, 0x9080, 8)}, {QMI_FIXED_INTF(0x05c6, 0x9083, 3)}, {QMI_FIXED_INTF(0x05c6, 0x9084, 4)}, + {QMI_FIXED_INTF(0x05c6, 0x90b2, 3)}, /* ublox R410M */ {QMI_FIXED_INTF(0x05c6, 0x920d, 0)}, {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */ @@ -1107,6 +1108,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */ {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */ {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */ + {QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */ {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */ {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */ @@ -1342,6 +1344,18 @@ static int qmi_wwan_probe(struct usb_interface *intf, id->driver_info = (unsigned long)&qmi_wwan_info; } + /* There are devices where the same interface number can be + * configured as different functions. 
We should only bind to + * vendor specific functions when matching on interface number + */ + if (id->match_flags & USB_DEVICE_ID_MATCH_INT_NUMBER && + desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) { + dev_dbg(&intf->dev, + "Rejecting interface number match for class %02x\n", + desc->bInterfaceClass); + return -ENODEV; + } + /* Quectel EC20 quirk where we've QMI on interface 4 instead of 0 */ if (quectel_ec20_detected(intf) && desc->bInterfaceNumber == 0) { dev_dbg(&intf->dev, "Quectel EC20 quirk, skipping interface 0\n"); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 7b187ec7411e..770422e953f7 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -147,6 +147,17 @@ struct receive_queue { struct xdp_rxq_info xdp_rxq; }; +/* Control VQ buffers: protected by the rtnl lock */ +struct control_buf { + struct virtio_net_ctrl_hdr hdr; + virtio_net_ctrl_ack status; + struct virtio_net_ctrl_mq mq; + u8 promisc; + u8 allmulti; + __virtio16 vid; + __virtio64 offloads; +}; + struct virtnet_info { struct virtio_device *vdev; struct virtqueue *cvq; @@ -192,14 +203,7 @@ struct virtnet_info { struct hlist_node node; struct hlist_node node_dead; - /* Control VQ buffers: protected by the rtnl lock */ - struct virtio_net_ctrl_hdr ctrl_hdr; - virtio_net_ctrl_ack ctrl_status; - struct virtio_net_ctrl_mq ctrl_mq; - u8 ctrl_promisc; - u8 ctrl_allmulti; - u16 ctrl_vid; - u64 ctrl_offloads; + struct control_buf *ctrl; /* Ethtool settings */ u8 duplex; @@ -1269,7 +1273,9 @@ static int virtnet_poll(struct napi_struct *napi, int budget) { struct receive_queue *rq = container_of(napi, struct receive_queue, napi); - unsigned int received; + struct virtnet_info *vi = rq->vq->vdev->priv; + struct send_queue *sq; + unsigned int received, qp; bool xdp_xmit = false; virtnet_poll_cleantx(rq); @@ -1280,8 +1286,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget) if (received < budget) virtqueue_napi_complete(napi, rq->vq, received); - if (xdp_xmit) + if (xdp_xmit) { + qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + + smp_processor_id(); + sq = &vi->sq[qp]; + virtqueue_kick(sq->vq); xdp_do_flush_map(); + } return received; } @@ -1454,25 +1465,25 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, /* Caller should know better */ BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); - vi->ctrl_status = ~0; - vi->ctrl_hdr.class = class; - vi->ctrl_hdr.cmd = cmd; + vi->ctrl->status = ~0; + vi->ctrl->hdr.class = class; + vi->ctrl->hdr.cmd = cmd; /* Add header */ - sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr)); + sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr)); sgs[out_num++] = &hdr; if (out) sgs[out_num++] = out; /* Add return status. */ - sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status)); + sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status)); sgs[out_num] = &stat; BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); if (unlikely(!virtqueue_kick(vi->cvq))) - return vi->ctrl_status == VIRTIO_NET_OK; + return vi->ctrl->status == VIRTIO_NET_OK; /* Spin for a response, the kick causes an ioport write, trapping * into the hypervisor, so the request should be handled immediately. 
@@ -1481,7 +1492,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, !virtqueue_is_broken(vi->cvq)) cpu_relax(); - return vi->ctrl_status == VIRTIO_NET_OK; + return vi->ctrl->status == VIRTIO_NET_OK; } static int virtnet_set_mac_address(struct net_device *dev, void *p) @@ -1593,8 +1604,8 @@ static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) return 0; - vi->ctrl_mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); - sg_init_one(&sg, &vi->ctrl_mq, sizeof(vi->ctrl_mq)); + vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); + sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { @@ -1653,22 +1664,22 @@ static void virtnet_set_rx_mode(struct net_device *dev) if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) return; - vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0); - vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0); + vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0); + vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0); - sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc)); + sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_PROMISC, sg)) dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", - vi->ctrl_promisc ? "en" : "dis"); + vi->ctrl->promisc ? "en" : "dis"); - sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti)); + sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", - vi->ctrl_allmulti ? "en" : "dis"); + vi->ctrl->allmulti ? 
"en" : "dis"); uc_count = netdev_uc_count(dev); mc_count = netdev_mc_count(dev); @@ -1714,8 +1725,8 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev, struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg; - vi->ctrl_vid = vid; - sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); + vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); + sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_ADD, &sg)) @@ -1729,8 +1740,8 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg; - vi->ctrl_vid = vid; - sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); + vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); + sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_DEL, &sg)) @@ -2126,9 +2137,9 @@ static int virtnet_restore_up(struct virtio_device *vdev) static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads) { struct scatterlist sg; - vi->ctrl_offloads = cpu_to_virtio64(vi->vdev, offloads); + vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads); - sg_init_one(&sg, &vi->ctrl_offloads, sizeof(vi->ctrl_offloads)); + sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS, VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) { @@ -2351,6 +2362,7 @@ static void virtnet_free_queues(struct virtnet_info *vi) kfree(vi->rq); kfree(vi->sq); + kfree(vi->ctrl); } static void _free_receive_bufs(struct virtnet_info *vi) @@ -2543,6 +2555,9 @@ static int virtnet_alloc_queues(struct virtnet_info *vi) { int i; + vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL); + if (!vi->ctrl) + goto err_ctrl; vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL); if (!vi->sq) goto err_sq; @@ -2571,6 +2586,8 @@ static int virtnet_alloc_queues(struct virtnet_info *vi) err_rq: kfree(vi->sq); err_sq: + kfree(vi->ctrl); +err_ctrl: return -ENOMEM; } diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index e04937f44f33..9ebe2a689966 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -1218,6 +1218,7 @@ vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb, union { void *ptr; struct ethhdr *eth; + struct vlan_ethhdr *veth; struct iphdr *ipv4; struct ipv6hdr *ipv6; struct tcphdr *tcp; @@ -1228,16 +1229,24 @@ vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb, if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen)) return 0; + if (skb->protocol == cpu_to_be16(ETH_P_8021Q) || + skb->protocol == cpu_to_be16(ETH_P_8021AD)) + hlen = sizeof(struct vlan_ethhdr); + else + hlen = sizeof(struct ethhdr); + hdr.eth = eth_hdr(skb); if (gdesc->rcd.v4) { - BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP)); - hdr.ptr += sizeof(struct ethhdr); + BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP) && + hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IP)); + hdr.ptr += hlen; BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP); hlen = hdr.ipv4->ihl << 2; hdr.ptr += hdr.ipv4->ihl << 2; } else if (gdesc->rcd.v6) { - BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6)); - hdr.ptr += sizeof(struct ethhdr); + BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6) && + hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IPV6)); + hdr.ptr += hlen; /* Use an estimated value, since we also need to handle * TSO case. 
*/ diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 59ec34052a65..a3326463b71f 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -69,10 +69,10 @@ /* * Version numbers */ -#define VMXNET3_DRIVER_VERSION_STRING "1.4.13.0-k" +#define VMXNET3_DRIVER_VERSION_STRING "1.4.14.0-k" /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ -#define VMXNET3_DRIVER_VERSION_NUM 0x01040d00 +#define VMXNET3_DRIVER_VERSION_NUM 0x01040e00 #if defined(CONFIG_PCI_MSI) /* RSS only makes sense if MSI-X is supported. */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index 9277f4c2bfeb..94e177d7c9b5 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c @@ -459,7 +459,7 @@ static void brcmf_fw_free_request(struct brcmf_fw_request *req) kfree(req); } -static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx) +static int brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx) { struct brcmf_fw *fwctx = ctx; struct brcmf_fw_item *cur; @@ -498,13 +498,10 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx) brcmf_dbg(TRACE, "nvram %p len %d\n", nvram, nvram_length); cur->nv_data.data = nvram; cur->nv_data.len = nvram_length; - return; + return 0; fail: - brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev)); - fwctx->done(fwctx->dev, -ENOENT, NULL); - brcmf_fw_free_request(fwctx->req); - kfree(fwctx); + return -ENOENT; } static int brcmf_fw_request_next_item(struct brcmf_fw *fwctx, bool async) @@ -553,20 +550,27 @@ static void brcmf_fw_request_done(const struct firmware *fw, void *ctx) brcmf_dbg(TRACE, "enter: firmware %s %sfound\n", cur->path, fw ? "" : "not "); - if (fw) { - if (cur->type == BRCMF_FW_TYPE_BINARY) - cur->binary = fw; - else if (cur->type == BRCMF_FW_TYPE_NVRAM) - brcmf_fw_request_nvram_done(fw, fwctx); - else - release_firmware(fw); - } else if (cur->type == BRCMF_FW_TYPE_NVRAM) { - brcmf_fw_request_nvram_done(NULL, fwctx); - } else if (!(cur->flags & BRCMF_FW_REQF_OPTIONAL)) { + if (!fw) ret = -ENOENT; + + switch (cur->type) { + case BRCMF_FW_TYPE_NVRAM: + ret = brcmf_fw_request_nvram_done(fw, fwctx); + break; + case BRCMF_FW_TYPE_BINARY: + cur->binary = fw; + break; + default: + /* something fishy here so bail out early */ + brcmf_err("unknown fw type: %d\n", cur->type); + release_firmware(fw); + ret = -EINVAL; goto fail; } + if (ret < 0 && !(cur->flags & BRCMF_FW_REQF_OPTIONAL)) + goto fail; + do { if (++fwctx->curpos == fwctx->req->n_items) { ret = 0; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h index 7af3a0f51b77..a17c4a79b8d4 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -749,13 +750,9 @@ struct iwl_scan_req_umac { } __packed; #define IWL_SCAN_REQ_UMAC_SIZE_V8 sizeof(struct iwl_scan_req_umac) -#define IWL_SCAN_REQ_UMAC_SIZE_V7 (sizeof(struct iwl_scan_req_umac) - \ - 4 * sizeof(u8)) -#define IWL_SCAN_REQ_UMAC_SIZE_V6 (sizeof(struct iwl_scan_req_umac) - \ - 2 * sizeof(u8) - sizeof(__le16)) -#define IWL_SCAN_REQ_UMAC_SIZE_V1 (sizeof(struct iwl_scan_req_umac) - \ - 2 * sizeof(__le32) - 2 * sizeof(u8) - \ - sizeof(__le16)) +#define IWL_SCAN_REQ_UMAC_SIZE_V7 48 +#define IWL_SCAN_REQ_UMAC_SIZE_V6 44 +#define IWL_SCAN_REQ_UMAC_SIZE_V1 36 /** * struct iwl_umac_scan_abort diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 8928613e033e..ca0174680af9 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -76,6 +76,7 @@ #include "iwl-io.h" #include "iwl-csr.h" #include "fw/acpi.h" +#include "fw/api/nvm-reg.h" /* NVM offsets (in words) definitions */ enum nvm_offsets { @@ -146,8 +147,8 @@ static const u8 iwl_ext_nvm_channels[] = { 149, 153, 157, 161, 165, 169, 173, 177, 181 }; -#define IWL_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels) -#define IWL_NUM_CHANNELS_EXT ARRAY_SIZE(iwl_ext_nvm_channels) +#define IWL_NVM_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels) +#define IWL_NVM_NUM_CHANNELS_EXT ARRAY_SIZE(iwl_ext_nvm_channels) #define NUM_2GHZ_CHANNELS 14 #define NUM_2GHZ_CHANNELS_EXT 14 #define FIRST_2GHZ_HT_MINUS 5 @@ -301,11 +302,11 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, const u8 *nvm_chan; if (cfg->nvm_type != IWL_NVM_EXT) { - num_of_ch = IWL_NUM_CHANNELS; + num_of_ch = IWL_NVM_NUM_CHANNELS; nvm_chan = &iwl_nvm_channels[0]; num_2ghz_channels = NUM_2GHZ_CHANNELS; } else { - num_of_ch = IWL_NUM_CHANNELS_EXT; + num_of_ch = IWL_NVM_NUM_CHANNELS_EXT; nvm_chan = &iwl_ext_nvm_channels[0]; num_2ghz_channels = NUM_2GHZ_CHANNELS_EXT; } @@ -720,12 +721,12 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, if (cfg->nvm_type != IWL_NVM_EXT) data = kzalloc(sizeof(*data) + sizeof(struct ieee80211_channel) * - IWL_NUM_CHANNELS, + IWL_NVM_NUM_CHANNELS, GFP_KERNEL); else data = kzalloc(sizeof(*data) + sizeof(struct ieee80211_channel) * - IWL_NUM_CHANNELS_EXT, + IWL_NVM_NUM_CHANNELS_EXT, GFP_KERNEL); if (!data) return NULL; @@ -842,24 +843,34 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan, return flags; } +struct regdb_ptrs { + struct ieee80211_wmm_rule *rule; + u32 token; +}; + struct ieee80211_regdomain * iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, - int num_of_ch, __le32 *channels, u16 fw_mcc) + int num_of_ch, __le32 *channels, u16 fw_mcc, + u16 geo_info) { int ch_idx; u16 ch_flags; u32 reg_rule_flags, prev_reg_rule_flags = 0; const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ? 
iwl_ext_nvm_channels : iwl_nvm_channels; - struct ieee80211_regdomain *regd; - int size_of_regd; + struct ieee80211_regdomain *regd, *copy_rd; + int size_of_regd, regd_to_copy, wmms_to_copy; + int size_of_wmms = 0; struct ieee80211_reg_rule *rule; + struct ieee80211_wmm_rule *wmm_rule, *d_wmm, *s_wmm; + struct regdb_ptrs *regdb_ptrs; enum nl80211_band band; int center_freq, prev_center_freq = 0; - int valid_rules = 0; + int valid_rules = 0, n_wmms = 0; + int i; bool new_rule; int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ? - IWL_NUM_CHANNELS_EXT : IWL_NUM_CHANNELS; + IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS; if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES)) return ERR_PTR(-EINVAL); @@ -875,10 +886,26 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, sizeof(struct ieee80211_regdomain) + num_of_ch * sizeof(struct ieee80211_reg_rule); - regd = kzalloc(size_of_regd, GFP_KERNEL); + if (geo_info & GEO_WMM_ETSI_5GHZ_INFO) + size_of_wmms = + num_of_ch * sizeof(struct ieee80211_wmm_rule); + + regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL); if (!regd) return ERR_PTR(-ENOMEM); + regdb_ptrs = kcalloc(num_of_ch, sizeof(*regdb_ptrs), GFP_KERNEL); + if (!regdb_ptrs) { + copy_rd = ERR_PTR(-ENOMEM); + goto out; + } + + /* set alpha2 from FW. */ + regd->alpha2[0] = fw_mcc >> 8; + regd->alpha2[1] = fw_mcc & 0xff; + + wmm_rule = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd); + for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { ch_flags = (u16)__le32_to_cpup(channels + ch_idx); band = (ch_idx < NUM_2GHZ_CHANNELS) ? @@ -927,14 +954,66 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, iwl_nvm_print_channel_flags(dev, IWL_DL_LAR, nvm_chan[ch_idx], ch_flags); + + if (!(geo_info & GEO_WMM_ETSI_5GHZ_INFO) || + band == NL80211_BAND_2GHZ) + continue; + + if (!reg_query_regdb_wmm(regd->alpha2, center_freq, + ®db_ptrs[n_wmms].token, wmm_rule)) { + /* Add only new rules */ + for (i = 0; i < n_wmms; i++) { + if (regdb_ptrs[i].token == + regdb_ptrs[n_wmms].token) { + rule->wmm_rule = regdb_ptrs[i].rule; + break; + } + } + if (i == n_wmms) { + rule->wmm_rule = wmm_rule; + regdb_ptrs[n_wmms++].rule = wmm_rule; + wmm_rule++; + } + } } regd->n_reg_rules = valid_rules; + regd->n_wmm_rules = n_wmms; - /* set alpha2 from FW. */ - regd->alpha2[0] = fw_mcc >> 8; - regd->alpha2[1] = fw_mcc & 0xff; + /* + * Narrow down regdom for unused regulatory rules to prevent hole + * between reg rules to wmm rules. 
+ */ + regd_to_copy = sizeof(struct ieee80211_regdomain) + + valid_rules * sizeof(struct ieee80211_reg_rule); + + wmms_to_copy = sizeof(struct ieee80211_wmm_rule) * n_wmms; + + copy_rd = kzalloc(regd_to_copy + wmms_to_copy, GFP_KERNEL); + if (!copy_rd) { + copy_rd = ERR_PTR(-ENOMEM); + goto out; + } + + memcpy(copy_rd, regd, regd_to_copy); + memcpy((u8 *)copy_rd + regd_to_copy, (u8 *)regd + size_of_regd, + wmms_to_copy); + + d_wmm = (struct ieee80211_wmm_rule *)((u8 *)copy_rd + regd_to_copy); + s_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd); + + for (i = 0; i < regd->n_reg_rules; i++) { + if (!regd->reg_rules[i].wmm_rule) + continue; + + copy_rd->reg_rules[i].wmm_rule = d_wmm + + (regd->reg_rules[i].wmm_rule - s_wmm) / + sizeof(struct ieee80211_wmm_rule); + } - return regd; +out: + kfree(regdb_ptrs); + kfree(regd); + return copy_rd; } IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h index 306736c7a042..3071a23b7606 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h @@ -101,12 +101,14 @@ void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, * * This function parses the regulatory channel data received as a * MCC_UPDATE_CMD command. It returns a newly allocation regulatory domain, - * to be fed into the regulatory core. An ERR_PTR is returned on error. + * to be fed into the regulatory core. In case the geo_info is set handle + * accordingly. An ERR_PTR is returned on error. * If not given to the regulatory core, the user is responsible for freeing * the regdomain returned here with kfree. */ struct ieee80211_regdomain * iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, - int num_of_ch, __le32 *channels, u16 fw_mcc); + int num_of_ch, __le32 *channels, u16 fw_mcc, + u16 geo_info); #endif /* __iwl_nvm_parse_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 51b30424575b..90f8c89ea59c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -311,7 +311,8 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg, __le32_to_cpu(resp->n_channels), resp->channels, - __le16_to_cpu(resp->mcc)); + __le16_to_cpu(resp->mcc), + __le16_to_cpu(resp->geo_info)); /* Store the return source id */ src_id = resp->source_id; kfree(resp); diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c index 8b6b07a936f5..b026e80940a4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c @@ -158,16 +158,6 @@ static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist) static u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv) { - struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params; - - /* override ant_num / ant_path */ - if (mod_params->ant_sel) { - rtlpriv->btcoexist.btc_info.ant_num = - (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1); - - rtlpriv->btcoexist.btc_info.single_ant_path = - (mod_params->ant_sel == 1 ? 
0 : 1); - } return rtlpriv->btcoexist.btc_info.single_ant_path; } @@ -178,7 +168,6 @@ static u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv) static u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv) { - struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params; u8 num; if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2) @@ -186,10 +175,6 @@ static u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv) else num = 1; - /* override ant_num / ant_path */ - if (mod_params->ant_sel) - num = (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1) + 1; - return num; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index e7bbbc95cdb1..b4f3f91b590e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c @@ -848,6 +848,9 @@ static bool _rtl8723be_init_mac(struct ieee80211_hw *hw) return false; } + if (rtlpriv->cfg->ops->get_btc_status()) + rtlpriv->btcoexist.btc_ops->btc_power_on_setting(rtlpriv); + bytetmp = rtl_read_byte(rtlpriv, REG_MULTI_FUNC_CTRL); rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL, bytetmp | BIT(3)); @@ -2696,21 +2699,21 @@ void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw, rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B; rtlpriv->btcoexist.btc_info.ant_num = (value & 0x1); rtlpriv->btcoexist.btc_info.single_ant_path = - (value & 0x40); /*0xc3[6]*/ + (value & 0x40 ? ANT_AUX : ANT_MAIN); /*0xc3[6]*/ } else { rtlpriv->btcoexist.btc_info.btcoexist = 0; rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B; rtlpriv->btcoexist.btc_info.ant_num = ANT_X2; - rtlpriv->btcoexist.btc_info.single_ant_path = 0; + rtlpriv->btcoexist.btc_info.single_ant_path = ANT_MAIN; } /* override ant_num / ant_path */ if (mod_params->ant_sel) { rtlpriv->btcoexist.btc_info.ant_num = - (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1); + (mod_params->ant_sel == 1 ? ANT_X1 : ANT_X2); rtlpriv->btcoexist.btc_info.single_ant_path = - (mod_params->ant_sel == 1 ? 0 : 1); + (mod_params->ant_sel == 1 ? 
ANT_AUX : ANT_MAIN); } } diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index d27e33960e77..ce1754054a07 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -2823,6 +2823,11 @@ enum bt_ant_num { ANT_X1 = 1, }; +enum bt_ant_path { + ANT_MAIN = 0, + ANT_AUX = 1, +}; + enum bt_co_type { BT_2WIRE = 0, BT_ISSC_3WIRE = 1, diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig index 85997184e047..9d36473dc2a2 100644 --- a/drivers/nvdimm/Kconfig +++ b/drivers/nvdimm/Kconfig @@ -103,8 +103,7 @@ config NVDIMM_DAX Select Y if unsure config OF_PMEM - # FIXME: make tristate once OF_NUMA dependency removed - bool "Device-tree support for persistent memory regions" + tristate "Device-tree support for persistent memory regions" depends on OF default LIBNVDIMM help diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index e00d45522b80..8d348b22ba45 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -88,9 +88,9 @@ int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd) int nvdimm_init_config_data(struct nvdimm_drvdata *ndd) { struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev); + int rc = validate_dimm(ndd), cmd_rc = 0; struct nd_cmd_get_config_data_hdr *cmd; struct nvdimm_bus_descriptor *nd_desc; - int rc = validate_dimm(ndd); u32 max_cmd_size, config_size; size_t offset; @@ -124,9 +124,11 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd) cmd->in_offset = offset; rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev), ND_CMD_GET_CONFIG_DATA, cmd, - cmd->in_length + sizeof(*cmd), NULL); - if (rc || cmd->status) { - rc = -ENXIO; + cmd->in_length + sizeof(*cmd), &cmd_rc); + if (rc < 0) + break; + if (cmd_rc < 0) { + rc = cmd_rc; break; } memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length); @@ -140,9 +142,9 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd) int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset, void *buf, size_t len) { - int rc = validate_dimm(ndd); size_t max_cmd_size, buf_offset; struct nd_cmd_set_config_hdr *cmd; + int rc = validate_dimm(ndd), cmd_rc = 0; struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev); struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc; @@ -164,7 +166,6 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset, for (buf_offset = 0; len; len -= cmd->in_length, buf_offset += cmd->in_length) { size_t cmd_size; - u32 *status; cmd->in_offset = offset + buf_offset; cmd->in_length = min(max_cmd_size, len); @@ -172,12 +173,13 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset, /* status is output in the last 4-bytes of the command buffer */ cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32); - status = ((void *) cmd) + cmd_size - sizeof(u32); rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev), - ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, NULL); - if (rc || *status) { - rc = rc ? 
rc : -ENXIO; + ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc); + if (rc < 0) + break; + if (cmd_rc < 0) { + rc = cmd_rc; break; } } diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c index 85013bad35de..0a701837dfc0 100644 --- a/drivers/nvdimm/of_pmem.c +++ b/drivers/nvdimm/of_pmem.c @@ -67,7 +67,7 @@ static int of_pmem_region_probe(struct platform_device *pdev) */ memset(&ndr_desc, 0, sizeof(ndr_desc)); ndr_desc.attr_groups = region_attr_groups; - ndr_desc.numa_node = of_node_to_nid(np); + ndr_desc.numa_node = dev_to_node(&pdev->dev); ndr_desc.res = &pdev->resource[i]; ndr_desc.of_node = np; set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags); diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig index b979cf3bce65..88a8b5916624 100644 --- a/drivers/nvme/host/Kconfig +++ b/drivers/nvme/host/Kconfig @@ -27,7 +27,7 @@ config NVME_FABRICS config NVME_RDMA tristate "NVM Express over Fabrics RDMA host driver" - depends on INFINIBAND && BLOCK + depends on INFINIBAND && INFINIBAND_ADDR_TRANS && BLOCK select NVME_CORE select NVME_FABRICS select SG_POOL diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 9df4f71e58ca..a3771c5729f5 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -764,6 +764,7 @@ static int nvme_submit_user_cmd(struct request_queue *q, ret = PTR_ERR(meta); goto out_unmap; } + req->cmd_flags |= REQ_INTEGRITY; } } @@ -2997,31 +2998,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) if (nvme_init_ns_head(ns, nsid, id)) goto out_free_id; nvme_setup_streams_ns(ctrl, ns); - -#ifdef CONFIG_NVME_MULTIPATH - /* - * If multipathing is enabled we need to always use the subsystem - * instance number for numbering our devices to avoid conflicts - * between subsystems that have multiple controllers and thus use - * the multipath-aware subsystem node and those that have a single - * controller and use the controller node directly. - */ - if (ns->head->disk) { - sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance, - ctrl->cntlid, ns->head->instance); - flags = GENHD_FL_HIDDEN; - } else { - sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance, - ns->head->instance); - } -#else - /* - * But without the multipath code enabled, multiple controller per - * subsystems are visible as devices and thus we cannot use the - * subsystem instance. 
- */ - sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance); -#endif + nvme_set_disk_name(disk_name, ns, ctrl, &flags); if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) { if (nvme_nvm_register(ns, disk_name, node)) { diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index 124c458806df..7ae732a77fe8 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -668,6 +668,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, ret = -ENOMEM; goto out; } + kfree(opts->transport); opts->transport = p; break; case NVMF_OPT_NQN: @@ -676,6 +677,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, ret = -ENOMEM; goto out; } + kfree(opts->subsysnqn); opts->subsysnqn = p; nqnlen = strlen(opts->subsysnqn); if (nqnlen >= NVMF_NQN_SIZE) { @@ -698,6 +700,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, ret = -ENOMEM; goto out; } + kfree(opts->traddr); opts->traddr = p; break; case NVMF_OPT_TRSVCID: @@ -706,6 +709,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, ret = -ENOMEM; goto out; } + kfree(opts->trsvcid); opts->trsvcid = p; break; case NVMF_OPT_QUEUE_SIZE: @@ -792,6 +796,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, ret = -EINVAL; goto out; } + nvmf_host_put(opts->host); opts->host = nvmf_host_add(p); kfree(p); if (!opts->host) { @@ -817,6 +822,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, ret = -ENOMEM; goto out; } + kfree(opts->host_traddr); opts->host_traddr = p; break; case NVMF_OPT_HOST_ID: diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 956e0b8e9c4d..d7b664ae5923 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -15,10 +15,32 @@ #include "nvme.h" static bool multipath = true; -module_param(multipath, bool, 0644); +module_param(multipath, bool, 0444); MODULE_PARM_DESC(multipath, "turn on native support for multiple controllers per subsystem"); +/* + * If multipathing is enabled we need to always use the subsystem instance + * number for numbering our devices to avoid conflicts between subsystems that + * have multiple controllers and thus use the multipath-aware subsystem node + * and those that have a single controller and use the controller node + * directly. 
+ */ +void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, + struct nvme_ctrl *ctrl, int *flags) +{ + if (!multipath) { + sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance); + } else if (ns->head->disk) { + sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance, + ctrl->cntlid, ns->head->instance); + *flags = GENHD_FL_HIDDEN; + } else { + sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance, + ns->head->instance); + } +} + void nvme_failover_req(struct request *req) { struct nvme_ns *ns = req->q->queuedata; diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 061fecfd44f5..7ded7a51c430 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -436,6 +436,8 @@ extern const struct attribute_group nvme_ns_id_attr_group; extern const struct block_device_operations nvme_ns_head_ops; #ifdef CONFIG_NVME_MULTIPATH +void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, + struct nvme_ctrl *ctrl, int *flags); void nvme_failover_req(struct request *req); bool nvme_req_needs_failover(struct request *req, blk_status_t error); void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl); @@ -461,6 +463,16 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns) } #else +/* + * Without the multipath code enabled, multiple controller per subsystems are + * visible as devices and thus we cannot use the subsystem instance. + */ +static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, + struct nvme_ctrl *ctrl, int *flags) +{ + sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance); +} + static inline void nvme_failover_req(struct request *req) { } diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig index 5f4f8b16685f..3c7b61ddb0d1 100644 --- a/drivers/nvme/target/Kconfig +++ b/drivers/nvme/target/Kconfig @@ -27,7 +27,7 @@ config NVME_TARGET_LOOP config NVME_TARGET_RDMA tristate "NVMe over Fabrics RDMA target support" - depends on INFINIBAND + depends on INFINIBAND && INFINIBAND_ADDR_TRANS depends on NVME_TARGET select SGL_ALLOC help diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 31fdfba556a8..27a8561c0cb9 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -469,6 +469,12 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work) nvme_stop_ctrl(&ctrl->ctrl); nvme_loop_shutdown_ctrl(ctrl); + if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { + /* state change failure should never happen */ + WARN_ON_ONCE(1); + return; + } + ret = nvme_loop_configure_admin_queue(ctrl); if (ret) goto out_disable; diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 84aa9d676375..6da20b9688f7 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -942,7 +942,7 @@ int __init early_init_dt_scan_chosen_stdout(void) int offset; const char *p, *q, *options = NULL; int l; - const struct earlycon_id *match; + const struct earlycon_id **p_match; const void *fdt = initial_boot_params; offset = fdt_path_offset(fdt, "/chosen"); @@ -969,7 +969,10 @@ int __init early_init_dt_scan_chosen_stdout(void) return 0; } - for (match = __earlycon_table; match < __earlycon_table_end; match++) { + for (p_match = __earlycon_table; p_match < __earlycon_table_end; + p_match++) { + const struct earlycon_id *match = *p_match; + if (!match->compatible[0]) continue; diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index acba1f56af3e..126cf19e869b 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -1263,7 +1263,7 @@ static 
struct parisc_driver ccio_driver __refdata = { * I/O Page Directory, the resource map, and initalizing the * U2/Uturn chip into virtual mode. */ -static void +static void __init ccio_ioc_init(struct ioc *ioc) { int i; diff --git a/drivers/pci/dwc/pcie-kirin.c b/drivers/pci/dwc/pcie-kirin.c index a6b88c7f6e3e..d2970a009eb5 100644 --- a/drivers/pci/dwc/pcie-kirin.c +++ b/drivers/pci/dwc/pcie-kirin.c @@ -486,7 +486,7 @@ static int kirin_pcie_probe(struct platform_device *pdev) return ret; kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node, - "reset-gpio", 0); + "reset-gpios", 0); if (kirin_pcie->gpio_id_reset < 0) return -ENODEV; diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c index b04d37b3c5de..9abf549631b4 100644 --- a/drivers/pci/host/pci-aardvark.c +++ b/drivers/pci/host/pci-aardvark.c @@ -29,6 +29,7 @@ #define PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT 5 #define PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE (0 << 11) #define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT 12 +#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ 0x2 #define PCIE_CORE_LINK_CTRL_STAT_REG 0xd0 #define PCIE_CORE_LINK_L0S_ENTRY BIT(0) #define PCIE_CORE_LINK_TRAINING BIT(5) @@ -100,7 +101,8 @@ #define PCIE_ISR1_MASK_REG (CONTROL_BASE_ADDR + 0x4C) #define PCIE_ISR1_POWER_STATE_CHANGE BIT(4) #define PCIE_ISR1_FLUSH BIT(5) -#define PCIE_ISR1_ALL_MASK GENMASK(5, 4) +#define PCIE_ISR1_INTX_ASSERT(val) BIT(8 + (val)) +#define PCIE_ISR1_ALL_MASK GENMASK(11, 4) #define PCIE_MSI_ADDR_LOW_REG (CONTROL_BASE_ADDR + 0x50) #define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54) #define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58) @@ -172,8 +174,6 @@ #define PCIE_CONFIG_WR_TYPE0 0xa #define PCIE_CONFIG_WR_TYPE1 0xb -/* PCI_BDF shifts 8bit, so we need extra 4bit shift */ -#define PCIE_BDF(dev) (dev << 4) #define PCIE_CONF_BUS(bus) (((bus) & 0xff) << 20) #define PCIE_CONF_DEV(dev) (((dev) & 0x1f) << 15) #define PCIE_CONF_FUNC(fun) (((fun) & 0x7) << 12) @@ -296,7 +296,8 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie) reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE | (7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) | PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE | - PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT; + (PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ << + PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT); advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG); /* Program PCIe Control 2 to disable strict ordering */ @@ -437,7 +438,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, u32 reg; int ret; - if (PCI_SLOT(devfn) != 0) { + if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) { *val = 0xffffffff; return PCIBIOS_DEVICE_NOT_FOUND; } @@ -456,7 +457,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, advk_writel(pcie, reg, PIO_CTRL); /* Program the address registers */ - reg = PCIE_BDF(devfn) | PCIE_CONF_REG(where); + reg = PCIE_CONF_ADDR(bus->number, devfn, where); advk_writel(pcie, reg, PIO_ADDR_LS); advk_writel(pcie, 0, PIO_ADDR_MS); @@ -491,7 +492,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn, int offset; int ret; - if (PCI_SLOT(devfn) != 0) + if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) return PCIBIOS_DEVICE_NOT_FOUND; if (where % size) @@ -609,9 +610,9 @@ static void advk_pcie_irq_mask(struct irq_data *d) irq_hw_number_t hwirq = irqd_to_hwirq(d); u32 mask; - mask = advk_readl(pcie, PCIE_ISR0_MASK_REG); - mask |= PCIE_ISR0_INTX_ASSERT(hwirq); - advk_writel(pcie, mask, PCIE_ISR0_MASK_REG); + mask = 
advk_readl(pcie, PCIE_ISR1_MASK_REG); + mask |= PCIE_ISR1_INTX_ASSERT(hwirq); + advk_writel(pcie, mask, PCIE_ISR1_MASK_REG); } static void advk_pcie_irq_unmask(struct irq_data *d) @@ -620,9 +621,9 @@ static void advk_pcie_irq_unmask(struct irq_data *d) irq_hw_number_t hwirq = irqd_to_hwirq(d); u32 mask; - mask = advk_readl(pcie, PCIE_ISR0_MASK_REG); - mask &= ~PCIE_ISR0_INTX_ASSERT(hwirq); - advk_writel(pcie, mask, PCIE_ISR0_MASK_REG); + mask = advk_readl(pcie, PCIE_ISR1_MASK_REG); + mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq); + advk_writel(pcie, mask, PCIE_ISR1_MASK_REG); } static int advk_pcie_irq_map(struct irq_domain *h, @@ -765,29 +766,35 @@ static void advk_pcie_handle_msi(struct advk_pcie *pcie) static void advk_pcie_handle_int(struct advk_pcie *pcie) { - u32 val, mask, status; + u32 isr0_val, isr0_mask, isr0_status; + u32 isr1_val, isr1_mask, isr1_status; int i, virq; - val = advk_readl(pcie, PCIE_ISR0_REG); - mask = advk_readl(pcie, PCIE_ISR0_MASK_REG); - status = val & ((~mask) & PCIE_ISR0_ALL_MASK); + isr0_val = advk_readl(pcie, PCIE_ISR0_REG); + isr0_mask = advk_readl(pcie, PCIE_ISR0_MASK_REG); + isr0_status = isr0_val & ((~isr0_mask) & PCIE_ISR0_ALL_MASK); + + isr1_val = advk_readl(pcie, PCIE_ISR1_REG); + isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG); + isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK); - if (!status) { - advk_writel(pcie, val, PCIE_ISR0_REG); + if (!isr0_status && !isr1_status) { + advk_writel(pcie, isr0_val, PCIE_ISR0_REG); + advk_writel(pcie, isr1_val, PCIE_ISR1_REG); return; } /* Process MSI interrupts */ - if (status & PCIE_ISR0_MSI_INT_PENDING) + if (isr0_status & PCIE_ISR0_MSI_INT_PENDING) advk_pcie_handle_msi(pcie); /* Process legacy interrupts */ for (i = 0; i < PCI_NUM_INTX; i++) { - if (!(status & PCIE_ISR0_INTX_ASSERT(i))) + if (!(isr1_status & PCIE_ISR1_INTX_ASSERT(i))) continue; - advk_writel(pcie, PCIE_ISR0_INTX_ASSERT(i), - PCIE_ISR0_REG); + advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i), + PCIE_ISR1_REG); virq = irq_find_mapping(pcie->irq_domain, i); generic_handle_irq(virq); diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 6ace47099fc5..b9a131137e64 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -958,10 +958,11 @@ static int pci_pm_freeze(struct device *dev) * devices should not be touched during freeze/thaw transitions, * however. */ - if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND)) + if (!dev_pm_smart_suspend_and_suspended(dev)) { pm_runtime_resume(dev); + pci_dev->state_saved = false; + } - pci_dev->state_saved = false; if (pm->freeze) { int error; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index e597655a5643..a04197ce767d 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -5273,11 +5273,11 @@ void pcie_print_link_status(struct pci_dev *dev) bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width); if (bw_avail >= bw_cap) - pci_info(dev, "%u.%03u Gb/s available bandwidth (%s x%d link)\n", + pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n", bw_cap / 1000, bw_cap % 1000, PCIE_SPEED2STR(speed_cap), width_cap); else - pci_info(dev, "%u.%03u Gb/s available bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n", + pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n", bw_avail / 1000, bw_avail % 1000, PCIE_SPEED2STR(speed), width, limiting_dev ? 
pci_name(limiting_dev) : "<unknown>", diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 39d06dd1f63a..bc309c5327ff 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -154,7 +154,7 @@ config DELL_LAPTOP depends on ACPI_VIDEO || ACPI_VIDEO = n depends on RFKILL || RFKILL = n depends on SERIO_I8042 - select DELL_SMBIOS + depends on DELL_SMBIOS select POWER_SUPPLY select LEDS_CLASS select NEW_LEDS diff --git a/drivers/platform/x86/asus-wireless.c b/drivers/platform/x86/asus-wireless.c index d4aeac3477f5..f086469ea740 100644 --- a/drivers/platform/x86/asus-wireless.c +++ b/drivers/platform/x86/asus-wireless.c @@ -178,8 +178,10 @@ static int asus_wireless_remove(struct acpi_device *adev) { struct asus_wireless_data *data = acpi_driver_data(adev); - if (data->wq) + if (data->wq) { + devm_led_classdev_unregister(&adev->dev, &data->led); destroy_workqueue(data->wq); + } return 0; } diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index 9d27016c899e..0434ab7b6497 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c @@ -740,10 +740,7 @@ static int do_dma_request(struct mport_dma_req *req, tx->callback = dma_xfer_callback; tx->callback_param = req; - req->dmach = chan; - req->sync = sync; req->status = DMA_IN_PROGRESS; - init_completion(&req->req_comp); kref_get(&req->refcount); cookie = dmaengine_submit(tx); @@ -831,13 +828,20 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode, if (!req) return -ENOMEM; - kref_init(&req->refcount); - ret = get_dma_channel(priv); if (ret) { kfree(req); return ret; } + chan = priv->dmach; + + kref_init(&req->refcount); + init_completion(&req->req_comp); + req->dir = dir; + req->filp = filp; + req->priv = priv; + req->dmach = chan; + req->sync = sync; /* * If parameter loc_addr != NULL, we are transferring data from/to @@ -925,11 +929,6 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode, xfer->offset, xfer->length); } - req->dir = dir; - req->filp = filp; - req->priv = priv; - chan = priv->dmach; - nents = dma_map_sg(chan->device->dev, req->sgt.sgl, req->sgt.nents, dir); if (nents == 0) { diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c index 8e70a627e0bb..cbbafdcaaecb 100644 --- a/drivers/remoteproc/qcom_q6v5_pil.c +++ b/drivers/remoteproc/qcom_q6v5_pil.c @@ -1083,6 +1083,7 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc) dev_err(qproc->dev, "unable to resolve mba region\n"); return ret; } + of_node_put(node); qproc->mba_phys = r.start; qproc->mba_size = resource_size(&r); @@ -1100,6 +1101,7 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc) dev_err(qproc->dev, "unable to resolve mpss region\n"); return ret; } + of_node_put(node); qproc->mpss_phys = qproc->mpss_reloc = r.start; qproc->mpss_size = resource_size(&r); diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 6d9c5832ce47..a9609d971f7f 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -1163,7 +1163,7 @@ int rproc_trigger_recovery(struct rproc *rproc) if (ret) return ret; - ret = rproc_stop(rproc, false); + ret = rproc_stop(rproc, true); if (ret) goto unlock_mutex; @@ -1316,7 +1316,7 @@ void rproc_shutdown(struct rproc *rproc) if (!atomic_dec_and_test(&rproc->power)) goto out; - ret = rproc_stop(rproc, true); + ret = rproc_stop(rproc, false); if (ret) { atomic_inc(&rproc->power); goto out; diff --git 
a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c index 64b6de9763ee..1efdf9ff8679 100644 --- a/drivers/rpmsg/rpmsg_char.c +++ b/drivers/rpmsg/rpmsg_char.c @@ -581,4 +581,6 @@ static void rpmsg_chrdev_exit(void) unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX); } module_exit(rpmsg_chrdev_exit); + +MODULE_ALIAS("rpmsg:rpmsg_chrdev"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c index 304e891e35fc..60f2250fd96b 100644 --- a/drivers/rtc/rtc-opal.c +++ b/drivers/rtc/rtc-opal.c @@ -57,7 +57,7 @@ static void tm_to_opal(struct rtc_time *tm, u32 *y_m_d, u64 *h_m_s_ms) static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm) { - long rc = OPAL_BUSY; + s64 rc = OPAL_BUSY; int retries = 10; u32 y_m_d; u64 h_m_s_ms; @@ -66,13 +66,17 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm) while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms); - if (rc == OPAL_BUSY_EVENT) + if (rc == OPAL_BUSY_EVENT) { + msleep(OPAL_BUSY_DELAY_MS); opal_poll_events(NULL); - else if (retries-- && (rc == OPAL_HARDWARE - || rc == OPAL_INTERNAL_ERROR)) - msleep(10); - else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT) - break; + } else if (rc == OPAL_BUSY) { + msleep(OPAL_BUSY_DELAY_MS); + } else if (rc == OPAL_HARDWARE || rc == OPAL_INTERNAL_ERROR) { + if (retries--) { + msleep(10); /* Wait 10ms before retry */ + rc = OPAL_BUSY; /* go around again */ + } + } } if (rc != OPAL_SUCCESS) @@ -87,21 +91,26 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm) static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm) { - long rc = OPAL_BUSY; + s64 rc = OPAL_BUSY; int retries = 10; u32 y_m_d = 0; u64 h_m_s_ms = 0; tm_to_opal(tm, &y_m_d, &h_m_s_ms); + while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { rc = opal_rtc_write(y_m_d, h_m_s_ms); - if (rc == OPAL_BUSY_EVENT) + if (rc == OPAL_BUSY_EVENT) { + msleep(OPAL_BUSY_DELAY_MS); opal_poll_events(NULL); - else if (retries-- && (rc == OPAL_HARDWARE - || rc == OPAL_INTERNAL_ERROR)) - msleep(10); - else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT) - break; + } else if (rc == OPAL_BUSY) { + msleep(OPAL_BUSY_DELAY_MS); + } else if (rc == OPAL_HARDWARE || rc == OPAL_INTERNAL_ERROR) { + if (retries--) { + msleep(10); /* Wait 10ms before retry */ + rc = OPAL_BUSY; /* go around again */ + } + } } return rc == OPAL_SUCCESS ? 0 : -EIO; diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c index 62f5f04d8f61..5e963fe0e38d 100644 --- a/drivers/s390/block/dasd_alias.c +++ b/drivers/s390/block/dasd_alias.c @@ -592,13 +592,22 @@ static int _schedule_lcu_update(struct alias_lcu *lcu, int dasd_alias_add_device(struct dasd_device *device) { struct dasd_eckd_private *private = device->private; - struct alias_lcu *lcu; + __u8 uaddr = private->uid.real_unit_addr; + struct alias_lcu *lcu = private->lcu; unsigned long flags; int rc; - lcu = private->lcu; rc = 0; spin_lock_irqsave(&lcu->lock, flags); + /* + * Check if device and lcu type differ. If so, the uac data may be + * outdated and needs to be updated. 
+ */ + if (private->uid.type != lcu->uac->unit[uaddr].ua_type) { + lcu->flags |= UPDATE_PENDING; + DBF_DEV_EVENT(DBF_WARNING, device, "%s", + "uid type mismatch - trigger rescan"); + } if (!(lcu->flags & UPDATE_PENDING)) { rc = _add_device_to_lcu(lcu, device, device); if (rc) diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index f035c2f25d35..131f1989f6f3 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c @@ -27,7 +27,6 @@ #include <asm/io.h> #include <asm/irq.h> #include <asm/vtoc.h> -#include <asm/diag.h> #include "dasd_int.h" #include "dasd_diag.h" diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c index 5f8d9ea69ebd..eceba3858cef 100644 --- a/drivers/s390/char/sclp_early_core.c +++ b/drivers/s390/char/sclp_early_core.c @@ -18,7 +18,7 @@ int sclp_init_state __section(.data) = sclp_init_state_uninitialized; * Used to keep track of the size of the event masks. Qemu until version 2.11 * only supports 4 and needs a workaround. */ -bool sclp_mask_compat_mode; +bool sclp_mask_compat_mode __section(.data); void sclp_early_wait_irq(void) { diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 6652a49a49b1..9029804dcd22 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -452,6 +452,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area) static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area) { + struct channel_path *chp; struct chp_link link; struct chp_id chpid; int status; @@ -464,10 +465,17 @@ static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area) chpid.id = sei_area->rsid; /* allocate a new channel path structure, if needed */ status = chp_get_status(chpid); - if (status < 0) - chp_new(chpid); - else if (!status) + if (!status) return; + + if (status < 0) { + chp_new(chpid); + } else { + chp = chpid_to_chp(chpid); + mutex_lock(&chp->lock); + chp_update_desc(chp); + mutex_unlock(&chp->lock); + } memset(&link, 0, sizeof(struct chp_link)); link.chpid = chpid; if ((sei_area->vf & 0xc0) != 0) { diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c index ff6963ad6e39..3c800642134e 100644 --- a/drivers/s390/cio/vfio_ccw_fsm.c +++ b/drivers/s390/cio/vfio_ccw_fsm.c @@ -20,12 +20,12 @@ static int fsm_io_helper(struct vfio_ccw_private *private) int ccode; __u8 lpm; unsigned long flags; + int ret; sch = private->sch; spin_lock_irqsave(sch->lock, flags); private->state = VFIO_CCW_STATE_BUSY; - spin_unlock_irqrestore(sch->lock, flags); orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm); @@ -38,10 +38,12 @@ static int fsm_io_helper(struct vfio_ccw_private *private) * Initialize device status information */ sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND; - return 0; + ret = 0; + break; case 1: /* Status pending */ case 2: /* Busy */ - return -EBUSY; + ret = -EBUSY; + break; case 3: /* Device/path not operational */ { lpm = orb->cmd.lpm; @@ -51,13 +53,16 @@ static int fsm_io_helper(struct vfio_ccw_private *private) sch->lpm = 0; if (cio_update_schib(sch)) - return -ENODEV; - - return sch->lpm ? -EACCES : -ENODEV; + ret = -ENODEV; + else + ret = sch->lpm ? 
-EACCES : -ENODEV; + break; } default: - return ccode; + ret = ccode; } + spin_unlock_irqrestore(sch->lock, flags); + return ret; } static void fsm_notoper(struct vfio_ccw_private *private, diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 4326715dc13e..78b98b3e7efa 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -557,7 +557,6 @@ enum qeth_prot_versions { enum qeth_cmd_buffer_state { BUF_STATE_FREE, BUF_STATE_LOCKED, - BUF_STATE_PROCESSED, }; enum qeth_cq { @@ -601,7 +600,6 @@ struct qeth_channel { struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO]; atomic_t irq_pending; int io_buf_no; - int buf_no; }; /** diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 04fefa5bb08d..dffd820731f2 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -706,7 +706,6 @@ void qeth_clear_ipacmd_list(struct qeth_card *card) qeth_put_reply(reply); } spin_unlock_irqrestore(&card->lock, flags); - atomic_set(&card->write.irq_pending, 0); } EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list); @@ -818,7 +817,6 @@ void qeth_clear_cmd_buffers(struct qeth_channel *channel) for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) qeth_release_buffer(channel, &channel->iob[cnt]); - channel->buf_no = 0; channel->io_buf_no = 0; } EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers); @@ -924,7 +922,6 @@ static int qeth_setup_channel(struct qeth_channel *channel) kfree(channel->iob[cnt].data); return -ENOMEM; } - channel->buf_no = 0; channel->io_buf_no = 0; atomic_set(&channel->irq_pending, 0); spin_lock_init(&channel->iob_lock); @@ -1100,16 +1097,9 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, { int rc; int cstat, dstat; - struct qeth_cmd_buffer *buffer; + struct qeth_cmd_buffer *iob = NULL; struct qeth_channel *channel; struct qeth_card *card; - struct qeth_cmd_buffer *iob; - __u8 index; - - if (__qeth_check_irb_error(cdev, intparm, irb)) - return; - cstat = irb->scsw.cmd.cstat; - dstat = irb->scsw.cmd.dstat; card = CARD_FROM_CDEV(cdev); if (!card) @@ -1127,6 +1117,19 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, channel = &card->data; QETH_CARD_TEXT(card, 5, "data"); } + + if (qeth_intparm_is_iob(intparm)) + iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm); + + if (__qeth_check_irb_error(cdev, intparm, irb)) { + /* IO was terminated, free its resources. 
*/ + if (iob) + qeth_release_buffer(iob->channel, iob); + atomic_set(&channel->irq_pending, 0); + wake_up(&card->wait_q); + return; + } + atomic_set(&channel->irq_pending, 0); if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC)) @@ -1150,6 +1153,10 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, /* we don't have to handle this further */ intparm = 0; } + + cstat = irb->scsw.cmd.cstat; + dstat = irb->scsw.cmd.dstat; + if ((dstat & DEV_STAT_UNIT_EXCEP) || (dstat & DEV_STAT_UNIT_CHECK) || (cstat)) { @@ -1182,25 +1189,15 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, channel->state = CH_STATE_RCD_DONE; goto out; } - if (intparm) { - buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm); - buffer->state = BUF_STATE_PROCESSED; - } if (channel == &card->data) return; if (channel == &card->read && channel->state == CH_STATE_UP) __qeth_issue_next_read(card); - iob = channel->iob; - index = channel->buf_no; - while (iob[index].state == BUF_STATE_PROCESSED) { - if (iob[index].callback != NULL) - iob[index].callback(channel, iob + index); + if (iob && iob->callback) + iob->callback(iob->channel, iob); - index = (index + 1) % QETH_CMD_BUFFER_NO; - } - channel->buf_no = index; out: wake_up(&card->wait_q); return; @@ -1870,8 +1867,8 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel, atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); QETH_DBF_TEXT(SETUP, 6, "noirqpnd"); spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); - rc = ccw_device_start(channel->ccwdev, - &channel->ccw, (addr_t) iob, 0, 0); + rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw, + (addr_t) iob, 0, 0, QETH_TIMEOUT); spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); if (rc) { @@ -1888,7 +1885,6 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel, if (channel->state != CH_STATE_UP) { rc = -ETIME; QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); - qeth_clear_cmd_buffers(channel); } else rc = 0; return rc; @@ -1942,8 +1938,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); QETH_DBF_TEXT(SETUP, 6, "noirqpnd"); spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); - rc = ccw_device_start(channel->ccwdev, - &channel->ccw, (addr_t) iob, 0, 0); + rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw, + (addr_t) iob, 0, 0, QETH_TIMEOUT); spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); if (rc) { @@ -1964,7 +1960,6 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n", dev_name(&channel->ccwdev->dev)); QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME); - qeth_clear_cmd_buffers(channel); return -ETIME; } return qeth_idx_activate_get_answer(channel, idx_reply_cb); @@ -2166,8 +2161,8 @@ int qeth_send_control_data(struct qeth_card *card, int len, QETH_CARD_TEXT(card, 6, "noirqpnd"); spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); - rc = ccw_device_start(card->write.ccwdev, &card->write.ccw, - (addr_t) iob, 0, 0); + rc = ccw_device_start_timeout(CARD_WDEV(card), &card->write.ccw, + (addr_t) iob, 0, 0, event_timeout); spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags); if (rc) { QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: " @@ -2199,8 +2194,6 @@ int qeth_send_control_data(struct qeth_card *card, int len, } } - if (reply->rc == -EIO) - goto error; rc = reply->rc; qeth_put_reply(reply); return rc; @@ -2211,10 +2204,6 @@ time_err: 
list_del_init(&reply->list); spin_unlock_irqrestore(&reply->card->lock, flags); atomic_inc(&reply->received); -error: - atomic_set(&card->write.irq_pending, 0); - qeth_release_buffer(iob->channel, iob); - card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO; rc = reply->rc; qeth_put_reply(reply); return rc; @@ -3033,28 +3022,23 @@ static int qeth_send_startlan(struct qeth_card *card) return rc; } -static int qeth_default_setadapterparms_cb(struct qeth_card *card, - struct qeth_reply *reply, unsigned long data) +static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd) { - struct qeth_ipa_cmd *cmd; - - QETH_CARD_TEXT(card, 4, "defadpcb"); - - cmd = (struct qeth_ipa_cmd *) data; - if (cmd->hdr.return_code == 0) + if (!cmd->hdr.return_code) cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code; - return 0; + return cmd->hdr.return_code; } static int qeth_query_setadapterparms_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; QETH_CARD_TEXT(card, 3, "quyadpcb"); + if (qeth_setadpparms_inspect_rc(cmd)) + return 0; - cmd = (struct qeth_ipa_cmd *) data; if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) { card->info.link_type = cmd->data.setadapterparms.data.query_cmds_supp.lan_type; @@ -3062,7 +3046,7 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card, } card->options.adp.supported_funcs = cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds; - return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd); + return 0; } static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card, @@ -3154,22 +3138,20 @@ EXPORT_SYMBOL_GPL(qeth_query_ipassists); static int qeth_query_switch_attributes_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; - struct qeth_switch_info *sw_info; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; struct qeth_query_switch_attributes *attrs; + struct qeth_switch_info *sw_info; QETH_CARD_TEXT(card, 2, "qswiatcb"); - cmd = (struct qeth_ipa_cmd *) data; - sw_info = (struct qeth_switch_info *)reply->param; - if (cmd->data.setadapterparms.hdr.return_code == 0) { - attrs = &cmd->data.setadapterparms.data.query_switch_attributes; - sw_info->capabilities = attrs->capabilities; - sw_info->settings = attrs->settings; - QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities, - sw_info->settings); - } - qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); + if (qeth_setadpparms_inspect_rc(cmd)) + return 0; + sw_info = (struct qeth_switch_info *)reply->param; + attrs = &cmd->data.setadapterparms.data.query_switch_attributes; + sw_info->capabilities = attrs->capabilities; + sw_info->settings = attrs->settings; + QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities, + sw_info->settings); return 0; } @@ -4207,16 +4189,13 @@ EXPORT_SYMBOL_GPL(qeth_do_send_packet); static int qeth_setadp_promisc_mode_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; struct qeth_ipacmd_setadpparms *setparms; QETH_CARD_TEXT(card, 4, "prmadpcb"); - cmd = (struct qeth_ipa_cmd *) data; setparms = &(cmd->data.setadapterparms); - - qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd); - if (cmd->hdr.return_code) { + if (qeth_setadpparms_inspect_rc(cmd)) { QETH_CARD_TEXT_(card, 4, "prmrc%x", 
cmd->hdr.return_code); setparms->data.mode = SET_PROMISC_MODE_OFF; } @@ -4286,18 +4265,18 @@ EXPORT_SYMBOL_GPL(qeth_get_stats); static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; QETH_CARD_TEXT(card, 4, "chgmaccb"); + if (qeth_setadpparms_inspect_rc(cmd)) + return 0; - cmd = (struct qeth_ipa_cmd *) data; if (!card->options.layer2 || !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) { ether_addr_copy(card->dev->dev_addr, cmd->data.setadapterparms.data.change_addr.addr); card->info.mac_bits |= QETH_LAYER2_MAC_READ; } - qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); return 0; } @@ -4328,13 +4307,15 @@ EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr); static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; struct qeth_set_access_ctrl *access_ctrl_req; int fallback = *(int *)reply->param; QETH_CARD_TEXT(card, 4, "setaccb"); + if (cmd->hdr.return_code) + return 0; + qeth_setadpparms_inspect_rc(cmd); - cmd = (struct qeth_ipa_cmd *) data; access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; QETH_DBF_TEXT_(SETUP, 2, "setaccb"); QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name); @@ -4407,7 +4388,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, card->options.isolation = card->options.prev_isolation; break; } - qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); return 0; } @@ -4695,14 +4675,15 @@ out: static int qeth_setadpparms_query_oat_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; struct qeth_qoat_priv *priv; char *resdata; int resdatalen; QETH_CARD_TEXT(card, 3, "qoatcb"); + if (qeth_setadpparms_inspect_rc(cmd)) + return 0; - cmd = (struct qeth_ipa_cmd *)data; priv = (struct qeth_qoat_priv *)reply->param; resdatalen = cmd->data.setadapterparms.hdr.cmdlength; resdata = (char *)data + 28; @@ -4796,21 +4777,18 @@ out: static int qeth_query_card_info_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; + struct carrier_info *carrier_info = (struct carrier_info *)reply->param; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; struct qeth_query_card_info *card_info; - struct carrier_info *carrier_info; QETH_CARD_TEXT(card, 2, "qcrdincb"); - carrier_info = (struct carrier_info *)reply->param; - cmd = (struct qeth_ipa_cmd *)data; - card_info = &cmd->data.setadapterparms.data.card_info; - if (cmd->data.setadapterparms.hdr.return_code == 0) { - carrier_info->card_type = card_info->card_type; - carrier_info->port_mode = card_info->port_mode; - carrier_info->port_speed = card_info->port_speed; - } + if (qeth_setadpparms_inspect_rc(cmd)) + return 0; - qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); + card_info = &cmd->data.setadapterparms.data.card_info; + carrier_info->card_type = card_info->card_type; + carrier_info->port_mode = card_info->port_mode; + carrier_info->port_speed = card_info->port_speed; return 0; } @@ -4857,7 +4835,7 @@ int qeth_vm_request_mac(struct qeth_card *card) goto out; } - ccw_device_get_id(CARD_DDEV(card), &id); + ccw_device_get_id(CARD_RDEV(card), &id); request->resp_buf_len = sizeof(*response); 
request->resp_version = DIAG26C_VERSION2; request->op_code = DIAG26C_GET_MAC; @@ -6563,10 +6541,14 @@ static int __init qeth_core_init(void) mutex_init(&qeth_mod_mutex); qeth_wq = create_singlethread_workqueue("qeth_wq"); + if (!qeth_wq) { + rc = -ENOMEM; + goto out_err; + } rc = qeth_register_dbf_views(); if (rc) - goto out_err; + goto dbf_err; qeth_core_root_dev = root_device_register("qeth"); rc = PTR_ERR_OR_ZERO(qeth_core_root_dev); if (rc) @@ -6603,6 +6585,8 @@ slab_err: root_device_unregister(qeth_core_root_dev); register_err: qeth_unregister_dbf_views(); +dbf_err: + destroy_workqueue(qeth_wq); out_err: pr_err("Initializing the qeth device driver failed\n"); return rc; diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index 619f897b4bb0..f4d1ec0b8f5a 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -35,6 +35,18 @@ extern unsigned char IPA_PDU_HEADER[]; #define QETH_HALT_CHANNEL_PARM -11 #define QETH_RCD_PARM -12 +static inline bool qeth_intparm_is_iob(unsigned long intparm) +{ + switch (intparm) { + case QETH_CLEAR_CHANNEL_PARM: + case QETH_HALT_CHANNEL_PARM: + case QETH_RCD_PARM: + case 0: + return false; + } + return true; +} + /*****************************************************************************/ /* IP Assist related definitions */ /*****************************************************************************/ diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 50a313806dde..b8079f2a65b3 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -21,7 +21,6 @@ #include <linux/list.h> #include <linux/hash.h> #include <linux/hashtable.h> -#include <linux/string.h> #include <asm/setup.h> #include "qeth_core.h" #include "qeth_l2.h" @@ -122,13 +121,10 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) QETH_CARD_TEXT(card, 2, "L2Setmac"); rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC); if (rc == 0) { - card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; - ether_addr_copy(card->dev->dev_addr, mac); dev_info(&card->gdev->dev, - "MAC address %pM successfully registered on device %s\n", - card->dev->dev_addr, card->dev->name); + "MAC address %pM successfully registered on device %s\n", + mac, card->dev->name); } else { - card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; switch (rc) { case -EEXIST: dev_warn(&card->gdev->dev, @@ -143,19 +139,6 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) return rc; } -static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac) -{ - int rc; - - QETH_CARD_TEXT(card, 2, "L2Delmac"); - if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) - return 0; - rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC); - if (rc == 0) - card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; - return rc; -} - static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac) { enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ? 
@@ -520,6 +503,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) { struct sockaddr *addr = p; struct qeth_card *card = dev->ml_priv; + u8 old_addr[ETH_ALEN]; int rc = 0; QETH_CARD_TEXT(card, 3, "setmac"); @@ -531,14 +515,35 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) return -EOPNOTSUPP; } QETH_CARD_HEX(card, 3, addr->sa_data, ETH_ALEN); + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { QETH_CARD_TEXT(card, 3, "setmcREC"); return -ERESTARTSYS; } - rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]); - if (!rc || (rc == -ENOENT)) - rc = qeth_l2_send_setmac(card, addr->sa_data); - return rc ? -EINVAL : 0; + + if (!qeth_card_hw_is_reachable(card)) { + ether_addr_copy(dev->dev_addr, addr->sa_data); + return 0; + } + + /* don't register the same address twice */ + if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) && + (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) + return 0; + + /* add the new address, switch over, drop the old */ + rc = qeth_l2_send_setmac(card, addr->sa_data); + if (rc) + return rc; + ether_addr_copy(old_addr, dev->dev_addr); + ether_addr_copy(dev->dev_addr, addr->sa_data); + + if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED) + qeth_l2_remove_mac(card, old_addr); + card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; + return 0; } static void qeth_promisc_to_bridge(struct qeth_card *card) @@ -1068,8 +1073,9 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) goto out_remove; } - if (card->info.type != QETH_CARD_TYPE_OSN) - qeth_l2_send_setmac(card, &card->dev->dev_addr[0]); + if (card->info.type != QETH_CARD_TYPE_OSN && + !qeth_l2_send_setmac(card, card->dev->dev_addr)) + card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) { if (card->info.hwtrap && @@ -1339,8 +1345,8 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len, qeth_prepare_control_data(card, len, iob); QETH_CARD_TEXT(card, 6, "osnoirqp"); spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); - rc = ccw_device_start(card->write.ccwdev, &card->write.ccw, - (addr_t) iob, 0, 0); + rc = ccw_device_start_timeout(CARD_WDEV(card), &card->write.ccw, + (addr_t) iob, 0, 0, QETH_IPA_TIMEOUT); spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags); if (rc) { QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: " diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c index 3b0c8b8a7634..066b5c3aaae6 100644 --- a/drivers/s390/net/smsgiucv.c +++ b/drivers/s390/net/smsgiucv.c @@ -176,7 +176,7 @@ static struct device_driver smsg_driver = { static void __exit smsg_exit(void) { - cpcmd("SET SMSG IUCV", NULL, 0, NULL); + cpcmd("SET SMSG OFF", NULL, 0, NULL); device_unregister(smsg_dev); iucv_unregister(&smsg_handler, 1); driver_unregister(&smsg_driver); diff --git a/drivers/sbus/char/oradax.c b/drivers/sbus/char/oradax.c index c44d7c7ffc92..1754f55e2fac 100644 --- a/drivers/sbus/char/oradax.c +++ b/drivers/sbus/char/oradax.c @@ -3,7 +3,7 @@ * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or + * the Free Software Foundation, either version 2 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c index abddde11982b..98597b59c12a 100644 --- a/drivers/scsi/fnic/fnic_trace.c +++ b/drivers/scsi/fnic/fnic_trace.c @@ -296,7 +296,7 @@ int fnic_get_stats_data(struct stats_debug_info *debug, "Number of Abort FW Timeouts: %lld\n" "Number of Abort IO NOT Found: %lld\n" - "Abord issued times: \n" + "Abort issued times: \n" " < 6 sec : %lld\n" " 6 sec - 20 sec : %lld\n" " 20 sec - 30 sec : %lld\n" diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c index edb7be786c65..9e8de1462593 100644 --- a/drivers/scsi/isci/port_config.c +++ b/drivers/scsi/isci/port_config.c @@ -291,7 +291,7 @@ sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost, * Note: We have not moved the current phy_index so we will actually * compare the startting phy with itself. * This is expected and required to add the phy to the port. */ - while (phy_index < SCI_MAX_PHYS) { + for (; phy_index < SCI_MAX_PHYS; phy_index++) { if ((phy_mask & (1 << phy_index)) == 0) continue; sci_phy_get_sas_address(&ihost->phys[phy_index], @@ -311,7 +311,6 @@ sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost, &ihost->phys[phy_index]); assigned_phy_mask |= (1 << phy_index); - phy_index++; } } diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index ce97cde3b41c..f4d988dd1e9d 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -1124,12 +1124,12 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) goto fail_fw_init; } - ret = 0; + return 0; fail_fw_init: dev_err(&instance->pdev->dev, - "Init cmd return status %s for SCSI host %d\n", - ret ? 
"FAILED" : "SUCCESS", instance->host->host_no); + "Init cmd return status FAILED for SCSI host %d\n", + instance->host->host_no); return ret; } diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 9ef5e3b810f6..656c98e116a9 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -234,11 +234,13 @@ static const char *sdebug_version_date = "20180128"; #define F_INV_OP 0x200 #define F_FAKE_RW 0x400 #define F_M_ACCESS 0x800 /* media access */ -#define F_LONG_DELAY 0x1000 +#define F_SSU_DELAY 0x1000 +#define F_SYNC_DELAY 0x2000 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR) #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW) #define FF_SA (F_SA_HIGH | F_SA_LOW) +#define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY) #define SDEBUG_MAX_PARTS 4 @@ -510,7 +512,7 @@ static const struct opcode_info_t release_iarr[] = { }; static const struct opcode_info_t sync_cache_iarr[] = { - {0, 0x91, 0, F_LONG_DELAY | F_M_ACCESS, resp_sync_cache, NULL, + {0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL, {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* SYNC_CACHE (16) */ }; @@ -553,7 +555,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = { resp_write_dt0, write_iarr, /* WRITE(16) */ {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} }, - {0, 0x1b, 0, F_LONG_DELAY, resp_start_stop, NULL,/* START STOP UNIT */ + {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */ {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */ @@ -606,7 +608,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = { resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */ {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, - {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_LONG_DELAY | F_M_ACCESS, + {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, sync_cache_iarr, {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, /* SYNC_CACHE (10) */ @@ -667,6 +669,7 @@ static bool sdebug_strict = DEF_STRICT; static bool sdebug_any_injecting_opt; static bool sdebug_verbose; static bool have_dif_prot; +static bool write_since_sync; static bool sdebug_statistics = DEF_STATISTICS; static unsigned int sdebug_store_sectors; @@ -1607,6 +1610,7 @@ static int resp_start_stop(struct scsi_cmnd *scp, { unsigned char *cmd = scp->cmnd; int power_cond, stop; + bool changing; power_cond = (cmd[4] & 0xf0) >> 4; if (power_cond) { @@ -1614,8 +1618,12 @@ static int resp_start_stop(struct scsi_cmnd *scp, return check_condition_result; } stop = !(cmd[4] & 1); + changing = atomic_read(&devip->stopped) == !stop; atomic_xchg(&devip->stopped, stop); - return (cmd[1] & 0x1) ? 
SDEG_RES_IMMED_MASK : 0; /* check IMMED bit */ + if (!changing || cmd[1] & 0x1) /* state unchanged or IMMED set */ + return SDEG_RES_IMMED_MASK; + else + return 0; } static sector_t get_sdebug_capacity(void) @@ -2473,6 +2481,7 @@ static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba, if (do_write) { sdb = scsi_out(scmd); dir = DMA_TO_DEVICE; + write_since_sync = true; } else { sdb = scsi_in(scmd); dir = DMA_FROM_DEVICE; @@ -3583,6 +3592,7 @@ static int resp_get_lba_status(struct scsi_cmnd *scp, static int resp_sync_cache(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) { + int res = 0; u64 lba; u32 num_blocks; u8 *cmd = scp->cmnd; @@ -3598,7 +3608,11 @@ static int resp_sync_cache(struct scsi_cmnd *scp, mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0); return check_condition_result; } - return (cmd[1] & 0x2) ? SDEG_RES_IMMED_MASK : 0; /* check IMMED bit */ + if (!write_since_sync || cmd[1] & 0x2) + res = SDEG_RES_IMMED_MASK; + else /* delay if write_since_sync and IMMED clear */ + write_since_sync = false; + return res; } #define RL_BUCKET_ELEMS 8 @@ -5777,13 +5791,14 @@ fini: return schedule_resp(scp, devip, errsts, pfp, 0, 0); else if ((sdebug_jdelay || sdebug_ndelay) && (flags & F_LONG_DELAY)) { /* - * If any delay is active, want F_LONG_DELAY to be at least 1 + * If any delay is active, for F_SSU_DELAY want at least 1 * second and if sdebug_jdelay>0 want a long delay of that - * many seconds. + * many seconds; for F_SYNC_DELAY want 1/20 of that. */ int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay; + int denom = (flags & F_SYNC_DELAY) ? 20 : 1; - jdelay = mult_frac(USER_HZ * jdelay, HZ, USER_HZ); + jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ); return schedule_resp(scp, devip, errsts, pfp, jdelay, 0); } else return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay, diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index f4b52b44b966..65f6c94f2e9b 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -2322,6 +2322,12 @@ iscsi_multicast_skb(struct sk_buff *skb, uint32_t group, gfp_t gfp) return nlmsg_multicast(nls, skb, 0, group, gfp); } +static int +iscsi_unicast_skb(struct sk_buff *skb, u32 portid) +{ + return nlmsg_unicast(nls, skb, portid); +} + int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, char *data, uint32_t data_size) { @@ -2524,14 +2530,11 @@ void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport, EXPORT_SYMBOL_GPL(iscsi_ping_comp_event); static int -iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi, - void *payload, int size) +iscsi_if_send_reply(u32 portid, int type, void *payload, int size) { struct sk_buff *skb; struct nlmsghdr *nlh; int len = nlmsg_total_size(size); - int flags = multi ? NLM_F_MULTI : 0; - int t = done ? 
NLMSG_DONE : type; skb = alloc_skb(len, GFP_ATOMIC); if (!skb) { @@ -2539,10 +2542,9 @@ iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi, return -ENOMEM; } - nlh = __nlmsg_put(skb, 0, 0, t, (len - sizeof(*nlh)), 0); - nlh->nlmsg_flags = flags; + nlh = __nlmsg_put(skb, 0, 0, type, (len - sizeof(*nlh)), 0); memcpy(nlmsg_data(nlh), payload, size); - return iscsi_multicast_skb(skb, group, GFP_ATOMIC); + return iscsi_unicast_skb(skb, portid); } static int @@ -3470,6 +3472,7 @@ static int iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) { int err = 0; + u32 portid; struct iscsi_uevent *ev = nlmsg_data(nlh); struct iscsi_transport *transport = NULL; struct iscsi_internal *priv; @@ -3490,10 +3493,12 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) if (!try_module_get(transport->owner)) return -EINVAL; + portid = NETLINK_CB(skb).portid; + switch (nlh->nlmsg_type) { case ISCSI_UEVENT_CREATE_SESSION: err = iscsi_if_create_session(priv, ep, ev, - NETLINK_CB(skb).portid, + portid, ev->u.c_session.initial_cmdsn, ev->u.c_session.cmds_max, ev->u.c_session.queue_depth); @@ -3506,7 +3511,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) } err = iscsi_if_create_session(priv, ep, ev, - NETLINK_CB(skb).portid, + portid, ev->u.c_bound_session.initial_cmdsn, ev->u.c_bound_session.cmds_max, ev->u.c_bound_session.queue_depth); @@ -3664,6 +3669,8 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) static void iscsi_if_rx(struct sk_buff *skb) { + u32 portid = NETLINK_CB(skb).portid; + mutex_lock(&rx_queue_mutex); while (skb->len >= NLMSG_HDRLEN) { int err; @@ -3699,8 +3706,8 @@ iscsi_if_rx(struct sk_buff *skb) break; if (ev->type == ISCSI_UEVENT_GET_CHAP && !err) break; - err = iscsi_if_send_reply(group, nlh->nlmsg_seq, - nlh->nlmsg_type, 0, 0, ev, sizeof(*ev)); + err = iscsi_if_send_reply(portid, nlh->nlmsg_type, + ev, sizeof(*ev)); } while (err < 0 && err != -ECONNREFUSED && err != -ESRCH); skb_pull(skb, rlen); } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index a6201e696ab9..9421d9877730 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2121,6 +2121,8 @@ sd_spinup_disk(struct scsi_disk *sdkp) break; /* standby */ if (sshdr.asc == 4 && sshdr.ascq == 0xc) break; /* unavailable */ + if (sshdr.asc == 4 && sshdr.ascq == 0x1b) + break; /* sanitize in progress */ /* * Issue command to spin up drive when not ready */ diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 41df75eea57b..210407cd2341 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -400,8 +400,10 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf) * * Check that all zones of the device are equal. The last zone can however * be smaller. The zone size must also be a power of two number of LBAs. + * + * Returns the zone size in bytes upon success or an error code upon failure. 
*/ -static int sd_zbc_check_zone_size(struct scsi_disk *sdkp) +static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp) { u64 zone_blocks = 0; sector_t block = 0; @@ -412,8 +414,6 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp) int ret; u8 same; - sdkp->zone_blocks = 0; - /* Get a buffer */ buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL); if (!buf) @@ -445,16 +445,17 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp) /* Parse zone descriptors */ while (rec < buf + buf_len) { - zone_blocks = get_unaligned_be64(&rec[8]); - if (sdkp->zone_blocks == 0) { - sdkp->zone_blocks = zone_blocks; - } else if (zone_blocks != sdkp->zone_blocks && - (block + zone_blocks < sdkp->capacity - || zone_blocks > sdkp->zone_blocks)) { - zone_blocks = 0; + u64 this_zone_blocks = get_unaligned_be64(&rec[8]); + + if (zone_blocks == 0) { + zone_blocks = this_zone_blocks; + } else if (this_zone_blocks != zone_blocks && + (block + this_zone_blocks < sdkp->capacity + || this_zone_blocks > zone_blocks)) { + this_zone_blocks = 0; goto out; } - block += zone_blocks; + block += this_zone_blocks; rec += 64; } @@ -467,8 +468,6 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp) } while (block < sdkp->capacity); - zone_blocks = sdkp->zone_blocks; - out: if (!zone_blocks) { if (sdkp->first_scan) @@ -488,8 +487,7 @@ out: "Zone size too large\n"); ret = -ENODEV; } else { - sdkp->zone_blocks = zone_blocks; - sdkp->zone_shift = ilog2(zone_blocks); + ret = zone_blocks; } out_free: @@ -500,15 +498,14 @@ out_free: /** * sd_zbc_alloc_zone_bitmap - Allocate a zone bitmap (one bit per zone). - * @sdkp: The disk of the bitmap + * @nr_zones: Number of zones to allocate space for. + * @numa_node: NUMA node to allocate the memory from. */ -static inline unsigned long *sd_zbc_alloc_zone_bitmap(struct scsi_disk *sdkp) +static inline unsigned long * +sd_zbc_alloc_zone_bitmap(u32 nr_zones, int numa_node) { - struct request_queue *q = sdkp->disk->queue; - - return kzalloc_node(BITS_TO_LONGS(sdkp->nr_zones) - * sizeof(unsigned long), - GFP_KERNEL, q->node); + return kzalloc_node(BITS_TO_LONGS(nr_zones) * sizeof(unsigned long), + GFP_KERNEL, numa_node); } /** @@ -516,6 +513,7 @@ static inline unsigned long *sd_zbc_alloc_zone_bitmap(struct scsi_disk *sdkp) * @sdkp: disk used * @buf: report reply buffer * @buflen: length of @buf + * @zone_shift: logarithm base 2 of the number of blocks in a zone * @seq_zones_bitmap: bitmap of sequential zones to set * * Parse reported zone descriptors in @buf to identify sequential zones and @@ -525,7 +523,7 @@ static inline unsigned long *sd_zbc_alloc_zone_bitmap(struct scsi_disk *sdkp) * Return the LBA after the last zone reported. */ static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf, - unsigned int buflen, + unsigned int buflen, u32 zone_shift, unsigned long *seq_zones_bitmap) { sector_t lba, next_lba = sdkp->capacity; @@ -544,7 +542,7 @@ static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf, if (type != ZBC_ZONE_TYPE_CONV && cond != ZBC_ZONE_COND_READONLY && cond != ZBC_ZONE_COND_OFFLINE) - set_bit(lba >> sdkp->zone_shift, seq_zones_bitmap); + set_bit(lba >> zone_shift, seq_zones_bitmap); next_lba = lba + get_unaligned_be64(&rec[8]); rec += 64; } @@ -553,12 +551,16 @@ static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf, } /** - * sd_zbc_setup_seq_zones_bitmap - Initialize the disk seq zone bitmap. + * sd_zbc_setup_seq_zones_bitmap - Initialize a seq zone bitmap. 
* @sdkp: target disk + * @zone_shift: logarithm base 2 of the number of blocks in a zone + * @nr_zones: number of zones to set up a seq zone bitmap for * * Allocate a zone bitmap and initialize it by identifying sequential zones. */ -static int sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp) +static unsigned long * +sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp, u32 zone_shift, + u32 nr_zones) { struct request_queue *q = sdkp->disk->queue; unsigned long *seq_zones_bitmap; @@ -566,9 +568,9 @@ static int sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp) unsigned char *buf; int ret = -ENOMEM; - seq_zones_bitmap = sd_zbc_alloc_zone_bitmap(sdkp); + seq_zones_bitmap = sd_zbc_alloc_zone_bitmap(nr_zones, q->node); if (!seq_zones_bitmap) - return -ENOMEM; + return ERR_PTR(-ENOMEM); buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL); if (!buf) @@ -579,7 +581,7 @@ static int sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp) if (ret) goto out; lba = sd_zbc_get_seq_zones(sdkp, buf, SD_ZBC_BUF_SIZE, - seq_zones_bitmap); + zone_shift, seq_zones_bitmap); } if (lba != sdkp->capacity) { @@ -591,12 +593,9 @@ out: kfree(buf); if (ret) { kfree(seq_zones_bitmap); - return ret; + return ERR_PTR(ret); } - - q->seq_zones_bitmap = seq_zones_bitmap; - - return 0; + return seq_zones_bitmap; } static void sd_zbc_cleanup(struct scsi_disk *sdkp) @@ -612,44 +611,64 @@ static void sd_zbc_cleanup(struct scsi_disk *sdkp) q->nr_zones = 0; } -static int sd_zbc_setup(struct scsi_disk *sdkp) +static int sd_zbc_setup(struct scsi_disk *sdkp, u32 zone_blocks) { struct request_queue *q = sdkp->disk->queue; + u32 zone_shift = ilog2(zone_blocks); + u32 nr_zones; int ret; - /* READ16/WRITE16 is mandatory for ZBC disks */ - sdkp->device->use_16_for_rw = 1; - sdkp->device->use_10_for_rw = 0; - /* chunk_sectors indicates the zone size */ - blk_queue_chunk_sectors(sdkp->disk->queue, - logical_to_sectors(sdkp->device, sdkp->zone_blocks)); - sdkp->nr_zones = - round_up(sdkp->capacity, sdkp->zone_blocks) >> sdkp->zone_shift; + blk_queue_chunk_sectors(q, + logical_to_sectors(sdkp->device, zone_blocks)); + nr_zones = round_up(sdkp->capacity, zone_blocks) >> zone_shift; /* * Initialize the device request queue information if the number * of zones changed. 
*/ - if (sdkp->nr_zones != q->nr_zones) { - - sd_zbc_cleanup(sdkp); - - q->nr_zones = sdkp->nr_zones; - if (sdkp->nr_zones) { - q->seq_zones_wlock = sd_zbc_alloc_zone_bitmap(sdkp); - if (!q->seq_zones_wlock) { + if (nr_zones != sdkp->nr_zones || nr_zones != q->nr_zones) { + unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL; + size_t zone_bitmap_size; + + if (nr_zones) { + seq_zones_wlock = sd_zbc_alloc_zone_bitmap(nr_zones, + q->node); + if (!seq_zones_wlock) { ret = -ENOMEM; goto err; } - ret = sd_zbc_setup_seq_zones_bitmap(sdkp); - if (ret) { - sd_zbc_cleanup(sdkp); + seq_zones_bitmap = sd_zbc_setup_seq_zones_bitmap(sdkp, + zone_shift, nr_zones); + if (IS_ERR(seq_zones_bitmap)) { + ret = PTR_ERR(seq_zones_bitmap); + kfree(seq_zones_wlock); goto err; } } - + zone_bitmap_size = BITS_TO_LONGS(nr_zones) * + sizeof(unsigned long); + blk_mq_freeze_queue(q); + if (q->nr_zones != nr_zones) { + /* READ16/WRITE16 is mandatory for ZBC disks */ + sdkp->device->use_16_for_rw = 1; + sdkp->device->use_10_for_rw = 0; + + sdkp->zone_blocks = zone_blocks; + sdkp->zone_shift = zone_shift; + sdkp->nr_zones = nr_zones; + q->nr_zones = nr_zones; + swap(q->seq_zones_wlock, seq_zones_wlock); + swap(q->seq_zones_bitmap, seq_zones_bitmap); + } else if (memcmp(q->seq_zones_bitmap, seq_zones_bitmap, + zone_bitmap_size) != 0) { + memcpy(q->seq_zones_bitmap, seq_zones_bitmap, + zone_bitmap_size); + } + blk_mq_unfreeze_queue(q); + kfree(seq_zones_wlock); + kfree(seq_zones_bitmap); } return 0; @@ -661,6 +680,7 @@ err: int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf) { + int64_t zone_blocks; int ret; if (!sd_is_zoned(sdkp)) @@ -697,12 +717,16 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf) * Check zone size: only devices with a constant zone size (except * an eventual last runt zone) that is a power of 2 are supported. */ - ret = sd_zbc_check_zone_size(sdkp); - if (ret) + zone_blocks = sd_zbc_check_zone_size(sdkp); + ret = -EFBIG; + if (zone_blocks != (u32)zone_blocks) + goto err; + ret = zone_blocks; + if (ret < 0) goto err; /* The drive satisfies the kernel restrictions: set it up */ - ret = sd_zbc_setup(sdkp); + ret = sd_zbc_setup(sdkp, zone_blocks); if (ret) goto err; diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 8c51d628b52e..a2ec0bc9e9fa 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1722,11 +1722,14 @@ static int storvsc_probe(struct hv_device *device, max_targets = STORVSC_MAX_TARGETS; max_channels = STORVSC_MAX_CHANNELS; /* - * On Windows8 and above, we support sub-channels for storage. + * On Windows8 and above, we support sub-channels for storage + * on SCSI and FC controllers. * The number of sub-channels offerred is based on the number of * VCPUs in the guest. 
*/ - max_sub_channels = (num_cpus / storvsc_vcpus_per_sub_channel); + if (!dev_is_ide) + max_sub_channels = + (num_cpus - 1) / storvsc_vcpus_per_sub_channel; } scsi_driver.can_queue = (max_outstanding_req_per_channel * diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index c5b1bf1cadcb..00e79057f870 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -276,6 +276,35 @@ static inline void ufshcd_remove_non_printable(char *val) *val = ' '; } +static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag, + const char *str) +{ + struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr; + + trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb); +} + +static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag, + const char *str) +{ + struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr; + + trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr); +} + +static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag, + const char *str) +{ + struct utp_task_req_desc *descp; + struct utp_upiu_task_req *task_req; + int off = (int)tag - hba->nutrs; + + descp = &hba->utmrdl_base_addr[off]; + task_req = (struct utp_upiu_task_req *)descp->task_req_upiu; + trace_ufshcd_upiu(dev_name(hba->dev), str, &task_req->header, + &task_req->input_param1); +} + static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag, const char *str) { @@ -285,6 +314,9 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, struct ufshcd_lrb *lrbp; int transfer_len = -1; + /* trace UPIU also */ + ufshcd_add_cmd_upiu_trace(hba, tag, str); + if (!trace_ufshcd_command_enabled()) return; @@ -2550,6 +2582,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, hba->dev_cmd.complete = &wait; + ufshcd_add_query_upiu_trace(hba, tag, "query_send"); /* Make sure descriptors are ready before ringing the doorbell */ wmb(); spin_lock_irqsave(hba->host->host_lock, flags); @@ -2559,6 +2592,9 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout); + ufshcd_add_query_upiu_trace(hba, tag, + err ? 
"query_complete_err" : "query_complete"); + out_put_tag: ufshcd_put_dev_cmd_tag(hba, tag); wake_up(&hba->dev_cmd.tag_wq); @@ -5443,11 +5479,14 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, spin_unlock_irqrestore(host->host_lock, flags); + ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send"); + /* wait until the task management command is completed */ err = wait_event_timeout(hba->tm_wq, test_bit(free_slot, &hba->tm_condition), msecs_to_jiffies(TM_CMD_TIMEOUT)); if (!err) { + ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err"); dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n", __func__, tm_function); if (ufshcd_clear_tm_cmd(hba, free_slot)) @@ -5456,6 +5495,7 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, err = -ETIMEDOUT; } else { err = ufshcd_task_req_compl(hba, free_slot, tm_response); + ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete"); } clear_bit(free_slot, &hba->tm_condition); diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c index 884419c37e84..457ea1f8db30 100644 --- a/drivers/slimbus/messaging.c +++ b/drivers/slimbus/messaging.c @@ -183,7 +183,7 @@ static u16 slim_slicesize(int code) 0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7 }; - clamp(code, 1, (int)ARRAY_SIZE(sizetocode)); + code = clamp(code, 1, (int)ARRAY_SIZE(sizetocode)); return sizetocode[code - 1]; } diff --git a/drivers/soc/bcm/raspberrypi-power.c b/drivers/soc/bcm/raspberrypi-power.c index fe96a8b956fb..f7ed1187518b 100644 --- a/drivers/soc/bcm/raspberrypi-power.c +++ b/drivers/soc/bcm/raspberrypi-power.c @@ -45,7 +45,7 @@ struct rpi_power_domains { struct rpi_power_domain_packet { u32 domain; u32 on; -} __packet; +}; /* * Asks the firmware to enable or disable power on a specific power diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c index 08b636084286..95d7805f3485 100644 --- a/drivers/staging/media/imx/imx-media-csi.c +++ b/drivers/staging/media/imx/imx-media-csi.c @@ -1800,7 +1800,7 @@ static int imx_csi_probe(struct platform_device *pdev) priv->dev->of_node = pdata->of_node; pinctrl = devm_pinctrl_get_select_default(priv->dev); if (IS_ERR(pinctrl)) { - ret = PTR_ERR(priv->vdev); + ret = PTR_ERR(pinctrl); dev_dbg(priv->dev, "devm_pinctrl_get_select_default() failed: %d\n", ret); if (ret != -ENODEV) diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c index 6b5300ca44a6..885f5fcead77 100644 --- a/drivers/staging/wilc1000/host_interface.c +++ b/drivers/staging/wilc1000/host_interface.c @@ -1390,7 +1390,7 @@ static inline void host_int_parse_assoc_resp_info(struct wilc_vif *vif, } if (hif_drv->usr_conn_req.ies) { - conn_info.req_ies = kmemdup(conn_info.req_ies, + conn_info.req_ies = kmemdup(hif_drv->usr_conn_req.ies, hif_drv->usr_conn_req.ies_len, GFP_KERNEL); if (conn_info.req_ies) diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 07c814c42648..60429011292a 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -427,8 +427,8 @@ iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; struct scatterlist *sg = &cmd->t_data_sg[0]; - unsigned char *buf, zero = 0x00, *p = &zero; - int rc, ret; + unsigned char *buf, *not_zero; + int ret; buf = kmap(sg_page(sg)) + sg->offset; if (!buf) @@ -437,10 +437,10 @@ iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd) * Fall 
back to block_execute_write_same() slow-path if * incoming WRITE_SAME payload does not contain zeros. */ - rc = memcmp(buf, p, cmd->data_length); + not_zero = memchr_inv(buf, 0x00, cmd->data_length); kunmap(sg_page(sg)); - if (rc) + if (not_zero) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; ret = blkdev_issue_zeroout(bdev, diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 0d99b242e82e..6cb933ecc084 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -890,6 +890,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, bytes = min(bytes, data_len); if (!bio) { +new_bio: nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages); nr_pages -= nr_vecs; /* @@ -931,6 +932,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, * be allocated with pscsi_get_bio() above. */ bio = NULL; + goto new_bio; } data_len -= bytes; diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 3b3e1f6632d7..1dbe27c9946c 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -121,6 +121,9 @@ struct gsm_dlci { struct mutex mutex; /* Link layer */ + int mode; +#define DLCI_MODE_ABM 0 /* Normal Asynchronous Balanced Mode */ +#define DLCI_MODE_ADM 1 /* Asynchronous Disconnected Mode */ spinlock_t lock; /* Protects the internal state */ struct timer_list t1; /* Retransmit timer for SABM and UA */ int retries; @@ -1364,7 +1367,13 @@ retry: ctrl->data = data; ctrl->len = clen; gsm->pending_cmd = ctrl; - gsm->cretries = gsm->n2; + + /* If DLCI0 is in ADM mode skip retries, it won't respond */ + if (gsm->dlci[0]->mode == DLCI_MODE_ADM) + gsm->cretries = 1; + else + gsm->cretries = gsm->n2; + mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100); gsm_control_transmit(gsm, ctrl); spin_unlock_irqrestore(&gsm->control_lock, flags); @@ -1472,6 +1481,7 @@ static void gsm_dlci_t1(struct timer_list *t) if (debug & 8) pr_info("DLCI %d opening in ADM mode.\n", dlci->addr); + dlci->mode = DLCI_MODE_ADM; gsm_dlci_open(dlci); } else { gsm_dlci_close(dlci); @@ -2861,11 +2871,22 @@ static int gsmtty_modem_update(struct gsm_dlci *dlci, u8 brk) static int gsm_carrier_raised(struct tty_port *port) { struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port); + struct gsm_mux *gsm = dlci->gsm; + /* Not yet open so no carrier info */ if (dlci->state != DLCI_OPEN) return 0; if (debug & 2) return 1; + + /* + * Basic mode with control channel in ADM mode may not respond + * to CMD_MSC at all and modem_rx is empty. 
+ */ + if (gsm->encoding == 0 && gsm->dlci[0]->mode == DLCI_MODE_ADM && + !dlci->modem_rx) + return 1; + return dlci->modem_rx & TIOCM_CD; } diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c index a24278380fec..22683393a0f2 100644 --- a/drivers/tty/serial/earlycon.c +++ b/drivers/tty/serial/earlycon.c @@ -169,7 +169,7 @@ static int __init register_earlycon(char *buf, const struct earlycon_id *match) */ int __init setup_earlycon(char *buf) { - const struct earlycon_id *match; + const struct earlycon_id **p_match; if (!buf || !buf[0]) return -EINVAL; @@ -177,7 +177,9 @@ int __init setup_earlycon(char *buf) if (early_con.flags & CON_ENABLED) return -EALREADY; - for (match = __earlycon_table; match < __earlycon_table_end; match++) { + for (p_match = __earlycon_table; p_match < __earlycon_table_end; + p_match++) { + const struct earlycon_id *match = *p_match; size_t len = strlen(match->name); if (strncmp(buf, match->name, len)) diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 91f3a1a5cb7f..c2fc6bef7a6f 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -316,7 +316,7 @@ static u32 imx_uart_readl(struct imx_port *sport, u32 offset) * differ from the value that was last written. As it only * clears after being set, reread conditionally. */ - if (sport->ucr2 & UCR2_SRST) + if (!(sport->ucr2 & UCR2_SRST)) sport->ucr2 = readl(sport->port.membase + offset); return sport->ucr2; break; @@ -1833,6 +1833,11 @@ static int imx_uart_rs485_config(struct uart_port *port, rs485conf->flags &= ~SER_RS485_ENABLED; if (rs485conf->flags & SER_RS485_ENABLED) { + /* Enable receiver if low-active RTS signal is requested */ + if (sport->have_rtscts && !sport->have_rtsgpio && + !(rs485conf->flags & SER_RS485_RTS_ON_SEND)) + rs485conf->flags |= SER_RS485_RX_DURING_TX; + /* disable transmitter */ ucr2 = imx_uart_readl(sport, UCR2); if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND) @@ -2265,6 +2270,18 @@ static int imx_uart_probe(struct platform_device *pdev) (!sport->have_rtscts && !sport->have_rtsgpio)) dev_err(&pdev->dev, "no RTS control, disabling rs485\n"); + /* + * If using the i.MX UART RTS/CTS control then the RTS (CTS_B) + * signal cannot be set low during transmission in case the + * receiver is off (limitation of the i.MX UART IP). 
+ */ + if (sport->port.rs485.flags & SER_RS485_ENABLED && + sport->have_rtscts && !sport->have_rtsgpio && + (!(sport->port.rs485.flags & SER_RS485_RTS_ON_SEND) && + !(sport->port.rs485.flags & SER_RS485_RX_DURING_TX))) + dev_err(&pdev->dev, + "low-active RTS not possible when receiver is off, enabling receiver\n"); + imx_uart_rs485_config(&sport->port, &sport->port.rs485); /* Disable interrupts before requesting them */ diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c index 750e5645dc85..f503fab1e268 100644 --- a/drivers/tty/serial/mvebu-uart.c +++ b/drivers/tty/serial/mvebu-uart.c @@ -495,7 +495,6 @@ static void mvebu_uart_set_termios(struct uart_port *port, termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR); termios->c_cflag &= CREAD | CBAUD; termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD); - termios->c_lflag = old->c_lflag; } spin_unlock_irqrestore(&port->lock, flags); diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index 65ff669373d4..a1b3eb04cb32 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -1022,6 +1022,7 @@ static int qcom_geni_serial_probe(struct platform_device *pdev) struct qcom_geni_serial_port *port; struct uart_port *uport; struct resource *res; + int irq; if (pdev->dev.of_node) line = of_alias_get_id(pdev->dev.of_node, "serial"); @@ -1061,11 +1062,12 @@ static int qcom_geni_serial_probe(struct platform_device *pdev) port->rx_fifo_depth = DEF_FIFO_DEPTH_WORDS; port->tx_fifo_width = DEF_FIFO_WIDTH_BITS; - uport->irq = platform_get_irq(pdev, 0); - if (uport->irq < 0) { - dev_err(&pdev->dev, "Failed to get IRQ %d\n", uport->irq); - return uport->irq; + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "Failed to get IRQ %d\n", irq); + return irq; } + uport->irq = irq; uport->private_data = &qcom_geni_console_driver; platform_set_drvdata(pdev, port); diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index abcb4d09a2d8..bd72dd843338 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -1181,7 +1181,7 @@ static int __init cdns_early_console_setup(struct earlycon_device *device, /* only set baud if specified on command line - otherwise * assume it has been initialized by a boot loader. */ - if (device->baud) { + if (port->uartclk && device->baud) { u32 cd = 0, bdiv = 0; u32 mr; int div8; diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 63114ea35ec1..7c838b90a31d 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -2816,7 +2816,10 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx) kref_init(&tty->kref); tty->magic = TTY_MAGIC; - tty_ldisc_init(tty); + if (tty_ldisc_init(tty)) { + kfree(tty); + return NULL; + } tty->session = NULL; tty->pgrp = NULL; mutex_init(&tty->legacy_mutex); diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c index 050f4d650891..fb7329ab2b37 100644 --- a/drivers/tty/tty_ldisc.c +++ b/drivers/tty/tty_ldisc.c @@ -176,12 +176,11 @@ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc) return ERR_CAST(ldops); } - ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL); - if (ld == NULL) { - put_ldops(ldops); - return ERR_PTR(-ENOMEM); - } - + /* + * There is no way to handle allocation failure of only 16 bytes. + * Let's simplify error handling and save more memory. 
+ */ + ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL | __GFP_NOFAIL); ld->ops = ldops; ld->tty = tty; @@ -527,19 +526,16 @@ static int tty_ldisc_failto(struct tty_struct *tty, int ld) static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old) { /* There is an outstanding reference here so this is safe */ - old = tty_ldisc_get(tty, old->ops->num); - WARN_ON(IS_ERR(old)); - tty->ldisc = old; - tty_set_termios_ldisc(tty, old->ops->num); - if (tty_ldisc_open(tty, old) < 0) { - tty_ldisc_put(old); + if (tty_ldisc_failto(tty, old->ops->num) < 0) { + const char *name = tty_name(tty); + + pr_warn("Falling back ldisc for %s.\n", name); /* The traditional behaviour is to fall back to N_TTY, we want to avoid falling back to N_NULL unless we have no choice to avoid the risk of breaking anything */ if (tty_ldisc_failto(tty, N_TTY) < 0 && tty_ldisc_failto(tty, N_NULL) < 0) - panic("Couldn't open N_NULL ldisc for %s.", - tty_name(tty)); + panic("Couldn't open N_NULL ldisc for %s.", name); } } @@ -824,12 +820,13 @@ EXPORT_SYMBOL_GPL(tty_ldisc_release); * the tty structure is not completely set up when this call is made. */ -void tty_ldisc_init(struct tty_struct *tty) +int tty_ldisc_init(struct tty_struct *tty) { struct tty_ldisc *ld = tty_ldisc_get(tty, N_TTY); if (IS_ERR(ld)) - panic("n_tty: init_tty"); + return PTR_ERR(ld); tty->ldisc = ld; + return 0; } /** diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c index f695a7e8c314..c690d100adcd 100644 --- a/drivers/uio/uio_hv_generic.c +++ b/drivers/uio/uio_hv_generic.c @@ -19,7 +19,7 @@ * # echo -n "ed963694-e847-4b2a-85af-bc9cfc11d6f3" \ * > /sys/bus/vmbus/drivers/uio_hv_generic/bind */ - +#define DEBUG 1 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/device.h> @@ -94,10 +94,11 @@ hv_uio_irqcontrol(struct uio_info *info, s32 irq_state) */ static void hv_uio_channel_cb(void *context) { - struct hv_uio_private_data *pdata = context; - struct hv_device *dev = pdata->device; + struct vmbus_channel *chan = context; + struct hv_device *hv_dev = chan->device_obj; + struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev); - dev->channel->inbound.ring_buffer->interrupt_mask = 1; + chan->inbound.ring_buffer->interrupt_mask = 1; virt_mb(); uio_event_notify(&pdata->info); @@ -121,78 +122,46 @@ static void hv_uio_rescind(struct vmbus_channel *channel) uio_event_notify(&pdata->info); } -/* - * Handle fault when looking for sub channel ring buffer - * Subchannel ring buffer is same as resource 0 which is main ring buffer - * This is derived from uio_vma_fault +/* Sysfs API to allow mmap of the ring buffers + * The ring buffer is allocated as contiguous memory by vmbus_open */ -static int hv_uio_vma_fault(struct vm_fault *vmf) -{ - struct vm_area_struct *vma = vmf->vma; - void *ring_buffer = vma->vm_private_data; - struct page *page; - void *addr; - - addr = ring_buffer + (vmf->pgoff << PAGE_SHIFT); - page = virt_to_page(addr); - get_page(page); - vmf->page = page; - return 0; -} - -static const struct vm_operations_struct hv_uio_vm_ops = { - .fault = hv_uio_vma_fault, -}; - -/* Sysfs API to allow mmap of the ring buffers */ static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, struct vm_area_struct *vma) { struct vmbus_channel *channel = container_of(kobj, struct vmbus_channel, kobj); - unsigned long requested_pages, actual_pages; - - if (vma->vm_end < vma->vm_start) - return -EINVAL; - - /* only allow 0 for now */ - if (vma->vm_pgoff > 0) - return -EINVAL; + struct 
hv_device *dev = channel->primary_channel->device_obj; + u16 q_idx = channel->offermsg.offer.sub_channel_index; - requested_pages = vma_pages(vma); - actual_pages = 2 * HV_RING_SIZE; - if (requested_pages > actual_pages) - return -EINVAL; + dev_dbg(&dev->device, "mmap channel %u pages %#lx at %#lx\n", + q_idx, vma_pages(vma), vma->vm_pgoff); - vma->vm_private_data = channel->ringbuffer_pages; - vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; - vma->vm_ops = &hv_uio_vm_ops; - return 0; + return vm_iomap_memory(vma, virt_to_phys(channel->ringbuffer_pages), + channel->ringbuffer_pagecount << PAGE_SHIFT); } -static struct bin_attribute ring_buffer_bin_attr __ro_after_init = { +static const struct bin_attribute ring_buffer_bin_attr = { .attr = { .name = "ring", .mode = 0600, - /* size is set at init time */ }, + .size = 2 * HV_RING_SIZE * PAGE_SIZE, .mmap = hv_uio_ring_mmap, }; -/* Callback from VMBUS subystem when new channel created. */ +/* Callback from VMBUS subsystem when new channel created. */ static void hv_uio_new_channel(struct vmbus_channel *new_sc) { struct hv_device *hv_dev = new_sc->primary_channel->device_obj; struct device *device = &hv_dev->device; - struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev); const size_t ring_bytes = HV_RING_SIZE * PAGE_SIZE; int ret; /* Create host communication ring */ ret = vmbus_open(new_sc, ring_bytes, ring_bytes, NULL, 0, - hv_uio_channel_cb, pdata); + hv_uio_channel_cb, new_sc); if (ret) { dev_err(device, "vmbus_open subchannel failed: %d\n", ret); return; @@ -234,7 +203,7 @@ hv_uio_probe(struct hv_device *dev, ret = vmbus_open(dev->channel, HV_RING_SIZE * PAGE_SIZE, HV_RING_SIZE * PAGE_SIZE, NULL, 0, - hv_uio_channel_cb, pdata); + hv_uio_channel_cb, dev->channel); if (ret) goto fail; @@ -326,6 +295,11 @@ hv_uio_probe(struct hv_device *dev, vmbus_set_chn_rescind_callback(dev->channel, hv_uio_rescind); vmbus_set_sc_create_callback(dev->channel, hv_uio_new_channel); + ret = sysfs_create_bin_file(&dev->channel->kobj, &ring_buffer_bin_attr); + if (ret) + dev_notice(&dev->device, + "sysfs create ring bin file failed; %d\n", ret); + hv_set_drvdata(dev, pdata); return 0; diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index 75f7fb151f71..987fc5ba6321 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig @@ -207,5 +207,6 @@ config USB_ULPI_BUS config USB_ROLE_SWITCH tristate + select USB_COMMON endif # USB_SUPPORT diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index c821b4b9647e..7b5cb28ffb35 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -191,7 +191,9 @@ static const unsigned short full_speed_maxpacket_maxes[4] = { static const unsigned short high_speed_maxpacket_maxes[4] = { [USB_ENDPOINT_XFER_CONTROL] = 64, [USB_ENDPOINT_XFER_ISOC] = 1024, - [USB_ENDPOINT_XFER_BULK] = 512, + + /* Bulk should be 512, but some devices use 1024: we will warn below */ + [USB_ENDPOINT_XFER_BULK] = 1024, [USB_ENDPOINT_XFER_INT] = 1024, }; static const unsigned short super_speed_maxpacket_maxes[4] = { diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 777036ae6367..0a42c5df3c0f 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -2262,7 +2262,8 @@ int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg) hcd->state = HC_STATE_SUSPENDED; if (!PMSG_IS_AUTO(msg)) - usb_phy_roothub_power_off(hcd->phy_roothub); + usb_phy_roothub_suspend(hcd->self.sysdev, + hcd->phy_roothub); /* Did we race with a root-hub wakeup event? 
*/ if (rhdev->do_remote_wakeup) { @@ -2302,7 +2303,8 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg) } if (!PMSG_IS_AUTO(msg)) { - status = usb_phy_roothub_power_on(hcd->phy_roothub); + status = usb_phy_roothub_resume(hcd->self.sysdev, + hcd->phy_roothub); if (status) return status; } @@ -2344,7 +2346,7 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg) } } else { hcd->state = old_state; - usb_phy_roothub_power_off(hcd->phy_roothub); + usb_phy_roothub_suspend(hcd->self.sysdev, hcd->phy_roothub); dev_dbg(&rhdev->dev, "bus %s fail, err %d\n", "resume", status); if (status != -ESHUTDOWN) @@ -2377,6 +2379,7 @@ void usb_hcd_resume_root_hub (struct usb_hcd *hcd) spin_lock_irqsave (&hcd_root_hub_lock, flags); if (hcd->rh_registered) { + pm_wakeup_event(&hcd->self.root_hub->dev, 0); set_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags); queue_work(pm_wq, &hcd->wakeup_work); } @@ -2758,12 +2761,16 @@ int usb_add_hcd(struct usb_hcd *hcd, } if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) { - hcd->phy_roothub = usb_phy_roothub_init(hcd->self.sysdev); + hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev); if (IS_ERR(hcd->phy_roothub)) { retval = PTR_ERR(hcd->phy_roothub); - goto err_phy_roothub_init; + goto err_phy_roothub_alloc; } + retval = usb_phy_roothub_init(hcd->phy_roothub); + if (retval) + goto err_phy_roothub_alloc; + retval = usb_phy_roothub_power_on(hcd->phy_roothub); if (retval) goto err_usb_phy_roothub_power_on; @@ -2936,7 +2943,7 @@ err_create_buf: usb_phy_roothub_power_off(hcd->phy_roothub); err_usb_phy_roothub_power_on: usb_phy_roothub_exit(hcd->phy_roothub); -err_phy_roothub_init: +err_phy_roothub_alloc: if (hcd->remove_phy && hcd->usb_phy) { usb_phy_shutdown(hcd->usb_phy); usb_put_phy(hcd->usb_phy); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index f6ea16e9f6bb..aa9968d90a48 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -653,12 +653,17 @@ void usb_wakeup_notification(struct usb_device *hdev, unsigned int portnum) { struct usb_hub *hub; + struct usb_port *port_dev; if (!hdev) return; hub = usb_hub_to_struct_hub(hdev); if (hub) { + port_dev = hub->ports[portnum - 1]; + if (port_dev && port_dev->child) + pm_wakeup_event(&port_dev->child->dev, 0); + set_bit(portnum, hub->wakeup_bits); kick_hub_wq(hub); } @@ -3434,8 +3439,11 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg) /* Skip the initial Clear-Suspend step for a remote wakeup */ status = hub_port_status(hub, port1, &portstatus, &portchange); - if (status == 0 && !port_is_suspended(hub, portstatus)) + if (status == 0 && !port_is_suspended(hub, portstatus)) { + if (portchange & USB_PORT_STAT_C_SUSPEND) + pm_wakeup_event(&udev->dev, 0); goto SuspendCleared; + } /* see 7.1.7.7; affects power usage, but not budgeting */ if (hub_is_superspeed(hub->hdev)) diff --git a/drivers/usb/core/phy.c b/drivers/usb/core/phy.c index 09b7c43c0ea4..9879767452a2 100644 --- a/drivers/usb/core/phy.c +++ b/drivers/usb/core/phy.c @@ -19,19 +19,6 @@ struct usb_phy_roothub { struct list_head list; }; -static struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev) -{ - struct usb_phy_roothub *roothub_entry; - - roothub_entry = devm_kzalloc(dev, sizeof(*roothub_entry), GFP_KERNEL); - if (!roothub_entry) - return ERR_PTR(-ENOMEM); - - INIT_LIST_HEAD(&roothub_entry->list); - - return roothub_entry; -} - static int usb_phy_roothub_add_phy(struct device *dev, int index, struct list_head *list) { @@ -45,9 +32,11 @@ static int 
usb_phy_roothub_add_phy(struct device *dev, int index, return PTR_ERR(phy); } - roothub_entry = usb_phy_roothub_alloc(dev); - if (IS_ERR(roothub_entry)) - return PTR_ERR(roothub_entry); + roothub_entry = devm_kzalloc(dev, sizeof(*roothub_entry), GFP_KERNEL); + if (!roothub_entry) + return -ENOMEM; + + INIT_LIST_HEAD(&roothub_entry->list); roothub_entry->phy = phy; @@ -56,28 +45,44 @@ static int usb_phy_roothub_add_phy(struct device *dev, int index, return 0; } -struct usb_phy_roothub *usb_phy_roothub_init(struct device *dev) +struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev) { struct usb_phy_roothub *phy_roothub; - struct usb_phy_roothub *roothub_entry; - struct list_head *head; int i, num_phys, err; + if (!IS_ENABLED(CONFIG_GENERIC_PHY)) + return NULL; + num_phys = of_count_phandle_with_args(dev->of_node, "phys", "#phy-cells"); if (num_phys <= 0) return NULL; - phy_roothub = usb_phy_roothub_alloc(dev); - if (IS_ERR(phy_roothub)) - return phy_roothub; + phy_roothub = devm_kzalloc(dev, sizeof(*phy_roothub), GFP_KERNEL); + if (!phy_roothub) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&phy_roothub->list); for (i = 0; i < num_phys; i++) { err = usb_phy_roothub_add_phy(dev, i, &phy_roothub->list); if (err) - goto err_out; + return ERR_PTR(err); } + return phy_roothub; +} +EXPORT_SYMBOL_GPL(usb_phy_roothub_alloc); + +int usb_phy_roothub_init(struct usb_phy_roothub *phy_roothub) +{ + struct usb_phy_roothub *roothub_entry; + struct list_head *head; + int err; + + if (!phy_roothub) + return 0; + head = &phy_roothub->list; list_for_each_entry(roothub_entry, head, list) { @@ -86,14 +91,13 @@ struct usb_phy_roothub *usb_phy_roothub_init(struct device *dev) goto err_exit_phys; } - return phy_roothub; + return 0; err_exit_phys: list_for_each_entry_continue_reverse(roothub_entry, head, list) phy_exit(roothub_entry->phy); -err_out: - return ERR_PTR(err); + return err; } EXPORT_SYMBOL_GPL(usb_phy_roothub_init); @@ -111,7 +115,7 @@ int usb_phy_roothub_exit(struct usb_phy_roothub *phy_roothub) list_for_each_entry(roothub_entry, head, list) { err = phy_exit(roothub_entry->phy); if (err) - ret = ret; + ret = err; } return ret; @@ -156,3 +160,38 @@ void usb_phy_roothub_power_off(struct usb_phy_roothub *phy_roothub) phy_power_off(roothub_entry->phy); } EXPORT_SYMBOL_GPL(usb_phy_roothub_power_off); + +int usb_phy_roothub_suspend(struct device *controller_dev, + struct usb_phy_roothub *phy_roothub) +{ + usb_phy_roothub_power_off(phy_roothub); + + /* keep the PHYs initialized so the device can wake up the system */ + if (device_may_wakeup(controller_dev)) + return 0; + + return usb_phy_roothub_exit(phy_roothub); +} +EXPORT_SYMBOL_GPL(usb_phy_roothub_suspend); + +int usb_phy_roothub_resume(struct device *controller_dev, + struct usb_phy_roothub *phy_roothub) +{ + int err; + + /* if the device can't wake up the system _exit was called */ + if (!device_may_wakeup(controller_dev)) { + err = usb_phy_roothub_init(phy_roothub); + if (err) + return err; + } + + err = usb_phy_roothub_power_on(phy_roothub); + + /* undo _init if _power_on failed */ + if (err && !device_may_wakeup(controller_dev)) + usb_phy_roothub_exit(phy_roothub); + + return err; +} +EXPORT_SYMBOL_GPL(usb_phy_roothub_resume); diff --git a/drivers/usb/core/phy.h b/drivers/usb/core/phy.h index 6fde59bfbff8..88a3c037e9df 100644 --- a/drivers/usb/core/phy.h +++ b/drivers/usb/core/phy.h @@ -1,7 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * USB roothub wrapper + * + * Copyright (C) 2018 Martin Blumenstingl 
<martin.blumenstingl@googlemail.com> + */ + +#ifndef __USB_CORE_PHY_H_ +#define __USB_CORE_PHY_H_ + +struct device; struct usb_phy_roothub; -struct usb_phy_roothub *usb_phy_roothub_init(struct device *dev); +struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev); + +int usb_phy_roothub_init(struct usb_phy_roothub *phy_roothub); int usb_phy_roothub_exit(struct usb_phy_roothub *phy_roothub); int usb_phy_roothub_power_on(struct usb_phy_roothub *phy_roothub); void usb_phy_roothub_power_off(struct usb_phy_roothub *phy_roothub); + +int usb_phy_roothub_suspend(struct device *controller_dev, + struct usb_phy_roothub *phy_roothub); +int usb_phy_roothub_resume(struct device *controller_dev, + struct usb_phy_roothub *phy_roothub); + +#endif /* __USB_CORE_PHY_H_ */ diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 920f48a49a87..c55def2f1320 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -186,6 +186,9 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x03f0, 0x0701), .driver_info = USB_QUIRK_STRING_FETCH_255 }, + /* HP v222w 16GB Mini USB Drive */ + { USB_DEVICE(0x03f0, 0x3f40), .driver_info = USB_QUIRK_DELAY_INIT }, + /* Creative SB Audigy 2 NX */ { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index d83be5651f87..a666e0758a99 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h @@ -985,6 +985,7 @@ struct dwc2_hsotg { /* DWC OTG HW Release versions */ #define DWC2_CORE_REV_2_71a 0x4f54271a +#define DWC2_CORE_REV_2_72a 0x4f54272a #define DWC2_CORE_REV_2_80a 0x4f54280a #define DWC2_CORE_REV_2_90a 0x4f54290a #define DWC2_CORE_REV_2_91a 0x4f54291a @@ -992,6 +993,7 @@ struct dwc2_hsotg { #define DWC2_CORE_REV_2_94a 0x4f54294a #define DWC2_CORE_REV_3_00a 0x4f54300a #define DWC2_CORE_REV_3_10a 0x4f54310a +#define DWC2_CORE_REV_4_00a 0x4f54400a #define DWC2_FS_IOT_REV_1_00a 0x5531100a #define DWC2_HS_IOT_REV_1_00a 0x5532100a diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 6c32bf26e48e..83cb5577a52f 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -3928,6 +3928,27 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep, if (index && !hs_ep->isochronous) epctrl |= DXEPCTL_SETD0PID; + /* WA for Full speed ISOC IN in DDMA mode. + * By Clear NAK status of EP, core will send ZLP + * to IN token and assert NAK interrupt relying + * on TxFIFO status only + */ + + if (hsotg->gadget.speed == USB_SPEED_FULL && + hs_ep->isochronous && dir_in) { + /* The WA applies only to core versions from 2.72a + * to 4.00a (including both). Also for FS_IOT_1.00a + * and HS_IOT_1.00a. 
+ */ + u32 gsnpsid = dwc2_readl(hsotg->regs + GSNPSID); + + if ((gsnpsid >= DWC2_CORE_REV_2_72a && + gsnpsid <= DWC2_CORE_REV_4_00a) || + gsnpsid == DWC2_FS_IOT_REV_1_00a || + gsnpsid == DWC2_HS_IOT_REV_1_00a) + epctrl |= DXEPCTL_CNAK; + } + dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n", __func__, epctrl); diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index 190f95964000..c51b73b3e048 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -358,9 +358,14 @@ static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg) static int dwc2_vbus_supply_init(struct dwc2_hsotg *hsotg) { + int ret; + hsotg->vbus_supply = devm_regulator_get_optional(hsotg->dev, "vbus"); - if (IS_ERR(hsotg->vbus_supply)) - return 0; + if (IS_ERR(hsotg->vbus_supply)) { + ret = PTR_ERR(hsotg->vbus_supply); + hsotg->vbus_supply = NULL; + return ret == -ENODEV ? 0 : ret; + } return regulator_enable(hsotg->vbus_supply); } @@ -4342,9 +4347,7 @@ static int _dwc2_hcd_start(struct usb_hcd *hcd) spin_unlock_irqrestore(&hsotg->lock, flags); - dwc2_vbus_supply_init(hsotg); - - return 0; + return dwc2_vbus_supply_init(hsotg); } /* diff --git a/drivers/usb/dwc2/pci.c b/drivers/usb/dwc2/pci.c index 7f21747007f1..bea2e8ec0369 100644 --- a/drivers/usb/dwc2/pci.c +++ b/drivers/usb/dwc2/pci.c @@ -141,8 +141,10 @@ static int dwc2_pci_probe(struct pci_dev *pci, goto err; glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL); - if (!glue) + if (!glue) { + ret = -ENOMEM; goto err; + } ret = platform_device_add(dwc2); if (ret) { diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 8796a5ee9bb9..0dedf8a799f4 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -166,7 +166,7 @@ static void dwc3_ep_inc_deq(struct dwc3_ep *dep) dwc3_ep_inc_trb(&dep->trb_dequeue); } -void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep, +static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep, struct dwc3_request *req, int status) { struct dwc3 *dwc = dep->dwc; @@ -1424,7 +1424,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, dwc->lock); if (!r->trb) - goto out1; + goto out0; if (r->num_pending_sgs) { struct dwc3_trb *trb; diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c index 7889bcc0509a..8b72b192c747 100644 --- a/drivers/usb/gadget/function/f_phonet.c +++ b/drivers/usb/gadget/function/f_phonet.c @@ -221,7 +221,7 @@ static void pn_tx_complete(struct usb_ep *ep, struct usb_request *req) netif_wake_queue(dev); } -static int pn_net_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t pn_net_xmit(struct sk_buff *skb, struct net_device *dev) { struct phonet_port *port = netdev_priv(dev); struct f_phonet *fp; diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c index 4c6c08b675b5..21307d862af6 100644 --- a/drivers/usb/host/ehci-mem.c +++ b/drivers/usb/host/ehci-mem.c @@ -73,9 +73,10 @@ static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags) if (!qh) goto done; qh->hw = (struct ehci_qh_hw *) - dma_pool_zalloc(ehci->qh_pool, flags, &dma); + dma_pool_alloc(ehci->qh_pool, flags, &dma); if (!qh->hw) goto fail; + memset(qh->hw, 0, sizeof *qh->hw); qh->qh_dma = dma; // INIT_LIST_HEAD (&qh->qh_list); INIT_LIST_HEAD (&qh->qtd_list); diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index 28e2a338b481..e56db44708bc 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -1287,7 +1287,7 @@ itd_urb_transaction( } else { alloc_itd: 
spin_unlock_irqrestore(&ehci->lock, flags); - itd = dma_pool_zalloc(ehci->itd_pool, mem_flags, + itd = dma_pool_alloc(ehci->itd_pool, mem_flags, &itd_dma); spin_lock_irqsave(&ehci->lock, flags); if (!itd) { @@ -1297,6 +1297,7 @@ itd_urb_transaction( } } + memset(itd, 0, sizeof(*itd)); itd->itd_dma = itd_dma; itd->frame = NO_FRAME; list_add(&itd->itd_list, &sched->td_list); @@ -2080,7 +2081,7 @@ sitd_urb_transaction( } else { alloc_sitd: spin_unlock_irqrestore(&ehci->lock, flags); - sitd = dma_pool_zalloc(ehci->sitd_pool, mem_flags, + sitd = dma_pool_alloc(ehci->sitd_pool, mem_flags, &sitd_dma); spin_lock_irqsave(&ehci->lock, flags); if (!sitd) { @@ -2090,6 +2091,7 @@ sitd_urb_transaction( } } + memset(sitd, 0, sizeof(*sitd)); sitd->sitd_dma = sitd_dma; sitd->frame = NO_FRAME; list_add(&sitd->sitd_list, &iso_sched->td_list); diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c index 48779c44c361..eb494ec547e8 100644 --- a/drivers/usb/host/xhci-dbgtty.c +++ b/drivers/usb/host/xhci-dbgtty.c @@ -320,9 +320,11 @@ int xhci_dbc_tty_register_driver(struct xhci_hcd *xhci) void xhci_dbc_tty_unregister_driver(void) { - tty_unregister_driver(dbc_tty_driver); - put_tty_driver(dbc_tty_driver); - dbc_tty_driver = NULL; + if (dbc_tty_driver) { + tty_unregister_driver(dbc_tty_driver); + put_tty_driver(dbc_tty_driver); + dbc_tty_driver = NULL; + } } static void dbc_rx_push(unsigned long _port) diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index f17b7eab66cf..85ffda85f8ab 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -126,7 +126,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info()) xhci->quirks |= XHCI_AMD_PLL_FIX; - if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x43bb) + if (pdev->vendor == PCI_VENDOR_ID_AMD && + (pdev->device == 0x15e0 || + pdev->device == 0x15e1 || + pdev->device == 0x43bb)) xhci->quirks |= XHCI_SUSPEND_DELAY; if (pdev->vendor == PCI_VENDOR_ID_AMD) diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index df327dcc2bac..c1b22fc64e38 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -157,6 +157,7 @@ static int xhci_plat_probe(struct platform_device *pdev) struct resource *res; struct usb_hcd *hcd; struct clk *clk; + struct clk *reg_clk; int ret; int irq; @@ -226,17 +227,27 @@ static int xhci_plat_probe(struct platform_device *pdev) hcd->rsrc_len = resource_size(res); /* - * Not all platforms have a clk so it is not an error if the - * clock does not exists. + * Not all platforms have clks so it is not an error if the + * clock do not exist. 
*/ + reg_clk = devm_clk_get(&pdev->dev, "reg"); + if (!IS_ERR(reg_clk)) { + ret = clk_prepare_enable(reg_clk); + if (ret) + goto put_hcd; + } else if (PTR_ERR(reg_clk) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto put_hcd; + } + clk = devm_clk_get(&pdev->dev, NULL); if (!IS_ERR(clk)) { ret = clk_prepare_enable(clk); if (ret) - goto put_hcd; + goto disable_reg_clk; } else if (PTR_ERR(clk) == -EPROBE_DEFER) { ret = -EPROBE_DEFER; - goto put_hcd; + goto disable_reg_clk; } xhci = hcd_to_xhci(hcd); @@ -252,6 +263,7 @@ static int xhci_plat_probe(struct platform_device *pdev) device_wakeup_enable(hcd->self.controller); xhci->clk = clk; + xhci->reg_clk = reg_clk; xhci->main_hcd = hcd; xhci->shared_hcd = __usb_create_hcd(driver, sysdev, &pdev->dev, dev_name(&pdev->dev), hcd); @@ -320,8 +332,10 @@ put_usb3_hcd: usb_put_hcd(xhci->shared_hcd); disable_clk: - if (!IS_ERR(clk)) - clk_disable_unprepare(clk); + clk_disable_unprepare(clk); + +disable_reg_clk: + clk_disable_unprepare(reg_clk); put_hcd: usb_put_hcd(hcd); @@ -338,6 +352,7 @@ static int xhci_plat_remove(struct platform_device *dev) struct usb_hcd *hcd = platform_get_drvdata(dev); struct xhci_hcd *xhci = hcd_to_xhci(hcd); struct clk *clk = xhci->clk; + struct clk *reg_clk = xhci->reg_clk; xhci->xhc_state |= XHCI_STATE_REMOVING; @@ -347,8 +362,8 @@ static int xhci_plat_remove(struct platform_device *dev) usb_remove_hcd(hcd); usb_put_hcd(xhci->shared_hcd); - if (!IS_ERR(clk)) - clk_disable_unprepare(clk); + clk_disable_unprepare(clk); + clk_disable_unprepare(reg_clk); usb_put_hcd(hcd); pm_runtime_set_suspended(&dev->dev); @@ -420,7 +435,6 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match); static struct platform_driver usb_xhci_driver = { .probe = xhci_plat_probe, .remove = xhci_plat_remove, - .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "xhci-hcd", .pm = &xhci_plat_pm_ops, diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 9b27798ecce5..711da3306b14 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -3621,6 +3621,7 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); } xhci_debugfs_remove_slot(xhci, udev->slot_id); + virt_dev->udev = NULL; ret = xhci_disable_slot(xhci, udev->slot_id); if (ret) xhci_free_virt_device(xhci, udev->slot_id); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 05c909b04f14..6dfc4867dbcf 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1729,8 +1729,9 @@ struct xhci_hcd { int page_shift; /* msi-x vectors */ int msix_count; - /* optional clock */ + /* optional clocks */ struct clk *clk; + struct clk *reg_clk; /* data structures */ struct xhci_device_context_array *dcbaa; struct xhci_ring *cmd_ring; diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index 05a679d5e3a2..6a60bc0490c5 100644 --- a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c @@ -451,7 +451,6 @@ static int dsps_musb_init(struct musb *musb) if (!rev) return -ENODEV; - usb_phy_init(musb->xceiv); if (IS_ERR(musb->phy)) { musb->phy = NULL; } else { @@ -501,7 +500,6 @@ static int dsps_musb_exit(struct musb *musb) struct dsps_glue *glue = dev_get_drvdata(dev->parent); del_timer_sync(&musb->dev_timer); - usb_phy_shutdown(musb->xceiv); phy_power_off(musb->phy); phy_exit(musb->phy); debugfs_remove_recursive(glue->dbgfs_root); diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index e564695c6c8d..71c5835ea9cd 100644 --- 
a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -417,7 +417,6 @@ void musb_g_tx(struct musb *musb, u8 epnum) req = next_request(musb_ep); request = &req->request; - trace_musb_req_tx(req); csr = musb_readw(epio, MUSB_TXCSR); musb_dbg(musb, "<== %s, txcsr %04x", musb_ep->end_point.name, csr); @@ -456,6 +455,8 @@ void musb_g_tx(struct musb *musb, u8 epnum) u8 is_dma = 0; bool short_packet = false; + trace_musb_req_tx(req); + if (dma && (csr & MUSB_TXCSR_DMAENAB)) { is_dma = 1; csr |= MUSB_TXCSR_P_WZC_BITS; diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 3a8451a15f7f..e7f99d55922a 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c @@ -990,7 +990,9 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep, /* set tx_reinit and schedule the next qh */ ep->tx_reinit = 1; } - musb_start_urb(musb, is_in, next_qh); + + if (next_qh) + musb_start_urb(musb, is_in, next_qh); } } @@ -2754,6 +2756,7 @@ int musb_host_setup(struct musb *musb, int power_budget) hcd->self.otg_port = 1; musb->xceiv->otg->host = &hcd->self; hcd->power_budget = 2 * (power_budget ? : 250); + hcd->skip_phy_initialization = 1; ret = usb_add_hcd(hcd, 0, 0); if (ret < 0) diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig index a646820f5a78..533f127c30ad 100644 --- a/drivers/usb/serial/Kconfig +++ b/drivers/usb/serial/Kconfig @@ -62,6 +62,7 @@ config USB_SERIAL_SIMPLE - Fundamental Software dongle. - Google USB serial devices - HP4x calculators + - Libtransistor USB console - a number of Motorola phones - Motorola Tetra devices - Novatel Wireless GPS receivers diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index de1e759dd512..eb6c26cbe579 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -214,6 +214,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */ { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */ { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */ + { USB_DEVICE(0x3923, 0x7A0B) }, /* National Instruments USB Serial Console */ { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */ { } /* Terminating Entry */ }; diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 87202ad5a50d..7ea221d42dba 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -1898,7 +1898,8 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial) return ftdi_jtag_probe(serial); if (udev->product && - (!strcmp(udev->product, "BeagleBone/XDS100V2") || + (!strcmp(udev->product, "Arrow USB Blaster") || + !strcmp(udev->product, "BeagleBone/XDS100V2") || !strcmp(udev->product, "SNAP Connect E10"))) return ftdi_jtag_probe(serial); diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index c3f252283ab9..2058852a87fa 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -233,6 +233,8 @@ static void option_instat_callback(struct urb *urb); /* These Quectel products use Qualcomm's vendor ID */ #define QUECTEL_PRODUCT_UC20 0x9003 #define QUECTEL_PRODUCT_UC15 0x9090 +/* These u-blox products use Qualcomm's vendor ID */ +#define UBLOX_PRODUCT_R410M 0x90b2 /* These Yuga products use Qualcomm's vendor ID */ #define YUGA_PRODUCT_CLM920_NC5 0x9625 @@ -1065,6 +1067,9 @@ static const struct usb_device_id option_ids[] = { /* Yuga products use Qualcomm vendor ID */ { USB_DEVICE(QUALCOMM_VENDOR_ID, YUGA_PRODUCT_CLM920_NC5), 
.driver_info = RSVD(1) | RSVD(4) }, + /* u-blox products using Qualcomm vendor ID */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M), + .driver_info = RSVD(1) | RSVD(3) }, /* Quectel products using Quectel vendor ID */ { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21), .driver_info = RSVD(4) }, diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c index 4ef79e29cb26..40864c2bd9dc 100644 --- a/drivers/usb/serial/usb-serial-simple.c +++ b/drivers/usb/serial/usb-serial-simple.c @@ -63,6 +63,11 @@ DEVICE(flashloader, FLASHLOADER_IDS); 0x01) } DEVICE(google, GOOGLE_IDS); +/* Libtransistor USB console */ +#define LIBTRANSISTOR_IDS() \ + { USB_DEVICE(0x1209, 0x8b00) } +DEVICE(libtransistor, LIBTRANSISTOR_IDS); + /* ViVOpay USB Serial Driver */ #define VIVOPAY_IDS() \ { USB_DEVICE(0x1d5f, 0x1004) } /* ViVOpay 8800 */ @@ -110,6 +115,7 @@ static struct usb_serial_driver * const serial_drivers[] = { &funsoft_device, &flashloader_device, &google_device, + &libtransistor_device, &vivopay_device, &moto_modem_device, &motorola_tetra_device, @@ -126,6 +132,7 @@ static const struct usb_device_id id_table[] = { FUNSOFT_IDS(), FLASHLOADER_IDS(), GOOGLE_IDS(), + LIBTRANSISTOR_IDS(), VIVOPAY_IDS(), MOTO_IDS(), MOTOROLA_TETRA_IDS(), diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c index f5373ed2cd45..8ddbecc25d89 100644 --- a/drivers/usb/serial/visor.c +++ b/drivers/usb/serial/visor.c @@ -335,47 +335,48 @@ static int palm_os_3_probe(struct usb_serial *serial, goto exit; } - if (retval == sizeof(*connection_info)) { - connection_info = (struct visor_connection_info *) - transfer_buffer; - - num_ports = le16_to_cpu(connection_info->num_ports); - for (i = 0; i < num_ports; ++i) { - switch ( - connection_info->connections[i].port_function_id) { - case VISOR_FUNCTION_GENERIC: - string = "Generic"; - break; - case VISOR_FUNCTION_DEBUGGER: - string = "Debugger"; - break; - case VISOR_FUNCTION_HOTSYNC: - string = "HotSync"; - break; - case VISOR_FUNCTION_CONSOLE: - string = "Console"; - break; - case VISOR_FUNCTION_REMOTE_FILE_SYS: - string = "Remote File System"; - break; - default: - string = "unknown"; - break; - } - dev_info(dev, "%s: port %d, is for %s use\n", - serial->type->description, - connection_info->connections[i].port, string); - } + if (retval != sizeof(*connection_info)) { + dev_err(dev, "Invalid connection information received from device\n"); + retval = -ENODEV; + goto exit; } - /* - * Handle devices that report invalid stuff here. - */ + + connection_info = (struct visor_connection_info *)transfer_buffer; + + num_ports = le16_to_cpu(connection_info->num_ports); + + /* Handle devices that report invalid stuff here. 
*/ if (num_ports == 0 || num_ports > 2) { dev_warn(dev, "%s: No valid connect info available\n", serial->type->description); num_ports = 2; } + for (i = 0; i < num_ports; ++i) { + switch (connection_info->connections[i].port_function_id) { + case VISOR_FUNCTION_GENERIC: + string = "Generic"; + break; + case VISOR_FUNCTION_DEBUGGER: + string = "Debugger"; + break; + case VISOR_FUNCTION_HOTSYNC: + string = "HotSync"; + break; + case VISOR_FUNCTION_CONSOLE: + string = "Console"; + break; + case VISOR_FUNCTION_REMOTE_FILE_SYS: + string = "Remote File System"; + break; + default: + string = "unknown"; + break; + } + dev_info(dev, "%s: port %d, is for %s use\n", + serial->type->description, + connection_info->connections[i].port, string); + } dev_info(dev, "%s: Number of ports: %d\n", serial->type->description, num_ports); diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c index 677d12138dbd..ded49e3bf2b0 100644 --- a/drivers/usb/typec/tcpm.c +++ b/drivers/usb/typec/tcpm.c @@ -3725,6 +3725,7 @@ void tcpm_unregister_port(struct tcpm_port *port) for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++) typec_unregister_altmode(port->port_altmode[i]); typec_unregister_port(port->typec_port); + usb_role_switch_put(port->role_sw); tcpm_debugfs_exit(port); destroy_workqueue(port->wq); } diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c index 8b8406867c02..4b4c8d271b27 100644 --- a/drivers/usb/typec/tps6598x.c +++ b/drivers/usb/typec/tps6598x.c @@ -73,6 +73,7 @@ struct tps6598x { struct device *dev; struct regmap *regmap; struct mutex lock; /* device lock */ + u8 i2c_protocol:1; struct typec_port *port; struct typec_partner *partner; @@ -80,19 +81,39 @@ struct tps6598x { struct typec_capability typec_cap; }; +static int +tps6598x_block_read(struct tps6598x *tps, u8 reg, void *val, size_t len) +{ + u8 data[len + 1]; + int ret; + + if (!tps->i2c_protocol) + return regmap_raw_read(tps->regmap, reg, val, len); + + ret = regmap_raw_read(tps->regmap, reg, data, sizeof(data)); + if (ret) + return ret; + + if (data[0] < len) + return -EIO; + + memcpy(val, &data[1], len); + return 0; +} + static inline int tps6598x_read16(struct tps6598x *tps, u8 reg, u16 *val) { - return regmap_raw_read(tps->regmap, reg, val, sizeof(u16)); + return tps6598x_block_read(tps, reg, val, sizeof(u16)); } static inline int tps6598x_read32(struct tps6598x *tps, u8 reg, u32 *val) { - return regmap_raw_read(tps->regmap, reg, val, sizeof(u32)); + return tps6598x_block_read(tps, reg, val, sizeof(u32)); } static inline int tps6598x_read64(struct tps6598x *tps, u8 reg, u64 *val) { - return regmap_raw_read(tps->regmap, reg, val, sizeof(u64)); + return tps6598x_block_read(tps, reg, val, sizeof(u64)); } static inline int tps6598x_write16(struct tps6598x *tps, u8 reg, u16 val) @@ -121,8 +142,8 @@ static int tps6598x_read_partner_identity(struct tps6598x *tps) struct tps6598x_rx_identity_reg id; int ret; - ret = regmap_raw_read(tps->regmap, TPS_REG_RX_IDENTITY_SOP, - &id, sizeof(id)); + ret = tps6598x_block_read(tps, TPS_REG_RX_IDENTITY_SOP, + &id, sizeof(id)); if (ret) return ret; @@ -224,13 +245,13 @@ static int tps6598x_exec_cmd(struct tps6598x *tps, const char *cmd, } while (val); if (out_len) { - ret = regmap_raw_read(tps->regmap, TPS_REG_DATA1, - out_data, out_len); + ret = tps6598x_block_read(tps, TPS_REG_DATA1, + out_data, out_len); if (ret) return ret; val = out_data[0]; } else { - ret = regmap_read(tps->regmap, TPS_REG_DATA1, &val); + ret = tps6598x_block_read(tps, TPS_REG_DATA1, &val, sizeof(u8)); if 
(ret) return ret; } @@ -385,6 +406,16 @@ static int tps6598x_probe(struct i2c_client *client) if (!vid) return -ENODEV; + /* + * Checking can the adapter handle SMBus protocol. If it can not, the + * driver needs to take care of block reads separately. + * + * FIXME: Testing with I2C_FUNC_I2C. regmap-i2c uses I2C protocol + * unconditionally if the adapter has I2C_FUNC_I2C set. + */ + if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) + tps->i2c_protocol = true; + ret = tps6598x_read32(tps, TPS_REG_STATUS, &status); if (ret < 0) return ret; diff --git a/drivers/usb/typec/ucsi/Makefile b/drivers/usb/typec/ucsi/Makefile index b57891c1fd31..7afbea512207 100644 --- a/drivers/usb/typec/ucsi/Makefile +++ b/drivers/usb/typec/ucsi/Makefile @@ -5,6 +5,6 @@ obj-$(CONFIG_TYPEC_UCSI) += typec_ucsi.o typec_ucsi-y := ucsi.o -typec_ucsi-$(CONFIG_FTRACE) += trace.o +typec_ucsi-$(CONFIG_TRACING) += trace.o obj-$(CONFIG_UCSI_ACPI) += ucsi_acpi.o diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index bf0977fbd100..bd5cca5632b3 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -28,7 +28,7 @@ * difficult to estimate the time it takes for the system to process the command * before it is actually passed to the PPM. */ -#define UCSI_TIMEOUT_MS 1000 +#define UCSI_TIMEOUT_MS 5000 /* * UCSI_SWAP_TIMEOUT_MS - Timeout for role swap requests diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c index c31c8402a0c5..d41d0cdeec0f 100644 --- a/drivers/usb/usbip/stub_main.c +++ b/drivers/usb/usbip/stub_main.c @@ -186,7 +186,12 @@ static ssize_t rebind_store(struct device_driver *dev, const char *buf, if (!bid) return -ENODEV; + /* device_attach() callers should hold parent lock for USB */ + if (bid->udev->dev.parent) + device_lock(bid->udev->dev.parent); ret = device_attach(&bid->udev->dev); + if (bid->udev->dev.parent) + device_unlock(bid->udev->dev.parent); if (ret < 0) { dev_err(&bid->udev->dev, "rebind failed\n"); return ret; diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h index 473fb8a87289..bf8afe9b5883 100644 --- a/drivers/usb/usbip/usbip_common.h +++ b/drivers/usb/usbip/usbip_common.h @@ -243,7 +243,7 @@ enum usbip_side { #define VUDC_EVENT_ERROR_USB (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE) #define VUDC_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE) -#define VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_BYE) +#define VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_RESET | USBIP_EH_BYE) #define VDEV_EVENT_DOWN (USBIP_EH_SHUTDOWN | USBIP_EH_RESET) #define VDEV_EVENT_ERROR_TCP (USBIP_EH_SHUTDOWN | USBIP_EH_RESET) #define VDEV_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE) diff --git a/drivers/usb/usbip/usbip_event.c b/drivers/usb/usbip/usbip_event.c index 5b4c0864ad92..5d88917c9631 100644 --- a/drivers/usb/usbip/usbip_event.c +++ b/drivers/usb/usbip/usbip_event.c @@ -91,10 +91,6 @@ static void event_handler(struct work_struct *work) unset_event(ud, USBIP_EH_UNUSABLE); } - /* Stop the error handler. 
*/ - if (ud->event & USBIP_EH_BYE) - usbip_dbg_eh("removed %p\n", ud); - wake_up(&ud->eh_waitq); } } diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index 20e3d4609583..d11f3f8dad40 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c @@ -354,6 +354,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, usbip_dbg_vhci_rh(" ClearHubFeature\n"); break; case ClearPortFeature: + if (rhport < 0) + goto error; switch (wValue) { case USB_PORT_FEAT_SUSPEND: if (hcd->speed == HCD_USB3) { @@ -511,11 +513,16 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, goto error; } + if (rhport < 0) + goto error; + vhci_hcd->port_status[rhport] |= USB_PORT_STAT_SUSPEND; break; case USB_PORT_FEAT_POWER: usbip_dbg_vhci_rh( " SetPortFeature: USB_PORT_FEAT_POWER\n"); + if (rhport < 0) + goto error; if (hcd->speed == HCD_USB3) vhci_hcd->port_status[rhport] |= USB_SS_PORT_STAT_POWER; else @@ -524,6 +531,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, case USB_PORT_FEAT_BH_PORT_RESET: usbip_dbg_vhci_rh( " SetPortFeature: USB_PORT_FEAT_BH_PORT_RESET\n"); + if (rhport < 0) + goto error; /* Applicable only for USB3.0 hub */ if (hcd->speed != HCD_USB3) { pr_err("USB_PORT_FEAT_BH_PORT_RESET req not " @@ -534,6 +543,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, case USB_PORT_FEAT_RESET: usbip_dbg_vhci_rh( " SetPortFeature: USB_PORT_FEAT_RESET\n"); + if (rhport < 0) + goto error; /* if it's already enabled, disable */ if (hcd->speed == HCD_USB3) { vhci_hcd->port_status[rhport] = 0; @@ -554,6 +565,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, default: usbip_dbg_vhci_rh(" SetPortFeature: default %d\n", wValue); + if (rhport < 0) + goto error; if (hcd->speed == HCD_USB3) { if ((vhci_hcd->port_status[rhport] & USB_SS_PORT_STAT_POWER) != 0) { diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c index 190dbf8cfcb5..2f3856a95856 100644 --- a/drivers/virt/vboxguest/vboxguest_core.c +++ b/drivers/virt/vboxguest/vboxguest_core.c @@ -114,7 +114,7 @@ static void vbg_guest_mappings_init(struct vbg_dev *gdev) } out: - kfree(req); + vbg_req_free(req, sizeof(*req)); kfree(pages); } @@ -144,7 +144,7 @@ static void vbg_guest_mappings_exit(struct vbg_dev *gdev) rc = vbg_req_perform(gdev, req); - kfree(req); + vbg_req_free(req, sizeof(*req)); if (rc < 0) { vbg_err("%s error: %d\n", __func__, rc); @@ -214,8 +214,8 @@ static int vbg_report_guest_info(struct vbg_dev *gdev) ret = vbg_status_code_to_errno(rc); out_free: - kfree(req2); - kfree(req1); + vbg_req_free(req2, sizeof(*req2)); + vbg_req_free(req1, sizeof(*req1)); return ret; } @@ -245,7 +245,7 @@ static int vbg_report_driver_status(struct vbg_dev *gdev, bool active) if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. 
*/ rc = VINF_SUCCESS; - kfree(req); + vbg_req_free(req, sizeof(*req)); return vbg_status_code_to_errno(rc); } @@ -431,7 +431,7 @@ static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled) rc = vbg_req_perform(gdev, req); do_div(req->interval_ns, 1000000); /* ns -> ms */ gdev->heartbeat_interval_ms = req->interval_ns; - kfree(req); + vbg_req_free(req, sizeof(*req)); return vbg_status_code_to_errno(rc); } @@ -454,12 +454,6 @@ static int vbg_heartbeat_init(struct vbg_dev *gdev) if (ret < 0) return ret; - /* - * Preallocate the request to use it from the timer callback because: - * 1) on Windows vbg_req_alloc must be called at IRQL <= APC_LEVEL - * and the timer callback runs at DISPATCH_LEVEL; - * 2) avoid repeated allocations. - */ gdev->guest_heartbeat_req = vbg_req_alloc( sizeof(*gdev->guest_heartbeat_req), VMMDEVREQ_GUEST_HEARTBEAT); @@ -481,8 +475,8 @@ static void vbg_heartbeat_exit(struct vbg_dev *gdev) { del_timer_sync(&gdev->heartbeat_timer); vbg_heartbeat_host_config(gdev, false); - kfree(gdev->guest_heartbeat_req); - + vbg_req_free(gdev->guest_heartbeat_req, + sizeof(*gdev->guest_heartbeat_req)); } /** @@ -543,7 +537,7 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev, if (rc < 0) vbg_err("%s error, rc: %d\n", __func__, rc); - kfree(req); + vbg_req_free(req, sizeof(*req)); return vbg_status_code_to_errno(rc); } @@ -617,7 +611,7 @@ static int vbg_set_session_event_filter(struct vbg_dev *gdev, out: mutex_unlock(&gdev->session_mutex); - kfree(req); + vbg_req_free(req, sizeof(*req)); return ret; } @@ -642,7 +636,7 @@ static int vbg_reset_host_capabilities(struct vbg_dev *gdev) if (rc < 0) vbg_err("%s error, rc: %d\n", __func__, rc); - kfree(req); + vbg_req_free(req, sizeof(*req)); return vbg_status_code_to_errno(rc); } @@ -712,7 +706,7 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev, out: mutex_unlock(&gdev->session_mutex); - kfree(req); + vbg_req_free(req, sizeof(*req)); return ret; } @@ -733,8 +727,10 @@ static int vbg_query_host_version(struct vbg_dev *gdev) rc = vbg_req_perform(gdev, req); ret = vbg_status_code_to_errno(rc); - if (ret) + if (ret) { + vbg_err("%s error: %d\n", __func__, rc); goto out; + } snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u", req->major, req->minor, req->build, req->revision); @@ -749,7 +745,7 @@ static int vbg_query_host_version(struct vbg_dev *gdev) } out: - kfree(req); + vbg_req_free(req, sizeof(*req)); return ret; } @@ -847,11 +843,16 @@ int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events) return 0; err_free_reqs: - kfree(gdev->mouse_status_req); - kfree(gdev->ack_events_req); - kfree(gdev->cancel_req); - kfree(gdev->mem_balloon.change_req); - kfree(gdev->mem_balloon.get_req); + vbg_req_free(gdev->mouse_status_req, + sizeof(*gdev->mouse_status_req)); + vbg_req_free(gdev->ack_events_req, + sizeof(*gdev->ack_events_req)); + vbg_req_free(gdev->cancel_req, + sizeof(*gdev->cancel_req)); + vbg_req_free(gdev->mem_balloon.change_req, + sizeof(*gdev->mem_balloon.change_req)); + vbg_req_free(gdev->mem_balloon.get_req, + sizeof(*gdev->mem_balloon.get_req)); return ret; } @@ -872,11 +873,16 @@ void vbg_core_exit(struct vbg_dev *gdev) vbg_reset_host_capabilities(gdev); vbg_core_set_mouse_status(gdev, 0); - kfree(gdev->mouse_status_req); - kfree(gdev->ack_events_req); - kfree(gdev->cancel_req); - kfree(gdev->mem_balloon.change_req); - kfree(gdev->mem_balloon.get_req); + vbg_req_free(gdev->mouse_status_req, + sizeof(*gdev->mouse_status_req)); + vbg_req_free(gdev->ack_events_req, + 
sizeof(*gdev->ack_events_req)); + vbg_req_free(gdev->cancel_req, + sizeof(*gdev->cancel_req)); + vbg_req_free(gdev->mem_balloon.change_req, + sizeof(*gdev->mem_balloon.change_req)); + vbg_req_free(gdev->mem_balloon.get_req, + sizeof(*gdev->mem_balloon.get_req)); } /** @@ -1415,7 +1421,7 @@ static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev, req->flags = dump->u.in.flags; dump->hdr.rc = vbg_req_perform(gdev, req); - kfree(req); + vbg_req_free(req, sizeof(*req)); return 0; } @@ -1513,7 +1519,7 @@ int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features) if (rc < 0) vbg_err("%s error, rc: %d\n", __func__, rc); - kfree(req); + vbg_req_free(req, sizeof(*req)); return vbg_status_code_to_errno(rc); } diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h index 6c784bf4fa6d..7ad9ec45bfa9 100644 --- a/drivers/virt/vboxguest/vboxguest_core.h +++ b/drivers/virt/vboxguest/vboxguest_core.h @@ -171,4 +171,13 @@ irqreturn_t vbg_core_isr(int irq, void *dev_id); void vbg_linux_mouse_event(struct vbg_dev *gdev); +/* Private (non exported) functions form vboxguest_utils.c */ +void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type); +void vbg_req_free(void *req, size_t len); +int vbg_req_perform(struct vbg_dev *gdev, void *req); +int vbg_hgcm_call32( + struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms, + struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count, + int *vbox_status); + #endif diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c index 82e280d38cc2..398d22693234 100644 --- a/drivers/virt/vboxguest/vboxguest_linux.c +++ b/drivers/virt/vboxguest/vboxguest_linux.c @@ -87,6 +87,7 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req, struct vbg_session *session = filp->private_data; size_t returned_size, size; struct vbg_ioctl_hdr hdr; + bool is_vmmdev_req; int ret = 0; void *buf; @@ -106,8 +107,17 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req, if (size > SZ_16M) return -E2BIG; - /* __GFP_DMA32 because IOCTL_VMMDEV_REQUEST passes this to the host */ - buf = kmalloc(size, GFP_KERNEL | __GFP_DMA32); + /* + * IOCTL_VMMDEV_REQUEST needs the buffer to be below 4G to avoid + * the need for a bounce-buffer and another copy later on. 
+ */ + is_vmmdev_req = (req & ~IOCSIZE_MASK) == VBG_IOCTL_VMMDEV_REQUEST(0) || + req == VBG_IOCTL_VMMDEV_REQUEST_BIG; + + if (is_vmmdev_req) + buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT); + else + buf = kmalloc(size, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -132,7 +142,10 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req, ret = -EFAULT; out: - kfree(buf); + if (is_vmmdev_req) + vbg_req_free(buf, size); + else + kfree(buf); return ret; } diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c index 0f0dab8023cf..bf4474214b4d 100644 --- a/drivers/virt/vboxguest/vboxguest_utils.c +++ b/drivers/virt/vboxguest/vboxguest_utils.c @@ -65,8 +65,9 @@ VBG_LOG(vbg_debug, pr_debug); void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type) { struct vmmdev_request_header *req; + int order = get_order(PAGE_ALIGN(len)); - req = kmalloc(len, GFP_KERNEL | __GFP_DMA32); + req = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order); if (!req) return NULL; @@ -82,6 +83,14 @@ void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type) return req; } +void vbg_req_free(void *req, size_t len) +{ + if (!req) + return; + + free_pages((unsigned long)req, get_order(PAGE_ALIGN(len))); +} + /* Note this function returns a VBox status code, not a negative errno!! */ int vbg_req_perform(struct vbg_dev *gdev, void *req) { @@ -137,7 +146,7 @@ int vbg_hgcm_connect(struct vbg_dev *gdev, rc = hgcm_connect->header.result; } - kfree(hgcm_connect); + vbg_req_free(hgcm_connect, sizeof(*hgcm_connect)); *vbox_status = rc; return 0; @@ -166,7 +175,7 @@ int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status) if (rc >= 0) rc = hgcm_disconnect->header.result; - kfree(hgcm_disconnect); + vbg_req_free(hgcm_disconnect, sizeof(*hgcm_disconnect)); *vbox_status = rc; return 0; @@ -623,7 +632,7 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function, } if (!leak_it) - kfree(call); + vbg_req_free(call, size); free_bounce_bufs: if (bounce_bufs) { diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c index a5b8eb21201f..1abe4d021fd2 100644 --- a/drivers/watchdog/aspeed_wdt.c +++ b/drivers/watchdog/aspeed_wdt.c @@ -55,6 +55,8 @@ MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table); #define WDT_CTRL_WDT_INTR BIT(2) #define WDT_CTRL_RESET_SYSTEM BIT(1) #define WDT_CTRL_ENABLE BIT(0) +#define WDT_TIMEOUT_STATUS 0x10 +#define WDT_TIMEOUT_STATUS_BOOT_SECONDARY BIT(1) /* * WDT_RESET_WIDTH controls the characteristics of the external pulse (if @@ -192,6 +194,7 @@ static int aspeed_wdt_probe(struct platform_device *pdev) struct device_node *np; const char *reset_type; u32 duration; + u32 status; int ret; wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL); @@ -307,6 +310,10 @@ static int aspeed_wdt_probe(struct platform_device *pdev) writel(duration - 1, wdt->base + WDT_RESET_WIDTH); } + status = readl(wdt->base + WDT_TIMEOUT_STATUS); + if (status & WDT_TIMEOUT_STATUS_BOOT_SECONDARY) + wdt->wdd.bootstatus = WDIOF_CARDRESET; + ret = devm_watchdog_register_device(&pdev->dev, &wdt->wdd); if (ret) { dev_err(&pdev->dev, "failed to register\n"); diff --git a/drivers/watchdog/renesas_wdt.c b/drivers/watchdog/renesas_wdt.c index 6b8c6ddfe30b..514db5cc1595 100644 --- a/drivers/watchdog/renesas_wdt.c +++ b/drivers/watchdog/renesas_wdt.c @@ -121,7 +121,8 @@ static int rwdt_restart(struct watchdog_device *wdev, unsigned long action, } static const struct watchdog_info rwdt_ident = { - .options = WDIOF_MAGICCLOSE | 
WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT, + .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | + WDIOF_CARDRESET, .identity = "Renesas WDT Watchdog", }; @@ -197,9 +198,10 @@ static int rwdt_probe(struct platform_device *pdev) return PTR_ERR(clk); pm_runtime_enable(&pdev->dev); - pm_runtime_get_sync(&pdev->dev); priv->clk_rate = clk_get_rate(clk); + priv->wdev.bootstatus = (readb_relaxed(priv->base + RWTCSRA) & + RWTCSRA_WOVF) ? WDIOF_CARDRESET : 0; pm_runtime_put(&pdev->dev); if (!priv->clk_rate) { diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c index 43d0cbb7ba0b..814cdf539b0f 100644 --- a/drivers/watchdog/sch311x_wdt.c +++ b/drivers/watchdog/sch311x_wdt.c @@ -299,7 +299,7 @@ static long sch311x_wdt_ioctl(struct file *file, unsigned int cmd, if (sch311x_wdt_set_heartbeat(new_timeout)) return -EINVAL; sch311x_wdt_keepalive(); - /* Fall */ + /* Fall through */ case WDIOC_GETTIMEOUT: return put_user(timeout, p); default: diff --git a/drivers/watchdog/w83977f_wdt.c b/drivers/watchdog/w83977f_wdt.c index 20e2bba10400..672b61a7f9a3 100644 --- a/drivers/watchdog/w83977f_wdt.c +++ b/drivers/watchdog/w83977f_wdt.c @@ -427,7 +427,7 @@ static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return -EINVAL; wdt_keepalive(); - /* Fall */ + /* Fall through */ case WDIOC_GETTIMEOUT: return put_user(timeout, uarg.i); diff --git a/drivers/watchdog/wafer5823wdt.c b/drivers/watchdog/wafer5823wdt.c index db0da7ea4fd8..93c5b610e264 100644 --- a/drivers/watchdog/wafer5823wdt.c +++ b/drivers/watchdog/wafer5823wdt.c @@ -178,7 +178,7 @@ static long wafwdt_ioctl(struct file *file, unsigned int cmd, timeout = new_timeout; wafwdt_stop(); wafwdt_start(); - /* Fall */ + /* Fall through */ case WDIOC_GETTIMEOUT: return put_user(timeout, p); diff --git a/drivers/xen/xen-pciback/conf_space_quirks.c b/drivers/xen/xen-pciback/conf_space_quirks.c index 89d9744ece61..ed593d1042a6 100644 --- a/drivers/xen/xen-pciback/conf_space_quirks.c +++ b/drivers/xen/xen-pciback/conf_space_quirks.c @@ -95,7 +95,7 @@ int xen_pcibk_config_quirks_init(struct pci_dev *dev) struct xen_pcibk_config_quirk *quirk; int ret = 0; - quirk = kzalloc(sizeof(*quirk), GFP_ATOMIC); + quirk = kzalloc(sizeof(*quirk), GFP_KERNEL); if (!quirk) { ret = -ENOMEM; goto out; diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c index 9e480fdebe1f..59661db144e5 100644 --- a/drivers/xen/xen-pciback/pci_stub.c +++ b/drivers/xen/xen-pciback/pci_stub.c @@ -71,7 +71,7 @@ static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev) dev_dbg(&dev->dev, "pcistub_device_alloc\n"); - psdev = kzalloc(sizeof(*psdev), GFP_ATOMIC); + psdev = kzalloc(sizeof(*psdev), GFP_KERNEL); if (!psdev) return NULL; @@ -364,7 +364,7 @@ static int pcistub_init_device(struct pci_dev *dev) * here and then to call kfree(pci_get_drvdata(psdev->dev)). 
*/ dev_data = kzalloc(sizeof(*dev_data) + strlen(DRV_NAME "[]") - + strlen(pci_name(dev)) + 1, GFP_ATOMIC); + + strlen(pci_name(dev)) + 1, GFP_KERNEL); if (!dev_data) { err = -ENOMEM; goto out; @@ -577,7 +577,7 @@ static int pcistub_probe(struct pci_dev *dev, const struct pci_device_id *id) } if (!match) { - pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_ATOMIC); + pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL); if (!pci_dev_id) { err = -ENOMEM; goto out; @@ -1149,7 +1149,7 @@ static int pcistub_reg_add(int domain, int bus, int slot, int func, } dev = psdev->dev; - field = kzalloc(sizeof(*field), GFP_ATOMIC); + field = kzalloc(sizeof(*field), GFP_KERNEL); if (!field) { err = -ENOMEM; goto out; diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c index 0d6d9264d6a9..c3e201025ef0 100644 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c @@ -403,7 +403,7 @@ static int xenbus_command_reply(struct xenbus_file_priv *u, { struct { struct xsd_sockmsg hdr; - const char body[16]; + char body[16]; } msg; int rc; @@ -412,6 +412,7 @@ static int xenbus_command_reply(struct xenbus_file_priv *u, msg.hdr.len = strlen(reply) + 1; if (msg.hdr.len > sizeof(msg.body)) return -E2BIG; + memcpy(&msg.body, reply, msg.hdr.len); mutex_lock(&u->reply_mutex); rc = queue_reply(&u->read_buffers, &msg, sizeof(msg.hdr) + msg.hdr.len); |
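For reference, the qcom_geni_serial hunk earlier in this diff stops assigning the result of platform_get_irq() straight into the unsigned uart_port::irq field, where a negative error code could never be detected. A minimal sketch of that pattern, assuming a hypothetical helper name (example_request_port_irq is not from the patch; platform_get_irq() and struct uart_port are real):

#include <linux/platform_device.h>
#include <linux/serial_core.h>

static int example_request_port_irq(struct platform_device *pdev,
				    struct uart_port *uport)
{
	int irq;

	/* Keep the result in a signed local so a negative errno survives the check. */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* Only a valid IRQ number ever reaches the unsigned field. */
	uport->irq = irq;
	return 0;
}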
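Likewise, the dwc2_vbus_supply_init() change above stops swallowing every error from devm_regulator_get_optional(): only -ENODEV ("no vbus supply described") is treated as nothing-to-do, so -EPROBE_DEFER and real failures now propagate out of _dwc2_hcd_start(). A minimal sketch of that convention with a hypothetical wrapper name (only devm_regulator_get_optional() and regulator_enable() are real API):

#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int example_enable_optional_vbus(struct device *dev,
					struct regulator **vbus)
{
	*vbus = devm_regulator_get_optional(dev, "vbus");
	if (IS_ERR(*vbus)) {
		int ret = PTR_ERR(*vbus);

		*vbus = NULL;
		/* A missing supply is fine; deferral and real errors are not. */
		return ret == -ENODEV ? 0 : ret;
	}

	return regulator_enable(*vbus);
}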
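Finally, the vboxguest hunks replace kmalloc(..., __GFP_DMA32) with page allocations and add vbg_req_free(), since kmalloc() does not honour __GFP_DMA32 and so did not guarantee 32-bit addressable request buffers. A minimal sketch of the allocate/free pair under that assumption (example_* names are illustrative, not the driver's API):

#include <linux/gfp.h>
#include <linux/mm.h>

static void *example_req_alloc(size_t len)
{
	/* Whole pages from the DMA32 zone, rounded up to a power-of-two order. */
	return (void *)__get_free_pages(GFP_KERNEL | GFP_DMA32,
					get_order(PAGE_ALIGN(len)));
}

static void example_req_free(void *req, size_t len)
{
	if (!req)
		return;

	/* The caller passes the length again so the same order is released. */
	free_pages((unsigned long)req, get_order(PAGE_ALIGN(len)));
}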