author    | David S. Miller <davem@davemloft.net> | 2017-10-22 14:36:53 +0200
committer | David S. Miller <davem@davemloft.net> | 2017-10-22 14:39:14 +0200
commit    | f8ddadc4db6c7b7029b6d0e0d9af24f74ad27ca2 (patch)
tree      | 0a6432aba336bae42313613f4c891bcfce02bd4e /drivers
parent    | Merge branch 'tun-timer-cleanups' (diff)
parent    | Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net (diff)
download  | linux-f8ddadc4db6c7b7029b6d0e0d9af24f74ad27ca2.tar.xz linux-f8ddadc4db6c7b7029b6d0e0d9af24f74ad27ca2.zip
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
There were quite a few overlapping sets of changes here.
Daniel's bug fix for off-by-ones in the new BPF branch instructions,
along with the added allowances for "data_end > ptr + x" forms
collided with the metadata additions.
Along with those three changes came verifier test cases, which in
their final form I tried to group together properly. If I had just
trimmed GIT's conflict tags as-is, this would have split up the
meta tests unnecessarily.
In the socketmap code, a set of preemption disabling changes
overlapped with the rename of bpf_compute_data_end() to
bpf_compute_data_pointers().
Changes were made to the mv88e6060.c driver set addr method
which got removed in net-next.
The hyperv transport socket layer had a locking change in 'net'
which overlapped with a change of socket state macro usage
in 'net-next'.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
162 files changed, 1783 insertions, 799 deletions
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index 3fb8ff513461..e26ea209b63e 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -571,10 +571,9 @@ static int acpi_data_get_property_array(const struct acpi_device_data *data, * } * } * - * Calling this function with index %2 return %-ENOENT and with index %3 - * returns the last entry. If the property does not contain any more values - * %-ENODATA is returned. The NULL entry must be single integer and - * preferably contain value %0. + * Calling this function with index %2 or index %3 return %-ENOENT. If the + * property does not contain any more values %-ENOENT is returned. The NULL + * entry must be single integer and preferably contain value %0. * * Return: %0 on success, negative error code on failure. */ @@ -590,11 +589,11 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, data = acpi_device_data_of_node(fwnode); if (!data) - return -EINVAL; + return -ENOENT; ret = acpi_data_get_property(data, propname, ACPI_TYPE_ANY, &obj); if (ret) - return ret; + return ret == -EINVAL ? -ENOENT : -EINVAL; /* * The simplest case is when the value is a single reference. Just @@ -606,7 +605,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, ret = acpi_bus_get_device(obj->reference.handle, &device); if (ret) - return ret; + return ret == -ENODEV ? -EINVAL : ret; args->adev = device; args->nargs = 0; @@ -622,8 +621,10 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, * The index argument is then used to determine which reference * the caller wants (along with the arguments). */ - if (obj->type != ACPI_TYPE_PACKAGE || index >= obj->package.count) - return -EPROTO; + if (obj->type != ACPI_TYPE_PACKAGE) + return -EINVAL; + if (index >= obj->package.count) + return -ENOENT; element = obj->package.elements; end = element + obj->package.count; @@ -635,7 +636,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, ret = acpi_bus_get_device(element->reference.handle, &device); if (ret) - return -ENODEV; + return -EINVAL; nargs = 0; element++; @@ -649,11 +650,11 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, else if (type == ACPI_TYPE_LOCAL_REFERENCE) break; else - return -EPROTO; + return -EINVAL; } if (nargs > MAX_ACPI_REFERENCE_ARGS) - return -EPROTO; + return -EINVAL; if (idx == index) { args->adev = device; @@ -670,13 +671,13 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, return -ENOENT; element++; } else { - return -EPROTO; + return -EINVAL; } idx++; } - return -ENODATA; + return -ENOENT; } EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference); diff --git a/drivers/android/binder.c b/drivers/android/binder.c index ab34239a76ee..0621a95b8597 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -2582,6 +2582,48 @@ static bool binder_proc_transaction(struct binder_transaction *t, return true; } +/** + * binder_get_node_refs_for_txn() - Get required refs on node for txn + * @node: struct binder_node for which to get refs + * @proc: returns @node->proc if valid + * @error: if no @proc then returns BR_DEAD_REPLY + * + * User-space normally keeps the node alive when creating a transaction + * since it has a reference to the target. The local strong ref keeps it + * alive if the sending process dies before the target process processes + * the transaction. 
If the source process is malicious or has a reference + * counting bug, relying on the local strong ref can fail. + * + * Since user-space can cause the local strong ref to go away, we also take + * a tmpref on the node to ensure it survives while we are constructing + * the transaction. We also need a tmpref on the proc while we are + * constructing the transaction, so we take that here as well. + * + * Return: The target_node with refs taken or NULL if no @node->proc is NULL. + * Also sets @proc if valid. If the @node->proc is NULL indicating that the + * target proc has died, @error is set to BR_DEAD_REPLY + */ +static struct binder_node *binder_get_node_refs_for_txn( + struct binder_node *node, + struct binder_proc **procp, + uint32_t *error) +{ + struct binder_node *target_node = NULL; + + binder_node_inner_lock(node); + if (node->proc) { + target_node = node; + binder_inc_node_nilocked(node, 1, 0, NULL); + binder_inc_node_tmpref_ilocked(node); + node->proc->tmp_ref++; + *procp = node->proc; + } else + *error = BR_DEAD_REPLY; + binder_node_inner_unlock(node); + + return target_node; +} + static void binder_transaction(struct binder_proc *proc, struct binder_thread *thread, struct binder_transaction_data *tr, int reply, @@ -2685,43 +2727,35 @@ static void binder_transaction(struct binder_proc *proc, ref = binder_get_ref_olocked(proc, tr->target.handle, true); if (ref) { - binder_inc_node(ref->node, 1, 0, NULL); - target_node = ref->node; - } - binder_proc_unlock(proc); - if (target_node == NULL) { + target_node = binder_get_node_refs_for_txn( + ref->node, &target_proc, + &return_error); + } else { binder_user_error("%d:%d got transaction to invalid handle\n", - proc->pid, thread->pid); + proc->pid, thread->pid); return_error = BR_FAILED_REPLY; - return_error_param = -EINVAL; - return_error_line = __LINE__; - goto err_invalid_target_handle; } + binder_proc_unlock(proc); } else { mutex_lock(&context->context_mgr_node_lock); target_node = context->binder_context_mgr_node; - if (target_node == NULL) { + if (target_node) + target_node = binder_get_node_refs_for_txn( + target_node, &target_proc, + &return_error); + else return_error = BR_DEAD_REPLY; - mutex_unlock(&context->context_mgr_node_lock); - return_error_line = __LINE__; - goto err_no_context_mgr_node; - } - binder_inc_node(target_node, 1, 0, NULL); mutex_unlock(&context->context_mgr_node_lock); } - e->to_node = target_node->debug_id; - binder_node_lock(target_node); - target_proc = target_node->proc; - if (target_proc == NULL) { - binder_node_unlock(target_node); - return_error = BR_DEAD_REPLY; + if (!target_node) { + /* + * return_error is set above + */ + return_error_param = -EINVAL; return_error_line = __LINE__; goto err_dead_binder; } - binder_inner_proc_lock(target_proc); - target_proc->tmp_ref++; - binder_inner_proc_unlock(target_proc); - binder_node_unlock(target_node); + e->to_node = target_node->debug_id; if (security_binder_transaction(proc->tsk, target_proc->tsk) < 0) { return_error = BR_FAILED_REPLY; @@ -3071,6 +3105,8 @@ static void binder_transaction(struct binder_proc *proc, if (target_thread) binder_thread_dec_tmpref(target_thread); binder_proc_dec_tmpref(target_proc); + if (target_node) + binder_dec_node_tmpref(target_node); /* * write barrier to synchronize with initialization * of log entry @@ -3090,6 +3126,8 @@ err_bad_parent: err_copy_data_failed: trace_binder_transaction_failed_buffer_release(t->buffer); binder_transaction_buffer_release(target_proc, t->buffer, offp); + if (target_node) + 
binder_dec_node_tmpref(target_node); target_node = NULL; t->buffer->transaction = NULL; binder_alloc_free_buf(&target_proc->alloc, t->buffer); @@ -3104,13 +3142,14 @@ err_bad_call_stack: err_empty_call_stack: err_dead_binder: err_invalid_target_handle: -err_no_context_mgr_node: if (target_thread) binder_thread_dec_tmpref(target_thread); if (target_proc) binder_proc_dec_tmpref(target_proc); - if (target_node) + if (target_node) { binder_dec_node(target_node, 1, 0); + binder_dec_node_tmpref(target_node); + } binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n", diff --git a/drivers/base/node.c b/drivers/base/node.c index 3855902f2c5b..aae2402f3791 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -27,13 +27,21 @@ static struct bus_type node_subsys = { static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf) { + ssize_t n; + cpumask_var_t mask; struct node *node_dev = to_node(dev); - const struct cpumask *mask = cpumask_of_node(node_dev->dev.id); /* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */ BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1)); - return cpumap_print_to_pagebuf(list, buf, mask); + if (!alloc_cpumask_var(&mask, GFP_KERNEL)) + return 0; + + cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask); + n = cpumap_print_to_pagebuf(list, buf, mask); + free_cpumask_var(mask); + + return n; } static inline ssize_t node_read_cpumask(struct device *dev, diff --git a/drivers/base/property.c b/drivers/base/property.c index d0b65bbe7e15..7ed99c1b2a8b 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -21,6 +21,7 @@ #include <linux/phy.h> struct property_set { + struct device *dev; struct fwnode_handle fwnode; const struct property_entry *properties; }; @@ -682,6 +683,10 @@ EXPORT_SYMBOL_GPL(fwnode_property_match_string); * Caller is responsible to call fwnode_handle_put() on the returned * args->fwnode pointer. * + * Returns: %0 on success + * %-ENOENT when the index is out of bounds, the index has an empty + * reference or the property was not found + * %-EINVAL on parse error */ int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode, const char *prop, const char *nargs_prop, @@ -891,6 +896,7 @@ static struct property_set *pset_copy_set(const struct property_set *pset) void device_remove_properties(struct device *dev) { struct fwnode_handle *fwnode; + struct property_set *pset; fwnode = dev_fwnode(dev); if (!fwnode) @@ -900,16 +906,16 @@ void device_remove_properties(struct device *dev) * the pset. If there is no real firmware node (ACPI/DT) primary * will hold the pset. 
*/ - if (is_pset_node(fwnode)) { + pset = to_pset_node(fwnode); + if (pset) { set_primary_fwnode(dev, NULL); - pset_free_set(to_pset_node(fwnode)); } else { - fwnode = fwnode->secondary; - if (!IS_ERR(fwnode) && is_pset_node(fwnode)) { + pset = to_pset_node(fwnode->secondary); + if (pset && dev == pset->dev) set_secondary_fwnode(dev, NULL); - pset_free_set(to_pset_node(fwnode)); - } } + if (pset && dev == pset->dev) + pset_free_set(pset); } EXPORT_SYMBOL_GPL(device_remove_properties); @@ -938,6 +944,7 @@ int device_add_properties(struct device *dev, p->fwnode.ops = &pset_fwnode_ops; set_secondary_fwnode(dev, &p->fwnode); + p->dev = dev; return 0; } EXPORT_SYMBOL_GPL(device_add_properties); diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 883dfebd3014..baebbdfd74d5 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -243,7 +243,6 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize, struct nbd_config *config = nbd->config; config->blksize = blocksize; config->bytesize = blocksize * nr_blocks; - nbd_size_update(nbd); } static void nbd_complete_rq(struct request *req) @@ -1094,6 +1093,7 @@ static int nbd_start_device(struct nbd_device *nbd) args->index = i; queue_work(recv_workqueue, &args->work); } + nbd_size_update(nbd); return error; } diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c index 7cedb4295e9d..64d0fc17c174 100644 --- a/drivers/block/skd_main.c +++ b/drivers/block/skd_main.c @@ -2604,7 +2604,7 @@ static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s, return NULL; *dma_handle = dma_map_single(dev, buf, s->size, dir); if (dma_mapping_error(dev, *dma_handle)) { - kfree(buf); + kmem_cache_free(s, buf); buf = NULL; } return buf; diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c index c7f396903184..70db4d5638a6 100644 --- a/drivers/bus/mvebu-mbus.c +++ b/drivers/bus/mvebu-mbus.c @@ -720,7 +720,7 @@ mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus) if (mbus->hw_io_coherency) w->mbus_attr |= ATTR_HW_COHERENCY; w->base = base & DDR_BASE_CS_LOW_MASK; - w->size = (size | ~DDR_SIZE_MASK) + 1; + w->size = (u64)(size | ~DDR_SIZE_MASK) + 1; } } mvebu_mbus_dram_info.num_cs = cs; diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c index d9fbbf01062b..0f9754e07719 100644 --- a/drivers/crypto/axis/artpec6_crypto.c +++ b/drivers/crypto/axis/artpec6_crypto.c @@ -349,8 +349,6 @@ struct artpec6_crypto_aead_req_ctx { /* The crypto framework makes it hard to avoid this global. 
*/ static struct device *artpec6_crypto_dev; -static struct dentry *dbgfs_root; - #ifdef CONFIG_FAULT_INJECTION static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read); static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full); @@ -2984,6 +2982,8 @@ struct dbgfs_u32 { char *desc; }; +static struct dentry *dbgfs_root; + static void artpec6_crypto_init_debugfs(void) { dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL); diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index b585ce54a802..4835dd4a9e50 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ -553,9 +553,9 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev) { struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); struct scatterlist sg[1], *tsg; - int err = 0, len = 0, reg, ncp; + int err = 0, len = 0, reg, ncp = 0; unsigned int i; - const u32 *buffer = (const u32 *)rctx->buffer; + u32 *buffer = (void *)rctx->buffer; rctx->sg = hdev->req->src; rctx->total = hdev->req->nbytes; @@ -620,10 +620,13 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev) reg |= HASH_CR_DMAA; stm32_hash_write(hdev, HASH_CR, reg); - for (i = 0; i < DIV_ROUND_UP(ncp, sizeof(u32)); i++) - stm32_hash_write(hdev, HASH_DIN, buffer[i]); - - stm32_hash_set_nblw(hdev, ncp); + if (ncp) { + memset(buffer + ncp, 0, + DIV_ROUND_UP(ncp, sizeof(u32)) - ncp); + writesl(hdev->io_base + HASH_DIN, buffer, + DIV_ROUND_UP(ncp, sizeof(u32))); + } + stm32_hash_set_nblw(hdev, DIV_ROUND_UP(ncp, sizeof(u32))); reg = stm32_hash_read(hdev, HASH_STR); reg |= HASH_STR_DCAL; stm32_hash_write(hdev, HASH_STR, reg); diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index 66fb40d0ebdb..03830634e141 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -383,7 +383,7 @@ err_put_fd: return err; } -static void sync_fill_fence_info(struct dma_fence *fence, +static int sync_fill_fence_info(struct dma_fence *fence, struct sync_fence_info *info) { strlcpy(info->obj_name, fence->ops->get_timeline_name(fence), @@ -399,6 +399,8 @@ static void sync_fill_fence_info(struct dma_fence *fence, test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ? ktime_to_ns(fence->timestamp) : ktime_set(0, 0); + + return info->status; } static long sync_file_ioctl_fence_info(struct sync_file *sync_file, @@ -424,8 +426,12 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file, * sync_fence_info and return the actual number of fences on * info->num_fences. */ - if (!info.num_fences) + if (!info.num_fences) { + info.status = dma_fence_is_signaled(sync_file->fence); goto no_fences; + } else { + info.status = 1; + } if (info.num_fences < num_fences) return -EINVAL; @@ -435,8 +441,10 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file, if (!fence_info) return -ENOMEM; - for (i = 0; i < num_fences; i++) - sync_fill_fence_info(fences[i], &fence_info[i]); + for (i = 0; i < num_fences; i++) { + int status = sync_fill_fence_info(fences[i], &fence_info[i]); + info.status = info.status <= 0 ? 
info.status : status; + } if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info, size)) { @@ -446,7 +454,6 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file, no_fences: sync_file_get_name(sync_file, info.name, sizeof(info.name)); - info.status = dma_fence_is_signaled(sync_file->fence); info.num_fences = num_fences; if (copy_to_user((void __user *)arg, &info, sizeof(info))) diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c index 32905d5606ac..55f9c62ee54b 100644 --- a/drivers/dma/altera-msgdma.c +++ b/drivers/dma/altera-msgdma.c @@ -212,11 +212,12 @@ struct msgdma_device { static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev) { struct msgdma_sw_desc *desc; + unsigned long flags; - spin_lock_bh(&mdev->lock); + spin_lock_irqsave(&mdev->lock, flags); desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node); list_del(&desc->node); - spin_unlock_bh(&mdev->lock); + spin_unlock_irqrestore(&mdev->lock, flags); INIT_LIST_HEAD(&desc->tx_list); @@ -306,13 +307,14 @@ static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx) struct msgdma_device *mdev = to_mdev(tx->chan); struct msgdma_sw_desc *new; dma_cookie_t cookie; + unsigned long flags; new = tx_to_desc(tx); - spin_lock_bh(&mdev->lock); + spin_lock_irqsave(&mdev->lock, flags); cookie = dma_cookie_assign(tx); list_add_tail(&new->node, &mdev->pending_list); - spin_unlock_bh(&mdev->lock); + spin_unlock_irqrestore(&mdev->lock, flags); return cookie; } @@ -336,17 +338,18 @@ msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst, struct msgdma_extended_desc *desc; size_t copy; u32 desc_cnt; + unsigned long irqflags; desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN); - spin_lock_bh(&mdev->lock); + spin_lock_irqsave(&mdev->lock, irqflags); if (desc_cnt > mdev->desc_free_cnt) { - spin_unlock_bh(&mdev->lock); + spin_unlock_irqrestore(&mdev->lock, irqflags); dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); return NULL; } mdev->desc_free_cnt -= desc_cnt; - spin_unlock_bh(&mdev->lock); + spin_unlock_irqrestore(&mdev->lock, irqflags); do { /* Allocate and populate the descriptor */ @@ -397,18 +400,19 @@ msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, u32 desc_cnt = 0, i; struct scatterlist *sg; u32 stride; + unsigned long irqflags; for_each_sg(sgl, sg, sg_len, i) desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN); - spin_lock_bh(&mdev->lock); + spin_lock_irqsave(&mdev->lock, irqflags); if (desc_cnt > mdev->desc_free_cnt) { - spin_unlock_bh(&mdev->lock); + spin_unlock_irqrestore(&mdev->lock, irqflags); dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); return NULL; } mdev->desc_free_cnt -= desc_cnt; - spin_unlock_bh(&mdev->lock); + spin_unlock_irqrestore(&mdev->lock, irqflags); avail = sg_dma_len(sgl); @@ -566,10 +570,11 @@ static void msgdma_start_transfer(struct msgdma_device *mdev) static void msgdma_issue_pending(struct dma_chan *chan) { struct msgdma_device *mdev = to_mdev(chan); + unsigned long flags; - spin_lock_bh(&mdev->lock); + spin_lock_irqsave(&mdev->lock, flags); msgdma_start_transfer(mdev); - spin_unlock_bh(&mdev->lock); + spin_unlock_irqrestore(&mdev->lock, flags); } /** @@ -634,10 +639,11 @@ static void msgdma_free_descriptors(struct msgdma_device *mdev) static void msgdma_free_chan_resources(struct dma_chan *dchan) { struct msgdma_device *mdev = to_mdev(dchan); + unsigned long flags; - spin_lock_bh(&mdev->lock); + spin_lock_irqsave(&mdev->lock, flags); 
msgdma_free_descriptors(mdev); - spin_unlock_bh(&mdev->lock); + spin_unlock_irqrestore(&mdev->lock, flags); kfree(mdev->sw_desq); } @@ -682,8 +688,9 @@ static void msgdma_tasklet(unsigned long data) u32 count; u32 __maybe_unused size; u32 __maybe_unused status; + unsigned long flags; - spin_lock(&mdev->lock); + spin_lock_irqsave(&mdev->lock, flags); /* Read number of responses that are available */ count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL); @@ -698,13 +705,13 @@ static void msgdma_tasklet(unsigned long data) * bits. So we need to just drop these values. */ size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED); - status = ioread32(mdev->resp - MSGDMA_RESP_STATUS); + status = ioread32(mdev->resp + MSGDMA_RESP_STATUS); msgdma_complete_descriptor(mdev); msgdma_chan_desc_cleanup(mdev); } - spin_unlock(&mdev->lock); + spin_unlock_irqrestore(&mdev->lock, flags); } /** diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 3879f80a4815..a7ea20e7b8e9 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -1143,11 +1143,24 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy( struct edma_desc *edesc; struct device *dev = chan->device->dev; struct edma_chan *echan = to_edma_chan(chan); - unsigned int width, pset_len; + unsigned int width, pset_len, array_size; if (unlikely(!echan || !len)) return NULL; + /* Align the array size (acnt block) with the transfer properties */ + switch (__ffs((src | dest | len))) { + case 0: + array_size = SZ_32K - 1; + break; + case 1: + array_size = SZ_32K - 2; + break; + default: + array_size = SZ_32K - 4; + break; + } + if (len < SZ_64K) { /* * Transfer size less than 64K can be handled with one paRAM @@ -1169,7 +1182,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy( * When the full_length is multibple of 32767 one slot can be * used to complete the transfer. 
*/ - width = SZ_32K - 1; + width = array_size; pset_len = rounddown(len, width); /* One slot is enough for lengths multiple of (SZ_32K -1) */ if (unlikely(pset_len == len)) @@ -1217,7 +1230,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy( } dest += pset_len; src += pset_len; - pset_len = width = len % (SZ_32K - 1); + pset_len = width = len % array_size; ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1, width, pset_len, DMA_MEM_TO_MEM); diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c index 2f65a8fde21d..f1d04b70ee67 100644 --- a/drivers/dma/ti-dma-crossbar.c +++ b/drivers/dma/ti-dma-crossbar.c @@ -262,13 +262,14 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec, mutex_lock(&xbar->mutex); map->xbar_out = find_first_zero_bit(xbar->dma_inuse, xbar->dma_requests); - mutex_unlock(&xbar->mutex); if (map->xbar_out == xbar->dma_requests) { + mutex_unlock(&xbar->mutex); dev_err(&pdev->dev, "Run out of free DMA requests\n"); kfree(map); return ERR_PTR(-ENOMEM); } set_bit(map->xbar_out, xbar->dma_inuse); + mutex_unlock(&xbar->mutex); map->xbar_in = (u16)dma_spec->args[0]; diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 3388d54ba114..3f80f167ed56 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -453,7 +453,8 @@ config GPIO_TS4800 config GPIO_THUNDERX tristate "Cavium ThunderX/OCTEON-TX GPIO" depends on ARCH_THUNDER || (64BIT && COMPILE_TEST) - depends on PCI_MSI && IRQ_DOMAIN_HIERARCHY + depends on PCI_MSI + select IRQ_DOMAIN_HIERARCHY select IRQ_FASTEOI_HIERARCHY_HANDLERS help Say yes here to support the on-chip GPIO lines on the ThunderX diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index dbf869fb63ce..3233b72b6828 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -518,7 +518,13 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type) if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) irq_set_handler_locked(d, handle_level_irq); else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) - irq_set_handler_locked(d, handle_edge_irq); + /* + * Edge IRQs are already cleared/acked in irq_handler and + * not need to be masked, as result handle_edge_irq() + * logic is excessed here and may cause lose of interrupts. + * So just use handle_simple_irq. 
+ */ + irq_set_handler_locked(d, handle_simple_irq); return 0; @@ -678,7 +684,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset) static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank) { void __iomem *isr_reg = NULL; - u32 isr; + u32 enabled, isr, level_mask; unsigned int bit; struct gpio_bank *bank = gpiobank; unsigned long wa_lock_flags; @@ -691,23 +697,21 @@ static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank) pm_runtime_get_sync(bank->chip.parent); while (1) { - u32 isr_saved, level_mask = 0; - u32 enabled; - raw_spin_lock_irqsave(&bank->lock, lock_flags); enabled = omap_get_gpio_irqbank_mask(bank); - isr_saved = isr = readl_relaxed(isr_reg) & enabled; + isr = readl_relaxed(isr_reg) & enabled; if (bank->level_mask) level_mask = bank->level_mask & enabled; + else + level_mask = 0; /* clear edge sensitive interrupts before handler(s) are called so that we don't miss any interrupt occurred while executing them */ - omap_disable_gpio_irqbank(bank, isr_saved & ~level_mask); - omap_clear_gpio_irqbank(bank, isr_saved & ~level_mask); - omap_enable_gpio_irqbank(bank, isr_saved & ~level_mask); + if (isr & ~level_mask) + omap_clear_gpio_irqbank(bank, isr & ~level_mask); raw_spin_unlock_irqrestore(&bank->lock, lock_flags); @@ -1010,7 +1014,7 @@ static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value) /*---------------------------------------------------------------------*/ -static void __init omap_gpio_show_rev(struct gpio_bank *bank) +static void omap_gpio_show_rev(struct gpio_bank *bank) { static bool called; u32 rev; diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 4d2113530735..eb4528c87c0b 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -203,7 +203,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, if (pin <= 255) { char ev_name[5]; - sprintf(ev_name, "_%c%02X", + sprintf(ev_name, "_%c%02hhX", agpio->triggering == ACPI_EDGE_SENSITIVE ? 
'E' : 'L', pin); if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle))) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 7ef6c28a34d9..bc746131987f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -834,7 +834,7 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem) placement.busy_placement = &placements; placements.fpfn = 0; placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT; - placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; + placements.flags = bo->mem.placement | TTM_PL_FLAG_TT; r = ttm_bo_mem_space(bo, &placement, &tmp, true, false); if (unlikely(r)) diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 97c94f9683fa..38cea6fb25a8 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -205,32 +205,17 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, struct amd_sched_entity *entity) { struct amd_sched_rq *rq = entity->rq; - int r; if (!amd_sched_entity_is_initialized(sched, entity)) return; + /** * The client will not queue more IBs during this fini, consume existing - * queued IBs or discard them on SIGKILL + * queued IBs */ - if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL) - r = -ERESTARTSYS; - else - r = wait_event_killable(sched->job_scheduled, - amd_sched_entity_is_idle(entity)); - amd_sched_rq_remove_entity(rq, entity); - if (r) { - struct amd_sched_job *job; + wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity)); - /* Park the kernel for a moment to make sure it isn't processing - * our enity. - */ - kthread_park(sched->thread); - kthread_unpark(sched->thread); - while (kfifo_out(&entity->job_queue, &job, sizeof(job))) - sched->ops->free_job(job); - - } + amd_sched_rq_remove_entity(rq, entity); kfifo_free(&entity->job_queue); } diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 4e53aae9a1fb..0028591f3f95 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -2960,6 +2960,7 @@ out: drm_modeset_backoff(&ctx); } + drm_atomic_state_put(state); drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index e651a58c18cf..82b72425a42f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -168,11 +168,13 @@ static struct drm_driver exynos_drm_driver = { static int exynos_drm_suspend(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - struct exynos_drm_private *private = drm_dev->dev_private; + struct exynos_drm_private *private; if (pm_runtime_suspended(dev) || !drm_dev) return 0; + private = drm_dev->dev_private; + drm_kms_helper_poll_disable(drm_dev); exynos_drm_fbdev_suspend(drm_dev); private->suspend_state = drm_atomic_helper_suspend(drm_dev); @@ -188,11 +190,12 @@ static int exynos_drm_suspend(struct device *dev) static int exynos_drm_resume(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - struct exynos_drm_private *private = drm_dev->dev_private; + struct exynos_drm_private *private; if (pm_runtime_suspended(dev) || !drm_dev) return 0; + private = drm_dev->dev_private; drm_atomic_helper_resume(drm_dev, private->suspend_state); exynos_drm_fbdev_resume(drm_dev); drm_kms_helper_poll_enable(drm_dev); @@ 
-427,6 +430,7 @@ static void exynos_drm_unbind(struct device *dev) kfree(drm->dev_private); drm->dev_private = NULL; + dev_set_drvdata(dev, NULL); drm_dev_unref(drm); } diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index 436377da41ba..03532dfc0cd5 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c @@ -308,20 +308,8 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu) static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu) { - struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler; - int ring_id; - kfree(vgpu->sched_data); vgpu->sched_data = NULL; - - spin_lock_bh(&scheduler->mmio_context_lock); - for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) { - if (scheduler->engine_owner[ring_id] == vgpu) { - intel_gvt_switch_mmio(vgpu, NULL, ring_id); - scheduler->engine_owner[ring_id] = NULL; - } - } - spin_unlock_bh(&scheduler->mmio_context_lock); } static void tbs_sched_start_schedule(struct intel_vgpu *vgpu) @@ -388,6 +376,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) { struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler; + int ring_id; gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id); @@ -401,4 +390,13 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) scheduler->need_reschedule = true; scheduler->current_vgpu = NULL; } + + spin_lock_bh(&scheduler->mmio_context_lock); + for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) { + if (scheduler->engine_owner[ring_id] == vgpu) { + intel_gvt_switch_mmio(vgpu, NULL, ring_id); + scheduler->engine_owner[ring_id] = NULL; + } + } + spin_unlock_bh(&scheduler->mmio_context_lock); } diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 19404c96eeb1..32e857dc507c 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2657,6 +2657,9 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj, if (READ_ONCE(obj->mm.pages)) return -ENODEV; + if (obj->mm.madv != I915_MADV_WILLNEED) + return -EFAULT; + /* Before the pages are instantiated the object is treated as being * in the CPU domain. The pages will be clflushed as required before * use, and we can freely write into the pages directly. 
If userspace @@ -3013,10 +3016,15 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv) static void nop_submit_request(struct drm_i915_gem_request *request) { + unsigned long flags; + GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error)); dma_fence_set_error(&request->fence, -EIO); - i915_gem_request_submit(request); + + spin_lock_irqsave(&request->engine->timeline->lock, flags); + __i915_gem_request_submit(request); intel_engine_init_global_seqno(request->engine, request->global_seqno); + spin_unlock_irqrestore(&request->engine->timeline->lock, flags); } static void engine_set_wedged(struct intel_engine_cs *engine) diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 4df039ef2ce3..e161d383b526 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -33,21 +33,20 @@ #include "intel_drv.h" #include "i915_trace.h" -static bool ggtt_is_idle(struct drm_i915_private *dev_priv) +static bool ggtt_is_idle(struct drm_i915_private *i915) { - struct i915_ggtt *ggtt = &dev_priv->ggtt; - struct intel_engine_cs *engine; - enum intel_engine_id id; + struct intel_engine_cs *engine; + enum intel_engine_id id; - for_each_engine(engine, dev_priv, id) { - struct intel_timeline *tl; + if (i915->gt.active_requests) + return false; - tl = &ggtt->base.timeline.engine[engine->id]; - if (i915_gem_active_isset(&tl->last_request)) - return false; - } + for_each_engine(engine, i915, id) { + if (engine->last_retired_context != i915->kernel_context) + return false; + } - return true; + return true; } static int ggtt_flush(struct drm_i915_private *i915) @@ -157,7 +156,8 @@ i915_gem_evict_something(struct i915_address_space *vm, min_size, alignment, cache_level, start, end, mode); - /* Retire before we search the active list. Although we have + /* + * Retire before we search the active list. Although we have * reasonable accuracy in our retirement lists, we may have * a stray pin (preventing eviction) that can only be resolved by * retiring. @@ -182,7 +182,8 @@ search_again: BUG_ON(ret); } - /* Can we unpin some objects such as idle hw contents, + /* + * Can we unpin some objects such as idle hw contents, * or pending flips? But since only the GGTT has global entries * such as scanouts, rinbuffers and contexts, we can skip the * purge when inspecting per-process local address spaces. @@ -190,19 +191,33 @@ search_again: if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK) return -ENOSPC; - if (ggtt_is_idle(dev_priv)) { - /* If we still have pending pageflip completions, drop - * back to userspace to give our workqueues time to - * acquire our locks and unpin the old scanouts. - */ - return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC; - } + /* + * Not everything in the GGTT is tracked via VMA using + * i915_vma_move_to_active(), otherwise we could evict as required + * with minimal stalling. Instead we are forced to idle the GPU and + * explicitly retire outstanding requests which will then remove + * the pinning for active objects such as contexts and ring, + * enabling us to evict them on the next iteration. + * + * To ensure that all user contexts are evictable, we perform + * a switch to the perma-pinned kernel context. This all also gives + * us a termination condition, when the last retired context is + * the kernel's there is no more we can evict. 
+ */ + if (!ggtt_is_idle(dev_priv)) { + ret = ggtt_flush(dev_priv); + if (ret) + return ret; - ret = ggtt_flush(dev_priv); - if (ret) - return ret; + goto search_again; + } - goto search_again; + /* + * If we still have pending pageflip completions, drop + * back to userspace to give our workqueues time to + * acquire our locks and unpin the old scanouts. + */ + return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC; found: /* drm_mm doesn't allow any other other operations while diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index ed7cd9ee2c2a..c9bcc6c45012 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -6998,6 +6998,7 @@ enum { */ #define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19) #define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14) +#define L3_PRIO_CREDITS_MASK ((0x1f << 19) | (0x1f << 14)) #define GEN7_L3CNTLREG1 _MMIO(0xB01C) #define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 00c6aee0a9a1..5d4cd3d00564 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -1240,7 +1240,7 @@ static void parse_ddi_ports(struct drm_i915_private *dev_priv, { enum port port; - if (!HAS_DDI(dev_priv)) + if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv)) return; if (!dev_priv->vbt.child_dev_num) diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c index ff9ecd211abb..b8315bca852b 100644 --- a/drivers/gpu/drm/i915/intel_color.c +++ b/drivers/gpu/drm/i915/intel_color.c @@ -74,7 +74,7 @@ #define I9XX_CSC_COEFF_1_0 \ ((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8)) -static bool crtc_state_is_legacy(struct drm_crtc_state *state) +static bool crtc_state_is_legacy_gamma(struct drm_crtc_state *state) { return !state->degamma_lut && !state->ctm && @@ -288,7 +288,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state) } mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0); - if (!crtc_state_is_legacy(state)) { + if (!crtc_state_is_legacy_gamma(state)) { mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) | (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0); } @@ -469,7 +469,7 @@ static void broadwell_load_luts(struct drm_crtc_state *state) struct intel_crtc_state *intel_state = to_intel_crtc_state(state); enum pipe pipe = to_intel_crtc(state->crtc)->pipe; - if (crtc_state_is_legacy(state)) { + if (crtc_state_is_legacy_gamma(state)) { haswell_load_luts(state); return; } @@ -529,7 +529,7 @@ static void glk_load_luts(struct drm_crtc_state *state) glk_load_degamma_lut(state); - if (crtc_state_is_legacy(state)) { + if (crtc_state_is_legacy_gamma(state)) { haswell_load_luts(state); return; } @@ -551,7 +551,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state) uint32_t i, lut_size; uint32_t word0, word1; - if (crtc_state_is_legacy(state)) { + if (crtc_state_is_legacy_gamma(state)) { /* Turn off degamma/gamma on CGM block. */ I915_WRITE(CGM_PIPE_MODE(pipe), (state->ctm ? CGM_PIPE_MODE_CSC : 0)); @@ -632,12 +632,10 @@ int intel_color_check(struct drm_crtc *crtc, return 0; /* - * We also allow no degamma lut and a gamma lut at the legacy + * We also allow no degamma lut/ctm and a gamma lut at the legacy * size (256 entries). 
*/ - if (!crtc_state->degamma_lut && - crtc_state->gamma_lut && - crtc_state->gamma_lut->length == LEGACY_LUT_LENGTH) + if (crtc_state_is_legacy_gamma(crtc_state)) return 0; return -EINVAL; diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 476681d5940c..5e5fe03b638c 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -664,8 +664,8 @@ intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv, int *n_entries) { if (IS_BROADWELL(dev_priv)) { - *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi); - return hsw_ddi_translations_fdi; + *n_entries = ARRAY_SIZE(bdw_ddi_translations_fdi); + return bdw_ddi_translations_fdi; } else if (IS_HASWELL(dev_priv)) { *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi); return hsw_ddi_translations_fdi; @@ -2102,8 +2102,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder, * register writes. */ val = I915_READ(DPCLKA_CFGCR0); - val &= ~(DPCLKA_CFGCR0_DDI_CLK_OFF(port) | - DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port)); + val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port); I915_WRITE(DPCLKA_CFGCR0, val); } else if (IS_GEN9_BC(dev_priv)) { /* DDI -> PLL mapping */ diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 64f7b51ed97c..5c7828c52d12 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -10245,13 +10245,10 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, { struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; + enum transcoder cpu_transcoder; struct drm_display_mode *mode; struct intel_crtc_state *pipe_config; - int htot = I915_READ(HTOTAL(cpu_transcoder)); - int hsync = I915_READ(HSYNC(cpu_transcoder)); - int vtot = I915_READ(VTOTAL(cpu_transcoder)); - int vsync = I915_READ(VSYNC(cpu_transcoder)); + u32 htot, hsync, vtot, vsync; enum pipe pipe = intel_crtc->pipe; mode = kzalloc(sizeof(*mode), GFP_KERNEL); @@ -10279,6 +10276,13 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, i9xx_crtc_clock_get(intel_crtc, pipe_config); mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier; + + cpu_transcoder = pipe_config->cpu_transcoder; + htot = I915_READ(HTOTAL(cpu_transcoder)); + hsync = I915_READ(HSYNC(cpu_transcoder)); + vtot = I915_READ(VTOTAL(cpu_transcoder)); + vsync = I915_READ(VSYNC(cpu_transcoder)); + mode->hdisplay = (htot & 0xffff) + 1; mode->htotal = ((htot & 0xffff0000) >> 16) + 1; mode->hsync_start = (hsync & 0xffff) + 1; diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 64134947c0aa..203198659ab2 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -2307,8 +2307,8 @@ static void edp_panel_off(struct intel_dp *intel_dp) I915_WRITE(pp_ctrl_reg, pp); POSTING_READ(pp_ctrl_reg); - intel_dp->panel_power_off_time = ktime_get_boottime(); wait_panel_off(intel_dp); + intel_dp->panel_power_off_time = ktime_get_boottime(); /* We got a reference when we enabled the VDD. */ intel_display_power_put(dev_priv, intel_dp->aux_power_domain); @@ -5273,7 +5273,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev, * seems sufficient to avoid this problem. 
*/ if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) { - vbt.t11_t12 = max_t(u16, vbt.t11_t12, 900 * 10); + vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10); DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n", vbt.t11_t12); } diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index a2a3d93d67bd..df808a94c511 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c @@ -1996,7 +1996,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv, /* 3. Configure DPLL_CFGCR0 */ /* Avoid touch CFGCR1 if HDMI mode is not enabled */ - if (pll->state.hw_state.cfgcr0 & DPLL_CTRL1_HDMI_MODE(pll->id)) { + if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) { val = pll->state.hw_state.cfgcr1; I915_WRITE(CNL_DPLL_CFGCR1(pll->id), val); /* 4. Reab back to ensure writes completed */ diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 9ab596941372..3c2d9cf22ed5 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -1048,9 +1048,12 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) } /* WaProgramL3SqcReg1DefaultForPerf:bxt */ - if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) - I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) | - L3_HIGH_PRIO_CREDITS(2)); + if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) { + u32 val = I915_READ(GEN8_L3SQCREG1); + val &= ~L3_PRIO_CREDITS_MASK; + val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2); + I915_WRITE(GEN8_L3SQCREG1, val); + } /* WaToEnableHwFixForPushConstHWBug:bxt */ if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER)) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index ed662937ec3c..0a09f8ff6aff 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -8245,14 +8245,17 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv, int high_prio_credits) { u32 misccpctl; + u32 val; /* WaTempDisableDOPClkGating:bdw */ misccpctl = I915_READ(GEN7_MISCCPCTL); I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); - I915_WRITE(GEN8_L3SQCREG1, - L3_GENERAL_PRIO_CREDITS(general_prio_credits) | - L3_HIGH_PRIO_CREDITS(high_prio_credits)); + val = I915_READ(GEN8_L3SQCREG1); + val &= ~L3_PRIO_CREDITS_MASK; + val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits); + val |= L3_HIGH_PRIO_CREDITS(high_prio_credits); + I915_WRITE(GEN8_L3SQCREG1, val); /* * Wait at least 100 clocks before re-enabling clock gating. 
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index b3a087cb0860..49577eba8e7e 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -368,7 +368,7 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv, { enum i915_power_well_id id = power_well->id; bool wait_fuses = power_well->hsw.has_fuses; - enum skl_power_gate pg; + enum skl_power_gate uninitialized_var(pg); u32 val; if (wait_fuses) { diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index dbb31a014419..deaf869374ea 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -248,7 +248,7 @@ disable_clks: clk_disable_unprepare(ahb_clk); disable_gdsc: regulator_disable(gdsc_reg); - pm_runtime_put_autosuspend(dev); + pm_runtime_put_sync(dev); put_clk: clk_put(ahb_clk); put_gdsc: diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c index c2bdad88447e..824067d2d427 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c @@ -83,6 +83,8 @@ const struct mdp5_cfg_hw msm8x74v1_config = { .caps = MDP_LM_CAP_WB }, }, .nb_stages = 5, + .max_width = 2048, + .max_height = 0xFFFF, }, .dspp = { .count = 3, diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 6fcb58ab718c..440977677001 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ -804,8 +804,6 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); - pm_runtime_put_autosuspend(&pdev->dev); - set_cursor: ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable); if (ret) { diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index f15821a0d900..ea5bb0e1632c 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -610,17 +610,6 @@ int msm_gem_sync_object(struct drm_gem_object *obj, struct dma_fence *fence; int i, ret; - if (!exclusive) { - /* NOTE: _reserve_shared() must happen before _add_shared_fence(), - * which makes this a slightly strange place to call it. OTOH this - * is a convenient can-fail point to hook it in. (And similar to - * how etnaviv and nouveau handle this.) 
- */ - ret = reservation_object_reserve_shared(msm_obj->resv); - if (ret) - return ret; - } - fobj = reservation_object_get_list(msm_obj->resv); if (!fobj || (fobj->shared_count == 0)) { fence = reservation_object_get_excl(msm_obj->resv); @@ -1045,10 +1034,10 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size, } vaddr = msm_gem_get_vaddr(obj); - if (!vaddr) { + if (IS_ERR(vaddr)) { msm_gem_put_iova(obj, aspace); drm_gem_object_unreference(obj); - return ERR_PTR(-ENOMEM); + return ERR_CAST(vaddr); } if (bo) diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 5d0a75d4b249..93535cac0676 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -221,7 +221,7 @@ fail: return ret; } -static int submit_fence_sync(struct msm_gem_submit *submit) +static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit) { int i, ret = 0; @@ -229,6 +229,20 @@ static int submit_fence_sync(struct msm_gem_submit *submit) struct msm_gem_object *msm_obj = submit->bos[i].obj; bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE; + if (!write) { + /* NOTE: _reserve_shared() must happen before + * _add_shared_fence(), which makes this a slightly + * strange place to call it. OTOH this is a + * convenient can-fail point to hook it in. + */ + ret = reservation_object_reserve_shared(msm_obj->resv); + if (ret) + return ret; + } + + if (no_implicit) + continue; + ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write); if (ret) break; @@ -451,11 +465,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, if (ret) goto out; - if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) { - ret = submit_fence_sync(submit); - if (ret) - goto out; - } + ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT)); + if (ret) + goto out; ret = submit_pin_objects(submit); if (ret) diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index ffbff27600e0..6a887032c66a 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -718,7 +718,8 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) msm_gem_put_iova(gpu->rb->bo, gpu->aspace); msm_ringbuffer_destroy(gpu->rb); } - if (gpu->aspace) { + + if (!IS_ERR_OR_NULL(gpu->aspace)) { gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu, NULL, 0); msm_gem_address_space_put(gpu->aspace); diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index 0366b8092f97..ec56794ad039 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c @@ -111,10 +111,14 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz) wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0); + /* Note that smp_load_acquire() is not strictly required + * as CIRC_SPACE_TO_END() does not access the tail more + * than once. + */ n = min(sz, circ_space_to_end(&rd->fifo)); memcpy(fptr, ptr, n); - fifo->head = (fifo->head + n) & (BUF_SZ - 1); + smp_store_release(&fifo->head, (fifo->head + n) & (BUF_SZ - 1)); sz -= n; ptr += n; @@ -145,13 +149,17 @@ static ssize_t rd_read(struct file *file, char __user *buf, if (ret) goto out; + /* Note that smp_load_acquire() is not strictly required + * as CIRC_CNT_TO_END() does not access the head more than + * once. 
+ */ n = min_t(int, sz, circ_count_to_end(&rd->fifo)); if (copy_to_user(buf, fptr, n)) { ret = -EFAULT; goto out; } - fifo->tail = (fifo->tail + n) & (BUF_SZ - 1); + smp_store_release(&fifo->tail, (fifo->tail + n) & (BUF_SZ - 1)); *ppos += n; wake_up_all(&rd->fifo_event); diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index f7707849bb53..2b12d82aac15 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -223,7 +223,7 @@ void nouveau_fbcon_accel_save_disable(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); - if (drm->fbcon) { + if (drm->fbcon && drm->fbcon->helper.fbdev) { drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags; drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; } @@ -233,7 +233,7 @@ void nouveau_fbcon_accel_restore(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); - if (drm->fbcon) { + if (drm->fbcon && drm->fbcon->helper.fbdev) { drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags; } } @@ -245,7 +245,8 @@ nouveau_fbcon_accel_fini(struct drm_device *dev) struct nouveau_fbdev *fbcon = drm->fbcon; if (fbcon && drm->channel) { console_lock(); - fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; + if (fbcon->helper.fbdev) + fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; console_unlock(); nouveau_channel_idle(drm->channel); nvif_object_fini(&fbcon->twod); diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 2dbf62a2ac41..e4751f92b342 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -3265,11 +3265,14 @@ nv50_mstm = { void nv50_mstm_service(struct nv50_mstm *mstm) { - struct drm_dp_aux *aux = mstm->mgr.aux; + struct drm_dp_aux *aux = mstm ? mstm->mgr.aux : NULL; bool handled = true; int ret; u8 esi[8] = {}; + if (!aux) + return; + while (handled) { ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8); if (ret != 8) { diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c index 8e2e24a74774..44e116f7880d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c @@ -39,5 +39,5 @@ int g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine) { return nvkm_xtensa_new_(&g84_bsp, device, index, - true, 0x103000, pengine); + device->chipset != 0x92, 0x103000, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c index d06ad2c372bf..455da298227f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c @@ -241,6 +241,8 @@ nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde) mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem); } + mmu->func->flush(vm); + nvkm_memory_del(&pgt); } } diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index 6a573d21d3cc..658fa2d3e40c 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c @@ -405,6 +405,14 @@ int ipu_idmac_lock_enable(struct ipuv3_channel *channel, int num_bursts) return -EINVAL; } + /* + * IPUv3EX / i.MX51 has a different register layout, and on IPUv3M / + * i.MX53 channel arbitration locking doesn't seem to work properly. + * Allow enabling the lock feature on IPUv3H / i.MX6 only. 
+ */ + if (bursts && ipu->ipu_type != IPUV3H) + return -EINVAL; + for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) { if (channel->num == idmac_lock_en_info[i].chnum) break; diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c index c35f74c83065..c860a7997cb5 100644 --- a/drivers/gpu/ipu-v3/ipu-pre.c +++ b/drivers/gpu/ipu-v3/ipu-pre.c @@ -73,6 +73,14 @@ #define IPU_PRE_STORE_ENG_CTRL_WR_NUM_BYTES(v) ((v & 0x7) << 1) #define IPU_PRE_STORE_ENG_CTRL_OUTPUT_ACTIVE_BPP(v) ((v & 0x3) << 4) +#define IPU_PRE_STORE_ENG_STATUS 0x120 +#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_MASK 0xffff +#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_SHIFT 0 +#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK 0x3fff +#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT 16 +#define IPU_PRE_STORE_ENG_STATUS_STORE_FIFO_FULL (1 << 30) +#define IPU_PRE_STORE_ENG_STATUS_STORE_FIELD (1 << 31) + #define IPU_PRE_STORE_ENG_SIZE 0x130 #define IPU_PRE_STORE_ENG_SIZE_INPUT_WIDTH(v) ((v & 0xffff) << 0) #define IPU_PRE_STORE_ENG_SIZE_INPUT_HEIGHT(v) ((v & 0xffff) << 16) @@ -93,6 +101,7 @@ struct ipu_pre { dma_addr_t buffer_paddr; void *buffer_virt; bool in_use; + unsigned int safe_window_end; }; static DEFINE_MUTEX(ipu_pre_list_mutex); @@ -160,6 +169,9 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width, u32 active_bpp = info->cpp[0] >> 1; u32 val; + /* calculate safe window for ctrl register updates */ + pre->safe_window_end = height - 2; + writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF); writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); @@ -199,7 +211,24 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width, void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr) { + unsigned long timeout = jiffies + msecs_to_jiffies(5); + unsigned short current_yblock; + u32 val; + writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); + + do { + if (time_after(jiffies, timeout)) { + dev_warn(pre->dev, "timeout waiting for PRE safe window\n"); + return; + } + + val = readl(pre->regs + IPU_PRE_STORE_ENG_STATUS); + current_yblock = + (val >> IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT) & + IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK; + } while (current_yblock == 0 || current_yblock >= pre->safe_window_end); + writel(IPU_PRE_CTRL_SDW_UPDATE, pre->regs + IPU_PRE_CTRL_SET); } diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c index ecc9ea44dc50..0013ca9f72c8 100644 --- a/drivers/gpu/ipu-v3/ipu-prg.c +++ b/drivers/gpu/ipu-v3/ipu-prg.c @@ -14,6 +14,7 @@ #include <drm/drm_fourcc.h> #include <linux/clk.h> #include <linux/err.h> +#include <linux/iopoll.h> #include <linux/mfd/syscon.h> #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> #include <linux/module.h> @@ -329,6 +330,12 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan, val = IPU_PRG_REG_UPDATE_REG_UPDATE; writel(val, prg->regs + IPU_PRG_REG_UPDATE); + /* wait for both double buffers to be filled */ + readl_poll_timeout(prg->regs + IPU_PRG_STATUS, val, + (val & IPU_PRG_STATUS_BUFFER0_READY(prg_chan)) && + (val & IPU_PRG_STATUS_BUFFER1_READY(prg_chan)), + 5, 1000); + clk_disable_unprepare(prg->clk_ipg); chan->enabled = true; diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 0a3117cc29e7..374301fcbc86 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -281,6 +281,7 @@ config HID_ELECOM Support for ELECOM devices: - BM084 Bluetooth Mouse - DEFT Trackball (Wired and wireless) + - HUGE Trackball (Wired and wireless) config HID_ELO tristate "ELO USB 4000/4500 touchscreen" diff --git 
a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 9bc91160819b..330ca983828b 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -2032,6 +2032,8 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) }, #endif #if IS_ENABLED(CONFIG_HID_ELO) { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) }, diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c index e2c7465df69f..54aeea57d209 100644 --- a/drivers/hid/hid-elecom.c +++ b/drivers/hid/hid-elecom.c @@ -3,6 +3,7 @@ * Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com> * Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com> * Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu> + * Copyright (c) 2017 Alex Manoussakis <amanou@gnu.org> */ /* @@ -32,9 +33,11 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc, break; case USB_DEVICE_ID_ELECOM_DEFT_WIRED: case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS: - /* The DEFT trackball has eight buttons, but its descriptor only - * reports five, disabling the three Fn buttons on the top of - * the mouse. + case USB_DEVICE_ID_ELECOM_HUGE_WIRED: + case USB_DEVICE_ID_ELECOM_HUGE_WIRELESS: + /* The DEFT/HUGE trackball has eight buttons, but its descriptor + * only reports five, disabling the three Fn buttons on the top + * of the mouse. * * Apply the following diff to the descriptor: * @@ -62,7 +65,7 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc, * End Collection, End Collection, */ if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) { - hid_info(hdev, "Fixing up Elecom DEFT Fn buttons\n"); + hid_info(hdev, "Fixing up Elecom DEFT/HUGE Fn buttons\n"); rdesc[13] = 8; /* Button/Variable Report Count */ rdesc[21] = 8; /* Button/Variable Usage Maximum */ rdesc[29] = 0; /* Button/Constant Report Count */ @@ -76,6 +79,8 @@ static const struct hid_device_id elecom_devices[] = { { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) }, { } }; MODULE_DEVICE_TABLE(hid, elecom_devices); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index a98919199858..be2e005c3c51 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -368,6 +368,8 @@ #define USB_DEVICE_ID_ELECOM_BM084 0x0061 #define USB_DEVICE_ID_ELECOM_DEFT_WIRED 0x00fe #define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS 0x00ff +#define USB_DEVICE_ID_ELECOM_HUGE_WIRED 0x010c +#define USB_DEVICE_ID_ELECOM_HUGE_WIRELESS 0x010d #define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34 #define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004 diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index 089bad8a9a21..045b5da9b992 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c @@ -975,6 +975,8 @@ static int usbhid_parse(struct hid_device *hid) unsigned int rsize = 0; char *rdesc; int ret, n; + int num_descriptors; + size_t offset = 
offsetof(struct hid_descriptor, desc); quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor), le16_to_cpu(dev->descriptor.idProduct)); @@ -997,10 +999,18 @@ static int usbhid_parse(struct hid_device *hid) return -ENODEV; } + if (hdesc->bLength < sizeof(struct hid_descriptor)) { + dbg_hid("hid descriptor is too short\n"); + return -EINVAL; + } + hid->version = le16_to_cpu(hdesc->bcdHID); hid->country = hdesc->bCountryCode; - for (n = 0; n < hdesc->bNumDescriptors; n++) + num_descriptors = min_t(int, hdesc->bNumDescriptors, + (hdesc->bLength - offset) / sizeof(struct hid_class_descriptor)); + + for (n = 0; n < num_descriptors; n++) if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT) rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength); diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index efd5db743319..894b67ac2cae 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -640,6 +640,7 @@ void vmbus_close(struct vmbus_channel *channel) */ return; } + mutex_lock(&vmbus_connection.channel_mutex); /* * Close all the sub-channels first and then close the * primary channel. @@ -648,16 +649,15 @@ void vmbus_close(struct vmbus_channel *channel) cur_channel = list_entry(cur, struct vmbus_channel, sc_list); vmbus_close_internal(cur_channel); if (cur_channel->rescind) { - mutex_lock(&vmbus_connection.channel_mutex); - hv_process_channel_removal(cur_channel, + hv_process_channel_removal( cur_channel->offermsg.child_relid); - mutex_unlock(&vmbus_connection.channel_mutex); } } /* * Now close the primary. */ vmbus_close_internal(channel); + mutex_unlock(&vmbus_connection.channel_mutex); } EXPORT_SYMBOL_GPL(vmbus_close); diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index bcbb031f7263..018d2e0f8ec5 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -159,7 +159,7 @@ static void vmbus_rescind_cleanup(struct vmbus_channel *channel) spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); - + channel->rescind = true; list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list, msglistentry) { @@ -381,14 +381,21 @@ static void vmbus_release_relid(u32 relid) true); } -void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) +void hv_process_channel_removal(u32 relid) { unsigned long flags; - struct vmbus_channel *primary_channel; + struct vmbus_channel *primary_channel, *channel; - BUG_ON(!channel->rescind); BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex)); + /* + * Make sure channel is valid as we may have raced. + */ + channel = relid2channel(relid); + if (!channel) + return; + + BUG_ON(!channel->rescind); if (channel->target_cpu != get_cpu()) { put_cpu(); smp_call_function_single(channel->target_cpu, @@ -515,6 +522,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) if (!fnew) { if (channel->sc_creation_callback != NULL) channel->sc_creation_callback(newchannel); + newchannel->probe_done = true; return; } @@ -834,7 +842,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) { struct vmbus_channel_rescind_offer *rescind; struct vmbus_channel *channel; - unsigned long flags; struct device *dev; rescind = (struct vmbus_channel_rescind_offer *)hdr; @@ -873,16 +880,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) return; } - spin_lock_irqsave(&channel->lock, flags); - channel->rescind = true; - spin_unlock_irqrestore(&channel->lock, flags); - - /* - * Now that we have posted the rescind state, perform - * rescind related cleanup. 
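The usbhid_parse() change above clamps bNumDescriptors to what the transferred descriptor can actually hold. Here is the same arithmetic as a free-standing helper, with deliberately simplified stand-in layouts (the real structures live in <linux/hid.h>):

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>

struct demo_class_descriptor {
	u8	bDescriptorType;
	__le16	wDescriptorLength;
} __packed;

struct demo_hid_descriptor {
	u8	bLength;
	u8	bDescriptorType;
	__le16	bcdHID;
	u8	bCountryCode;
	u8	bNumDescriptors;
	struct demo_class_descriptor desc[1];
} __packed;

static int demo_num_class_descriptors(const struct demo_hid_descriptor *hdesc)
{
	size_t offset = offsetof(struct demo_hid_descriptor, desc);

	/* shorter than the fixed header plus one entry: reject outright */
	if (hdesc->bLength < sizeof(*hdesc))
		return -EINVAL;

	/* never trust the device-supplied count beyond what bLength covers */
	return min_t(int, hdesc->bNumDescriptors,
		     (hdesc->bLength - offset) /
		     sizeof(struct demo_class_descriptor));
}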
- */ - vmbus_rescind_cleanup(channel); - /* * Now wait for offer handling to complete. */ @@ -901,6 +898,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) if (channel->device_obj) { if (channel->chn_rescind_callback) { channel->chn_rescind_callback(channel); + vmbus_rescind_cleanup(channel); return; } /* @@ -909,6 +907,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) */ dev = get_device(&channel->device_obj->device); if (dev) { + vmbus_rescind_cleanup(channel); vmbus_device_unregister(channel->device_obj); put_device(dev); } @@ -921,16 +920,16 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) * 1. Close all sub-channels first * 2. Then close the primary channel. */ + mutex_lock(&vmbus_connection.channel_mutex); + vmbus_rescind_cleanup(channel); if (channel->state == CHANNEL_OPEN_STATE) { /* * The channel is currently not open; * it is safe for us to cleanup the channel. */ - mutex_lock(&vmbus_connection.channel_mutex); - hv_process_channel_removal(channel, - channel->offermsg.child_relid); - mutex_unlock(&vmbus_connection.channel_mutex); + hv_process_channel_removal(rescind->child_relid); } + mutex_unlock(&vmbus_connection.channel_mutex); } } diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index a9d49f6f6501..937801ac2fe0 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -768,8 +768,7 @@ static void vmbus_device_release(struct device *device) struct vmbus_channel *channel = hv_dev->channel; mutex_lock(&vmbus_connection.channel_mutex); - hv_process_channel_removal(channel, - channel->offermsg.child_relid); + hv_process_channel_removal(channel->offermsg.child_relid); mutex_unlock(&vmbus_connection.channel_mutex); kfree(hv_dev); diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 54a47b40546f..f96830ffd9f1 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -1021,7 +1021,7 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx, } dev_dbg(&pdev->dev, "using scl-gpio %d and sda-gpio %d for recovery\n", - rinfo->sda_gpio, rinfo->scl_gpio); + rinfo->scl_gpio, rinfo->sda_gpio); rinfo->prepare_recovery = i2c_imx_prepare_recovery; rinfo->unprepare_recovery = i2c_imx_unprepare_recovery; @@ -1100,7 +1100,7 @@ static int i2c_imx_probe(struct platform_device *pdev) } /* Request IRQ */ - ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, 0, + ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, IRQF_SHARED, pdev->name, i2c_imx); if (ret) { dev_err(&pdev->dev, "can't claim irq %d\n", irq); diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c index 22ffcb73c185..b51adffa4841 100644 --- a/drivers/i2c/busses/i2c-ismt.c +++ b/drivers/i2c/busses/i2c-ismt.c @@ -340,12 +340,15 @@ static int ismt_process_desc(const struct ismt_desc *desc, data->word = dma_buffer[0] | (dma_buffer[1] << 8); break; case I2C_SMBUS_BLOCK_DATA: - case I2C_SMBUS_I2C_BLOCK_DATA: if (desc->rxbytes != dma_buffer[0] + 1) return -EMSGSIZE; memcpy(data->block, dma_buffer, desc->rxbytes); break; + case I2C_SMBUS_I2C_BLOCK_DATA: + memcpy(&data->block[1], dma_buffer, desc->rxbytes); + data->block[0] = desc->rxbytes; + break; } return 0; } diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 1ebb5e947e0b..23c2ea2baedc 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -360,6 +360,7 @@ static int omap_i2c_init(struct omap_i2c_dev *omap) unsigned long fclk_rate = 12000000; 
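Stepping back to the i2c-ismt change above: an SMBus block read returns a leading count byte on the wire, while an I2C block read does not, so the two cases must unpack the controller's DMA buffer differently. A hedged sketch of that unpacking (the helper is illustrative; the driver does this inline in ismt_process_desc()):

#include <linux/i2c.h>
#include <linux/errno.h>
#include <linux/string.h>

static int demo_unpack_block(int size, const u8 *dma_buffer, u8 rxbytes,
			     union i2c_smbus_data *data)
{
	if (rxbytes > I2C_SMBUS_BLOCK_MAX + 1)
		return -EMSGSIZE;

	switch (size) {
	case I2C_SMBUS_BLOCK_DATA:
		/* wire format: <count> <data...>; the count must match */
		if (rxbytes != dma_buffer[0] + 1)
			return -EMSGSIZE;
		memcpy(data->block, dma_buffer, rxbytes);
		break;
	case I2C_SMBUS_I2C_BLOCK_DATA:
		/* no count byte on the wire: synthesize block[0] ourselves */
		memcpy(&data->block[1], dma_buffer, rxbytes);
		data->block[0] = rxbytes;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}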
unsigned long internal_clk = 0; struct clk *fclk; + int error; if (omap->rev >= OMAP_I2C_REV_ON_3430_3530) { /* @@ -378,6 +379,13 @@ static int omap_i2c_init(struct omap_i2c_dev *omap) * do this bit unconditionally. */ fclk = clk_get(omap->dev, "fck"); + if (IS_ERR(fclk)) { + error = PTR_ERR(fclk); + dev_err(omap->dev, "could not get fck: %i\n", error); + + return error; + } + fclk_rate = clk_get_rate(fclk); clk_put(fclk); @@ -410,6 +418,12 @@ static int omap_i2c_init(struct omap_i2c_dev *omap) else internal_clk = 4000; fclk = clk_get(omap->dev, "fck"); + if (IS_ERR(fclk)) { + error = PTR_ERR(fclk); + dev_err(omap->dev, "could not get fck: %i\n", error); + + return error; + } fclk_rate = clk_get_rate(fclk) / 1000; clk_put(fclk); diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index 0ecdb47a23ab..174579d32e5f 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -85,6 +85,9 @@ /* SB800 constants */ #define SB800_PIIX4_SMB_IDX 0xcd6 +#define KERNCZ_IMC_IDX 0x3e +#define KERNCZ_IMC_DATA 0x3f + /* * SB800 port is selected by bits 2:1 of the smb_en register (0x2c) * or the smb_sel register (0x2e), depending on bit 0 of register 0x2f. @@ -94,6 +97,12 @@ #define SB800_PIIX4_PORT_IDX_ALT 0x2e #define SB800_PIIX4_PORT_IDX_SEL 0x2f #define SB800_PIIX4_PORT_IDX_MASK 0x06 +#define SB800_PIIX4_PORT_IDX_SHIFT 1 + +/* On kerncz, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */ +#define SB800_PIIX4_PORT_IDX_KERNCZ 0x02 +#define SB800_PIIX4_PORT_IDX_MASK_KERNCZ 0x18 +#define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ 3 /* insmod parameters */ @@ -149,6 +158,8 @@ static const struct dmi_system_id piix4_dmi_ibm[] = { */ static DEFINE_MUTEX(piix4_mutex_sb800); static u8 piix4_port_sel_sb800; +static u8 piix4_port_mask_sb800; +static u8 piix4_port_shift_sb800; static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = { " port 0", " port 2", " port 3", " port 4" }; @@ -159,6 +170,7 @@ struct i2c_piix4_adapdata { /* SB800 */ bool sb800_main; + bool notify_imc; u8 port; /* Port number, shifted */ }; @@ -347,7 +359,19 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev, /* Find which register is used for port selection */ if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD) { - piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT; + switch (PIIX4_dev->device) { + case PCI_DEVICE_ID_AMD_KERNCZ_SMBUS: + piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ; + piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ; + piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ; + break; + case PCI_DEVICE_ID_AMD_HUDSON2_SMBUS: + default: + piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT; + piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK; + piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT; + break; + } } else { mutex_lock(&piix4_mutex_sb800); outb_p(SB800_PIIX4_PORT_IDX_SEL, SB800_PIIX4_SMB_IDX); @@ -355,6 +379,8 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev, piix4_port_sel_sb800 = (port_sel & 0x01) ? 
SB800_PIIX4_PORT_IDX_ALT : SB800_PIIX4_PORT_IDX; + piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK; + piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT; mutex_unlock(&piix4_mutex_sb800); } @@ -572,6 +598,67 @@ static s32 piix4_access(struct i2c_adapter * adap, u16 addr, return 0; } +static uint8_t piix4_imc_read(uint8_t idx) +{ + outb_p(idx, KERNCZ_IMC_IDX); + return inb_p(KERNCZ_IMC_DATA); +} + +static void piix4_imc_write(uint8_t idx, uint8_t value) +{ + outb_p(idx, KERNCZ_IMC_IDX); + outb_p(value, KERNCZ_IMC_DATA); +} + +static int piix4_imc_sleep(void) +{ + int timeout = MAX_TIMEOUT; + + if (!request_muxed_region(KERNCZ_IMC_IDX, 2, "smbus_kerncz_imc")) + return -EBUSY; + + /* clear response register */ + piix4_imc_write(0x82, 0x00); + /* request ownership flag */ + piix4_imc_write(0x83, 0xB4); + /* kick off IMC Mailbox command 96 */ + piix4_imc_write(0x80, 0x96); + + while (timeout--) { + if (piix4_imc_read(0x82) == 0xfa) { + release_region(KERNCZ_IMC_IDX, 2); + return 0; + } + usleep_range(1000, 2000); + } + + release_region(KERNCZ_IMC_IDX, 2); + return -ETIMEDOUT; +} + +static void piix4_imc_wakeup(void) +{ + int timeout = MAX_TIMEOUT; + + if (!request_muxed_region(KERNCZ_IMC_IDX, 2, "smbus_kerncz_imc")) + return; + + /* clear response register */ + piix4_imc_write(0x82, 0x00); + /* release ownership flag */ + piix4_imc_write(0x83, 0xB5); + /* kick off IMC Mailbox command 96 */ + piix4_imc_write(0x80, 0x96); + + while (timeout--) { + if (piix4_imc_read(0x82) == 0xfa) + break; + usleep_range(1000, 2000); + } + + release_region(KERNCZ_IMC_IDX, 2); +} + /* * Handles access to multiple SMBus ports on the SB800. * The port is selected by bits 2:1 of the smb_en register (0x2c). @@ -612,12 +699,47 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr, return -EBUSY; } + /* + * Notify the IMC (Integrated Micro Controller) if required. + * Among other responsibilities, the IMC is in charge of monitoring + * the System fans and temperature sensors, and act accordingly. + * All this is done through SMBus and can/will collide + * with our transactions if they are long (BLOCK_DATA). + * Therefore we need to request the ownership flag during those + * transactions. 
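In other words, a long transaction is meant to be bracketed by the two mailbox helpers added above. A rough sketch of that sequence (hypothetical wrapper; the driver open-codes it in piix4_access_sb800() and simply stops notifying the IMC if the handshake fails):

#include <linux/device.h>
#include <linux/errno.h>

static int demo_xfer_with_imc_quiesced(struct device *dev)
{
	int imc = piix4_imc_sleep();
	int ret;

	if (imc == -EBUSY)
		dev_warn(dev, "IMC index region busy, continuing anyway\n");
	else if (imc == -ETIMEDOUT)
		dev_warn(dev, "IMC did not acknowledge, continuing anyway\n");

	/* ... run the SMBus block transfer while the IMC is parked ... */
	ret = 0;

	if (imc == 0)
		piix4_imc_wakeup();	/* give the bus back to the IMC */

	return ret;
}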
+ */ + if ((size == I2C_SMBUS_BLOCK_DATA) && adapdata->notify_imc) { + int ret; + + ret = piix4_imc_sleep(); + switch (ret) { + case -EBUSY: + dev_warn(&adap->dev, + "IMC base address index region 0x%x already in use.\n", + KERNCZ_IMC_IDX); + break; + case -ETIMEDOUT: + dev_warn(&adap->dev, + "Failed to communicate with the IMC.\n"); + break; + default: + break; + } + + /* If IMC communication fails do not retry */ + if (ret) { + dev_warn(&adap->dev, + "Continuing without IMC notification.\n"); + adapdata->notify_imc = false; + } + } + outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX); smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1); port = adapdata->port; - if ((smba_en_lo & SB800_PIIX4_PORT_IDX_MASK) != port) - outb_p((smba_en_lo & ~SB800_PIIX4_PORT_IDX_MASK) | port, + if ((smba_en_lo & piix4_port_mask_sb800) != port) + outb_p((smba_en_lo & ~piix4_port_mask_sb800) | port, SB800_PIIX4_SMB_IDX + 1); retval = piix4_access(adap, addr, flags, read_write, @@ -628,6 +750,9 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr, /* Release the semaphore */ outb_p(smbslvcnt | 0x20, SMBSLVCNT); + if ((size == I2C_SMBUS_BLOCK_DATA) && adapdata->notify_imc) + piix4_imc_wakeup(); + mutex_unlock(&piix4_mutex_sb800); return retval; @@ -679,7 +804,7 @@ static struct i2c_adapter *piix4_main_adapters[PIIX4_MAX_ADAPTERS]; static struct i2c_adapter *piix4_aux_adapter; static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba, - bool sb800_main, u8 port, + bool sb800_main, u8 port, bool notify_imc, const char *name, struct i2c_adapter **padap) { struct i2c_adapter *adap; @@ -706,7 +831,8 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba, adapdata->smba = smba; adapdata->sb800_main = sb800_main; - adapdata->port = port << 1; + adapdata->port = port << piix4_port_shift_sb800; + adapdata->notify_imc = notify_imc; /* set up the sysfs linkage to our parent device */ adap->dev.parent = &dev->dev; @@ -728,14 +854,15 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba, return 0; } -static int piix4_add_adapters_sb800(struct pci_dev *dev, unsigned short smba) +static int piix4_add_adapters_sb800(struct pci_dev *dev, unsigned short smba, + bool notify_imc) { struct i2c_piix4_adapdata *adapdata; int port; int retval; for (port = 0; port < PIIX4_MAX_ADAPTERS; port++) { - retval = piix4_add_adapter(dev, smba, true, port, + retval = piix4_add_adapter(dev, smba, true, port, notify_imc, piix4_main_port_names_sb800[port], &piix4_main_adapters[port]); if (retval < 0) @@ -769,6 +896,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS && dev->revision >= 0x40) || dev->vendor == PCI_VENDOR_ID_AMD) { + bool notify_imc = false; is_sb800 = true; if (!request_region(SB800_PIIX4_SMB_IDX, 2, "smba_idx")) { @@ -778,6 +906,20 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) return -EBUSY; } + if (dev->vendor == PCI_VENDOR_ID_AMD && + dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS) { + u8 imc; + + /* + * Detect if IMC is active or not, this method is + * described on coreboot's AMD IMC notes + */ + pci_bus_read_config_byte(dev->bus, PCI_DEVFN(0x14, 3), + 0x40, &imc); + if (imc & 0x80) + notify_imc = true; + } + /* base address location etc changed in SB800 */ retval = piix4_setup_sb800(dev, id, 0); if (retval < 0) { @@ -789,7 +931,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) * Try to register multiplexed main SMBus adapter, * give up if 
we can't */ - retval = piix4_add_adapters_sb800(dev, retval); + retval = piix4_add_adapters_sb800(dev, retval, notify_imc); if (retval < 0) { release_region(SB800_PIIX4_SMB_IDX, 2); return retval; @@ -800,7 +942,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) return retval; /* Try to register main SMBus adapter, give up if we can't */ - retval = piix4_add_adapter(dev, retval, false, 0, "", + retval = piix4_add_adapter(dev, retval, false, 0, false, "", &piix4_main_adapters[0]); if (retval < 0) return retval; @@ -827,7 +969,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) if (retval > 0) { /* Try to add the aux adapter if it exists, * piix4_add_adapter will clean up if this fails */ - piix4_add_adapter(dev, retval, false, 0, + piix4_add_adapter(dev, retval, false, 0, false, is_sb800 ? piix4_aux_port_name_sb800 : "", &piix4_aux_adapter); } diff --git a/drivers/input/input.c b/drivers/input/input.c index d268fdc23c64..762bfb9487dc 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -933,58 +933,52 @@ int input_set_keycode(struct input_dev *dev, } EXPORT_SYMBOL(input_set_keycode); +bool input_match_device_id(const struct input_dev *dev, + const struct input_device_id *id) +{ + if (id->flags & INPUT_DEVICE_ID_MATCH_BUS) + if (id->bustype != dev->id.bustype) + return false; + + if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR) + if (id->vendor != dev->id.vendor) + return false; + + if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT) + if (id->product != dev->id.product) + return false; + + if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION) + if (id->version != dev->id.version) + return false; + + if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX) || + !bitmap_subset(id->keybit, dev->keybit, KEY_MAX) || + !bitmap_subset(id->relbit, dev->relbit, REL_MAX) || + !bitmap_subset(id->absbit, dev->absbit, ABS_MAX) || + !bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX) || + !bitmap_subset(id->ledbit, dev->ledbit, LED_MAX) || + !bitmap_subset(id->sndbit, dev->sndbit, SND_MAX) || + !bitmap_subset(id->ffbit, dev->ffbit, FF_MAX) || + !bitmap_subset(id->swbit, dev->swbit, SW_MAX) || + !bitmap_subset(id->propbit, dev->propbit, INPUT_PROP_MAX)) { + return false; + } + + return true; +} +EXPORT_SYMBOL(input_match_device_id); + static const struct input_device_id *input_match_device(struct input_handler *handler, struct input_dev *dev) { const struct input_device_id *id; for (id = handler->id_table; id->flags || id->driver_info; id++) { - - if (id->flags & INPUT_DEVICE_ID_MATCH_BUS) - if (id->bustype != dev->id.bustype) - continue; - - if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR) - if (id->vendor != dev->id.vendor) - continue; - - if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT) - if (id->product != dev->id.product) - continue; - - if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION) - if (id->version != dev->id.version) - continue; - - if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX)) - continue; - - if (!bitmap_subset(id->keybit, dev->keybit, KEY_MAX)) - continue; - - if (!bitmap_subset(id->relbit, dev->relbit, REL_MAX)) - continue; - - if (!bitmap_subset(id->absbit, dev->absbit, ABS_MAX)) - continue; - - if (!bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX)) - continue; - - if (!bitmap_subset(id->ledbit, dev->ledbit, LED_MAX)) - continue; - - if (!bitmap_subset(id->sndbit, dev->sndbit, SND_MAX)) - continue; - - if (!bitmap_subset(id->ffbit, dev->ffbit, FF_MAX)) - continue; - - if (!bitmap_subset(id->swbit, dev->swbit, SW_MAX)) - continue; - - if 
(!handler->match || handler->match(handler, dev)) + if (input_match_device_id(dev, id) && + (!handler->match || handler->match(handler, dev))) { return id; + } } return NULL; diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c index 29d677c714d2..7b29a8944039 100644 --- a/drivers/input/joydev.c +++ b/drivers/input/joydev.c @@ -747,6 +747,68 @@ static void joydev_cleanup(struct joydev *joydev) input_close_device(handle); } +/* + * These codes are copied from from hid-ids.h, unfortunately there is no common + * usb_ids/bt_ids.h header. + */ +#define USB_VENDOR_ID_SONY 0x054c +#define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268 +#define USB_DEVICE_ID_SONY_PS4_CONTROLLER 0x05c4 +#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2 0x09cc +#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE 0x0ba0 + +#define USB_VENDOR_ID_THQ 0x20d6 +#define USB_DEVICE_ID_THQ_PS3_UDRAW 0xcb17 + +#define ACCEL_DEV(vnd, prd) \ + { \ + .flags = INPUT_DEVICE_ID_MATCH_VENDOR | \ + INPUT_DEVICE_ID_MATCH_PRODUCT | \ + INPUT_DEVICE_ID_MATCH_PROPBIT, \ + .vendor = (vnd), \ + .product = (prd), \ + .propbit = { BIT_MASK(INPUT_PROP_ACCELEROMETER) }, \ + } + +static const struct input_device_id joydev_blacklist[] = { + /* Avoid touchpads and touchscreens */ + { + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | + INPUT_DEVICE_ID_MATCH_KEYBIT, + .evbit = { BIT_MASK(EV_KEY) }, + .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) }, + }, + /* Avoid tablets, digitisers and similar devices */ + { + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | + INPUT_DEVICE_ID_MATCH_KEYBIT, + .evbit = { BIT_MASK(EV_KEY) }, + .keybit = { [BIT_WORD(BTN_DIGI)] = BIT_MASK(BTN_DIGI) }, + }, + /* Disable accelerometers on composite devices */ + ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER), + ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER), + ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2), + ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE), + ACCEL_DEV(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW), + { /* sentinel */ } +}; + +static bool joydev_dev_is_blacklisted(struct input_dev *dev) +{ + const struct input_device_id *id; + + for (id = joydev_blacklist; id->flags; id++) { + if (input_match_device_id(dev, id)) { + dev_dbg(&dev->dev, + "joydev: blacklisting '%s'\n", dev->name); + return true; + } + } + + return false; +} + static bool joydev_dev_is_absolute_mouse(struct input_dev *dev) { DECLARE_BITMAP(jd_scratch, KEY_CNT); @@ -807,12 +869,8 @@ static bool joydev_dev_is_absolute_mouse(struct input_dev *dev) static bool joydev_match(struct input_handler *handler, struct input_dev *dev) { - /* Avoid touchpads and touchscreens */ - if (test_bit(EV_KEY, dev->evbit) && test_bit(BTN_TOUCH, dev->keybit)) - return false; - - /* Avoid tablets, digitisers and similar devices */ - if (test_bit(EV_KEY, dev->evbit) && test_bit(BTN_DIGI, dev->keybit)) + /* Disable blacklisted devices */ + if (joydev_dev_is_blacklisted(dev)) return false; /* Avoid absolute mice */ diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c index e37e335e406f..6da607d3b811 100644 --- a/drivers/input/keyboard/tca8418_keypad.c +++ b/drivers/input/keyboard/tca8418_keypad.c @@ -234,14 +234,7 @@ static irqreturn_t tca8418_irq_handler(int irq, void *dev_id) static int tca8418_configure(struct tca8418_keypad *keypad_data, u32 rows, u32 cols) { - int reg, error; - - /* Write config register, if this fails assume device not present */ - error = tca8418_write_byte(keypad_data, REG_CFG, - 
CFG_INT_CFG | CFG_OVR_FLOW_IEN | CFG_KE_IEN); - if (error < 0) - return -ENODEV; - + int reg, error = 0; /* Assemble a mask for row and column registers */ reg = ~(~0 << rows); @@ -257,6 +250,12 @@ static int tca8418_configure(struct tca8418_keypad *keypad_data, error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS2, reg >> 8); error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS3, reg >> 16); + if (error) + return error; + + error = tca8418_write_byte(keypad_data, REG_CFG, + CFG_INT_CFG | CFG_OVR_FLOW_IEN | CFG_KE_IEN); + return error; } @@ -268,6 +267,7 @@ static int tca8418_keypad_probe(struct i2c_client *client, struct input_dev *input; u32 rows = 0, cols = 0; int error, row_shift, max_keys; + u8 reg; /* Check i2c driver capabilities */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) { @@ -301,10 +301,10 @@ static int tca8418_keypad_probe(struct i2c_client *client, keypad_data->client = client; keypad_data->row_shift = row_shift; - /* Initialize the chip or fail if chip isn't present */ - error = tca8418_configure(keypad_data, rows, cols); - if (error < 0) - return error; + /* Read key lock register, if this fails assume device not present */ + error = tca8418_read_byte(keypad_data, REG_KEY_LCK_EC, ®); + if (error) + return -ENODEV; /* Configure input device */ input = devm_input_allocate_device(dev); @@ -340,6 +340,11 @@ static int tca8418_keypad_probe(struct i2c_client *client, return error; } + /* Initialize the chip */ + error = tca8418_configure(keypad_data, rows, cols); + if (error < 0) + return error; + error = input_register_device(input); if (error) { dev_err(dev, "Unable to register input device, error: %d\n", diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c index 6cee5adc3b5c..debeeaeb8812 100644 --- a/drivers/input/misc/axp20x-pek.c +++ b/drivers/input/misc/axp20x-pek.c @@ -403,6 +403,7 @@ static const struct platform_device_id axp_pek_id_match[] = { }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(platform, axp_pek_id_match); static struct platform_driver axp20x_pek_driver = { .probe = axp20x_pek_probe, @@ -417,4 +418,3 @@ module_platform_driver(axp20x_pek_driver); MODULE_DESCRIPTION("axp20x Power Button"); MODULE_AUTHOR("Carlo Caione <carlo@caione.org>"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:axp20x-pek"); diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c index 6bf82ea8c918..ae473123583b 100644 --- a/drivers/input/misc/ims-pcu.c +++ b/drivers/input/misc/ims-pcu.c @@ -1635,13 +1635,25 @@ ims_pcu_get_cdc_union_desc(struct usb_interface *intf) return NULL; } - while (buflen > 0) { + while (buflen >= sizeof(*union_desc)) { union_desc = (struct usb_cdc_union_desc *)buf; + if (union_desc->bLength > buflen) { + dev_err(&intf->dev, "Too large descriptor\n"); + return NULL; + } + if (union_desc->bDescriptorType == USB_DT_CS_INTERFACE && union_desc->bDescriptorSubType == USB_CDC_UNION_TYPE) { dev_dbg(&intf->dev, "Found union header\n"); - return union_desc; + + if (union_desc->bLength >= sizeof(*union_desc)) + return union_desc; + + dev_err(&intf->dev, + "Union descriptor to short (%d vs %zd\n)", + union_desc->bLength, sizeof(*union_desc)); + return NULL; } buflen -= union_desc->bLength; diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 5af0b7d200bc..ee5466a374bf 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -1709,8 +1709,7 @@ static int synaptics_create_intertouch(struct psmouse *psmouse, .sensor_pdata = { .sensor_type = 
rmi_sensor_touchpad, .axis_align.flip_y = true, - /* to prevent cursors jumps: */ - .kernel_tracking = true, + .kernel_tracking = false, .topbuttonpad = topbuttonpad, }, .f30_data = { diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c index 32d2762448aa..b3bbad7d2282 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c @@ -72,6 +72,9 @@ struct goodix_ts_data { #define GOODIX_REG_CONFIG_DATA 0x8047 #define GOODIX_REG_ID 0x8140 +#define GOODIX_BUFFER_STATUS_READY BIT(7) +#define GOODIX_BUFFER_STATUS_TIMEOUT 20 + #define RESOLUTION_LOC 1 #define MAX_CONTACTS_LOC 5 #define TRIGGER_LOC 6 @@ -195,35 +198,53 @@ static int goodix_get_cfg_len(u16 id) static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data) { + unsigned long max_timeout; int touch_num; int error; - error = goodix_i2c_read(ts->client, GOODIX_READ_COOR_ADDR, data, - GOODIX_CONTACT_SIZE + 1); - if (error) { - dev_err(&ts->client->dev, "I2C transfer error: %d\n", error); - return error; - } + /* + * The 'buffer status' bit, which indicates that the data is valid, is + * not set as soon as the interrupt is raised, but slightly after. + * This takes around 10 ms to happen, so we poll for 20 ms. + */ + max_timeout = jiffies + msecs_to_jiffies(GOODIX_BUFFER_STATUS_TIMEOUT); + do { + error = goodix_i2c_read(ts->client, GOODIX_READ_COOR_ADDR, + data, GOODIX_CONTACT_SIZE + 1); + if (error) { + dev_err(&ts->client->dev, "I2C transfer error: %d\n", + error); + return error; + } - if (!(data[0] & 0x80)) - return -EAGAIN; + if (data[0] & GOODIX_BUFFER_STATUS_READY) { + touch_num = data[0] & 0x0f; + if (touch_num > ts->max_touch_num) + return -EPROTO; + + if (touch_num > 1) { + data += 1 + GOODIX_CONTACT_SIZE; + error = goodix_i2c_read(ts->client, + GOODIX_READ_COOR_ADDR + + 1 + GOODIX_CONTACT_SIZE, + data, + GOODIX_CONTACT_SIZE * + (touch_num - 1)); + if (error) + return error; + } + + return touch_num; + } - touch_num = data[0] & 0x0f; - if (touch_num > ts->max_touch_num) - return -EPROTO; - - if (touch_num > 1) { - data += 1 + GOODIX_CONTACT_SIZE; - error = goodix_i2c_read(ts->client, - GOODIX_READ_COOR_ADDR + - 1 + GOODIX_CONTACT_SIZE, - data, - GOODIX_CONTACT_SIZE * (touch_num - 1)); - if (error) - return error; - } + usleep_range(1000, 2000); /* Poll every 1 - 2 ms */ + } while (time_before(jiffies, max_timeout)); - return touch_num; + /* + * The Goodix panel will send spurious interrupts after a + * 'finger up' event, which will always cause a timeout. 
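The loop above boils down to a common pattern: poll a ready bit against a jiffies deadline, and treat an expired deadline as "no data" rather than a hard error, because the panel raises spurious interrupts after finger-up. Distilled into a stand-alone helper (illustrative; read_status() stands in for the device-specific I2C read):

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/types.h>

static int demo_poll_ready(int (*read_status)(void *ctx, u8 *status),
			   void *ctx, unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);
	u8 status;
	int error;

	do {
		error = read_status(ctx, &status);
		if (error)
			return error;		/* bus error: propagate */

		if (status & GOODIX_BUFFER_STATUS_READY)
			return 1;		/* buffer contents are valid */

		usleep_range(1000, 2000);	/* poll every 1 - 2 ms */
	} while (time_before(jiffies, deadline));

	return 0;			/* spurious irq: no data, not an error */
}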
+ */ + return 0; } static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data) diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c index 157fdb4bb2e8..8c6c6178ec12 100644 --- a/drivers/input/touchscreen/stmfts.c +++ b/drivers/input/touchscreen/stmfts.c @@ -663,12 +663,10 @@ static int stmfts_probe(struct i2c_client *client, sdata->input->open = stmfts_input_open; sdata->input->close = stmfts_input_close; + input_set_capability(sdata->input, EV_ABS, ABS_MT_POSITION_X); + input_set_capability(sdata->input, EV_ABS, ABS_MT_POSITION_Y); touchscreen_parse_properties(sdata->input, true, &sdata->prop); - input_set_abs_params(sdata->input, ABS_MT_POSITION_X, 0, - sdata->prop.max_x, 0, 0); - input_set_abs_params(sdata->input, ABS_MT_POSITION_Y, 0, - sdata->prop.max_y, 0, 0); input_set_abs_params(sdata->input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0); input_set_abs_params(sdata->input, ABS_MT_TOUCH_MINOR, 0, 255, 0, 0); input_set_abs_params(sdata->input, ABS_MT_ORIENTATION, 0, 255, 0, 0); diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c index 7953381d939a..f1043ae71dcc 100644 --- a/drivers/input/touchscreen/ti_am335x_tsc.c +++ b/drivers/input/touchscreen/ti_am335x_tsc.c @@ -161,7 +161,7 @@ static void titsc_step_config(struct titsc *ts_dev) break; case 5: config |= ts_dev->bit_xp | STEPCONFIG_INP_AN4 | - ts_dev->bit_xn | ts_dev->bit_yp; + STEPCONFIG_XNP | STEPCONFIG_YPN; break; case 8: config |= ts_dev->bit_yp | STEPCONFIG_INP(ts_dev->inp_xp); diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 51f8215877f5..8e8874d23717 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -2773,14 +2773,16 @@ int __init amd_iommu_init_api(void) int __init amd_iommu_init_dma_ops(void) { - swiotlb = iommu_pass_through ? 1 : 0; + swiotlb = (iommu_pass_through || sme_me_mask) ? 1 : 0; iommu_detected = 1; /* * In case we don't initialize SWIOTLB (actually the common case - * when AMD IOMMU is enabled), make sure there are global - * dma_ops set as a fall-back for devices not handled by this - * driver (for example non-PCI devices). + * when AMD IOMMU is enabled and SME is not active), make sure there + * are global dma_ops set as a fall-back for devices not handled by + * this driver (for example non-PCI devices). When SME is active, + * make sure that swiotlb variable remains set so the global dma_ops + * continue to be SWIOTLB. 
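The stmfts change a little further up flips the usual ordering: it declares the ABS_MT_POSITION_X/Y capabilities first so that touchscreen_parse_properties() can size those axes from firmware properties, instead of the driver hard-coding the ranges afterwards. A condensed sketch of that ordering for a hypothetical multitouch driver (slot count chosen arbitrarily):

#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>

static int demo_setup_touch_axes(struct input_dev *input,
				 struct touchscreen_properties *prop)
{
	/* declare the axes first so the generic helper may configure them */
	input_set_capability(input, EV_ABS, ABS_MT_POSITION_X);
	input_set_capability(input, EV_ABS, ABS_MT_POSITION_Y);

	/* reads touchscreen-size-x/y and friends, fills in the abs params */
	touchscreen_parse_properties(input, true, prop);

	/* anything firmware does not describe keeps an explicit range */
	input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);

	return input_mt_init_slots(input, 16, INPUT_MT_DIRECT);
}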
*/ if (!swiotlb) dma_ops = &nommu_dma_ops; @@ -3046,6 +3048,7 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova, mutex_unlock(&domain->api_lock); domain_flush_tlb_pde(domain); + domain_flush_complete(domain); return unmap_size; } diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index f596fcc32898..25c2c75f5332 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -709,7 +709,7 @@ static const struct dev_pm_ops sysmmu_pm_ops = { pm_runtime_force_resume) }; -static const struct of_device_id sysmmu_of_match[] __initconst = { +static const struct of_device_id sysmmu_of_match[] = { { .compatible = "samsung,exynos-sysmmu", }, { }, }; diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c index eed6c397d840..f8a808d45034 100644 --- a/drivers/media/cec/cec-adap.c +++ b/drivers/media/cec/cec-adap.c @@ -1797,12 +1797,19 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg, */ switch (msg->msg[1]) { case CEC_MSG_GET_CEC_VERSION: - case CEC_MSG_GIVE_DEVICE_VENDOR_ID: case CEC_MSG_ABORT: case CEC_MSG_GIVE_DEVICE_POWER_STATUS: - case CEC_MSG_GIVE_PHYSICAL_ADDR: case CEC_MSG_GIVE_OSD_NAME: + /* + * These messages reply with a directed message, so ignore if + * the initiator is Unregistered. + */ + if (!adap->passthrough && from_unregistered) + return 0; + /* Fall through */ + case CEC_MSG_GIVE_DEVICE_VENDOR_ID: case CEC_MSG_GIVE_FEATURES: + case CEC_MSG_GIVE_PHYSICAL_ADDR: /* * Skip processing these messages if the passthrough mode * is on. @@ -1810,7 +1817,7 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg, if (adap->passthrough) goto skip_processing; /* Ignore if addressing is wrong */ - if (is_broadcast || from_unregistered) + if (is_broadcast) return 0; break; diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c index 2fcba1616168..9139d01ba7ed 100644 --- a/drivers/media/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb-core/dvb_frontend.c @@ -141,22 +141,39 @@ struct dvb_frontend_private { static void dvb_frontend_invoke_release(struct dvb_frontend *fe, void (*release)(struct dvb_frontend *fe)); -static void dvb_frontend_free(struct kref *ref) +static void __dvb_frontend_free(struct dvb_frontend *fe) { - struct dvb_frontend *fe = - container_of(ref, struct dvb_frontend, refcount); struct dvb_frontend_private *fepriv = fe->frontend_priv; + if (!fepriv) + return; + dvb_free_device(fepriv->dvbdev); dvb_frontend_invoke_release(fe, fe->ops.release); kfree(fepriv); + fe->frontend_priv = NULL; +} + +static void dvb_frontend_free(struct kref *ref) +{ + struct dvb_frontend *fe = + container_of(ref, struct dvb_frontend, refcount); + + __dvb_frontend_free(fe); } static void dvb_frontend_put(struct dvb_frontend *fe) { - kref_put(&fe->refcount, dvb_frontend_free); + /* + * Check if the frontend was registered, as otherwise + * kref was not initialized yet. 
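Put differently: kref_put() is only safe once kref_init() has run, which for a DVB frontend happens at registration time, so an unregistered frontend has to be torn down directly. The same guard as a generic pattern (object and field names are made up for the example):

#include <linux/kref.h>
#include <linux/slab.h>

struct demo_obj {
	struct kref refcount;	/* initialised only when registered */
	void *priv;		/* non-NULL once registered, like frontend_priv */
};

static void demo_obj_release(struct kref *ref)
{
	struct demo_obj *obj = container_of(ref, struct demo_obj, refcount);

	kfree(obj->priv);
	kfree(obj);
}

static void demo_obj_put(struct demo_obj *obj)
{
	if (obj->priv)			/* registered: refcount is live */
		kref_put(&obj->refcount, demo_obj_release);
	else				/* never registered: free directly */
		kfree(obj);
}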
+ */ + if (fe->frontend_priv) + kref_put(&fe->refcount, dvb_frontend_free); + else + __dvb_frontend_free(fe); } static void dvb_frontend_get(struct dvb_frontend *fe) diff --git a/drivers/media/dvb-frontends/dib3000mc.c b/drivers/media/dvb-frontends/dib3000mc.c index 224283fe100a..4d086a7248e9 100644 --- a/drivers/media/dvb-frontends/dib3000mc.c +++ b/drivers/media/dvb-frontends/dib3000mc.c @@ -55,29 +55,57 @@ struct dib3000mc_state { static u16 dib3000mc_read_word(struct dib3000mc_state *state, u16 reg) { - u8 wb[2] = { (reg >> 8) | 0x80, reg & 0xff }; - u8 rb[2]; struct i2c_msg msg[2] = { - { .addr = state->i2c_addr >> 1, .flags = 0, .buf = wb, .len = 2 }, - { .addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .buf = rb, .len = 2 }, + { .addr = state->i2c_addr >> 1, .flags = 0, .len = 2 }, + { .addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .len = 2 }, }; + u16 word; + u8 *b; + + b = kmalloc(4, GFP_KERNEL); + if (!b) + return 0; + + b[0] = (reg >> 8) | 0x80; + b[1] = reg; + b[2] = 0; + b[3] = 0; + + msg[0].buf = b; + msg[1].buf = b + 2; if (i2c_transfer(state->i2c_adap, msg, 2) != 2) dprintk("i2c read error on %d\n",reg); - return (rb[0] << 8) | rb[1]; + word = (b[2] << 8) | b[3]; + kfree(b); + + return word; } static int dib3000mc_write_word(struct dib3000mc_state *state, u16 reg, u16 val) { - u8 b[4] = { - (reg >> 8) & 0xff, reg & 0xff, - (val >> 8) & 0xff, val & 0xff, - }; struct i2c_msg msg = { - .addr = state->i2c_addr >> 1, .flags = 0, .buf = b, .len = 4 + .addr = state->i2c_addr >> 1, .flags = 0, .len = 4 }; - return i2c_transfer(state->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0; + int rc; + u8 *b; + + b = kmalloc(4, GFP_KERNEL); + if (!b) + return -ENOMEM; + + b[0] = reg >> 8; + b[1] = reg; + b[2] = val >> 8; + b[3] = val; + + msg.buf = b; + + rc = i2c_transfer(state->i2c_adap, &msg, 1) != 1 ? 
-EREMOTEIO : 0; + kfree(b); + + return rc; } static int dib3000mc_identify(struct dib3000mc_state *state) diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c index 7bec3e028bee..5553b89b804e 100644 --- a/drivers/media/dvb-frontends/dvb-pll.c +++ b/drivers/media/dvb-frontends/dvb-pll.c @@ -753,13 +753,19 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr, struct i2c_adapter *i2c, unsigned int pll_desc_id) { - u8 b1 [] = { 0 }; - struct i2c_msg msg = { .addr = pll_addr, .flags = I2C_M_RD, - .buf = b1, .len = 1 }; + u8 *b1; + struct i2c_msg msg = { .addr = pll_addr, .flags = I2C_M_RD, .len = 1 }; struct dvb_pll_priv *priv = NULL; int ret; const struct dvb_pll_desc *desc; + b1 = kmalloc(1, GFP_KERNEL); + if (!b1) + return NULL; + + b1[0] = 0; + msg.buf = b1; + if ((id[dvb_pll_devcount] > DVB_PLL_UNDEFINED) && (id[dvb_pll_devcount] < ARRAY_SIZE(pll_list))) pll_desc_id = id[dvb_pll_devcount]; @@ -773,15 +779,19 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr, fe->ops.i2c_gate_ctrl(fe, 1); ret = i2c_transfer (i2c, &msg, 1); - if (ret != 1) + if (ret != 1) { + kfree(b1); return NULL; + } if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } priv = kzalloc(sizeof(struct dvb_pll_priv), GFP_KERNEL); - if (priv == NULL) + if (!priv) { + kfree(b1); return NULL; + } priv->pll_i2c_address = pll_addr; priv->i2c = i2c; @@ -811,6 +821,8 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr, "insmod option" : "autodetected"); } + kfree(b1); + return fe; } EXPORT_SYMBOL(dvb_pll_attach); diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index 7e7cc49b8674..3c4f7fa7b9d8 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig @@ -112,7 +112,7 @@ config VIDEO_PXA27x config VIDEO_QCOM_CAMSS tristate "Qualcomm 8x16 V4L2 Camera Subsystem driver" - depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API + depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAS_DMA depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST select VIDEOBUF2_DMA_SG select V4L2_FWNODE diff --git a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c b/drivers/media/platform/qcom/camss-8x16/camss-vfe.c index b21b3c2dc77f..b22d2dfcd3c2 100644 --- a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c +++ b/drivers/media/platform/qcom/camss-8x16/camss-vfe.c @@ -2660,7 +2660,7 @@ static int vfe_get_selection(struct v4l2_subdev *sd, * * Return -EINVAL or zero on success */ -int vfe_set_selection(struct v4l2_subdev *sd, +static int vfe_set_selection(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_selection *sel) { diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c index 68933d208063..9b2a401a4891 100644 --- a/drivers/media/platform/qcom/venus/helpers.c +++ b/drivers/media/platform/qcom/venus/helpers.c @@ -682,6 +682,7 @@ void venus_helper_vb2_stop_streaming(struct vb2_queue *q) hfi_session_abort(inst); load_scale_clocks(core); + INIT_LIST_HEAD(&inst->registeredbufs); } venus_helper_buffers_done(inst, VB2_BUF_STATE_ERROR); diff --git a/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c b/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c index 1edf667d562a..146ae6f25cdb 100644 --- a/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c +++ b/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c @@ -172,7 +172,8 @@ u32 s5p_cec_get_status(struct s5p_cec_dev *cec) { u32 status = 0; - status = readb(cec->reg + 
S5P_CEC_STATUS_0); + status = readb(cec->reg + S5P_CEC_STATUS_0) & 0xf; + status |= (readb(cec->reg + S5P_CEC_TX_STAT1) & 0xf) << 4; status |= readb(cec->reg + S5P_CEC_STATUS_1) << 8; status |= readb(cec->reg + S5P_CEC_STATUS_2) << 16; status |= readb(cec->reg + S5P_CEC_STATUS_3) << 24; diff --git a/drivers/media/platform/s5p-cec/s5p_cec.c b/drivers/media/platform/s5p-cec/s5p_cec.c index 58d200e7c838..8837e2678bde 100644 --- a/drivers/media/platform/s5p-cec/s5p_cec.c +++ b/drivers/media/platform/s5p-cec/s5p_cec.c @@ -92,7 +92,10 @@ static irqreturn_t s5p_cec_irq_handler(int irq, void *priv) dev_dbg(cec->dev, "irq received\n"); if (status & CEC_STATUS_TX_DONE) { - if (status & CEC_STATUS_TX_ERROR) { + if (status & CEC_STATUS_TX_NACK) { + dev_dbg(cec->dev, "CEC_STATUS_TX_NACK set\n"); + cec->tx = STATE_NACK; + } else if (status & CEC_STATUS_TX_ERROR) { dev_dbg(cec->dev, "CEC_STATUS_TX_ERROR set\n"); cec->tx = STATE_ERROR; } else { @@ -135,6 +138,12 @@ static irqreturn_t s5p_cec_irq_handler_thread(int irq, void *priv) cec_transmit_done(cec->adap, CEC_TX_STATUS_OK, 0, 0, 0, 0); cec->tx = STATE_IDLE; break; + case STATE_NACK: + cec_transmit_done(cec->adap, + CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_NACK, + 0, 1, 0, 0); + cec->tx = STATE_IDLE; + break; case STATE_ERROR: cec_transmit_done(cec->adap, CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_ERROR, diff --git a/drivers/media/platform/s5p-cec/s5p_cec.h b/drivers/media/platform/s5p-cec/s5p_cec.h index 8bcd8dc1aeb9..86ded522ef27 100644 --- a/drivers/media/platform/s5p-cec/s5p_cec.h +++ b/drivers/media/platform/s5p-cec/s5p_cec.h @@ -35,6 +35,7 @@ #define CEC_STATUS_TX_TRANSFERRING (1 << 1) #define CEC_STATUS_TX_DONE (1 << 2) #define CEC_STATUS_TX_ERROR (1 << 3) +#define CEC_STATUS_TX_NACK (1 << 4) #define CEC_STATUS_TX_BYTES (0xFF << 8) #define CEC_STATUS_RX_RUNNING (1 << 16) #define CEC_STATUS_RX_RECEIVING (1 << 17) @@ -55,6 +56,7 @@ enum cec_state { STATE_IDLE, STATE_BUSY, STATE_DONE, + STATE_NACK, STATE_ERROR }; diff --git a/drivers/media/tuners/mt2060.c b/drivers/media/tuners/mt2060.c index 2e487f9a2cc3..4983eeb39f36 100644 --- a/drivers/media/tuners/mt2060.c +++ b/drivers/media/tuners/mt2060.c @@ -38,41 +38,74 @@ MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off)."); static int mt2060_readreg(struct mt2060_priv *priv, u8 reg, u8 *val) { struct i2c_msg msg[2] = { - { .addr = priv->cfg->i2c_address, .flags = 0, .buf = ®, .len = 1 }, - { .addr = priv->cfg->i2c_address, .flags = I2C_M_RD, .buf = val, .len = 1 }, + { .addr = priv->cfg->i2c_address, .flags = 0, .len = 1 }, + { .addr = priv->cfg->i2c_address, .flags = I2C_M_RD, .len = 1 }, }; + int rc = 0; + u8 *b; + + b = kmalloc(2, GFP_KERNEL); + if (!b) + return -ENOMEM; + + b[0] = reg; + b[1] = 0; + + msg[0].buf = b; + msg[1].buf = b + 1; if (i2c_transfer(priv->i2c, msg, 2) != 2) { printk(KERN_WARNING "mt2060 I2C read failed\n"); - return -EREMOTEIO; + rc = -EREMOTEIO; } - return 0; + *val = b[1]; + kfree(b); + + return rc; } // Writes a single register static int mt2060_writereg(struct mt2060_priv *priv, u8 reg, u8 val) { - u8 buf[2] = { reg, val }; struct i2c_msg msg = { - .addr = priv->cfg->i2c_address, .flags = 0, .buf = buf, .len = 2 + .addr = priv->cfg->i2c_address, .flags = 0, .len = 2 }; + u8 *buf; + int rc = 0; + + buf = kmalloc(2, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + buf[0] = reg; + buf[1] = val; + + msg.buf = buf; if (i2c_transfer(priv->i2c, &msg, 1) != 1) { printk(KERN_WARNING "mt2060 I2C write failed\n"); - return -EREMOTEIO; + rc = -EREMOTEIO; } - return 0; + 
kfree(buf); + return rc; } // Writes a set of consecutive registers static int mt2060_writeregs(struct mt2060_priv *priv,u8 *buf, u8 len) { int rem, val_len; - u8 xfer_buf[16]; + u8 *xfer_buf; + int rc = 0; struct i2c_msg msg = { - .addr = priv->cfg->i2c_address, .flags = 0, .buf = xfer_buf + .addr = priv->cfg->i2c_address, .flags = 0 }; + xfer_buf = kmalloc(16, GFP_KERNEL); + if (!xfer_buf) + return -ENOMEM; + + msg.buf = xfer_buf; + for (rem = len - 1; rem > 0; rem -= priv->i2c_max_regs) { val_len = min_t(int, rem, priv->i2c_max_regs); msg.len = 1 + val_len; @@ -81,11 +114,13 @@ static int mt2060_writeregs(struct mt2060_priv *priv,u8 *buf, u8 len) if (i2c_transfer(priv->i2c, &msg, 1) != 1) { printk(KERN_WARNING "mt2060 I2C write failed (len=%i)\n", val_len); - return -EREMOTEIO; + rc = -EREMOTEIO; + break; } } - return 0; + kfree(xfer_buf); + return rc; } // Initialisation sequences diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index c8307e8b4c16..0ccccbaf530d 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -127,6 +127,8 @@ #define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */ #define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */ +#define MEI_DEV_ID_GLK 0x319A /* Gemini Lake */ + #define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ #define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */ diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 4ff40d319676..78b3172c8e6e 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -93,6 +93,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)}, @@ -226,12 +228,15 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME; /* - * For not wake-able HW runtime pm framework - * can't be used on pci device level. - * Use domain runtime pm callbacks instead. - */ - if (!pci_dev_run_wake(pdev)) - mei_me_set_pm_domain(dev); + * ME maps runtime suspend/resume to D0i states, + * hence we need to go around native PCI runtime service which + * eventually brings the device into D3cold/hot state, + * but the mei device cannot wake up from D3 unlike from D0i3. + * To get around the PCI device native runtime pm, + * ME uses runtime pm domain handlers which take precedence + * over the driver's pm handlers. 
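"Runtime pm domain handlers" here means attaching a struct dev_pm_domain to the device so its callbacks are consulted ahead of the PCI bus PM ops. A rough, hypothetical sketch of that pattern (mei_me_set_pm_domain() does something along these lines; the callback bodies are stubs):

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_domain.h>

static int demo_runtime_suspend(struct device *dev)
{
	/* enter the device's own low-power state (D0i3-like), not PCI D3 */
	return 0;
}

static int demo_runtime_resume(struct device *dev)
{
	return 0;
}

static struct dev_pm_domain demo_pg_domain;

static void demo_set_pm_domain(struct device *dev)
{
	if (!dev->bus || !dev->bus->pm)
		return;

	/* keep the bus behaviour for everything else ... */
	demo_pg_domain.ops = *dev->bus->pm;
	/* ... but route runtime PM through the device-specific handlers */
	demo_pg_domain.ops.runtime_suspend = demo_runtime_suspend;
	demo_pg_domain.ops.runtime_resume = demo_runtime_resume;

	dev_pm_domain_set(dev, &demo_pg_domain);
}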
+ */ + mei_me_set_pm_domain(dev); if (mei_pg_is_enabled(dev)) pm_runtime_put_noidle(&pdev->dev); @@ -271,8 +276,7 @@ static void mei_me_shutdown(struct pci_dev *pdev) dev_dbg(&pdev->dev, "shutdown\n"); mei_stop(dev); - if (!pci_dev_run_wake(pdev)) - mei_me_unset_pm_domain(dev); + mei_me_unset_pm_domain(dev); mei_disable_interrupts(dev); free_irq(pdev->irq, dev); @@ -300,8 +304,7 @@ static void mei_me_remove(struct pci_dev *pdev) dev_dbg(&pdev->dev, "stop\n"); mei_stop(dev); - if (!pci_dev_run_wake(pdev)) - mei_me_unset_pm_domain(dev); + mei_me_unset_pm_domain(dev); mei_disable_interrupts(dev); diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c index e38a5f144373..0566f9bfa7de 100644 --- a/drivers/misc/mei/pci-txe.c +++ b/drivers/misc/mei/pci-txe.c @@ -144,12 +144,14 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME; /* - * For not wake-able HW runtime pm framework - * can't be used on pci device level. - * Use domain runtime pm callbacks instead. - */ - if (!pci_dev_run_wake(pdev)) - mei_txe_set_pm_domain(dev); + * TXE maps runtime suspend/resume to own power gating states, + * hence we need to go around native PCI runtime service which + * eventually brings the device into D3cold/hot state. + * But the TXE device cannot wake up from D3 unlike from own + * power gating. To get around PCI device native runtime pm, + * TXE uses runtime pm domain handlers which take precedence. + */ + mei_txe_set_pm_domain(dev); pm_runtime_put_noidle(&pdev->dev); @@ -186,8 +188,7 @@ static void mei_txe_shutdown(struct pci_dev *pdev) dev_dbg(&pdev->dev, "shutdown\n"); mei_stop(dev); - if (!pci_dev_run_wake(pdev)) - mei_txe_unset_pm_domain(dev); + mei_txe_unset_pm_domain(dev); mei_disable_interrupts(dev); free_irq(pdev->irq, dev); @@ -215,8 +216,7 @@ static void mei_txe_remove(struct pci_dev *pdev) mei_stop(dev); - if (!pci_dev_run_wake(pdev)) - mei_txe_unset_pm_domain(dev); + mei_txe_unset_pm_domain(dev); mei_disable_interrupts(dev); free_irq(pdev->irq, dev); @@ -318,15 +318,7 @@ static int mei_txe_pm_runtime_suspend(struct device *device) else ret = -EAGAIN; - /* - * If everything is okay we're about to enter PCI low - * power state (D3) therefor we need to disable the - * interrupts towards host. - * However if device is not wakeable we do not enter - * D-low state and we need to keep the interrupt kicking - */ - if (!ret && pci_dev_run_wake(pdev)) - mei_disable_interrupts(dev); + /* keep irq on we are staying in D0 */ dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret); diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index d0ccc6729fd2..67d787fa3306 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -448,6 +448,8 @@ static void intel_dsm_init(struct intel_host *intel_host, struct device *dev, int err; u32 val; + intel_host->d3_retune = true; + err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns); if (err) { pr_debug("%s: DSM not supported, error %d\n", diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 13f0f219d8aa..a13a4896a8bd 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -182,22 +182,23 @@ /* FLEXCAN hardware feature flags * * Below is some version info we got: - * SOC Version IP-Version Glitch- [TR]WRN_INT Memory err RTR re- - * Filter? connected? 
detection ception in MB - * MX25 FlexCAN2 03.00.00.00 no no no no - * MX28 FlexCAN2 03.00.04.00 yes yes no no - * MX35 FlexCAN2 03.00.00.00 no no no no - * MX53 FlexCAN2 03.00.00.00 yes no no no - * MX6s FlexCAN3 10.00.12.00 yes yes no yes - * VF610 FlexCAN3 ? no yes yes yes? + * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR re- + * Filter? connected? Passive detection ception in MB + * MX25 FlexCAN2 03.00.00.00 no no ? no no + * MX28 FlexCAN2 03.00.04.00 yes yes no no no + * MX35 FlexCAN2 03.00.00.00 no no ? no no + * MX53 FlexCAN2 03.00.00.00 yes no no no no + * MX6s FlexCAN3 10.00.12.00 yes yes no no yes + * VF610 FlexCAN3 ? no yes ? yes yes? * * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected. */ -#define FLEXCAN_QUIRK_BROKEN_ERR_STATE BIT(1) /* [TR]WRN_INT not connected */ +#define FLEXCAN_QUIRK_BROKEN_WERR_STATE BIT(1) /* [TR]WRN_INT not connected */ #define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2) /* Disable RX FIFO Global mask */ #define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) /* Enable EACEN and RRS bit in ctrl2 */ #define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disable Memory error detection */ #define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */ +#define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6) /* No interrupt for error passive */ /* Structure of the message buffer */ struct flexcan_mb { @@ -281,14 +282,17 @@ struct flexcan_priv { }; static const struct flexcan_devtype_data fsl_p1010_devtype_data = { - .quirks = FLEXCAN_QUIRK_BROKEN_ERR_STATE, + .quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE | + FLEXCAN_QUIRK_BROKEN_PERR_STATE, }; -static const struct flexcan_devtype_data fsl_imx28_devtype_data; +static const struct flexcan_devtype_data fsl_imx28_devtype_data = { + .quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE, +}; static const struct flexcan_devtype_data fsl_imx6q_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | - FLEXCAN_QUIRK_USE_OFF_TIMESTAMP, + FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE, }; static const struct flexcan_devtype_data fsl_vf610_devtype_data = { @@ -335,6 +339,22 @@ static inline void flexcan_write(u32 val, void __iomem *addr) } #endif +static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv) +{ + struct flexcan_regs __iomem *regs = priv->regs; + u32 reg_ctrl = (priv->reg_ctrl_default | FLEXCAN_CTRL_ERR_MSK); + + flexcan_write(reg_ctrl, ®s->ctrl); +} + +static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv) +{ + struct flexcan_regs __iomem *regs = priv->regs; + u32 reg_ctrl = (priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_MSK); + + flexcan_write(reg_ctrl, ®s->ctrl); +} + static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv) { if (!priv->reg_xceiver) @@ -713,6 +733,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) struct flexcan_regs __iomem *regs = priv->regs; irqreturn_t handled = IRQ_NONE; u32 reg_iflag1, reg_esr; + enum can_state last_state = priv->can.state; reg_iflag1 = flexcan_read(®s->iflag1); @@ -765,8 +786,10 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, ®s->esr); } - /* state change interrupt */ - if (reg_esr & FLEXCAN_ESR_ERR_STATE) + /* state change interrupt or broken error state quirk fix is enabled */ + if ((reg_esr & FLEXCAN_ESR_ERR_STATE) || + (priv->devtype_data->quirks & (FLEXCAN_QUIRK_BROKEN_WERR_STATE | + FLEXCAN_QUIRK_BROKEN_PERR_STATE))) flexcan_irq_state(dev, reg_esr); /* bus error IRQ - handle if bus 
error reporting is activated */ @@ -774,6 +797,44 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) flexcan_irq_bus_err(dev, reg_esr); + /* availability of error interrupt among state transitions in case + * bus error reporting is de-activated and + * FLEXCAN_QUIRK_BROKEN_PERR_STATE is enabled: + * +--------------------------------------------------------------+ + * | +----------------------------------------------+ [stopped / | + * | | | sleeping] -+ + * +-+-> active <-> warning <-> passive -> bus off -+ + * ___________^^^^^^^^^^^^_______________________________ + * disabled(1) enabled disabled + * + * (1): enabled if FLEXCAN_QUIRK_BROKEN_WERR_STATE is enabled + */ + if ((last_state != priv->can.state) && + (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_PERR_STATE) && + !(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) { + switch (priv->can.state) { + case CAN_STATE_ERROR_ACTIVE: + if (priv->devtype_data->quirks & + FLEXCAN_QUIRK_BROKEN_WERR_STATE) + flexcan_error_irq_enable(priv); + else + flexcan_error_irq_disable(priv); + break; + + case CAN_STATE_ERROR_WARNING: + flexcan_error_irq_enable(priv); + break; + + case CAN_STATE_ERROR_PASSIVE: + case CAN_STATE_BUS_OFF: + flexcan_error_irq_disable(priv); + break; + + default: + break; + } + } + return handled; } @@ -887,7 +948,7 @@ static int flexcan_chip_start(struct net_device *dev) * on most Flexcan cores, too. Otherwise we don't get * any error warning or passive interrupts. */ - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_ERR_STATE || + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_WERR_STATE || priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) reg_ctrl |= FLEXCAN_CTRL_ERR_MSK; else diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index be928ce62d32..9fdb0f0bfa06 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@ -333,7 +333,7 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv, } cf->can_id = id & ESD_IDMASK; - cf->can_dlc = get_can_dlc(msg->msg.rx.dlc); + cf->can_dlc = get_can_dlc(msg->msg.rx.dlc & ~ESD_RTR); if (id & ESD_EXTID) cf->can_id |= CAN_EFF_FLAG; diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index afcc1312dbaf..68ac3e88a8ce 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -375,6 +375,8 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) gs_free_tx_context(txc); + atomic_dec(&dev->active_tx_urbs); + netif_wake_queue(netdev); } @@ -463,14 +465,6 @@ static void gs_usb_xmit_callback(struct urb *urb) urb->transfer_buffer_length, urb->transfer_buffer, urb->transfer_dma); - - atomic_dec(&dev->active_tx_urbs); - - if (!netif_device_present(netdev)) - return; - - if (netif_queue_stopped(netdev)) - netif_wake_queue(netdev); } static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index 0d97311a1b26..060cb18fa659 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -743,8 +743,8 @@ static void ena_get_channels(struct net_device *netdev, { struct ena_adapter *adapter = netdev_priv(netdev); - channels->max_rx = ENA_MAX_NUM_IO_QUEUES; - channels->max_tx = ENA_MAX_NUM_IO_QUEUES; + channels->max_rx = adapter->num_queues; + channels->max_tx = adapter->num_queues; channels->max_other = 0; channels->max_combined = 0; channels->rx_count = 
adapter->num_queues; diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 47bdbf9bdefb..5417e4da64ca 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -966,7 +966,7 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring, u64_stats_update_begin(&rx_ring->syncp); rx_ring->rx_stats.bad_csum++; u64_stats_update_end(&rx_ring->syncp); - netif_err(rx_ring->adapter, rx_err, rx_ring->netdev, + netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, "RX IPv4 header checksum error\n"); return; } @@ -979,7 +979,7 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring, u64_stats_update_begin(&rx_ring->syncp); rx_ring->rx_stats.bad_csum++; u64_stats_update_end(&rx_ring->syncp); - netif_err(rx_ring->adapter, rx_err, rx_ring->netdev, + netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, "RX L4 checksum error\n"); skb->ip_summed = CHECKSUM_NONE; return; @@ -3051,7 +3051,8 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev) if (ena_dev->mem_bar) devm_iounmap(&pdev->dev, ena_dev->mem_bar); - devm_iounmap(&pdev->dev, ena_dev->reg_bar); + if (ena_dev->reg_bar) + devm_iounmap(&pdev->dev, ena_dev->reg_bar); release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; pci_release_selected_regions(pdev, release_bars); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h index 0fdaaa643073..57e796870595 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h @@ -22,8 +22,12 @@ #define AQ_CFG_FORCE_LEGACY_INT 0U -#define AQ_CFG_IS_INTERRUPT_MODERATION_DEF 1U -#define AQ_CFG_INTERRUPT_MODERATION_RATE_DEF 0xFFFFU +#define AQ_CFG_INTERRUPT_MODERATION_OFF 0 +#define AQ_CFG_INTERRUPT_MODERATION_ON 1 +#define AQ_CFG_INTERRUPT_MODERATION_AUTO 0xFFFFU + +#define AQ_CFG_INTERRUPT_MODERATION_USEC_MAX (0x1FF * 2) + #define AQ_CFG_IRQ_MASK 0x1FFU #define AQ_CFG_VECS_MAX 8U diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index a761e91471df..d5e99b468870 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -56,10 +56,6 @@ aq_ethtool_set_link_ksettings(struct net_device *ndev, return aq_nic_set_link_ksettings(aq_nic, cmd); } -/* there "5U" is number of queue[#] stats lines (InPackets+...+InErrors) */ -static const unsigned int aq_ethtool_stat_queue_lines = 5U; -static const unsigned int aq_ethtool_stat_queue_chars = - 5U * ETH_GSTRING_LEN; static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = { "InPackets", "InUCast", @@ -83,56 +79,26 @@ static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = { "InOctetsDma", "OutOctetsDma", "InDroppedDma", - "Queue[0] InPackets", - "Queue[0] OutPackets", - "Queue[0] InJumboPackets", - "Queue[0] InLroPackets", - "Queue[0] InErrors", - "Queue[1] InPackets", - "Queue[1] OutPackets", - "Queue[1] InJumboPackets", - "Queue[1] InLroPackets", - "Queue[1] InErrors", - "Queue[2] InPackets", - "Queue[2] OutPackets", - "Queue[2] InJumboPackets", - "Queue[2] InLroPackets", - "Queue[2] InErrors", - "Queue[3] InPackets", - "Queue[3] OutPackets", - "Queue[3] InJumboPackets", - "Queue[3] InLroPackets", - "Queue[3] InErrors", - "Queue[4] InPackets", - "Queue[4] OutPackets", - "Queue[4] InJumboPackets", - "Queue[4] InLroPackets", - "Queue[4] InErrors", - "Queue[5] InPackets", - 
"Queue[5] OutPackets", - "Queue[5] InJumboPackets", - "Queue[5] InLroPackets", - "Queue[5] InErrors", - "Queue[6] InPackets", - "Queue[6] OutPackets", - "Queue[6] InJumboPackets", - "Queue[6] InLroPackets", - "Queue[6] InErrors", - "Queue[7] InPackets", - "Queue[7] OutPackets", - "Queue[7] InJumboPackets", - "Queue[7] InLroPackets", - "Queue[7] InErrors", +}; + +static const char aq_ethtool_queue_stat_names[][ETH_GSTRING_LEN] = { + "Queue[%d] InPackets", + "Queue[%d] OutPackets", + "Queue[%d] Restarts", + "Queue[%d] InJumboPackets", + "Queue[%d] InLroPackets", + "Queue[%d] InErrors", }; static void aq_ethtool_stats(struct net_device *ndev, struct ethtool_stats *stats, u64 *data) { struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); -/* ASSERT: Need add lines to aq_ethtool_stat_names if AQ_CFG_VECS_MAX > 8 */ - BUILD_BUG_ON(AQ_CFG_VECS_MAX > 8); - memset(data, 0, ARRAY_SIZE(aq_ethtool_stat_names) * sizeof(u64)); + memset(data, 0, (ARRAY_SIZE(aq_ethtool_stat_names) + + ARRAY_SIZE(aq_ethtool_queue_stat_names) * + cfg->vecs) * sizeof(u64)); aq_nic_get_stats(aq_nic, data); } @@ -154,8 +120,8 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev, strlcpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "", sizeof(drvinfo->bus_info)); - drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) - - (AQ_CFG_VECS_MAX - cfg->vecs) * aq_ethtool_stat_queue_lines; + drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) + + cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names); drvinfo->testinfo_len = 0; drvinfo->regdump_len = regs_count; drvinfo->eedump_len = 0; @@ -164,14 +130,25 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev, static void aq_ethtool_get_strings(struct net_device *ndev, u32 stringset, u8 *data) { + int i, si; struct aq_nic_s *aq_nic = netdev_priv(ndev); struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); - - if (stringset == ETH_SS_STATS) - memcpy(data, *aq_ethtool_stat_names, - sizeof(aq_ethtool_stat_names) - - (AQ_CFG_VECS_MAX - cfg->vecs) * - aq_ethtool_stat_queue_chars); + u8 *p = data; + + if (stringset == ETH_SS_STATS) { + memcpy(p, *aq_ethtool_stat_names, + sizeof(aq_ethtool_stat_names)); + p = p + sizeof(aq_ethtool_stat_names); + for (i = 0; i < cfg->vecs; i++) { + for (si = 0; + si < ARRAY_SIZE(aq_ethtool_queue_stat_names); + si++) { + snprintf(p, ETH_GSTRING_LEN, + aq_ethtool_queue_stat_names[si], i); + p += ETH_GSTRING_LEN; + } + } + } } static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset) @@ -182,9 +159,8 @@ static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset) switch (stringset) { case ETH_SS_STATS: - ret = ARRAY_SIZE(aq_ethtool_stat_names) - - (AQ_CFG_VECS_MAX - cfg->vecs) * - aq_ethtool_stat_queue_lines; + ret = ARRAY_SIZE(aq_ethtool_stat_names) + + cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names); break; default: ret = -EOPNOTSUPP; @@ -245,6 +221,69 @@ static int aq_ethtool_get_rxnfc(struct net_device *ndev, return err; } +int aq_ethtool_get_coalesce(struct net_device *ndev, + struct ethtool_coalesce *coal) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + + if (cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON || + cfg->itr == AQ_CFG_INTERRUPT_MODERATION_AUTO) { + coal->rx_coalesce_usecs = cfg->rx_itr; + coal->tx_coalesce_usecs = cfg->tx_itr; + coal->rx_max_coalesced_frames = 0; + coal->tx_max_coalesced_frames = 0; + } else { + coal->rx_coalesce_usecs = 0; + coal->tx_coalesce_usecs = 0; + coal->rx_max_coalesced_frames = 
1; + coal->tx_max_coalesced_frames = 1; + } + return 0; +} + +int aq_ethtool_set_coalesce(struct net_device *ndev, + struct ethtool_coalesce *coal) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + + /* This is not yet supported + */ + if (coal->use_adaptive_rx_coalesce || coal->use_adaptive_tx_coalesce) + return -EOPNOTSUPP; + + /* Atlantic only supports timing based coalescing + */ + if (coal->rx_max_coalesced_frames > 1 || + coal->rx_coalesce_usecs_irq || + coal->rx_max_coalesced_frames_irq) + return -EOPNOTSUPP; + + if (coal->tx_max_coalesced_frames > 1 || + coal->tx_coalesce_usecs_irq || + coal->tx_max_coalesced_frames_irq) + return -EOPNOTSUPP; + + /* We do not support frame counting. Check this + */ + if (!(coal->rx_max_coalesced_frames == !coal->rx_coalesce_usecs)) + return -EOPNOTSUPP; + if (!(coal->tx_max_coalesced_frames == !coal->tx_coalesce_usecs)) + return -EOPNOTSUPP; + + if (coal->rx_coalesce_usecs > AQ_CFG_INTERRUPT_MODERATION_USEC_MAX || + coal->tx_coalesce_usecs > AQ_CFG_INTERRUPT_MODERATION_USEC_MAX) + return -EINVAL; + + cfg->itr = AQ_CFG_INTERRUPT_MODERATION_ON; + + cfg->rx_itr = coal->rx_coalesce_usecs; + cfg->tx_itr = coal->tx_coalesce_usecs; + + return aq_nic_update_interrupt_moderation_settings(aq_nic); +} + const struct ethtool_ops aq_ethtool_ops = { .get_link = aq_ethtool_get_link, .get_regs_len = aq_ethtool_get_regs_len, @@ -259,4 +298,6 @@ const struct ethtool_ops aq_ethtool_ops = { .get_ethtool_stats = aq_ethtool_stats, .get_link_ksettings = aq_ethtool_get_link_ksettings, .set_link_ksettings = aq_ethtool_set_link_ksettings, + .get_coalesce = aq_ethtool_get_coalesce, + .set_coalesce = aq_ethtool_set_coalesce, }; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index bf9b3f020e10..0207927dc8a6 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -151,8 +151,7 @@ struct aq_hw_ops { [ETH_ALEN], u32 count); - int (*hw_interrupt_moderation_set)(struct aq_hw_s *self, - bool itr_enabled); + int (*hw_interrupt_moderation_set)(struct aq_hw_s *self); int (*hw_rss_set)(struct aq_hw_s *self, struct aq_rss_parameters *rss_params); @@ -163,6 +162,8 @@ struct aq_hw_ops { int (*hw_get_regs)(struct aq_hw_s *self, struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff); + int (*hw_update_stats)(struct aq_hw_s *self); + int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data, unsigned int *p_count); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 0a5bb4114eb4..483e97691eea 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -16,6 +16,7 @@ #include "aq_pci_func.h" #include "aq_nic_internal.h" +#include <linux/moduleparam.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/timer.h> @@ -24,6 +25,18 @@ #include <linux/tcp.h> #include <net/ip.h> +static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO; +module_param_named(aq_itr, aq_itr, uint, 0644); +MODULE_PARM_DESC(aq_itr, "Interrupt throttling mode"); + +static unsigned int aq_itr_tx; +module_param_named(aq_itr_tx, aq_itr_tx, uint, 0644); +MODULE_PARM_DESC(aq_itr_tx, "TX interrupt throttle rate"); + +static unsigned int aq_itr_rx; +module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644); +MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate"); + static void aq_nic_rss_init(struct aq_nic_s *self, 
unsigned int num_rss_queues) { struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; @@ -61,9 +74,9 @@ static void aq_nic_cfg_init_defaults(struct aq_nic_s *self) cfg->is_polling = AQ_CFG_IS_POLLING_DEF; - cfg->is_interrupt_moderation = AQ_CFG_IS_INTERRUPT_MODERATION_DEF; - cfg->itr = cfg->is_interrupt_moderation ? - AQ_CFG_INTERRUPT_MODERATION_RATE_DEF : 0U; + cfg->itr = aq_itr; + cfg->tx_itr = aq_itr_tx; + cfg->rx_itr = aq_itr_rx; cfg->is_rss = AQ_CFG_IS_RSS_DEF; cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF; @@ -126,10 +139,12 @@ static int aq_nic_update_link_status(struct aq_nic_s *self) if (err) return err; - if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) + if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) { pr_info("%s: link change old %d new %d\n", AQ_CFG_DRV_NAME, self->link_status.mbps, self->aq_hw->aq_link_status.mbps); + aq_nic_update_interrupt_moderation_settings(self); + } self->link_status = self->aq_hw->aq_link_status; if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) { @@ -164,8 +179,8 @@ static void aq_nic_service_timer_cb(unsigned long param) if (err) goto err_exit; - self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw, - self->aq_nic_cfg.is_interrupt_moderation); + if (self->aq_hw_ops.hw_update_stats) + self->aq_hw_ops.hw_update_stats(self->aq_hw); memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); @@ -334,6 +349,7 @@ struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev) } if (netif_running(ndev)) netif_tx_disable(ndev); + netif_carrier_off(self->ndev); for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs; self->aq_vecs++) { @@ -421,9 +437,8 @@ int aq_nic_start(struct aq_nic_s *self) if (err < 0) goto err_exit; - err = self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw, - self->aq_nic_cfg.is_interrupt_moderation); - if (err < 0) + err = aq_nic_update_interrupt_moderation_settings(self); + if (err) goto err_exit; setup_timer(&self->service_timer, &aq_nic_service_timer_cb, (unsigned long)self); @@ -645,6 +660,11 @@ err_exit: return err; } +int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self) +{ + return self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw); +} + int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags) { int err = 0; @@ -899,6 +919,7 @@ int aq_nic_stop(struct aq_nic_s *self) unsigned int i = 0U; netif_tx_disable(self->ndev); + netif_carrier_off(self->ndev); del_timer_sync(&self->service_timer); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index 0ddd556ff901..4309983acdd6 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -40,6 +40,8 @@ struct aq_nic_cfg_s { u32 vecs; /* vecs==allocated irqs */ u32 irq_type; u32 itr; + u16 rx_itr; + u16 tx_itr; u32 num_rss_queues; u32 mtu; u32 ucp_0x364; @@ -49,7 +51,6 @@ struct aq_nic_cfg_s { u16 is_mc_list_enabled; u16 mc_list_count; bool is_autoneg; - bool is_interrupt_moderation; bool is_polling; bool is_rss; bool is_lro; @@ -104,5 +105,6 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self, struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self); u32 aq_nic_get_fw_version(struct aq_nic_s *self); int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg); +int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self); #endif /* AQ_NIC_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c 
b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index 4c6c882c6a1c..cadaa646c89f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -85,6 +85,7 @@ int aq_pci_func_init(struct aq_pci_func_s *self) int err = 0; unsigned int bar = 0U; unsigned int port = 0U; + unsigned int numvecs = 0U; err = pci_enable_device(self->pdev); if (err < 0) @@ -142,10 +143,12 @@ int aq_pci_func_init(struct aq_pci_func_s *self) } } - /*enable interrupts */ + numvecs = min((u8)AQ_CFG_VECS_DEF, self->aq_hw_caps.msix_irqs); + numvecs = min(numvecs, num_online_cpus()); + + /* enable interrupts */ #if !AQ_CFG_FORCE_LEGACY_INT - err = pci_alloc_irq_vectors(self->pdev, self->aq_hw_caps.msix_irqs, - self->aq_hw_caps.msix_irqs, PCI_IRQ_MSIX); + err = pci_alloc_irq_vectors(self->pdev, numvecs, numvecs, PCI_IRQ_MSIX); if (err < 0) { err = pci_alloc_irq_vectors(self->pdev, 1, 1, @@ -153,7 +156,7 @@ int aq_pci_func_init(struct aq_pci_func_s *self) if (err < 0) goto err_exit; } -#endif +#endif /* AQ_CFG_FORCE_LEGACY_INT */ /* net device init */ for (port = 0; port < self->ports; ++port) { @@ -265,6 +268,9 @@ void aq_pci_func_free(struct aq_pci_func_s *self) aq_nic_ndev_free(self->port[port]); } + if (self->mmio) + iounmap(self->mmio); + kfree(self); err_exit:; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index 305ff8ffac2c..5fecc9a099ef 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -373,8 +373,11 @@ int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count) memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); aq_vec_add_stats(self, &stats_rx, &stats_tx); + /* This data should mimic aq_ethtool_queue_stat_names structure + */ data[count] += stats_rx.packets; data[++count] += stats_tx.packets; + data[++count] += stats_tx.queue_restarts; data[++count] += stats_rx.jumbo_packets; data[++count] += stats_rx.lro_packets; data[++count] += stats_rx.errors; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index c5a02df7a48b..07b3c49a16a4 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -765,24 +765,23 @@ err_exit: return err; } -static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self, - bool itr_enabled) +static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self) { unsigned int i = 0U; + u32 itr_rx; - if (itr_enabled && self->aq_nic_cfg->itr) { - if (self->aq_nic_cfg->itr != 0xFFFFU) { + if (self->aq_nic_cfg->itr) { + if (self->aq_nic_cfg->itr != AQ_CFG_INTERRUPT_MODERATION_AUTO) { u32 itr_ = (self->aq_nic_cfg->itr >> 1); itr_ = min(AQ_CFG_IRQ_MASK, itr_); - PHAL_ATLANTIC_A0->itr_rx = 0x80000000U | - (itr_ << 0x10); + itr_rx = 0x80000000U | (itr_ << 0x10); } else { u32 n = 0xFFFFU & aq_hw_read_reg(self, 0x00002A00U); if (n < self->aq_link_status.mbps) { - PHAL_ATLANTIC_A0->itr_rx = 0U; + itr_rx = 0U; } else { static unsigned int hw_timers_tbl_[] = { 0x01CU, /* 10Gbit */ @@ -797,8 +796,7 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self, hw_atl_utils_mbps_2_speed_index( self->aq_link_status.mbps); - PHAL_ATLANTIC_A0->itr_rx = - 0x80000000U | + itr_rx = 0x80000000U | (hw_timers_tbl_[speed_index] << 0x10U); } @@ -806,11 +804,11 @@ static int 
hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self, aq_hw_write_reg(self, 0x00002A00U, 0x8D000000U); } } else { - PHAL_ATLANTIC_A0->itr_rx = 0U; + itr_rx = 0U; } for (i = HW_ATL_A0_RINGS_MAX; i--;) - reg_irq_thr_set(self, PHAL_ATLANTIC_A0->itr_rx, i); + reg_irq_thr_set(self, itr_rx, i); return aq_hw_err_from_flags(self); } @@ -885,6 +883,7 @@ static struct aq_hw_ops hw_atl_ops_ = { .hw_rss_set = hw_atl_a0_hw_rss_set, .hw_rss_hash_set = hw_atl_a0_hw_rss_hash_set, .hw_get_regs = hw_atl_utils_hw_get_regs, + .hw_update_stats = hw_atl_utils_update_stats, .hw_get_hw_stats = hw_atl_utils_get_hw_stats, .hw_get_fw_version = hw_atl_utils_get_fw_version, }; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 21784cc39dab..ec68c20efcbd 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -788,39 +788,45 @@ err_exit: return err; } -static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self, - bool itr_enabled) +static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self) { unsigned int i = 0U; + u32 itr_tx = 2U; + u32 itr_rx = 2U; - if (itr_enabled && self->aq_nic_cfg->itr) { + switch (self->aq_nic_cfg->itr) { + case AQ_CFG_INTERRUPT_MODERATION_ON: + case AQ_CFG_INTERRUPT_MODERATION_AUTO: tdm_tx_desc_wr_wb_irq_en_set(self, 0U); tdm_tdm_intr_moder_en_set(self, 1U); rdm_rx_desc_wr_wb_irq_en_set(self, 0U); rdm_rdm_intr_moder_en_set(self, 1U); - PHAL_ATLANTIC_B0->itr_tx = 2U; - PHAL_ATLANTIC_B0->itr_rx = 2U; + if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) { + /* HW timers are in 2us units */ + int tx_max_timer = self->aq_nic_cfg->tx_itr / 2; + int tx_min_timer = tx_max_timer / 2; - if (self->aq_nic_cfg->itr != 0xFFFFU) { - unsigned int max_timer = self->aq_nic_cfg->itr / 2U; - unsigned int min_timer = self->aq_nic_cfg->itr / 32U; + int rx_max_timer = self->aq_nic_cfg->rx_itr / 2; + int rx_min_timer = rx_max_timer / 2; - max_timer = min(0x1FFU, max_timer); - min_timer = min(0xFFU, min_timer); + tx_max_timer = min(HW_ATL_INTR_MODER_MAX, tx_max_timer); + tx_min_timer = min(HW_ATL_INTR_MODER_MIN, tx_min_timer); + rx_max_timer = min(HW_ATL_INTR_MODER_MAX, rx_max_timer); + rx_min_timer = min(HW_ATL_INTR_MODER_MIN, rx_min_timer); - PHAL_ATLANTIC_B0->itr_tx |= min_timer << 0x8U; - PHAL_ATLANTIC_B0->itr_tx |= max_timer << 0x10U; - PHAL_ATLANTIC_B0->itr_rx |= min_timer << 0x8U; - PHAL_ATLANTIC_B0->itr_rx |= max_timer << 0x10U; + itr_tx |= tx_min_timer << 0x8U; + itr_tx |= tx_max_timer << 0x10U; + itr_rx |= rx_min_timer << 0x8U; + itr_rx |= rx_max_timer << 0x10U; } else { static unsigned int hw_atl_b0_timers_table_tx_[][2] = { - {0xffU, 0xffU}, /* 10Gbit */ - {0xffU, 0x1ffU}, /* 5Gbit */ - {0xffU, 0x1ffU}, /* 5Gbit 5GS */ - {0xffU, 0x1ffU}, /* 2.5Gbit */ - {0xffU, 0x1ffU}, /* 1Gbit */ - {0xffU, 0x1ffU}, /* 100Mbit */ + {0xfU, 0xffU}, /* 10Gbit */ + {0xfU, 0x1ffU}, /* 5Gbit */ + {0xfU, 0x1ffU}, /* 5Gbit 5GS */ + {0xfU, 0x1ffU}, /* 2.5Gbit */ + {0xfU, 0x1ffU}, /* 1Gbit */ + {0xfU, 0x1ffU}, /* 100Mbit */ }; static unsigned int hw_atl_b0_timers_table_rx_[][2] = { @@ -836,34 +842,36 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self, hw_atl_utils_mbps_2_speed_index( self->aq_link_status.mbps); - PHAL_ATLANTIC_B0->itr_tx |= - hw_atl_b0_timers_table_tx_[speed_index] - [0] << 0x8U; /* set min timer value */ - PHAL_ATLANTIC_B0->itr_tx |= - hw_atl_b0_timers_table_tx_[speed_index] - [1] << 0x10U; 
/* set max timer value */ - - PHAL_ATLANTIC_B0->itr_rx |= - hw_atl_b0_timers_table_rx_[speed_index] - [0] << 0x8U; /* set min timer value */ - PHAL_ATLANTIC_B0->itr_rx |= - hw_atl_b0_timers_table_rx_[speed_index] - [1] << 0x10U; /* set max timer value */ + /* Update user visible ITR settings */ + self->aq_nic_cfg->tx_itr = hw_atl_b0_timers_table_tx_ + [speed_index][1] * 2; + self->aq_nic_cfg->rx_itr = hw_atl_b0_timers_table_rx_ + [speed_index][1] * 2; + + itr_tx |= hw_atl_b0_timers_table_tx_ + [speed_index][0] << 0x8U; + itr_tx |= hw_atl_b0_timers_table_tx_ + [speed_index][1] << 0x10U; + + itr_rx |= hw_atl_b0_timers_table_rx_ + [speed_index][0] << 0x8U; + itr_rx |= hw_atl_b0_timers_table_rx_ + [speed_index][1] << 0x10U; } - } else { + break; + case AQ_CFG_INTERRUPT_MODERATION_OFF: tdm_tx_desc_wr_wb_irq_en_set(self, 1U); tdm_tdm_intr_moder_en_set(self, 0U); rdm_rx_desc_wr_wb_irq_en_set(self, 1U); rdm_rdm_intr_moder_en_set(self, 0U); - PHAL_ATLANTIC_B0->itr_tx = 0U; - PHAL_ATLANTIC_B0->itr_rx = 0U; + itr_tx = 0U; + itr_rx = 0U; + break; } for (i = HW_ATL_B0_RINGS_MAX; i--;) { - reg_tx_intr_moder_ctrl_set(self, - PHAL_ATLANTIC_B0->itr_tx, i); - reg_rx_intr_moder_ctrl_set(self, - PHAL_ATLANTIC_B0->itr_rx, i); + reg_tx_intr_moder_ctrl_set(self, itr_tx, i); + reg_rx_intr_moder_ctrl_set(self, itr_rx, i); } return aq_hw_err_from_flags(self); @@ -939,6 +947,7 @@ static struct aq_hw_ops hw_atl_ops_ = { .hw_rss_set = hw_atl_b0_hw_rss_set, .hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set, .hw_get_regs = hw_atl_utils_hw_get_regs, + .hw_update_stats = hw_atl_utils_update_stats, .hw_get_hw_stats = hw_atl_utils_get_hw_stats, .hw_get_fw_version = hw_atl_utils_get_fw_version, }; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h index fcf89e25a773..9aa2c6edfca2 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h @@ -139,6 +139,9 @@ #define HW_ATL_B0_FW_VER_EXPECTED 0x01050006U +#define HW_ATL_INTR_MODER_MAX 0x1FF +#define HW_ATL_INTR_MODER_MIN 0xFF + /* Hardware tx descriptor */ struct __packed hw_atl_txd_s { u64 buf_addr; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index bf734b32e44b..1fe016fc4bc7 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -255,6 +255,15 @@ err_exit: return err; } +int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self, + struct hw_aq_atl_utils_mbox_header *pmbox) +{ + return hw_atl_utils_fw_downld_dwords(self, + PHAL_ATLANTIC->mbox_addr, + (u32 *)(void *)pmbox, + sizeof(*pmbox) / sizeof(u32)); +} + void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, struct hw_aq_atl_utils_mbox *pmbox) { @@ -267,9 +276,6 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, if (err < 0) goto err_exit; - if (pmbox != &PHAL_ATLANTIC->mbox) - memcpy(pmbox, &PHAL_ATLANTIC->mbox, sizeof(*pmbox)); - if (IS_CHIP_FEATURE(REVISION_A0)) { unsigned int mtu = self->aq_nic_cfg ? 
self->aq_nic_cfg->mtu : 1514U; @@ -299,17 +305,17 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self, { int err = 0; u32 transaction_id = 0; + struct hw_aq_atl_utils_mbox_header mbox; if (state == MPI_RESET) { - hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox); + hw_atl_utils_mpi_read_mbox(self, &mbox); - transaction_id = PHAL_ATLANTIC->mbox.transaction_id; + transaction_id = mbox.transaction_id; AQ_HW_WAIT_FOR(transaction_id != - (hw_atl_utils_mpi_read_stats - (self, &PHAL_ATLANTIC->mbox), - PHAL_ATLANTIC->mbox.transaction_id), - 1000U, 100U); + (hw_atl_utils_mpi_read_mbox(self, &mbox), + mbox.transaction_id), + 1000U, 100U); if (err < 0) goto err_exit; } @@ -492,16 +498,51 @@ int hw_atl_utils_hw_set_power(struct aq_hw_s *self, return 0; } +int hw_atl_utils_update_stats(struct aq_hw_s *self) +{ + struct hw_atl_s *hw_self = PHAL_ATLANTIC; + struct hw_aq_atl_utils_mbox mbox; + + if (!self->aq_link_status.mbps) + return 0; + + hw_atl_utils_mpi_read_stats(self, &mbox); + +#define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \ + mbox.stats._N_ - hw_self->last_stats._N_) + + AQ_SDELTA(uprc); + AQ_SDELTA(mprc); + AQ_SDELTA(bprc); + AQ_SDELTA(erpt); + + AQ_SDELTA(uptc); + AQ_SDELTA(mptc); + AQ_SDELTA(bptc); + AQ_SDELTA(erpr); + + AQ_SDELTA(ubrc); + AQ_SDELTA(ubtc); + AQ_SDELTA(mbrc); + AQ_SDELTA(mbtc); + AQ_SDELTA(bbrc); + AQ_SDELTA(bbtc); + AQ_SDELTA(dpc); + +#undef AQ_SDELTA + + memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats)); + + return 0; +} + int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, u64 *data, unsigned int *p_count) { - struct hw_atl_stats_s *stats = NULL; + struct hw_atl_s *hw_self = PHAL_ATLANTIC; + struct hw_atl_stats_s *stats = &hw_self->curr_stats; int i = 0; - hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox); - - stats = &PHAL_ATLANTIC->mbox.stats; - data[i] = stats->uprc + stats->mprc + stats->bprc; data[++i] = stats->uprc; data[++i] = stats->mprc; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h index e0360a6b2202..c99cc690e425 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h @@ -115,19 +115,22 @@ struct __packed hw_aq_atl_utils_fw_rpc { }; }; -struct __packed hw_aq_atl_utils_mbox { +struct __packed hw_aq_atl_utils_mbox_header { u32 version; u32 transaction_id; - int error; + u32 error; +}; + +struct __packed hw_aq_atl_utils_mbox { + struct hw_aq_atl_utils_mbox_header header; struct hw_atl_stats_s stats; }; struct __packed hw_atl_s { struct aq_hw_s base; - struct hw_aq_atl_utils_mbox mbox; + struct hw_atl_stats_s last_stats; + struct hw_atl_stats_s curr_stats; u64 speed; - u32 itr_tx; - u32 itr_rx; unsigned int chip_features; u32 fw_ver_actual; atomic_t dpc; @@ -170,6 +173,9 @@ enum hal_atl_utils_fw_state_e { void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p); +int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self, + struct hw_aq_atl_utils_mbox_header *pmbox); + void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, struct hw_aq_atl_utils_mbox *pmbox); @@ -199,6 +205,8 @@ int hw_atl_utils_hw_deinit(struct aq_hw_s *self); int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version); +int hw_atl_utils_update_stats(struct aq_hw_s *self); + int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, u64 *data, unsigned int *p_count); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 
b31bdec26fce..24d55724ceff 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -215,6 +215,8 @@ static const u16 bnxt_async_events_arr[] = { ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE, }; +static struct workqueue_struct *bnxt_pf_wq; + static bool bnxt_vf_pciid(enum board_idx idx) { return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF); @@ -1025,12 +1027,28 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi, return 0; } +static void bnxt_queue_sp_work(struct bnxt *bp) +{ + if (BNXT_PF(bp)) + queue_work(bnxt_pf_wq, &bp->sp_task); + else + schedule_work(&bp->sp_task); +} + +static void bnxt_cancel_sp_work(struct bnxt *bp) +{ + if (BNXT_PF(bp)) + flush_workqueue(bnxt_pf_wq); + else + cancel_work_sync(&bp->sp_task); +} + static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) { if (!rxr->bnapi->in_reset) { rxr->bnapi->in_reset = true; set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); - schedule_work(&bp->sp_task); + bnxt_queue_sp_work(bp); } rxr->rx_next_cons = 0xffff; } @@ -1718,7 +1736,7 @@ static int bnxt_async_event_process(struct bnxt *bp, default: goto async_event_process_exit; } - schedule_work(&bp->sp_task); + bnxt_queue_sp_work(bp); async_event_process_exit: bnxt_ulp_async_events(bp, cmpl); return 0; @@ -1752,7 +1770,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap); set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event); - schedule_work(&bp->sp_task); + bnxt_queue_sp_work(bp); break; case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: @@ -3449,6 +3467,12 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false); } +int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len, + int timeout) +{ + return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true); +} + int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) { int rc; @@ -6328,7 +6352,9 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) } if (link_re_init) { + mutex_lock(&bp->link_lock); rc = bnxt_update_phy_setting(bp); + mutex_unlock(&bp->link_lock); if (rc) netdev_warn(bp->dev, "failed to update phy settings\n"); } @@ -6648,7 +6674,7 @@ static void bnxt_set_rx_mode(struct net_device *dev) vnic->rx_mask = mask; set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event); - schedule_work(&bp->sp_task); + bnxt_queue_sp_work(bp); } } @@ -6921,7 +6947,7 @@ static void bnxt_tx_timeout(struct net_device *dev) netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); - schedule_work(&bp->sp_task); + bnxt_queue_sp_work(bp); } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -6953,7 +6979,7 @@ static void bnxt_timer(unsigned long data) if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) && bp->stats_coal_ticks) { set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); - schedule_work(&bp->sp_task); + bnxt_queue_sp_work(bp); } bnxt_restart_timer: mod_timer(&bp->timer, jiffies + bp->current_interval); @@ -7026,30 +7052,28 @@ static void bnxt_sp_task(struct work_struct *work) if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) bnxt_hwrm_port_qstats(bp); - /* These functions below will clear BNXT_STATE_IN_SP_TASK. They - * must be the last functions to be called before exiting. 
- */ if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { - int rc = 0; + int rc; + mutex_lock(&bp->link_lock); if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event)) bnxt_hwrm_phy_qcaps(bp); - bnxt_rtnl_lock_sp(bp); - if (test_bit(BNXT_STATE_OPEN, &bp->state)) - rc = bnxt_update_link(bp, true); - bnxt_rtnl_unlock_sp(bp); + rc = bnxt_update_link(bp, true); + mutex_unlock(&bp->link_lock); if (rc) netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", rc); } if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { - bnxt_rtnl_lock_sp(bp); - if (test_bit(BNXT_STATE_OPEN, &bp->state)) - bnxt_get_port_module_status(bp); - bnxt_rtnl_unlock_sp(bp); + mutex_lock(&bp->link_lock); + bnxt_get_port_module_status(bp); + mutex_unlock(&bp->link_lock); } + /* These functions below will clear BNXT_STATE_IN_SP_TASK. They + * must be the last functions to be called before exiting. + */ if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) bnxt_reset(bp, false); @@ -7457,7 +7481,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, spin_unlock_bh(&bp->ntp_fltr_lock); set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); - schedule_work(&bp->sp_task); + bnxt_queue_sp_work(bp); return new_fltr->sw_id; @@ -7540,7 +7564,7 @@ static void bnxt_udp_tunnel_add(struct net_device *dev, if (bp->vxlan_port_cnt == 1) { bp->vxlan_port = ti->port; set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); - schedule_work(&bp->sp_task); + bnxt_queue_sp_work(bp); } break; case UDP_TUNNEL_TYPE_GENEVE: @@ -7557,7 +7581,7 @@ static void bnxt_udp_tunnel_add(struct net_device *dev, return; } - schedule_work(&bp->sp_task); + bnxt_queue_sp_work(bp); } static void bnxt_udp_tunnel_del(struct net_device *dev, @@ -7596,7 +7620,7 @@ static void bnxt_udp_tunnel_del(struct net_device *dev, return; } - schedule_work(&bp->sp_task); + bnxt_queue_sp_work(bp); } static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, @@ -7744,7 +7768,7 @@ static void bnxt_remove_one(struct pci_dev *pdev) pci_disable_pcie_error_reporting(pdev); unregister_netdev(dev); bnxt_shutdown_tc(bp); - cancel_work_sync(&bp->sp_task); + bnxt_cancel_sp_work(bp); bp->sp_event = 0; bnxt_clear_int_mode(bp); @@ -7772,6 +7796,7 @@ static int bnxt_probe_phy(struct bnxt *bp) rc); return rc; } + mutex_init(&bp->link_lock); rc = bnxt_update_link(bp, false); if (rc) { @@ -7970,7 +7995,7 @@ static void bnxt_parse_log_pcie_link(struct bnxt *bp) enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN; enum pci_bus_speed speed = PCI_SPEED_UNKNOWN; - if (pcie_get_minimum_link(bp->pdev, &speed, &width) || + if (pcie_get_minimum_link(pci_physfn(bp->pdev), &speed, &width) || speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) netdev_info(bp->dev, "Failed to determine PCIe Link Info\n"); else @@ -8162,8 +8187,17 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) else device_set_wakeup_capable(&pdev->dev, false); - if (BNXT_PF(bp)) + if (BNXT_PF(bp)) { + if (!bnxt_pf_wq) { + bnxt_pf_wq = + create_singlethread_workqueue("bnxt_pf_wq"); + if (!bnxt_pf_wq) { + dev_err(&pdev->dev, "Unable to create workqueue.\n"); + goto init_err_pci_clean; + } + } bnxt_init_tc(bp); + } rc = register_netdev(dev); if (rc) @@ -8399,4 +8433,17 @@ static struct pci_driver bnxt_pci_driver = { #endif }; -module_pci_driver(bnxt_pci_driver); +static int __init bnxt_init(void) +{ + return pci_register_driver(&bnxt_pci_driver); +} + +static void __exit bnxt_exit(void) +{ + 
pci_unregister_driver(&bnxt_pci_driver); + if (bnxt_pf_wq) + destroy_workqueue(bnxt_pf_wq); +} + +module_init(bnxt_init); +module_exit(bnxt_exit); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 7b888d4b2b55..c911e69ff25f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1290,6 +1290,10 @@ struct bnxt { unsigned long *ntp_fltr_bmap; int ntp_fltr_count; + /* To protect link related settings during link changes and + * ethtool settings changes. + */ + struct mutex link_lock; struct bnxt_link_info link_info; struct ethtool_eee eee; u32 lpi_tmr_lo; @@ -1358,6 +1362,7 @@ void bnxt_set_ring_params(struct bnxt *); int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode); void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16); int _hwrm_send_message(struct bnxt *, void *, u32, int); +int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 len, int timeout); int hwrm_send_message(struct bnxt *, void *, u32, int); int hwrm_send_message_silent(struct bnxt *, void *, u32, int); int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index aa1f3a2c7a78..fed37cd9ae1d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -50,7 +50,9 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets) bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1); req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (!rc) { u8 *pri2cos = &resp->pri0_cos_queue_id; int i, j; @@ -66,6 +68,7 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets) } } } + mutex_unlock(&bp->hwrm_cmd_lock); return rc; } @@ -119,9 +122,13 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets) int rc, i; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) { + mutex_unlock(&bp->hwrm_cmd_lock); return rc; + } data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id); for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) { @@ -143,6 +150,7 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets) } } } + mutex_unlock(&bp->hwrm_cmd_lock); return 0; } @@ -240,12 +248,17 @@ static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc) int rc; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) { + mutex_unlock(&bp->hwrm_cmd_lock); return rc; + } pri_mask = le32_to_cpu(resp->flags); pfc->pfc_en = pri_mask; + mutex_unlock(&bp->hwrm_cmd_lock); return 0; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 8eff05a3e0e4..3cbe771b3352 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ 
-1052,6 +1052,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev, u32 ethtool_speed; ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported); + mutex_lock(&bp->link_lock); bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings); ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising); @@ -1099,6 +1100,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev, base->port = PORT_FIBRE; } base->phy_address = link_info->phy_addr; + mutex_unlock(&bp->link_lock); return 0; } @@ -1190,6 +1192,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev, if (!BNXT_SINGLE_PF(bp)) return -EOPNOTSUPP; + mutex_lock(&bp->link_lock); if (base->autoneg == AUTONEG_ENABLE) { BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings, advertising); @@ -1234,6 +1237,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev, rc = bnxt_hwrm_set_link_setting(bp, set_pause, false); set_setting_exit: + mutex_unlock(&bp->link_lock); return rc; } @@ -1805,7 +1809,8 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, req.dir_ordinal = cpu_to_le16(ordinal); req.dir_ext = cpu_to_le16(ext); req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ; - rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc == 0) { if (index) *index = le16_to_cpu(output->dir_idx); @@ -1814,6 +1819,7 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, if (data_length) *data_length = le32_to_cpu(output->dir_data_length); } + mutex_unlock(&bp->hwrm_cmd_lock); return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index d37925a8a65b..5ee18660bc33 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -502,6 +502,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) int rc = 0, vfs_supported; int min_rx_rings, min_tx_rings, min_rss_ctxs; int tx_ok = 0, rx_ok = 0, rss_ok = 0; + int avail_cp, avail_stat; /* Check if we can enable requested num of vf's. At a mininum * we require 1 RX 1 TX rings for each VF. 
In this minimum conf @@ -509,6 +510,10 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) */ vfs_supported = *num_vfs; + avail_cp = bp->pf.max_cp_rings - bp->cp_nr_rings; + avail_stat = bp->pf.max_stat_ctxs - bp->num_stat_ctxs; + avail_cp = min_t(int, avail_cp, avail_stat); + while (vfs_supported) { min_rx_rings = vfs_supported; min_tx_rings = vfs_supported; @@ -523,10 +528,12 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) min_rx_rings) rx_ok = 1; } - if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings) + if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings || + avail_cp < min_rx_rings) rx_ok = 0; - if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings) + if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings && + avail_cp >= min_tx_rings) tx_ok = 1; if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 963803bc6633..eafae3eb4fed 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1847,7 +1847,7 @@ static int liquidio_ptp_settime(struct ptp_clock_info *ptp, struct lio *lio = container_of(ptp, struct lio, ptp_info); struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; - ns = timespec_to_ns(ts); + ns = timespec64_to_ns(ts); spin_lock_irqsave(&lio->ptp_lock, flags); lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index b991703319f9..11eba8277132 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1110,11 +1110,12 @@ static int build_hdr_data(u8 hdr_field, struct sk_buff *skb, * places them in a descriptor array, scrq_arr */ -static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len, - union sub_crq *scrq_arr) +static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len, + union sub_crq *scrq_arr) { union sub_crq hdr_desc; int tmp_len = len; + int num_descs = 0; u8 *data, *cur; int tmp; @@ -1143,7 +1144,10 @@ static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len, tmp_len -= tmp; *scrq_arr = hdr_desc; scrq_arr++; + num_descs++; } + + return num_descs; } /** @@ -1161,16 +1165,12 @@ static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff, int *num_entries, u8 hdr_field) { int hdr_len[3] = {0, 0, 0}; - int tot_len, len; + int tot_len; u8 *hdr_data = txbuff->hdr_data; tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len, txbuff->hdr_data); - len = tot_len; - len -= 24; - if (len > 0) - num_entries += len % 29 ? 
len / 29 + 1 : len / 29; - create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len, + *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len, txbuff->indir_arr + 1); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 151d9cfb6ea4..0ccab0a5d717 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -298,7 +298,7 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, } /** - * __i40e_read_nvm_word - Reads nvm word, assumes called does the locking + * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) * @data: word read from the Shadow RAM diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index a23306f04e00..edbc94c4353d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1038,6 +1038,32 @@ reset_latency: } /** + * i40e_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + **/ +static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *old_buff) +{ + struct i40e_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_bi[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* transfer page from old buffer to new buffer */ + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; +} + +/** * i40e_rx_is_programming_status - check for programming status descriptor * @qw: qword representing status_error_len in CPU ordering * @@ -1071,15 +1097,24 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring, union i40e_rx_desc *rx_desc, u64 qw) { - u32 ntc = rx_ring->next_to_clean + 1; + struct i40e_rx_buffer *rx_buffer; + u32 ntc = rx_ring->next_to_clean; u8 id; /* fetch, update, and store next to clean */ + rx_buffer = &rx_ring->rx_bi[ntc++]; ntc = (ntc < rx_ring->count) ? ntc : 0; rx_ring->next_to_clean = ntc; prefetch(I40E_RX_DESC(rx_ring, ntc)); + /* place unused page back on the ring */ + i40e_reuse_rx_page(rx_ring, rx_buffer); + rx_ring->rx_stats.page_reuse_count++; + + /* clear contents of buffer_info */ + rx_buffer->page = NULL; + id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >> I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT; @@ -1648,32 +1683,6 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb, } /** - * i40e_reuse_rx_page - page flip buffer and store it back on the ring - * @rx_ring: rx descriptor ring to store buffers on - * @old_buff: donor buffer to have page reused - * - * Synchronizes page for reuse by the adapter - **/ -static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, - struct i40e_rx_buffer *old_buff) -{ - struct i40e_rx_buffer *new_buff; - u16 nta = rx_ring->next_to_alloc; - - new_buff = &rx_ring->rx_bi[nta]; - - /* update, and store next to alloc */ - nta++; - rx_ring->next_to_alloc = (nta < rx_ring->count) ? 
nta : 0; - - /* transfer page from old buffer to new buffer */ - new_buff->dma = old_buff->dma; - new_buff->page = old_buff->page; - new_buff->page_offset = old_buff->page_offset; - new_buff->pagecnt_bias = old_buff->pagecnt_bias; -} - -/** * i40e_page_is_reusable - check if any reuse is possible * @page: page struct to check * diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 9d5e7cf288be..f3315bc874ad 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -96,6 +96,7 @@ struct mlxsw_core { const struct mlxsw_bus *bus; void *bus_priv; const struct mlxsw_bus_info *bus_info; + struct workqueue_struct *emad_wq; struct list_head rx_listener_list; struct list_head event_listener_list; struct { @@ -465,7 +466,7 @@ static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans) { unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS); - mlxsw_core_schedule_dw(&trans->timeout_dw, timeout); + queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout); } static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core, @@ -587,12 +588,18 @@ static const struct mlxsw_listener mlxsw_emad_rx_listener = static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) { + struct workqueue_struct *emad_wq; u64 tid; int err; if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) return 0; + emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0); + if (!emad_wq) + return -ENOMEM; + mlxsw_core->emad_wq = emad_wq; + /* Set the upper 32 bits of the transaction ID field to a random * number. This allows us to discard EMADs addressed to other * devices. @@ -619,6 +626,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) err_emad_trap_set: mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener, mlxsw_core); + destroy_workqueue(mlxsw_core->emad_wq); return err; } @@ -631,6 +639,7 @@ static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core) mlxsw_core->emad.use_emad = false; mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener, mlxsw_core); + destroy_workqueue(mlxsw_core->emad_wq); } static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core, diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index d44e673a4c4e..a3f31f425550 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -6778,6 +6778,36 @@ static inline void mlxsw_reg_mgpc_pack(char *payload, u32 counter_index, mlxsw_reg_mgpc_opcode_set(payload, opcode); } +/* TIGCR - Tunneling IPinIP General Configuration Register + * ------------------------------------------------------- + * The TIGCR register is used for setting up the IPinIP Tunnel configuration. + */ +#define MLXSW_REG_TIGCR_ID 0xA801 +#define MLXSW_REG_TIGCR_LEN 0x10 + +MLXSW_REG_DEFINE(tigcr, MLXSW_REG_TIGCR_ID, MLXSW_REG_TIGCR_LEN); + +/* reg_tigcr_ipip_ttlc + * For IPinIP Tunnel encapsulation: whether to copy the ttl from the packet + * header. + * Access: RW + */ +MLXSW_ITEM32(reg, tigcr, ttlc, 0x04, 8, 1); + +/* reg_tigcr_ipip_ttl_uc + * The TTL for IPinIP Tunnel encapsulation of unicast packets if + * reg_tigcr_ipip_ttlc is unset. 
+ * Access: RW + */ +MLXSW_ITEM32(reg, tigcr, ttl_uc, 0x04, 0, 8); + +static inline void mlxsw_reg_tigcr_pack(char *payload, bool ttlc, u8 ttl_uc) +{ + MLXSW_REG_ZERO(tigcr, payload); + mlxsw_reg_tigcr_ttlc_set(payload, ttlc); + mlxsw_reg_tigcr_ttl_uc_set(payload, ttl_uc); +} + /* SBPR - Shared Buffer Pools Register * ----------------------------------- * The SBPR configures and retrieves the shared buffer pools and configuration. @@ -7262,6 +7292,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(mcc), MLXSW_REG(mcda), MLXSW_REG(mgpc), + MLXSW_REG(tigcr), MLXSW_REG(sbpr), MLXSW_REG(sbcm), MLXSW_REG(sbpm), diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 12d471d2a90b..5f2d100e3718 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -6432,11 +6432,20 @@ static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp) kfree(mlxsw_sp->router->rifs); } +static int +mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp) +{ + char tigcr_pl[MLXSW_REG_TIGCR_LEN]; + + mlxsw_reg_tigcr_pack(tigcr_pl, true, 0); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl); +} + static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp) { mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr; INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list); - return 0; + return mlxsw_sp_ipip_config_tigcr(mlxsw_sp); } static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index d2f73feb8497..2c9109b09faf 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1180,10 +1180,14 @@ static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr) { void *frag; - if (!dp->xdp_prog) + if (!dp->xdp_prog) { frag = netdev_alloc_frag(dp->fl_bufsz); - else - frag = page_address(alloc_page(GFP_KERNEL | __GFP_COLD)); + } else { + struct page *page; + + page = alloc_page(GFP_KERNEL | __GFP_COLD); + frag = page ? page_address(page) : NULL; + } if (!frag) { nn_dp_warn(dp, "Failed to alloc receive page frag\n"); return NULL; @@ -1203,10 +1207,14 @@ static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr) { void *frag; - if (!dp->xdp_prog) + if (!dp->xdp_prog) { frag = napi_alloc_frag(dp->fl_bufsz); - else - frag = page_address(alloc_page(GFP_ATOMIC | __GFP_COLD)); + } else { + struct page *page; + + page = alloc_page(GFP_ATOMIC | __GFP_COLD); + frag = page ? 
page_address(page) : NULL; + } if (!frag) { nn_dp_warn(dp, "Failed to alloc receive page frag\n"); return NULL; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 07969f06df10..dc016dfec64d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -464,7 +464,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data) do { start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync); - *data++ = nn->r_vecs[i].rx_pkts; + data[0] = nn->r_vecs[i].rx_pkts; tmp[0] = nn->r_vecs[i].hw_csum_rx_ok; tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok; tmp[2] = nn->r_vecs[i].hw_csum_rx_error; @@ -472,14 +472,16 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data) do { start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync); - *data++ = nn->r_vecs[i].tx_pkts; - *data++ = nn->r_vecs[i].tx_busy; + data[1] = nn->r_vecs[i].tx_pkts; + data[2] = nn->r_vecs[i].tx_busy; tmp[3] = nn->r_vecs[i].hw_csum_tx; tmp[4] = nn->r_vecs[i].hw_csum_tx_inner; tmp[5] = nn->r_vecs[i].tx_gather; tmp[6] = nn->r_vecs[i].tx_lso; } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start)); + data += 3; + for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++) gathered_stats[j] += tmp[j]; } diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index e03fcf914690..a3c949ea7d1a 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -8491,8 +8491,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rtl8168_driver_start(tp); } - device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL); - if (pci_dev_run_wake(pdev)) pm_runtime_put_noidle(&pdev->dev); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index e0ef02f9503b..4b286e27c4ca 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -275,7 +275,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats) goto exit; i++; - } while ((ret == 1) || (i < 10)); + } while ((ret == 1) && (i < 10)); if (i == 10) ret = -EBUSY; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c index 67af0bdd7f10..7516ca210855 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c @@ -34,7 +34,7 @@ int dwmac_dma_reset(void __iomem *ioaddr) err = readl_poll_timeout(ioaddr + DMA_BUS_MODE, value, !(value & DMA_BUS_MODE_SFT_RESET), - 100000, 10000); + 10000, 100000); if (err) return -EBUSY; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 0e1b0a3d7b76..c7a894ead274 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -473,19 +473,18 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, struct dma_desc *np, struct sk_buff *skb) { struct skb_shared_hwtstamps *shhwtstamp = NULL; + struct dma_desc *desc = p; u64 ns; if (!priv->hwts_rx_en) return; + /* For GMAC4, the valid timestamp is from CTX next desc. */ + if (priv->plat->has_gmac4) + desc = np; /* Check if timestamp is available */ - if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) { - /* For GMAC4, the valid timestamp is from CTX next desc. 
*/ - if (priv->plat->has_gmac4) - ns = priv->hw->desc->get_timestamp(np, priv->adv_ts); - else - ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); - + if (priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) { + ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts); netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); shhwtstamp = skb_hwtstamps(skb); memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); @@ -1815,12 +1814,13 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) { struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; unsigned int bytes_compl = 0, pkts_compl = 0; - unsigned int entry = tx_q->dirty_tx; + unsigned int entry; netif_tx_lock(priv->dev); priv->xstats.tx_clean++; + entry = tx_q->dirty_tx; while (entry != tx_q->cur_tx) { struct sk_buff *skb = tx_q->tx_skbuff[entry]; struct dma_desc *p; @@ -3358,6 +3358,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) * them in stmmac_rx_refill() function so that * device can reuse it. */ + dev_kfree_skb_any(rx_q->rx_skbuff[entry]); rx_q->rx_skbuff[entry] = NULL; dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[entry], diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 01f7355ad277..5ec39f113127 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -113,13 +113,7 @@ static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni) static bool eq_tun_id_and_vni(u8 *tun_id, u8 *vni) { -#ifdef __BIG_ENDIAN - return (vni[0] == tun_id[2]) && - (vni[1] == tun_id[1]) && - (vni[2] == tun_id[0]); -#else return !memcmp(vni, &tun_id[5], 3); -#endif } static sa_family_t geneve_get_sk_family(struct geneve_sock *gs) diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 8948b6adc0c5..2c98152d1e1b 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -743,6 +743,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb, sg_init_table(sg, ret); ret = skb_to_sgvec(skb, sg, 0, skb->len); if (unlikely(ret < 0)) { + aead_request_free(req); macsec_txsa_put(tx_sa); kfree_skb(skb); return ERR_PTR(ret); @@ -955,6 +956,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb, sg_init_table(sg, ret); ret = skb_to_sgvec(skb, sg, 0, skb->len); if (unlikely(ret < 0)) { + aead_request_free(req); kfree_skb(skb); return ERR_PTR(ret); } diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 2a2d058cdd40..ea29da91ea5a 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -2252,6 +2252,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) if (!dev) return -ENOMEM; + err = dev_get_valid_name(net, dev, name); + if (err) + goto err_free_dev; dev_net_set(dev, net); dev->rtnl_link_ops = &tun_link_ops; diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c index c9c711dcd0e6..a89b5685e68b 100644 --- a/drivers/net/wimax/i2400m/fw.c +++ b/drivers/net/wimax/i2400m/fw.c @@ -652,7 +652,7 @@ static int i2400m_download_chunk(struct i2400m *i2400m, const void *chunk, struct device *dev = i2400m_dev(i2400m); struct { struct i2400m_bootrom_header cmd; - u8 cmd_payload[chunk_len]; + u8 cmd_payload[]; } __packed *buf; struct i2400m_bootrom_header ack; diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 5cbe0ae55a07..d6dff347f896 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -486,7 +486,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, dev->tx_queue_len = XENVIF_QUEUE_LENGTH; - dev->min_mtu = 0; + dev->min_mtu = 
 	dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;
 	/*
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 523387e71a80..8b8689c6d887 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1316,7 +1316,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
 	netdev->features |= netdev->hw_features;
 	netdev->ethtool_ops = &xennet_ethtool_ops;
-	netdev->min_mtu = 0;
+	netdev->min_mtu = ETH_MIN_MTU;
 	netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
 	SET_NETDEV_DEV(netdev, &dev->dev);
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 260d33c0f26c..63897531cd75 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1781,8 +1781,12 @@ bool of_console_check(struct device_node *dn, char *name, int index)
 {
 	if (!dn || dn != of_stdout || console_set_on_cmdline)
 		return false;
-	return !add_preferred_console(name, index,
-				      kstrdup(of_stdout_options, GFP_KERNEL));
+
+	/*
+	 * XXX: cast `options' to char pointer to suppress complication
+	 * warnings: printk, UART and console drivers expect char pointer.
+	 */
+	return !add_preferred_console(name, index, (char *)of_stdout_options);
 }
 EXPORT_SYMBOL_GPL(of_console_check);
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index d94dd8b77abd..98258583abb0 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -44,7 +44,7 @@ static int of_get_phy_id(struct device_node *device, u32 *phy_id)
 	return -EINVAL;
 }
-static void of_mdiobus_register_phy(struct mii_bus *mdio,
+static int of_mdiobus_register_phy(struct mii_bus *mdio,
 				    struct device_node *child, u32 addr)
 {
 	struct phy_device *phy;
@@ -60,9 +60,13 @@ static void of_mdiobus_register_phy(struct mii_bus *mdio,
 	else
 		phy = get_phy_device(mdio, addr, is_c45);
 	if (IS_ERR(phy))
-		return;
+		return PTR_ERR(phy);
-	rc = irq_of_parse_and_map(child, 0);
+	rc = of_irq_get(child, 0);
+	if (rc == -EPROBE_DEFER) {
+		phy_device_free(phy);
+		return rc;
+	}
 	if (rc > 0) {
 		phy->irq = rc;
 		mdio->irq[addr] = rc;
@@ -84,22 +88,23 @@ static void of_mdiobus_register_phy(struct mii_bus *mdio,
 	if (rc) {
 		phy_device_free(phy);
 		of_node_put(child);
-		return;
+		return rc;
 	}
 	dev_dbg(&mdio->dev, "registered phy %s at address %i\n",
 		child->name, addr);
+	return 0;
 }
-static void of_mdiobus_register_device(struct mii_bus *mdio,
-				       struct device_node *child, u32 addr)
+static int of_mdiobus_register_device(struct mii_bus *mdio,
+				      struct device_node *child, u32 addr)
 {
 	struct mdio_device *mdiodev;
 	int rc;
 	mdiodev = mdio_device_create(mdio, addr);
 	if (IS_ERR(mdiodev))
-		return;
+		return PTR_ERR(mdiodev);
 	/* Associate the OF node with the device structure so it
 	 * can be looked up later.
@@ -112,11 +117,12 @@ static void of_mdiobus_register_device(struct mii_bus *mdio,
 	if (rc) {
 		mdio_device_free(mdiodev);
 		of_node_put(child);
-		return;
+		return rc;
 	}
 	dev_dbg(&mdio->dev, "registered mdio device %s at address %i\n",
 		child->name, addr);
+	return 0;
 }
 /* The following is a list of PHY compatible strings which appear in
@@ -219,9 +225,11 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
 		}
 		if (of_mdiobus_child_is_phy(child))
-			of_mdiobus_register_phy(mdio, child, addr);
+			rc = of_mdiobus_register_phy(mdio, child, addr);
 		else
-			of_mdiobus_register_device(mdio, child, addr);
+			rc = of_mdiobus_register_device(mdio, child, addr);
+		if (rc)
+			goto unregister;
 	}
 	if (!scanphys)
@@ -242,12 +250,19 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
 			dev_info(&mdio->dev, "scan phy %s at address %i\n",
 				 child->name, addr);
-			if (of_mdiobus_child_is_phy(child))
-				of_mdiobus_register_phy(mdio, child, addr);
+			if (of_mdiobus_child_is_phy(child)) {
+				rc = of_mdiobus_register_phy(mdio, child, addr);
+				if (rc)
+					goto unregister;
+			}
 		}
 	}
 	return 0;
+
+unregister:
+	mdiobus_unregister(mdio);
+	return rc;
 }
 EXPORT_SYMBOL(of_mdiobus_register);
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index d507c3569a88..32771c2ced7b 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -25,7 +25,7 @@
 #include <linux/sort.h>
 #include <linux/slab.h>
-#define MAX_RESERVED_REGIONS	16
+#define MAX_RESERVED_REGIONS	32
 static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
 static int reserved_mem_count;
diff --git a/drivers/of/property.c b/drivers/of/property.c
index fbb72116e9d4..264c355ba1ff 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -954,7 +954,7 @@ of_fwnode_graph_get_port_parent(struct fwnode_handle *fwnode)
 	struct device_node *np;
 	/* Get the parent of the port */
-	np = of_get_next_parent(to_of_node(fwnode));
+	np = of_get_parent(to_of_node(fwnode));
 	if (!np)
 		return NULL;
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index 89f4e3d072d7..26ed0c08f209 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -935,6 +935,8 @@ static int advk_pcie_probe(struct platform_device *pdev)
 	bridge->sysdata = pcie;
 	bridge->busnr = 0;
 	bridge->ops = &advk_pcie_ops;
+	bridge->map_irq = of_irq_parse_and_map_pci;
+	bridge->swizzle_irq = pci_common_swizzle;
 	ret = pci_scan_root_bus_bridge(bridge);
 	if (ret < 0) {
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 9c40da54f88a..1987fec1f126 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -233,6 +233,7 @@ struct tegra_msi {
 	struct msi_controller chip;
 	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
 	struct irq_domain *domain;
+	unsigned long pages;
 	struct mutex lock;
 	u64 phys;
 	int irq;
@@ -1529,22 +1530,9 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
 		goto err;
 	}
-	/*
-	 * The PCI host bridge on Tegra contains some logic that intercepts
-	 * MSI writes, which means that the MSI target address doesn't have
-	 * to point to actual physical memory. Rather than allocating one 4
-	 * KiB page of system memory that's never used, we can simply pick
-	 * an arbitrary address within an area reserved for system memory
-	 * in the FPCI address map.
-	 *
-	 * However, in order to avoid confusion, we pick an address that
-	 * doesn't map to physical memory. The FPCI address map reserves a
-	 * 1012 GiB region for system memory and memory-mapped I/O. Since
-	 * none of the Tegra SoCs that contain this PCI host bridge can
-	 * address more than 16 GiB of system memory, the last 4 KiB of
-	 * these 1012 GiB is a good candidate.
-	 */
-	msi->phys = 0xfcfffff000;
+	/* setup AFI/FPCI range */
+	msi->pages = __get_free_pages(GFP_KERNEL, 0);
+	msi->phys = virt_to_phys((void *)msi->pages);
 	afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
 	afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
@@ -1596,6 +1584,8 @@ static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
 	afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
 	afi_writel(pcie, 0, AFI_MSI_EN_VEC7);
+	free_pages(msi->pages, 0);
+
 	if (msi->irq > 0)
 		free_irq(msi->irq, pcie);
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 1778cf4f81c7..82cd8b08d71f 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -100,6 +100,7 @@ config PINCTRL_AMD
 	tristate "AMD GPIO pin control"
 	depends on GPIOLIB
 	select GPIOLIB_IRQCHIP
+	select PINMUX
 	select PINCONF
 	select GENERIC_PINCONF
 	help
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 0944310225db..ff782445dfb7 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -373,16 +373,12 @@ static void bcm2835_gpio_irq_handle_bank(struct bcm2835_pinctrl *pc,
 	unsigned long events;
 	unsigned offset;
 	unsigned gpio;
-	unsigned int type;
 	events = bcm2835_gpio_rd(pc, GPEDS0 + bank * 4);
 	events &= mask;
 	events &= pc->enabled_irq_map[bank];
 	for_each_set_bit(offset, &events, 32) {
 		gpio = (32 * bank) + offset;
-		/* FIXME: no clue why the code looks up the type here */
-		type = pc->irq_type[gpio];
-
 		generic_handle_irq(irq_linear_revmap(pc->gpio_chip.irqdomain,
 						     gpio));
 	}
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 04e929fd0ffe..fadbca907c7c 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1577,6 +1577,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
 	struct gpio_chip *chip = &pctrl->chip;
 	bool need_valid_mask = !dmi_check_system(chv_no_valid_mask);
 	int ret, i, offset;
+	int irq_base;
 	*chip = chv_gpio_chip;
@@ -1622,7 +1623,18 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
 	/* Clear all interrupts */
 	chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
-	ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0,
+	if (!need_valid_mask) {
+		irq_base = devm_irq_alloc_descs(pctrl->dev, -1, 0,
+						chip->ngpio, NUMA_NO_NODE);
+		if (irq_base < 0) {
+			dev_err(pctrl->dev, "Failed to allocate IRQ numbers\n");
+			return irq_base;
+		}
+	} else {
+		irq_base = 0;
+	}
+
+	ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, irq_base,
 				   handle_bad_irq, IRQ_TYPE_NONE);
 	if (ret) {
 		dev_err(pctrl->dev, "failed to add IRQ chip\n");
diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
index d0e5d6ee882c..e2c1988cd7c0 100644
--- a/drivers/ras/cec.c
+++ b/drivers/ras/cec.c
@@ -523,7 +523,7 @@ int __init parse_cec_param(char *str)
 	if (*str == '=')
 		str++;
-	if (!strncmp(str, "cec_disable", 7))
+	if (!strcmp(str, "cec_disable"))
 		ce_arr.disabled = 1;
 	else
 		return 0;
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index df63e44526ac..bf04479456a0 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -109,6 +109,7 @@ config QCOM_Q6V5_PIL
 	depends on OF && ARCH_QCOM
 	depends on QCOM_SMEM
 	depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
+	depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
 	select MFD_SYSCON
 	select QCOM_RPROC_COMMON
 	select QCOM_SCM
@@ -120,6 +121,7 @@ config QCOM_WCNSS_PIL
 	tristate "Qualcomm WCNSS Peripheral Image Loader"
 	depends on OF && ARCH_QCOM
 	depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
+	depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
 	depends on QCOM_SMEM
 	select QCOM_MDT_LOADER
 	select QCOM_RPROC_COMMON
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index 612d91403341..633268e9d550 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -264,15 +264,14 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
 		if (!(att->flags & ATT_OWN))
 			continue;
-		if (b > IMX7D_RPROC_MEM_MAX)
+		if (b >= IMX7D_RPROC_MEM_MAX)
 			break;
 		priv->mem[b].cpu_addr = devm_ioremap(&pdev->dev,
 						     att->sa, att->size);
-		if (IS_ERR(priv->mem[b].cpu_addr)) {
+		if (!priv->mem[b].cpu_addr) {
 			dev_err(dev, "devm_ioremap_resource failed\n");
-			err = PTR_ERR(priv->mem[b].cpu_addr);
-			return err;
+			return -ENOMEM;
 		}
 		priv->mem[b].sys_addr = att->sa;
 		priv->mem[b].size = att->size;
@@ -296,7 +295,7 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
 			return err;
 		}
-		if (b > IMX7D_RPROC_MEM_MAX)
+		if (b >= IMX7D_RPROC_MEM_MAX)
 			break;
 		priv->mem[b].cpu_addr = devm_ioremap_resource(&pdev->dev, &res);
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
index c60904ff40b8..3907bbc9c6cf 100644
--- a/drivers/reset/reset-socfpga.c
+++ b/drivers/reset/reset-socfpga.c
@@ -40,8 +40,9 @@ static int socfpga_reset_assert(struct reset_controller_dev *rcdev,
 	struct socfpga_reset_data *data = container_of(rcdev,
 					     struct socfpga_reset_data, rcdev);
-	int bank = id / BITS_PER_LONG;
-	int offset = id % BITS_PER_LONG;
+	int reg_width = sizeof(u32);
+	int bank = id / (reg_width * BITS_PER_BYTE);
+	int offset = id % (reg_width * BITS_PER_BYTE);
 	unsigned long flags;
 	u32 reg;
@@ -61,8 +62,9 @@ static int socfpga_reset_deassert(struct reset_controller_dev *rcdev,
 					     struct socfpga_reset_data, rcdev);
-	int bank = id / BITS_PER_LONG;
-	int offset = id % BITS_PER_LONG;
+	int reg_width = sizeof(u32);
+	int bank = id / (reg_width * BITS_PER_BYTE);
+	int offset = id % (reg_width * BITS_PER_BYTE);
 	unsigned long flags;
 	u32 reg;
@@ -81,8 +83,9 @@ static int socfpga_reset_status(struct reset_controller_dev *rcdev,
 {
 	struct socfpga_reset_data *data = container_of(rcdev,
 					     struct socfpga_reset_data, rcdev);
-	int bank = id / BITS_PER_LONG;
-	int offset = id % BITS_PER_LONG;
+	int reg_width = sizeof(u32);
+	int bank = id / (reg_width * BITS_PER_BYTE);
+	int offset = id % (reg_width * BITS_PER_BYTE);
 	u32 reg;
 	reg = readl(data->membase + (bank * BANK_INCREMENT));
@@ -132,7 +135,7 @@ static int socfpga_reset_probe(struct platform_device *pdev)
 	spin_lock_init(&data->lock);
 	data->rcdev.owner = THIS_MODULE;
-	data->rcdev.nr_resets = NR_BANKS * BITS_PER_LONG;
+	data->rcdev.nr_resets = NR_BANKS * (sizeof(u32) * BITS_PER_BYTE);
 	data->rcdev.ops = &socfpga_reset_ops;
 	data->rcdev.of_node = pdev->dev.of_node;
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 5a5e927ea50f..5dcc9bf1c5bc 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -635,19 +635,18 @@ qcom_glink_alloc_intent(struct qcom_glink *glink,
 	unsigned long flags;
 	intent = kzalloc(sizeof(*intent), GFP_KERNEL);
-
 	if (!intent)
 		return NULL;
 	intent->data = kzalloc(size, GFP_KERNEL);
 	if (!intent->data)
-		return NULL;
+		goto free_intent;
 	spin_lock_irqsave(&channel->intent_lock, flags);
 	ret = idr_alloc_cyclic(&channel->liids, intent, 1, -1, GFP_ATOMIC);
 	if (ret < 0) {
 		spin_unlock_irqrestore(&channel->intent_lock, flags);
-		return NULL;
+		goto free_data;
 	}
 	spin_unlock_irqrestore(&channel->intent_lock, flags);
@@ -656,6 +655,12 @@ qcom_glink_alloc_intent(struct qcom_glink *glink,
 	intent->reuse = reuseable;
 	return intent;
+
+free_data:
+	kfree(intent->data);
+free_intent:
+	kfree(intent);
+	return NULL;
 }
 static void qcom_glink_handle_rx_done(struct qcom_glink *glink,
@@ -1197,7 +1202,7 @@ static int qcom_glink_request_intent(struct qcom_glink *glink,
 	ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);
 	if (ret)
-		return ret;
+		goto unlock;
 	ret = wait_for_completion_timeout(&channel->intent_req_comp, 10 * HZ);
 	if (!ret) {
@@ -1207,6 +1212,7 @@ static int qcom_glink_request_intent(struct qcom_glink *glink,
 		ret = channel->intent_req_result ? 0 : -ECANCELED;
 	}
+unlock:
 	mutex_unlock(&channel->intent_req_lock);
 	return ret;
 }
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 520325867e2b..31d31aad3de1 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -383,11 +383,11 @@ static void fc_rport_work(struct work_struct *work)
 			fc_rport_enter_flogi(rdata);
 			mutex_unlock(&rdata->rp_mutex);
 		} else {
+			mutex_unlock(&rdata->rp_mutex);
 			FC_RPORT_DBG(rdata, "work delete\n");
 			mutex_lock(&lport->disc.disc_mutex);
 			list_del_rcu(&rdata->peers);
 			mutex_unlock(&lport->disc.disc_mutex);
-			mutex_unlock(&rdata->rp_mutex);
 			kref_put(&rdata->kref, fc_rport_destroy);
 		}
 	} else {
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index c62e8d111fd9..f8dc1601efd5 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1728,7 +1728,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
 	if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
 		reason = FAILURE_SESSION_IN_RECOVERY;
-		sc->result = DID_REQUEUE;
+		sc->result = DID_REQUEUE << 16;
 		goto fault;
 	}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 5b2437a5ea44..937209805baf 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -3175,6 +3175,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	    host->can_queue, base_vha->req,
 	    base_vha->mgmt_svr_loop_id, host->sg_tablesize);
+	INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
+
 	if (ha->mqenable) {
 		bool mq = false;
 		bool startit = false;
@@ -3223,7 +3225,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	 */
 	qla2xxx_wake_dpc(base_vha);
-	INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
 	INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error);
 	if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index bf53356f41f0..f796bd61f3f0 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1376,13 +1376,19 @@ static void __scsi_remove_target(struct scsi_target *starget)
 	spin_lock_irqsave(shost->host_lock, flags);
 restart:
 	list_for_each_entry(sdev, &shost->__devices, siblings) {
+		/*
+		 * We cannot call scsi_device_get() here, as
+		 * we might've been called from rmmod() causing
+		 * scsi_device_get() to fail the module_is_live()
+		 * check.
+		 */
 		if (sdev->channel != starget->channel ||
 		    sdev->id != starget->id ||
-		    scsi_device_get(sdev))
+		    !get_device(&sdev->sdev_gendev))
 			continue;
 		spin_unlock_irqrestore(shost->host_lock, flags);
 		scsi_remove_device(sdev);
-		scsi_device_put(sdev);
+		put_device(&sdev->sdev_gendev);
 		spin_lock_irqsave(shost->host_lock, flags);
 		goto restart;
 	}
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index cbd4495d0ff9..8c46a6d536af 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3320,6 +3320,9 @@ int fc_block_scsi_eh(struct scsi_cmnd *cmnd)
 {
 	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
+	if (WARN_ON_ONCE(!rport))
+		return FAST_IO_FAIL;
+
 	return fc_block_rport(rport);
 }
 EXPORT_SYMBOL(fc_block_scsi_eh);
diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c
index d96f4512224f..b55e5ebba8b4 100644
--- a/drivers/staging/media/imx/imx-media-dev.c
+++ b/drivers/staging/media/imx/imx-media-dev.c
@@ -400,10 +400,10 @@ static int imx_media_create_pad_vdev_lists(struct imx_media_dev *imxmd)
 					      struct media_link, list);
 		ret = imx_media_add_vdev_to_pad(imxmd, vdev, link->source);
 		if (ret)
-			break;
+			return ret;
 	}
-	return ret;
+	return 0;
 }
 /* async subdev complete notifier */
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 2fe216b276e2..84a8ac2a779f 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -694,10 +694,8 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
 	tty_set_termios_ldisc(tty, disc);
 	retval = tty_ldisc_open(tty, tty->ldisc);
 	if (retval) {
-		if (!WARN_ON(disc == N_TTY)) {
-			tty_ldisc_put(tty->ldisc);
-			tty->ldisc = NULL;
-		}
+		tty_ldisc_put(tty->ldisc);
+		tty->ldisc = NULL;
 	}
 	return retval;
 }
@@ -752,8 +750,9 @@ void tty_ldisc_hangup(struct tty_struct *tty, bool reinit)
 	if (tty->ldisc) {
 		if (reinit) {
-			if (tty_ldisc_reinit(tty, tty->termios.c_line) < 0)
-				tty_ldisc_reinit(tty, N_TTY);
+			if (tty_ldisc_reinit(tty, tty->termios.c_line) < 0 &&
+			    tty_ldisc_reinit(tty, N_TTY) < 0)
+				WARN_ON(tty_ldisc_reinit(tty, N_NULL) < 0);
 		} else
 			tty_ldisc_kill(tty);
 	}
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index dd74c99d6ce1..5d061b3d8224 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -2026,6 +2026,8 @@ static DEVICE_ATTR_RO(suspended);
 static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver)
 {
 	struct usb_composite_dev *cdev = get_gadget_data(gadget);
+	struct usb_gadget_strings *gstr = cdev->driver->strings[0];
+	struct usb_string *dev_str = gstr->strings;
 	/* composite_disconnect() must already have been called
 	 * by the underlying peripheral controller driver!
@@ -2045,6 +2047,9 @@ static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver)
 	composite_dev_cleanup(cdev);
+	if (dev_str[USB_GADGET_MANUFACTURER_IDX].s == cdev->def_manufacturer)
+		dev_str[USB_GADGET_MANUFACTURER_IDX].s = "";
+
 	kfree(cdev->def_manufacturer);
 	kfree(cdev);
 	set_gadget_data(gadget, NULL);
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index a22a892de7b7..aeb9f3c40521 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1143,11 +1143,12 @@ static struct configfs_attribute *interf_grp_attrs[] = {
 	NULL
 };
-int usb_os_desc_prepare_interf_dir(struct config_group *parent,
-				   int n_interf,
-				   struct usb_os_desc **desc,
-				   char **names,
-				   struct module *owner)
+struct config_group *usb_os_desc_prepare_interf_dir(
+		struct config_group *parent,
+		int n_interf,
+		struct usb_os_desc **desc,
+		char **names,
+		struct module *owner)
 {
 	struct config_group *os_desc_group;
 	struct config_item_type *os_desc_type, *interface_type;
@@ -1159,7 +1160,7 @@ int usb_os_desc_prepare_interf_dir(struct config_group *parent,
 	char *vlabuf = kzalloc(vla_group_size(data_chunk), GFP_KERNEL);
 	if (!vlabuf)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 	os_desc_group = vla_ptr(vlabuf, data_chunk, os_desc_group);
 	os_desc_type = vla_ptr(vlabuf, data_chunk, os_desc_type);
@@ -1184,7 +1185,7 @@ int usb_os_desc_prepare_interf_dir(struct config_group *parent,
 		configfs_add_default_group(&d->group, os_desc_group);
 	}
-	return 0;
+	return os_desc_group;
 }
 EXPORT_SYMBOL(usb_os_desc_prepare_interf_dir);
diff --git a/drivers/usb/gadget/configfs.h b/drivers/usb/gadget/configfs.h
index 36c468c4f5e9..540d5e92ed22 100644
--- a/drivers/usb/gadget/configfs.h
+++ b/drivers/usb/gadget/configfs.h
@@ -5,11 +5,12 @@
 void unregister_gadget_item(struct config_item *item);
-int usb_os_desc_prepare_interf_dir(struct config_group *parent,
-				   int n_interf,
-				   struct usb_os_desc **desc,
-				   char **names,
-				   struct module *owner);
+struct config_group *usb_os_desc_prepare_interf_dir(
+		struct config_group *parent,
+		int n_interf,
+		struct usb_os_desc **desc,
+		char **names,
+		struct module *owner);
 static inline struct usb_os_desc *to_usb_os_desc(struct config_item *item)
 {
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index e1d5853ef1e4..c7c5b3ce1d98 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -908,6 +908,7 @@ static void rndis_free_inst(struct usb_function_instance *f)
 			free_netdev(opts->net);
 	}
+	kfree(opts->rndis_interf_group);	/* single VLA chunk */
 	kfree(opts);
 }
@@ -916,6 +917,7 @@ static struct usb_function_instance *rndis_alloc_inst(void)
 	struct f_rndis_opts *opts;
 	struct usb_os_desc *descs[1];
 	char *names[1];
+	struct config_group *rndis_interf_group;
 	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
 	if (!opts)
@@ -940,8 +942,14 @@ static struct usb_function_instance *rndis_alloc_inst(void)
 	names[0] = "rndis";
 	config_group_init_type_name(&opts->func_inst.group, "",
 				    &rndis_func_type);
-	usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs,
-				       names, THIS_MODULE);
+	rndis_interf_group =
+		usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs,
+					       names, THIS_MODULE);
+	if (IS_ERR(rndis_interf_group)) {
+		rndis_free_inst(&opts->func_inst);
+		return ERR_CAST(rndis_interf_group);
+	}
+	opts->rndis_interf_group = rndis_interf_group;
 	return &opts->func_inst;
 }
diff --git a/drivers/usb/gadget/function/u_rndis.h b/drivers/usb/gadget/function/u_rndis.h
index a35ee3c2545d..efdb7ac381d9 100644
--- a/drivers/usb/gadget/function/u_rndis.h
+++ b/drivers/usb/gadget/function/u_rndis.h
@@ -26,6 +26,7 @@ struct f_rndis_opts {
 	bool bound;
 	bool borrowed_net;
+	struct config_group *rndis_interf_group;
 	struct usb_os_desc rndis_os_desc;
 	char rndis_ext_compat_id[16];
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index b17618a55f1b..f04e91ef9e7c 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -419,6 +419,7 @@ static void set_link_state_by_speed(struct dummy_hcd *dum_hcd)
 static void set_link_state(struct dummy_hcd *dum_hcd)
 {
 	struct dummy *dum = dum_hcd->dum;
+	unsigned int power_bit;
 	dum_hcd->active = 0;
 	if (dum->pullup)
@@ -429,17 +430,19 @@ static void set_link_state(struct dummy_hcd *dum_hcd)
 		return;
 	set_link_state_by_speed(dum_hcd);
+	power_bit = (dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3 ?
+			USB_SS_PORT_STAT_POWER : USB_PORT_STAT_POWER);
 	if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) == 0 ||
 	    dum_hcd->active)
 		dum_hcd->resuming = 0;
 	/* Currently !connected or in reset */
-	if ((dum_hcd->port_status & USB_PORT_STAT_CONNECTION) == 0 ||
+	if ((dum_hcd->port_status & power_bit) == 0 ||
 	    (dum_hcd->port_status & USB_PORT_STAT_RESET) != 0) {
-		unsigned disconnect = USB_PORT_STAT_CONNECTION &
+		unsigned int disconnect = power_bit &
 				dum_hcd->old_status & (~dum_hcd->port_status);
-		unsigned reset = USB_PORT_STAT_RESET &
+		unsigned int reset = USB_PORT_STAT_RESET &
 				(~dum_hcd->old_status) & dum_hcd->port_status;
 		/* Report reset and disconnect events to the driver */
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index eee82ca55b7b..b3fc602b2e24 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -202,12 +202,13 @@ found:
 		return tmp;
 	}
-	if (in) {
+	if (in)
 		dev->in_pipe = usb_rcvbulkpipe(udev,
 			in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+	if (out)
 		dev->out_pipe = usb_sndbulkpipe(udev,
 			out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
-	}
+
 	if (iso_in) {
 		dev->iso_in = &iso_in->desc;
 		dev->in_iso_pipe = usb_rcvisocpipe(udev,
@@ -1964,6 +1965,9 @@ test_queue(struct usbtest_dev *dev, struct usbtest_param_32 *param,
 	int status = 0;
 	struct urb *urbs[param->sglen];
+	if (!param->sglen || param->iterations > UINT_MAX / param->sglen)
+		return -EINVAL;
+
 	memset(&context, 0, sizeof(context));
 	context.count = param->iterations * param->sglen;
 	context.dev = dev;
@@ -2087,6 +2091,8 @@ usbtest_do_ioctl(struct usb_interface *intf, struct usbtest_param_32 *param)
 	if (param->iterations <= 0)
 		return -EINVAL;
+	if (param->sglen > MAX_SGLEN)
+		return -EINVAL;
 	/*
 	 * Just a bunch of test cases that every HCD is expected to handle.
 	 *
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
index 5fe4a5704bde..ccc2bf5274b4 100644
--- a/drivers/usb/phy/phy-tegra-usb.c
+++ b/drivers/usb/phy/phy-tegra-usb.c
@@ -329,6 +329,14 @@ static void utmi_phy_clk_disable(struct tegra_usb_phy *phy)
 	unsigned long val;
 	void __iomem *base = phy->regs;
+	/*
+	 * The USB driver may have already initiated the phy clock
+	 * disable so wait to see if the clock turns off and if not
+	 * then proceed with gating the clock.
+	 */
+	if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, 0) == 0)
+		return;
+
 	if (phy->is_legacy_phy) {
 		val = readl(base + USB_SUSP_CTRL);
 		val |= USB_SUSP_SET;
@@ -351,6 +359,15 @@ static void utmi_phy_clk_enable(struct tegra_usb_phy *phy)
 	unsigned long val;
 	void __iomem *base = phy->regs;
+	/*
+	 * The USB driver may have already initiated the phy clock
+	 * enable so wait to see if the clock turns on and if not
+	 * then proceed with ungating the clock.
+	 */
+	if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID,
+			       USB_PHY_CLK_VALID) == 0)
+		return;
+
 	if (phy->is_legacy_phy) {
 		val = readl(base + USB_SUSP_CTRL);
 		val |= USB_SUSP_CLR;
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 68f26904c316..50285b01da92 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -857,9 +857,9 @@ static void xfer_work(struct work_struct *work)
 		fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);
 	usbhs_pipe_running(pipe, 1);
-	usbhsf_dma_start(pipe, fifo);
 	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
 	dma_async_issue_pending(chan);
+	usbhsf_dma_start(pipe, fifo);
 	usbhs_pipe_enable(pipe);
 xfer_work_end:
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index fdf89800ebc3..43a862a90a77 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -186,6 +186,7 @@ static int usb_console_setup(struct console *co, char *options)
 	tty_kref_put(tty);
 reset_open_count:
 	port->port.count = 0;
+	info->port = NULL;
 	usb_autopm_put_interface(serial->interface);
 error_get_interface:
 	usb_serial_put(serial);
@@ -265,7 +266,7 @@ static struct console usbcons = {
 void usb_serial_console_disconnect(struct usb_serial *serial)
 {
-	if (serial->port[0] == usbcons_info.port) {
+	if (serial->port[0] && serial->port[0] == usbcons_info.port) {
 		usb_serial_console_exit();
 		usb_serial_put(serial);
 	}
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 2d945c9f975c..412f812522ee 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -177,6 +177,7 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
 	{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
 	{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
+	{ USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */
 	{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
 	{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
 	{ USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
@@ -352,6 +353,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
 #define CP210X_PARTNUM_CP2104	0x04
 #define CP210X_PARTNUM_CP2105	0x05
 #define CP210X_PARTNUM_CP2108	0x08
+#define CP210X_PARTNUM_UNKNOWN	0xFF
 /* CP210X_GET_COMM_STATUS returns these 0x13 bytes */
 struct cp210x_comm_status {
@@ -1491,8 +1493,11 @@ static int cp210x_attach(struct usb_serial *serial)
 	result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST,
 					  CP210X_GET_PARTNUM, &priv->partnum,
 					  sizeof(priv->partnum));
-	if (result < 0)
-		goto err_free_priv;
+	if (result < 0) {
+		dev_warn(&serial->interface->dev,
+			 "querying part number failed\n");
+		priv->partnum = CP210X_PARTNUM_UNKNOWN;
+	}
 	usb_set_serial_data(serial, priv);
@@ -1505,10 +1510,6 @@ static int cp210x_attach(struct usb_serial *serial)
 	}
 	return 0;
-err_free_priv:
-	kfree(priv);
-
-	return result;
 }
 static void cp210x_disconnect(struct usb_serial *serial)
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 1cec03799cdf..49d1b2d4606d 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1015,6 +1015,8 @@ static const struct usb_device_id id_table_combined[] = {
 	{ USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
 	{ USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID),
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+	{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) },
+	{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) },
 	{ }	/* Terminating entry */
 };
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 4fcf1cecb6d7..f9d15bd62785 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -610,6 +610,13 @@
 #define ADI_GNICEPLUS_PID	0xF001
 /*
+ * Cypress WICED USB UART
+ */
+#define CYPRESS_VID			0x04B4
+#define CYPRESS_WICED_BT_USB_PID	0x009B
+#define CYPRESS_WICED_WL_USB_PID	0xF900
+
+/*
  * Microchip Technology, Inc.
  *
  * MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 54bfef13966a..ba672cf4e888 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -522,6 +522,7 @@ static void option_instat_callback(struct urb *urb);
 /* TP-LINK Incorporated products */
 #define TPLINK_VENDOR_ID	0x2357
+#define TPLINK_PRODUCT_LTE	0x000D
 #define TPLINK_PRODUCT_MA180	0x0201
 /* Changhong products */
@@ -2011,6 +2012,7 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
 	{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) },
 	{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, TPLINK_PRODUCT_LTE, 0xff, 0x00, 0x00) },	/* TP-Link LTE Module */
 	{ USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
 	{ USB_DEVICE(TPLINK_VENDOR_ID, 0x9000),	/* TP-Link MA260 */
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index ebc0beea69d6..eb9928963a53 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -174,6 +174,10 @@ static const struct usb_device_id id_table[] = {
 	{DEVICE_SWI(0x413c, 0x81b3)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
 	{DEVICE_SWI(0x413c, 0x81b5)},	/* Dell Wireless 5811e QDL */
 	{DEVICE_SWI(0x413c, 0x81b6)},	/* Dell Wireless 5811e QDL */
+	{DEVICE_SWI(0x413c, 0x81cf)},	/* Dell Wireless 5819 */
+	{DEVICE_SWI(0x413c, 0x81d0)},	/* Dell Wireless 5819 */
+	{DEVICE_SWI(0x413c, 0x81d1)},	/* Dell Wireless 5818 */
+	{DEVICE_SWI(0x413c, 0x81d2)},	/* Dell Wireless 5818 */
 	/* Huawei devices */
 	{DEVICE_HWI(0x03f0, 0x581d)},	/* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */