Diffstat (limited to 'drivers'): 133 files changed, 1601 insertions, 710 deletions
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index 6bc9cbc01ad6..00b39802d7ec 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c @@ -105,7 +105,7 @@ static void round_robin_cpu(unsigned int tsk_index) mutex_lock(&round_robin_lock); cpumask_clear(tmp); for_each_cpu(cpu, pad_busy_cpus) - cpumask_or(tmp, tmp, topology_thread_cpumask(cpu)); + cpumask_or(tmp, tmp, topology_sibling_cpumask(cpu)); cpumask_andnot(tmp, cpu_online_mask, tmp); /* avoid HT sibilings if possible */ if (cpumask_empty(tmp)) diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c index 23716dd8a7ec..5928d0746a27 100644 --- a/drivers/ata/ahci_mvebu.c +++ b/drivers/ata/ahci_mvebu.c @@ -45,7 +45,7 @@ static void ahci_mvebu_mbus_config(struct ahci_host_priv *hpriv, writel((cs->mbus_attr << 8) | (dram->mbus_dram_target_id << 4) | 1, hpriv->mmio + AHCI_WINDOW_CTRL(i)); - writel(cs->base, hpriv->mmio + AHCI_WINDOW_BASE(i)); + writel(cs->base >> 16, hpriv->mmio + AHCI_WINDOW_BASE(i)); writel(((cs->size - 1) & 0xffff0000), hpriv->mmio + AHCI_WINDOW_SIZE(i)); } diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c index 80a80548ad0a..27245957eee3 100644 --- a/drivers/ata/pata_octeon_cf.c +++ b/drivers/ata/pata_octeon_cf.c @@ -1053,7 +1053,7 @@ static struct of_device_id octeon_cf_match[] = { }, {}, }; -MODULE_DEVICE_TABLE(of, octeon_i2c_match); +MODULE_DEVICE_TABLE(of, octeon_cf_match); static struct platform_driver octeon_cf_driver = { .probe = octeon_cf_probe, diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index 9c2ba1c97c42..df0c66cb7ad3 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -179,7 +179,7 @@ static int detect_cache_attributes(unsigned int cpu) { int ret; - if (init_cache_level(cpu)) + if (init_cache_level(cpu) || !cache_leaves(cpu)) return -ENOENT; per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu), diff --git a/drivers/base/init.c b/drivers/base/init.c index da033d3bab3c..48c0e220acc0 100644 --- a/drivers/base/init.c +++ b/drivers/base/init.c @@ -8,6 +8,7 @@ #include <linux/device.h> #include <linux/init.h> #include <linux/memory.h> +#include <linux/of.h> #include "base.h" @@ -34,4 +35,5 @@ void __init driver_init(void) cpu_dev_init(); memory_dev_init(); container_dev_init(); + of_core_init(); } diff --git a/drivers/base/topology.c b/drivers/base/topology.c index 6491f45200a7..8b7d7f8e5851 100644 --- a/drivers/base/topology.c +++ b/drivers/base/topology.c @@ -61,7 +61,7 @@ static DEVICE_ATTR_RO(physical_package_id); define_id_show_func(core_id); static DEVICE_ATTR_RO(core_id); -define_siblings_show_func(thread_siblings, thread_cpumask); +define_siblings_show_func(thread_siblings, sibling_cpumask); static DEVICE_ATTR_RO(thread_siblings); static DEVICE_ATTR_RO(thread_siblings_list); diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index eb1fed5bd516..3ccef9eba6f9 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -406,6 +406,7 @@ config BLK_DEV_RAM_DAX config BLK_DEV_PMEM tristate "Persistent memory block device support" + depends on HAS_IOMEM help Saying Y here will allow you to use a contiguous range of reserved memory as one or more persistent block devices. 
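The acpi_pad and base/topology hunks above (and the cpufreq hunks further down) switch from topology_thread_cpumask()/cpu_sibling_mask() to the renamed topology_sibling_cpumask(). Below is a minimal sketch of the sibling-avoidance pattern that round_robin_cpu() relies on, assuming the renamed kernel-internal helpers; pick_non_sibling_cpu() is an illustrative name, not part of the patch:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/topology.h>

/* Pick an online CPU that does not share an SMT core with any busy CPU. */
static int pick_non_sibling_cpu(const struct cpumask *busy, struct cpumask *tmp)
{
	int cpu;

	cpumask_clear(tmp);
	for_each_cpu(cpu, busy)
		/* collect every hardware-thread sibling of each busy CPU */
		cpumask_or(tmp, tmp, topology_sibling_cpumask(cpu));

	/* online CPUs minus those siblings */
	cpumask_andnot(tmp, cpu_online_mask, tmp);

	if (cpumask_empty(tmp))
		return -ENODEV;
	return cpumask_first(tmp);
}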
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index 85b8036deaa3..683dff272562 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -1750,6 +1750,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) struct nvme_iod *iod; dma_addr_t meta_dma = 0; void *meta = NULL; + void __user *metadata; if (copy_from_user(&io, uio, sizeof(io))) return -EFAULT; @@ -1763,6 +1764,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) meta_len = 0; } + metadata = (void __user *)(unsigned long)io.metadata; + write = io.opcode & 1; switch (io.opcode) { @@ -1786,13 +1789,13 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) if (meta_len) { meta = dma_alloc_coherent(&dev->pci_dev->dev, meta_len, &meta_dma, GFP_KERNEL); + if (!meta) { status = -ENOMEM; goto unmap; } if (write) { - if (copy_from_user(meta, (void __user *)io.metadata, - meta_len)) { + if (copy_from_user(meta, metadata, meta_len)) { status = -EFAULT; goto unmap; } @@ -1819,8 +1822,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) nvme_free_iod(dev, iod); if (meta) { if (status == NVME_SC_SUCCESS && !write) { - if (copy_to_user((void __user *)io.metadata, meta, - meta_len)) + if (copy_to_user(metadata, meta, meta_len)) status = -EFAULT; } dma_free_coherent(&dev->pci_dev->dev, meta_len, meta, meta_dma); diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 8dcbced0eafd..6e134f4759c0 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -805,7 +805,9 @@ static void zram_reset_device(struct zram *zram) memset(&zram->stats, 0, sizeof(zram->stats)); zram->disksize = 0; zram->max_comp_streams = 1; + set_capacity(zram->disk, 0); + part_stat_set_all(&zram->disk->part0, 0); up_write(&zram->init_lock); /* I/O operation under all of CPU are done so let's free */ diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c index fb9ec6221730..6f047dcb94c2 100644 --- a/drivers/bus/mvebu-mbus.c +++ b/drivers/bus/mvebu-mbus.c @@ -58,7 +58,6 @@ #include <linux/debugfs.h> #include <linux/log2.h> #include <linux/syscore_ops.h> -#include <linux/memblock.h> /* * DDR target is the same on all platforms. @@ -70,6 +69,7 @@ */ #define WIN_CTRL_OFF 0x0000 #define WIN_CTRL_ENABLE BIT(0) +/* Only on HW I/O coherency capable platforms */ #define WIN_CTRL_SYNCBARRIER BIT(1) #define WIN_CTRL_TGT_MASK 0xf0 #define WIN_CTRL_TGT_SHIFT 4 @@ -102,9 +102,7 @@ /* Relative to mbusbridge_base */ #define MBUS_BRIDGE_CTRL_OFF 0x0 -#define MBUS_BRIDGE_SIZE_MASK 0xffff0000 #define MBUS_BRIDGE_BASE_OFF 0x4 -#define MBUS_BRIDGE_BASE_MASK 0xffff0000 /* Maximum number of windows, for all known platforms */ #define MBUS_WINS_MAX 20 @@ -323,8 +321,9 @@ static int mvebu_mbus_setup_window(struct mvebu_mbus_state *mbus, ctrl = ((size - 1) & WIN_CTRL_SIZE_MASK) | (attr << WIN_CTRL_ATTR_SHIFT) | (target << WIN_CTRL_TGT_SHIFT) | - WIN_CTRL_SYNCBARRIER | WIN_CTRL_ENABLE; + if (mbus->hw_io_coherency) + ctrl |= WIN_CTRL_SYNCBARRIER; writel(base & WIN_BASE_LOW, addr + WIN_BASE_OFF); writel(ctrl, addr + WIN_CTRL_OFF); @@ -577,106 +576,36 @@ static unsigned int armada_xp_mbus_win_remap_offset(int win) return MVEBU_MBUS_NO_REMAP; } -/* - * Use the memblock information to find the MBus bridge hole in the - * physical address space. 
- */ -static void __init -mvebu_mbus_find_bridge_hole(uint64_t *start, uint64_t *end) -{ - struct memblock_region *r; - uint64_t s = 0; - - for_each_memblock(memory, r) { - /* - * This part of the memory is above 4 GB, so we don't - * care for the MBus bridge hole. - */ - if (r->base >= 0x100000000) - continue; - - /* - * The MBus bridge hole is at the end of the RAM under - * the 4 GB limit. - */ - if (r->base + r->size > s) - s = r->base + r->size; - } - - *start = s; - *end = 0x100000000; -} - static void __init mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus) { int i; int cs; - uint64_t mbus_bridge_base, mbus_bridge_end; mvebu_mbus_dram_info.mbus_dram_target_id = TARGET_DDR; - mvebu_mbus_find_bridge_hole(&mbus_bridge_base, &mbus_bridge_end); - for (i = 0, cs = 0; i < 4; i++) { - u64 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i)); - u64 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i)); - u64 end; - struct mbus_dram_window *w; - - /* Ignore entries that are not enabled */ - if (!(size & DDR_SIZE_ENABLED)) - continue; - - /* - * Ignore entries whose base address is above 2^32, - * since devices cannot DMA to such high addresses - */ - if (base & DDR_BASE_CS_HIGH_MASK) - continue; - - base = base & DDR_BASE_CS_LOW_MASK; - size = (size | ~DDR_SIZE_MASK) + 1; - end = base + size; - - /* - * Adjust base/size of the current CS to make sure it - * doesn't overlap with the MBus bridge hole. This is - * particularly important for devices that do DMA from - * DRAM to a SRAM mapped in a MBus window, such as the - * CESA cryptographic engine. - */ + u32 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i)); + u32 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i)); /* - * The CS is fully enclosed inside the MBus bridge - * area, so ignore it. + * We only take care of entries for which the chip + * select is enabled, and that don't have high base + * address bits set (devices can only access the first + * 32 bits of the memory). */ - if (base >= mbus_bridge_base && end <= mbus_bridge_end) - continue; + if ((size & DDR_SIZE_ENABLED) && + !(base & DDR_BASE_CS_HIGH_MASK)) { + struct mbus_dram_window *w; - /* - * Beginning of CS overlaps with end of MBus, raise CS - * base address, and shrink its size. - */ - if (base >= mbus_bridge_base && end > mbus_bridge_end) { - size -= mbus_bridge_end - base; - base = mbus_bridge_end; + w = &mvebu_mbus_dram_info.cs[cs++]; + w->cs_index = i; + w->mbus_attr = 0xf & ~(1 << i); + if (mbus->hw_io_coherency) + w->mbus_attr |= ATTR_HW_COHERENCY; + w->base = base & DDR_BASE_CS_LOW_MASK; + w->size = (size | ~DDR_SIZE_MASK) + 1; } - - /* - * End of CS overlaps with beginning of MBus, shrink - * CS size. 
- */ - if (base < mbus_bridge_base && end > mbus_bridge_base) - size -= end - mbus_bridge_base; - - w = &mvebu_mbus_dram_info.cs[cs++]; - w->cs_index = i; - w->mbus_attr = 0xf & ~(1 << i); - if (mbus->hw_io_coherency) - w->mbus_attr |= ATTR_HW_COHERENCY; - w->base = base; - w->size = size; } mvebu_mbus_dram_info.num_cs = cs; } diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c index a3bebef255ad..0c98a9d51a24 100644 --- a/drivers/char/hw_random/via-rng.c +++ b/drivers/char/hw_random/via-rng.c @@ -33,7 +33,7 @@ #include <asm/io.h> #include <asm/msr.h> #include <asm/cpufeature.h> -#include <asm/i387.h> +#include <asm/fpu/api.h> diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c index 597fed423d7d..df2c1afa52b4 100644 --- a/drivers/clk/at91/clk-peripheral.c +++ b/drivers/clk/at91/clk-peripheral.c @@ -29,7 +29,7 @@ #define PERIPHERAL_RSHIFT_MASK 0x3 #define PERIPHERAL_RSHIFT(val) (((val) >> 16) & PERIPHERAL_RSHIFT_MASK) -#define PERIPHERAL_MAX_SHIFT 4 +#define PERIPHERAL_MAX_SHIFT 3 struct clk_peripheral { struct clk_hw hw; @@ -242,7 +242,7 @@ static long clk_sam9x5_peripheral_round_rate(struct clk_hw *hw, return *parent_rate; if (periph->range.max) { - for (; shift < PERIPHERAL_MAX_SHIFT; shift++) { + for (; shift <= PERIPHERAL_MAX_SHIFT; shift++) { cur_rate = *parent_rate >> shift; if (cur_rate <= periph->range.max) break; @@ -254,7 +254,7 @@ static long clk_sam9x5_peripheral_round_rate(struct clk_hw *hw, best_diff = cur_rate - rate; best_rate = cur_rate; - for (; shift < PERIPHERAL_MAX_SHIFT; shift++) { + for (; shift <= PERIPHERAL_MAX_SHIFT; shift++) { cur_rate = *parent_rate >> shift; if (cur_rate < rate) cur_diff = rate - cur_rate; @@ -289,7 +289,7 @@ static int clk_sam9x5_peripheral_set_rate(struct clk_hw *hw, if (periph->range.max && rate > periph->range.max) return -EINVAL; - for (shift = 0; shift < PERIPHERAL_MAX_SHIFT; shift++) { + for (shift = 0; shift <= PERIPHERAL_MAX_SHIFT; shift++) { if (parent_rate >> shift == rate) { periph->auto_div = false; periph->div = shift; diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c index 6ec79dbc0840..cbbe40377ad6 100644 --- a/drivers/clk/at91/clk-pll.c +++ b/drivers/clk/at91/clk-pll.c @@ -173,8 +173,7 @@ static long clk_pll_get_best_div_mul(struct clk_pll *pll, unsigned long rate, int i = 0; /* Check if parent_rate is a valid input rate */ - if (parent_rate < characteristics->input.min || - parent_rate > characteristics->input.max) + if (parent_rate < characteristics->input.min) return -ERANGE; /* @@ -187,6 +186,15 @@ static long clk_pll_get_best_div_mul(struct clk_pll *pll, unsigned long rate, if (!mindiv) mindiv = 1; + if (parent_rate > characteristics->input.max) { + tmpdiv = DIV_ROUND_UP(parent_rate, characteristics->input.max); + if (tmpdiv > PLL_DIV_MAX) + return -ERANGE; + + if (tmpdiv > mindiv) + mindiv = tmpdiv; + } + /* * Calculate the maximum divider which is limited by PLL register * layout (limited by the MUL or DIV field size). 
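The clk-pll.c hunk above now derives a minimum divider with DIV_ROUND_UP() when the parent rate exceeds the PLL input ceiling, instead of rejecting the rate outright. A stand-alone worked example of that arithmetic follows, with made-up rates and an assumed PLL_DIV_MAX of 255 (the real limit comes from the driver's register layout):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define PLL_DIV_MAX		255	/* assumed field limit, for illustration only */

int main(void)
{
	unsigned long parent_rate = 50000000;	/* 50 MHz input clock (hypothetical) */
	unsigned long input_max   = 32000000;	/* PLL input ceiling (hypothetical) */
	unsigned long mindiv = DIV_ROUND_UP(parent_rate, input_max);	/* (50M + 32M - 1) / 32M = 2 */

	if (mindiv > PLL_DIV_MAX)
		return 1;	/* the driver returns -ERANGE here */

	/* 50 MHz / 2 = 25 MHz, now within the input range, so the div/mul search continues */
	printf("minimum divider %lu gives %lu Hz\n", mindiv, parent_rate / mindiv);
	return 0;
}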
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h index 69abb08cf146..eb8e5dc9076d 100644 --- a/drivers/clk/at91/pmc.h +++ b/drivers/clk/at91/pmc.h @@ -121,7 +121,7 @@ extern void __init of_at91sam9x5_clk_smd_setup(struct device_node *np, struct at91_pmc *pmc); #endif -#if defined(CONFIG_HAVE_AT91_SMD) +#if defined(CONFIG_HAVE_AT91_H32MX) extern void __init of_sama5d4_clk_h32mx_setup(struct device_node *np, struct at91_pmc *pmc); #endif diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index b0c18ed8d83f..0136dfcdabf0 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -699,13 +699,14 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) dmi_check_system(sw_any_bug_dmi_table); if (bios_with_sw_any_bug && !policy_is_shared(policy)) { policy->shared_type = CPUFREQ_SHARED_TYPE_ALL; - cpumask_copy(policy->cpus, cpu_core_mask(cpu)); + cpumask_copy(policy->cpus, topology_core_cpumask(cpu)); } if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) { cpumask_clear(policy->cpus); cpumask_set_cpu(cpu, policy->cpus); - cpumask_copy(data->freqdomain_cpus, cpu_sibling_mask(cpu)); + cpumask_copy(data->freqdomain_cpus, + topology_sibling_cpumask(cpu)); policy->shared_type = CPUFREQ_SHARED_TYPE_HW; pr_info_once(PFX "overriding BIOS provided _PSD data\n"); } diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c index 529cfd92158f..5dd95dab580d 100644 --- a/drivers/cpufreq/p4-clockmod.c +++ b/drivers/cpufreq/p4-clockmod.c @@ -172,7 +172,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) unsigned int i; #ifdef CONFIG_SMP - cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu)); + cpumask_copy(policy->cpus, topology_sibling_cpumask(policy->cpu)); #endif /* Errata workaround */ diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index f9ce7e4bf0fe..5c035d04d827 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c @@ -57,13 +57,6 @@ static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data); static struct cpufreq_driver cpufreq_amd64_driver; -#ifndef CONFIG_SMP -static inline const struct cpumask *cpu_core_mask(int cpu) -{ - return cpumask_of(0); -} -#endif - /* Return a frequency in MHz, given an input fid */ static u32 find_freq_from_fid(u32 fid) { @@ -620,7 +613,7 @@ static int fill_powernow_table(struct powernow_k8_data *data, pr_debug("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid); data->powernow_table = powernow_table; - if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu) + if (cpumask_first(topology_core_cpumask(data->cpu)) == data->cpu) print_basics(data); for (j = 0; j < data->numps; j++) @@ -784,7 +777,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) CPUFREQ_TABLE_END; data->powernow_table = powernow_table; - if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu) + if (cpumask_first(topology_core_cpumask(data->cpu)) == data->cpu) print_basics(data); /* notify BIOS that we exist */ @@ -1090,7 +1083,7 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol) if (rc != 0) goto err_out_exit_acpi; - cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu)); + cpumask_copy(pol->cpus, topology_core_cpumask(pol->cpu)); data->available_cores = pol->cpus; /* min/max the cpu is capable of */ diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c index e56d632a8b21..37555c6b86a7 100644 --- a/drivers/cpufreq/speedstep-ich.c +++ b/drivers/cpufreq/speedstep-ich.c @@ -292,7 +292,7 
@@ static int speedstep_cpu_init(struct cpufreq_policy *policy) /* only run on CPU to be set, or on its sibling */ #ifdef CONFIG_SMP - cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu)); + cpumask_copy(policy->cpus, topology_sibling_cpumask(policy->cpu)); #endif policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask); diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index ba0532efd3ae..332c8ef8dae2 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -1544,6 +1544,8 @@ static int ahash_init(struct ahash_request *req) state->current_buf = 0; state->buf_dma = 0; + state->buflen_0 = 0; + state->buflen_1 = 0; return 0; } diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c index 26a544b505f1..5095337205b8 100644 --- a/drivers/crypto/caam/caamrng.c +++ b/drivers/crypto/caam/caamrng.c @@ -56,7 +56,7 @@ /* Buffer, its dma address and lock */ struct buf_data { - u8 buf[RN_BUF_SIZE]; + u8 buf[RN_BUF_SIZE] ____cacheline_aligned; dma_addr_t addr; struct completion filled; u32 hw_desc[DESC_JOB_O_LEN]; diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index c178ed8c3908..da2d6777bd09 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -22,7 +22,7 @@ #include <asm/cpu_device_id.h> #include <asm/byteorder.h> #include <asm/processor.h> -#include <asm/i387.h> +#include <asm/fpu/api.h> /* * Number of data blocks actually fetched for each xcrypt insn. diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c index 95f7d27ce491..4e154c9b9206 100644 --- a/drivers/crypto/padlock-sha.c +++ b/drivers/crypto/padlock-sha.c @@ -23,7 +23,7 @@ #include <linux/kernel.h> #include <linux/scatterlist.h> #include <asm/cpu_device_id.h> -#include <asm/i387.h> +#include <asm/fpu/api.h> struct padlock_sha_desc { struct shash_desc fallback; diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c index ab300ea19434..a9064e36e7b5 100644 --- a/drivers/crypto/vmx/aes.c +++ b/drivers/crypto/vmx/aes.c @@ -78,12 +78,14 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key, int ret; struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); + preempt_disable(); pagefault_disable(); enable_kernel_altivec(); ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); pagefault_enable(); - + preempt_enable(); + ret += crypto_cipher_setkey(ctx->fallback, key, keylen); return ret; } @@ -95,10 +97,12 @@ static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) if (in_interrupt()) { crypto_cipher_encrypt_one(ctx->fallback, dst, src); } else { + preempt_disable(); pagefault_disable(); enable_kernel_altivec(); aes_p8_encrypt(src, dst, &ctx->enc_key); pagefault_enable(); + preempt_enable(); } } @@ -109,10 +113,12 @@ static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) if (in_interrupt()) { crypto_cipher_decrypt_one(ctx->fallback, dst, src); } else { + preempt_disable(); pagefault_disable(); enable_kernel_altivec(); aes_p8_decrypt(src, dst, &ctx->dec_key); pagefault_enable(); + preempt_enable(); } } diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c index 1a559b7dddb5..477284abdd11 100644 --- a/drivers/crypto/vmx/aes_cbc.c +++ b/drivers/crypto/vmx/aes_cbc.c @@ -79,11 +79,13 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key, int ret; struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); + preempt_disable(); pagefault_disable(); 
enable_kernel_altivec(); ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); pagefault_enable(); + preempt_enable(); ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen); return ret; @@ -106,6 +108,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc, if (in_interrupt()) { ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes); } else { + preempt_disable(); pagefault_disable(); enable_kernel_altivec(); @@ -119,6 +122,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc, } pagefault_enable(); + preempt_enable(); } return ret; @@ -141,6 +145,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc, if (in_interrupt()) { ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src, nbytes); } else { + preempt_disable(); pagefault_disable(); enable_kernel_altivec(); @@ -154,6 +159,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc, } pagefault_enable(); + preempt_enable(); } return ret; diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c index d0ffe277af5c..f255ec4a04d4 100644 --- a/drivers/crypto/vmx/ghash.c +++ b/drivers/crypto/vmx/ghash.c @@ -114,11 +114,13 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key, if (keylen != GHASH_KEY_LEN) return -EINVAL; + preempt_disable(); pagefault_disable(); enable_kernel_altivec(); enable_kernel_fp(); gcm_init_p8(ctx->htable, (const u64 *) key); pagefault_enable(); + preempt_enable(); return crypto_shash_setkey(ctx->fallback, key, keylen); } @@ -140,23 +142,27 @@ static int p8_ghash_update(struct shash_desc *desc, } memcpy(dctx->buffer + dctx->bytes, src, GHASH_DIGEST_SIZE - dctx->bytes); + preempt_disable(); pagefault_disable(); enable_kernel_altivec(); enable_kernel_fp(); gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer, GHASH_DIGEST_SIZE); pagefault_enable(); + preempt_enable(); src += GHASH_DIGEST_SIZE - dctx->bytes; srclen -= GHASH_DIGEST_SIZE - dctx->bytes; dctx->bytes = 0; } len = srclen & ~(GHASH_DIGEST_SIZE - 1); if (len) { + preempt_disable(); pagefault_disable(); enable_kernel_altivec(); enable_kernel_fp(); gcm_ghash_p8(dctx->shash, ctx->htable, src, len); pagefault_enable(); + preempt_enable(); src += len; srclen -= len; } @@ -180,12 +186,14 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out) if (dctx->bytes) { for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++) dctx->buffer[i] = 0; + preempt_disable(); pagefault_disable(); enable_kernel_altivec(); enable_kernel_fp(); gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer, GHASH_DIGEST_SIZE); pagefault_enable(); + preempt_enable(); dctx->bytes = 0; } memcpy(out, dctx->shash, GHASH_DIGEST_SIZE); diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 933e4b338459..7992164ea9ec 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -174,6 +174,8 @@ #define AT_XDMAC_MBR_UBC_NDV3 (0x3 << 27) /* Next Descriptor View 3 */ #define AT_XDMAC_MAX_CHAN 0x20 +#define AT_XDMAC_MAX_CSIZE 16 /* 16 data */ +#define AT_XDMAC_MAX_DWIDTH 8 /* 64 bits */ #define AT_XDMAC_DMA_BUSWIDTHS\ (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\ @@ -192,20 +194,17 @@ struct at_xdmac_chan { struct dma_chan chan; void __iomem *ch_regs; u32 mask; /* Channel Mask */ - u32 cfg[2]; /* Channel Configuration Register */ - #define AT_XDMAC_DEV_TO_MEM_CFG 0 /* Predifined dev to mem channel conf */ - #define AT_XDMAC_MEM_TO_DEV_CFG 1 /* Predifined mem to dev channel conf */ + u32 cfg; /* Channel Configuration Register */ u8 perid; /* Peripheral ID */ u8 
perif; /* Peripheral Interface */ u8 memif; /* Memory Interface */ - u32 per_src_addr; - u32 per_dst_addr; u32 save_cc; u32 save_cim; u32 save_cnda; u32 save_cndc; unsigned long status; struct tasklet_struct tasklet; + struct dma_slave_config sconfig; spinlock_t lock; @@ -415,8 +414,9 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx) struct at_xdmac_desc *desc = txd_to_at_desc(tx); struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan); dma_cookie_t cookie; + unsigned long irqflags; - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, irqflags); cookie = dma_cookie_assign(tx); dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n", @@ -425,7 +425,7 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx) if (list_is_singular(&atchan->xfers_list)) at_xdmac_start_xfer(atchan, desc); - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, irqflags); return cookie; } @@ -494,61 +494,94 @@ static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec, return chan; } +static int at_xdmac_compute_chan_conf(struct dma_chan *chan, + enum dma_transfer_direction direction) +{ + struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); + int csize, dwidth; + + if (direction == DMA_DEV_TO_MEM) { + atchan->cfg = + AT91_XDMAC_DT_PERID(atchan->perid) + | AT_XDMAC_CC_DAM_INCREMENTED_AM + | AT_XDMAC_CC_SAM_FIXED_AM + | AT_XDMAC_CC_DIF(atchan->memif) + | AT_XDMAC_CC_SIF(atchan->perif) + | AT_XDMAC_CC_SWREQ_HWR_CONNECTED + | AT_XDMAC_CC_DSYNC_PER2MEM + | AT_XDMAC_CC_MBSIZE_SIXTEEN + | AT_XDMAC_CC_TYPE_PER_TRAN; + csize = ffs(atchan->sconfig.src_maxburst) - 1; + if (csize < 0) { + dev_err(chan2dev(chan), "invalid src maxburst value\n"); + return -EINVAL; + } + atchan->cfg |= AT_XDMAC_CC_CSIZE(csize); + dwidth = ffs(atchan->sconfig.src_addr_width) - 1; + if (dwidth < 0) { + dev_err(chan2dev(chan), "invalid src addr width value\n"); + return -EINVAL; + } + atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth); + } else if (direction == DMA_MEM_TO_DEV) { + atchan->cfg = + AT91_XDMAC_DT_PERID(atchan->perid) + | AT_XDMAC_CC_DAM_FIXED_AM + | AT_XDMAC_CC_SAM_INCREMENTED_AM + | AT_XDMAC_CC_DIF(atchan->perif) + | AT_XDMAC_CC_SIF(atchan->memif) + | AT_XDMAC_CC_SWREQ_HWR_CONNECTED + | AT_XDMAC_CC_DSYNC_MEM2PER + | AT_XDMAC_CC_MBSIZE_SIXTEEN + | AT_XDMAC_CC_TYPE_PER_TRAN; + csize = ffs(atchan->sconfig.dst_maxburst) - 1; + if (csize < 0) { + dev_err(chan2dev(chan), "invalid src maxburst value\n"); + return -EINVAL; + } + atchan->cfg |= AT_XDMAC_CC_CSIZE(csize); + dwidth = ffs(atchan->sconfig.dst_addr_width) - 1; + if (dwidth < 0) { + dev_err(chan2dev(chan), "invalid dst addr width value\n"); + return -EINVAL; + } + atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth); + } + + dev_dbg(chan2dev(chan), "%s: cfg=0x%08x\n", __func__, atchan->cfg); + + return 0; +} + +/* + * Only check that maxburst and addr width values are supported by the + * the controller but not that the configuration is good to perform the + * transfer since we don't know the direction at this stage. 
+ */ +static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig) +{ + if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE) + || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE)) + return -EINVAL; + + if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH) + || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH)) + return -EINVAL; + + return 0; +} + static int at_xdmac_set_slave_config(struct dma_chan *chan, struct dma_slave_config *sconfig) { struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); - u8 dwidth; - int csize; - atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] = - AT91_XDMAC_DT_PERID(atchan->perid) - | AT_XDMAC_CC_DAM_INCREMENTED_AM - | AT_XDMAC_CC_SAM_FIXED_AM - | AT_XDMAC_CC_DIF(atchan->memif) - | AT_XDMAC_CC_SIF(atchan->perif) - | AT_XDMAC_CC_SWREQ_HWR_CONNECTED - | AT_XDMAC_CC_DSYNC_PER2MEM - | AT_XDMAC_CC_MBSIZE_SIXTEEN - | AT_XDMAC_CC_TYPE_PER_TRAN; - csize = at_xdmac_csize(sconfig->src_maxburst); - if (csize < 0) { - dev_err(chan2dev(chan), "invalid src maxburst value\n"); + if (at_xdmac_check_slave_config(sconfig)) { + dev_err(chan2dev(chan), "invalid slave configuration\n"); return -EINVAL; } - atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_CSIZE(csize); - dwidth = ffs(sconfig->src_addr_width) - 1; - atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth); - - - atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] = - AT91_XDMAC_DT_PERID(atchan->perid) - | AT_XDMAC_CC_DAM_FIXED_AM - | AT_XDMAC_CC_SAM_INCREMENTED_AM - | AT_XDMAC_CC_DIF(atchan->perif) - | AT_XDMAC_CC_SIF(atchan->memif) - | AT_XDMAC_CC_SWREQ_HWR_CONNECTED - | AT_XDMAC_CC_DSYNC_MEM2PER - | AT_XDMAC_CC_MBSIZE_SIXTEEN - | AT_XDMAC_CC_TYPE_PER_TRAN; - csize = at_xdmac_csize(sconfig->dst_maxburst); - if (csize < 0) { - dev_err(chan2dev(chan), "invalid src maxburst value\n"); - return -EINVAL; - } - atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_CSIZE(csize); - dwidth = ffs(sconfig->dst_addr_width) - 1; - atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth); - - /* Src and dst addr are needed to configure the link list descriptor. */ - atchan->per_src_addr = sconfig->src_addr; - atchan->per_dst_addr = sconfig->dst_addr; - dev_dbg(chan2dev(chan), - "%s: cfg[dev2mem]=0x%08x, cfg[mem2dev]=0x%08x, per_src_addr=0x%08x, per_dst_addr=0x%08x\n", - __func__, atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG], - atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG], - atchan->per_src_addr, atchan->per_dst_addr); + memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig)); return 0; } @@ -563,6 +596,8 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, struct scatterlist *sg; int i; unsigned int xfer_size = 0; + unsigned long irqflags; + struct dma_async_tx_descriptor *ret = NULL; if (!sgl) return NULL; @@ -578,7 +613,10 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, flags); /* Protect dma_sconfig field that can be modified by set_slave_conf. */ - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, irqflags); + + if (at_xdmac_compute_chan_conf(chan, direction)) + goto spin_unlock; /* Prepare descriptors. 
*/ for_each_sg(sgl, sg, sg_len, i) { @@ -589,8 +627,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, mem = sg_dma_address(sg); if (unlikely(!len)) { dev_err(chan2dev(chan), "sg data length is zero\n"); - spin_unlock_bh(&atchan->lock); - return NULL; + goto spin_unlock; } dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n", __func__, i, len, mem); @@ -600,20 +637,18 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, dev_err(chan2dev(chan), "can't get descriptor\n"); if (first) list_splice_init(&first->descs_list, &atchan->free_descs_list); - spin_unlock_bh(&atchan->lock); - return NULL; + goto spin_unlock; } /* Linked list descriptor setup. */ if (direction == DMA_DEV_TO_MEM) { - desc->lld.mbr_sa = atchan->per_src_addr; + desc->lld.mbr_sa = atchan->sconfig.src_addr; desc->lld.mbr_da = mem; - desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG]; } else { desc->lld.mbr_sa = mem; - desc->lld.mbr_da = atchan->per_dst_addr; - desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG]; + desc->lld.mbr_da = atchan->sconfig.dst_addr; } + desc->lld.mbr_cfg = atchan->cfg; dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg); fixed_dwidth = IS_ALIGNED(len, 1 << dwidth) ? at_xdmac_get_dwidth(desc->lld.mbr_cfg) @@ -645,13 +680,15 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, xfer_size += len; } - spin_unlock_bh(&atchan->lock); first->tx_dma_desc.flags = flags; first->xfer_size = xfer_size; first->direction = direction; + ret = &first->tx_dma_desc; - return &first->tx_dma_desc; +spin_unlock: + spin_unlock_irqrestore(&atchan->lock, irqflags); + return ret; } static struct dma_async_tx_descriptor * @@ -664,6 +701,7 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, struct at_xdmac_desc *first = NULL, *prev = NULL; unsigned int periods = buf_len / period_len; int i; + unsigned long irqflags; dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n", __func__, &buf_addr, buf_len, period_len, @@ -679,32 +717,34 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, return NULL; } + if (at_xdmac_compute_chan_conf(chan, direction)) + return NULL; + for (i = 0; i < periods; i++) { struct at_xdmac_desc *desc = NULL; - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, irqflags); desc = at_xdmac_get_desc(atchan); if (!desc) { dev_err(chan2dev(chan), "can't get descriptor\n"); if (first) list_splice_init(&first->descs_list, &atchan->free_descs_list); - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, irqflags); return NULL; } - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, irqflags); dev_dbg(chan2dev(chan), "%s: desc=0x%p, tx_dma_desc.phys=%pad\n", __func__, desc, &desc->tx_dma_desc.phys); if (direction == DMA_DEV_TO_MEM) { - desc->lld.mbr_sa = atchan->per_src_addr; + desc->lld.mbr_sa = atchan->sconfig.src_addr; desc->lld.mbr_da = buf_addr + i * period_len; - desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG]; } else { desc->lld.mbr_sa = buf_addr + i * period_len; - desc->lld.mbr_da = atchan->per_dst_addr; - desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG]; + desc->lld.mbr_da = atchan->sconfig.dst_addr; } + desc->lld.mbr_cfg = atchan->cfg; desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1 | AT_XDMAC_MBR_UBC_NDEN | AT_XDMAC_MBR_UBC_NSEN @@ -766,6 +806,7 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | AT_XDMAC_CC_SIF(0) | AT_XDMAC_CC_MBSIZE_SIXTEEN | 
AT_XDMAC_CC_TYPE_MEM_TRAN; + unsigned long irqflags; dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n", __func__, &src, &dest, len, flags); @@ -798,9 +839,9 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size); - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, irqflags); desc = at_xdmac_get_desc(atchan); - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, irqflags); if (!desc) { dev_err(chan2dev(chan), "can't get descriptor\n"); if (first) @@ -886,6 +927,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, int residue; u32 cur_nda, mask, value; u8 dwidth = 0; + unsigned long flags; ret = dma_cookie_status(chan, cookie, txstate); if (ret == DMA_COMPLETE) @@ -894,7 +936,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, if (!txstate) return ret; - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, flags); desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node); @@ -904,8 +946,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, */ if (!desc->active_xfer) { dma_set_residue(txstate, desc->xfer_size); - spin_unlock_bh(&atchan->lock); - return ret; + goto spin_unlock; } residue = desc->xfer_size; @@ -936,14 +977,14 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, } residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth; - spin_unlock_bh(&atchan->lock); - dma_set_residue(txstate, residue); dev_dbg(chan2dev(chan), "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n", __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue); +spin_unlock: + spin_unlock_irqrestore(&atchan->lock, flags); return ret; } @@ -964,8 +1005,9 @@ static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan, static void at_xdmac_advance_work(struct at_xdmac_chan *atchan) { struct at_xdmac_desc *desc; + unsigned long flags; - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, flags); /* * If channel is enabled, do nothing, advance_work will be triggered @@ -980,7 +1022,7 @@ static void at_xdmac_advance_work(struct at_xdmac_chan *atchan) at_xdmac_start_xfer(atchan, desc); } - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, flags); } static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan) @@ -1116,12 +1158,13 @@ static int at_xdmac_device_config(struct dma_chan *chan, { struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); int ret; + unsigned long flags; dev_dbg(chan2dev(chan), "%s\n", __func__); - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, flags); ret = at_xdmac_set_slave_config(chan, config); - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, flags); return ret; } @@ -1130,18 +1173,19 @@ static int at_xdmac_device_pause(struct dma_chan *chan) { struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); + unsigned long flags; dev_dbg(chan2dev(chan), "%s\n", __func__); if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status)) return 0; - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, flags); at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask); while (at_xdmac_chan_read(atchan, AT_XDMAC_CC) & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP)) cpu_relax(); - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, flags); return 0; } @@ -1150,18 +1194,19 @@ 
static int at_xdmac_device_resume(struct dma_chan *chan) { struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); + unsigned long flags; dev_dbg(chan2dev(chan), "%s\n", __func__); - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, flags); if (!at_xdmac_chan_is_paused(atchan)) { - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, flags); return 0; } at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask); clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, flags); return 0; } @@ -1171,10 +1216,11 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan) struct at_xdmac_desc *desc, *_desc; struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); + unsigned long flags; dev_dbg(chan2dev(chan), "%s\n", __func__); - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, flags); at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask) cpu_relax(); @@ -1184,7 +1230,7 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan) at_xdmac_remove_xfer(atchan, desc); clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status); - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, flags); return 0; } @@ -1194,8 +1240,9 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan) struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); struct at_xdmac_desc *desc; int i; + unsigned long flags; - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, flags); if (at_xdmac_chan_is_enabled(atchan)) { dev_err(chan2dev(chan), @@ -1226,7 +1273,7 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan) dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i); spin_unlock: - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, flags); return i; } diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 2890d744bb1b..3ddfd1f6c23c 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -487,7 +487,11 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) caps->directions = device->directions; caps->residue_granularity = device->residue_granularity; - caps->cmd_pause = !!device->device_pause; + /* + * Some devices implement only pause (e.g. to get residuum) but no + * resume. However cmd_pause is advertised as pause AND resume. 
+ */ + caps->cmd_pause = !!(device->device_pause && device->device_resume); caps->cmd_terminate = !!device->device_terminate_all; return 0; diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c index 9b84def7a353..f42f71e37e73 100644 --- a/drivers/dma/hsu/hsu.c +++ b/drivers/dma/hsu/hsu.c @@ -384,7 +384,10 @@ static int hsu_dma_terminate_all(struct dma_chan *chan) spin_lock_irqsave(&hsuc->vchan.lock, flags); hsu_dma_stop_channel(hsuc); - hsuc->desc = NULL; + if (hsuc->desc) { + hsu_dma_desc_free(&hsuc->desc->vdesc); + hsuc->desc = NULL; + } vchan_get_all_descriptors(&hsuc->vchan, &head); spin_unlock_irqrestore(&hsuc->vchan.lock, flags); diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index a7d9d3029b14..340f9e607cd8 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2127,6 +2127,7 @@ static int pl330_terminate_all(struct dma_chan *chan) struct pl330_dmac *pl330 = pch->dmac; LIST_HEAD(list); + pm_runtime_get_sync(pl330->ddma.dev); spin_lock_irqsave(&pch->lock, flags); spin_lock(&pl330->lock); _stop(pch->thread); @@ -2151,6 +2152,8 @@ static int pl330_terminate_all(struct dma_chan *chan) list_splice_tail_init(&pch->work_list, &pl330->desc_pool); list_splice_tail_init(&pch->completed_list, &pl330->desc_pool); spin_unlock_irqrestore(&pch->lock, flags); + pm_runtime_mark_last_busy(pl330->ddma.dev); + pm_runtime_put_autosuspend(pl330->ddma.dev); return 0; } diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index 8de4da5c9ab6..54071c148340 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -18,6 +18,11 @@ config EFI_VARS Subsequent efibootmgr releases may be found at: <http://github.com/vathpela/efibootmgr> +config EFI_ESRT + bool + depends on EFI && !IA64 + default y + config EFI_VARS_PSTORE tristate "Register efivars backend for pstore" depends on EFI_VARS && PSTORE diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile index d8be608a9f3b..6fd3da938717 100644 --- a/drivers/firmware/efi/Makefile +++ b/drivers/firmware/efi/Makefile @@ -3,6 +3,7 @@ # obj-$(CONFIG_EFI) += efi.o vars.o reboot.o obj-$(CONFIG_EFI_VARS) += efivars.o +obj-$(CONFIG_EFI_ESRT) += esrt.o obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o obj-$(CONFIG_UEFI_CPER) += cper.o obj-$(CONFIG_EFI_RUNTIME_MAP) += runtime-map.o diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 3061bb8629dc..ca617f40574a 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -39,6 +39,7 @@ struct efi __read_mostly efi = { .fw_vendor = EFI_INVALID_TABLE_ADDR, .runtime = EFI_INVALID_TABLE_ADDR, .config_table = EFI_INVALID_TABLE_ADDR, + .esrt = EFI_INVALID_TABLE_ADDR, }; EXPORT_SYMBOL(efi); @@ -64,7 +65,7 @@ static int __init parse_efi_cmdline(char *str) } early_param("efi", parse_efi_cmdline); -static struct kobject *efi_kobj; +struct kobject *efi_kobj; static struct kobject *efivars_kobj; /* @@ -85,10 +86,15 @@ static ssize_t systab_show(struct kobject *kobj, str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20); if (efi.acpi != EFI_INVALID_TABLE_ADDR) str += sprintf(str, "ACPI=0x%lx\n", efi.acpi); - if (efi.smbios != EFI_INVALID_TABLE_ADDR) - str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios); + /* + * If both SMBIOS and SMBIOS3 entry points are implemented, the + * SMBIOS3 entry point shall be preferred, so we list it first to + * let applications stop parsing after the first match. 
+ */ if (efi.smbios3 != EFI_INVALID_TABLE_ADDR) str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3); + if (efi.smbios != EFI_INVALID_TABLE_ADDR) + str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios); if (efi.hcdp != EFI_INVALID_TABLE_ADDR) str += sprintf(str, "HCDP=0x%lx\n", efi.hcdp); if (efi.boot_info != EFI_INVALID_TABLE_ADDR) @@ -232,6 +238,84 @@ err_put: subsys_initcall(efisubsys_init); +/* + * Find the efi memory descriptor for a given physical address. Given a + * physicall address, determine if it exists within an EFI Memory Map entry, + * and if so, populate the supplied memory descriptor with the appropriate + * data. + */ +int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md) +{ + struct efi_memory_map *map = efi.memmap; + void *p, *e; + + if (!efi_enabled(EFI_MEMMAP)) { + pr_err_once("EFI_MEMMAP is not enabled.\n"); + return -EINVAL; + } + + if (!map) { + pr_err_once("efi.memmap is not set.\n"); + return -EINVAL; + } + if (!out_md) { + pr_err_once("out_md is null.\n"); + return -EINVAL; + } + if (WARN_ON_ONCE(!map->phys_map)) + return -EINVAL; + if (WARN_ON_ONCE(map->nr_map == 0) || WARN_ON_ONCE(map->desc_size == 0)) + return -EINVAL; + + e = map->phys_map + map->nr_map * map->desc_size; + for (p = map->phys_map; p < e; p += map->desc_size) { + efi_memory_desc_t *md; + u64 size; + u64 end; + + /* + * If a driver calls this after efi_free_boot_services, + * ->map will be NULL, and the target may also not be mapped. + * So just always get our own virtual map on the CPU. + * + */ + md = early_memremap((phys_addr_t)p, sizeof (*md)); + if (!md) { + pr_err_once("early_memremap(%p, %zu) failed.\n", + p, sizeof (*md)); + return -ENOMEM; + } + + if (!(md->attribute & EFI_MEMORY_RUNTIME) && + md->type != EFI_BOOT_SERVICES_DATA && + md->type != EFI_RUNTIME_SERVICES_DATA) { + early_memunmap(md, sizeof (*md)); + continue; + } + + size = md->num_pages << EFI_PAGE_SHIFT; + end = md->phys_addr + size; + if (phys_addr >= md->phys_addr && phys_addr < end) { + memcpy(out_md, md, sizeof(*out_md)); + early_memunmap(md, sizeof (*md)); + return 0; + } + + early_memunmap(md, sizeof (*md)); + } + pr_err_once("requested map not found.\n"); + return -ENOENT; +} + +/* + * Calculate the highest address of an efi memory descriptor. 
+ */ +u64 __init efi_mem_desc_end(efi_memory_desc_t *md) +{ + u64 size = md->num_pages << EFI_PAGE_SHIFT; + u64 end = md->phys_addr + size; + return end; +} /* * We can't ioremap data in EFI boot services RAM, because we've already mapped @@ -274,6 +358,7 @@ static __initdata efi_config_table_type_t common_tables[] = { {SMBIOS_TABLE_GUID, "SMBIOS", &efi.smbios}, {SMBIOS3_TABLE_GUID, "SMBIOS 3.0", &efi.smbios3}, {UGA_IO_PROTOCOL_GUID, "UGA", &efi.uga}, + {EFI_SYSTEM_RESOURCE_TABLE_GUID, "ESRT", &efi.esrt}, {NULL_GUID, NULL, NULL}, }; diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c index 7b2e0496e0c0..756eca8c4cf8 100644 --- a/drivers/firmware/efi/efivars.c +++ b/drivers/firmware/efi/efivars.c @@ -535,7 +535,7 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj, * efivar_create_sysfs_entry - create a new entry in sysfs * @new_var: efivar entry to create * - * Returns 1 on failure, 0 on success + * Returns 0 on success, negative error code on failure */ static int efivar_create_sysfs_entry(struct efivar_entry *new_var) @@ -544,6 +544,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var) char *short_name; unsigned long variable_name_size; efi_char16_t *variable_name; + int ret; variable_name = new_var->var.VariableName; variable_name_size = ucs2_strlen(variable_name) * sizeof(efi_char16_t); @@ -558,7 +559,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var) short_name = kzalloc(short_name_size, GFP_KERNEL); if (!short_name) - return 1; + return -ENOMEM; /* Convert Unicode to normal chars (assume top bits are 0), ala UTF-8 */ @@ -574,11 +575,11 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var) new_var->kobj.kset = efivars_kset; - i = kobject_init_and_add(&new_var->kobj, &efivar_ktype, + ret = kobject_init_and_add(&new_var->kobj, &efivar_ktype, NULL, "%s", short_name); kfree(short_name); - if (i) - return 1; + if (ret) + return ret; kobject_uevent(&new_var->kobj, KOBJ_ADD); efivar_entry_add(new_var, &efivar_sysfs_list); diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c new file mode 100644 index 000000000000..a5b95d61ae71 --- /dev/null +++ b/drivers/firmware/efi/esrt.c @@ -0,0 +1,471 @@ +/* + * esrt.c + * + * This module exports EFI System Resource Table (ESRT) entries into userspace + * through the sysfs file system. The ESRT provides a read-only catalog of + * system components for which the system accepts firmware upgrades via UEFI's + * "Capsule Update" feature. This module allows userland utilities to evaluate + * what firmware updates can be applied to this system, and potentially arrange + * for those updates to occur. + * + * Data is currently found below /sys/firmware/efi/esrt/... + */ +#define pr_fmt(fmt) "esrt: " fmt + +#include <linux/capability.h> +#include <linux/device.h> +#include <linux/efi.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/kobject.h> +#include <linux/list.h> +#include <linux/memblock.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/types.h> + +#include <asm/io.h> +#include <asm/early_ioremap.h> + +struct efi_system_resource_entry_v1 { + efi_guid_t fw_class; + u32 fw_type; + u32 fw_version; + u32 lowest_supported_fw_version; + u32 capsule_flags; + u32 last_attempt_version; + u32 last_attempt_status; +}; + +/* + * _count and _version are what they seem like. _max is actually just + * accounting info for the firmware when creating the table; it should never + * have been exposed to us. 
To wit, the spec says: + * The maximum number of resource array entries that can be within the + * table without reallocating the table, must not be zero. + * Since there's no guidance about what that means in terms of memory layout, + * it means nothing to us. + */ +struct efi_system_resource_table { + u32 fw_resource_count; + u32 fw_resource_count_max; + u64 fw_resource_version; + u8 entries[]; +}; + +static phys_addr_t esrt_data; +static size_t esrt_data_size; + +static struct efi_system_resource_table *esrt; + +struct esre_entry { + union { + struct efi_system_resource_entry_v1 *esre1; + } esre; + + struct kobject kobj; + struct list_head list; +}; + +/* global list of esre_entry. */ +static LIST_HEAD(entry_list); + +/* entry attribute */ +struct esre_attribute { + struct attribute attr; + ssize_t (*show)(struct esre_entry *entry, char *buf); + ssize_t (*store)(struct esre_entry *entry, + const char *buf, size_t count); +}; + +static struct esre_entry *to_entry(struct kobject *kobj) +{ + return container_of(kobj, struct esre_entry, kobj); +} + +static struct esre_attribute *to_attr(struct attribute *attr) +{ + return container_of(attr, struct esre_attribute, attr); +} + +static ssize_t esre_attr_show(struct kobject *kobj, + struct attribute *_attr, char *buf) +{ + struct esre_entry *entry = to_entry(kobj); + struct esre_attribute *attr = to_attr(_attr); + + /* Don't tell normal users what firmware versions we've got... */ + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + return attr->show(entry, buf); +} + +static const struct sysfs_ops esre_attr_ops = { + .show = esre_attr_show, +}; + +/* Generic ESRT Entry ("ESRE") support. */ +static ssize_t esre_fw_class_show(struct esre_entry *entry, char *buf) +{ + char *str = buf; + + efi_guid_to_str(&entry->esre.esre1->fw_class, str); + str += strlen(str); + str += sprintf(str, "\n"); + + return str - buf; +} + +static struct esre_attribute esre_fw_class = __ATTR(fw_class, 0400, + esre_fw_class_show, NULL); + +#define esre_attr_decl(name, size, fmt) \ +static ssize_t esre_##name##_show(struct esre_entry *entry, char *buf) \ +{ \ + return sprintf(buf, fmt "\n", \ + le##size##_to_cpu(entry->esre.esre1->name)); \ +} \ +\ +static struct esre_attribute esre_##name = __ATTR(name, 0400, \ + esre_##name##_show, NULL) + +esre_attr_decl(fw_type, 32, "%u"); +esre_attr_decl(fw_version, 32, "%u"); +esre_attr_decl(lowest_supported_fw_version, 32, "%u"); +esre_attr_decl(capsule_flags, 32, "0x%x"); +esre_attr_decl(last_attempt_version, 32, "%u"); +esre_attr_decl(last_attempt_status, 32, "%u"); + +static struct attribute *esre1_attrs[] = { + &esre_fw_class.attr, + &esre_fw_type.attr, + &esre_fw_version.attr, + &esre_lowest_supported_fw_version.attr, + &esre_capsule_flags.attr, + &esre_last_attempt_version.attr, + &esre_last_attempt_status.attr, + NULL +}; +static void esre_release(struct kobject *kobj) +{ + struct esre_entry *entry = to_entry(kobj); + + list_del(&entry->list); + kfree(entry); +} + +static struct kobj_type esre1_ktype = { + .release = esre_release, + .sysfs_ops = &esre_attr_ops, + .default_attrs = esre1_attrs, +}; + + +static struct kobject *esrt_kobj; +static struct kset *esrt_kset; + +static int esre_create_sysfs_entry(void *esre, int entry_num) +{ + struct esre_entry *entry; + char name[20]; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + sprintf(name, "entry%d", entry_num); + + entry->kobj.kset = esrt_kset; + + if (esrt->fw_resource_version == 1) { + int rc = 0; + + entry->esre.esre1 = esre; + rc = 
kobject_init_and_add(&entry->kobj, &esre1_ktype, NULL, + "%s", name); + if (rc) { + kfree(entry); + return rc; + } + } + + list_add_tail(&entry->list, &entry_list); + return 0; +} + +/* support for displaying ESRT fields at the top level */ +#define esrt_attr_decl(name, size, fmt) \ +static ssize_t esrt_##name##_show(struct kobject *kobj, \ + struct kobj_attribute *attr, char *buf)\ +{ \ + return sprintf(buf, fmt "\n", le##size##_to_cpu(esrt->name)); \ +} \ +\ +static struct kobj_attribute esrt_##name = __ATTR(name, 0400, \ + esrt_##name##_show, NULL) + +esrt_attr_decl(fw_resource_count, 32, "%u"); +esrt_attr_decl(fw_resource_count_max, 32, "%u"); +esrt_attr_decl(fw_resource_version, 64, "%llu"); + +static struct attribute *esrt_attrs[] = { + &esrt_fw_resource_count.attr, + &esrt_fw_resource_count_max.attr, + &esrt_fw_resource_version.attr, + NULL, +}; + +static inline int esrt_table_exists(void) +{ + if (!efi_enabled(EFI_CONFIG_TABLES)) + return 0; + if (efi.esrt == EFI_INVALID_TABLE_ADDR) + return 0; + return 1; +} + +static umode_t esrt_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + if (!esrt_table_exists()) + return 0; + return attr->mode; +} + +static struct attribute_group esrt_attr_group = { + .attrs = esrt_attrs, + .is_visible = esrt_attr_is_visible, +}; + +/* + * remap the table, copy it to kmalloced pages, and unmap it. + */ +void __init efi_esrt_init(void) +{ + void *va; + struct efi_system_resource_table tmpesrt; + struct efi_system_resource_entry_v1 *v1_entries; + size_t size, max, entry_size, entries_size; + efi_memory_desc_t md; + int rc; + phys_addr_t end; + + pr_debug("esrt-init: loading.\n"); + if (!esrt_table_exists()) + return; + + rc = efi_mem_desc_lookup(efi.esrt, &md); + if (rc < 0) { + pr_err("ESRT header is not in the memory map.\n"); + return; + } + + max = efi_mem_desc_end(&md); + if (max < efi.esrt) { + pr_err("EFI memory descriptor is invalid. (esrt: %p max: %p)\n", + (void *)efi.esrt, (void *)max); + return; + } + + size = sizeof(*esrt); + max -= efi.esrt; + + if (max < size) { + pr_err("ESRT header doen't fit on single memory map entry. (size: %zu max: %zu)\n", + size, max); + return; + } + + va = early_memremap(efi.esrt, size); + if (!va) { + pr_err("early_memremap(%p, %zu) failed.\n", (void *)efi.esrt, + size); + return; + } + + memcpy(&tmpesrt, va, sizeof(tmpesrt)); + + if (tmpesrt.fw_resource_version == 1) { + entry_size = sizeof (*v1_entries); + } else { + pr_err("Unsupported ESRT version %lld.\n", + tmpesrt.fw_resource_version); + return; + } + + if (tmpesrt.fw_resource_count > 0 && max - size < entry_size) { + pr_err("ESRT memory map entry can only hold the header. (max: %zu size: %zu)\n", + max - size, entry_size); + goto err_memunmap; + } + + /* + * The format doesn't really give us any boundary to test here, + * so I'm making up 128 as the max number of individually updatable + * components we support. + * 128 should be pretty excessive, but there's still some chance + * somebody will do that someday and we'll need to raise this. + */ + if (tmpesrt.fw_resource_count > 128) { + pr_err("ESRT says fw_resource_count has very large value %d.\n", + tmpesrt.fw_resource_count); + goto err_memunmap; + } + + /* + * We know it can't be larger than N * sizeof() here, and N is limited + * by the previous test to a small number, so there's no overflow. 
+ */ + entries_size = tmpesrt.fw_resource_count * entry_size; + if (max < size + entries_size) { + pr_err("ESRT does not fit on single memory map entry (size: %zu max: %zu)\n", + size, max); + goto err_memunmap; + } + + /* remap it with our (plausible) new pages */ + early_memunmap(va, size); + size += entries_size; + va = early_memremap(efi.esrt, size); + if (!va) { + pr_err("early_memremap(%p, %zu) failed.\n", (void *)efi.esrt, + size); + return; + } + + esrt_data = (phys_addr_t)efi.esrt; + esrt_data_size = size; + + end = esrt_data + size; + pr_info("Reserving ESRT space from %pa to %pa.\n", &esrt_data, &end); + memblock_reserve(esrt_data, esrt_data_size); + + pr_debug("esrt-init: loaded.\n"); +err_memunmap: + early_memunmap(va, size); +} + +static int __init register_entries(void) +{ + struct efi_system_resource_entry_v1 *v1_entries = (void *)esrt->entries; + int i, rc; + + if (!esrt_table_exists()) + return 0; + + for (i = 0; i < le32_to_cpu(esrt->fw_resource_count); i++) { + void *esre = NULL; + if (esrt->fw_resource_version == 1) { + esre = &v1_entries[i]; + } else { + pr_err("Unsupported ESRT version %lld.\n", + esrt->fw_resource_version); + return -EINVAL; + } + + rc = esre_create_sysfs_entry(esre, i); + if (rc < 0) { + pr_err("ESRT entry creation failed with error %d.\n", + rc); + return rc; + } + } + return 0; +} + +static void cleanup_entry_list(void) +{ + struct esre_entry *entry, *next; + + list_for_each_entry_safe(entry, next, &entry_list, list) { + kobject_put(&entry->kobj); + } +} + +static int __init esrt_sysfs_init(void) +{ + int error; + struct efi_system_resource_table __iomem *ioesrt; + + pr_debug("esrt-sysfs: loading.\n"); + if (!esrt_data || !esrt_data_size) + return -ENOSYS; + + ioesrt = ioremap(esrt_data, esrt_data_size); + if (!ioesrt) { + pr_err("ioremap(%pa, %zu) failed.\n", &esrt_data, + esrt_data_size); + return -ENOMEM; + } + + esrt = kmalloc(esrt_data_size, GFP_KERNEL); + if (!esrt) { + pr_err("kmalloc failed. 
(wanted %zu bytes)\n", esrt_data_size); + iounmap(ioesrt); + return -ENOMEM; + } + + memcpy_fromio(esrt, ioesrt, esrt_data_size); + + esrt_kobj = kobject_create_and_add("esrt", efi_kobj); + if (!esrt_kobj) { + pr_err("Firmware table registration failed.\n"); + error = -ENOMEM; + goto err; + } + + error = sysfs_create_group(esrt_kobj, &esrt_attr_group); + if (error) { + pr_err("Sysfs attribute export failed with error %d.\n", + error); + goto err_remove_esrt; + } + + esrt_kset = kset_create_and_add("entries", NULL, esrt_kobj); + if (!esrt_kset) { + pr_err("kset creation failed.\n"); + error = -ENOMEM; + goto err_remove_group; + } + + error = register_entries(); + if (error) + goto err_cleanup_list; + + memblock_remove(esrt_data, esrt_data_size); + + pr_debug("esrt-sysfs: loaded.\n"); + + return 0; +err_cleanup_list: + cleanup_entry_list(); + kset_unregister(esrt_kset); +err_remove_group: + sysfs_remove_group(esrt_kobj, &esrt_attr_group); +err_remove_esrt: + kobject_put(esrt_kobj); +err: + kfree(esrt); + esrt = NULL; + return error; +} + +static void __exit esrt_sysfs_exit(void) +{ + pr_debug("esrt-sysfs: unloading.\n"); + cleanup_entry_list(); + kset_unregister(esrt_kset); + sysfs_remove_group(esrt_kobj, &esrt_attr_group); + kfree(esrt); + esrt = NULL; + kobject_del(esrt_kobj); + kobject_put(esrt_kobj); +} + +module_init(esrt_sysfs_init); +module_exit(esrt_sysfs_exit); + +MODULE_AUTHOR("Peter Jones <pjones@redhat.com>"); +MODULE_DESCRIPTION("EFI System Resource Table support"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c index 071c2c969eec..72791232e46b 100644 --- a/drivers/firmware/iscsi_ibft.c +++ b/drivers/firmware/iscsi_ibft.c @@ -186,8 +186,20 @@ struct ibft_kobject { static struct iscsi_boot_kset *boot_kset; +/* fully null address */ static const char nulls[16]; +/* IPv4-mapped IPv6 ::ffff:0.0.0.0 */ +static const char mapped_nulls[16] = { 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00 }; + +static int address_not_null(u8 *ip) +{ + return (memcmp(ip, nulls, 16) && memcmp(ip, mapped_nulls, 16)); +} + /* * Helper functions to parse data properly. 
*/ @@ -445,7 +457,7 @@ static umode_t ibft_check_nic_for(void *data, int type) rc = S_IRUGO; break; case ISCSI_BOOT_ETH_IP_ADDR: - if (memcmp(nic->ip_addr, nulls, sizeof(nic->ip_addr))) + if (address_not_null(nic->ip_addr)) rc = S_IRUGO; break; case ISCSI_BOOT_ETH_SUBNET_MASK: @@ -456,21 +468,19 @@ static umode_t ibft_check_nic_for(void *data, int type) rc = S_IRUGO; break; case ISCSI_BOOT_ETH_GATEWAY: - if (memcmp(nic->gateway, nulls, sizeof(nic->gateway))) + if (address_not_null(nic->gateway)) rc = S_IRUGO; break; case ISCSI_BOOT_ETH_PRIMARY_DNS: - if (memcmp(nic->primary_dns, nulls, - sizeof(nic->primary_dns))) + if (address_not_null(nic->primary_dns)) rc = S_IRUGO; break; case ISCSI_BOOT_ETH_SECONDARY_DNS: - if (memcmp(nic->secondary_dns, nulls, - sizeof(nic->secondary_dns))) + if (address_not_null(nic->secondary_dns)) rc = S_IRUGO; break; case ISCSI_BOOT_ETH_DHCP: - if (memcmp(nic->dhcp, nulls, sizeof(nic->dhcp))) + if (address_not_null(nic->dhcp)) rc = S_IRUGO; break; case ISCSI_BOOT_ETH_VLAN: @@ -536,23 +546,19 @@ static umode_t __init ibft_check_initiator_for(void *data, int type) rc = S_IRUGO; break; case ISCSI_BOOT_INI_ISNS_SERVER: - if (memcmp(init->isns_server, nulls, - sizeof(init->isns_server))) + if (address_not_null(init->isns_server)) rc = S_IRUGO; break; case ISCSI_BOOT_INI_SLP_SERVER: - if (memcmp(init->slp_server, nulls, - sizeof(init->slp_server))) + if (address_not_null(init->slp_server)) rc = S_IRUGO; break; case ISCSI_BOOT_INI_PRI_RADIUS_SERVER: - if (memcmp(init->pri_radius_server, nulls, - sizeof(init->pri_radius_server))) + if (address_not_null(init->pri_radius_server)) rc = S_IRUGO; break; case ISCSI_BOOT_INI_SEC_RADIUS_SERVER: - if (memcmp(init->sec_radius_server, nulls, - sizeof(init->sec_radius_server))) + if (address_not_null(init->sec_radius_server)) rc = S_IRUGO; break; case ISCSI_BOOT_INI_INITIATOR_NAME: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index e469c4b2e8cc..c25728bc388a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -684,8 +684,6 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev->node_props.cpu_core_id_base); sysfs_show_32bit_prop(buffer, "simd_id_base", dev->node_props.simd_id_base); - sysfs_show_32bit_prop(buffer, "capability", - dev->node_props.capability); sysfs_show_32bit_prop(buffer, "max_waves_per_simd", dev->node_props.max_waves_per_simd); sysfs_show_32bit_prop(buffer, "lds_size_in_kb", @@ -736,6 +734,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev->gpu->kfd2kgd->get_fw_version( dev->gpu->kgd, KGD_ENGINE_MEC1)); + sysfs_show_32bit_prop(buffer, "capability", + dev->node_props.capability); } return sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute", diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index ffc305fc2076..eb7e61078a5b 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -217,7 +217,7 @@ static ssize_t status_store(struct device *device, mutex_unlock(&dev->mode_config.mutex); - return ret; + return ret ? 
ret : count; } static ssize_t status_show(struct device *device, diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 007c7d7d8295..dc55c51964ab 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1667,12 +1667,15 @@ static int i915_sr_status(struct seq_file *m, void *unused) if (HAS_PCH_SPLIT(dev)) sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; - else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) + else if (IS_CRESTLINE(dev) || IS_G4X(dev) || + IS_I945G(dev) || IS_I945GM(dev)) sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; else if (IS_I915GM(dev)) sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; else if (IS_PINEVIEW(dev)) sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; + else if (IS_VALLEYVIEW(dev)) + sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; intel_runtime_pm_put(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 53394f998a1f..2d0995e7afc3 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3003,8 +3003,8 @@ int i915_vma_unbind(struct i915_vma *vma) } else if (vma->ggtt_view.pages) { sg_free_table(vma->ggtt_view.pages); kfree(vma->ggtt_view.pages); - vma->ggtt_view.pages = NULL; } + vma->ggtt_view.pages = NULL; } drm_mm_remove_node(&vma->node); diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index a3190e793ed4..cc552a4c1f3b 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -32,6 +32,7 @@ #include "i915_trace.h" #include "intel_drv.h" #include <linux/dma_remapping.h> +#include <linux/uaccess.h> #define __EXEC_OBJECT_HAS_PIN (1<<31) #define __EXEC_OBJECT_HAS_FENCE (1<<30) @@ -465,7 +466,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, } /* We can't wait for rendering with pagefaults disabled */ - if (obj->active && in_atomic()) + if (obj->active && pagefault_disabled()) return -EFAULT; if (use_cpu_reloc(obj)) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index f27346e907b1..d714a4b5711e 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -880,10 +880,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, DP_AUX_CH_CTL_RECEIVE_ERROR)) continue; if (status & DP_AUX_CH_CTL_DONE) - break; + goto done; } - if (status & DP_AUX_CH_CTL_DONE) - break; } if ((status & DP_AUX_CH_CTL_DONE) == 0) { @@ -892,6 +890,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, goto out; } +done: /* Check for timeout or receive error. 
* Timeouts occur when the sink is not connected */ diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 56e437e31580..ae628001fd97 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -435,7 +435,7 @@ gmbus_xfer(struct i2c_adapter *adapter, struct intel_gmbus, adapter); struct drm_i915_private *dev_priv = bus->dev_priv; - int i, reg_offset; + int i = 0, inc, try = 0, reg_offset; int ret = 0; intel_aux_display_runtime_get(dev_priv); @@ -448,12 +448,14 @@ gmbus_xfer(struct i2c_adapter *adapter, reg_offset = dev_priv->gpio_mmio_base; +retry: I915_WRITE(GMBUS0 + reg_offset, bus->reg0); - for (i = 0; i < num; i++) { + for (; i < num; i += inc) { + inc = 1; if (gmbus_is_index_read(msgs, i, num)) { ret = gmbus_xfer_index_read(dev_priv, &msgs[i]); - i += 1; /* set i to the index of the read xfer */ + inc = 2; /* an index read is two msgs */ } else if (msgs[i].flags & I2C_M_RD) { ret = gmbus_xfer_read(dev_priv, &msgs[i], 0); } else { @@ -525,6 +527,18 @@ clear_err: adapter->name, msgs[i].addr, (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len); + /* + * Passive adapters sometimes NAK the first probe. Retry the first + * message once on -ENXIO for GMBUS transfers; the bit banging algorithm + * has retries internally. See also the retry loop in + * drm_do_probe_ddc_edid, which bails out on the first -ENXIO. + */ + if (ret == -ENXIO && i == 0 && try++ == 0) { + DRM_DEBUG_KMS("GMBUS [%s] NAK on first message, retry\n", + adapter->name); + goto retry; + } + goto out; timeout: diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 09df74b8e917..424e62197787 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -1134,6 +1134,12 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring) I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff); + if (ring->status_page.obj) { + I915_WRITE(RING_HWS_PGA(ring->mmio_base), + (u32)ring->status_page.gfx_addr); + POSTING_READ(RING_HWS_PGA(ring->mmio_base)); + } + I915_WRITE(RING_MODE_GEN7(ring), _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) | _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 441e2502b889..005b5e04de4d 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -901,13 +901,6 @@ static int chv_init_workarounds(struct intel_engine_cs *ring) GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4); - if (INTEL_REVID(dev) == SKL_REVID_C0 || - INTEL_REVID(dev) == SKL_REVID_D0) - /* WaBarrierPerformanceFixDisable:skl */ - WA_SET_BIT_MASKED(HDC_CHICKEN0, - HDC_FENCE_DEST_SLM_DISABLE | - HDC_BARRIER_PERFORMANCE_DISABLE); - return 0; } @@ -1024,6 +1017,13 @@ static int skl_init_workarounds(struct intel_engine_cs *ring) WA_SET_BIT_MASKED(HIZ_CHICKEN, BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE); + if (INTEL_REVID(dev) == SKL_REVID_C0 || + INTEL_REVID(dev) == SKL_REVID_D0) + /* WaBarrierPerformanceFixDisable:skl */ + WA_SET_BIT_MASKED(HDC_CHICKEN0, + HDC_FENCE_DEST_SLM_DISABLE | + HDC_BARRIER_PERFORMANCE_DISABLE); + return skl_tune_iz_hashing(ring); } diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index e87d2f418de4..987b81f31b0e 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -2550,7 +2550,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device) 
DRM_DEBUG_KMS("initialising analog device %d\n", device); - intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL); + intel_sdvo_connector = intel_sdvo_connector_alloc(); if (!intel_sdvo_connector) return false; diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index 6e84df9369a6..ad4b9010dfb0 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -1526,6 +1526,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector, return MODE_BANDWIDTH; } + if ((mode->hdisplay % 8) != 0 || (mode->hsync_start % 8) != 0 || + (mode->hsync_end % 8) != 0 || (mode->htotal % 8) != 0) { + return MODE_H_ILLEGAL; + } + if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 || mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 || mode->crtc_vdisplay > 2048 || mode->crtc_vsync_start > 4096 || diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index e597ffc26563..dac78ad24b31 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -580,9 +580,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, else radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; - /* if there is no audio, set MINM_OVER_MAXP */ - if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) - radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; if (rdev->family < CHIP_RV770) radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; /* use frac fb div on APUs */ @@ -1798,9 +1795,7 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc) if ((crtc->mode.clock == test_crtc->mode.clock) && (adjusted_clock == test_adjusted_clock) && (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) && - (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID) && - (drm_detect_monitor_audio(radeon_connector_edid(test_radeon_crtc->connector)) == - drm_detect_monitor_audio(radeon_connector_edid(radeon_crtc->connector)))) + (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)) return test_radeon_crtc->pll_id; } } diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c index f04205170b8a..cfa3a84a2af0 100644 --- a/drivers/gpu/drm/radeon/dce3_1_afmt.c +++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c @@ -173,7 +173,7 @@ void dce3_2_hdmi_update_acr(struct drm_encoder *encoder, long offset, struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; - WREG32(HDMI0_ACR_PACKET_CONTROL + offset, + WREG32(DCE3_HDMI0_ACR_PACKET_CONTROL + offset, HDMI0_ACR_SOURCE | /* select SW CTS value */ HDMI0_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */ diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index b7ca4c514621..a7fdfa4f0857 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1463,6 +1463,21 @@ int radeon_device_init(struct radeon_device *rdev, if (r) DRM_ERROR("ib ring test failed (%d).\n", r); + /* + * Turks/Thames GPU will freeze whole laptop if DPM is not restarted + * after the CP ring have chew one packet at least. Hence here we stop + * and restart DPM after the radeon_ib_ring_tests(). 
+ */ + if (rdev->pm.dpm_enabled && + (rdev->pm.pm_method == PM_METHOD_DPM) && + (rdev->family == CHIP_TURKS) && + (rdev->flags & RADEON_IS_MOBILITY)) { + mutex_lock(&rdev->pm.mutex); + radeon_dpm_disable(rdev); + radeon_dpm_enable(rdev); + mutex_unlock(&rdev->pm.mutex); + } + if ((radeon_testing & 1)) { if (rdev->accel_working) radeon_test_moves(rdev); diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c index 2b98ed3e684d..257b10be5cda 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c @@ -663,12 +663,17 @@ int radeon_dp_mst_probe(struct radeon_connector *radeon_connector) { struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; + struct drm_device *dev = radeon_connector->base.dev; + struct radeon_device *rdev = dev->dev_private; int ret; u8 msg[1]; if (!radeon_mst) return 0; + if (!ASIC_IS_DCE5(rdev)) + return 0; + if (dig_connector->dpcd[DP_DPCD_REV] < 0x12) return 0; diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 7b2a7335cc5d..b0acf50d9558 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -576,6 +576,9 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file if (radeon_get_allowed_info_register(rdev, *value, value)) return -EINVAL; break; + case RADEON_INFO_VA_UNMAP_WORKING: + *value = true; + break; default: DRM_DEBUG_KMS("Invalid request %d\n", info->request); return -EINVAL; diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index de42fc4a22b8..9c3377ca17b7 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -458,14 +458,16 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, /* make sure object fit at this offset */ eoffset = soffset + size; if (soffset >= eoffset) { - return -EINVAL; + r = -EINVAL; + goto error_unreserve; } last_pfn = eoffset / RADEON_GPU_PAGE_SIZE; if (last_pfn > rdev->vm_manager.max_pfn) { dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n", last_pfn, rdev->vm_manager.max_pfn); - return -EINVAL; + r = -EINVAL; + goto error_unreserve; } } else { @@ -486,7 +488,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo, soffset, tmp->bo, tmp->it.start, tmp->it.last); mutex_unlock(&vm->mutex); - return -EINVAL; + r = -EINVAL; + goto error_unreserve; } } @@ -497,7 +500,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL); if (!tmp) { mutex_unlock(&vm->mutex); - return -ENOMEM; + r = -ENOMEM; + goto error_unreserve; } tmp->it.start = bo_va->it.start; tmp->it.last = bo_va->it.last; @@ -555,7 +559,6 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, r = radeon_vm_clear_bo(rdev, pt); if (r) { radeon_bo_unref(&pt); - radeon_bo_reserve(bo_va->bo, false); return r; } @@ -575,6 +578,10 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, mutex_unlock(&vm->mutex); return 0; + +error_unreserve: + radeon_bo_unreserve(bo_va->bo); + return r; } /** diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index ed303ba3a593..3e03379e7c5d 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -63,7 +63,8 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius"); #define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO) #ifdef CONFIG_SMP -#define for_each_sibling(i, cpu) for_each_cpu(i, cpu_sibling_mask(cpu)) +#define 
for_each_sibling(i, cpu) \ + for_each_cpu(i, topology_sibling_cpumask(cpu)) #else #define for_each_sibling(i, cpu) for (i = 0; false; ) #endif diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c index 8fe78d08e01c..7c6966434ee7 100644 --- a/drivers/i2c/busses/i2c-hix5hd2.c +++ b/drivers/i2c/busses/i2c-hix5hd2.c @@ -554,4 +554,4 @@ module_platform_driver(hix5hd2_i2c_driver); MODULE_DESCRIPTION("Hix5hd2 I2C Bus driver"); MODULE_AUTHOR("Wei Yan <sledge.yanwei@huawei.com>"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:i2c-hix5hd2"); +MODULE_ALIAS("platform:hix5hd2-i2c"); diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 958c8db4ec30..297e9c9ac943 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -1143,6 +1143,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) return -ENOMEM; i2c->quirks = s3c24xx_get_device_quirks(pdev); + i2c->sysreg = ERR_PTR(-ENOENT); if (pdata) memcpy(i2c->pdata, pdata, sizeof(*pdata)); else diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c index 89d8aa1d2818..df12c57e6ce0 100644 --- a/drivers/iio/adc/twl6030-gpadc.c +++ b/drivers/iio/adc/twl6030-gpadc.c @@ -1001,7 +1001,7 @@ static struct platform_driver twl6030_gpadc_driver = { module_platform_driver(twl6030_gpadc_driver); -MODULE_ALIAS("platform: " DRIVER_NAME); +MODULE_ALIAS("platform:" DRIVER_NAME); MODULE_AUTHOR("Balaji T K <balajitk@ti.com>"); MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>"); MODULE_AUTHOR("Oleksandr Kozaruk <oleksandr.kozaruk@ti.com"); diff --git a/drivers/iio/imu/adis16400.h b/drivers/iio/imu/adis16400.h index 0916bf6b6c31..73b189c1c0fb 100644 --- a/drivers/iio/imu/adis16400.h +++ b/drivers/iio/imu/adis16400.h @@ -139,6 +139,7 @@ #define ADIS16400_NO_BURST BIT(1) #define ADIS16400_HAS_SLOW_MODE BIT(2) #define ADIS16400_HAS_SERIAL_NUMBER BIT(3) +#define ADIS16400_BURST_DIAG_STAT BIT(4) struct adis16400_state; @@ -165,6 +166,7 @@ struct adis16400_state { int filt_int; struct adis adis; + unsigned long avail_scan_mask[2]; }; /* At the moment triggers are only used for ring buffer diff --git a/drivers/iio/imu/adis16400_buffer.c b/drivers/iio/imu/adis16400_buffer.c index 6e727ffe5262..90c24a23c679 100644 --- a/drivers/iio/imu/adis16400_buffer.c +++ b/drivers/iio/imu/adis16400_buffer.c @@ -18,7 +18,8 @@ int adis16400_update_scan_mode(struct iio_dev *indio_dev, { struct adis16400_state *st = iio_priv(indio_dev); struct adis *adis = &st->adis; - uint16_t *tx; + unsigned int burst_length; + u8 *tx; if (st->variant->flags & ADIS16400_NO_BURST) return adis_update_scan_mode(indio_dev, scan_mask); @@ -26,26 +27,29 @@ int adis16400_update_scan_mode(struct iio_dev *indio_dev, kfree(adis->xfer); kfree(adis->buffer); + /* All but the timestamp channel */ + burst_length = (indio_dev->num_channels - 1) * sizeof(u16); + if (st->variant->flags & ADIS16400_BURST_DIAG_STAT) + burst_length += sizeof(u16); + adis->xfer = kcalloc(2, sizeof(*adis->xfer), GFP_KERNEL); if (!adis->xfer) return -ENOMEM; - adis->buffer = kzalloc(indio_dev->scan_bytes + sizeof(u16), - GFP_KERNEL); + adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL); if (!adis->buffer) return -ENOMEM; - tx = adis->buffer + indio_dev->scan_bytes; - + tx = adis->buffer + burst_length; tx[0] = ADIS_READ_REG(ADIS16400_GLOB_CMD); tx[1] = 0; adis->xfer[0].tx_buf = tx; adis->xfer[0].bits_per_word = 8; adis->xfer[0].len = 2; - adis->xfer[1].tx_buf = tx; + adis->xfer[1].rx_buf = adis->buffer; 
adis->xfer[1].bits_per_word = 8; - adis->xfer[1].len = indio_dev->scan_bytes; + adis->xfer[1].len = burst_length; spi_message_init(&adis->msg); spi_message_add_tail(&adis->xfer[0], &adis->msg); @@ -61,6 +65,7 @@ irqreturn_t adis16400_trigger_handler(int irq, void *p) struct adis16400_state *st = iio_priv(indio_dev); struct adis *adis = &st->adis; u32 old_speed_hz = st->adis.spi->max_speed_hz; + void *buffer; int ret; if (!adis->buffer) @@ -81,7 +86,12 @@ irqreturn_t adis16400_trigger_handler(int irq, void *p) spi_setup(st->adis.spi); } - iio_push_to_buffers_with_timestamp(indio_dev, adis->buffer, + if (st->variant->flags & ADIS16400_BURST_DIAG_STAT) + buffer = adis->buffer + sizeof(u16); + else + buffer = adis->buffer; + + iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp); iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c index fa795dcd5f75..2fd68f2219a7 100644 --- a/drivers/iio/imu/adis16400_core.c +++ b/drivers/iio/imu/adis16400_core.c @@ -405,6 +405,11 @@ static int adis16400_read_raw(struct iio_dev *indio_dev, *val = st->variant->temp_scale_nano / 1000000; *val2 = (st->variant->temp_scale_nano % 1000000); return IIO_VAL_INT_PLUS_MICRO; + case IIO_PRESSURE: + /* 20 uBar = 0.002kPascal */ + *val = 0; + *val2 = 2000; + return IIO_VAL_INT_PLUS_MICRO; default: return -EINVAL; } @@ -454,10 +459,10 @@ static int adis16400_read_raw(struct iio_dev *indio_dev, } } -#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si) { \ +#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si, chn) { \ .type = IIO_VOLTAGE, \ .indexed = 1, \ - .channel = 0, \ + .channel = chn, \ .extend_name = name, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ BIT(IIO_CHAN_INFO_SCALE), \ @@ -474,10 +479,10 @@ static int adis16400_read_raw(struct iio_dev *indio_dev, } #define ADIS16400_SUPPLY_CHAN(addr, bits) \ - ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY) + ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY, 0) #define ADIS16400_AUX_ADC_CHAN(addr, bits) \ - ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC) + ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC, 1) #define ADIS16400_GYRO_CHAN(mod, addr, bits) { \ .type = IIO_ANGL_VEL, \ @@ -773,7 +778,8 @@ static struct adis16400_chip_info adis16400_chips[] = { .channels = adis16448_channels, .num_channels = ARRAY_SIZE(adis16448_channels), .flags = ADIS16400_HAS_PROD_ID | - ADIS16400_HAS_SERIAL_NUMBER, + ADIS16400_HAS_SERIAL_NUMBER | + ADIS16400_BURST_DIAG_STAT, .gyro_scale_micro = IIO_DEGREE_TO_RAD(10000), /* 0.01 deg/s */ .accel_scale_micro = IIO_G_TO_M_S_2(833), /* 1/1200 g */ .temp_scale_nano = 73860000, /* 0.07386 C */ @@ -791,11 +797,6 @@ static const struct iio_info adis16400_info = { .debugfs_reg_access = adis_debugfs_reg_access, }; -static const unsigned long adis16400_burst_scan_mask[] = { - ~0UL, - 0, -}; - static const char * const adis16400_status_error_msgs[] = { [ADIS16400_DIAG_STAT_ZACCL_FAIL] = "Z-axis accelerometer self-test failure", [ADIS16400_DIAG_STAT_YACCL_FAIL] = "Y-axis accelerometer self-test failure", @@ -843,6 +844,20 @@ static const struct adis_data adis16400_data = { BIT(ADIS16400_DIAG_STAT_POWER_LOW), }; +static void adis16400_setup_chan_mask(struct adis16400_state *st) +{ + const struct adis16400_chip_info *chip_info = st->variant; + unsigned i; + + for (i = 0; i < chip_info->num_channels; i++) { + const struct iio_chan_spec *ch = &chip_info->channels[i]; + + if (ch->scan_index >= 0 && + 
ch->scan_index != ADIS16400_SCAN_TIMESTAMP) + st->avail_scan_mask[0] |= BIT(ch->scan_index); + } +} + static int adis16400_probe(struct spi_device *spi) { struct adis16400_state *st; @@ -866,8 +881,10 @@ static int adis16400_probe(struct spi_device *spi) indio_dev->info = &adis16400_info; indio_dev->modes = INDIO_DIRECT_MODE; - if (!(st->variant->flags & ADIS16400_NO_BURST)) - indio_dev->available_scan_masks = adis16400_burst_scan_mask; + if (!(st->variant->flags & ADIS16400_NO_BURST)) { + adis16400_setup_chan_mask(st); + indio_dev->available_scan_masks = st->avail_scan_mask; + } ret = adis_init(&st->adis, indio_dev, spi, &adis16400_data); if (ret) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 3f40319a55da..575a072d765f 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -65,6 +65,8 @@ static int isert_rdma_accept(struct isert_conn *isert_conn); struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np); +static void isert_release_work(struct work_struct *work); + static inline bool isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd) { @@ -648,6 +650,7 @@ isert_init_conn(struct isert_conn *isert_conn) mutex_init(&isert_conn->mutex); spin_lock_init(&isert_conn->pool_lock); INIT_LIST_HEAD(&isert_conn->fr_pool); + INIT_WORK(&isert_conn->release_work, isert_release_work); } static void @@ -925,6 +928,7 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id, { struct isert_np *isert_np = cma_id->context; struct isert_conn *isert_conn; + bool terminating = false; if (isert_np->np_cm_id == cma_id) return isert_np_cma_handler(cma_id->context, event); @@ -932,12 +936,25 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id, isert_conn = cma_id->qp->qp_context; mutex_lock(&isert_conn->mutex); + terminating = (isert_conn->state == ISER_CONN_TERMINATING); isert_conn_terminate(isert_conn); mutex_unlock(&isert_conn->mutex); isert_info("conn %p completing wait\n", isert_conn); complete(&isert_conn->wait); + if (terminating) + goto out; + + mutex_lock(&isert_np->np_accept_mutex); + if (!list_empty(&isert_conn->accept_node)) { + list_del_init(&isert_conn->accept_node); + isert_put_conn(isert_conn); + queue_work(isert_release_wq, &isert_conn->release_work); + } + mutex_unlock(&isert_np->np_accept_mutex); + +out: return 0; } @@ -2380,7 +2397,6 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, page_off = offset % PAGE_SIZE; send_wr->sg_list = ib_sge; - send_wr->num_sge = sg_nents; send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc; /* * Perform mapping of TCM scatterlist memory ib_sge dma_addr. 
@@ -2400,14 +2416,17 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, ib_sge->addr, ib_sge->length, ib_sge->lkey); page_off = 0; data_left -= ib_sge->length; + if (!data_left) + break; ib_sge++; isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge); } + send_wr->num_sge = ++i; isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n", send_wr->sg_list, send_wr->num_sge); - return sg_nents; + return send_wr->num_sge; } static int @@ -3366,7 +3385,6 @@ static void isert_wait_conn(struct iscsi_conn *conn) isert_wait4flush(isert_conn); isert_wait4logout(isert_conn); - INIT_WORK(&isert_conn->release_work, isert_release_work); queue_work(isert_release_wq, &isert_conn->release_work); } @@ -3374,6 +3392,7 @@ static void isert_free_conn(struct iscsi_conn *conn) { struct isert_conn *isert_conn = conn->context; + isert_wait4flush(isert_conn); isert_put_conn(isert_conn); } diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 7752bd59d4b7..a353b7de6d22 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -1063,9 +1063,8 @@ static void alps_process_trackstick_packet_v7(struct psmouse *psmouse) right = (packet[1] & 0x02) >> 1; middle = (packet[1] & 0x04) >> 2; - /* Divide 2 since trackpoint's speed is too fast */ - input_report_rel(dev2, REL_X, (char)x / 2); - input_report_rel(dev2, REL_Y, -((char)y / 2)); + input_report_rel(dev2, REL_X, (char)x); + input_report_rel(dev2, REL_Y, -((char)y)); input_report_key(dev2, BTN_LEFT, left); input_report_key(dev2, BTN_RIGHT, right); diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 79363b687195..ce3d40004458 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1376,10 +1376,11 @@ static bool elantech_is_signature_valid(const unsigned char *param) return true; /* - * Some models have a revision higher then 20. Meaning param[2] may - * be 10 or 20, skip the rates check for these. + * Some hw_version >= 4 models have a revision higher then 20. Meaning + * that param[2] may be 10 or 20, skip the rates check for these. 
*/ - if (param[0] == 0x46 && (param[1] & 0xef) == 0x0f && param[2] < 40) + if ((param[0] & 0x0f) >= 0x06 && (param[1] & 0xaf) == 0x0f && + param[2] < 40) return true; for (i = 0; i < ARRAY_SIZE(rates); i++) @@ -1555,6 +1556,7 @@ static int elantech_set_properties(struct elantech_data *etd) case 9: case 10: case 13: + case 14: etd->hw_version = 4; break; default: diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 630af73e98c4..35c8d0ceabee 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -151,6 +151,11 @@ static const struct min_max_quirk min_max_pnpid_table[] = { 1024, 5112, 2024, 4832 }, { + (const char * const []){"LEN2000", NULL}, + {ANY_BOARD_ID, ANY_BOARD_ID}, + 1024, 5113, 2021, 4832 + }, + { (const char * const []){"LEN2001", NULL}, {ANY_BOARD_ID, ANY_BOARD_ID}, 1024, 5022, 2508, 4832 @@ -191,7 +196,7 @@ static const char * const topbuttonpad_pnp_ids[] = { "LEN0045", "LEN0047", "LEN0049", - "LEN2000", + "LEN2000", /* S540 */ "LEN2001", /* Edge E431 */ "LEN2002", /* Edge E531 */ "LEN2003", diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index cbe8c1f28a95..fffea87a014f 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -2931,6 +2931,7 @@ static void *alloc_coherent(struct device *dev, size_t size, size = PAGE_ALIGN(size); dma_mask = dev->coherent_dma_mask; flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); + flag |= __GFP_ZERO; page = alloc_pages(flag | __GFP_NOWARN, get_order(size)); if (!page) { diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 68d43beccb7e..5ecfaf29933a 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -422,6 +422,14 @@ static int dmar_map_gfx = 1; static int dmar_forcedac; static int intel_iommu_strict; static int intel_iommu_superpage = 1; +static int intel_iommu_ecs = 1; + +/* We only actually use ECS when PASID support (on the new bit 40) + * is also advertised. Some early implementations — the ones with + * PASID support on bit 28 — have issues even when we *only* use + * extended root/context tables. 
*/ +#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \ + ecap_pasid(iommu->ecap)) int intel_iommu_gfx_mapped; EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped); @@ -465,6 +473,10 @@ static int __init intel_iommu_setup(char *str) printk(KERN_INFO "Intel-IOMMU: disable supported super page\n"); intel_iommu_superpage = 0; + } else if (!strncmp(str, "ecs_off", 7)) { + printk(KERN_INFO + "Intel-IOMMU: disable extended context table support\n"); + intel_iommu_ecs = 0; } str += strcspn(str, ","); @@ -669,7 +681,7 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu struct context_entry *context; u64 *entry; - if (ecap_ecs(iommu->ecap)) { + if (ecs_enabled(iommu)) { if (devfn >= 0x80) { devfn -= 0x80; entry = &root->hi; @@ -696,6 +708,11 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu return &context[devfn]; } +static int iommu_dummy(struct device *dev) +{ + return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO; +} + static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn) { struct dmar_drhd_unit *drhd = NULL; @@ -705,6 +722,9 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf u16 segment = 0; int i; + if (iommu_dummy(dev)) + return NULL; + if (dev_is_pci(dev)) { pdev = to_pci_dev(dev); segment = pci_domain_nr(pdev->bus); @@ -798,7 +818,7 @@ static void free_context_table(struct intel_iommu *iommu) if (context) free_pgtable_page(context); - if (!ecap_ecs(iommu->ecap)) + if (!ecs_enabled(iommu)) continue; context = iommu_context_addr(iommu, i, 0x80, 0); @@ -1133,7 +1153,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu) unsigned long flag; addr = virt_to_phys(iommu->root_entry); - if (ecap_ecs(iommu->ecap)) + if (ecs_enabled(iommu)) addr |= DMA_RTADDR_RTT; raw_spin_lock_irqsave(&iommu->register_lock, flag); @@ -2969,11 +2989,6 @@ static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev) return __get_valid_domain_for_dev(dev); } -static int iommu_dummy(struct device *dev) -{ - return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO; -} - /* Check if the dev needs to go through non-identity map and unmap process.*/ static int iommu_no_mapping(struct device *dev) { diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index 57f09cb54464..269c2354c431 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -271,7 +271,7 @@ int gic_get_c0_fdc_int(void) GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC)); } -static void gic_handle_shared_int(void) +static void gic_handle_shared_int(bool chained) { unsigned int i, intr, virq; unsigned long *pcpu_mask; @@ -299,7 +299,10 @@ static void gic_handle_shared_int(void) while (intr != gic_shared_intrs) { virq = irq_linear_revmap(gic_irq_domain, GIC_SHARED_TO_HWIRQ(intr)); - do_IRQ(virq); + if (chained) + generic_handle_irq(virq); + else + do_IRQ(virq); /* go to next pending bit */ bitmap_clear(pending, intr, 1); @@ -431,7 +434,7 @@ static struct irq_chip gic_edge_irq_controller = { #endif }; -static void gic_handle_local_int(void) +static void gic_handle_local_int(bool chained) { unsigned long pending, masked; unsigned int intr, virq; @@ -445,7 +448,10 @@ static void gic_handle_local_int(void) while (intr != GIC_NUM_LOCAL_INTRS) { virq = irq_linear_revmap(gic_irq_domain, GIC_LOCAL_TO_HWIRQ(intr)); - do_IRQ(virq); + if (chained) + generic_handle_irq(virq); + else + do_IRQ(virq); /* go to next pending bit */ bitmap_clear(&pending, intr, 1); @@ -509,13 
+515,14 @@ static struct irq_chip gic_all_vpes_local_irq_controller = { static void __gic_irq_dispatch(void) { - gic_handle_local_int(); - gic_handle_shared_int(); + gic_handle_local_int(false); + gic_handle_shared_int(false); } static void gic_irq_dispatch(unsigned int irq, struct irq_desc *desc) { - __gic_irq_dispatch(); + gic_handle_local_int(true); + gic_handle_shared_int(true); } #ifdef CONFIG_MIPS_GIC_IPI diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c index 4a9ce5b50c5b..6b2b582433bd 100644 --- a/drivers/irqchip/irq-sunxi-nmi.c +++ b/drivers/irqchip/irq-sunxi-nmi.c @@ -104,7 +104,7 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type) irqd_set_trigger_type(data, flow_type); irq_setup_alt_chip(data, flow_type); - for (i = 0; i <= gc->num_ct; i++, ct++) + for (i = 0; i < gc->num_ct; i++, ct++) if (ct->type & flow_type) ctrl_off = ct->regs.type; diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c index 30f2aef69d78..6a4cd771a2be 100644 --- a/drivers/lguest/x86/core.c +++ b/drivers/lguest/x86/core.c @@ -46,7 +46,7 @@ #include <asm/setup.h> #include <asm/lguest.h> #include <asm/uaccess.h> -#include <asm/i387.h> +#include <asm/fpu/internal.h> #include <asm/tlbflush.h> #include "../lg.h" @@ -251,7 +251,7 @@ void lguest_arch_run_guest(struct lg_cpu *cpu) * we set it now, so we can trap and pass that trap to the Guest if it * uses the FPU. */ - if (cpu->ts && user_has_fpu()) + if (cpu->ts && fpregs_active()) stts(); /* @@ -283,7 +283,7 @@ void lguest_arch_run_guest(struct lg_cpu *cpu) wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0); /* Clear the host TS bit if it was set above. */ - if (cpu->ts && user_has_fpu()) + if (cpu->ts && fpregs_active()) clts(); /* @@ -297,12 +297,12 @@ void lguest_arch_run_guest(struct lg_cpu *cpu) /* * Similarly, if we took a trap because the Guest used the FPU, * we have to restore the FPU it expects to see. - * math_state_restore() may sleep and we may even move off to + * fpu__restore() may sleep and we may even move off to * a different CPU. So all the critical stuff should be done * before this. 
*/ - else if (cpu->regs->trapnum == 7 && !user_has_fpu()) - math_state_restore(); + else if (cpu->regs->trapnum == 7 && !fpregs_active()) + fpu__restore(¤t->thread.fpu); } /*H:130 diff --git a/drivers/md/md.c b/drivers/md/md.c index 27506302eb7a..4dbed4a67aaf 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -3834,7 +3834,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len) err = -EBUSY; } spin_unlock(&mddev->lock); - return err; + return err ?: len; } err = mddev_lock(mddev); if (err) @@ -4217,13 +4217,14 @@ action_store(struct mddev *mddev, const char *page, size_t len) set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); else clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); - flush_workqueue(md_misc_wq); - if (mddev->sync_thread) { - set_bit(MD_RECOVERY_INTR, &mddev->recovery); - if (mddev_lock(mddev) == 0) { + if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && + mddev_lock(mddev) == 0) { + flush_workqueue(md_misc_wq); + if (mddev->sync_thread) { + set_bit(MD_RECOVERY_INTR, &mddev->recovery); md_reap_sync_thread(mddev); - mddev_unlock(mddev); } + mddev_unlock(mddev); } } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) @@ -8261,6 +8262,7 @@ void md_reap_sync_thread(struct mddev *mddev) if (mddev_is_clustered(mddev)) md_cluster_ops->metadata_update_finish(mddev); clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); + clear_bit(MD_RECOVERY_DONE, &mddev->recovery); clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index e793ab6b3570..f55c3f35b746 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -4156,6 +4156,7 @@ static int raid10_start_reshape(struct mddev *mddev) clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); + clear_bit(MD_RECOVERY_DONE, &mddev->recovery); set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 553d54b87052..b6793d2e051f 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -7354,6 +7354,7 @@ static int raid5_start_reshape(struct mddev *mddev) clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); + clear_bit(MD_RECOVERY_DONE, &mddev->recovery); set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); mddev->sync_thread = md_register_thread(md_do_sync, mddev, diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig index 3ef0f90b128f..157099243d61 100644 --- a/drivers/media/Kconfig +++ b/drivers/media/Kconfig @@ -97,6 +97,7 @@ config MEDIA_CONTROLLER config MEDIA_CONTROLLER_DVB bool "Enable Media controller for DVB" depends on MEDIA_CONTROLLER + depends on BROKEN ---help--- Enable the media controller API support for DVB. 
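The xgbe hunk just below swaps disable_irq() for disable_irq_nosync() inside the driver's Tx timer callback. A minimal sketch of the distinction, assuming a made-up sketch_channel structure (only disable_irq_nosync(), napi_schedule_prep(), __napi_schedule() and the old timer-callback convention are real kernel API; everything else here is illustrative and not part of the patch): disable_irq() also synchronizes with any handler currently running for that line and may sleep while it waits, which is not allowed in the atomic softirq context a timer runs in, whereas disable_irq_nosync() only marks the line disabled and returns immediately.

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Hypothetical per-channel state, standing in for xgbe_channel. */
struct sketch_channel {
	int dma_irq;
	struct napi_struct napi;
};

/* Timer callbacks execute in softirq context and must not sleep. */
static void sketch_tx_timer(unsigned long data)
{
	struct sketch_channel *channel = (struct sketch_channel *)data;

	if (napi_schedule_prep(&channel->napi)) {
		/*
		 * Safe here: disables the line without waiting for a
		 * handler that may still be running on another CPU.
		 */
		disable_irq_nosync(channel->dma_irq);

		/*
		 * disable_irq(channel->dma_irq) would additionally wait for
		 * that handler and can sleep, so it cannot be used in this
		 * context; that is what the hunk below corrects.
		 */
		__napi_schedule(&channel->napi);
	}
}

In the real driver the callback takes this path only when per-channel IRQs are in use; otherwise it masks the device's own Rx/Tx interrupt enables instead, exactly as the hunk shows.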
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index db84ddcfec84..9fd6c69a8bac 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -423,7 +423,7 @@ static void xgbe_tx_timer(unsigned long data) if (napi_schedule_prep(napi)) { /* Disable Tx and Rx interrupts */ if (pdata->per_channel_irq) - disable_irq(channel->dma_irq); + disable_irq_nosync(channel->dma_irq); else xgbe_disable_rx_tx_ints(pdata); diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 77363d680532..a3b1c07ae0af 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -2464,6 +2464,7 @@ err_out_powerdown: ssb_bus_may_powerdown(sdev->bus); err_out_free_dev: + netif_napi_del(&bp->napi); free_netdev(dev); out: @@ -2480,6 +2481,7 @@ static void b44_remove_one(struct ssb_device *sdev) b44_unregister_phy_one(bp); ssb_device_disable(sdev, 0); ssb_bus_may_powerdown(sdev->bus); + netif_napi_del(&bp->napi); free_netdev(dev); ssb_pcihost_set_power_state(sdev, PCI_D3hot); ssb_set_drvdata(sdev, NULL); diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index e7651b3c6c57..420949cc55aa 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -299,9 +299,6 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) phy_name = "external RGMII (no delay)"; else phy_name = "external RGMII (TX delay)"; - reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); - reg |= RGMII_MODE_EN | id_mode_dis; - bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); bcmgenet_sys_writel(priv, PORT_MODE_EXT_GPHY, SYS_PORT_CTRL); break; @@ -310,6 +307,15 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) return -EINVAL; } + /* This is an external PHY (xMII), so we need to enable the RGMII + * block for the interface to work + */ + if (priv->ext_phy) { + reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); + reg |= RGMII_MODE_EN | id_mode_dis; + bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); + } + if (init) dev_info(kdev, "configuring instance for %s\n", phy_name); diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index 28d9ca675a27..68d47b196dae 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -131,8 +131,15 @@ static void enic_get_drvinfo(struct net_device *netdev, { struct enic *enic = netdev_priv(netdev); struct vnic_devcmd_fw_info *fw_info; + int err; - enic_dev_fw_info(enic, &fw_info); + err = enic_dev_fw_info(enic, &fw_info); + /* return only when pci_zalloc_consistent fails in vnic_dev_fw_info + * For other failures, like devcmd failure, we return previously + * recorded info. + */ + if (err == -ENOMEM) + return; strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); @@ -181,8 +188,15 @@ static void enic_get_ethtool_stats(struct net_device *netdev, struct enic *enic = netdev_priv(netdev); struct vnic_stats *vstats; unsigned int i; - - enic_dev_stats_dump(enic, &vstats); + int err; + + err = enic_dev_stats_dump(enic, &vstats); + /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump + * For other failures, like devcmd failure, we return previously + * recorded stats. 
+ */ + if (err == -ENOMEM) + return; for (i = 0; i < enic_n_tx_stats; i++) *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index]; diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 204bd182473b..eadae1b412c6 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -615,8 +615,15 @@ static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev, { struct enic *enic = netdev_priv(netdev); struct vnic_stats *stats; + int err; - enic_dev_stats_dump(enic, &stats); + err = enic_dev_stats_dump(enic, &stats); + /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump + * For other failures, like devcmd failure, we return previously + * recorded stats. + */ + if (err == -ENOMEM) + return net_stats; net_stats->tx_packets = stats->tx.tx_frames_ok; net_stats->tx_bytes = stats->tx.tx_bytes_ok; @@ -1407,6 +1414,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) */ enic_calc_int_moderation(enic, &enic->rq[rq]); + enic_poll_unlock_napi(&enic->rq[rq]); if (work_done < work_to_do) { /* Some work done, but not enough to stay in polling, @@ -1418,7 +1426,6 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) enic_set_int_moderation(enic, &enic->rq[rq]); vnic_intr_unmask(&enic->intr[intr]); } - enic_poll_unlock_napi(&enic->rq[rq]); return work_done; } diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c index 36a2ed606c91..c4b2183bf352 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_rq.c +++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c @@ -188,16 +188,15 @@ void vnic_rq_clean(struct vnic_rq *rq, struct vnic_rq_buf *buf; u32 fetch_index; unsigned int count = rq->ring.desc_count; + int i; buf = rq->to_clean; - while (vnic_rq_desc_used(rq) > 0) { - + for (i = 0; i < rq->ring.desc_count; i++) { (*buf_clean)(rq, buf); - - buf = rq->to_clean = buf->next; - rq->ring.desc_avail++; + buf = buf->next; } + rq->ring.desc_avail = rq->ring.desc_count - 1; /* Use current fetch_index as the ring starting point */ fetch_index = ioread32(&rq->ctrl->fetch_index); diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index fb140faeafb1..c5e1d0ac75f9 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1720,9 +1720,9 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) total_size = buf_len; get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024; - get_fat_cmd.va = pci_alloc_consistent(adapter->pdev, - get_fat_cmd.size, - &get_fat_cmd.dma); + get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, + get_fat_cmd.size, + &get_fat_cmd.dma, GFP_ATOMIC); if (!get_fat_cmd.va) { dev_err(&adapter->pdev->dev, "Memory allocation failure while reading FAT data\n"); @@ -1767,8 +1767,8 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) log_offset += buf_size; } err: - pci_free_consistent(adapter->pdev, get_fat_cmd.size, - get_fat_cmd.va, get_fat_cmd.dma); + dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size, + get_fat_cmd.va, get_fat_cmd.dma); spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -2215,12 +2215,12 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, return -EINVAL; cmd.size = sizeof(struct be_cmd_resp_port_type); - cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); + cmd.va = 
dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_ATOMIC); if (!cmd.va) { dev_err(&adapter->pdev->dev, "Memory allocation failed\n"); return -ENOMEM; } - memset(cmd.va, 0, cmd.size); spin_lock_bh(&adapter->mcc_lock); @@ -2245,7 +2245,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, } err: spin_unlock_bh(&adapter->mcc_lock); - pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); return status; } @@ -2720,7 +2720,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter) goto err; } cmd.size = sizeof(struct be_cmd_req_get_phy_info); - cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_ATOMIC); if (!cmd.va) { dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); status = -ENOMEM; @@ -2754,7 +2755,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter) BE_SUPPORTED_SPEED_1GBPS; } } - pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); err: spin_unlock_bh(&adapter->mcc_lock); return status; @@ -2805,8 +2806,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter) memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); - attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size, - &attribs_cmd.dma); + attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, + attribs_cmd.size, + &attribs_cmd.dma, GFP_ATOMIC); if (!attribs_cmd.va) { dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); status = -ENOMEM; @@ -2833,8 +2835,8 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter) err: mutex_unlock(&adapter->mbox_lock); if (attribs_cmd.va) - pci_free_consistent(adapter->pdev, attribs_cmd.size, - attribs_cmd.va, attribs_cmd.dma); + dma_free_coherent(&adapter->pdev->dev, attribs_cmd.size, + attribs_cmd.va, attribs_cmd.dma); return status; } @@ -2972,9 +2974,10 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem)); get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list); - get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev, - get_mac_list_cmd.size, - &get_mac_list_cmd.dma); + get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, + get_mac_list_cmd.size, + &get_mac_list_cmd.dma, + GFP_ATOMIC); if (!get_mac_list_cmd.va) { dev_err(&adapter->pdev->dev, @@ -3047,8 +3050,8 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, out: spin_unlock_bh(&adapter->mcc_lock); - pci_free_consistent(adapter->pdev, get_mac_list_cmd.size, - get_mac_list_cmd.va, get_mac_list_cmd.dma); + dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size, + get_mac_list_cmd.va, get_mac_list_cmd.dma); return status; } @@ -3101,8 +3104,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, memset(&cmd, 0, sizeof(struct be_dma_mem)); cmd.size = sizeof(struct be_cmd_req_set_mac_list); - cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, - &cmd.dma, GFP_KERNEL); + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_KERNEL); if (!cmd.va) return -ENOMEM; @@ -3291,7 +3294,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) memset(&cmd, 0, sizeof(struct be_dma_mem)); cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); - cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); + 
cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_ATOMIC); if (!cmd.va) { dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); status = -ENOMEM; @@ -3326,7 +3330,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) err: mutex_unlock(&adapter->mbox_lock); if (cmd.va) - pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, + cmd.dma); return status; } @@ -3340,8 +3345,9 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level) memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); - extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, - &extfat_cmd.dma); + extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, + extfat_cmd.size, &extfat_cmd.dma, + GFP_ATOMIC); if (!extfat_cmd.va) return -ENOMEM; @@ -3363,8 +3369,8 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level) status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs); err: - pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, - extfat_cmd.dma); + dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va, + extfat_cmd.dma); return status; } @@ -3377,8 +3383,9 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter) memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); - extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, - &extfat_cmd.dma); + extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, + extfat_cmd.size, &extfat_cmd.dma, + GFP_ATOMIC); if (!extfat_cmd.va) { dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", @@ -3396,8 +3403,8 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter) level = cfgs->module[0].trace_lvl[j].dbg_lvl; } } - pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, - extfat_cmd.dma); + dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va, + extfat_cmd.dma); err: return level; } @@ -3595,7 +3602,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res) memset(&cmd, 0, sizeof(struct be_dma_mem)); cmd.size = sizeof(struct be_cmd_resp_get_func_config); - cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_ATOMIC); if (!cmd.va) { dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); status = -ENOMEM; @@ -3635,7 +3643,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res) err: mutex_unlock(&adapter->mbox_lock); if (cmd.va) - pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, + cmd.dma); return status; } @@ -3656,7 +3665,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, memset(&cmd, 0, sizeof(struct be_dma_mem)); cmd.size = sizeof(struct be_cmd_resp_get_profile_config); - cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_ATOMIC); if (!cmd.va) return -ENOMEM; @@ -3702,7 +3712,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, res->vf_if_cap_flags = vf_res->cap_flags; err: if (cmd.va) - pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, + cmd.dma); return status; } @@ -3717,7 +3728,8 @@ static int be_cmd_set_profile_config(struct 
be_adapter *adapter, void *desc, memset(&cmd, 0, sizeof(struct be_dma_mem)); cmd.size = sizeof(struct be_cmd_req_set_profile_config); - cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_ATOMIC); if (!cmd.va) return -ENOMEM; @@ -3733,7 +3745,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc, status = be_cmd_notify_wait(adapter, &wrb); if (cmd.va) - pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, + cmd.dma); return status; } diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index b765c24625bf..2835dee5dc39 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -264,8 +264,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name, int status = 0; read_cmd.size = LANCER_READ_FILE_CHUNK; - read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size, - &read_cmd.dma); + read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size, + &read_cmd.dma, GFP_ATOMIC); if (!read_cmd.va) { dev_err(&adapter->pdev->dev, @@ -289,8 +289,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name, break; } } - pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va, - read_cmd.dma); + dma_free_coherent(&adapter->pdev->dev, read_cmd.size, read_cmd.va, + read_cmd.dma); return status; } @@ -818,8 +818,9 @@ static int be_test_ddr_dma(struct be_adapter *adapter) }; ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); - ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size, - &ddrdma_cmd.dma, GFP_KERNEL); + ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, + ddrdma_cmd.size, &ddrdma_cmd.dma, + GFP_KERNEL); if (!ddrdma_cmd.va) return -ENOMEM; @@ -941,8 +942,9 @@ static int be_read_eeprom(struct net_device *netdev, memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem)); eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read); - eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size, - &eeprom_cmd.dma, GFP_KERNEL); + eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, + eeprom_cmd.size, &eeprom_cmd.dma, + GFP_KERNEL); if (!eeprom_cmd.va) return -ENOMEM; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 6f9ffb9026cd..e43cc8a73ea7 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -4605,8 +4605,8 @@ static int lancer_fw_download(struct be_adapter *adapter, flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) + LANCER_FW_DOWNLOAD_CHUNK; - flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, - &flash_cmd.dma, GFP_KERNEL); + flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, + &flash_cmd.dma, GFP_KERNEL); if (!flash_cmd.va) return -ENOMEM; @@ -4739,8 +4739,8 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw) } flash_cmd.size = sizeof(struct be_cmd_write_flashrom); - flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, - GFP_KERNEL); + flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, + GFP_KERNEL); if (!flash_cmd.va) return -ENOMEM; @@ -5291,16 +5291,15 @@ static int be_drv_init(struct be_adapter *adapter) int status = 0; mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; - mbox_mem_alloc->va = 
dma_alloc_coherent(dev, mbox_mem_alloc->size, - &mbox_mem_alloc->dma, - GFP_KERNEL); + mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size, + &mbox_mem_alloc->dma, + GFP_KERNEL); if (!mbox_mem_alloc->va) return -ENOMEM; mbox_mem_align->size = sizeof(struct be_mcc_mailbox); mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); - memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); rx_filter->size = sizeof(struct be_cmd_req_rx_filter); rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size, diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 33c35d3b7420..5d47307121ab 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -317,6 +317,7 @@ struct i40e_pf { #endif #define I40E_FLAG_PORT_ID_VALID (u64)(1 << 28) #define I40E_FLAG_DCB_CAPABLE (u64)(1 << 29) +#define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40) /* tracks features that get auto disabled by errors */ u64 auto_disable_flags; diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 34170eabca7d..da0faf478af0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -1021,6 +1021,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp, goto command_write_done; } + /* By default we are in VEPA mode, if this is the first VF/VMDq + * VSI to be added switch to VEB mode. + */ + if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { + pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + i40e_do_reset_safe(pf, + BIT_ULL(__I40E_PF_RESET_REQUESTED)); + } + vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0); if (vsi) dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n", diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index a54c14491e3b..5b5bea159bd5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -6097,6 +6097,10 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb) if (ret) goto end_reconstitute; + if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) + veb->bridge_mode = BRIDGE_MODE_VEB; + else + veb->bridge_mode = BRIDGE_MODE_VEPA; i40e_config_bridge_mode(veb); /* create the remaining VSIs attached to this VEB */ @@ -8031,7 +8035,12 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev, } else if (mode != veb->bridge_mode) { /* Existing HW bridge but different mode needs reset */ veb->bridge_mode = mode; - i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */ + if (mode == BRIDGE_MODE_VEB) + pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + else + pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; + i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); break; } } @@ -8343,11 +8352,12 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ctxt.uplink_seid = vsi->uplink_seid; ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; ctxt.flags = I40E_AQ_VSI_TYPE_PF; - if (i40e_is_vsi_uplink_mode_veb(vsi)) { + if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) && + (i40e_is_vsi_uplink_mode_veb(vsi))) { ctxt.info.valid_sections |= - cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); ctxt.info.switch_id = - cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); } i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); break; @@ -8746,6 +8756,14 @@ struct 
i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, __func__); return NULL; } + /* We come up by default in VEPA mode if SRIOV is not + * already enabled, in which case we can't force VEPA + * mode. + */ + if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { + veb->bridge_mode = BRIDGE_MODE_VEPA; + pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; + } i40e_config_bridge_mode(veb); } for (i = 0; i < I40E_MAX_VEB && !veb; i++) { @@ -9856,6 +9874,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_switch_setup; } +#ifdef CONFIG_PCI_IOV + /* prep for VF support */ + if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && + (pf->flags & I40E_FLAG_MSIX_ENABLED) && + !test_bit(__I40E_BAD_EEPROM, &pf->state)) { + if (pci_num_vf(pdev)) + pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + } +#endif err = i40e_setup_pf_switch(pf, false); if (err) { dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 4bd3a80aba82..9d95042d5a0f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2410,14 +2410,12 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) * i40e_chk_linearize - Check if there are more than 8 fragments per packet * @skb: send buffer * @tx_flags: collected send information - * @hdr_len: size of the packet header * * Note: Our HW can't scatter-gather more than 8 fragments to build * a packet on the wire and so we need to figure out the cases where we * need to linearize the skb. **/ -static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, - const u8 hdr_len) +static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags) { struct skb_frag_struct *frag; bool linearize = false; @@ -2429,7 +2427,7 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, gso_segs = skb_shinfo(skb)->gso_segs; if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { - u16 j = 1; + u16 j = 0; if (num_frags < (I40E_MAX_BUFFER_TXD)) goto linearize_chk_done; @@ -2440,21 +2438,18 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, goto linearize_chk_done; } frag = &skb_shinfo(skb)->frags[0]; - size = hdr_len; /* we might still have more fragments per segment */ do { size += skb_frag_size(frag); frag++; j++; + if ((size >= skb_shinfo(skb)->gso_size) && + (j < I40E_MAX_BUFFER_TXD)) { + size = (size % skb_shinfo(skb)->gso_size); + j = (size) ? 
1 : 0; + } if (j == I40E_MAX_BUFFER_TXD) { - if (size < skb_shinfo(skb)->gso_size) { - linearize = true; - break; - } - j = 1; - size -= skb_shinfo(skb)->gso_size; - if (size) - j++; - size += hdr_len; + linearize = true; + break; } num_frags--; } while (num_frags); @@ -2724,7 +2719,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, if (tsyn) tx_flags |= I40E_TX_FLAGS_TSYN; - if (i40e_chk_linearize(skb, tx_flags, hdr_len)) + if (i40e_chk_linearize(skb, tx_flags)) if (skb_linearize(skb)) goto out_drop; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 78d1c4ff565e..4e9376da0518 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1018,11 +1018,19 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) { struct i40e_pf *pf = pci_get_drvdata(pdev); - if (num_vfs) + if (num_vfs) { + if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { + pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + i40e_do_reset_safe(pf, + BIT_ULL(__I40E_PF_RESET_REQUESTED)); + } return i40e_pci_sriov_enable(pdev, num_vfs); + } if (!pci_vfs_assigned(pf->pdev)) { i40e_free_vfs(pf); + pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; + i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); } else { dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); return -EINVAL; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index b077e02a0cc7..458fbb421090 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1619,14 +1619,12 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, * i40e_chk_linearize - Check if there are more than 8 fragments per packet * @skb: send buffer * @tx_flags: collected send information - * @hdr_len: size of the packet header * * Note: Our HW can't scatter-gather more than 8 fragments to build * a packet on the wire and so we need to figure out the cases where we * need to linearize the skb. **/ -static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, - const u8 hdr_len) +static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags) { struct skb_frag_struct *frag; bool linearize = false; @@ -1638,7 +1636,7 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, gso_segs = skb_shinfo(skb)->gso_segs; if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { - u16 j = 1; + u16 j = 0; if (num_frags < (I40E_MAX_BUFFER_TXD)) goto linearize_chk_done; @@ -1649,21 +1647,18 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, goto linearize_chk_done; } frag = &skb_shinfo(skb)->frags[0]; - size = hdr_len; /* we might still have more fragments per segment */ do { size += skb_frag_size(frag); frag++; j++; + if ((size >= skb_shinfo(skb)->gso_size) && + (j < I40E_MAX_BUFFER_TXD)) { + size = (size % skb_shinfo(skb)->gso_size); + j = (size) ? 
1 : 0; + } if (j == I40E_MAX_BUFFER_TXD) { - if (size < skb_shinfo(skb)->gso_size) { - linearize = true; - break; - } - j = 1; - size -= skb_shinfo(skb)->gso_size; - if (size) - j++; - size += hdr_len; + linearize = true; + break; } num_frags--; } while (num_frags); @@ -1950,7 +1945,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, else if (tso) tx_flags |= I40E_TX_FLAGS_TSO; - if (i40e_chk_linearize(skb, tx_flags, hdr_len)) + if (i40e_chk_linearize(skb, tx_flags)) if (skb_linearize(skb)) goto out_drop; diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index e3b9b63ad010..c3a9392cbc19 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -538,8 +538,8 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp, igb->perout[i].start.tv_nsec = rq->perout.start.nsec; igb->perout[i].period.tv_sec = ts.tv_sec; igb->perout[i].period.tv_nsec = ts.tv_nsec; - wr32(trgttiml, rq->perout.start.sec); - wr32(trgttimh, rq->perout.start.nsec); + wr32(trgttimh, rq->perout.start.sec); + wr32(trgttiml, rq->perout.start.nsec); tsauxc |= tsauxc_mask; tsim |= tsim_mask; } else { diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 4b00545a3ace..65944dd8bf6b 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -1304,7 +1304,7 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx) if (!cpumask_test_cpu(cpu, thread_mask)) { ++count; cpumask_or(thread_mask, thread_mask, - topology_thread_cpumask(cpu)); + topology_sibling_cpumask(cpu)); } } diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c index cd29b1038c5e..15f9b7c9e4d3 100644 --- a/drivers/ntb/ntb_hw.c +++ b/drivers/ntb/ntb_hw.c @@ -1660,6 +1660,7 @@ static int ntb_atom_detect(struct ntb_device *ndev) u32 ppd; ndev->hw_type = BWD_HW; + ndev->limits.max_mw = BWD_MAX_MW; rc = pci_read_config_dword(ndev->pdev, NTB_PPD_OFFSET, &ppd); if (rc) @@ -1778,7 +1779,7 @@ static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev_warn(&pdev->dev, "Cannot remap BAR %d\n", MW_TO_BAR(i)); rc = -EIO; - goto err3; + goto err4; } } diff --git a/drivers/of/base.c b/drivers/of/base.c index 99764db0875a..f0650265febf 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -189,7 +189,7 @@ int __of_attach_node_sysfs(struct device_node *np) return 0; } -static int __init of_init(void) +void __init of_core_init(void) { struct device_node *np; @@ -198,7 +198,8 @@ static int __init of_init(void) of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj); if (!of_kset) { mutex_unlock(&of_mutex); - return -ENOMEM; + pr_err("devicetree: failed to register existing nodes\n"); + return; } for_each_of_allnodes(np) __of_attach_node_sysfs(np); @@ -207,10 +208,7 @@ static int __init of_init(void) /* Symlink in /proc as required by userspace ABI */ if (of_root) proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base"); - - return 0; } -core_initcall(of_init); static struct property *__of_find_property(const struct device_node *np, const char *name, int *lenp) diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c index 3351ef408125..53826b84e0ec 100644 --- a/drivers/of/dynamic.c +++ b/drivers/of/dynamic.c @@ -225,7 +225,7 @@ void __of_attach_node(struct device_node *np) phandle = __of_get_property(np, "phandle", &sz); if (!phandle) phandle = __of_get_property(np, "linux,phandle", &sz); - if (IS_ENABLED(PPC_PSERIES) && !phandle) + if 
(IS_ENABLED(CONFIG_PPC_PSERIES) && !phandle) phandle = __of_get_property(np, "ibm,phandle", &sz); np->phandle = (phandle && (sz >= 4)) ? be32_to_cpup(phandle) : 0; diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 4fd0cacf7ca0..508cc56130e3 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -428,16 +428,19 @@ static void __assign_resources_sorted(struct list_head *head, * consistent. */ if (add_align > dev_res->res->start) { + resource_size_t r_size = resource_size(dev_res->res); + dev_res->res->start = add_align; - dev_res->res->end = add_align + - resource_size(dev_res->res); + dev_res->res->end = add_align + r_size - 1; list_for_each_entry(dev_res2, head, list) { align = pci_resource_alignment(dev_res2->dev, dev_res2->res); - if (add_align > align) + if (add_align > align) { list_move_tail(&dev_res->list, &dev_res2->list); + break; + } } } diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index a53bd5b52df9..fc9b9f0ea91e 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig @@ -38,7 +38,9 @@ config ARMADA375_USBCLUSTER_PHY config PHY_DM816X_USB tristate "TI dm816x USB PHY driver" depends on ARCH_OMAP2PLUS + depends on USB_SUPPORT select GENERIC_PHY + select USB_PHY help Enable this for dm816x USB to work. @@ -97,8 +99,9 @@ config OMAP_CONTROL_PHY config OMAP_USB2 tristate "OMAP USB2 PHY Driver" depends on ARCH_OMAP2PLUS - depends on USB_PHY + depends on USB_SUPPORT select GENERIC_PHY + select USB_PHY select OMAP_CONTROL_PHY depends on OMAP_OCP2SCP help @@ -122,8 +125,9 @@ config TI_PIPE3 config TWL4030_USB tristate "TWL4030 USB Transceiver Driver" depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS - depends on USB_PHY + depends on USB_SUPPORT select GENERIC_PHY + select USB_PHY help Enable this to support the USB OTG transceiver on TWL4030 family chips (including the TWL5030 and TPS659x0 devices). @@ -304,7 +308,7 @@ config PHY_STIH41X_USB config PHY_QCOM_UFS tristate "Qualcomm UFS PHY driver" - depends on OF && ARCH_MSM + depends on OF && ARCH_QCOM select GENERIC_PHY help Support for UFS PHY on QCOM chipsets. 
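A note on the drivers/pci/setup-bus.c hunk above: a resource spanning an inclusive [start, end] range has size end - start + 1, so when start is rewritten to add_align the end must be recomputed from the size captured beforehand, as add_align + size - 1. The replaced code read the size only after start had already moved and dropped the -1, so the window came out mis-sized. Below is a minimal userspace sketch of that arithmetic, under the assumption of simplified stand-in types (struct res, res_size, relocate) rather than the kernel's struct resource and helpers:

/*
 * Minimal userspace sketch of the resource-relocation arithmetic fixed in
 * the drivers/pci/setup-bus.c hunk above.  The struct and helpers here are
 * simplified stand-ins, not the kernel's definitions.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct res {
	uint64_t start;
	uint64_t end;	/* inclusive, as in the kernel's struct resource */
};

/* size of an inclusive [start, end] range */
static uint64_t res_size(const struct res *r)
{
	return r->end - r->start + 1;
}

/* Move a resource to a new (aligned) base while preserving its size. */
static void relocate(struct res *r, uint64_t new_start)
{
	/* capture the size before touching start, as the fix does */
	uint64_t size = res_size(r);

	r->start = new_start;
	r->end = new_start + size - 1;
}

int main(void)
{
	struct res r = { .start = 0x1000, .end = 0x1fff };	/* 4 KiB window */

	relocate(&r, 0x4000);

	/* the window keeps its 4 KiB size at the new base */
	assert(res_size(&r) == 0x1000);
	printf("relocated to [%#llx, %#llx]\n",
	       (unsigned long long)r.start, (unsigned long long)r.end);
	return 0;
}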
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index 3791838f4bd4..63bc12d7a73e 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c @@ -530,7 +530,7 @@ struct phy *phy_optional_get(struct device *dev, const char *string) { struct phy *phy = phy_get(dev, string); - if (PTR_ERR(phy) == -ENODEV) + if (IS_ERR(phy) && (PTR_ERR(phy) == -ENODEV)) phy = NULL; return phy; @@ -584,7 +584,7 @@ struct phy *devm_phy_optional_get(struct device *dev, const char *string) { struct phy *phy = devm_phy_get(dev, string); - if (PTR_ERR(phy) == -ENODEV) + if (IS_ERR(phy) && (PTR_ERR(phy) == -ENODEV)) phy = NULL; return phy; diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c index 183ef4368101..c1a468686bdc 100644 --- a/drivers/phy/phy-omap-usb2.c +++ b/drivers/phy/phy-omap-usb2.c @@ -275,6 +275,7 @@ static int omap_usb2_probe(struct platform_device *pdev) phy->wkupclk = devm_clk_get(phy->dev, "usb_phy_cm_clk32k"); if (IS_ERR(phy->wkupclk)) { dev_err(&pdev->dev, "unable to get usb_phy_cm_clk32k\n"); + pm_runtime_disable(phy->dev); return PTR_ERR(phy->wkupclk); } else { dev_warn(&pdev->dev, diff --git a/drivers/phy/phy-rcar-gen2.c b/drivers/phy/phy-rcar-gen2.c index 778276aba3aa..97d45f47d1ad 100644 --- a/drivers/phy/phy-rcar-gen2.c +++ b/drivers/phy/phy-rcar-gen2.c @@ -23,7 +23,7 @@ #define USBHS_LPSTS 0x02 #define USBHS_UGCTRL 0x80 #define USBHS_UGCTRL2 0x84 -#define USBHS_UGSTS 0x88 /* The manuals have 0x90 */ +#define USBHS_UGSTS 0x88 /* From technical update */ /* Low Power Status register (LPSTS) */ #define USBHS_LPSTS_SUSPM 0x4000 @@ -41,7 +41,7 @@ #define USBHS_UGCTRL2_USB0SEL_HS_USB 0x00000030 /* USB General status register (UGSTS) */ -#define USBHS_UGSTS_LOCK 0x00000300 /* The manuals have 0x3 */ +#define USBHS_UGSTS_LOCK 0x00000100 /* From technical update */ #define PHYS_PER_CHANNEL 2 diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig index bcdb22d5e215..3c1850332a90 100644 --- a/drivers/soc/mediatek/Kconfig +++ b/drivers/soc/mediatek/Kconfig @@ -4,6 +4,7 @@ config MTK_PMIC_WRAP tristate "MediaTek PMIC Wrapper Support" depends on ARCH_MEDIATEK + depends on RESET_CONTROLLER select REGMAP help Say yes here to add support for MediaTek PMIC Wrapper found diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c index db5be1eec54c..f432291feee9 100644 --- a/drivers/soc/mediatek/mtk-pmic-wrap.c +++ b/drivers/soc/mediatek/mtk-pmic-wrap.c @@ -443,11 +443,6 @@ static int pwrap_wait_for_state(struct pmic_wrapper *wrp, static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata) { int ret; - u32 val; - - val = pwrap_readl(wrp, PWRAP_WACS2_RDATA); - if (PWRAP_GET_WACS_FSM(val) == PWRAP_WACS_FSM_WFVLDCLR) - pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR); ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle); if (ret) @@ -462,11 +457,6 @@ static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata) static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) { int ret; - u32 val; - - val = pwrap_readl(wrp, PWRAP_WACS2_RDATA); - if (PWRAP_GET_WACS_FSM(val) == PWRAP_WACS_FSM_WFVLDCLR) - pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR); ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle); if (ret) @@ -480,6 +470,8 @@ static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) *rdata = PWRAP_GET_WACS_RDATA(pwrap_readl(wrp, PWRAP_WACS2_RDATA)); + pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR); + return 0; } @@ -563,45 +555,17 @@ static int pwrap_init_sidly(struct pmic_wrapper *wrp) static int 
pwrap_init_reg_clock(struct pmic_wrapper *wrp) { - unsigned long rate_spi; - int ck_mhz; - - rate_spi = clk_get_rate(wrp->clk_spi); - - if (rate_spi > 26000000) - ck_mhz = 26; - else if (rate_spi > 18000000) - ck_mhz = 18; - else - ck_mhz = 0; - - switch (ck_mhz) { - case 18: - if (pwrap_is_mt8135(wrp)) - pwrap_writel(wrp, 0xc, PWRAP_CSHEXT); - pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_WRITE); - pwrap_writel(wrp, 0xc, PWRAP_CSHEXT_READ); - pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_START); - pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_END); - break; - case 26: - if (pwrap_is_mt8135(wrp)) - pwrap_writel(wrp, 0x4, PWRAP_CSHEXT); + if (pwrap_is_mt8135(wrp)) { + pwrap_writel(wrp, 0x4, PWRAP_CSHEXT); pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE); pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ); pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_START); pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_END); - break; - case 0: - if (pwrap_is_mt8135(wrp)) - pwrap_writel(wrp, 0xf, PWRAP_CSHEXT); - pwrap_writel(wrp, 0xf, PWRAP_CSHEXT_WRITE); - pwrap_writel(wrp, 0xf, PWRAP_CSHEXT_READ); - pwrap_writel(wrp, 0xf, PWRAP_CSLEXT_START); - pwrap_writel(wrp, 0xf, PWRAP_CSLEXT_END); - break; - default: - return -EINVAL; + } else { + pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE); + pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ); + pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_START); + pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_END); } return 0; diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c index 09428412139e..c5352ea4821e 100644 --- a/drivers/ssb/driver_chipcommon_pmu.c +++ b/drivers/ssb/driver_chipcommon_pmu.c @@ -621,8 +621,8 @@ static u32 ssb_pmu_get_alp_clock_clk0(struct ssb_chipcommon *cc) u32 crystalfreq; const struct pmu0_plltab_entry *e = NULL; - crystalfreq = chipco_read32(cc, SSB_CHIPCO_PMU_CTL) & - SSB_CHIPCO_PMU_CTL_XTALFREQ >> SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT; + crystalfreq = (chipco_read32(cc, SSB_CHIPCO_PMU_CTL) & + SSB_CHIPCO_PMU_CTL_XTALFREQ) >> SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT; e = pmu0_plltab_find_entry(crystalfreq); BUG_ON(!e); return e->freq * 1000; @@ -634,7 +634,7 @@ u32 ssb_pmu_get_alp_clock(struct ssb_chipcommon *cc) switch (bus->chip_id) { case 0x5354: - ssb_pmu_get_alp_clock_clk0(cc); + return ssb_pmu_get_alp_clock_clk0(cc); default: ssb_err("ERROR: PMU alp clock unknown for device %04X\n", bus->chip_id); diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h b/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h index 3925db160650..513c81f43d6e 100644 --- a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h +++ b/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h @@ -189,22 +189,7 @@ static inline int ll_quota_off(struct super_block *sb, int off, int remount) #endif - -/* - * After 3.1, kernel's nameidata.intent.open.flags is different - * with lustre's lookup_intent.it_flags, as lustre's it_flags' - * lower bits equal to FMODE_xxx while kernel doesn't transliterate - * lower bits of nameidata.intent.open.flags to FMODE_xxx. 
- * */ #include <linux/version.h> -static inline int ll_namei_to_lookup_intent_flag(int flag) -{ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0) - flag = (flag & ~O_ACCMODE) | OPEN_FMODE(flag); -#endif - return flag; -} - #include <linux/fs.h> # define ll_umode_t umode_t diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c index cc3ab351943e..f9262243f935 100644 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c +++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c @@ -87,7 +87,7 @@ static void cfs_cpu_core_siblings(int cpu, cpumask_t *mask) /* return cpumask of HTs in the same core */ static void cfs_cpu_ht_siblings(int cpu, cpumask_t *mask) { - cpumask_copy(mask, topology_thread_cpumask(cpu)); + cpumask_copy(mask, topology_sibling_cpumask(cpu)); } static void cfs_node_to_cpumask(int node, cpumask_t *mask) diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h index 5f918e3c4683..528af9011653 100644 --- a/drivers/staging/lustre/lustre/llite/llite_internal.h +++ b/drivers/staging/lustre/lustre/llite/llite_internal.h @@ -57,12 +57,6 @@ #define VM_FAULT_RETRY 0 #endif -/* Kernel 3.1 kills LOOKUP_CONTINUE, LOOKUP_PARENT is equivalent to it. - * seem kernel commit 49084c3bb2055c401f3493c13edae14d49128ca0 */ -#ifndef LOOKUP_CONTINUE -#define LOOKUP_CONTINUE LOOKUP_PARENT -#endif - /** Only used on client-side for indicating the tail of dir hash/offset. */ #define LL_DIR_END_OFF 0x7fffffffffffffffULL #define LL_DIR_END_OFF_32BIT 0x7fffffffUL diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c index 3711e671a4df..69b203651905 100644 --- a/drivers/staging/lustre/lustre/llite/symlink.c +++ b/drivers/staging/lustre/lustre/llite/symlink.c @@ -118,7 +118,7 @@ failed: return rc; } -static void *ll_follow_link(struct dentry *dentry, struct nameidata *nd) +static const char *ll_follow_link(struct dentry *dentry, void **cookie) { struct inode *inode = d_inode(dentry); struct ptlrpc_request *request = NULL; @@ -126,32 +126,22 @@ static void *ll_follow_link(struct dentry *dentry, struct nameidata *nd) char *symname = NULL; CDEBUG(D_VFSTRACE, "VFS Op\n"); - /* Limit the recursive symlink depth to 5 instead of default - * 8 links when kernel has 4k stack to prevent stack overflow. - * For 8k stacks we need to limit it to 7 for local servers. */ - if (THREAD_SIZE < 8192 && current->link_count >= 6) { - rc = -ELOOP; - } else if (THREAD_SIZE == 8192 && current->link_count >= 8) { - rc = -ELOOP; - } else { - ll_inode_size_lock(inode); - rc = ll_readlink_internal(inode, &request, &symname); - ll_inode_size_unlock(inode); - } + ll_inode_size_lock(inode); + rc = ll_readlink_internal(inode, &request, &symname); + ll_inode_size_unlock(inode); if (rc) { ptlrpc_req_finished(request); - request = NULL; - symname = ERR_PTR(rc); + return ERR_PTR(rc); } - nd_set_link(nd, symname); /* symname may contain a pointer to the request message buffer, * we delay request releasing until ll_put_link then. 
*/ - return request; + *cookie = request; + return symname; } -static void ll_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) +static void ll_put_link(struct inode *unused, void *cookie) { ptlrpc_req_finished(cookie); } diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c index 8e61421515cb..344189ac5698 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/service.c +++ b/drivers/staging/lustre/lustre/ptlrpc/service.c @@ -557,7 +557,7 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc, * there are. */ /* weight is # of HTs */ - if (cpumask_weight(topology_thread_cpumask(0)) > 1) { + if (cpumask_weight(topology_sibling_cpumask(0)) > 1) { /* depress thread factor for hyper-thread */ factor = factor - (factor >> 1) + (factor >> 3); } @@ -2768,7 +2768,7 @@ int ptlrpc_hr_init(void) init_waitqueue_head(&ptlrpc_hr.hr_waitq); - weight = cpumask_weight(topology_thread_cpumask(0)); + weight = cpumask_weight(topology_sibling_cpumask(0)); cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) { hrp->hrp_cpt = i; diff --git a/drivers/staging/ozwpan/ozhcd.c b/drivers/staging/ozwpan/ozhcd.c index 5ff4716b72c3..784b5ecfa849 100644 --- a/drivers/staging/ozwpan/ozhcd.c +++ b/drivers/staging/ozwpan/ozhcd.c @@ -746,8 +746,8 @@ void oz_hcd_pd_reset(void *hpd, void *hport) /* * Context: softirq */ -void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc, - int length, int offset, int total_size) +void oz_hcd_get_desc_cnf(void *hport, u8 req_id, u8 status, const u8 *desc, + u8 length, u16 offset, u16 total_size) { struct oz_port *port = hport; struct urb *urb; @@ -759,8 +759,8 @@ void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc, if (!urb) return; if (status == 0) { - int copy_len; - int required_size = urb->transfer_buffer_length; + unsigned int copy_len; + unsigned int required_size = urb->transfer_buffer_length; if (required_size > total_size) required_size = total_size; diff --git a/drivers/staging/ozwpan/ozusbif.h b/drivers/staging/ozwpan/ozusbif.h index 4249fa374012..d2a6085345be 100644 --- a/drivers/staging/ozwpan/ozusbif.h +++ b/drivers/staging/ozwpan/ozusbif.h @@ -29,8 +29,8 @@ void oz_usb_request_heartbeat(void *hpd); /* Confirmation functions. 
*/ -void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, - const u8 *desc, int length, int offset, int total_size); +void oz_hcd_get_desc_cnf(void *hport, u8 req_id, u8 status, + const u8 *desc, u8 length, u16 offset, u16 total_size); void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, const u8 *data, int data_len); diff --git a/drivers/staging/ozwpan/ozusbsvc1.c b/drivers/staging/ozwpan/ozusbsvc1.c index d434d8c6fff6..f660bb198c65 100644 --- a/drivers/staging/ozwpan/ozusbsvc1.c +++ b/drivers/staging/ozwpan/ozusbsvc1.c @@ -326,7 +326,11 @@ static void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx, struct oz_multiple_fixed *body = (struct oz_multiple_fixed *)data_hdr; u8 *data = body->data; - int n = (len - sizeof(struct oz_multiple_fixed)+1) + unsigned int n; + if (!body->unit_size || + len < sizeof(struct oz_multiple_fixed) - 1) + break; + n = (len - (sizeof(struct oz_multiple_fixed) - 1)) / body->unit_size; while (n--) { oz_hcd_data_ind(usb_ctx->hport, body->endpoint, @@ -390,10 +394,15 @@ void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt) case OZ_GET_DESC_RSP: { struct oz_get_desc_rsp *body = (struct oz_get_desc_rsp *)usb_hdr; - int data_len = elt->length - - sizeof(struct oz_get_desc_rsp) + 1; - u16 offs = le16_to_cpu(get_unaligned(&body->offset)); - u16 total_size = + u16 offs, total_size; + u8 data_len; + + if (elt->length < sizeof(struct oz_get_desc_rsp) - 1) + break; + data_len = elt->length - + (sizeof(struct oz_get_desc_rsp) - 1); + offs = le16_to_cpu(get_unaligned(&body->offset)); + total_size = le16_to_cpu(get_unaligned(&body->total_size)); oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n"); oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id, diff --git a/drivers/staging/rtl8712/rtl8712_led.c b/drivers/staging/rtl8712/rtl8712_led.c index f1d47a0676c3..ada8d5dafd49 100644 --- a/drivers/staging/rtl8712/rtl8712_led.c +++ b/drivers/staging/rtl8712/rtl8712_led.c @@ -898,11 +898,11 @@ static void SwLedControlMode1(struct _adapter *padapter, IS_LED_WPS_BLINKING(pLed)) return; if (pLed->bLedLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedLinkBlinkInProgress = false; } if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } pLed->bLedNoLinkBlinkInProgress = true; @@ -921,11 +921,11 @@ static void SwLedControlMode1(struct _adapter *padapter, IS_LED_WPS_BLINKING(pLed)) return; if (pLed->bLedNoLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } pLed->bLedLinkBlinkInProgress = true; @@ -946,15 +946,15 @@ static void SwLedControlMode1(struct _adapter *padapter, if (IS_LED_WPS_BLINKING(pLed)) return; if (pLed->bLedNoLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } if (pLed->bLedLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedLinkBlinkInProgress = false; } if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } pLed->bLedScanBlinkInProgress = true; @@ -975,11 +975,11 @@ static void SwLedControlMode1(struct _adapter *padapter, IS_LED_WPS_BLINKING(pLed)) 
return; if (pLed->bLedNoLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } if (pLed->bLedLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedLinkBlinkInProgress = false; } pLed->bLedBlinkInProgress = true; @@ -998,19 +998,19 @@ static void SwLedControlMode1(struct _adapter *padapter, case LED_CTL_START_WPS_BOTTON: if (pLed->bLedWPSBlinkInProgress == false) { if (pLed->bLedNoLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } if (pLed->bLedLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedLinkBlinkInProgress = false; } if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } pLed->bLedWPSBlinkInProgress = true; @@ -1025,23 +1025,23 @@ static void SwLedControlMode1(struct _adapter *padapter, break; case LED_CTL_STOP_WPS: if (pLed->bLedNoLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } if (pLed->bLedLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedLinkBlinkInProgress = false; } if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } if (pLed->bLedWPSBlinkInProgress) - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); else pLed->bLedWPSBlinkInProgress = true; pLed->CurrLedState = LED_BLINK_WPS_STOP; @@ -1057,7 +1057,7 @@ static void SwLedControlMode1(struct _adapter *padapter, break; case LED_CTL_STOP_WPS_FAIL: if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } pLed->bLedNoLinkBlinkInProgress = true; @@ -1073,23 +1073,23 @@ static void SwLedControlMode1(struct _adapter *padapter, pLed->CurrLedState = LED_OFF; pLed->BlinkingLedState = LED_OFF; if (pLed->bLedNoLinkBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } if (pLed->bLedLinkBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedLinkBlinkInProgress = false; } if (pLed->bLedBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } mod_timer(&pLed->BlinkTimer, @@ -1116,7 +1116,7 @@ static void SwLedControlMode2(struct _adapter *padapter, return; if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } pLed->bLedScanBlinkInProgress = true; @@ 
-1154,11 +1154,11 @@ static void SwLedControlMode2(struct _adapter *padapter, pLed->CurrLedState = LED_ON; pLed->BlinkingLedState = LED_ON; if (pLed->bLedBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } @@ -1170,11 +1170,11 @@ static void SwLedControlMode2(struct _adapter *padapter, case LED_CTL_START_WPS_BOTTON: if (pLed->bLedWPSBlinkInProgress == false) { if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } pLed->bLedWPSBlinkInProgress = true; @@ -1214,15 +1214,15 @@ static void SwLedControlMode2(struct _adapter *padapter, pLed->CurrLedState = LED_OFF; pLed->BlinkingLedState = LED_OFF; if (pLed->bLedBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } mod_timer(&pLed->BlinkTimer, @@ -1248,7 +1248,7 @@ static void SwLedControlMode3(struct _adapter *padapter, if (IS_LED_WPS_BLINKING(pLed)) return; if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } pLed->bLedScanBlinkInProgress = true; @@ -1286,11 +1286,11 @@ static void SwLedControlMode3(struct _adapter *padapter, pLed->CurrLedState = LED_ON; pLed->BlinkingLedState = LED_ON; if (pLed->bLedBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } mod_timer(&pLed->BlinkTimer, @@ -1300,11 +1300,11 @@ static void SwLedControlMode3(struct _adapter *padapter, case LED_CTL_START_WPS_BOTTON: if (pLed->bLedWPSBlinkInProgress == false) { if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } pLed->bLedWPSBlinkInProgress = true; @@ -1319,7 +1319,7 @@ static void SwLedControlMode3(struct _adapter *padapter, break; case LED_CTL_STOP_WPS: if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&(pLed->BlinkTimer)); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } else pLed->bLedWPSBlinkInProgress = true; @@ -1336,7 +1336,7 @@ static void SwLedControlMode3(struct _adapter *padapter, break; case LED_CTL_STOP_WPS_FAIL: if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } pLed->CurrLedState = LED_OFF; @@ -1357,15 +1357,15 @@ static void SwLedControlMode3(struct _adapter *padapter, pLed->CurrLedState = LED_OFF; pLed->BlinkingLedState = LED_OFF; if (pLed->bLedBlinkInProgress) { - 
del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } mod_timer(&pLed->BlinkTimer, @@ -1388,7 +1388,7 @@ static void SwLedControlMode4(struct _adapter *padapter, case LED_CTL_START_TO_LINK: if (pLed1->bLedWPSBlinkInProgress) { pLed1->bLedWPSBlinkInProgress = false; - del_timer_sync(&pLed1->BlinkTimer); + del_timer(&pLed1->BlinkTimer); pLed1->BlinkingLedState = LED_OFF; pLed1->CurrLedState = LED_OFF; if (pLed1->bLedOn) @@ -1400,11 +1400,11 @@ static void SwLedControlMode4(struct _adapter *padapter, IS_LED_WPS_BLINKING(pLed)) return; if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedNoLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } pLed->bLedStartToLinkBlinkInProgress = true; @@ -1426,7 +1426,7 @@ static void SwLedControlMode4(struct _adapter *padapter, if (LedAction == LED_CTL_LINK) { if (pLed1->bLedWPSBlinkInProgress) { pLed1->bLedWPSBlinkInProgress = false; - del_timer_sync(&pLed1->BlinkTimer); + del_timer(&pLed1->BlinkTimer); pLed1->BlinkingLedState = LED_OFF; pLed1->CurrLedState = LED_OFF; if (pLed1->bLedOn) @@ -1439,7 +1439,7 @@ static void SwLedControlMode4(struct _adapter *padapter, IS_LED_WPS_BLINKING(pLed)) return; if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } pLed->bLedNoLinkBlinkInProgress = true; @@ -1460,11 +1460,11 @@ static void SwLedControlMode4(struct _adapter *padapter, if (IS_LED_WPS_BLINKING(pLed)) return; if (pLed->bLedNoLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } pLed->bLedScanBlinkInProgress = true; @@ -1485,7 +1485,7 @@ static void SwLedControlMode4(struct _adapter *padapter, IS_LED_WPS_BLINKING(pLed)) return; if (pLed->bLedNoLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } pLed->bLedBlinkInProgress = true; @@ -1503,7 +1503,7 @@ static void SwLedControlMode4(struct _adapter *padapter, case LED_CTL_START_WPS_BOTTON: if (pLed1->bLedWPSBlinkInProgress) { pLed1->bLedWPSBlinkInProgress = false; - del_timer_sync(&(pLed1->BlinkTimer)); + del_timer(&pLed1->BlinkTimer); pLed1->BlinkingLedState = LED_OFF; pLed1->CurrLedState = LED_OFF; if (pLed1->bLedOn) @@ -1512,15 +1512,15 @@ static void SwLedControlMode4(struct _adapter *padapter, } if (pLed->bLedWPSBlinkInProgress == false) { if (pLed->bLedNoLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); 
pLed->bLedScanBlinkInProgress = false; } pLed->bLedWPSBlinkInProgress = true; @@ -1538,7 +1538,7 @@ static void SwLedControlMode4(struct _adapter *padapter, break; case LED_CTL_STOP_WPS: /*WPS connect success*/ if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } pLed->bLedNoLinkBlinkInProgress = true; @@ -1552,7 +1552,7 @@ static void SwLedControlMode4(struct _adapter *padapter, break; case LED_CTL_STOP_WPS_FAIL: /*WPS authentication fail*/ if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } pLed->bLedNoLinkBlinkInProgress = true; @@ -1565,7 +1565,7 @@ static void SwLedControlMode4(struct _adapter *padapter, msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA)); /*LED1 settings*/ if (pLed1->bLedWPSBlinkInProgress) - del_timer_sync(&pLed1->BlinkTimer); + del_timer(&pLed1->BlinkTimer); else pLed1->bLedWPSBlinkInProgress = true; pLed1->CurrLedState = LED_BLINK_WPS_STOP; @@ -1578,7 +1578,7 @@ static void SwLedControlMode4(struct _adapter *padapter, break; case LED_CTL_STOP_WPS_FAIL_OVERLAP: /*WPS session overlap*/ if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } pLed->bLedNoLinkBlinkInProgress = true; @@ -1591,7 +1591,7 @@ static void SwLedControlMode4(struct _adapter *padapter, msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA)); /*LED1 settings*/ if (pLed1->bLedWPSBlinkInProgress) - del_timer_sync(&pLed1->BlinkTimer); + del_timer(&pLed1->BlinkTimer); else pLed1->bLedWPSBlinkInProgress = true; pLed1->CurrLedState = LED_BLINK_WPS_STOP_OVERLAP; @@ -1607,31 +1607,31 @@ static void SwLedControlMode4(struct _adapter *padapter, pLed->CurrLedState = LED_OFF; pLed->BlinkingLedState = LED_OFF; if (pLed->bLedNoLinkBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } if (pLed->bLedLinkBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedLinkBlinkInProgress = false; } if (pLed->bLedBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } if (pLed->bLedStartToLinkBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedStartToLinkBlinkInProgress = false; } if (pLed1->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed1->BlinkTimer); + del_timer(&pLed1->BlinkTimer); pLed1->bLedWPSBlinkInProgress = false; } pLed1->BlinkingLedState = LED_UNKNOWN; @@ -1671,7 +1671,7 @@ static void SwLedControlMode5(struct _adapter *padapter, ; /* dummy branch */ else if (pLed->bLedScanBlinkInProgress == false) { if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } pLed->bLedScanBlinkInProgress = true; @@ -1705,7 +1705,7 @@ static void SwLedControlMode5(struct _adapter *padapter, pLed->CurrLedState = LED_OFF; pLed->BlinkingLedState = LED_OFF; if (pLed->bLedBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); 
pLed->bLedBlinkInProgress = false; } SwLedOff(padapter, pLed); @@ -1756,7 +1756,7 @@ static void SwLedControlMode6(struct _adapter *padapter, case LED_CTL_START_WPS_BOTTON: if (pLed->bLedWPSBlinkInProgress == false) { if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } pLed->bLedWPSBlinkInProgress = true; @@ -1772,7 +1772,7 @@ static void SwLedControlMode6(struct _adapter *padapter, case LED_CTL_STOP_WPS_FAIL: case LED_CTL_STOP_WPS: if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } pLed->CurrLedState = LED_ON; @@ -1784,11 +1784,11 @@ static void SwLedControlMode6(struct _adapter *padapter, pLed->CurrLedState = LED_OFF; pLed->BlinkingLedState = LED_OFF; if (pLed->bLedBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } SwLedOff(padapter, pLed); diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c index 1a1c38f885d6..e35854d28f90 100644 --- a/drivers/staging/rtl8712/rtl871x_cmd.c +++ b/drivers/staging/rtl8712/rtl871x_cmd.c @@ -910,7 +910,7 @@ void r8712_createbss_cmd_callback(struct _adapter *padapter, if (pcmd->res != H2C_SUCCESS) mod_timer(&pmlmepriv->assoc_timer, jiffies + msecs_to_jiffies(1)); - del_timer_sync(&pmlmepriv->assoc_timer); + del_timer(&pmlmepriv->assoc_timer); #ifdef __BIG_ENDIAN /* endian_convert */ pnetwork->Length = le32_to_cpu(pnetwork->Length); diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c index fb2b195b90af..c044b0e55ba9 100644 --- a/drivers/staging/rtl8712/rtl871x_mlme.c +++ b/drivers/staging/rtl8712/rtl871x_mlme.c @@ -582,7 +582,7 @@ void r8712_surveydone_event_callback(struct _adapter *adapter, u8 *pbuf) spin_lock_irqsave(&pmlmepriv->lock, irqL); if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) { - del_timer_sync(&pmlmepriv->scan_to_timer); + del_timer(&pmlmepriv->scan_to_timer); _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY); } @@ -696,7 +696,7 @@ void r8712_ind_disconnect(struct _adapter *padapter) } if (padapter->pwrctrlpriv.pwr_mode != padapter->registrypriv.power_mgnt) { - del_timer_sync(&pmlmepriv->dhcp_timer); + del_timer(&pmlmepriv->dhcp_timer); r8712_set_ps_mode(padapter, padapter->registrypriv.power_mgnt, padapter->registrypriv.smart_ps); } @@ -910,7 +910,7 @@ void r8712_joinbss_event_callback(struct _adapter *adapter, u8 *pbuf) if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) r8712_indicate_connect(adapter); - del_timer_sync(&pmlmepriv->assoc_timer); + del_timer(&pmlmepriv->assoc_timer); } else goto ignore_joinbss_callback; } else { diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.c b/drivers/staging/rtl8712/rtl871x_pwrctrl.c index aaa584435c87..9bc04f474d18 100644 --- a/drivers/staging/rtl8712/rtl871x_pwrctrl.c +++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.c @@ -103,7 +103,7 @@ void r8712_cpwm_int_hdl(struct _adapter *padapter, if (pwrpriv->cpwm_tog == ((preportpwrstate->state) & 0x80)) return; - del_timer_sync(&padapter->pwrctrlpriv.rpwm_check_timer); + del_timer(&padapter->pwrctrlpriv.rpwm_check_timer); _enter_pwrlock(&pwrpriv->lock); pwrpriv->cpwm = (preportpwrstate->state) & 0xf; if (pwrpriv->cpwm >= PS_STATE_S2) { diff --git 
a/drivers/staging/rtl8712/rtl871x_sta_mgt.c b/drivers/staging/rtl8712/rtl871x_sta_mgt.c index 7bb96c47f188..a9b93d0f6f56 100644 --- a/drivers/staging/rtl8712/rtl871x_sta_mgt.c +++ b/drivers/staging/rtl8712/rtl871x_sta_mgt.c @@ -198,7 +198,7 @@ void r8712_free_stainfo(struct _adapter *padapter, struct sta_info *psta) * cancel reordering_ctrl_timer */ for (i = 0; i < 16; i++) { preorder_ctrl = &psta->recvreorder_ctrl[i]; - del_timer_sync(&preorder_ctrl->reordering_ctrl_timer); + del_timer(&preorder_ctrl->reordering_ctrl_timer); } spin_lock(&(pfree_sta_queue->lock)); /* insert into free_sta_queue; 20061114 */ diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index cc57a3a6b02b..396344cb011f 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -162,6 +162,17 @@ static inline int tty_put_user(struct tty_struct *tty, unsigned char x, return put_user(x, ptr); } +static inline int tty_copy_to_user(struct tty_struct *tty, + void __user *to, + const void *from, + unsigned long n) +{ + struct n_tty_data *ldata = tty->disc_data; + + tty_audit_add_data(tty, to, n, ldata->icanon); + return copy_to_user(to, from, n); +} + /** * n_tty_kick_worker - start input worker (if required) * @tty: terminal @@ -2070,8 +2081,8 @@ static int canon_copy_from_read_buf(struct tty_struct *tty, size = N_TTY_BUF_SIZE - tail; n = eol - tail; - if (n > 4096) - n += 4096; + if (n > N_TTY_BUF_SIZE) + n += N_TTY_BUF_SIZE; n += found; c = n; @@ -2084,12 +2095,12 @@ static int canon_copy_from_read_buf(struct tty_struct *tty, __func__, eol, found, n, c, size, more); if (n > size) { - ret = copy_to_user(*b, read_buf_addr(ldata, tail), size); + ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), size); if (ret) return -EFAULT; - ret = copy_to_user(*b + size, ldata->read_buf, n - size); + ret = tty_copy_to_user(tty, *b + size, ldata->read_buf, n - size); } else - ret = copy_to_user(*b, read_buf_addr(ldata, tail), n); + ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), n); if (ret) return -EFAULT; diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index 9289999cb7c6..dce1a23706e8 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -562,12 +562,36 @@ static irqreturn_t omap_wake_irq(int irq, void *dev_id) return IRQ_NONE; } +#ifdef CONFIG_SERIAL_8250_DMA +static int omap_8250_dma_handle_irq(struct uart_port *port); +#endif + +static irqreturn_t omap8250_irq(int irq, void *dev_id) +{ + struct uart_port *port = dev_id; + struct uart_8250_port *up = up_to_u8250p(port); + unsigned int iir; + int ret; + +#ifdef CONFIG_SERIAL_8250_DMA + if (up->dma) { + ret = omap_8250_dma_handle_irq(port); + return IRQ_RETVAL(ret); + } +#endif + + serial8250_rpm_get(up); + iir = serial_port_in(port, UART_IIR); + ret = serial8250_handle_irq(port, iir); + serial8250_rpm_put(up); + + return IRQ_RETVAL(ret); +} + static int omap_8250_startup(struct uart_port *port) { - struct uart_8250_port *up = - container_of(port, struct uart_8250_port, port); + struct uart_8250_port *up = up_to_u8250p(port); struct omap8250_priv *priv = port->private_data; - int ret; if (priv->wakeirq) { @@ -580,10 +604,31 @@ static int omap_8250_startup(struct uart_port *port) pm_runtime_get_sync(port->dev); - ret = serial8250_do_startup(port); - if (ret) + up->mcr = 0; + serial_out(up, UART_FCR, UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); + + serial_out(up, UART_LCR, UART_LCR_WLEN8); + + up->lsr_saved_flags = 0; + up->msr_saved_flags = 0; + + if (up->dma) { + ret = 
serial8250_request_dma(up); + if (ret) { + dev_warn_ratelimited(port->dev, + "failed to request DMA\n"); + up->dma = NULL; + } + } + + ret = request_irq(port->irq, omap8250_irq, IRQF_SHARED, + dev_name(port->dev), port); + if (ret < 0) goto err; + up->ier = UART_IER_RLSI | UART_IER_RDI; + serial_out(up, UART_IER, up->ier); + #ifdef CONFIG_PM up->capabilities |= UART_CAP_RPM; #endif @@ -610,8 +655,7 @@ err: static void omap_8250_shutdown(struct uart_port *port) { - struct uart_8250_port *up = - container_of(port, struct uart_8250_port, port); + struct uart_8250_port *up = up_to_u8250p(port); struct omap8250_priv *priv = port->private_data; flush_work(&priv->qos_work); @@ -621,11 +665,24 @@ static void omap_8250_shutdown(struct uart_port *port) pm_runtime_get_sync(port->dev); serial_out(up, UART_OMAP_WER, 0); - serial8250_do_shutdown(port); + + up->ier = 0; + serial_out(up, UART_IER, 0); + + if (up->dma) + serial8250_release_dma(up); + + /* + * Disable break condition and FIFOs + */ + if (up->lcr & UART_LCR_SBC) + serial_out(up, UART_LCR, up->lcr & ~UART_LCR_SBC); + serial_out(up, UART_FCR, UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); pm_runtime_mark_last_busy(port->dev); pm_runtime_put_autosuspend(port->dev); + free_irq(port->irq, port); if (priv->wakeirq) free_irq(priv->wakeirq, port); } @@ -974,6 +1031,13 @@ static inline int omap_8250_rx_dma(struct uart_8250_port *p, unsigned int iir) } #endif +static int omap8250_no_handle_irq(struct uart_port *port) +{ + /* IRQ has not been requested but handling irq? */ + WARN_ONCE(1, "Unexpected irq handling before port startup\n"); + return 0; +} + static int omap8250_probe(struct platform_device *pdev) { struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -1075,6 +1139,7 @@ static int omap8250_probe(struct platform_device *pdev) pm_runtime_get_sync(&pdev->dev); omap_serial_fill_features_erratas(&up, priv); + up.port.handle_irq = omap8250_no_handle_irq; #ifdef CONFIG_SERIAL_8250_DMA if (pdev->dev.of_node) { /* @@ -1088,7 +1153,6 @@ static int omap8250_probe(struct platform_device *pdev) ret = of_property_count_strings(pdev->dev.of_node, "dma-names"); if (ret == 2) { up.dma = &priv->omap8250_dma; - up.port.handle_irq = omap_8250_dma_handle_irq; priv->omap8250_dma.fn = the_no_dma_filter_fn; priv->omap8250_dma.tx_dma = omap_8250_tx_dma; priv->omap8250_dma.rx_dma = omap_8250_rx_dma; diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 6f5a0720a8c8..763eb20fe321 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -1249,20 +1249,19 @@ __acquires(&uap->port.lock) /* * Transmit a character - * There must be at least one free entry in the TX FIFO to accept the char. * - * Returns true if the FIFO might have space in it afterwards; - * returns false if the FIFO definitely became full. + * Returns true if the character was successfully queued to the FIFO. + * Returns false otherwise. 
*/ static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c) { + if (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF) + return false; /* unable to transmit character */ + writew(c, uap->port.membase + UART01x_DR); uap->port.icount.tx++; - if (likely(uap->tx_irq_seen > 1)) - return true; - - return !(readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF); + return true; } static bool pl011_tx_chars(struct uart_amba_port *uap) @@ -1296,7 +1295,8 @@ static bool pl011_tx_chars(struct uart_amba_port *uap) return false; if (uap->port.x_char) { - pl011_tx_char(uap, uap->port.x_char); + if (!pl011_tx_char(uap, uap->port.x_char)) + goto done; uap->port.x_char = 0; --count; } diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index c8cfa0637128..88250395b0ce 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -911,6 +911,14 @@ static void dma_rx_callback(void *data) status = dmaengine_tx_status(chan, (dma_cookie_t)0, &state); count = RX_BUF_SIZE - state.residue; + + if (readl(sport->port.membase + USR2) & USR2_IDLE) { + /* In condition [3] the SDMA counted up too early */ + count--; + + writel(USR2_IDLE, sport->port.membase + USR2); + } + dev_dbg(sport->port.dev, "We get %d bytes.\n", count); if (count) { diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index fdab715a0631..c0eafa6fd403 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -339,7 +339,7 @@ #define DWC3_DGCMD_SET_ENDPOINT_NRDY 0x0c #define DWC3_DGCMD_RUN_SOC_BUS_LOOPBACK 0x10 -#define DWC3_DGCMD_STATUS(n) (((n) >> 15) & 1) +#define DWC3_DGCMD_STATUS(n) (((n) >> 12) & 0x0F) #define DWC3_DGCMD_CMDACT (1 << 10) #define DWC3_DGCMD_CMDIOC (1 << 8) @@ -355,7 +355,7 @@ #define DWC3_DEPCMD_PARAM_SHIFT 16 #define DWC3_DEPCMD_PARAM(x) ((x) << DWC3_DEPCMD_PARAM_SHIFT) #define DWC3_DEPCMD_GET_RSC_IDX(x) (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f) -#define DWC3_DEPCMD_STATUS(x) (((x) >> 15) & 1) +#define DWC3_DEPCMD_STATUS(x) (((x) >> 12) & 0x0F) #define DWC3_DEPCMD_HIPRI_FORCERM (1 << 11) #define DWC3_DEPCMD_CMDACT (1 << 10) #define DWC3_DEPCMD_CMDIOC (1 << 8) diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 6bdb57069044..3507f880eb74 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -315,7 +315,6 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf, return ret; } - set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags); return len; } break; @@ -847,7 +846,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) ret = ep->status; if (io_data->read && ret > 0) { ret = copy_to_iter(data, ret, &io_data->data); - if (unlikely(iov_iter_count(&io_data->data))) + if (!ret) ret = -EFAULT; } } @@ -1463,8 +1462,7 @@ static void ffs_data_clear(struct ffs_data *ffs) { ENTER(); - if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags)) - ffs_closed(ffs); + ffs_closed(ffs); BUG_ON(ffs->gadget); @@ -3422,9 +3420,13 @@ static int ffs_ready(struct ffs_data *ffs) ffs_obj->desc_ready = true; ffs_obj->ffs_data = ffs; - if (ffs_obj->ffs_ready_callback) + if (ffs_obj->ffs_ready_callback) { ret = ffs_obj->ffs_ready_callback(ffs); + if (ret) + goto done; + } + set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags); done: ffs_dev_unlock(); return ret; @@ -3443,7 +3445,8 @@ static void ffs_closed(struct ffs_data *ffs) ffs_obj->desc_ready = false; - if (ffs_obj->ffs_closed_callback) + if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) && + 
ffs_obj->ffs_closed_callback) ffs_obj->ffs_closed_callback(ffs); if (!ffs_obj->opts || ffs_obj->opts->no_configfs diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c index 259b656c0b3e..6316aa5b1c49 100644 --- a/drivers/usb/gadget/function/f_midi.c +++ b/drivers/usb/gadget/function/f_midi.c @@ -973,7 +973,13 @@ static ssize_t f_midi_opts_id_show(struct f_midi_opts *opts, char *page) int result; mutex_lock(&opts->lock); - result = strlcpy(page, opts->id, PAGE_SIZE); + if (opts->id) { + result = strlcpy(page, opts->id, PAGE_SIZE); + } else { + page[0] = 0; + result = 0; + } + mutex_unlock(&opts->lock); return result; diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c index 9719abfb6145..7856b3394494 100644 --- a/drivers/usb/gadget/function/f_uac1.c +++ b/drivers/usb/gadget/function/f_uac1.c @@ -588,7 +588,10 @@ static int f_audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt) if (intf == 1) { if (alt == 1) { - config_ep_by_speed(cdev->gadget, f, out_ep); + err = config_ep_by_speed(cdev->gadget, f, out_ep); + if (err) + return err; + usb_ep_enable(out_ep); out_ep->driver_data = audio; audio->copy_buf = f_audio_buffer_alloc(audio_buf_size); diff --git a/drivers/usb/gadget/legacy/g_ffs.c b/drivers/usb/gadget/legacy/g_ffs.c index 7b9ef7e257d2..e821931c965c 100644 --- a/drivers/usb/gadget/legacy/g_ffs.c +++ b/drivers/usb/gadget/legacy/g_ffs.c @@ -304,8 +304,10 @@ static int functionfs_ready_callback(struct ffs_data *ffs) gfs_registered = true; ret = usb_composite_probe(&gfs_driver); - if (unlikely(ret < 0)) + if (unlikely(ret < 0)) { + ++missing_funcs; gfs_registered = false; + } return ret; } diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c index b808951491cc..99fd9a5667df 100644 --- a/drivers/usb/gadget/udc/s3c2410_udc.c +++ b/drivers/usb/gadget/udc/s3c2410_udc.c @@ -1487,7 +1487,7 @@ static int s3c2410_udc_pullup(struct usb_gadget *gadget, int is_on) dprintk(DEBUG_NORMAL, "%s()\n", __func__); - s3c2410_udc_set_pullup(udc, is_on ? 
0 : 1); + s3c2410_udc_set_pullup(udc, is_on); return 0; } diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index ec8ac1674854..36bf089b708f 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -3682,18 +3682,21 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); unsigned long flags; - int ret; + int ret, slot_id; struct xhci_command *command; command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); if (!command) return 0; + /* xhci->slot_id and xhci->addr_dev are not thread-safe */ + mutex_lock(&xhci->mutex); spin_lock_irqsave(&xhci->lock, flags); command->completion = &xhci->addr_dev; ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0); if (ret) { spin_unlock_irqrestore(&xhci->lock, flags); + mutex_unlock(&xhci->mutex); xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); kfree(command); return 0; @@ -3702,8 +3705,10 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) spin_unlock_irqrestore(&xhci->lock, flags); wait_for_completion(command->completion); + slot_id = xhci->slot_id; + mutex_unlock(&xhci->mutex); - if (!xhci->slot_id || command->status != COMP_SUCCESS) { + if (!slot_id || command->status != COMP_SUCCESS) { xhci_err(xhci, "Error while assigning device slot ID\n"); xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n", HCS_MAX_SLOTS( @@ -3728,11 +3733,11 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) * xhci_discover_or_reset_device(), which may be called as part of * mass storage driver error handling. */ - if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) { + if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) { xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); goto disable_slot; } - udev->slot_id = xhci->slot_id; + udev->slot_id = slot_id; #ifndef CONFIG_USB_DEFAULT_PERSIST /* @@ -3778,12 +3783,15 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, struct xhci_slot_ctx *slot_ctx; struct xhci_input_control_ctx *ctrl_ctx; u64 temp_64; - struct xhci_command *command; + struct xhci_command *command = NULL; + + mutex_lock(&xhci->mutex); if (!udev->slot_id) { xhci_dbg_trace(xhci, trace_xhci_dbg_address, "Bad Slot ID %d", udev->slot_id); - return -EINVAL; + ret = -EINVAL; + goto out; } virt_dev = xhci->devs[udev->slot_id]; @@ -3796,7 +3804,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, */ xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n", udev->slot_id); - return -EINVAL; + ret = -EINVAL; + goto out; } if (setup == SETUP_CONTEXT_ONLY) { @@ -3804,13 +3813,15 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == SLOT_STATE_DEFAULT) { xhci_dbg(xhci, "Slot already in default state\n"); - return 0; + goto out; } } command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); - if (!command) - return -ENOMEM; + if (!command) { + ret = -ENOMEM; + goto out; + } command->in_ctx = virt_dev->in_ctx; command->completion = &xhci->addr_dev; @@ -3820,8 +3831,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, if (!ctrl_ctx) { xhci_warn(xhci, "%s: Could not get input context, bad type.\n", __func__); - kfree(command); - return -EINVAL; + ret = -EINVAL; + goto out; } /* * If this is the first Set Address since device plug-in or @@ -3848,8 +3859,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device 
*udev, spin_unlock_irqrestore(&xhci->lock, flags); xhci_dbg_trace(xhci, trace_xhci_dbg_address, "FIXME: allocate a command ring segment"); - kfree(command); - return ret; + goto out; } xhci_ring_cmd_db(xhci); spin_unlock_irqrestore(&xhci->lock, flags); @@ -3896,10 +3906,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, ret = -EINVAL; break; } - if (ret) { - kfree(command); - return ret; - } + if (ret) + goto out; temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); xhci_dbg_trace(xhci, trace_xhci_dbg_address, "Op regs DCBAA ptr = %#016llx", temp_64); @@ -3932,8 +3940,10 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, xhci_dbg_trace(xhci, trace_xhci_dbg_address, "Internal device address = %d", le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK); +out: + mutex_unlock(&xhci->mutex); kfree(command); - return 0; + return ret; } int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) @@ -4855,6 +4865,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) return 0; } + mutex_init(&xhci->mutex); xhci->cap_regs = hcd->regs; xhci->op_regs = hcd->regs + HC_LENGTH(readl(&xhci->cap_regs->hc_capbase)); @@ -5011,4 +5022,12 @@ static int __init xhci_hcd_init(void) BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8); return 0; } + +/* + * If an init function is provided, an exit function must also be provided + * to allow module unload. + */ +static void __exit xhci_hcd_fini(void) { } + module_init(xhci_hcd_init); +module_exit(xhci_hcd_fini); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index ea75e8ccd3c1..6977f8491fa7 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1497,6 +1497,8 @@ struct xhci_hcd { struct list_head lpm_failed_devs; /* slot enabling and address device helpers */ + /* these are not thread safe so use mutex */ + struct mutex mutex; struct completion addr_dev; int slot_id; /* For USB 3.0 LPM enable/disable. */ diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 3789b08ef67b..6dca3d794ced 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -2021,13 +2021,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) if (musb->ops->quirks) musb->io.quirks = musb->ops->quirks; - /* At least tusb6010 has it's own offsets.. 
*/ - if (musb->ops->ep_offset) - musb->io.ep_offset = musb->ops->ep_offset; - if (musb->ops->ep_select) - musb->io.ep_select = musb->ops->ep_select; - - /* ..and some devices use indexed offset or flat offset */ + /* Most devices use indexed offset or flat offset */ if (musb->io.quirks & MUSB_INDEXED_EP) { musb->io.ep_offset = musb_indexed_ep_offset; musb->io.ep_select = musb_indexed_ep_select; @@ -2036,6 +2030,12 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) musb->io.ep_select = musb_flat_ep_select; } + /* At least tusb6010 has its own offsets */ + if (musb->ops->ep_offset) + musb->io.ep_offset = musb->ops->ep_offset; + if (musb->ops->ep_select) + musb->io.ep_select = musb->ops->ep_select; + if (musb->ops->fifo_mode) fifo_mode = musb->ops->fifo_mode; else diff --git a/drivers/usb/phy/phy-ab8500-usb.c b/drivers/usb/phy/phy-ab8500-usb.c index 7225d526df04..03ab0c699f74 100644 --- a/drivers/usb/phy/phy-ab8500-usb.c +++ b/drivers/usb/phy/phy-ab8500-usb.c @@ -1179,7 +1179,7 @@ static int ab8500_usb_irq_setup(struct platform_device *pdev, } err = devm_request_threaded_irq(&pdev->dev, irq, NULL, ab8500_usb_link_status_irq, - IRQF_NO_SUSPEND | IRQF_SHARED, + IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT, "usb-link-status", ab); if (err < 0) { dev_err(ab->dev, "request_irq failed for link status irq\n"); @@ -1195,7 +1195,7 @@ static int ab8500_usb_irq_setup(struct platform_device *pdev, } err = devm_request_threaded_irq(&pdev->dev, irq, NULL, ab8500_usb_disconnect_irq, - IRQF_NO_SUSPEND | IRQF_SHARED, + IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT, "usb-id-fall", ab); if (err < 0) { dev_err(ab->dev, "request_irq failed for ID fall irq\n"); @@ -1211,7 +1211,7 @@ static int ab8500_usb_irq_setup(struct platform_device *pdev, } err = devm_request_threaded_irq(&pdev->dev, irq, NULL, ab8500_usb_disconnect_irq, - IRQF_NO_SUSPEND | IRQF_SHARED, + IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT, "usb-vbus-fall", ab); if (err < 0) { dev_err(ab->dev, "request_irq failed for Vbus fall irq\n"); diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c index 845f658276b1..2b28443d07b9 100644 --- a/drivers/usb/phy/phy-tahvo.c +++ b/drivers/usb/phy/phy-tahvo.c @@ -401,7 +401,8 @@ static int tahvo_usb_probe(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, tu); tu->irq = platform_get_irq(pdev, 0); - ret = request_threaded_irq(tu->irq, NULL, tahvo_usb_vbus_interrupt, 0, + ret = request_threaded_irq(tu->irq, NULL, tahvo_usb_vbus_interrupt, + IRQF_ONESHOT, "tahvo-vbus", tu); if (ret) { dev_err(&pdev->dev, "could not register tahvo-vbus irq: %d\n", diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index 8597cf9cfceb..c0f5c652d272 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c @@ -611,6 +611,8 @@ struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = { static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done) { struct usbhs_pipe *pipe = pkt->pipe; + struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); + struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); if (usbhs_pipe_is_busy(pipe)) return 0; @@ -624,6 +626,9 @@ static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done) usbhs_pipe_data_sequence(pipe, pkt->sequence); pkt->sequence = -1; /* -1 sequence will be ignored */ + if (usbhs_pipe_is_dcp(pipe)) + usbhsf_fifo_clear(pipe, fifo); + usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length); usbhs_pipe_enable(pipe); usbhs_pipe_running(pipe, 1); @@ -673,7 +678,14 @@ static int 
usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done) *is_done = 1; usbhsf_rx_irq_ctrl(pipe, 0); usbhs_pipe_running(pipe, 0); - usbhs_pipe_disable(pipe); /* disable pipe first */ + /* + * If function mode, since this controller is possible to enter + * Control Write status stage at this timing, this driver + * should not disable the pipe. If such a case happens, this + * controller is not able to complete the status stage. + */ + if (!usbhs_mod_is_host(priv) && !usbhs_pipe_is_dcp(pipe)) + usbhs_pipe_disable(pipe); /* disable pipe first */ } /* @@ -1227,15 +1239,21 @@ static void usbhsf_dma_init_dt(struct device *dev, struct usbhs_fifo *fifo, { char name[16]; - snprintf(name, sizeof(name), "tx%d", channel); - fifo->tx_chan = dma_request_slave_channel_reason(dev, name); - if (IS_ERR(fifo->tx_chan)) - fifo->tx_chan = NULL; - - snprintf(name, sizeof(name), "rx%d", channel); - fifo->rx_chan = dma_request_slave_channel_reason(dev, name); - if (IS_ERR(fifo->rx_chan)) - fifo->rx_chan = NULL; + /* + * To avoid complex handing for DnFIFOs, the driver uses each + * DnFIFO as TX or RX direction (not bi-direction). + * So, the driver uses odd channels for TX, even channels for RX. + */ + snprintf(name, sizeof(name), "ch%d", channel); + if (channel & 1) { + fifo->tx_chan = dma_request_slave_channel_reason(dev, name); + if (IS_ERR(fifo->tx_chan)) + fifo->tx_chan = NULL; + } else { + fifo->rx_chan = dma_request_slave_channel_reason(dev, name); + if (IS_ERR(fifo->rx_chan)) + fifo->rx_chan = NULL; + } } static void usbhsf_dma_init(struct usbhs_priv *priv, struct usbhs_fifo *fifo, diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 9031750e7404..ffd739e31bfc 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -128,6 +128,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */ { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ + { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 8eb68a31cab6..4c8b3b82103d 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -699,6 +699,7 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(XSENS_VID, XSENS_AWINDA_DONGLE_PID) }, { USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) }, { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) }, + { USB_DEVICE(XSENS_VID, XSENS_MTDEVBOARD_PID) }, { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) }, { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 4e4f46f3c89c..792e054126de 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -155,6 +155,7 @@ #define XSENS_AWINDA_STATION_PID 0x0101 #define XSENS_AWINDA_DONGLE_PID 0x0102 #define XSENS_MTW_PID 0x0200 /* Xsens MTw */ +#define XSENS_MTDEVBOARD_PID 0x0300 /* Motion Tracker Development Board */ #define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */ /* Xsens devices using FTDI VID */ diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index 
e894eb278d83..eba1b7ac7294 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -423,6 +423,7 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu) if (cpu == -1) irq_set_affinity_hint(irq, NULL); else { + cpumask_clear(mask); cpumask_set_cpu(cpu, mask); irq_set_affinity_hint(irq, mask); }
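
The last hunk above, in vp_set_vq_affinity() in drivers/virtio/virtio_pci_common.c, adds cpumask_clear() because the mask object is reused across calls: without clearing it first, each call ORs one more CPU into the hint and the affinity mask can only ever grow. What follows is a minimal userspace C sketch of that failure mode, with the cpumask modeled as a plain 64-bit bitmask; vq_mask, set_affinity_hint() and the two set_vq_affinity_*() helpers are illustrative stand-ins for this sketch, not kernel API.

#include <stdio.h>
#include <stdint.h>

/* Stand-in for a reused per-vector cpumask; one bit per CPU (illustrative). */
static uint64_t vq_mask;

/* Stand-in for handing the hint to the IRQ core. */
static void set_affinity_hint(uint64_t mask)
{
	printf("affinity hint = 0x%llx\n", (unsigned long long)mask);
}

/* Buggy variant: bits from previous calls are never cleared. */
static void set_vq_affinity_buggy(int cpu)
{
	vq_mask |= 1ULL << cpu;
	set_affinity_hint(vq_mask);
}

/* Fixed variant, mirroring the patch: clear before setting the new CPU. */
static void set_vq_affinity_fixed(int cpu)
{
	vq_mask = 0;			/* cpumask_clear()    */
	vq_mask |= 1ULL << cpu;		/* cpumask_set_cpu()  */
	set_affinity_hint(vq_mask);
}

int main(void)
{
	vq_mask = 0;
	set_vq_affinity_buggy(0);	/* hint = 0x1                          */
	set_vq_affinity_buggy(3);	/* hint = 0x9: stale CPU0 bit remains  */

	vq_mask = 0;
	set_vq_affinity_fixed(0);	/* hint = 0x1                          */
	set_vq_affinity_fixed(3);	/* hint = 0x8: only the requested CPU  */
	return 0;
}

In the driver itself the same effect comes from calling cpumask_clear(mask) before cpumask_set_cpu(cpu, mask), exactly as the hunk shows.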