Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpu.c                  |   2
-rw-r--r--  kernel/cpuset.c               |   4
-rw-r--r--  kernel/exec_domain.c          |   2
-rw-r--r--  kernel/irq/manage.c           |  39
-rw-r--r--  kernel/kmod.c                 |   2
-rw-r--r--  kernel/module.c               | 336
-rw-r--r--  kernel/power/Kconfig          |   2
-rw-r--r--  kernel/rcuclassic.c           |   2
-rw-r--r--  kernel/rcupreempt.c           |  10
-rw-r--r--  kernel/rtmutex-tester.c       |   7
-rw-r--r--  kernel/sched.c                |  44
-rw-r--r--  kernel/sched_fair.c           |   2
-rw-r--r--  kernel/sched_rt.c             |   4
-rw-r--r--  kernel/softlockup.c           |  45
-rw-r--r--  kernel/stop_machine.c         |   3
-rw-r--r--  kernel/sys_ni.c               |   1
-rw-r--r--  kernel/sysctl.c               |  24
-rw-r--r--  kernel/taskstats.c            |   4
-rw-r--r--  kernel/time/clocksource.c     |  12
-rw-r--r--  kernel/time/tick-broadcast.c  |   3
-rw-r--r--  kernel/time/tick-common.c     |  14
-rw-r--r--  kernel/time/tick-sched.c      |   4
-rw-r--r--  kernel/trace/trace_sysprof.c  |   4
-rw-r--r--  kernel/workqueue.c            |   6
24 files changed, 359 insertions, 217 deletions
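
A recurring conversion in the hunks below replaces for_each_cpu_mask() with for_each_cpu_mask_nr(), which bounds the bitmap scan at nr_cpu_ids (the number of CPU ids actually possible on the running system) instead of the compile-time NR_CPUS. The following user-space toy is only a sketch of that bound; the cpumask here is a plain char array, not the kernel's real cpumask implementation, and nr_cpu_ids is a made-up value:

#include <stdio.h>

#define NR_CPUS 4096                 /* compile-time maximum */
static int nr_cpu_ids = 8;           /* CPUs possible at boot (toy value) */

/* toy cpumask: one char per CPU, non-zero means the bit is set */
static char cpu_online_map[NR_CPUS];

static int next_set_cpu(int cpu, const char *mask, int limit)
{
        for (cpu++; cpu < limit; cpu++)
                if (mask[cpu])
                        return cpu;
        return limit;
}

/* old style: always walks all NR_CPUS positions */
#define for_each_cpu_mask(cpu, mask) \
        for ((cpu) = next_set_cpu(-1, (mask), NR_CPUS); (cpu) < NR_CPUS; \
             (cpu) = next_set_cpu((cpu), (mask), NR_CPUS))

/* new style: stops the scan at nr_cpu_ids */
#define for_each_cpu_mask_nr(cpu, mask) \
        for ((cpu) = next_set_cpu(-1, (mask), nr_cpu_ids); (cpu) < nr_cpu_ids; \
             (cpu) = next_set_cpu((cpu), (mask), nr_cpu_ids))

int main(void)
{
        int cpu;

        cpu_online_map[0] = cpu_online_map[3] = 1;

        for_each_cpu_mask_nr(cpu, cpu_online_map)
                printf("online CPU %d\n", cpu);
        return 0;
}

On a kernel built with NR_CPUS=4096 but booted on an 8-CPU machine, the _nr form avoids scanning thousands of bits that can never be set, which is the motivation behind the conversions in the diff that follows.
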
diff --git a/kernel/cpu.c b/kernel/cpu.c index 033603c1d7c3..2cc409ce0a8f 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -441,7 +441,7 @@ void __ref enable_nonboot_cpus(void) goto out; printk("Enabling non-boot CPUs ...\n"); - for_each_cpu_mask(cpu, frozen_cpus) { + for_each_cpu_mask_nr(cpu, frozen_cpus) { error = _cpu_up(cpu, 1); if (!error) { printk("CPU%d is up\n", cpu); diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 3c3ef02f65f1..d5738910c34c 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -679,7 +679,9 @@ restart: if (apn == b->pn) { cpus_or(*dp, *dp, b->cpus_allowed); b->pn = -1; - update_domain_attr(dattr, b); + if (dattr) + update_domain_attr(dattr + + nslot, b); } } nslot++; diff --git a/kernel/exec_domain.c b/kernel/exec_domain.c index a9e6bad9f706..c1ef192aa655 100644 --- a/kernel/exec_domain.c +++ b/kernel/exec_domain.c @@ -65,7 +65,7 @@ lookup_exec_domain(u_long personality) goto out; } -#ifdef CONFIG_KMOD +#ifdef CONFIG_MODULES read_unlock(&exec_domains_lock); request_module("personality-%ld", pers); read_lock(&exec_domains_lock); diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 77a51be36010..3cfc0fefb5ee 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -217,6 +217,17 @@ void enable_irq(unsigned int irq) } EXPORT_SYMBOL(enable_irq); +int set_irq_wake_real(unsigned int irq, unsigned int on) +{ + struct irq_desc *desc = irq_desc + irq; + int ret = -ENXIO; + + if (desc->chip->set_wake) + ret = desc->chip->set_wake(irq, on); + + return ret; +} + /** * set_irq_wake - control irq power management wakeup * @irq: interrupt to control @@ -233,30 +244,34 @@ int set_irq_wake(unsigned int irq, unsigned int on) { struct irq_desc *desc = irq_desc + irq; unsigned long flags; - int ret = -ENXIO; - int (*set_wake)(unsigned, unsigned) = desc->chip->set_wake; + int ret = 0; /* wakeup-capable irqs can be shared between drivers that * don't need to have the same sleep mode behaviors. */ spin_lock_irqsave(&desc->lock, flags); if (on) { - if (desc->wake_depth++ == 0) - desc->status |= IRQ_WAKEUP; - else - set_wake = NULL; + if (desc->wake_depth++ == 0) { + ret = set_irq_wake_real(irq, on); + if (ret) + desc->wake_depth = 0; + else + desc->status |= IRQ_WAKEUP; + } } else { if (desc->wake_depth == 0) { printk(KERN_WARNING "Unbalanced IRQ %d " "wake disable\n", irq); WARN_ON(1); - } else if (--desc->wake_depth == 0) - desc->status &= ~IRQ_WAKEUP; - else - set_wake = NULL; + } else if (--desc->wake_depth == 0) { + ret = set_irq_wake_real(irq, on); + if (ret) + desc->wake_depth = 1; + else + desc->status &= ~IRQ_WAKEUP; + } } - if (set_wake) - ret = desc->chip->set_wake(irq, on); + spin_unlock_irqrestore(&desc->lock, flags); return ret; } diff --git a/kernel/kmod.c b/kernel/kmod.c index 8df97d3dfda8..90d7af1c1655 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c @@ -42,7 +42,7 @@ extern int max_threads; static struct workqueue_struct *khelper_wq; -#ifdef CONFIG_KMOD +#ifdef CONFIG_MODULES /* modprobe_path is set via /proc/sys. 
diff --git a/kernel/module.c b/kernel/module.c index 5f80478b746d..d8b5605132a0 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -70,6 +70,9 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq); static BLOCKING_NOTIFIER_HEAD(module_notify_list); +/* Bounds of module allocation, for speeding __module_text_address */ +static unsigned long module_addr_min = -1UL, module_addr_max = 0; + int register_module_notifier(struct notifier_block * nb) { return blocking_notifier_chain_register(&module_notify_list, nb); @@ -134,17 +137,19 @@ extern const struct kernel_symbol __start___ksymtab_gpl[]; extern const struct kernel_symbol __stop___ksymtab_gpl[]; extern const struct kernel_symbol __start___ksymtab_gpl_future[]; extern const struct kernel_symbol __stop___ksymtab_gpl_future[]; -extern const struct kernel_symbol __start___ksymtab_unused[]; -extern const struct kernel_symbol __stop___ksymtab_unused[]; -extern const struct kernel_symbol __start___ksymtab_unused_gpl[]; -extern const struct kernel_symbol __stop___ksymtab_unused_gpl[]; extern const struct kernel_symbol __start___ksymtab_gpl_future[]; extern const struct kernel_symbol __stop___ksymtab_gpl_future[]; extern const unsigned long __start___kcrctab[]; extern const unsigned long __start___kcrctab_gpl[]; extern const unsigned long __start___kcrctab_gpl_future[]; +#ifdef CONFIG_UNUSED_SYMBOLS +extern const struct kernel_symbol __start___ksymtab_unused[]; +extern const struct kernel_symbol __stop___ksymtab_unused[]; +extern const struct kernel_symbol __start___ksymtab_unused_gpl[]; +extern const struct kernel_symbol __stop___ksymtab_unused_gpl[]; extern const unsigned long __start___kcrctab_unused[]; extern const unsigned long __start___kcrctab_unused_gpl[]; +#endif #ifndef CONFIG_MODVERSIONS #define symversion(base, idx) NULL @@ -152,156 +157,186 @@ extern const unsigned long __start___kcrctab_unused_gpl[]; #define symversion(base, idx) ((base != NULL) ? 
((base) + (idx)) : NULL) #endif -/* lookup symbol in given range of kernel_symbols */ -static const struct kernel_symbol *lookup_symbol(const char *name, - const struct kernel_symbol *start, - const struct kernel_symbol *stop) -{ - const struct kernel_symbol *ks = start; - for (; ks < stop; ks++) - if (strcmp(ks->name, name) == 0) - return ks; - return NULL; -} - -static bool always_ok(bool gplok, bool warn, const char *name) -{ - return true; -} - -static bool printk_unused_warning(bool gplok, bool warn, const char *name) -{ - if (warn) { - printk(KERN_WARNING "Symbol %s is marked as UNUSED, " - "however this module is using it.\n", name); - printk(KERN_WARNING - "This symbol will go away in the future.\n"); - printk(KERN_WARNING - "Please evalute if this is the right api to use and if " - "it really is, submit a report the linux kernel " - "mailinglist together with submitting your code for " - "inclusion.\n"); - } - return true; -} - -static bool gpl_only_unused_warning(bool gplok, bool warn, const char *name) -{ - if (!gplok) - return false; - return printk_unused_warning(gplok, warn, name); -} - -static bool gpl_only(bool gplok, bool warn, const char *name) -{ - return gplok; -} - -static bool warn_if_not_gpl(bool gplok, bool warn, const char *name) -{ - if (!gplok && warn) { - printk(KERN_WARNING "Symbol %s is being used " - "by a non-GPL module, which will not " - "be allowed in the future\n", name); - printk(KERN_WARNING "Please see the file " - "Documentation/feature-removal-schedule.txt " - "in the kernel source tree for more details.\n"); - } - return true; -} - struct symsearch { const struct kernel_symbol *start, *stop; const unsigned long *crcs; - bool (*check)(bool gplok, bool warn, const char *name); + enum { + NOT_GPL_ONLY, + GPL_ONLY, + WILL_BE_GPL_ONLY, + } licence; + bool unused; }; -/* Look through this array of symbol tables for a symbol match which - * passes the check function. */ -static const struct kernel_symbol *search_symarrays(const struct symsearch *arr, - unsigned int num, - const char *name, - bool gplok, - bool warn, - const unsigned long **crc) +static bool each_symbol_in_section(const struct symsearch *arr, + unsigned int arrsize, + struct module *owner, + bool (*fn)(const struct symsearch *syms, + struct module *owner, + unsigned int symnum, void *data), + void *data) { - unsigned int i; - const struct kernel_symbol *ks; + unsigned int i, j; - for (i = 0; i < num; i++) { - ks = lookup_symbol(name, arr[i].start, arr[i].stop); - if (!ks || !arr[i].check(gplok, warn, name)) - continue; - - if (crc) - *crc = symversion(arr[i].crcs, ks - arr[i].start); - return ks; + for (j = 0; j < arrsize; j++) { + for (i = 0; i < arr[j].stop - arr[j].start; i++) + if (fn(&arr[j], owner, i, data)) + return true; } - return NULL; + + return false; } -/* Find a symbol, return value, (optional) crc and (optional) module - * which owns it */ -static unsigned long find_symbol(const char *name, - struct module **owner, - const unsigned long **crc, - bool gplok, - bool warn) +/* Returns true as soon as fn returns true, otherwise false. 
*/ +static bool each_symbol(bool (*fn)(const struct symsearch *arr, + struct module *owner, + unsigned int symnum, void *data), + void *data) { struct module *mod; - const struct kernel_symbol *ks; const struct symsearch arr[] = { { __start___ksymtab, __stop___ksymtab, __start___kcrctab, - always_ok }, + NOT_GPL_ONLY, false }, { __start___ksymtab_gpl, __stop___ksymtab_gpl, - __start___kcrctab_gpl, gpl_only }, + __start___kcrctab_gpl, + GPL_ONLY, false }, { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future, - __start___kcrctab_gpl_future, warn_if_not_gpl }, + __start___kcrctab_gpl_future, + WILL_BE_GPL_ONLY, false }, +#ifdef CONFIG_UNUSED_SYMBOLS { __start___ksymtab_unused, __stop___ksymtab_unused, - __start___kcrctab_unused, printk_unused_warning }, + __start___kcrctab_unused, + NOT_GPL_ONLY, true }, { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl, - __start___kcrctab_unused_gpl, gpl_only_unused_warning }, + __start___kcrctab_unused_gpl, + GPL_ONLY, true }, +#endif }; - /* Core kernel first. */ - ks = search_symarrays(arr, ARRAY_SIZE(arr), name, gplok, warn, crc); - if (ks) { - if (owner) - *owner = NULL; - return ks->value; - } + if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data)) + return true; - /* Now try modules. */ list_for_each_entry(mod, &modules, list) { struct symsearch arr[] = { { mod->syms, mod->syms + mod->num_syms, mod->crcs, - always_ok }, + NOT_GPL_ONLY, false }, { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms, - mod->gpl_crcs, gpl_only }, + mod->gpl_crcs, + GPL_ONLY, false }, { mod->gpl_future_syms, mod->gpl_future_syms + mod->num_gpl_future_syms, - mod->gpl_future_crcs, warn_if_not_gpl }, + mod->gpl_future_crcs, + WILL_BE_GPL_ONLY, false }, +#ifdef CONFIG_UNUSED_SYMBOLS { mod->unused_syms, mod->unused_syms + mod->num_unused_syms, - mod->unused_crcs, printk_unused_warning }, + mod->unused_crcs, + NOT_GPL_ONLY, true }, { mod->unused_gpl_syms, mod->unused_gpl_syms + mod->num_unused_gpl_syms, - mod->unused_gpl_crcs, gpl_only_unused_warning }, + mod->unused_gpl_crcs, + GPL_ONLY, true }, +#endif }; - ks = search_symarrays(arr, ARRAY_SIZE(arr), - name, gplok, warn, crc); - if (ks) { - if (owner) - *owner = mod; - return ks->value; + if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data)) + return true; + } + return false; +} + +struct find_symbol_arg { + /* Input */ + const char *name; + bool gplok; + bool warn; + + /* Output */ + struct module *owner; + const unsigned long *crc; + unsigned long value; +}; + +static bool find_symbol_in_section(const struct symsearch *syms, + struct module *owner, + unsigned int symnum, void *data) +{ + struct find_symbol_arg *fsa = data; + + if (strcmp(syms->start[symnum].name, fsa->name) != 0) + return false; + + if (!fsa->gplok) { + if (syms->licence == GPL_ONLY) + return false; + if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) { + printk(KERN_WARNING "Symbol %s is being used " + "by a non-GPL module, which will not " + "be allowed in the future\n", fsa->name); + printk(KERN_WARNING "Please see the file " + "Documentation/feature-removal-schedule.txt " + "in the kernel source tree for more details.\n"); } } +#ifdef CONFIG_UNUSED_SYMBOLS + if (syms->unused && fsa->warn) { + printk(KERN_WARNING "Symbol %s is marked as UNUSED, " + "however this module is using it.\n", fsa->name); + printk(KERN_WARNING + "This symbol will go away in the future.\n"); + printk(KERN_WARNING + "Please evalute if this is the right api to use and if " + "it really is, submit a report the linux kernel " + "mailinglist 
together with submitting your code for " + "inclusion.\n"); + } +#endif + + fsa->owner = owner; + fsa->crc = symversion(syms->crcs, symnum); + fsa->value = syms->start[symnum].value; + return true; +} + +/* Find a symbol, return value, (optional) crc and (optional) module + * which owns it */ +static unsigned long find_symbol(const char *name, + struct module **owner, + const unsigned long **crc, + bool gplok, + bool warn) +{ + struct find_symbol_arg fsa; + + fsa.name = name; + fsa.gplok = gplok; + fsa.warn = warn; + + if (each_symbol(find_symbol_in_section, &fsa)) { + if (owner) + *owner = fsa.owner; + if (crc) + *crc = fsa.crc; + return fsa.value; + } + DEBUGP("Failed to find symbol %s\n", name); return -ENOENT; } +/* lookup symbol in given range of kernel_symbols */ +static const struct kernel_symbol *lookup_symbol(const char *name, + const struct kernel_symbol *start, + const struct kernel_symbol *stop) +{ + const struct kernel_symbol *ks = start; + for (; ks < stop; ks++) + if (strcmp(ks->name, name) == 0) + return ks; + return NULL; +} + /* Search for module by name: must hold module_mutex. */ static struct module *find_module(const char *name) { @@ -639,8 +674,8 @@ static int __try_stop_module(void *_sref) { struct stopref *sref = _sref; - /* If it's not unused, quit unless we are told to block. */ - if ((sref->flags & O_NONBLOCK) && module_refcount(sref->mod) != 0) { + /* If it's not unused, quit unless we're forcing. */ + if (module_refcount(sref->mod) != 0) { if (!(*sref->forced = try_force_unload(sref->flags))) return -EWOULDBLOCK; } @@ -652,9 +687,16 @@ static int __try_stop_module(void *_sref) static int try_stop_module(struct module *mod, int flags, int *forced) { - struct stopref sref = { mod, flags, forced }; + if (flags & O_NONBLOCK) { + struct stopref sref = { mod, flags, forced }; - return stop_machine_run(__try_stop_module, &sref, NR_CPUS); + return stop_machine_run(__try_stop_module, &sref, NR_CPUS); + } else { + /* We don't need to stop the machine for this. */ + mod->state = MODULE_STATE_GOING; + synchronize_sched(); + return 0; + } } unsigned int module_refcount(struct module *mod) @@ -1445,8 +1487,10 @@ static int verify_export_symbols(struct module *mod) { mod->syms, mod->num_syms }, { mod->gpl_syms, mod->num_gpl_syms }, { mod->gpl_future_syms, mod->num_gpl_future_syms }, +#ifdef CONFIG_UNUSED_SYMBOLS { mod->unused_syms, mod->num_unused_syms }, { mod->unused_gpl_syms, mod->num_unused_gpl_syms }, +#endif }; for (i = 0; i < ARRAY_SIZE(arr); i++) { @@ -1526,7 +1570,7 @@ static int simplify_symbols(Elf_Shdr *sechdrs, } /* Update size with this section: return offset. */ -static long get_offset(unsigned long *size, Elf_Shdr *sechdr) +static long get_offset(unsigned int *size, Elf_Shdr *sechdr) { long ret; @@ -1738,6 +1782,20 @@ static inline void add_kallsyms(struct module *mod, } #endif /* CONFIG_KALLSYMS */ +static void *module_alloc_update_bounds(unsigned long size) +{ + void *ret = module_alloc(size); + + if (ret) { + /* Update module bounds. */ + if ((unsigned long)ret < module_addr_min) + module_addr_min = (unsigned long)ret; + if ((unsigned long)ret + size > module_addr_max) + module_addr_max = (unsigned long)ret + size; + } + return ret; +} + /* Allocate and load the module: note that size of section 0 is always zero, and we rely on this for optional sections. 
*/ static struct module *load_module(void __user *umod, @@ -1764,10 +1822,12 @@ static struct module *load_module(void __user *umod, unsigned int gplfutureindex; unsigned int gplfuturecrcindex; unsigned int unwindex = 0; +#ifdef CONFIG_UNUSED_SYMBOLS unsigned int unusedindex; unsigned int unusedcrcindex; unsigned int unusedgplindex; unsigned int unusedgplcrcindex; +#endif unsigned int markersindex; unsigned int markersstringsindex; struct module *mod; @@ -1850,13 +1910,15 @@ static struct module *load_module(void __user *umod, exportindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab"); gplindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_gpl"); gplfutureindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_gpl_future"); - unusedindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_unused"); - unusedgplindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_unused_gpl"); crcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab"); gplcrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_gpl"); gplfuturecrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_gpl_future"); +#ifdef CONFIG_UNUSED_SYMBOLS + unusedindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_unused"); + unusedgplindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_unused_gpl"); unusedcrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_unused"); unusedgplcrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_unused_gpl"); +#endif setupindex = find_sec(hdr, sechdrs, secstrings, "__param"); exindex = find_sec(hdr, sechdrs, secstrings, "__ex_table"); obsparmindex = find_sec(hdr, sechdrs, secstrings, "__obsparm"); @@ -1935,7 +1997,7 @@ static struct module *load_module(void __user *umod, layout_sections(mod, hdr, sechdrs, secstrings); /* Do the allocs. */ - ptr = module_alloc(mod->core_size); + ptr = module_alloc_update_bounds(mod->core_size); if (!ptr) { err = -ENOMEM; goto free_percpu; @@ -1943,7 +2005,7 @@ static struct module *load_module(void __user *umod, memset(ptr, 0, mod->core_size); mod->module_core = ptr; - ptr = module_alloc(mod->init_size); + ptr = module_alloc_update_bounds(mod->init_size); if (!ptr && mod->init_size) { err = -ENOMEM; goto free_core; @@ -2018,14 +2080,15 @@ static struct module *load_module(void __user *umod, mod->gpl_crcs = (void *)sechdrs[gplcrcindex].sh_addr; mod->num_gpl_future_syms = sechdrs[gplfutureindex].sh_size / sizeof(*mod->gpl_future_syms); - mod->num_unused_syms = sechdrs[unusedindex].sh_size / - sizeof(*mod->unused_syms); - mod->num_unused_gpl_syms = sechdrs[unusedgplindex].sh_size / - sizeof(*mod->unused_gpl_syms); mod->gpl_future_syms = (void *)sechdrs[gplfutureindex].sh_addr; if (gplfuturecrcindex) mod->gpl_future_crcs = (void *)sechdrs[gplfuturecrcindex].sh_addr; +#ifdef CONFIG_UNUSED_SYMBOLS + mod->num_unused_syms = sechdrs[unusedindex].sh_size / + sizeof(*mod->unused_syms); + mod->num_unused_gpl_syms = sechdrs[unusedgplindex].sh_size / + sizeof(*mod->unused_gpl_syms); mod->unused_syms = (void *)sechdrs[unusedindex].sh_addr; if (unusedcrcindex) mod->unused_crcs = (void *)sechdrs[unusedcrcindex].sh_addr; @@ -2033,13 +2096,17 @@ static struct module *load_module(void __user *umod, if (unusedgplcrcindex) mod->unused_gpl_crcs = (void *)sechdrs[unusedgplcrcindex].sh_addr; +#endif #ifdef CONFIG_MODVERSIONS - if ((mod->num_syms && !crcindex) || - (mod->num_gpl_syms && !gplcrcindex) || - (mod->num_gpl_future_syms && !gplfuturecrcindex) || - (mod->num_unused_syms && !unusedcrcindex) || - (mod->num_unused_gpl_syms && !unusedgplcrcindex)) { + if ((mod->num_syms && 
!crcindex) + || (mod->num_gpl_syms && !gplcrcindex) + || (mod->num_gpl_future_syms && !gplfuturecrcindex) +#ifdef CONFIG_UNUSED_SYMBOLS + || (mod->num_unused_syms && !unusedcrcindex) + || (mod->num_unused_gpl_syms && !unusedgplcrcindex) +#endif + ) { printk(KERN_WARNING "%s: No versions for exported symbols.\n", mod->name); err = try_to_force_load(mod, "nocrc"); if (err) @@ -2512,7 +2579,7 @@ static int m_show(struct seq_file *m, void *p) struct module *mod = list_entry(p, struct module, list); char buf[8]; - seq_printf(m, "%s %lu", + seq_printf(m, "%s %u", mod->name, mod->init_size + mod->core_size); print_unload_info(m, mod); @@ -2595,6 +2662,9 @@ struct module *__module_text_address(unsigned long addr) { struct module *mod; + if (addr < module_addr_min || addr > module_addr_max) + return NULL; + list_for_each_entry(mod, &modules, list) if (within(addr, mod->module_init, mod->init_text_size) || within(addr, mod->module_core, mod->core_text_size)) diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index b45da40e8d25..59dfdf1e1d20 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -82,7 +82,7 @@ config PM_SLEEP_SMP config PM_SLEEP bool - depends on SUSPEND || HIBERNATION + depends on SUSPEND || HIBERNATION || XEN_SAVE_RESTORE default y config SUSPEND diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c index 16eeeaa9d618..6f8696c502f4 100644 --- a/kernel/rcuclassic.c +++ b/kernel/rcuclassic.c @@ -106,7 +106,7 @@ static void force_quiescent_state(struct rcu_data *rdp, */ cpus_and(cpumask, rcp->cpumask, cpu_online_map); cpu_clear(rdp->cpu, cpumask); - for_each_cpu_mask(cpu, cpumask) + for_each_cpu_mask_nr(cpu, cpumask) smp_send_reschedule(cpu); } } diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c index 6f62b77d93c4..27827931ca0d 100644 --- a/kernel/rcupreempt.c +++ b/kernel/rcupreempt.c @@ -756,7 +756,7 @@ rcu_try_flip_idle(void) /* Now ask each CPU for acknowledgement of the flip. */ - for_each_cpu_mask(cpu, rcu_cpu_online_map) { + for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) { per_cpu(rcu_flip_flag, cpu) = rcu_flipped; dyntick_save_progress_counter(cpu); } @@ -774,7 +774,7 @@ rcu_try_flip_waitack(void) int cpu; RCU_TRACE_ME(rcupreempt_trace_try_flip_a1); - for_each_cpu_mask(cpu, rcu_cpu_online_map) + for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) if (rcu_try_flip_waitack_needed(cpu) && per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) { RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1); @@ -806,7 +806,7 @@ rcu_try_flip_waitzero(void) /* Check to see if the sum of the "last" counters is zero. */ RCU_TRACE_ME(rcupreempt_trace_try_flip_z1); - for_each_cpu_mask(cpu, rcu_cpu_online_map) + for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx]; if (sum != 0) { RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1); @@ -821,7 +821,7 @@ rcu_try_flip_waitzero(void) smp_mb(); /* ^^^^^^^^^^^^ */ /* Call for a memory barrier from each CPU. 
*/ - for_each_cpu_mask(cpu, rcu_cpu_online_map) { + for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) { per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed; dyntick_save_progress_counter(cpu); } @@ -841,7 +841,7 @@ rcu_try_flip_waitmb(void) int cpu; RCU_TRACE_ME(rcupreempt_trace_try_flip_m1); - for_each_cpu_mask(cpu, rcu_cpu_online_map) + for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) if (rcu_try_flip_waitmb_needed(cpu) && per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) { RCU_TRACE_ME(rcupreempt_trace_try_flip_me1); diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c index 092e4c620af9..a56f629b057a 100644 --- a/kernel/rtmutex-tester.c +++ b/kernel/rtmutex-tester.c @@ -297,8 +297,8 @@ static int test_func(void *data) * * opcode:data */ -static ssize_t sysfs_test_command(struct sys_device *dev, const char *buf, - size_t count) +static ssize_t sysfs_test_command(struct sys_device *dev, struct sysdev_attribute *attr, + const char *buf, size_t count) { struct sched_param schedpar; struct test_thread_data *td; @@ -360,7 +360,8 @@ static ssize_t sysfs_test_command(struct sys_device *dev, const char *buf, * @dev: thread to query * @buf: char buffer to be filled with thread status info */ -static ssize_t sysfs_test_status(struct sys_device *dev, char *buf) +static ssize_t sysfs_test_status(struct sys_device *dev, struct sysdev_attribute *attr, + char *buf) { struct test_thread_data *td; struct task_struct *tsk; diff --git a/kernel/sched.c b/kernel/sched.c index 62b1b8ecb5c7..6acf749d3336 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -2029,7 +2029,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) /* Tally up the load of all CPUs in the group */ avg_load = 0; - for_each_cpu_mask(i, group->cpumask) { + for_each_cpu_mask_nr(i, group->cpumask) { /* Bias balancing toward cpus of our domain */ if (local_group) load = source_load(i, load_idx); @@ -2071,7 +2071,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu, /* Traverse only the allowed CPUs */ cpus_and(*tmp, group->cpumask, p->cpus_allowed); - for_each_cpu_mask(i, *tmp) { + for_each_cpu_mask_nr(i, *tmp) { load = weighted_cpuload(i); if (load < min_load || (load == min_load && i == this_cpu)) { @@ -3089,7 +3089,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, max_cpu_load = 0; min_cpu_load = ~0UL; - for_each_cpu_mask(i, group->cpumask) { + for_each_cpu_mask_nr(i, group->cpumask) { struct rq *rq; if (!cpu_isset(i, *cpus)) @@ -3368,7 +3368,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, unsigned long max_load = 0; int i; - for_each_cpu_mask(i, group->cpumask) { + for_each_cpu_mask_nr(i, group->cpumask) { unsigned long wl; if (!cpu_isset(i, *cpus)) @@ -3910,7 +3910,7 @@ static void run_rebalance_domains(struct softirq_action *h) int balance_cpu; cpu_clear(this_cpu, cpus); - for_each_cpu_mask(balance_cpu, cpus) { + for_each_cpu_mask_nr(balance_cpu, cpus) { /* * If this cpu gets work to do, stop the load balancing * work being done for other cpus. 
Next load @@ -6721,7 +6721,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map, cpus_clear(*covered); - for_each_cpu_mask(i, *span) { + for_each_cpu_mask_nr(i, *span) { struct sched_group *sg; int group = group_fn(i, cpu_map, &sg, tmpmask); int j; @@ -6732,7 +6732,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map, cpus_clear(sg->cpumask); sg->__cpu_power = 0; - for_each_cpu_mask(j, *span) { + for_each_cpu_mask_nr(j, *span) { if (group_fn(j, cpu_map, NULL, tmpmask) != group) continue; @@ -6932,7 +6932,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head) if (!sg) return; do { - for_each_cpu_mask(j, sg->cpumask) { + for_each_cpu_mask_nr(j, sg->cpumask) { struct sched_domain *sd; sd = &per_cpu(phys_domains, j); @@ -6957,7 +6957,7 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) { int cpu, i; - for_each_cpu_mask(cpu, *cpu_map) { + for_each_cpu_mask_nr(cpu, *cpu_map) { struct sched_group **sched_group_nodes = sched_group_nodes_bycpu[cpu]; @@ -7196,7 +7196,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, /* * Set up domains for cpus specified by the cpu_map. */ - for_each_cpu_mask(i, *cpu_map) { + for_each_cpu_mask_nr(i, *cpu_map) { struct sched_domain *sd = NULL, *p; SCHED_CPUMASK_VAR(nodemask, allmasks); @@ -7263,7 +7263,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, #ifdef CONFIG_SCHED_SMT /* Set up CPU (sibling) groups */ - for_each_cpu_mask(i, *cpu_map) { + for_each_cpu_mask_nr(i, *cpu_map) { SCHED_CPUMASK_VAR(this_sibling_map, allmasks); SCHED_CPUMASK_VAR(send_covered, allmasks); @@ -7280,7 +7280,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, #ifdef CONFIG_SCHED_MC /* Set up multi-core groups */ - for_each_cpu_mask(i, *cpu_map) { + for_each_cpu_mask_nr(i, *cpu_map) { SCHED_CPUMASK_VAR(this_core_map, allmasks); SCHED_CPUMASK_VAR(send_covered, allmasks); @@ -7347,7 +7347,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, goto error; } sched_group_nodes[i] = sg; - for_each_cpu_mask(j, *nodemask) { + for_each_cpu_mask_nr(j, *nodemask) { struct sched_domain *sd; sd = &per_cpu(node_domains, j); @@ -7393,21 +7393,21 @@ static int __build_sched_domains(const cpumask_t *cpu_map, /* Calculate CPU power for physical packages and nodes */ #ifdef CONFIG_SCHED_SMT - for_each_cpu_mask(i, *cpu_map) { + for_each_cpu_mask_nr(i, *cpu_map) { struct sched_domain *sd = &per_cpu(cpu_domains, i); init_sched_groups_power(i, sd); } #endif #ifdef CONFIG_SCHED_MC - for_each_cpu_mask(i, *cpu_map) { + for_each_cpu_mask_nr(i, *cpu_map) { struct sched_domain *sd = &per_cpu(core_domains, i); init_sched_groups_power(i, sd); } #endif - for_each_cpu_mask(i, *cpu_map) { + for_each_cpu_mask_nr(i, *cpu_map) { struct sched_domain *sd = &per_cpu(phys_domains, i); init_sched_groups_power(i, sd); @@ -7427,7 +7427,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, #endif /* Attach the domains */ - for_each_cpu_mask(i, *cpu_map) { + for_each_cpu_mask_nr(i, *cpu_map) { struct sched_domain *sd; #ifdef CONFIG_SCHED_SMT sd = &per_cpu(cpu_domains, i); @@ -7510,7 +7510,7 @@ static void detach_destroy_domains(const cpumask_t *cpu_map) unregister_sched_domain_sysctl(); - for_each_cpu_mask(i, *cpu_map) + for_each_cpu_mask_nr(i, *cpu_map) cpu_attach_domain(NULL, &def_root_domain, i); synchronize_sched(); arch_destroy_sched_domains(cpu_map, &tmpmask); @@ -7641,11 +7641,13 @@ static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) } #ifdef 
CONFIG_SCHED_MC -static ssize_t sched_mc_power_savings_show(struct sys_device *dev, char *page) +static ssize_t sched_mc_power_savings_show(struct sys_device *dev, + struct sysdev_attribute *attr, char *page) { return sprintf(page, "%u\n", sched_mc_power_savings); } static ssize_t sched_mc_power_savings_store(struct sys_device *dev, + struct sysdev_attribute *attr, const char *buf, size_t count) { return sched_power_savings_store(buf, count, 0); @@ -7655,11 +7657,13 @@ static SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show, #endif #ifdef CONFIG_SCHED_SMT -static ssize_t sched_smt_power_savings_show(struct sys_device *dev, char *page) +static ssize_t sched_smt_power_savings_show(struct sys_device *dev, + struct sysdev_attribute *attr, char *page) { return sprintf(page, "%u\n", sched_smt_power_savings); } static ssize_t sched_smt_power_savings_store(struct sys_device *dev, + struct sysdev_attribute *attr, const char *buf, size_t count) { return sched_power_savings_store(buf, count, 1); diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 7f700263f04c..cf2cd6ce4cb2 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -1033,7 +1033,7 @@ static int wake_idle(int cpu, struct task_struct *p) && !task_hot(p, task_rq(p)->clock, sd))) { cpus_and(tmp, sd->span, p->cpus_allowed); cpus_and(tmp, tmp, cpu_active_map); - for_each_cpu_mask(i, tmp) { + for_each_cpu_mask_nr(i, tmp) { if (idle_cpu(i)) { if (i != task_cpu(p)) { schedstat_inc(p, diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 24621cea8bb0..f85a76363eee 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -240,7 +240,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq) spin_lock(&rt_b->rt_runtime_lock); rt_period = ktime_to_ns(rt_b->rt_period); - for_each_cpu_mask(i, rd->span) { + for_each_cpu_mask_nr(i, rd->span) { struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); s64 diff; @@ -1128,7 +1128,7 @@ static int pull_rt_task(struct rq *this_rq) next = pick_next_task_rt(this_rq); - for_each_cpu_mask(cpu, this_rq->rd->rto_mask) { + for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) { if (this_cpu == cpu) continue; diff --git a/kernel/softlockup.c b/kernel/softlockup.c index a272d78185eb..7bd8d1aadd5d 100644 --- a/kernel/softlockup.c +++ b/kernel/softlockup.c @@ -13,6 +13,7 @@ #include <linux/delay.h> #include <linux/freezer.h> #include <linux/kthread.h> +#include <linux/lockdep.h> #include <linux/notifier.h> #include <linux/module.h> @@ -25,7 +26,22 @@ static DEFINE_PER_CPU(unsigned long, print_timestamp); static DEFINE_PER_CPU(struct task_struct *, watchdog_task); static int __read_mostly did_panic; -unsigned long __read_mostly softlockup_thresh = 60; +int __read_mostly softlockup_thresh = 60; + +/* + * Should we panic (and reboot, if panic_timeout= is set) when a + * soft-lockup occurs: + */ +unsigned int __read_mostly softlockup_panic = + CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE; + +static int __init softlockup_panic_setup(char *str) +{ + softlockup_panic = simple_strtoul(str, NULL, 0); + + return 1; +} +__setup("softlockup_panic=", softlockup_panic_setup); static int softlock_panic(struct notifier_block *this, unsigned long event, void *ptr) @@ -84,6 +100,14 @@ void softlockup_tick(void) struct pt_regs *regs = get_irq_regs(); unsigned long now; + /* Is detection switched off? 
*/ + if (!per_cpu(watchdog_task, this_cpu) || softlockup_thresh <= 0) { + /* Be sure we don't false trigger if switched back on */ + if (touch_timestamp) + per_cpu(touch_timestamp, this_cpu) = 0; + return; + } + if (touch_timestamp == 0) { __touch_softlockup_watchdog(); return; @@ -92,11 +116,8 @@ void softlockup_tick(void) print_timestamp = per_cpu(print_timestamp, this_cpu); /* report at most once a second */ - if ((print_timestamp >= touch_timestamp && - print_timestamp < (touch_timestamp + 1)) || - did_panic || !per_cpu(watchdog_task, this_cpu)) { + if (print_timestamp == touch_timestamp || did_panic) return; - } /* do not print during early bootup: */ if (unlikely(system_state != SYSTEM_RUNNING)) { @@ -106,8 +127,11 @@ void softlockup_tick(void) now = get_timestamp(this_cpu); - /* Wake up the high-prio watchdog task every second: */ - if (now > (touch_timestamp + 1)) + /* + * Wake up the high-prio watchdog task twice per + * threshold timespan. + */ + if (now > touch_timestamp + softlockup_thresh/2) wake_up_process(per_cpu(watchdog_task, this_cpu)); /* Warn about unreasonable delays: */ @@ -121,11 +145,15 @@ void softlockup_tick(void) this_cpu, now - touch_timestamp, current->comm, task_pid_nr(current)); print_modules(); + print_irqtrace_events(current); if (regs) show_regs(regs); else dump_stack(); spin_unlock(&print_lock); + + if (softlockup_panic) + panic("softlockup: hung tasks"); } /* @@ -178,6 +206,9 @@ static void check_hung_task(struct task_struct *t, unsigned long now) t->last_switch_timestamp = now; touch_nmi_watchdog(); + + if (softlockup_panic) + panic("softlockup: blocked tasks"); } /* diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index ba9b2054ecbd..738b411ff2d3 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -33,8 +33,9 @@ static int stopmachine(void *cpu) { int irqs_disabled = 0; int prepared = 0; + cpumask_of_cpu_ptr(cpumask, (int)(long)cpu); - set_cpus_allowed_ptr(current, &cpumask_of_cpu((int)(long)cpu)); + set_cpus_allowed_ptr(current, cpumask); /* Ack: we are alive */ smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. 
*/ diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 5b9b467de070..0fea0ee12da9 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -59,6 +59,7 @@ cond_syscall(sys_epoll_create); cond_syscall(sys_epoll_ctl); cond_syscall(sys_epoll_wait); cond_syscall(sys_epoll_pwait); +cond_syscall(compat_sys_epoll_pwait); cond_syscall(sys_semget); cond_syscall(sys_semop); cond_syscall(sys_semtimedop); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 6b16e16428d8..2a7b9d88706b 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -88,12 +88,13 @@ extern int rcutorture_runnable; #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ /* Constants used for minimum and maximum */ -#if defined(CONFIG_DETECT_SOFTLOCKUP) || defined(CONFIG_HIGHMEM) +#if defined(CONFIG_HIGHMEM) || defined(CONFIG_DETECT_SOFTLOCKUP) static int one = 1; #endif #ifdef CONFIG_DETECT_SOFTLOCKUP static int sixty = 60; +static int neg_one = -1; #endif #ifdef CONFIG_MMU @@ -110,7 +111,7 @@ static int min_percpu_pagelist_fract = 8; static int ngroups_max = NGROUPS_MAX; -#ifdef CONFIG_KMOD +#ifdef CONFIG_MODULES extern char modprobe_path[]; #endif #ifdef CONFIG_CHR_DEV_SG @@ -475,7 +476,7 @@ static struct ctl_table kern_table[] = { .proc_handler = &ftrace_enable_sysctl, }, #endif -#ifdef CONFIG_KMOD +#ifdef CONFIG_MODULES { .ctl_name = KERN_MODPROBE, .procname = "modprobe", @@ -739,13 +740,24 @@ static struct ctl_table kern_table[] = { #ifdef CONFIG_DETECT_SOFTLOCKUP { .ctl_name = CTL_UNNUMBERED, + .procname = "softlockup_panic", + .data = &softlockup_panic, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .strategy = &sysctl_intvec, + .extra1 = &zero, + .extra2 = &one, + }, + { + .ctl_name = CTL_UNNUMBERED, .procname = "softlockup_thresh", .data = &softlockup_thresh, - .maxlen = sizeof(unsigned long), + .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_doulongvec_minmax, + .proc_handler = &proc_dointvec_minmax, .strategy = &sysctl_intvec, - .extra1 = &one, + .extra1 = &neg_one, .extra2 = &sixty, }, { diff --git a/kernel/taskstats.c b/kernel/taskstats.c index 4a23517169a6..06b17547f4e7 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c @@ -301,7 +301,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd) return -EINVAL; if (isadd == REGISTER) { - for_each_cpu_mask(cpu, mask) { + for_each_cpu_mask_nr(cpu, mask) { s = kmalloc_node(sizeof(struct listener), GFP_KERNEL, cpu_to_node(cpu)); if (!s) @@ -320,7 +320,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd) /* Deregister or cleanup */ cleanup: - for_each_cpu_mask(cpu, mask) { + for_each_cpu_mask_nr(cpu, mask) { listeners = &per_cpu(listener_array, cpu); down_write(&listeners->sem); list_for_each_entry_safe(s, tmp, &listeners->list, list) { diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index dadde5361f32..093d4acf993b 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -145,9 +145,9 @@ static void clocksource_watchdog(unsigned long data) * Cycle through CPUs to check if the CPUs stay * synchronized to each other. */ - int next_cpu = next_cpu(raw_smp_processor_id(), cpu_online_map); + int next_cpu = next_cpu_nr(raw_smp_processor_id(), cpu_online_map); - if (next_cpu >= NR_CPUS) + if (next_cpu >= nr_cpu_ids) next_cpu = first_cpu(cpu_online_map); watchdog_timer.expires += WATCHDOG_INTERVAL; add_timer_on(&watchdog_timer, next_cpu); @@ -376,7 +376,8 @@ void clocksource_unregister(struct clocksource *cs) * Provides sysfs interface for listing current clocksource. 
*/ static ssize_t -sysfs_show_current_clocksources(struct sys_device *dev, char *buf) +sysfs_show_current_clocksources(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { ssize_t count = 0; @@ -397,6 +398,7 @@ sysfs_show_current_clocksources(struct sys_device *dev, char *buf) * clocksource selction. */ static ssize_t sysfs_override_clocksource(struct sys_device *dev, + struct sysdev_attribute *attr, const char *buf, size_t count) { struct clocksource *ovr = NULL; @@ -449,7 +451,9 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev, * Provides sysfs interface for listing registered clocksources */ static ssize_t -sysfs_show_available_clocksources(struct sys_device *dev, char *buf) +sysfs_show_available_clocksources(struct sys_device *dev, + struct sysdev_attribute *attr, + char *buf) { struct clocksource *src; ssize_t count = 0; diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index f48d0f09d32f..31463d370b94 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -399,8 +399,7 @@ again: mask = CPU_MASK_NONE; now = ktime_get(); /* Find all expired events */ - for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS; - cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) { + for_each_cpu_mask_nr(cpu, tick_broadcast_oneshot_mask) { td = &per_cpu(tick_cpu_device, cpu); if (td->evtdev->next_event.tv64 <= now.tv64) cpu_set(cpu, mask); diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 4f3886562b8c..bf43284d6855 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -135,7 +135,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast) */ static void tick_setup_device(struct tick_device *td, struct clock_event_device *newdev, int cpu, - cpumask_t cpumask) + const cpumask_t *cpumask) { ktime_t next_event; void (*handler)(struct clock_event_device *) = NULL; @@ -169,8 +169,8 @@ static void tick_setup_device(struct tick_device *td, * When the device is not per cpu, pin the interrupt to the * current cpu: */ - if (!cpus_equal(newdev->cpumask, cpumask)) - irq_set_affinity(newdev->irq, cpumask); + if (!cpus_equal(newdev->cpumask, *cpumask)) + irq_set_affinity(newdev->irq, *cpumask); /* * When global broadcasting is active, check if the current @@ -196,20 +196,20 @@ static int tick_check_new_device(struct clock_event_device *newdev) struct tick_device *td; int cpu, ret = NOTIFY_OK; unsigned long flags; - cpumask_t cpumask; + cpumask_of_cpu_ptr_declare(cpumask); spin_lock_irqsave(&tick_device_lock, flags); cpu = smp_processor_id(); + cpumask_of_cpu_ptr_next(cpumask, cpu); if (!cpu_isset(cpu, newdev->cpumask)) goto out_bc; td = &per_cpu(tick_cpu_device, cpu); curdev = td->evtdev; - cpumask = cpumask_of_cpu(cpu); /* cpu local device ? 
*/ - if (!cpus_equal(newdev->cpumask, cpumask)) { + if (!cpus_equal(newdev->cpumask, *cpumask)) { /* * If the cpu affinity of the device interrupt can not @@ -222,7 +222,7 @@ static int tick_check_new_device(struct clock_event_device *newdev) * If we have a cpu local device already, do not replace it * by a non cpu local device */ - if (curdev && cpus_equal(curdev->cpumask, cpumask)) + if (curdev && cpus_equal(curdev->cpumask, *cpumask)) goto out_bc; } diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index beef7ccdf842..942fc7c85283 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -140,8 +140,6 @@ void tick_nohz_update_jiffies(void) if (!ts->tick_stopped) return; - touch_softlockup_watchdog(); - cpu_clear(cpu, nohz_cpu_mask); now = ktime_get(); ts->idle_waketime = now; @@ -149,6 +147,8 @@ void tick_nohz_update_jiffies(void) local_irq_save(flags); tick_do_update_jiffies64(now); local_irq_restore(flags); + + touch_softlockup_watchdog(); } void tick_nohz_stop_idle(int cpu) diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c index 2301e1e7c606..63528086337c 100644 --- a/kernel/trace/trace_sysprof.c +++ b/kernel/trace/trace_sysprof.c @@ -213,7 +213,9 @@ static void start_stack_timers(void) int cpu; for_each_online_cpu(cpu) { - set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); + cpumask_of_cpu_ptr(new_mask, cpu); + + set_cpus_allowed_ptr(current, new_mask); start_stack_timer(cpu); } set_cpus_allowed_ptr(current, &saved_mask); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index ce7799540c91..a6d36346d10a 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -397,7 +397,7 @@ void flush_workqueue(struct workqueue_struct *wq) might_sleep(); lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_); lock_release(&wq->lockdep_map, 1, _THIS_IP_); - for_each_cpu_mask(cpu, *cpu_map) + for_each_cpu_mask_nr(cpu, *cpu_map) flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu)); } EXPORT_SYMBOL_GPL(flush_workqueue); @@ -477,7 +477,7 @@ static void wait_on_work(struct work_struct *work) wq = cwq->wq; cpu_map = wq_cpu_map(wq); - for_each_cpu_mask(cpu, *cpu_map) + for_each_cpu_mask_nr(cpu, *cpu_map) wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work); } @@ -813,7 +813,7 @@ void destroy_workqueue(struct workqueue_struct *wq) list_del(&wq->list); spin_unlock(&workqueue_lock); - for_each_cpu_mask(cpu, *cpu_map) + for_each_cpu_mask_nr(cpu, *cpu_map) cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu)); put_online_cpus(); |
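
The largest change above is the kernel/module.c rework, which folds the per-export-table check callbacks into a single each_symbol() walker driven by a find_symbol_in_section() predicate and a find_symbol_arg struct carrying inputs and outputs. Below is a simplified user-space model of that callback-iteration pattern; the struct names echo the kernel's, but the tables, licence handling and types are deliberately reduced and are not the kernel's exact definitions:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct kernel_symbol {
        unsigned long value;
        const char *name;
};

struct symsearch {
        const struct kernel_symbol *start, *stop;
        bool gpl_only;          /* stands in for the licence enum */
};

struct find_symbol_arg {
        const char *name;       /* input */
        bool gplok;             /* input */
        unsigned long value;    /* output */
};

/* Per-symbol policy: return true to stop the walk. */
static bool find_symbol_in_section(const struct symsearch *syms,
                                   unsigned int symnum, void *data)
{
        struct find_symbol_arg *fsa = data;

        if (strcmp(syms->start[symnum].name, fsa->name) != 0)
                return false;
        if (syms->gpl_only && !fsa->gplok)
                return false;

        fsa->value = syms->start[symnum].value;
        return true;
}

/* Walk every symbol of every table, stopping when fn() returns true. */
static bool each_symbol(const struct symsearch *arr, unsigned int arrsize,
                        bool (*fn)(const struct symsearch *, unsigned int, void *),
                        void *data)
{
        unsigned int i, j;

        for (j = 0; j < arrsize; j++)
                for (i = 0; i < (unsigned int)(arr[j].stop - arr[j].start); i++)
                        if (fn(&arr[j], i, data))
                                return true;
        return false;
}

int main(void)
{
        static const struct kernel_symbol plain[] = {
                { 0x1000, "printk" },
        };
        static const struct kernel_symbol gpl[] = {
                { 0x2000, "some_gpl_only_helper" },
        };
        const struct symsearch arr[] = {
                { plain, plain + 1, false },
                { gpl, gpl + 1, true },
        };
        struct find_symbol_arg fsa = { .name = "printk", .gplok = false };

        if (each_symbol(arr, 2, find_symbol_in_section, &fsa))
                printf("%s found at 0x%lx\n", fsa.name, fsa.value);
        return 0;
}

The point of this shape is that the walker knows nothing about licensing, unused-symbol warnings or CRCs; all per-symbol policy lives in the predicate, which is what lets the CONFIG_UNUSED_SYMBOLS tables be added to or dropped from the table array with a single #ifdef, as the patch does.
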