author     James Bottomley <jejb@titanic.(none)>    2005-11-10 15:29:07 +0100
committer  James Bottomley <jejb@titanic.(none)>    2005-11-10 15:29:07 +0100
commit     8a87a0b6313109d2fea87b1271d497c954ce2ca8 (patch)
tree       1b7ae51ff681e27118590e9cab4bf0ce38f5d80e /kernel
parent     [SCSI] qla2xxx: Update version number to 8.01.03-k. (diff)
parent     Merge master.kernel.org:/pub/scm/linux/kernel/git/bart/ide-2.6 (diff)
Merge by hand (whitespace conflicts in libata.h)
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpu.c             |  33
-rw-r--r--  kernel/power/main.c      |   2
-rw-r--r--  kernel/power/power.h     |   6
-rw-r--r--  kernel/power/snapshot.c  |  83
-rw-r--r--  kernel/power/swsusp.c    | 210
-rw-r--r--  kernel/ptrace.c          |   2
-rw-r--r--  kernel/sched.c           | 160
-rw-r--r--  kernel/softlockup.c      |   3
-rw-r--r--  kernel/sysctl.c          | 136
9 files changed, 400 insertions, 235 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 3619e939182e..d61ba88f34e5 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -21,6 +21,24 @@
 EXPORT_SYMBOL_GPL(cpucontrol);
 
 static struct notifier_block *cpu_chain;
 
+/*
+ * Used to check by callers if they need to acquire the cpucontrol
+ * or not to protect a cpu from being removed. Its sometimes required to
+ * call these functions both for normal operations, and in response to
+ * a cpu being added/removed. If the context of the call is in the same
+ * thread context as a CPU hotplug thread, we dont need to take the lock
+ * since its already protected
+ * check drivers/cpufreq/cpufreq.c for its usage - Ashok Raj
+ */
+
+int current_in_cpu_hotplug(void)
+{
+    return (current->flags & PF_HOTPLUG_CPU);
+}
+
+EXPORT_SYMBOL_GPL(current_in_cpu_hotplug);
+
+
 /* Need to know about CPUs going up/down? */
 int register_cpu_notifier(struct notifier_block *nb)
 {
@@ -94,6 +112,13 @@ int cpu_down(unsigned int cpu)
         goto out;
     }
 
+    /*
+     * Leave a trace in current->flags indicating we are already in
+     * process of performing CPU hotplug. Callers can check if cpucontrol
+     * is already acquired by current thread, and if so not cause
+     * a dead lock by not acquiring the lock
+     */
+    current->flags |= PF_HOTPLUG_CPU;
     err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
                         (void *)(long)cpu);
     if (err == NOTIFY_BAD) {
@@ -146,6 +171,7 @@ out_thread:
 out_allowed:
     set_cpus_allowed(current, old_allowed);
 out:
+    current->flags &= ~PF_HOTPLUG_CPU;
     unlock_cpu_hotplug();
     return err;
 }
@@ -163,6 +189,12 @@ int __devinit cpu_up(unsigned int cpu)
         ret = -EINVAL;
         goto out;
     }
+
+    /*
+     * Leave a trace in current->flags indicating we are already in
+     * process of performing CPU hotplug.
+     */
+    current->flags |= PF_HOTPLUG_CPU;
     ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
     if (ret == NOTIFY_BAD) {
         printk("%s: attempt to bring up CPU %u failed\n",
@@ -185,6 +217,7 @@ out_notify:
     if (ret != 0)
         notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu);
 out:
+    current->flags &= ~PF_HOTPLUG_CPU;
     up(&cpucontrol);
     return ret;
 }
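The comment above spells out a lock-avoidance protocol: code that can run both normally and from within the hotplug path should take cpucontrol only in the former case. A minimal sketch of the intended caller pattern (the function driver_touch_cpu() is hypothetical; the real user is drivers/cpufreq/cpufreq.c):

/* Hypothetical caller, illustrating the protocol the new helper enables. */
static void driver_touch_cpu(unsigned int cpu)
{
    /* inside cpu_up()/cpu_down(), cpucontrol is already held */
    int need_lock = !current_in_cpu_hotplug();

    if (need_lock)
        lock_cpu_hotplug();     /* down(&cpucontrol) */

    /* ... touch per-cpu state that must not race with cpu_down() ... */

    if (need_lock)
        unlock_cpu_hotplug();   /* up(&cpucontrol) */
}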
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 18d7d693fbba..6ee2cad530e8 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -167,7 +167,7 @@ static int enter_state(suspend_state_t state)
 {
     int error;
 
-    if (pm_ops->valid && !pm_ops->valid(state))
+    if (pm_ops && pm_ops->valid && !pm_ops->valid(state))
         return -ENODEV;
     if (down_trylock(&pm_sem))
         return -EBUSY;
diff --git a/kernel/power/power.h b/kernel/power/power.h
index d4fd96a135ab..6c042b5ee14b 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -65,8 +65,8 @@ extern suspend_pagedir_t *pagedir_save;
 extern asmlinkage int swsusp_arch_suspend(void);
 extern asmlinkage int swsusp_arch_resume(void);
 
-extern int restore_highmem(void);
-extern struct pbe * alloc_pagedir(unsigned nr_pages);
+extern void free_pagedir(struct pbe *pblist);
+extern struct pbe *alloc_pagedir(unsigned nr_pages, gfp_t gfp_mask, int safe_needed);
 extern void create_pbe_list(struct pbe *pblist, unsigned nr_pages);
 extern void swsusp_free(void);
-extern int enough_swap(unsigned nr_pages);
+extern int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 723f5179883e..4a6dbcefd378 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -88,8 +88,7 @@ static int save_highmem_zone(struct zone *zone)
     return 0;
 }
 
-
-static int save_highmem(void)
+int save_highmem(void)
 {
     struct zone *zone;
     int res = 0;
@@ -120,11 +119,7 @@ int restore_highmem(void)
     }
     return 0;
 }
-#else
-static int save_highmem(void) { return 0; }
-int restore_highmem(void) { return 0; }
-#endif /* CONFIG_HIGHMEM */
-
+#endif
 
 static int pfn_is_nosave(unsigned long pfn)
 {
@@ -216,7 +211,7 @@ static void copy_data_pages(struct pbe *pblist)
  *    free_pagedir - free pages allocated with alloc_pagedir()
  */
 
-static void free_pagedir(struct pbe *pblist)
+void free_pagedir(struct pbe *pblist)
 {
     struct pbe *pbe;
 
@@ -269,9 +264,30 @@ void create_pbe_list(struct pbe *pblist, unsigned int nr_pages)
     pr_debug("create_pbe_list(): initialized %d PBEs\n", num);
 }
 
-static void *alloc_image_page(void)
+/**
+ *    @safe_needed - on resume, for storing the PBE list and the image,
+ *    we can only use memory pages that do not conflict with the pages
+ *    which had been used before suspend.
+ *
+ *    The unsafe pages are marked with the PG_nosave_free flag
+ *
+ *    Allocated but unusable (ie eaten) memory pages should be marked
+ *    so that swsusp_free() can release them
+ */
+
+static inline void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
 {
-    void *res = (void *)get_zeroed_page(GFP_ATOMIC | __GFP_COLD);
+    void *res;
+
+    if (safe_needed)
+        do {
+            res = (void *)get_zeroed_page(gfp_mask);
+            if (res && PageNosaveFree(virt_to_page(res)))
+                /* This is for swsusp_free() */
+                SetPageNosave(virt_to_page(res));
+        } while (res && PageNosaveFree(virt_to_page(res)));
+    else
+        res = (void *)get_zeroed_page(gfp_mask);
     if (res) {
         SetPageNosave(virt_to_page(res));
         SetPageNosaveFree(virt_to_page(res));
@@ -279,6 +295,11 @@ static void *alloc_image_page(void)
     return res;
 }
 
+unsigned long get_safe_page(gfp_t gfp_mask)
+{
+    return (unsigned long)alloc_image_page(gfp_mask, 1);
+}
+
 /**
  *    alloc_pagedir - Allocate the page directory.
  *
@@ -292,7 +313,7 @@ static void *alloc_image_page(void)
  *    On each page we set up a list of struct_pbe elements.
  */
 
-struct pbe *alloc_pagedir(unsigned int nr_pages)
+struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask, int safe_needed)
 {
     unsigned int num;
     struct pbe *pblist, *pbe;
@@ -301,12 +322,12 @@ struct pbe *alloc_pagedir(unsigned int nr_pages)
         return NULL;
 
     pr_debug("alloc_pagedir(): nr_pages = %d\n", nr_pages);
-    pblist = alloc_image_page();
+    pblist = alloc_image_page(gfp_mask, safe_needed);
     /* FIXME: rewrite this ugly loop */
     for (pbe = pblist, num = PBES_PER_PAGE; pbe && num < nr_pages;
             pbe = pbe->next, num += PBES_PER_PAGE) {
         pbe += PB_PAGE_SKIP;
-        pbe->next = alloc_image_page();
+        pbe->next = alloc_image_page(gfp_mask, safe_needed);
     }
     if (!pbe) { /* get_zeroed_page() failed */
         free_pagedir(pblist);
@@ -354,24 +375,32 @@ static int enough_free_mem(unsigned int nr_pages)
         (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
 }
 
+int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed)
+{
+    struct pbe *p;
+
+    for_each_pbe (p, pblist) {
+        p->address = (unsigned long)alloc_image_page(gfp_mask, safe_needed);
+        if (!p->address)
+            return -ENOMEM;
+    }
+    return 0;
+}
 
 static struct pbe *swsusp_alloc(unsigned int nr_pages)
 {
-    struct pbe *pblist, *p;
+    struct pbe *pblist;
 
-    if (!(pblist = alloc_pagedir(nr_pages))) {
+    if (!(pblist = alloc_pagedir(nr_pages, GFP_ATOMIC | __GFP_COLD, 0))) {
         printk(KERN_ERR "suspend: Allocating pagedir failed.\n");
         return NULL;
     }
     create_pbe_list(pblist, nr_pages);
 
-    for_each_pbe (p, pblist) {
-        p->address = (unsigned long)alloc_image_page();
-        if (!p->address) {
-            printk(KERN_ERR "suspend: Allocating image pages failed.\n");
-            swsusp_free();
-            return NULL;
-        }
+    if (alloc_data_pages(pblist, GFP_ATOMIC | __GFP_COLD, 0)) {
+        printk(KERN_ERR "suspend: Allocating image pages failed.\n");
+        swsusp_free();
+        return NULL;
     }
 
     return pblist;
@@ -382,11 +411,6 @@ asmlinkage int swsusp_save(void)
     unsigned int nr_pages;
 
     pr_debug("swsusp: critical section: \n");
-    if (save_highmem()) {
-        printk(KERN_CRIT "swsusp: Not enough free pages for highmem\n");
-        restore_highmem();
-        return -ENOMEM;
-    }
 
     drain_local_pages();
     nr_pages = count_data_pages();
@@ -406,11 +430,6 @@ asmlinkage int swsusp_save(void)
         return -ENOMEM;
     }
 
-    if (!enough_swap(nr_pages)) {
-        printk(KERN_ERR "swsusp: Not enough free swap\n");
-        return -ENOSPC;
-    }
-
     pagedir_nosave = swsusp_alloc(nr_pages);
     if (!pagedir_nosave)
         return -ENOMEM;
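The allocator rework threads two knobs through every allocation: gfp_mask and safe_needed. Suspend passes safe_needed == 0 (any free page will do); resume passes 1, so alloc_image_page() retries until it gets a page not marked PG_nosave_free, i.e. one that cannot collide with a page of the saved image. A condensed sketch of the resulting call pattern (hypothetical wrapper functions; they mirror swsusp_alloc() above and read_suspend_image() in swsusp.c below):

/* Suspend side: unsafe pages are fine (safe_needed == 0). */
static struct pbe *suspend_alloc_sketch(unsigned int nr_pages)
{
    struct pbe *pblist;

    pblist = alloc_pagedir(nr_pages, GFP_ATOMIC | __GFP_COLD, 0);
    if (!pblist)
        return NULL;
    create_pbe_list(pblist, nr_pages);
    if (alloc_data_pages(pblist, GFP_ATOMIC | __GFP_COLD, 0)) {
        swsusp_free();  /* releases everything marked PG_nosave */
        return NULL;
    }
    return pblist;
}

/* Resume side: only pages that cannot clash with the image (safe_needed == 1). */
static struct pbe *resume_alloc_sketch(unsigned int nr_pages)
{
    struct pbe *pblist = alloc_pagedir(nr_pages, GFP_ATOMIC, 1);

    if (pblist) {
        create_pbe_list(pblist, nr_pages);
        if (alloc_data_pages(pblist, GFP_ATOMIC, 1))
            return NULL;
    }
    return pblist;
}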
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index e1ab28b9b217..c05f46e7348f 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -73,6 +73,14 @@
 
 #include "power.h"
 
+#ifdef CONFIG_HIGHMEM
+int save_highmem(void);
+int restore_highmem(void);
+#else
+static int save_highmem(void) { return 0; }
+static int restore_highmem(void) { return 0; }
+#endif
+
 #define CIPHER "aes"
 #define MAXKEY 32
 #define MAXIV  32
@@ -500,6 +508,26 @@ static int write_pagedir(void)
 }
 
 /**
+ *    enough_swap - Make sure we have enough swap to save the image.
+ *
+ *    Returns TRUE or FALSE after checking the total amount of swap
+ *    space avaiable.
+ *
+ *    FIXME: si_swapinfo(&i) returns all swap devices information.
+ *    We should only consider resume_device.
+ */
+
+static int enough_swap(unsigned int nr_pages)
+{
+    struct sysinfo i;
+
+    si_swapinfo(&i);
+    pr_debug("swsusp: available swap: %lu pages\n", i.freeswap);
+    return i.freeswap > (nr_pages + PAGES_FOR_IO +
+        (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
+}
+
+/**
  *    write_suspend_image - Write entire image and metadata.
  *
  */
@@ -507,6 +535,11 @@ static int write_suspend_image(void)
 {
     int error;
 
+    if (!enough_swap(nr_copy_pages)) {
+        printk(KERN_ERR "swsusp: Not enough free swap\n");
+        return -ENOSPC;
+    }
+
     init_header();
     if ((error = data_write()))
         goto FreeData;
@@ -526,27 +559,6 @@ static int write_suspend_image(void)
     goto Done;
 }
 
-/**
- *    enough_swap - Make sure we have enough swap to save the image.
- *
- *    Returns TRUE or FALSE after checking the total amount of swap
- *    space avaiable.
- *
- *    FIXME: si_swapinfo(&i) returns all swap devices information.
- *    We should only consider resume_device.
- */
-
-int enough_swap(unsigned int nr_pages)
-{
-    struct sysinfo i;
-
-    si_swapinfo(&i);
-    pr_debug("swsusp: available swap: %lu pages\n", i.freeswap);
-    return i.freeswap > (nr_pages + PAGES_FOR_IO +
-        (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
-}
-
-
 /* It is important _NOT_ to umount filesystems at this point. We want
  * them synced (in case something goes wrong) but we DO not want to mark
  * filesystem clean: it is not. (And it does not matter, if we resume
@@ -556,12 +568,15 @@ int swsusp_write(void)
 {
     int error;
 
+    if ((error = swsusp_swap_check())) {
+        printk(KERN_ERR "swsusp: cannot find swap device, try swapon -a.\n");
+        return error;
+    }
     lock_swapdevices();
     error = write_suspend_image();
     /* This will unlock ignored swap devices since writing is finished */
     lock_swapdevices();
     return error;
-
 }
 
@@ -569,6 +584,7 @@ int swsusp_write(void)
 int swsusp_suspend(void)
 {
     int error;
+
     if ((error = arch_prepare_suspend()))
         return error;
     local_irq_disable();
@@ -580,15 +596,12 @@ int swsusp_suspend(void)
      */
     if ((error = device_power_down(PMSG_FREEZE))) {
         printk(KERN_ERR "Some devices failed to power down, aborting suspend\n");
-        local_irq_enable();
-        return error;
+        goto Enable_irqs;
     }
 
-    if ((error = swsusp_swap_check())) {
-        printk(KERN_ERR "swsusp: cannot find swap device, try swapon -a.\n");
-        device_power_up();
-        local_irq_enable();
-        return error;
+    if ((error = save_highmem())) {
+        printk(KERN_ERR "swsusp: Not enough free pages for highmem\n");
+        goto Restore_highmem;
     }
 
     save_processor_state();
@@ -596,8 +609,10 @@ int swsusp_suspend(void)
         printk(KERN_ERR "Error %d suspending\n", error);
     /* Restore control flow magically appears here */
     restore_processor_state();
+Restore_highmem:
     restore_highmem();
     device_power_up();
+Enable_irqs:
     local_irq_enable();
     return error;
 }
@@ -629,127 +644,43 @@ int swsusp_resume(void)
 }
 
 /**
- *    On resume, for storing the PBE list and the image,
- *    we can only use memory pages that do not conflict with the pages
- *    which had been used before suspend.
- *
- *    We don't know which pages are usable until we allocate them.
- *
- *    Allocated but unusable (ie eaten) memory pages are marked so that
- *    swsusp_free() can release them
- */
-
-unsigned long get_safe_page(gfp_t gfp_mask)
-{
-    unsigned long m;
-
-    do {
-        m = get_zeroed_page(gfp_mask);
-        if (m && PageNosaveFree(virt_to_page(m)))
-            /* This is for swsusp_free() */
-            SetPageNosave(virt_to_page(m));
-    } while (m && PageNosaveFree(virt_to_page(m)));
-    if (m) {
-        /* This is for swsusp_free() */
-        SetPageNosave(virt_to_page(m));
-        SetPageNosaveFree(virt_to_page(m));
-    }
-    return m;
-}
-
-/**
- *    check_pagedir - We ensure here that pages that the PBEs point to
- *    won't collide with pages where we're going to restore from the loaded
- *    pages later
- */
-
-static int check_pagedir(struct pbe *pblist)
-{
-    struct pbe *p;
-
-    /* This is necessary, so that we can free allocated pages
-     * in case of failure
-     */
-    for_each_pbe (p, pblist)
-        p->address = 0UL;
-
-    for_each_pbe (p, pblist) {
-        p->address = get_safe_page(GFP_ATOMIC);
-        if (!p->address)
-            return -ENOMEM;
-    }
-    return 0;
-}
-
-/**
- *    swsusp_pagedir_relocate - It is possible, that some memory pages
- *    occupied by the list of PBEs collide with pages where we're going to
- *    restore from the loaded pages later. We relocate them here.
+ *    mark_unsafe_pages - mark the pages that cannot be used for storing
+ *    the image during resume, because they conflict with the pages that
+ *    had been used before suspend
  */
 
-static struct pbe *swsusp_pagedir_relocate(struct pbe *pblist)
+static void mark_unsafe_pages(struct pbe *pblist)
 {
     struct zone *zone;
     unsigned long zone_pfn;
-    struct pbe *pbpage, *tail, *p;
-    void *m;
-    int rel = 0;
+    struct pbe *p;
 
     if (!pblist) /* a sanity check */
-        return NULL;
-
-    pr_debug("swsusp: Relocating pagedir (%lu pages to check)\n",
-            swsusp_info.pagedir_pages);
+        return;
 
     /* Clear page flags */
-
     for_each_zone (zone) {
-        for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
-            if (pfn_valid(zone_pfn + zone->zone_start_pfn))
-                ClearPageNosaveFree(pfn_to_page(zone_pfn +
+        for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
+            if (pfn_valid(zone_pfn + zone->zone_start_pfn))
+                ClearPageNosaveFree(pfn_to_page(zone_pfn +
                     zone->zone_start_pfn));
     }
 
     /* Mark orig addresses */
-
     for_each_pbe (p, pblist)
         SetPageNosaveFree(virt_to_page(p->orig_address));
 
-    tail = pblist + PB_PAGE_SKIP;
-
-    /* Relocate colliding pages */
-
-    for_each_pb_page (pbpage, pblist) {
-        if (PageNosaveFree(virt_to_page((unsigned long)pbpage))) {
-            m = (void *)get_safe_page(GFP_ATOMIC | __GFP_COLD);
-            if (!m)
-                return NULL;
-            memcpy(m, (void *)pbpage, PAGE_SIZE);
-            if (pbpage == pblist)
-                pblist = (struct pbe *)m;
-            else
-                tail->next = (struct pbe *)m;
-            pbpage = (struct pbe *)m;
-
-            /* We have to link the PBEs again */
-            for (p = pbpage; p < pbpage + PB_PAGE_SKIP; p++)
-                if (p->next) /* needed to save the end */
-                    p->next = p + 1;
-
-            rel++;
-        }
-        tail = pbpage + PB_PAGE_SKIP;
-    }
+}
 
-    /* This is for swsusp_free() */
-    for_each_pb_page (pbpage, pblist) {
-        SetPageNosave(virt_to_page(pbpage));
-        SetPageNosaveFree(virt_to_page(pbpage));
+static void copy_page_backup_list(struct pbe *dst, struct pbe *src)
+{
+    /* We assume both lists contain the same number of elements */
+    while (src) {
+        dst->orig_address = src->orig_address;
+        dst->swap_address = src->swap_address;
+        dst = dst->next;
+        src = src->next;
     }
-
-    printk("swsusp: Relocated %d pages\n", rel);
-
-    return pblist;
 }
 
@@ -888,7 +819,7 @@ static int check_sig(void)
          * Reset swap signature now.
          */
         error = bio_write_page(0, &swsusp_header);
-    } else {
+    } else {
         return -EINVAL;
     }
     if (!error)
@@ -990,20 +921,25 @@ static int read_suspend_image(void)
     int error = 0;
     struct pbe *p;
 
-    if (!(p = alloc_pagedir(nr_copy_pages)))
+    if (!(p = alloc_pagedir(nr_copy_pages, GFP_ATOMIC, 0)))
         return -ENOMEM;
 
     if ((error = read_pagedir(p)))
         return error;
-
     create_pbe_list(p, nr_copy_pages);
-
-    if (!(pagedir_nosave = swsusp_pagedir_relocate(p)))
+    mark_unsafe_pages(p);
+    pagedir_nosave = alloc_pagedir(nr_copy_pages, GFP_ATOMIC, 1);
+    if (pagedir_nosave) {
+        create_pbe_list(pagedir_nosave, nr_copy_pages);
+        copy_page_backup_list(pagedir_nosave, p);
+    }
+    free_pagedir(p);
+    if (!pagedir_nosave)
         return -ENOMEM;
 
     /* Allocate memory for the image and read the data from swap */
-    error = check_pagedir(pagedir_nosave);
+    error = alloc_data_pages(pagedir_nosave, GFP_ATOMIC, 1);
 
     if (!error)
         error = data_read(pagedir_nosave);
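enough_swap() now runs only on the write-out path, and its reserve arithmetic is worth making concrete: on top of the nr_pages data pages it demands PAGES_FOR_IO scratch pages plus one PBE page per PBES_PER_PAGE image pages. A standalone demo of that arithmetic (the constant values below are illustrative stand-ins; the real ones derive from PAGE_SIZE and sizeof(struct pbe)):

#include <stdio.h>

#define PAGES_FOR_IO    512     /* illustrative value */
#define PBES_PER_PAGE   128     /* illustrative: PAGE_SIZE / sizeof(struct pbe) */

/* same predicate as enough_swap(), with freeswap passed in explicitly */
static int enough_swap_demo(unsigned long freeswap, unsigned int nr_pages)
{
    unsigned long pbe_pages = (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE;

    printf("need %lu pages of swap\n", nr_pages + PAGES_FOR_IO + pbe_pages);
    return freeswap > (nr_pages + PAGES_FOR_IO + pbe_pages);
}

int main(void)
{
    /* a 100000-page image needs 100000 + 512 + 782 = 101294 swap pages */
    printf("fits: %d\n", enough_swap_demo(110000, 100000));
    return 0;
}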
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 5b8dd98a230e..b88d4186cd7a 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -155,7 +155,7 @@ int ptrace_attach(struct task_struct *task)
     retval = -EPERM;
     if (task->pid <= 1)
         goto bad;
-    if (task == current)
+    if (task->tgid == current->tgid)
         goto bad;
     /* the same process cannot be attached many times */
     if (task->ptrace & PT_PTRACED)
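The old check only rejected the exact calling thread, so with CLONE_THREAD one thread could still ptrace-attach to a sibling thread of its own process; comparing thread group ids closes that hole. A toy userspace illustration of the two predicates (the pid/tgid values are made up):

#include <stdio.h>

struct toy_task { int pid; int tgid; };

int main(void)
{
    /* two threads of one process: same tgid, distinct pids */
    struct toy_task tracer = { .pid = 1000, .tgid = 1000 };
    struct toy_task victim = { .pid = 1001, .tgid = 1000 };

    /* old predicate: misses same-process siblings */
    printf("old: %s\n", (&victim == &tracer) ? "denied" : "allowed");
    /* new predicate: any thread of the caller's group is rejected */
    printf("new: %s\n", (victim.tgid == tracer.tgid) ? "denied" : "allowed");
    return 0;
}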
diff --git a/kernel/sched.c b/kernel/sched.c
index 3ce26954be12..b6506671b2be 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -206,6 +206,7 @@ struct runqueue {
      */
     unsigned long nr_running;
 #ifdef CONFIG_SMP
+    unsigned long prio_bias;
     unsigned long cpu_load[3];
 #endif
     unsigned long long nr_switches;
@@ -659,13 +660,68 @@ static int effective_prio(task_t *p)
     return prio;
 }
 
+#ifdef CONFIG_SMP
+static inline void inc_prio_bias(runqueue_t *rq, int prio)
+{
+    rq->prio_bias += MAX_PRIO - prio;
+}
+
+static inline void dec_prio_bias(runqueue_t *rq, int prio)
+{
+    rq->prio_bias -= MAX_PRIO - prio;
+}
+
+static inline void inc_nr_running(task_t *p, runqueue_t *rq)
+{
+    rq->nr_running++;
+    if (rt_task(p)) {
+        if (p != rq->migration_thread)
+            /*
+             * The migration thread does the actual balancing. Do
+             * not bias by its priority as the ultra high priority
+             * will skew balancing adversely.
+             */
+            inc_prio_bias(rq, p->prio);
+    } else
+        inc_prio_bias(rq, p->static_prio);
+}
+
+static inline void dec_nr_running(task_t *p, runqueue_t *rq)
+{
+    rq->nr_running--;
+    if (rt_task(p)) {
+        if (p != rq->migration_thread)
+            dec_prio_bias(rq, p->prio);
+    } else
+        dec_prio_bias(rq, p->static_prio);
+}
+#else
+static inline void inc_prio_bias(runqueue_t *rq, int prio)
+{
+}
+
+static inline void dec_prio_bias(runqueue_t *rq, int prio)
+{
+}
+
+static inline void inc_nr_running(task_t *p, runqueue_t *rq)
+{
+    rq->nr_running++;
+}
+
+static inline void dec_nr_running(task_t *p, runqueue_t *rq)
+{
+    rq->nr_running--;
+}
+#endif
+
 /*
  * __activate_task - move a task to the runqueue.
  */
 static inline void __activate_task(task_t *p, runqueue_t *rq)
 {
     enqueue_task(p, rq->active);
-    rq->nr_running++;
+    inc_nr_running(p, rq);
 }
 
 /*
@@ -674,7 +730,7 @@ static inline void __activate_task(task_t *p, runqueue_t *rq)
 static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
 {
     enqueue_task_head(p, rq->active);
-    rq->nr_running++;
+    inc_nr_running(p, rq);
 }
 
 static int recalc_task_prio(task_t *p, unsigned long long now)
@@ -759,7 +815,8 @@ static void activate_task(task_t *p, runqueue_t *rq, int local)
     }
 #endif
 
-    p->prio = recalc_task_prio(p, now);
+    if (!rt_task(p))
+        p->prio = recalc_task_prio(p, now);
 
     /*
      * This checks to make sure it's not an uninterruptible task
@@ -793,7 +850,7 @@ static void activate_task(task_t *p, runqueue_t *rq, int local)
  */
 static void deactivate_task(struct task_struct *p, runqueue_t *rq)
 {
-    rq->nr_running--;
+    dec_nr_running(p, rq);
     dequeue_task(p, p->array);
     p->array = NULL;
 }
@@ -808,21 +865,28 @@ static void deactivate_task(struct task_struct *p, runqueue_t *rq)
 #ifdef CONFIG_SMP
 static void resched_task(task_t *p)
 {
-    int need_resched, nrpolling;
+    int cpu;
 
     assert_spin_locked(&task_rq(p)->lock);
 
-    /* minimise the chance of sending an interrupt to poll_idle() */
-    nrpolling = test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
-    need_resched = test_and_set_tsk_thread_flag(p,TIF_NEED_RESCHED);
-    nrpolling |= test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
+    if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
+        return;
+
+    set_tsk_thread_flag(p, TIF_NEED_RESCHED);
+
+    cpu = task_cpu(p);
+    if (cpu == smp_processor_id())
+        return;
 
-    if (!need_resched && !nrpolling && (task_cpu(p) != smp_processor_id()))
-        smp_send_reschedule(task_cpu(p));
+    /* NEED_RESCHED must be visible before we test POLLING_NRFLAG */
+    smp_mb();
+    if (!test_tsk_thread_flag(p, TIF_POLLING_NRFLAG))
+        smp_send_reschedule(cpu);
 }
 #else
 static inline void resched_task(task_t *p)
 {
+    assert_spin_locked(&task_rq(p)->lock);
     set_tsk_need_resched(p);
 }
 #endif
@@ -930,27 +994,61 @@ void kick_process(task_t *p)
  * We want to under-estimate the load of migration sources, to
  * balance conservatively.
  */
-static inline unsigned long source_load(int cpu, int type)
+static inline unsigned long __source_load(int cpu, int type, enum idle_type idle)
 {
     runqueue_t *rq = cpu_rq(cpu);
-    unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
+    unsigned long running = rq->nr_running;
+    unsigned long source_load, cpu_load = rq->cpu_load[type-1],
+        load_now = running * SCHED_LOAD_SCALE;
+
     if (type == 0)
-        return load_now;
+        source_load = load_now;
+    else
+        source_load = min(cpu_load, load_now);
 
-    return min(rq->cpu_load[type-1], load_now);
+    if (running > 1 || (idle == NOT_IDLE && running))
+        /*
+         * If we are busy rebalancing the load is biased by
+         * priority to create 'nice' support across cpus. When
+         * idle rebalancing we should only bias the source_load if
+         * there is more than one task running on that queue to
+         * prevent idle rebalance from trying to pull tasks from a
+         * queue with only one running task.
+         */
+        source_load = source_load * rq->prio_bias / running;
+
+    return source_load;
+}
+
+static inline unsigned long source_load(int cpu, int type)
+{
+    return __source_load(cpu, type, NOT_IDLE);
 }
 
 /*
  * Return a high guess at the load of a migration-target cpu
  */
-static inline unsigned long target_load(int cpu, int type)
+static inline unsigned long __target_load(int cpu, int type, enum idle_type idle)
 {
     runqueue_t *rq = cpu_rq(cpu);
-    unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
+    unsigned long running = rq->nr_running;
+    unsigned long target_load, cpu_load = rq->cpu_load[type-1],
+        load_now = running * SCHED_LOAD_SCALE;
+
     if (type == 0)
-        return load_now;
+        target_load = load_now;
+    else
+        target_load = max(cpu_load, load_now);
 
-    return max(rq->cpu_load[type-1], load_now);
+    if (running > 1 || (idle == NOT_IDLE && running))
+        target_load = target_load * rq->prio_bias / running;
+
+    return target_load;
+}
+
+static inline unsigned long target_load(int cpu, int type)
+{
+    return __target_load(cpu, type, NOT_IDLE);
 }
 
 /*
@@ -1411,7 +1509,7 @@ void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags)
                 list_add_tail(&p->run_list, &current->run_list);
                 p->array = current->array;
                 p->array->nr_active++;
-                rq->nr_running++;
+                inc_nr_running(p, rq);
             }
             set_need_resched();
         } else
@@ -1756,9 +1854,9 @@ void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
            runqueue_t *this_rq, prio_array_t *this_array, int this_cpu)
 {
     dequeue_task(p, src_array);
-    src_rq->nr_running--;
+    dec_nr_running(p, src_rq);
     set_task_cpu(p, this_cpu);
-    this_rq->nr_running++;
+    inc_nr_running(p, this_rq);
     enqueue_task(p, this_array);
     p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
                 + this_rq->timestamp_last_tick;
@@ -1937,9 +2035,9 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 
             /* Bias balancing toward cpus of our domain */
             if (local_group)
-                load = target_load(i, load_idx);
+                load = __target_load(i, load_idx, idle);
             else
-                load = source_load(i, load_idx);
+                load = __source_load(i, load_idx, idle);
 
             avg_load += load;
         }
@@ -2044,14 +2142,15 @@ out_balanced:
 /*
  * find_busiest_queue - find the busiest runqueue among the cpus in group.
  */
-static runqueue_t *find_busiest_queue(struct sched_group *group)
+static runqueue_t *find_busiest_queue(struct sched_group *group,
+    enum idle_type idle)
 {
     unsigned long load, max_load = 0;
     runqueue_t *busiest = NULL;
     int i;
 
     for_each_cpu_mask(i, group->cpumask) {
-        load = source_load(i, 0);
+        load = __source_load(i, 0, idle);
 
         if (load > max_load) {
             max_load = load;
@@ -2095,7 +2194,7 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
         goto out_balanced;
     }
 
-    busiest = find_busiest_queue(group);
+    busiest = find_busiest_queue(group, idle);
     if (!busiest) {
         schedstat_inc(sd, lb_nobusyq[idle]);
         goto out_balanced;
@@ -2218,7 +2317,7 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
         goto out_balanced;
     }
 
-    busiest = find_busiest_queue(group);
+    busiest = find_busiest_queue(group, NEWLY_IDLE);
     if (!busiest) {
         schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]);
         goto out_balanced;
@@ -3451,8 +3550,10 @@ void set_user_nice(task_t *p, long nice)
         goto out_unlock;
     }
     array = p->array;
-    if (array)
+    if (array) {
         dequeue_task(p, array);
+        dec_prio_bias(rq, p->static_prio);
+    }
 
     old_prio = p->prio;
     new_prio = NICE_TO_PRIO(nice);
@@ -3462,6 +3563,7 @@ void set_user_nice(task_t *p, long nice)
 
     if (array) {
         enqueue_task(p, array);
+        inc_prio_bias(rq, p->static_prio);
         /*
         * If the task increased its priority or is running and
         * lowered its priority, then reschedule its CPU:
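The effect of prio_bias on the balancer is easiest to see numerically: prio_bias accumulates MAX_PRIO - prio per runnable task, so a queue of nice-0 tasks (static_prio 120, bias 20 each) reports a far higher biased load than a queue of nice-19 tasks (static_prio 139, bias 1 each). A small self-contained demo of the scaling applied in __source_load()/__target_load() above (MAX_PRIO and SCHED_LOAD_SCALE are the kernel's values; the scenario is made up):

#include <stdio.h>

#define MAX_PRIO         140    /* kernel constant */
#define SCHED_LOAD_SCALE 128UL  /* kernel constant */

/* load seen by the balancer for a busy queue of `running` tasks whose
 * per-task contributions (MAX_PRIO - static_prio) sum into `prio_bias`
 */
static unsigned long biased_load(unsigned long running, unsigned long prio_bias)
{
    unsigned long load_now = running * SCHED_LOAD_SCALE;

    return load_now * prio_bias / running;
}

int main(void)
{
    /* two nice-0 tasks: bias 2 * (140 - 120) = 40 -> load 5120 */
    printf("nice  0: %lu\n", biased_load(2, 2 * (MAX_PRIO - 120)));
    /* two nice-19 tasks: bias 2 * (140 - 139) = 2 -> load 256 */
    printf("nice 19: %lu\n", biased_load(2, 2 * (MAX_PRIO - 139)));
    return 0;
}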
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index a2dcceb9437d..c67189a25d52 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -73,9 +73,6 @@ void softlockup_tick(struct pt_regs *regs)
 static int watchdog(void * __bind_cpu)
 {
     struct sched_param param = { .sched_priority = 99 };
-    int this_cpu = (long) __bind_cpu;
-
-    printk("softlockup thread %d started up.\n", this_cpu);
 
     sched_setscheduler(current, SCHED_FIFO, &param);
     current->flags |= PF_NOFREEZE;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index c4f35f96884d..9990e10192e8 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -169,7 +169,7 @@ struct file_operations proc_sys_file_operations = {
 
 extern struct proc_dir_entry *proc_sys_root;
 
-static void register_proc_table(ctl_table *, struct proc_dir_entry *);
+static void register_proc_table(ctl_table *, struct proc_dir_entry *, void *);
 static void unregister_proc_table(ctl_table *, struct proc_dir_entry *);
 #endif
 
@@ -992,10 +992,51 @@ static ctl_table dev_table[] = {
 
 extern void init_irq_proc (void);
 
+static DEFINE_SPINLOCK(sysctl_lock);
+
+/* called under sysctl_lock */
+static int use_table(struct ctl_table_header *p)
+{
+    if (unlikely(p->unregistering))
+        return 0;
+    p->used++;
+    return 1;
+}
+
+/* called under sysctl_lock */
+static void unuse_table(struct ctl_table_header *p)
+{
+    if (!--p->used)
+        if (unlikely(p->unregistering))
+            complete(p->unregistering);
+}
+
+/* called under sysctl_lock, will reacquire if has to wait */
+static void start_unregistering(struct ctl_table_header *p)
+{
+    /*
+     * if p->used is 0, nobody will ever touch that entry again;
+     * we'll eliminate all paths to it before dropping sysctl_lock
+     */
+    if (unlikely(p->used)) {
+        struct completion wait;
+        init_completion(&wait);
+        p->unregistering = &wait;
+        spin_unlock(&sysctl_lock);
+        wait_for_completion(&wait);
+        spin_lock(&sysctl_lock);
+    }
+    /*
+     * do not remove from the list until nobody holds it; walking the
+     * list in do_sysctl() relies on that.
+     */
+    list_del_init(&p->ctl_entry);
+}
+
 void __init sysctl_init(void)
 {
 #ifdef CONFIG_PROC_FS
-    register_proc_table(root_table, proc_sys_root);
+    register_proc_table(root_table, proc_sys_root, &root_table_header);
     init_irq_proc();
 #endif
 }
@@ -1004,6 +1045,7 @@ int do_sysctl(int __user *name, int nlen, void __user *oldval, size_t __user *oldlenp,
            void __user *newval, size_t newlen)
 {
     struct list_head *tmp;
+    int error = -ENOTDIR;
 
     if (nlen <= 0 || nlen >= CTL_MAXNAME)
         return -ENOTDIR;
@@ -1012,20 +1054,30 @@ int do_sysctl(int __user *name, int nlen, void __user *oldval, size_t __user *oldlenp,
         if (!oldlenp || get_user(old_len, oldlenp))
             return -EFAULT;
     }
+    spin_lock(&sysctl_lock);
     tmp = &root_table_header.ctl_entry;
     do {
         struct ctl_table_header *head =
             list_entry(tmp, struct ctl_table_header, ctl_entry);
         void *context = NULL;
-        int error = parse_table(name, nlen, oldval, oldlenp,
+
+        if (!use_table(head))
+            continue;
+
+        spin_unlock(&sysctl_lock);
+
+        error = parse_table(name, nlen, oldval, oldlenp,
                 newval, newlen, head->ctl_table,
                 &context);
         kfree(context);
+
+        spin_lock(&sysctl_lock);
+        unuse_table(head);
         if (error != -ENOTDIR)
-            return error;
-        tmp = tmp->next;
-    } while (tmp != &root_table_header.ctl_entry);
-    return -ENOTDIR;
+            break;
+    } while ((tmp = tmp->next) != &root_table_header.ctl_entry);
+    spin_unlock(&sysctl_lock);
+    return error;
 }
 
 asmlinkage long sys_sysctl(struct __sysctl_args __user *args)
@@ -1236,12 +1288,16 @@ struct ctl_table_header *register_sysctl_table(ctl_table * table,
         return NULL;
     tmp->ctl_table = table;
     INIT_LIST_HEAD(&tmp->ctl_entry);
+    tmp->used = 0;
+    tmp->unregistering = NULL;
+    spin_lock(&sysctl_lock);
     if (insert_at_head)
         list_add(&tmp->ctl_entry, &root_table_header.ctl_entry);
     else
         list_add_tail(&tmp->ctl_entry, &root_table_header.ctl_entry);
+    spin_unlock(&sysctl_lock);
 #ifdef CONFIG_PROC_FS
-    register_proc_table(table, proc_sys_root);
+    register_proc_table(table, proc_sys_root, tmp);
 #endif
     return tmp;
 }
@@ -1255,10 +1311,13 @@ struct ctl_table_header *register_sysctl_table(ctl_table * table,
  */
 void unregister_sysctl_table(struct ctl_table_header * header)
 {
-    list_del(&header->ctl_entry);
+    might_sleep();
+    spin_lock(&sysctl_lock);
+    start_unregistering(header);
 #ifdef CONFIG_PROC_FS
     unregister_proc_table(header->ctl_table, proc_sys_root);
 #endif
+    spin_unlock(&sysctl_lock);
     kfree(header);
 }
 
@@ -1269,7 +1328,7 @@ void unregister_sysctl_table(struct ctl_table_header * header)
 #ifdef CONFIG_PROC_FS
 
 /* Scan the sysctl entries in table and add them all into /proc */
-static void register_proc_table(ctl_table * table, struct proc_dir_entry *root)
+static void register_proc_table(ctl_table * table, struct proc_dir_entry *root, void *set)
 {
     struct proc_dir_entry *de;
     int len;
@@ -1305,13 +1364,14 @@ static void register_proc_table(ctl_table * table, struct proc_dir_entry *root)
             de = create_proc_entry(table->procname, mode, root);
             if (!de)
                 continue;
+            de->set = set;
             de->data = (void *) table;
             if (table->proc_handler)
                 de->proc_fops = &proc_sys_file_operations;
         }
         table->de = de;
         if (de->mode & S_IFDIR)
-            register_proc_table(table->child, de);
+            register_proc_table(table->child, de, set);
     }
 }
 
@@ -1336,6 +1396,13 @@ static void unregister_proc_table(ctl_table * table, struct proc_dir_entry *root)
             continue;
         }
 
+        /*
+         * In any case, mark the entry as goner; we'll keep it
+         * around if it's busy, but we'll know to do nothing with
+         * its fields. We are under sysctl_lock here.
+         */
+        de->data = NULL;
+
         /* Don't unregister proc entries that are still being used.. */
         if (atomic_read(&de->count))
             continue;
@@ -1349,27 +1416,38 @@ static ssize_t do_rw_proc(int write, struct file * file, char __user * buf,
               size_t count, loff_t *ppos)
 {
     int op;
-    struct proc_dir_entry *de;
+    struct proc_dir_entry *de = PDE(file->f_dentry->d_inode);
     struct ctl_table *table;
     size_t res;
-    ssize_t error;
-
-    de = PDE(file->f_dentry->d_inode);
-    if (!de || !de->data)
-        return -ENOTDIR;
-    table = (struct ctl_table *) de->data;
-    if (!table || !table->proc_handler)
-        return -ENOTDIR;
-    op = (write ? 002 : 004);
-    if (ctl_perm(table, op))
-        return -EPERM;
+    ssize_t error = -ENOTDIR;
 
-    res = count;
-
-    error = (*table->proc_handler) (table, write, file, buf, &res, ppos);
-    if (error)
-        return error;
-    return res;
+    spin_lock(&sysctl_lock);
+    if (de && de->data && use_table(de->set)) {
+        /*
+         * at that point we know that sysctl was not unregistered
+         * and won't be until we finish
+         */
+        spin_unlock(&sysctl_lock);
+        table = (struct ctl_table *) de->data;
+        if (!table || !table->proc_handler)
+            goto out;
+        error = -EPERM;
+        op = (write ? 002 : 004);
+        if (ctl_perm(table, op))
+            goto out;
+
+        /* careful: calling conventions are nasty here */
+        res = count;
+        error = (*table->proc_handler)(table, write, file,
+                        buf, &res, ppos);
+        if (!error)
+            error = res;
+    out:
+        spin_lock(&sysctl_lock);
+        unuse_table(de->set);
+    }
+    spin_unlock(&sysctl_lock);
+    return error;
 }
 
 static int proc_opensys(struct inode *inode, struct file *file)
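Taken together, the used counter and the unregistering completion implement a drain-style lifetime: readers register under sysctl_lock before touching a table, and teardown waits until the count drops to zero before the table can be freed. A self-contained userspace sketch of the same pattern using pthreads (all names here are illustrative, not kernel API; a condition variable stands in for the completion):

#include <pthread.h>
#include <stdio.h>

struct table_header {
    pthread_mutex_t lock;       /* plays the role of sysctl_lock */
    pthread_cond_t  drained;    /* plays the role of the completion */
    int             used;
    int             unregistering;
};

static int use_table(struct table_header *p)
{
    int ok;

    pthread_mutex_lock(&p->lock);
    ok = !p->unregistering;     /* no new users once teardown began */
    if (ok)
        p->used++;
    pthread_mutex_unlock(&p->lock);
    return ok;
}

static void unuse_table(struct table_header *p)
{
    pthread_mutex_lock(&p->lock);
    if (!--p->used && p->unregistering)
        pthread_cond_signal(&p->drained);   /* complete() */
    pthread_mutex_unlock(&p->lock);
}

static void start_unregistering(struct table_header *p)
{
    pthread_mutex_lock(&p->lock);
    p->unregistering = 1;
    while (p->used)                         /* wait_for_completion() */
        pthread_cond_wait(&p->drained, &p->lock);
    pthread_mutex_unlock(&p->lock);
    /* now nobody holds the table; safe to free it */
}

int main(void)
{
    struct table_header h = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .drained = PTHREAD_COND_INITIALIZER,
    };

    if (use_table(&h)) {
        /* ... read the table ... */
        unuse_table(&h);
    }
    start_unregistering(&h);
    puts("unregistered");
    return 0;
}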