From a52f5c5620673c292cb159205bf0e1eb5af1985b Mon Sep 17 00:00:00 2001 From: Dmitri Vorobiev Date: Fri, 1 May 2009 13:10:21 -0700 Subject: clockevents: tick_broadcast_device can become static The variable tick_broadcast_device is not used outside of the file where it is defined, so let's make it static. Signed-off-by: Dmitri Vorobiev Signed-off-by: Andrew Morton Signed-off-by: Thomas Gleixner --- kernel/time/tick-broadcast.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 118a3b3b3f9a..877dbedc3118 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -27,7 +27,7 @@ * timer stops in C3 state. */ -struct tick_device tick_broadcast_device; +static struct tick_device tick_broadcast_device; /* FIXME: Use cpumask_var_t. */ static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS); static DECLARE_BITMAP(tmpmask, NR_CPUS); -- cgit v1.2.3 From a04198887658e1d8ae25f5420035c057cb170e67 Mon Sep 17 00:00:00 2001 From: Jon Hunter Date: Fri, 1 May 2009 13:10:23 -0700 Subject: timers: allow deferrable timers for intervals tv2-tv5 to be deferred In the current kernel implementation only kernel timers for time interval tv1 are being deferred. This patch allows any timer that is configured as deferrable to be deferred regardless of time interval. This patch was previously discussed in http://marc.info/?l=linux-kernel&m=123196343531966&w=2 and was acked by Venki Pallipadi, the author of the original deferrable timer patch. Signed-off-by: Jon Hunter Acked-by: Venkatesh Pallipadi Signed-off-by: Andrew Morton Signed-off-by: Thomas Gleixner --- kernel/timer.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'kernel') diff --git a/kernel/timer.c b/kernel/timer.c index cffffad01c31..5c1e84beaf4a 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1015,6 +1015,9 @@ cascade: index = slot = timer_jiffies & TVN_MASK; do { list_for_each_entry(nte, varp->vec + slot, entry) { + if (tbase_get_deferrable(nte->base)) + continue; + found = 1; if (time_before(nte->expires, expires)) expires = nte->expires; -- cgit v1.2.3 From c81fc2c331b8514ad112054cd2d87e6ec132286b Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 1 May 2009 14:52:47 +0900 Subject: clockevent: export register_device and delta2ns Export the following symbols using EXPORT_SYMBOL_GPL: - clockevent_delta2ns - clockevents_register_device This allows us to build SuperH clockevent and clocksource drivers as modules, see drivers/clocksource/sh_*.c [ Impact: allow modular build of clockevent drivers ] Signed-off-by: Magnus Damm LKML-Reference: <20090501055247.8286.64067.sendpatchset@rx1.opensource.se> Signed-off-by: Thomas Gleixner --- kernel/time/clockevents.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index d13be216a790..3948fa644a2d 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c @@ -54,6 +54,7 @@ unsigned long clockevent_delta2ns(unsigned long latch, return (unsigned long) clc; } +EXPORT_SYMBOL_GPL(clockevent_delta2ns); /** * clockevents_set_mode - set the operating mode of a clock event device @@ -187,6 +188,7 @@ void clockevents_register_device(struct clock_event_device *dev) spin_unlock(&clockevents_lock); } +EXPORT_SYMBOL_GPL(clockevents_register_device); /* * Noop handler when we shut down an event device -- cgit v1.2.3 From 597d0275736dad9c3bda6f0a00a1c477dc0f37b1 Mon Sep 17 00:00:00 2001 From: Arun R Bharadwaj Date: Thu,
16 Apr 2009 12:13:26 +0530 Subject: timers: Framework for identifying pinned timers * Arun R Bharadwaj [2009-04-16 12:11:36]: This patch creates a new framework for identifying cpu-pinned timers and hrtimers. This framework is needed because pinned timers are expected to fire on the same CPU on which they are queued. So it is essential to identify these and not migrate them, in case there are any. For regular timers, the currently existing add_timer_on() can be used to queue pinned timers and subsequently mod_timer_pinned() can be used to modify the 'expires' field. For hrtimers, new modes HRTIMER_ABS_PINNED and HRTIMER_REL_PINNED are added to queue cpu-pinned hrtimers. [ tglx: use .._PINNED mode argument instead of creating tons of new functions ] Signed-off-by: Arun R Bharadwaj Signed-off-by: Thomas Gleixner --- include/linux/hrtimer.h | 7 +++++-- include/linux/timer.h | 3 +++ kernel/hrtimer.c | 7 ++++--- kernel/timer.c | 31 +++++++++++++++++++++++++++---- 4 files changed, 39 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 0d2f7c8a33d6..7400900de94a 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -30,8 +30,11 @@ struct hrtimer_cpu_base; * Mode arguments of xxx_hrtimer functions: */ enum hrtimer_mode { - HRTIMER_MODE_ABS, /* Time value is absolute */ - HRTIMER_MODE_REL, /* Time value is relative to now */ + HRTIMER_MODE_ABS = 0x0, /* Time value is absolute */ + HRTIMER_MODE_REL = 0x1, /* Time value is relative to now */ + HRTIMER_MODE_PINNED = 0x02, /* Timer is bound to CPU */ + HRTIMER_MODE_ABS_PINNED = 0x02, + HRTIMER_MODE_REL_PINNED = 0x03, }; /* diff --git a/include/linux/timer.h b/include/linux/timer.h index 6cdb6f3331f1..ccf882eed8f8 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -163,7 +163,10 @@ extern void add_timer_on(struct timer_list *timer, int cpu); extern int del_timer(struct timer_list * timer); extern int mod_timer(struct timer_list *timer, unsigned long expires); extern int mod_timer_pending(struct timer_list *timer, unsigned long expires); +extern int mod_timer_pinned(struct timer_list *timer, unsigned long expires); +#define TIMER_NOT_PINNED 0 +#define TIMER_PINNED 1 /* * The jiffies value which is added to now, when there is no timer * in the timer wheel: diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index cb8a15c19583..c71bcd549241 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -193,7 +193,8 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer, * Switch the timer base to the current CPU when possible.
*/ static inline struct hrtimer_clock_base * -switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base) +switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base, + int pinned) { struct hrtimer_clock_base *new_base; struct hrtimer_cpu_base *new_cpu_base; @@ -907,9 +908,9 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, ret = remove_hrtimer(timer, base); /* Switch the timer base, if necessary: */ - new_base = switch_hrtimer_base(timer, base); + new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED); - if (mode == HRTIMER_MODE_REL) { + if (mode & HRTIMER_MODE_REL) { tim = ktime_add_safe(tim, new_base->get_time()); /* * CONFIG_TIME_LOW_RES is a temporary way for architectures diff --git a/kernel/timer.c b/kernel/timer.c index 5c1e84beaf4a..3424dfd11d50 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -604,7 +604,8 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer, } static inline int -__mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only) +__mod_timer(struct timer_list *timer, unsigned long expires, + bool pending_only, int pinned) { struct tvec_base *base, *new_base; unsigned long flags; @@ -668,7 +669,7 @@ out_unlock: */ int mod_timer_pending(struct timer_list *timer, unsigned long expires) { - return __mod_timer(timer, expires, true); + return __mod_timer(timer, expires, true, TIMER_NOT_PINNED); } EXPORT_SYMBOL(mod_timer_pending); @@ -702,10 +703,32 @@ int mod_timer(struct timer_list *timer, unsigned long expires) if (timer->expires == expires && timer_pending(timer)) return 1; - return __mod_timer(timer, expires, false); + return __mod_timer(timer, expires, false, TIMER_NOT_PINNED); } EXPORT_SYMBOL(mod_timer); +/** + * mod_timer_pinned - modify a timer's timeout + * @timer: the timer to be modified + * @expires: new timeout in jiffies + * + * mod_timer_pinned() is a way to update the expire field of an + * active timer (if the timer is inactive it will be activated) + * and not allow the timer to be migrated to a different CPU. 
+ * + * mod_timer_pinned(timer, expires) is equivalent to: + * + * del_timer(timer); timer->expires = expires; add_timer(timer); + */ +int mod_timer_pinned(struct timer_list *timer, unsigned long expires) +{ + if (timer->expires == expires && timer_pending(timer)) + return 1; + + return __mod_timer(timer, expires, false, TIMER_PINNED); +} +EXPORT_SYMBOL(mod_timer_pinned); + /** * add_timer - start a timer * @timer: the timer to be added @@ -1356,7 +1379,7 @@ signed long __sched schedule_timeout(signed long timeout) expire = timeout + jiffies; setup_timer_on_stack(&timer, process_timeout, (unsigned long)current); - __mod_timer(&timer, expire, false); + __mod_timer(&timer, expire, false, TIMER_NOT_PINNED); schedule(); del_singleshot_timer_sync(&timer); -- cgit v1.2.3 From 5c333864a6ba811052d52ef14fbed056b9ac3512 Mon Sep 17 00:00:00 2001 From: Arun R Bharadwaj Date: Thu, 16 Apr 2009 12:14:37 +0530 Subject: timers: Identifying the existing pinned timers * Arun R Bharadwaj [2009-04-16 12:11:36]: The following pinned hrtimers have been identified and marked: 1)sched_rt_period_timer 2)tick_sched_timer 3)stack_trace_timer_fn [ tglx: fixup the hrtimer pinned mode ] Signed-off-by: Arun R Bharadwaj Signed-off-by: Thomas Gleixner --- arch/x86/kernel/apic/x2apic_uv_x.c | 2 +- kernel/sched.c | 4 ++-- kernel/time/tick-sched.c | 7 ++++--- kernel/trace/trace_sysprof.c | 3 ++- 4 files changed, 9 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 2bda69352976..a9cad1b00d6b 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -463,7 +463,7 @@ static void uv_heartbeat(unsigned long ignored) uv_set_scir_bits(bits); /* enable next timer period */ - mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL); + mod_timer_pinned(timer, jiffies + SCIR_CPU_HB_INTERVAL); } static void __cpuinit uv_heartbeat_enable(int cpu) diff --git a/kernel/sched.c b/kernel/sched.c index b902e587a3a0..9c5b4d3f97ab 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -244,7 +244,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b) hard = hrtimer_get_expires(&rt_b->rt_period_timer); delta = ktime_to_ns(ktime_sub(hard, soft)); __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta, - HRTIMER_MODE_ABS, 0); + HRTIMER_MODE_ABS_PINNED, 0); } spin_unlock(&rt_b->rt_runtime_lock); } @@ -1154,7 +1154,7 @@ static __init void init_hrtick(void) static void hrtick_start(struct rq *rq, u64 delay) { __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0, - HRTIMER_MODE_REL, 0); + HRTIMER_MODE_REL_PINNED, 0); } static inline void init_hrtick(void) diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index d3f1ef4d5cbe..2aff39c6f10c 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -349,7 +349,7 @@ void tick_nohz_stop_sched_tick(int inidle) if (ts->nohz_mode == NOHZ_MODE_HIGHRES) { hrtimer_start(&ts->sched_timer, expires, - HRTIMER_MODE_ABS); + HRTIMER_MODE_ABS_PINNED); /* Check, if the timer was already in the past */ if (hrtimer_active(&ts->sched_timer)) goto out; @@ -395,7 +395,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now) if (ts->nohz_mode == NOHZ_MODE_HIGHRES) { hrtimer_start_expires(&ts->sched_timer, - HRTIMER_MODE_ABS); + HRTIMER_MODE_ABS_PINNED); /* Check, if the timer was already in the past */ if (hrtimer_active(&ts->sched_timer)) break; @@ -698,7 +698,8 @@ void tick_setup_sched_timer(void) for (;;) { hrtimer_forward(&ts->sched_timer, now, 
tick_period); - hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS); + hrtimer_start_expires(&ts->sched_timer, + HRTIMER_MODE_ABS_PINNED); /* Check, if the timer was already in the past */ if (hrtimer_active(&ts->sched_timer)) break; diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c index 91fd19c2149f..d180554bc935 100644 --- a/kernel/trace/trace_sysprof.c +++ b/kernel/trace/trace_sysprof.c @@ -203,7 +203,8 @@ static void start_stack_timer(void *unused) hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); hrtimer->function = stack_trace_timer_fn; - hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL); + hrtimer_start(hrtimer, ns_to_ktime(sample_period), + HRTIMER_MODE_REL_PINNED); } static void start_stack_timers(void) -- cgit v1.2.3 From cd1bb94b4a0531e8211a3774f17de831f8285f76 Mon Sep 17 00:00:00 2001 From: Arun R Bharadwaj Date: Thu, 16 Apr 2009 12:15:34 +0530 Subject: timers: /proc/sys sysctl hook to enable timer migration * Arun R Bharadwaj [2009-04-16 12:11:36]: This patch creates the /proc/sys sysctl interface at /proc/sys/kernel/timer_migration Timer migration is enabled by default. To disable timer migration, when CONFIG_SCHED_DEBUG = y, echo 0 > /proc/sys/kernel/timer_migration Signed-off-by: Arun R Bharadwaj Signed-off-by: Thomas Gleixner --- include/linux/sched.h | 1 + kernel/sched.c | 2 ++ kernel/sysctl.c | 8 ++++++++ 3 files changed, 11 insertions(+) (limited to 'kernel') diff --git a/include/linux/sched.h b/include/linux/sched.h index b4c38bc8049c..618504010400 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1766,6 +1766,7 @@ extern unsigned int sysctl_sched_child_runs_first; extern unsigned int sysctl_sched_features; extern unsigned int sysctl_sched_migration_cost; extern unsigned int sysctl_sched_nr_migrate; +extern unsigned int sysctl_timer_migration; int sched_nr_latency_handler(struct ctl_table *table, int write, struct file *file, void __user *buffer, size_t *length, diff --git a/kernel/sched.c b/kernel/sched.c index 9c5b4d3f97ab..7f1dd56af863 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -8731,6 +8731,8 @@ void __init sched_init_smp(void) } #endif /* CONFIG_SMP */ +const_debug unsigned int sysctl_timer_migration = 1; + int in_sched_functions(unsigned long addr) { return in_lock_functions(addr) || diff --git a/kernel/sysctl.c b/kernel/sysctl.c index e3d2c7dd59b9..b3ce58137303 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -324,6 +324,14 @@ static struct ctl_table kern_table[] = { .mode = 0644, .proc_handler = &proc_dointvec, }, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "timer_migration", + .data = &sysctl_timer_migration, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, #endif { .ctl_name = CTL_UNNUMBERED, -- cgit v1.2.3 From eea08f32adb3f97553d49a4f79a119833036000a Mon Sep 17 00:00:00 2001 From: Arun R Bharadwaj Date: Thu, 16 Apr 2009 12:16:41 +0530 Subject: timers: Logic to move non pinned timers * Arun R Bharadwaj [2009-04-16 12:11:36]: This patch migrates all non pinned timers and hrtimers to the current idle load balancer, from all the idle CPUs. Timers firing on busy CPUs are not migrated. While migrating hrtimers, care should be taken to check if migrating a hrtimer would result in a latency or not. So we compare the expiry of the hrtimer with the next timer interrupt on the target cpu and migrate the hrtimer only if it expires *after* the next interrupt on the target cpu. 
So, added a clockevents_get_next_event() helper function to return the next_event on the target cpu's clock_event_device. [ tglx: cleanups and simplifications ] Signed-off-by: Arun R Bharadwaj Signed-off-by: Thomas Gleixner --- include/linux/clockchips.h | 9 ++++++++ include/linux/sched.h | 12 +++++++++++ kernel/hrtimer.c | 51 ++++++++++++++++++++++++++++++++++++++++++++-- kernel/sched.c | 5 +++++ kernel/time/clockevents.c | 12 +++++++++++ kernel/timer.c | 17 +++++++++++++--- 6 files changed, 101 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index 3a1dbba4d3ae..20a100fe2b4f 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h @@ -143,3 +143,12 @@ extern void clockevents_notify(unsigned long reason, void *arg); #endif #endif + +#ifdef CONFIG_GENERIC_CLOCKEVENTS +extern ktime_t clockevents_get_next_event(int cpu); +#else +static inline ktime_t clockevents_get_next_event(int cpu) +{ + return (ktime_t) { .tv64 = KTIME_MAX }; +} +#endif diff --git a/include/linux/sched.h b/include/linux/sched.h index 618504010400..311dec123974 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -257,6 +257,7 @@ extern void task_rq_unlock_wait(struct task_struct *p); extern cpumask_var_t nohz_cpu_mask; #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) extern int select_nohz_load_balancer(int cpu); +extern int get_nohz_load_balancer(void); #else static inline int select_nohz_load_balancer(int cpu) { @@ -1772,6 +1773,17 @@ int sched_nr_latency_handler(struct ctl_table *table, int write, struct file *file, void __user *buffer, size_t *length, loff_t *ppos); #endif +#ifdef CONFIG_SCHED_DEBUG +static inline unsigned int get_sysctl_timer_migration(void) +{ + return sysctl_timer_migration; +} +#else +static inline unsigned int get_sysctl_timer_migration(void) +{ + return 1; +} +#endif extern unsigned int sysctl_sched_rt_period; extern int sysctl_sched_rt_runtime; diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index c71bcd549241..b675a67c9ac3 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -43,6 +43,8 @@ #include #include #include +#include +#include #include @@ -198,8 +200,19 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base, { struct hrtimer_clock_base *new_base; struct hrtimer_cpu_base *new_cpu_base; + int cpu, preferred_cpu = -1; + + cpu = smp_processor_id(); +#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP) + if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu)) { + preferred_cpu = get_nohz_load_balancer(); + if (preferred_cpu >= 0) + cpu = preferred_cpu; + } +#endif - new_cpu_base = &__get_cpu_var(hrtimer_bases); +again: + new_cpu_base = &per_cpu(hrtimer_bases, cpu); new_base = &new_cpu_base->clock_base[base->index]; if (base != new_base) { @@ -219,6 +232,40 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base, timer->base = NULL; spin_unlock(&base->cpu_base->lock); spin_lock(&new_base->cpu_base->lock); + + /* Optimized away for NOHZ=n SMP=n */ + if (cpu == preferred_cpu) { + /* Calculate clock monotonic expiry time */ +#ifdef CONFIG_HIGH_RES_TIMERS + ktime_t expires = ktime_sub(hrtimer_get_expires(timer), + new_base->offset); +#else + ktime_t expires = hrtimer_get_expires(timer); +#endif + + /* + * Get the next event on target cpu from the + * clock events layer. + * This covers the highres=off nohz=on case as well. 
+ */ + ktime_t next = clockevents_get_next_event(cpu); + + ktime_t delta = ktime_sub(expires, next); + + /* + * We do not migrate the timer when it is expiring + * before the next event on the target cpu because + * we cannot reprogram the target cpu hardware and + * we would cause it to fire late. + */ + if (delta.tv64 < 0) { + cpu = smp_processor_id(); + spin_unlock(&new_base->cpu_base->lock); + spin_lock(&base->cpu_base->lock); + timer->base = base; + goto again; + } + } timer->base = new_base; } return new_base; @@ -236,7 +283,7 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) return base; } -# define switch_hrtimer_base(t, b) (b) +# define switch_hrtimer_base(t, b, p) (b) #endif /* !CONFIG_SMP */ diff --git a/kernel/sched.c b/kernel/sched.c index 7f1dd56af863..9fe3774a0fd3 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4244,6 +4244,11 @@ static struct { .load_balancer = ATOMIC_INIT(-1), }; +int get_nohz_load_balancer(void) +{ + return atomic_read(&nohz.load_balancer); +} + /* * This routine will try to nominate the ilb (idle load balancing) * owner among the cpus whose ticks are stopped. ilb owner will do the idle diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index d13be216a790..ab20ded013bd 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c @@ -18,6 +18,7 @@ #include #include #include +#include /* The registered clock event devices */ static LIST_HEAD(clockevent_devices); @@ -251,4 +252,15 @@ void clockevents_notify(unsigned long reason, void *arg) spin_unlock(&clockevents_lock); } EXPORT_SYMBOL_GPL(clockevents_notify); + +ktime_t clockevents_get_next_event(int cpu) +{ + struct tick_device *td; + struct clock_event_device *dev; + + td = &per_cpu(tick_cpu_device, cpu); + dev = td->evtdev; + + return dev->next_event; +} #endif diff --git a/kernel/timer.c b/kernel/timer.c index 3424dfd11d50..3f841db5edf9 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include @@ -609,9 +610,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, { struct tvec_base *base, *new_base; unsigned long flags; - int ret; - - ret = 0; + int ret = 0 , cpu; timer_stats_timer_set_start_info(timer); BUG_ON(!timer->function); @@ -630,6 +629,18 @@ __mod_timer(struct timer_list *timer, unsigned long expires, new_base = __get_cpu_var(tvec_bases); + cpu = smp_processor_id(); + +#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP) + if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu)) { + int preferred_cpu = get_nohz_load_balancer(); + + if (preferred_cpu >= 0) + cpu = preferred_cpu; + } +#endif + new_base = per_cpu(tvec_bases, cpu); + if (base != new_base) { /* * We are trying to schedule the timer on the local CPU. -- cgit v1.2.3 From a9862e0560866eadbc59b84867492004da436516 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Tue, 19 May 2009 22:49:07 +0200 Subject: Export add_timer_on for modules Needed in followon patch. Signed-off-by: Andi Kleen Signed-off-by: H. Peter Anvin Signed-off-by: Hidetoshi Seto Signed-off-by: H. Peter Anvin --- kernel/timer.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/timer.c b/kernel/timer.c index cffffad01c31..e2c47b82ac36 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -756,6 +756,7 @@ void add_timer_on(struct timer_list *timer, int cpu) wake_up_idle_cpu(cpu); spin_unlock_irqrestore(&base->lock, flags); } +EXPORT_SYMBOL_GPL(add_timer_on); /** * del_timer - deactive a timer. 
-- cgit v1.2.3 From ad6ccfad6f759a5d657dabe2071a8f2a503fcc84 Mon Sep 17 00:00:00 2001 From: Manish Katiyar Date: Tue, 12 May 2009 13:43:35 -0700 Subject: kernel/kallsyms.c: replace deprecated __initcall with device_initcall and fix whitespace Fix coding style whitespace issues and replace __initcall with device_initcall. Fixed multi-line comments as per coding style. Errors as reported by checkpatch.pl :- Before: total: 14 errors, 14 warnings, 487 lines checked After : total: 0 errors, 8 warnings, 507 lines checked Compile tested binary verified as :- Before: text data bss dec hex filename 2405 4 0 2409 969 kernel/kallsyms.o After : text data bss dec hex filename 2405 4 0 2409 969 kernel/kallsyms.o Signed-off-by: Manish Katiyar Signed-off-by: Andrew Morton Signed-off-by: Sam Ravnborg --- kernel/kallsyms.c | 134 +++++++++++++++++++++++++++++++----------------------- 1 file changed, 78 insertions(+), 56 deletions(-) (limited to 'kernel') diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index 374faf9bfdc7..3a29dbe7898e 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -30,12 +30,16 @@ #define all_var 0 #endif -/* These will be re-linked against their real values during the second link stage */ +/* + * These will be re-linked against their real values + * during the second link stage. + */ extern const unsigned long kallsyms_addresses[] __attribute__((weak)); extern const u8 kallsyms_names[] __attribute__((weak)); -/* tell the compiler that the count isn't in the small data section if the arch - * has one (eg: FRV) +/* + * Tell the compiler that the count isn't in the small data section if the arch + * has one (eg: FRV). */ extern const unsigned long kallsyms_num_syms __attribute__((weak, section(".rodata"))); @@ -75,31 +79,37 @@ static int is_ksym_addr(unsigned long addr) return is_kernel_text(addr) || is_kernel_inittext(addr); } -/* expand a compressed symbol data into the resulting uncompressed string, - given the offset to where the symbol is in the compressed stream */ +/* + * Expand a compressed symbol data into the resulting uncompressed string, + * given the offset to where the symbol is in the compressed stream. + */ static unsigned int kallsyms_expand_symbol(unsigned int off, char *result) { int len, skipped_first = 0; const u8 *tptr, *data; - /* get the compressed symbol length from the first symbol byte */ + /* Get the compressed symbol length from the first symbol byte. */ data = &kallsyms_names[off]; len = *data; data++; - /* update the offset to return the offset for the next symbol on - * the compressed stream */ + /* + * Update the offset to return the offset for the next symbol on + * the compressed stream. + */ off += len + 1; - /* for every byte on the compressed symbol data, copy the table - entry for that byte */ - while(len) { - tptr = &kallsyms_token_table[ kallsyms_token_index[*data] ]; + /* + * For every byte on the compressed symbol data, copy the table + * entry for that byte. + */ + while (len) { + tptr = &kallsyms_token_table[kallsyms_token_index[*data]]; data++; len--; while (*tptr) { - if(skipped_first) { + if (skipped_first) { *result = *tptr; result++; } else @@ -110,36 +120,46 @@ static unsigned int kallsyms_expand_symbol(unsigned int off, char *result) *result = '\0'; - /* return to offset to the next symbol */ + /* Return to offset to the next symbol. */ return off; } -/* get symbol type information. This is encoded as a single char at the - * begining of the symbol name */ +/* + * Get symbol type information. 
This is encoded as a single char at the + * beginning of the symbol name. + */ static char kallsyms_get_symbol_type(unsigned int off) { - /* get just the first code, look it up in the token table, and return the - * first char from this token */ - return kallsyms_token_table[ kallsyms_token_index[ kallsyms_names[off+1] ] ]; + /* + * Get just the first code, look it up in the token table, + * and return the first char from this token. + */ + return kallsyms_token_table[kallsyms_token_index[kallsyms_names[off + 1]]]; } -/* find the offset on the compressed stream given and index in the - * kallsyms array */ +/* + * Find the offset on the compressed stream given and index in the + * kallsyms array. + */ static unsigned int get_symbol_offset(unsigned long pos) { const u8 *name; int i; - /* use the closest marker we have. We have markers every 256 positions, - * so that should be close enough */ - name = &kallsyms_names[ kallsyms_markers[pos>>8] ]; + /* + * Use the closest marker we have. We have markers every 256 positions, + * so that should be close enough. + */ + name = &kallsyms_names[kallsyms_markers[pos >> 8]]; - /* sequentially scan all the symbols up to the point we're searching for. - * Every symbol is stored in a [][ bytes of data] format, so we - * just need to add the len to the current pointer for every symbol we - * wish to skip */ - for(i = 0; i < (pos&0xFF); i++) + /* + * Sequentially scan all the symbols up to the point we're searching + * for. Every symbol is stored in a [][ bytes of data] format, + * so we just need to add the len to the current pointer for every + * symbol we wish to skip. + */ + for (i = 0; i < (pos & 0xFF); i++) name = name + (*name) + 1; return name - kallsyms_names; @@ -190,7 +210,7 @@ static unsigned long get_symbol_pos(unsigned long addr, /* This kernel should never had been booted. */ BUG_ON(!kallsyms_addresses); - /* do a binary search on the sorted kallsyms_addresses array */ + /* Do a binary search on the sorted kallsyms_addresses array. */ low = 0; high = kallsyms_num_syms; @@ -203,15 +223,15 @@ static unsigned long get_symbol_pos(unsigned long addr, } /* - * search for the first aliased symbol. Aliased - * symbols are symbols with the same address + * Search for the first aliased symbol. Aliased + * symbols are symbols with the same address. */ while (low && kallsyms_addresses[low-1] == kallsyms_addresses[low]) --low; symbol_start = kallsyms_addresses[low]; - /* Search for next non-aliased symbol */ + /* Search for next non-aliased symbol. */ for (i = low + 1; i < kallsyms_num_syms; i++) { if (kallsyms_addresses[i] > symbol_start) { symbol_end = kallsyms_addresses[i]; @@ -219,7 +239,7 @@ static unsigned long get_symbol_pos(unsigned long addr, } } - /* if we found no next symbol, we use the end of the section */ + /* If we found no next symbol, we use the end of the section. */ if (!symbol_end) { if (is_kernel_inittext(addr)) symbol_end = (unsigned long)_einittext; @@ -252,10 +272,10 @@ int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize, /* * Lookup an address - * - modname is set to NULL if it's in the kernel - * - we guarantee that the returned name is valid until we reschedule even if - * it resides in a module - * - we also guarantee that modname will be valid until rescheduled + * - modname is set to NULL if it's in the kernel. + * - We guarantee that the returned name is valid until we reschedule even if. + * It resides in a module. + * - We also guarantee that modname will be valid until rescheduled. 
*/ const char *kallsyms_lookup(unsigned long addr, unsigned long *symbolsize, @@ -276,7 +296,7 @@ const char *kallsyms_lookup(unsigned long addr, return namebuf; } - /* see if it's in a module */ + /* See if it's in a module. */ return module_address_lookup(addr, symbolsize, offset, modname, namebuf); } @@ -294,7 +314,7 @@ int lookup_symbol_name(unsigned long addr, char *symname) kallsyms_expand_symbol(get_symbol_offset(pos), symname); return 0; } - /* see if it's in a module */ + /* See if it's in a module. */ return lookup_module_symbol_name(addr, symname); } @@ -313,7 +333,7 @@ int lookup_symbol_attrs(unsigned long addr, unsigned long *size, modname[0] = '\0'; return 0; } - /* see if it's in a module */ + /* See if it's in a module. */ return lookup_module_symbol_attrs(addr, size, offset, modname, name); } @@ -342,6 +362,7 @@ int sprint_symbol(char *buffer, unsigned long address) return len; } +EXPORT_SYMBOL_GPL(sprint_symbol); /* Look up a kernel symbol and print it to the kernel messages. */ void __print_symbol(const char *fmt, unsigned long address) @@ -352,13 +373,13 @@ void __print_symbol(const char *fmt, unsigned long address) printk(fmt, buffer); } +EXPORT_SYMBOL(__print_symbol); /* To avoid using get_symbol_offset for every symbol, we carry prefix along. */ -struct kallsym_iter -{ +struct kallsym_iter { loff_t pos; unsigned long value; - unsigned int nameoff; /* If iterating in core kernel symbols */ + unsigned int nameoff; /* If iterating in core kernel symbols. */ char type; char name[KSYM_NAME_LEN]; char module_name[MODULE_NAME_LEN]; @@ -404,7 +425,7 @@ static int update_iter(struct kallsym_iter *iter, loff_t pos) iter->pos = pos; return get_ksymbol_mod(iter); } - + /* If we're not on the desired position, reset to new position. */ if (pos != iter->pos) reset_iter(iter, pos); @@ -439,23 +460,25 @@ static int s_show(struct seq_file *m, void *p) { struct kallsym_iter *iter = m->private; - /* Some debugging symbols have no name. Ignore them. */ + /* Some debugging symbols have no name. Ignore them. */ if (!iter->name[0]) return 0; if (iter->module_name[0]) { char type; - /* Label it "global" if it is exported, - * "local" if not exported. */ + /* + * Label it "global" if it is exported, + * "local" if not exported. + */ type = iter->exported ? toupper(iter->type) : tolower(iter->type); seq_printf(m, "%0*lx %c %s\t[%s]\n", - (int)(2*sizeof(void*)), + (int)(2 * sizeof(void *)), iter->value, type, iter->name, iter->module_name); } else seq_printf(m, "%0*lx %c %s\n", - (int)(2*sizeof(void*)), + (int)(2 * sizeof(void *)), iter->value, iter->type, iter->name); return 0; } @@ -469,9 +492,11 @@ static const struct seq_operations kallsyms_op = { static int kallsyms_open(struct inode *inode, struct file *file) { - /* We keep iterator in m->private, since normal case is to + /* + * We keep iterator in m->private, since normal case is to * s_start from where we left off, so we avoid doing - * using get_symbol_offset for every symbol */ + * using get_symbol_offset for every symbol. 
+ */ struct kallsym_iter *iter; int ret; @@ -500,7 +525,4 @@ static int __init kallsyms_init(void) proc_create("kallsyms", 0444, NULL, &kallsyms_operations); return 0; } -__initcall(kallsyms_init); - -EXPORT_SYMBOL(__print_symbol); -EXPORT_SYMBOL_GPL(sprint_symbol); +device_initcall(kallsyms_init); -- cgit v1.2.3 From 3f68535adad8dd89499505a65fb25d0e02d118cc Mon Sep 17 00:00:00 2001 From: john stultz Date: Wed, 21 Jan 2009 22:53:22 -0700 Subject: clocksource: sanity check sysfs clocksource changes Thomas, Andrew and Ingo pointed out that we don't have any safety checks in the clocksource sysfs entries to make sure sysadmins don't try to change the clocksource to a non high-res timer capable clocksource (such as jiffies) when high-res timers (HRT) is enabled. Doing so will likely hang a system. Correct this by filtering non HRT clocksources from available_clocksources and not accepting non HRT clocksources with HRT enabled. Signed-off-by: John Stultz Signed-off-by: Andrew Morton Signed-off-by: Thomas Gleixner --- include/linux/hrtimer.h | 2 +- kernel/hrtimer.c | 4 ++-- kernel/time/clocksource.c | 18 +++++++++++++++++- 3 files changed, 20 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 0d2f7c8a33d6..58021b0c396d 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -305,7 +305,7 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer) extern ktime_t ktime_get(void); extern ktime_t ktime_get_real(void); - +extern int hrtimer_hres_active(void); DECLARE_PER_CPU(struct tick_device, tick_cpu_device); diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index cb8a15c19583..1a70c18cdffe 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -476,7 +476,7 @@ static inline int hrtimer_is_hres_enabled(void) /* * Is the high resolution mode active ? */ -static inline int hrtimer_hres_active(void) +int hrtimer_hres_active(void) { return __get_cpu_var(hrtimer_bases).hres_active; } @@ -704,7 +704,7 @@ static int hrtimer_switch_to_hres(void) #else -static inline int hrtimer_hres_active(void) { return 0; } +int hrtimer_hres_active(void) { return 0; } static inline int hrtimer_is_hres_enabled(void) { return 0; } static inline int hrtimer_switch_to_hres(void) { return 0; } static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { } diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 80189f6f1c5a..18b9f5da4ee9 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -30,6 +30,7 @@ #include #include /* for spin_unlock_irq() using preempt_count() m68k */ #include +#include void timecounter_init(struct timecounter *tc, const struct cyclecounter *cc, @@ -509,6 +510,18 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev, } } + /* + * Check to make sure we don't switch to a non-HRT usable + * clocksource if HRT is enabled and running + */ + if (hrtimer_hres_active() && + !(ovr->flags & CLOCK_SOURCE_VALID_FOR_HRES)) { + printk(KERN_WARNING "%s clocksource is not HRT compatible. 
" + "Cannot switch while in HRT mode\n", ovr->name); + ovr = NULL; + override_name[0] = 0; + } + /* Reselect, when the override name has changed */ if (ovr != clocksource_override) { clocksource_override = ovr; @@ -537,7 +550,10 @@ sysfs_show_available_clocksources(struct sys_device *dev, spin_lock_irq(&clocksource_lock); list_for_each_entry(src, &clocksource_list, list) { - count += snprintf(buf + count, + /* Don't show non-HRES clocksource if HRES is enabled */ + if (!hrtimer_hres_active() || + (src->flags & CLOCK_SOURCE_VALID_FOR_HRES)) + count += snprintf(buf + count, max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "%s ", src->name); } -- cgit v1.2.3 From 1dc492a0a4470852cb451db1e00d580ce9fd7a28 Mon Sep 17 00:00:00 2001 From: Manish Katiyar Date: Sun, 22 Feb 2009 10:24:27 +0530 Subject: trivial: kernel/power/poweroff.c: whitespace fix Fix coding style whitespace fixes. Patch compile tested Before :- total: 1 errors, 0 warnings, 46 lines checked After total: 0 errors, 0 warnings, 46 lines checked Before :- text data bss dec hex filename 107 48 0 155 9b kernel/power/poweroff.o After text data bss dec hex filename 107 48 0 155 9b kernel/power/poweroff.o Signed-off-by: Manish Katiyar Signed-off-by: Jiri Kosina --- kernel/power/poweroff.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c index 97890831e1b5..e8b337006276 100644 --- a/kernel/power/poweroff.c +++ b/kernel/power/poweroff.c @@ -34,7 +34,7 @@ static struct sysrq_key_op sysrq_poweroff_op = { .handler = handle_poweroff, .help_msg = "powerOff", .action_msg = "Power Off", - .enable_mask = SYSRQ_ENABLE_BOOT, + .enable_mask = SYSRQ_ENABLE_BOOT, }; static int pm_sysrq_init(void) -- cgit v1.2.3 From 3ac49a1c9928b4a242b3cb1d83bc1d5c9b8fcb50 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Thu, 4 Jun 2009 16:20:28 +0200 Subject: trivial: fix ETIMEOUT -> ETIMEDOUT typos fix ETIMEOUT -> ETIMEDOUT typos Signed-off-by: Jean Delvare Signed-off-by: Jiri Kosina --- arch/arm/mach-sa1100/jornada720_ssp.c | 4 ++-- drivers/net/qlge/qlge_main.c | 2 +- drivers/net/usb/usbnet.c | 2 +- drivers/staging/wlan-ng/hfa384x_usb.c | 2 +- kernel/rtmutex.c | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/arch/arm/mach-sa1100/jornada720_ssp.c b/arch/arm/mach-sa1100/jornada720_ssp.c index 28cf36967977..506a5e5a9ad5 100644 --- a/arch/arm/mach-sa1100/jornada720_ssp.c +++ b/arch/arm/mach-sa1100/jornada720_ssp.c @@ -54,7 +54,7 @@ EXPORT_SYMBOL(jornada_ssp_reverse); * timeout after rounds. Needs mcu running before its called. * * returns : %mcu output on success - * : %-ETIMEOUT on timeout + * : %-ETIMEDOUT on timeout */ int jornada_ssp_byte(u8 byte) { @@ -82,7 +82,7 @@ EXPORT_SYMBOL(jornada_ssp_byte); * jornada_ssp_inout - decide if input is command or trading byte * * returns : (jornada_ssp_byte(byte)) on success - * : %-ETIMEOUT on timeout failure + * : %-ETIMEDOUT on timeout failure */ int jornada_ssp_inout(u8 byte) { diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c index c92ced247947..1fd5ecb24425 100644 --- a/drivers/net/qlge/qlge_main.c +++ b/drivers/net/qlge/qlge_main.c @@ -3174,7 +3174,7 @@ static int ql_adapter_reset(struct ql_adapter *qdev) if (value & RST_FO_FR) { QPRINTK(qdev, IFDOWN, ERR, - "ETIMEOUT!!! errored out of resetting the chip!\n"); + "ETIMEDOUT!!! 
errored out of resetting the chip!\n"); status = -ETIMEDOUT; } diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index f3a2fce6166c..47f68cfa7e21 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -398,7 +398,7 @@ static void rx_complete (struct urb *urb) /* stalls need manual reset. this is rare ... except that * when going through USB 2.0 TTs, unplug appears this way. - * we avoid the highspeed version of the ETIMEOUT/EILSEQ + * we avoid the highspeed version of the ETIMEDOUT/EILSEQ * storm, recovering as needed. */ case -EPIPE: diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c index 888198c9a106..824e65bdc433 100644 --- a/drivers/staging/wlan-ng/hfa384x_usb.c +++ b/drivers/staging/wlan-ng/hfa384x_usb.c @@ -2424,7 +2424,7 @@ int hfa384x_drvr_ramdl_write(hfa384x_t *hw, u32 daddr, void *buf, u32 len) * 0 success * >0 f/w reported error - f/w status code * <0 driver reported error -* -ETIMEOUT timout waiting for the cmd regs to become +* -ETIMEDOUT timout waiting for the cmd regs to become * available, or waiting for the control reg * to indicate the Aux port is enabled. * -ENODATA the buffer does NOT contain a valid PDA. diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index 820c5af44f3e..fcd107a78c5a 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c @@ -902,7 +902,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); * Returns: * 0 on success * -EINTR when interrupted by a signal - * -ETIMEOUT when the timeout expired + * -ETIMEDOUT when the timeout expired * -EDEADLK when the lock would deadlock (when deadlock detection is on) */ int -- cgit v1.2.3 From 7c692cbade8b8884f1c20500393bcc7cd6d24ef8 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Wed, 21 May 2008 22:53:13 +0200 Subject: tasklets: new tasklet scheduling function Rationale: kmemcheck needs to be able to schedule a tasklet without touching any dynamically allocated memory _at_ _all_ (since that would lead to a recursive page fault). This tasklet is used for writing the error reports to the kernel log. The new scheduling function avoids touching any other tasklets by inserting the new tasklet as the head of the "tasklet_hi" list instead of on the tail. Also don't wake up the softirq thread lest the scheduler access some tracked memory and we go down with a recursive page fault. In this case, we'd better just wait for the maximum time of 1/HZ for the message to appear. Signed-off-by: Vegard Nossum --- include/linux/interrupt.h | 14 ++++++++++++++ kernel/softirq.c | 11 +++++++++++ 2 files changed, 25 insertions(+) (limited to 'kernel') diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index ff374ceface0..dd574d51bcaa 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -466,6 +466,20 @@ static inline void tasklet_hi_schedule(struct tasklet_struct *t) __tasklet_hi_schedule(t); } +extern void __tasklet_hi_schedule_first(struct tasklet_struct *t); + +/* + * This version avoids touching any other tasklets. Needed for kmemcheck + * in order not to take any page faults while enqueueing this tasklet; + * consider VERY carefully whether you really need this or + * tasklet_hi_schedule()...
+ */ +static inline void tasklet_hi_schedule_first(struct tasklet_struct *t) +{ + if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) + __tasklet_hi_schedule_first(t); +} + static inline void tasklet_disable_nosync(struct tasklet_struct *t) { diff --git a/kernel/softirq.c b/kernel/softirq.c index 258885a543db..b41fb710e114 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -382,6 +382,17 @@ void __tasklet_hi_schedule(struct tasklet_struct *t) EXPORT_SYMBOL(__tasklet_hi_schedule); +void __tasklet_hi_schedule_first(struct tasklet_struct *t) +{ + BUG_ON(!irqs_disabled()); + + t->next = __get_cpu_var(tasklet_hi_vec).head; + __get_cpu_var(tasklet_hi_vec).head = t; + __raise_softirq_irqoff(HI_SOFTIRQ); +} + +EXPORT_SYMBOL(__tasklet_hi_schedule_first); + static void tasklet_action(struct softirq_action *a) { struct tasklet_struct *list; -- cgit v1.2.3 From cd6d95d8449b7c9f415f26041e9ae173d387b6bd Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 12 Jun 2009 11:29:27 +0200 Subject: clocksource: prevent selection of low resolution clocksource also for nohz=on commit 3f68535adad (clocksource: sanity check sysfs clocksource changes) prevents selection of non high resolution capable clocksources when high resolution mode is active, but did not take into account that the same rules apply for highres=off nohz=on. Check the tick device mode instead of hrtimer_hres_active() to verify whether the system needs to be protected from a switch to jiffies or other non highres capable clock sources. Reported-by: Luming Yu Signed-off-by: Thomas Gleixner --- include/linux/hrtimer.h | 2 +- include/linux/tick.h | 3 +++ kernel/hrtimer.c | 4 ++-- kernel/time/clocksource.c | 18 ++++++++++-------- kernel/time/tick-oneshot.c | 17 +++++++++++++++++ 5 files changed, 33 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 58021b0c396d..0d2f7c8a33d6 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -305,7 +305,7 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer) extern ktime_t ktime_get(void); extern ktime_t ktime_get_real(void); -extern int hrtimer_hres_active(void); + DECLARE_PER_CPU(struct tick_device, tick_cpu_device); diff --git a/include/linux/tick.h b/include/linux/tick.h index 469b82d88b3b..0482229c07db 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -97,10 +97,12 @@ extern void tick_clock_notify(void); extern int tick_check_oneshot_change(int allow_nohz); extern struct tick_sched *tick_get_tick_sched(int cpu); extern void tick_check_idle(int cpu); +extern int tick_oneshot_mode_active(void); # else static inline void tick_clock_notify(void) { } static inline int tick_check_oneshot_change(int allow_nohz) { return 0; } static inline void tick_check_idle(int cpu) { } +static inline int tick_oneshot_mode_active(void) { return 0; } # endif #else /* CONFIG_GENERIC_CLOCKEVENTS */ @@ -109,6 +111,7 @@ static inline void tick_cancel_sched_timer(int cpu) { } static inline void tick_clock_notify(void) { } static inline int tick_check_oneshot_change(int allow_nohz) { return 0; } static inline void tick_check_idle(int cpu) { } +static inline int tick_oneshot_mode_active(void) { return 0; } #endif /* !CONFIG_GENERIC_CLOCKEVENTS */ # ifdef CONFIG_NO_HZ diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 1a70c18cdffe..cb8a15c19583 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -476,7 +476,7 @@ static inline int hrtimer_is_hres_enabled(void) /* * Is the high resolution mode active ?
*/ -int hrtimer_hres_active(void) +static inline int hrtimer_hres_active(void) { return __get_cpu_var(hrtimer_bases).hres_active; } @@ -704,7 +704,7 @@ static int hrtimer_switch_to_hres(void) #else -int hrtimer_hres_active(void) { return 0; } +static inline int hrtimer_hres_active(void) { return 0; } static inline int hrtimer_is_hres_enabled(void) { return 0; } static inline int hrtimer_switch_to_hres(void) { return 0; } static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { } diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 18b9f5da4ee9..592bf584d1d2 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -30,7 +30,6 @@ #include #include /* for spin_unlock_irq() using preempt_count() m68k */ #include -#include void timecounter_init(struct timecounter *tc, const struct cyclecounter *cc, @@ -511,13 +510,13 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev, } /* - * Check to make sure we don't switch to a non-HRT usable - * clocksource if HRT is enabled and running + * Check to make sure we don't switch to a non-highres capable + * clocksource if the tick code is in oneshot mode (highres or nohz) */ - if (hrtimer_hres_active() && + if (tick_oneshot_mode_active() && !(ovr->flags & CLOCK_SOURCE_VALID_FOR_HRES)) { printk(KERN_WARNING "%s clocksource is not HRT compatible. " - "Cannot switch while in HRT mode\n", ovr->name); + "Cannot switch while in HRT/NOHZ mode\n", ovr->name); ovr = NULL; override_name[0] = 0; } @@ -550,9 +549,12 @@ sysfs_show_available_clocksources(struct sys_device *dev, spin_lock_irq(&clocksource_lock); list_for_each_entry(src, &clocksource_list, list) { - /* Don't show non-HRES clocksource if HRES is enabled */ - if (!hrtimer_hres_active() || - (src->flags & CLOCK_SOURCE_VALID_FOR_HRES)) + /* + * Don't show non-HRES clocksource if the tick code is + * in one shot mode (highres=on or nohz=on) + */ + if (!tick_oneshot_mode_active() || + (src->flags & CLOCK_SOURCE_VALID_FOR_HRES)) count += snprintf(buf + count, max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "%s ", src->name); diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c index 2e8de678e767..a96c0e2b89cf 100644 --- a/kernel/time/tick-oneshot.c +++ b/kernel/time/tick-oneshot.c @@ -128,6 +128,23 @@ int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *)) return 0; } +/** + * tick_check_oneshot_mode - check whether the system is in oneshot mode + * + * returns 1 when either nohz or highres are enabled. otherwise 0. + */ +int tick_oneshot_mode_active(void) +{ + unsigned long flags; + int ret; + + local_irq_save(flags); + ret = __get_cpu_var(tick_cpu_device).mode == TICKDEV_MODE_ONESHOT; + local_irq_restore(flags); + + return ret; +} + #ifdef CONFIG_HIGH_RES_TIMERS /** * tick_init_highres - switch to high resolution mode -- cgit v1.2.3 From dfec072ecd35ba6ecad2d51dde325253ac9a2936 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Fri, 4 Apr 2008 00:51:41 +0200 Subject: kmemcheck: add the kmemcheck core General description: kmemcheck is a patch to the linux kernel that detects use of uninitialized memory. It does this by trapping every read and write to memory that was allocated dynamically (e.g. using kmalloc()). If a memory address is read that has not previously been written to, a message is printed to the kernel log. Thanks to Andi Kleen for the set_memory_4k() solution. Andrew Morton suggested documenting the shadow member of struct page. 
Signed-off-by: Vegard Nossum Signed-off-by: Pekka Enberg [export kmemcheck_mark_initialized] [build fix for setup_max_cpus] Signed-off-by: Ingo Molnar [rebased for mainline inclusion] Signed-off-by: Vegard Nossum --- arch/x86/Makefile | 5 + arch/x86/include/asm/kmemcheck.h | 42 +++ arch/x86/include/asm/pgtable.h | 9 + arch/x86/include/asm/pgtable_types.h | 4 +- arch/x86/mm/Makefile | 2 + arch/x86/mm/kmemcheck/Makefile | 1 + arch/x86/mm/kmemcheck/error.c | 229 ++++++++++++ arch/x86/mm/kmemcheck/error.h | 15 + arch/x86/mm/kmemcheck/kmemcheck.c | 650 +++++++++++++++++++++++++++++++++++ arch/x86/mm/kmemcheck/opcode.c | 101 ++++++ arch/x86/mm/kmemcheck/opcode.h | 9 + arch/x86/mm/kmemcheck/pte.c | 22 ++ arch/x86/mm/kmemcheck/pte.h | 10 + arch/x86/mm/kmemcheck/shadow.c | 153 +++++++++ arch/x86/mm/kmemcheck/shadow.h | 16 + include/linux/kmemcheck.h | 17 + include/linux/mm_types.h | 8 + init/main.c | 1 + kernel/sysctl.c | 12 + 19 files changed, 1304 insertions(+), 2 deletions(-) create mode 100644 arch/x86/include/asm/kmemcheck.h create mode 100644 arch/x86/mm/kmemcheck/Makefile create mode 100644 arch/x86/mm/kmemcheck/error.c create mode 100644 arch/x86/mm/kmemcheck/error.h create mode 100644 arch/x86/mm/kmemcheck/kmemcheck.c create mode 100644 arch/x86/mm/kmemcheck/opcode.c create mode 100644 arch/x86/mm/kmemcheck/opcode.h create mode 100644 arch/x86/mm/kmemcheck/pte.c create mode 100644 arch/x86/mm/kmemcheck/pte.h create mode 100644 arch/x86/mm/kmemcheck/shadow.c create mode 100644 arch/x86/mm/kmemcheck/shadow.h create mode 100644 include/linux/kmemcheck.h (limited to 'kernel') diff --git a/arch/x86/Makefile b/arch/x86/Makefile index edbd0ca62067..1b68659c41b4 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -81,6 +81,11 @@ ifdef CONFIG_CC_STACKPROTECTOR endif endif +# Don't unroll struct assignments with kmemcheck enabled +ifeq ($(CONFIG_KMEMCHECK),y) + KBUILD_CFLAGS += $(call cc-option,-fno-builtin-memcpy) +endif + # Stackpointer is addressed different for 32 bit and 64 bit x86 sp-$(CONFIG_X86_32) := esp sp-$(CONFIG_X86_64) := rsp diff --git a/arch/x86/include/asm/kmemcheck.h b/arch/x86/include/asm/kmemcheck.h new file mode 100644 index 000000000000..ed01518f297e --- /dev/null +++ b/arch/x86/include/asm/kmemcheck.h @@ -0,0 +1,42 @@ +#ifndef ASM_X86_KMEMCHECK_H +#define ASM_X86_KMEMCHECK_H + +#include +#include + +#ifdef CONFIG_KMEMCHECK +bool kmemcheck_active(struct pt_regs *regs); + +void kmemcheck_show(struct pt_regs *regs); +void kmemcheck_hide(struct pt_regs *regs); + +bool kmemcheck_fault(struct pt_regs *regs, + unsigned long address, unsigned long error_code); +bool kmemcheck_trap(struct pt_regs *regs); +#else +static inline bool kmemcheck_active(struct pt_regs *regs) +{ + return false; +} + +static inline void kmemcheck_show(struct pt_regs *regs) +{ +} + +static inline void kmemcheck_hide(struct pt_regs *regs) +{ +} + +static inline bool kmemcheck_fault(struct pt_regs *regs, + unsigned long address, unsigned long error_code) +{ + return false; +} + +static inline bool kmemcheck_trap(struct pt_regs *regs) +{ + return false; +} +#endif /* CONFIG_KMEMCHECK */ + +#endif diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 18ef7ebf2631..c5a08079ad5e 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -317,6 +317,15 @@ static inline int pte_present(pte_t a) return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE); } +static inline int pte_hidden(pte_t x) +{ +#ifdef CONFIG_KMEMCHECK + return pte_flags(x) & _PAGE_HIDDEN; 
+#else + return 0; +#endif +} + static inline int pmd_present(pmd_t pmd) { return pmd_flags(pmd) & _PAGE_PRESENT; diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 4d258ad76a0f..9b5c92140aab 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -18,7 +18,7 @@ #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ #define _PAGE_BIT_UNUSED1 9 /* available for programmer */ #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */ -#define _PAGE_BIT_UNUSED3 11 +#define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */ #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */ #define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1 #define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1 @@ -41,7 +41,7 @@ #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL) #define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1) #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP) -#define _PAGE_UNUSED3 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3) +#define _PAGE_HIDDEN (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN) #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT) #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE) #define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL) diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index fdd30d08ab52..eefdeee8a871 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -10,6 +10,8 @@ obj-$(CONFIG_X86_PTDUMP) += dump_pagetables.o obj-$(CONFIG_HIGHMEM) += highmem_32.o +obj-$(CONFIG_KMEMCHECK) += kmemcheck/ + obj-$(CONFIG_MMIOTRACE) += mmiotrace.o mmiotrace-y := kmmio.o pf_in.o mmio-mod.o obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o diff --git a/arch/x86/mm/kmemcheck/Makefile b/arch/x86/mm/kmemcheck/Makefile new file mode 100644 index 000000000000..4666b7a778be --- /dev/null +++ b/arch/x86/mm/kmemcheck/Makefile @@ -0,0 +1 @@ +obj-y := error.o kmemcheck.o opcode.o pte.o shadow.o diff --git a/arch/x86/mm/kmemcheck/error.c b/arch/x86/mm/kmemcheck/error.c new file mode 100644 index 000000000000..5ec9f5a93f47 --- /dev/null +++ b/arch/x86/mm/kmemcheck/error.c @@ -0,0 +1,229 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "error.h" +#include "shadow.h" + +enum kmemcheck_error_type { + KMEMCHECK_ERROR_INVALID_ACCESS, + KMEMCHECK_ERROR_BUG, +}; + +#define SHADOW_COPY_SIZE (1 << CONFIG_KMEMCHECK_SHADOW_COPY_SHIFT) + +struct kmemcheck_error { + enum kmemcheck_error_type type; + + union { + /* KMEMCHECK_ERROR_INVALID_ACCESS */ + struct { + /* Kind of access that caused the error */ + enum kmemcheck_shadow state; + /* Address and size of the erroneous read */ + unsigned long address; + unsigned int size; + }; + }; + + struct pt_regs regs; + struct stack_trace trace; + unsigned long trace_entries[32]; + + /* We compress it to a char. */ + unsigned char shadow_copy[SHADOW_COPY_SIZE]; + unsigned char memory_copy[SHADOW_COPY_SIZE]; +}; + +/* + * Create a ring queue of errors to output. We can't call printk() directly + * from the kmemcheck traps, since this may call the console drivers and + * result in a recursive fault. 
+ */ +static struct kmemcheck_error error_fifo[CONFIG_KMEMCHECK_QUEUE_SIZE]; +static unsigned int error_count; +static unsigned int error_rd; +static unsigned int error_wr; +static unsigned int error_missed_count; + +static struct kmemcheck_error *error_next_wr(void) +{ + struct kmemcheck_error *e; + + if (error_count == ARRAY_SIZE(error_fifo)) { + ++error_missed_count; + return NULL; + } + + e = &error_fifo[error_wr]; + if (++error_wr == ARRAY_SIZE(error_fifo)) + error_wr = 0; + ++error_count; + return e; +} + +static struct kmemcheck_error *error_next_rd(void) +{ + struct kmemcheck_error *e; + + if (error_count == 0) + return NULL; + + e = &error_fifo[error_rd]; + if (++error_rd == ARRAY_SIZE(error_fifo)) + error_rd = 0; + --error_count; + return e; +} + +static void do_wakeup(unsigned long); +static DECLARE_TASKLET(kmemcheck_tasklet, &do_wakeup, 0); + +/* + * Save the context of an error report. + */ +void kmemcheck_error_save(enum kmemcheck_shadow state, + unsigned long address, unsigned int size, struct pt_regs *regs) +{ + static unsigned long prev_ip; + + struct kmemcheck_error *e; + void *shadow_copy; + void *memory_copy; + + /* Don't report several adjacent errors from the same EIP. */ + if (regs->ip == prev_ip) + return; + prev_ip = regs->ip; + + e = error_next_wr(); + if (!e) + return; + + e->type = KMEMCHECK_ERROR_INVALID_ACCESS; + + e->state = state; + e->address = address; + e->size = size; + + /* Save regs */ + memcpy(&e->regs, regs, sizeof(*regs)); + + /* Save stack trace */ + e->trace.nr_entries = 0; + e->trace.entries = e->trace_entries; + e->trace.max_entries = ARRAY_SIZE(e->trace_entries); + e->trace.skip = 0; + save_stack_trace_bp(&e->trace, regs->bp); + + /* Round address down to nearest 16 bytes */ + shadow_copy = kmemcheck_shadow_lookup(address + & ~(SHADOW_COPY_SIZE - 1)); + BUG_ON(!shadow_copy); + + memcpy(e->shadow_copy, shadow_copy, SHADOW_COPY_SIZE); + + kmemcheck_show_addr(address); + memory_copy = (void *) (address & ~(SHADOW_COPY_SIZE - 1)); + memcpy(e->memory_copy, memory_copy, SHADOW_COPY_SIZE); + kmemcheck_hide_addr(address); + + tasklet_hi_schedule_first(&kmemcheck_tasklet); +} + +/* + * Save the context of a kmemcheck bug. + */ +void kmemcheck_error_save_bug(struct pt_regs *regs) +{ + struct kmemcheck_error *e; + + e = error_next_wr(); + if (!e) + return; + + e->type = KMEMCHECK_ERROR_BUG; + + memcpy(&e->regs, regs, sizeof(*regs)); + + e->trace.nr_entries = 0; + e->trace.entries = e->trace_entries; + e->trace.max_entries = ARRAY_SIZE(e->trace_entries); + e->trace.skip = 1; + save_stack_trace(&e->trace); + + tasklet_hi_schedule_first(&kmemcheck_tasklet); +} + +void kmemcheck_error_recall(void) +{ + static const char *desc[] = { + [KMEMCHECK_SHADOW_UNALLOCATED] = "unallocated", + [KMEMCHECK_SHADOW_UNINITIALIZED] = "uninitialized", + [KMEMCHECK_SHADOW_INITIALIZED] = "initialized", + [KMEMCHECK_SHADOW_FREED] = "freed", + }; + + static const char short_desc[] = { + [KMEMCHECK_SHADOW_UNALLOCATED] = 'a', + [KMEMCHECK_SHADOW_UNINITIALIZED] = 'u', + [KMEMCHECK_SHADOW_INITIALIZED] = 'i', + [KMEMCHECK_SHADOW_FREED] = 'f', + }; + + struct kmemcheck_error *e; + unsigned int i; + + e = error_next_rd(); + if (!e) + return; + + switch (e->type) { + case KMEMCHECK_ERROR_INVALID_ACCESS: + printk(KERN_ERR "WARNING: kmemcheck: Caught %d-bit read " + "from %s memory (%p)\n", + 8 * e->size, e->state < ARRAY_SIZE(desc) ? 
+ desc[e->state] : "(invalid shadow state)", + (void *) e->address); + + printk(KERN_INFO); + for (i = 0; i < SHADOW_COPY_SIZE; ++i) + printk("%02x", e->memory_copy[i]); + printk("\n"); + + printk(KERN_INFO); + for (i = 0; i < SHADOW_COPY_SIZE; ++i) { + if (e->shadow_copy[i] < ARRAY_SIZE(short_desc)) + printk(" %c", short_desc[e->shadow_copy[i]]); + else + printk(" ?"); + } + printk("\n"); + printk(KERN_INFO "%*c\n", 2 + 2 + * (int) (e->address & (SHADOW_COPY_SIZE - 1)), '^'); + break; + case KMEMCHECK_ERROR_BUG: + printk(KERN_EMERG "ERROR: kmemcheck: Fatal error\n"); + break; + } + + __show_regs(&e->regs, 1); + print_stack_trace(&e->trace, 0); +} + +static void do_wakeup(unsigned long data) +{ + while (error_count > 0) + kmemcheck_error_recall(); + + if (error_missed_count > 0) { + printk(KERN_WARNING "kmemcheck: Lost %d error reports because " + "the queue was too small\n", error_missed_count); + error_missed_count = 0; + } +} diff --git a/arch/x86/mm/kmemcheck/error.h b/arch/x86/mm/kmemcheck/error.h new file mode 100644 index 000000000000..0efc2e8d0a20 --- /dev/null +++ b/arch/x86/mm/kmemcheck/error.h @@ -0,0 +1,15 @@ +#ifndef ARCH__X86__MM__KMEMCHECK__ERROR_H +#define ARCH__X86__MM__KMEMCHECK__ERROR_H + +#include + +#include "shadow.h" + +void kmemcheck_error_save(enum kmemcheck_shadow state, + unsigned long address, unsigned int size, struct pt_regs *regs); + +void kmemcheck_error_save_bug(struct pt_regs *regs); + +void kmemcheck_error_recall(void); + +#endif diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c new file mode 100644 index 000000000000..9de7d8f6b6e1 --- /dev/null +++ b/arch/x86/mm/kmemcheck/kmemcheck.c @@ -0,0 +1,650 @@ +/** + * kmemcheck - a heavyweight memory checker for the linux kernel + * Copyright (C) 2007, 2008 Vegard Nossum + * (With a lot of help from Ingo Molnar and Pekka Enberg.) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2) as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "error.h" +#include "opcode.h" +#include "pte.h" +#include "shadow.h" + +#ifdef CONFIG_KMEMCHECK_DISABLED_BY_DEFAULT +# define KMEMCHECK_ENABLED 0 +#endif + +#ifdef CONFIG_KMEMCHECK_ENABLED_BY_DEFAULT +# define KMEMCHECK_ENABLED 1 +#endif + +#ifdef CONFIG_KMEMCHECK_ONESHOT_BY_DEFAULT +# define KMEMCHECK_ENABLED 2 +#endif + +int kmemcheck_enabled = KMEMCHECK_ENABLED; + +int __init kmemcheck_init(void) +{ + printk(KERN_INFO "kmemcheck: \"Bugs, beware!\"\n"); + +#ifdef CONFIG_SMP + /* + * Limit SMP to use a single CPU. We rely on the fact that this code + * runs before SMP is set up. + */ + if (setup_max_cpus > 1) { + printk(KERN_INFO + "kmemcheck: Limiting number of CPUs to 1.\n"); + setup_max_cpus = 1; + } +#endif + + return 0; +} + +early_initcall(kmemcheck_init); + +#ifdef CONFIG_KMEMCHECK_DISABLED_BY_DEFAULT +int kmemcheck_enabled = 0; +#endif + +#ifdef CONFIG_KMEMCHECK_ENABLED_BY_DEFAULT +int kmemcheck_enabled = 1; +#endif + +#ifdef CONFIG_KMEMCHECK_ONESHOT_BY_DEFAULT +int kmemcheck_enabled = 2; +#endif + +/* + * We need to parse the kmemcheck= option before any memory is allocated. 
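+ * Accepted values follow the Kconfig defaults: 0 (disabled), 1 (enabled)
+ * and 2 (one-shot: kmemcheck disables itself again after the first report).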
+ */ +static int __init param_kmemcheck(char *str) +{ + if (!str) + return -EINVAL; + + sscanf(str, "%d", &kmemcheck_enabled); + return 0; +} + +early_param("kmemcheck", param_kmemcheck); + +int kmemcheck_show_addr(unsigned long address) +{ + pte_t *pte; + + pte = kmemcheck_pte_lookup(address); + if (!pte) + return 0; + + set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT)); + __flush_tlb_one(address); + return 1; +} + +int kmemcheck_hide_addr(unsigned long address) +{ + pte_t *pte; + + pte = kmemcheck_pte_lookup(address); + if (!pte) + return 0; + + set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT)); + __flush_tlb_one(address); + return 1; +} + +struct kmemcheck_context { + bool busy; + int balance; + + /* + * There can be at most two memory operands to an instruction, but + * each address can cross a page boundary -- so we may need up to + * four addresses that must be hidden/revealed for each fault. + */ + unsigned long addr[4]; + unsigned long n_addrs; + unsigned long flags; + + /* Data size of the instruction that caused a fault. */ + unsigned int size; +}; + +static DEFINE_PER_CPU(struct kmemcheck_context, kmemcheck_context); + +bool kmemcheck_active(struct pt_regs *regs) +{ + struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); + + return data->balance > 0; +} + +/* Save an address that needs to be shown/hidden */ +static void kmemcheck_save_addr(unsigned long addr) +{ + struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); + + BUG_ON(data->n_addrs >= ARRAY_SIZE(data->addr)); + data->addr[data->n_addrs++] = addr; +} + +static unsigned int kmemcheck_show_all(void) +{ + struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); + unsigned int i; + unsigned int n; + + n = 0; + for (i = 0; i < data->n_addrs; ++i) + n += kmemcheck_show_addr(data->addr[i]); + + return n; +} + +static unsigned int kmemcheck_hide_all(void) +{ + struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); + unsigned int i; + unsigned int n; + + n = 0; + for (i = 0; i < data->n_addrs; ++i) + n += kmemcheck_hide_addr(data->addr[i]); + + return n; +} + +/* + * Called from the #PF handler. + */ +void kmemcheck_show(struct pt_regs *regs) +{ + struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); + + BUG_ON(!irqs_disabled()); + + if (unlikely(data->balance != 0)) { + kmemcheck_show_all(); + kmemcheck_error_save_bug(regs); + data->balance = 0; + return; + } + + /* + * None of the addresses actually belonged to kmemcheck. Note that + * this is not an error. + */ + if (kmemcheck_show_all() == 0) + return; + + ++data->balance; + + /* + * The IF needs to be cleared as well, so that the faulting + * instruction can run "uninterrupted". Otherwise, we might take + * an interrupt and start executing that before we've had a chance + * to hide the page again. + * + * NOTE: In the rare case of multiple faults, we must not override + * the original flags: + */ + if (!(regs->flags & X86_EFLAGS_TF)) + data->flags = regs->flags; + + regs->flags |= X86_EFLAGS_TF; + regs->flags &= ~X86_EFLAGS_IF; +} + +/* + * Called from the #DB handler. 
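+ * That is, the single-step (TF) trap that fires right after the faulting
+ * instruction was allowed to execute with its page(s) temporarily present.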
+ */ +void kmemcheck_hide(struct pt_regs *regs) +{ + struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); + int n; + + BUG_ON(!irqs_disabled()); + + if (data->balance == 0) + return; + + if (unlikely(data->balance != 1)) { + kmemcheck_show_all(); + kmemcheck_error_save_bug(regs); + data->n_addrs = 0; + data->balance = 0; + + if (!(data->flags & X86_EFLAGS_TF)) + regs->flags &= ~X86_EFLAGS_TF; + if (data->flags & X86_EFLAGS_IF) + regs->flags |= X86_EFLAGS_IF; + return; + } + + if (kmemcheck_enabled) + n = kmemcheck_hide_all(); + else + n = kmemcheck_show_all(); + + if (n == 0) + return; + + --data->balance; + + data->n_addrs = 0; + + if (!(data->flags & X86_EFLAGS_TF)) + regs->flags &= ~X86_EFLAGS_TF; + if (data->flags & X86_EFLAGS_IF) + regs->flags |= X86_EFLAGS_IF; +} + +void kmemcheck_show_pages(struct page *p, unsigned int n) +{ + unsigned int i; + + for (i = 0; i < n; ++i) { + unsigned long address; + pte_t *pte; + unsigned int level; + + address = (unsigned long) page_address(&p[i]); + pte = lookup_address(address, &level); + BUG_ON(!pte); + BUG_ON(level != PG_LEVEL_4K); + + set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT)); + set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_HIDDEN)); + __flush_tlb_one(address); + } +} + +bool kmemcheck_page_is_tracked(struct page *p) +{ + /* This will also check the "hidden" flag of the PTE. */ + return kmemcheck_pte_lookup((unsigned long) page_address(p)); +} + +void kmemcheck_hide_pages(struct page *p, unsigned int n) +{ + unsigned int i; + + for (i = 0; i < n; ++i) { + unsigned long address; + pte_t *pte; + unsigned int level; + + address = (unsigned long) page_address(&p[i]); + pte = lookup_address(address, &level); + BUG_ON(!pte); + BUG_ON(level != PG_LEVEL_4K); + + set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT)); + set_pte(pte, __pte(pte_val(*pte) | _PAGE_HIDDEN)); + __flush_tlb_one(address); + } +} + +/* Access may NOT cross page boundary */ +static void kmemcheck_read_strict(struct pt_regs *regs, + unsigned long addr, unsigned int size) +{ + void *shadow; + enum kmemcheck_shadow status; + + shadow = kmemcheck_shadow_lookup(addr); + if (!shadow) + return; + + kmemcheck_save_addr(addr); + status = kmemcheck_shadow_test(shadow, size); + if (status == KMEMCHECK_SHADOW_INITIALIZED) + return; + + if (kmemcheck_enabled) + kmemcheck_error_save(status, addr, size, regs); + + if (kmemcheck_enabled == 2) + kmemcheck_enabled = 0; + + /* Don't warn about it again. */ + kmemcheck_shadow_set(shadow, size); +} + +/* Access may cross page boundary */ +static void kmemcheck_read(struct pt_regs *regs, + unsigned long addr, unsigned int size) +{ + unsigned long page = addr & PAGE_MASK; + unsigned long next_addr = addr + size - 1; + unsigned long next_page = next_addr & PAGE_MASK; + + if (likely(page == next_page)) { + kmemcheck_read_strict(regs, addr, size); + return; + } + + /* + * What we do is basically to split the access across the + * two pages and handle each part separately. Yes, this means + * that we may now see reads that are 3 + 5 bytes, for + * example (and if both are uninitialized, there will be two + * reports), but it makes the code a lot simpler. 
+ */ + kmemcheck_read_strict(regs, addr, next_page - addr); + kmemcheck_read_strict(regs, next_page, next_addr - next_page); +} + +static void kmemcheck_write_strict(struct pt_regs *regs, + unsigned long addr, unsigned int size) +{ + void *shadow; + + shadow = kmemcheck_shadow_lookup(addr); + if (!shadow) + return; + + kmemcheck_save_addr(addr); + kmemcheck_shadow_set(shadow, size); +} + +static void kmemcheck_write(struct pt_regs *regs, + unsigned long addr, unsigned int size) +{ + unsigned long page = addr & PAGE_MASK; + unsigned long next_addr = addr + size - 1; + unsigned long next_page = next_addr & PAGE_MASK; + + if (likely(page == next_page)) { + kmemcheck_write_strict(regs, addr, size); + return; + } + + /* See comment in kmemcheck_read(). */ + kmemcheck_write_strict(regs, addr, next_page - addr); + kmemcheck_write_strict(regs, next_page, next_addr - next_page); +} + +/* + * Copying is hard. We have two addresses, each of which may be split across + * a page (and each page will have different shadow addresses). + */ +static void kmemcheck_copy(struct pt_regs *regs, + unsigned long src_addr, unsigned long dst_addr, unsigned int size) +{ + uint8_t shadow[8]; + enum kmemcheck_shadow status; + + unsigned long page; + unsigned long next_addr; + unsigned long next_page; + + uint8_t *x; + unsigned int i; + unsigned int n; + + BUG_ON(size > sizeof(shadow)); + + page = src_addr & PAGE_MASK; + next_addr = src_addr + size - 1; + next_page = next_addr & PAGE_MASK; + + if (likely(page == next_page)) { + /* Same page */ + x = kmemcheck_shadow_lookup(src_addr); + if (x) { + kmemcheck_save_addr(src_addr); + for (i = 0; i < size; ++i) + shadow[i] = x[i]; + } else { + for (i = 0; i < size; ++i) + shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; + } + } else { + n = next_page - src_addr; + BUG_ON(n > sizeof(shadow)); + + /* First page */ + x = kmemcheck_shadow_lookup(src_addr); + if (x) { + kmemcheck_save_addr(src_addr); + for (i = 0; i < n; ++i) + shadow[i] = x[i]; + } else { + /* Not tracked */ + for (i = 0; i < n; ++i) + shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; + } + + /* Second page */ + x = kmemcheck_shadow_lookup(next_page); + if (x) { + kmemcheck_save_addr(next_page); + for (i = n; i < size; ++i) + shadow[i] = x[i - n]; + } else { + /* Not tracked */ + for (i = n; i < size; ++i) + shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; + } + } + + page = dst_addr & PAGE_MASK; + next_addr = dst_addr + size - 1; + next_page = next_addr & PAGE_MASK; + + if (likely(page == next_page)) { + /* Same page */ + x = kmemcheck_shadow_lookup(dst_addr); + if (x) { + kmemcheck_save_addr(dst_addr); + for (i = 0; i < size; ++i) { + x[i] = shadow[i]; + shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; + } + } + } else { + n = next_page - dst_addr; + BUG_ON(n > sizeof(shadow)); + + /* First page */ + x = kmemcheck_shadow_lookup(dst_addr); + if (x) { + kmemcheck_save_addr(dst_addr); + for (i = 0; i < n; ++i) { + x[i] = shadow[i]; + shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; + } + } + + /* Second page */ + x = kmemcheck_shadow_lookup(next_page); + if (x) { + kmemcheck_save_addr(next_page); + for (i = n; i < size; ++i) { + x[i - n] = shadow[i]; + shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; + } + } + } + + status = kmemcheck_shadow_test(shadow, size); + if (status == KMEMCHECK_SHADOW_INITIALIZED) + return; + + if (kmemcheck_enabled) + kmemcheck_error_save(status, src_addr, size, regs); + + if (kmemcheck_enabled == 2) + kmemcheck_enabled = 0; +} + +enum kmemcheck_method { + KMEMCHECK_READ, + KMEMCHECK_WRITE, +}; + +static void 
kmemcheck_access(struct pt_regs *regs, + unsigned long fallback_address, enum kmemcheck_method fallback_method) +{ + const uint8_t *insn; + const uint8_t *insn_primary; + unsigned int size; + + struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); + + /* Recursive fault -- ouch. */ + if (data->busy) { + kmemcheck_show_addr(fallback_address); + kmemcheck_error_save_bug(regs); + return; + } + + data->busy = true; + + insn = (const uint8_t *) regs->ip; + insn_primary = kmemcheck_opcode_get_primary(insn); + + kmemcheck_opcode_decode(insn, &size); + + switch (insn_primary[0]) { +#ifdef CONFIG_KMEMCHECK_BITOPS_OK + /* AND, OR, XOR */ + /* + * Unfortunately, these instructions have to be excluded from + * our regular checking since they access only some (and not + * all) bits. This clears out "bogus" bitfield-access warnings. + */ + case 0x80: + case 0x81: + case 0x82: + case 0x83: + switch ((insn_primary[1] >> 3) & 7) { + /* OR */ + case 1: + /* AND */ + case 4: + /* XOR */ + case 6: + kmemcheck_write(regs, fallback_address, size); + goto out; + + /* ADD */ + case 0: + /* ADC */ + case 2: + /* SBB */ + case 3: + /* SUB */ + case 5: + /* CMP */ + case 7: + break; + } + break; +#endif + + /* MOVS, MOVSB, MOVSW, MOVSD */ + case 0xa4: + case 0xa5: + /* + * These instructions are special because they take two + * addresses, but we only get one page fault. + */ + kmemcheck_copy(regs, regs->si, regs->di, size); + goto out; + + /* CMPS, CMPSB, CMPSW, CMPSD */ + case 0xa6: + case 0xa7: + kmemcheck_read(regs, regs->si, size); + kmemcheck_read(regs, regs->di, size); + goto out; + } + + /* + * If the opcode isn't special in any way, we use the data from the + * page fault handler to determine the address and type of memory + * access. + */ + switch (fallback_method) { + case KMEMCHECK_READ: + kmemcheck_read(regs, fallback_address, size); + goto out; + case KMEMCHECK_WRITE: + kmemcheck_write(regs, fallback_address, size); + goto out; + } + +out: + data->busy = false; +} + +bool kmemcheck_fault(struct pt_regs *regs, unsigned long address, + unsigned long error_code) +{ + pte_t *pte; + unsigned int level; + + /* + * XXX: Is it safe to assume that memory accesses from virtual 86 + * mode or non-kernel code segments will _never_ access kernel + * memory (e.g. tracked pages)? For now, we need this to avoid + * invoking kmemcheck for PnP BIOS calls. + */ + if (regs->flags & X86_VM_MASK) + return false; + if (regs->cs != __KERNEL_CS) + return false; + + pte = lookup_address(address, &level); + if (!pte) + return false; + if (level != PG_LEVEL_4K) + return false; + if (!pte_hidden(*pte)) + return false; + + if (error_code & 2) + kmemcheck_access(regs, address, KMEMCHECK_WRITE); + else + kmemcheck_access(regs, address, KMEMCHECK_READ); + + kmemcheck_show(regs); + return true; +} + +bool kmemcheck_trap(struct pt_regs *regs) +{ + if (!kmemcheck_active(regs)) + return false; + + /* We're done. 
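+	   The instruction we single-stepped has now executed; hide the
+	   page(s) again and restore the saved flags.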
*/ + kmemcheck_hide(regs); + return true; +} diff --git a/arch/x86/mm/kmemcheck/opcode.c b/arch/x86/mm/kmemcheck/opcode.c new file mode 100644 index 000000000000..a4100b6e783a --- /dev/null +++ b/arch/x86/mm/kmemcheck/opcode.c @@ -0,0 +1,101 @@ +#include + +#include "opcode.h" + +static bool opcode_is_prefix(uint8_t b) +{ + return + /* Group 1 */ + b == 0xf0 || b == 0xf2 || b == 0xf3 + /* Group 2 */ + || b == 0x2e || b == 0x36 || b == 0x3e || b == 0x26 + || b == 0x64 || b == 0x65 || b == 0x2e || b == 0x3e + /* Group 3 */ + || b == 0x66 + /* Group 4 */ + || b == 0x67; +} + +static bool opcode_is_rex_prefix(uint8_t b) +{ + return (b & 0xf0) == 0x40; +} + +#define REX_W (1 << 3) + +/* + * This is a VERY crude opcode decoder. We only need to find the size of the + * load/store that caused our #PF and this should work for all the opcodes + * that we care about. Moreover, the ones who invented this instruction set + * should be shot. + */ +void kmemcheck_opcode_decode(const uint8_t *op, unsigned int *size) +{ + /* Default operand size */ + int operand_size_override = 4; + + /* prefixes */ + for (; opcode_is_prefix(*op); ++op) { + if (*op == 0x66) + operand_size_override = 2; + } + +#ifdef CONFIG_X86_64 + /* REX prefix */ + if (opcode_is_rex_prefix(*op)) { + uint8_t rex = *op; + + ++op; + if (rex & REX_W) { + switch (*op) { + case 0x63: + *size = 4; + return; + case 0x0f: + ++op; + + switch (*op) { + case 0xb6: + case 0xbe: + *size = 1; + return; + case 0xb7: + case 0xbf: + *size = 2; + return; + } + + break; + } + + *size = 8; + return; + } + } +#endif + + /* escape opcode */ + if (*op == 0x0f) { + ++op; + + /* + * This is move with zero-extend and sign-extend, respectively; + * we don't have to think about 0xb6/0xbe, because this is + * already handled in the conditional below. + */ + if (*op == 0xb7 || *op == 0xbf) + operand_size_override = 2; + } + + *size = (*op & 1) ? 
operand_size_override : 1; +} + +const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op) +{ + /* skip prefixes */ + while (opcode_is_prefix(*op)) + ++op; + if (opcode_is_rex_prefix(*op)) + ++op; + return op; +} diff --git a/arch/x86/mm/kmemcheck/opcode.h b/arch/x86/mm/kmemcheck/opcode.h new file mode 100644 index 000000000000..6956aad66b5b --- /dev/null +++ b/arch/x86/mm/kmemcheck/opcode.h @@ -0,0 +1,9 @@ +#ifndef ARCH__X86__MM__KMEMCHECK__OPCODE_H +#define ARCH__X86__MM__KMEMCHECK__OPCODE_H + +#include + +void kmemcheck_opcode_decode(const uint8_t *op, unsigned int *size); +const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op); + +#endif diff --git a/arch/x86/mm/kmemcheck/pte.c b/arch/x86/mm/kmemcheck/pte.c new file mode 100644 index 000000000000..4ead26eeaf96 --- /dev/null +++ b/arch/x86/mm/kmemcheck/pte.c @@ -0,0 +1,22 @@ +#include + +#include + +#include "pte.h" + +pte_t *kmemcheck_pte_lookup(unsigned long address) +{ + pte_t *pte; + unsigned int level; + + pte = lookup_address(address, &level); + if (!pte) + return NULL; + if (level != PG_LEVEL_4K) + return NULL; + if (!pte_hidden(*pte)) + return NULL; + + return pte; +} + diff --git a/arch/x86/mm/kmemcheck/pte.h b/arch/x86/mm/kmemcheck/pte.h new file mode 100644 index 000000000000..9f5966456492 --- /dev/null +++ b/arch/x86/mm/kmemcheck/pte.h @@ -0,0 +1,10 @@ +#ifndef ARCH__X86__MM__KMEMCHECK__PTE_H +#define ARCH__X86__MM__KMEMCHECK__PTE_H + +#include + +#include + +pte_t *kmemcheck_pte_lookup(unsigned long address); + +#endif diff --git a/arch/x86/mm/kmemcheck/shadow.c b/arch/x86/mm/kmemcheck/shadow.c new file mode 100644 index 000000000000..5544d3600877 --- /dev/null +++ b/arch/x86/mm/kmemcheck/shadow.c @@ -0,0 +1,153 @@ +#include +#include +#include + +#include +#include + +#include "pte.h" +#include "shadow.h" + +/* + * Return the shadow address for the given address. Returns NULL if the + * address is not tracked. + * + * We need to be extremely careful not to follow any invalid pointers, + * because this function can be called for *any* possible address. + */ +void *kmemcheck_shadow_lookup(unsigned long address) +{ + pte_t *pte; + struct page *page; + + if (!virt_addr_valid(address)) + return NULL; + + pte = kmemcheck_pte_lookup(address); + if (!pte) + return NULL; + + page = virt_to_page(address); + if (!page->shadow) + return NULL; + return page->shadow + (address & (PAGE_SIZE - 1)); +} + +static void mark_shadow(void *address, unsigned int n, + enum kmemcheck_shadow status) +{ + unsigned long addr = (unsigned long) address; + unsigned long last_addr = addr + n - 1; + unsigned long page = addr & PAGE_MASK; + unsigned long last_page = last_addr & PAGE_MASK; + unsigned int first_n; + void *shadow; + + /* If the memory range crosses a page boundary, stop there. */ + if (page == last_page) + first_n = n; + else + first_n = page + PAGE_SIZE - addr; + + shadow = kmemcheck_shadow_lookup(addr); + if (shadow) + memset(shadow, status, first_n); + + addr += first_n; + n -= first_n; + + /* Do full-page memset()s. */ + while (n >= PAGE_SIZE) { + shadow = kmemcheck_shadow_lookup(addr); + if (shadow) + memset(shadow, status, PAGE_SIZE); + + addr += PAGE_SIZE; + n -= PAGE_SIZE; + } + + /* Do the remaining page, if any. 
*/ + if (n > 0) { + shadow = kmemcheck_shadow_lookup(addr); + if (shadow) + memset(shadow, status, n); + } +} + +void kmemcheck_mark_unallocated(void *address, unsigned int n) +{ + mark_shadow(address, n, KMEMCHECK_SHADOW_UNALLOCATED); +} + +void kmemcheck_mark_uninitialized(void *address, unsigned int n) +{ + mark_shadow(address, n, KMEMCHECK_SHADOW_UNINITIALIZED); +} + +/* + * Fill the shadow memory of the given address such that the memory at that + * address is marked as being initialized. + */ +void kmemcheck_mark_initialized(void *address, unsigned int n) +{ + mark_shadow(address, n, KMEMCHECK_SHADOW_INITIALIZED); +} +EXPORT_SYMBOL_GPL(kmemcheck_mark_initialized); + +void kmemcheck_mark_freed(void *address, unsigned int n) +{ + mark_shadow(address, n, KMEMCHECK_SHADOW_FREED); +} + +void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n) +{ + unsigned int i; + + for (i = 0; i < n; ++i) + kmemcheck_mark_unallocated(page_address(&p[i]), PAGE_SIZE); +} + +void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n) +{ + unsigned int i; + + for (i = 0; i < n; ++i) + kmemcheck_mark_uninitialized(page_address(&p[i]), PAGE_SIZE); +} + +enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size) +{ + uint8_t *x; + unsigned int i; + + x = shadow; + +#ifdef CONFIG_KMEMCHECK_PARTIAL_OK + /* + * Make sure _some_ bytes are initialized. Gcc frequently generates + * code to access neighboring bytes. + */ + for (i = 0; i < size; ++i) { + if (x[i] == KMEMCHECK_SHADOW_INITIALIZED) + return x[i]; + } +#else + /* All bytes must be initialized. */ + for (i = 0; i < size; ++i) { + if (x[i] != KMEMCHECK_SHADOW_INITIALIZED) + return x[i]; + } +#endif + + return x[0]; +} + +void kmemcheck_shadow_set(void *shadow, unsigned int size) +{ + uint8_t *x; + unsigned int i; + + x = shadow; + for (i = 0; i < size; ++i) + x[i] = KMEMCHECK_SHADOW_INITIALIZED; +} diff --git a/arch/x86/mm/kmemcheck/shadow.h b/arch/x86/mm/kmemcheck/shadow.h new file mode 100644 index 000000000000..af46d9ab9d86 --- /dev/null +++ b/arch/x86/mm/kmemcheck/shadow.h @@ -0,0 +1,16 @@ +#ifndef ARCH__X86__MM__KMEMCHECK__SHADOW_H +#define ARCH__X86__MM__KMEMCHECK__SHADOW_H + +enum kmemcheck_shadow { + KMEMCHECK_SHADOW_UNALLOCATED, + KMEMCHECK_SHADOW_UNINITIALIZED, + KMEMCHECK_SHADOW_INITIALIZED, + KMEMCHECK_SHADOW_FREED, +}; + +void *kmemcheck_shadow_lookup(unsigned long address); + +enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size); +void kmemcheck_shadow_set(void *shadow, unsigned int size); + +#endif diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h new file mode 100644 index 000000000000..39480c91b2f9 --- /dev/null +++ b/include/linux/kmemcheck.h @@ -0,0 +1,17 @@ +#ifndef LINUX_KMEMCHECK_H +#define LINUX_KMEMCHECK_H + +#include +#include + +#ifdef CONFIG_KMEMCHECK +extern int kmemcheck_enabled; + +int kmemcheck_show_addr(unsigned long address); +int kmemcheck_hide_addr(unsigned long address); +#else +#define kmemcheck_enabled 0 + +#endif /* CONFIG_KMEMCHECK */ + +#endif /* LINUX_KMEMCHECK_H */ diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 0e80e26ecf21..0042090a4d70 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -98,6 +98,14 @@ struct page { #ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS unsigned long debug_flags; /* Use atomic bitops on this */ #endif + +#ifdef CONFIG_KMEMCHECK + /* + * kmemcheck wants to track the status of each byte in a page; this + * is a pointer to such a status block. NULL if not tracked. 
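+	 * There is one status byte per byte of data; see
+	 * kmemcheck_shadow_lookup() in arch/x86/mm/kmemcheck/shadow.c.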
+ */ + void *shadow; +#endif }; /* diff --git a/init/main.c b/init/main.c index 5616661eac01..e3c335e47cd2 100644 --- a/init/main.c +++ b/init/main.c @@ -65,6 +65,7 @@ #include #include #include +#include #include #include diff --git a/kernel/sysctl.c b/kernel/sysctl.c index ce664f98e3fb..9ef80bba3509 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -959,6 +960,17 @@ static struct ctl_table kern_table[] = { .proc_handler = &proc_dointvec, }, #endif +#ifdef CONFIG_KMEMCHECK + { + .ctl_name = CTL_UNNUMBERED, + .procname = "kmemcheck", + .data = &kmemcheck_enabled, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, +#endif + /* * NOTE: do not add new entries to this table unless you have read * Documentation/sysctl/ctl_unnumbered.txt -- cgit v1.2.3 From 2dff440525f8faba8836e9f05297b76f23b4af30 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Sat, 31 May 2008 15:56:17 +0200 Subject: kmemcheck: add mm functions With kmemcheck enabled, the slab allocator needs to do this: 1. Tell kmemcheck to allocate the shadow memory which stores the status of each byte in the allocation proper, e.g. whether it is initialized or uninitialized. 2. Tell kmemcheck which parts of memory that should be marked uninitialized. There are actually a few more states, such as "not yet allocated" and "recently freed". If a slab cache is set up using the SLAB_NOTRACK flag, it will never return memory that can take page faults because of kmemcheck. If a slab cache is NOT set up using the SLAB_NOTRACK flag, callers can still request memory with the __GFP_NOTRACK flag. This does not prevent the page faults from occuring, however, but marks the object in question as being initialized so that no warnings will ever be produced for this object. In addition to (and in contrast to) __GFP_NOTRACK, the __GFP_NOTRACK_FALSE_POSITIVE flag indicates that the allocation should not be tracked _because_ it would produce a false positive. Their values are identical, but need not be so in the future (for example, we could now enable/disable false positives with a config option). Parts of this patch were contributed by Pekka Enberg but merged for atomicity. 
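For illustration only (this snippet is not part of the patch and the "foo"
names are invented), a cache whose objects must never take kmemcheck page
faults passes SLAB_NOTRACK at creation time, while a single allocation on an
otherwise tracked path can opt out of the warnings with __GFP_NOTRACK:

	#include <linux/init.h>
	#include <linux/slab.h>

	struct foo { int bar; };
	static struct kmem_cache *foo_cache;

	void __init foo_cache_init(void)
	{
		/* Objects from this cache never fault into kmemcheck. */
		foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
					      SLAB_PANIC | SLAB_NOTRACK, NULL);
	}

	void *foo_alloc_scratch(size_t len)
	{
		/*
		 * Still tracked (and still takes the tracking page faults),
		 * but the object is marked initialized up front, so it is
		 * never reported as an uninitialized read.
		 */
		return kmalloc(len, GFP_KERNEL | __GFP_NOTRACK);
	}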
Signed-off-by: Vegard Nossum Signed-off-by: Pekka Enberg Signed-off-by: Ingo Molnar [rebased for mainline inclusion] Signed-off-by: Vegard Nossum --- arch/x86/kernel/process.c | 2 +- include/linux/gfp.h | 9 +++- include/linux/kmemcheck.h | 47 +++++++++++++++++++++ include/linux/slab.h | 7 ++++ kernel/fork.c | 14 +++---- mm/Makefile | 1 + mm/kmemcheck.c | 103 ++++++++++++++++++++++++++++++++++++++++++++++ 7 files changed, 174 insertions(+), 9 deletions(-) create mode 100644 mm/kmemcheck.c (limited to 'kernel') diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 3bb2be1649bd..994dd6a4a2a0 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -63,7 +63,7 @@ void arch_task_cache_init(void) task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size, __alignof__(union thread_xstate), - SLAB_PANIC, NULL); + SLAB_PANIC | SLAB_NOTRACK, NULL); } /* diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 0bbc15f54536..daeaa8fe1bbd 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -51,8 +51,15 @@ struct vm_area_struct; #define __GFP_THISNODE ((__force gfp_t)0x40000u)/* No fallback, no policies */ #define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */ #define __GFP_MOVABLE ((__force gfp_t)0x100000u) /* Page is movable */ +#define __GFP_NOTRACK ((__force gfp_t)0x200000u) /* Don't track with kmemcheck */ -#define __GFP_BITS_SHIFT 21 /* Room for 21 __GFP_FOO bits */ +/* + * This may seem redundant, but it's a way of annotating false positives vs. + * allocations that simply cannot be supported (e.g. page tables). + */ +#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK) + +#define __GFP_BITS_SHIFT 22 /* Room for 22 __GFP_FOO bits */ #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) /* This equals 0, but use constants in case they ever change */ diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h index 39480c91b2f9..5b65f4ebeadf 100644 --- a/include/linux/kmemcheck.h +++ b/include/linux/kmemcheck.h @@ -7,11 +7,58 @@ #ifdef CONFIG_KMEMCHECK extern int kmemcheck_enabled; +/* The slab-related functions. 
*/ +void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node, + struct page *page, int order); +void kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order); +void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object, + size_t size); +void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size); + +void kmemcheck_show_pages(struct page *p, unsigned int n); +void kmemcheck_hide_pages(struct page *p, unsigned int n); + +bool kmemcheck_page_is_tracked(struct page *p); + +void kmemcheck_mark_unallocated(void *address, unsigned int n); +void kmemcheck_mark_uninitialized(void *address, unsigned int n); +void kmemcheck_mark_initialized(void *address, unsigned int n); +void kmemcheck_mark_freed(void *address, unsigned int n); + +void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n); +void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n); + int kmemcheck_show_addr(unsigned long address); int kmemcheck_hide_addr(unsigned long address); #else #define kmemcheck_enabled 0 +static inline void +kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node, + struct page *page, int order) +{ +} + +static inline void +kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order) +{ +} + +static inline void +kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object, + size_t size) +{ +} + +static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object, + size_t size) +{ +} + +static inline bool kmemcheck_page_is_tracked(struct page *p) +{ + return false; +} #endif /* CONFIG_KMEMCHECK */ #endif /* LINUX_KMEMCHECK_H */ diff --git a/include/linux/slab.h b/include/linux/slab.h index 48803064cedf..e339fcf17cd3 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -64,6 +64,13 @@ #define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */ +/* Don't track use of uninitialized memory */ +#ifdef CONFIG_KMEMCHECK +# define SLAB_NOTRACK 0x01000000UL +#else +# define SLAB_NOTRACK 0x00000000UL +#endif + /* The following flags affect the page allocator grouping pages by mobility */ #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */ #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ diff --git a/kernel/fork.c b/kernel/fork.c index 4430eb1376f2..be022c200da6 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -178,7 +178,7 @@ void __init fork_init(unsigned long mempages) /* create a slab on which task_structs can be allocated */ task_struct_cachep = kmem_cache_create("task_struct", sizeof(struct task_struct), - ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL); + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL); #endif /* do the arch specific task caches init */ @@ -1470,20 +1470,20 @@ void __init proc_caches_init(void) { sighand_cachep = kmem_cache_create("sighand_cache", sizeof(struct sighand_struct), 0, - SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU, - sighand_ctor); + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU| + SLAB_NOTRACK, sighand_ctor); signal_cachep = kmem_cache_create("signal_cache", sizeof(struct signal_struct), 0, - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL); files_cachep = kmem_cache_create("files_cache", sizeof(struct files_struct), 0, - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL); fs_cachep = kmem_cache_create("fs_cache", sizeof(struct fs_struct), 0, - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); + 
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL); mm_cachep = kmem_cache_create("mm_struct", sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN, - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL); vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC); mmap_init(); } diff --git a/mm/Makefile b/mm/Makefile index e89acb090b4d..c379ce08354a 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -27,6 +27,7 @@ obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o obj-$(CONFIG_SLAB) += slab.o obj-$(CONFIG_SLUB) += slub.o +obj-$(CONFIG_KMEMCHECK) += kmemcheck.o obj-$(CONFIG_FAILSLAB) += failslab.o obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o obj-$(CONFIG_FS_XIP) += filemap_xip.o diff --git a/mm/kmemcheck.c b/mm/kmemcheck.c new file mode 100644 index 000000000000..eaa41b802611 --- /dev/null +++ b/mm/kmemcheck.c @@ -0,0 +1,103 @@ +#include +#include +#include +#include + +void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node, + struct page *page, int order) +{ + struct page *shadow; + int pages; + int i; + + pages = 1 << order; + + /* + * With kmemcheck enabled, we need to allocate a memory area for the + * shadow bits as well. + */ + shadow = alloc_pages_node(node, flags, order); + if (!shadow) { + if (printk_ratelimit()) + printk(KERN_ERR "kmemcheck: failed to allocate " + "shadow bitmap\n"); + return; + } + + for(i = 0; i < pages; ++i) + page[i].shadow = page_address(&shadow[i]); + + /* + * Mark it as non-present for the MMU so that our accesses to + * this memory will trigger a page fault and let us analyze + * the memory accesses. + */ + kmemcheck_hide_pages(page, pages); + + /* + * Objects from caches that have a constructor don't get + * cleared when they're allocated, so we need to do it here. + */ + if (s->ctor) + kmemcheck_mark_uninitialized_pages(page, pages); + else + kmemcheck_mark_unallocated_pages(page, pages); +} + +void kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order) +{ + struct page *shadow; + int pages; + int i; + + pages = 1 << order; + + kmemcheck_show_pages(page, pages); + + shadow = virt_to_page(page[0].shadow); + + for(i = 0; i < pages; ++i) + page[i].shadow = NULL; + + __free_pages(shadow, order); +} + +void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object, + size_t size) +{ + /* + * Has already been memset(), which initializes the shadow for us + * as well. + */ + if (gfpflags & __GFP_ZERO) + return; + + /* No need to initialize the shadow of a non-tracked slab. */ + if (s->flags & SLAB_NOTRACK) + return; + + if (!kmemcheck_enabled || gfpflags & __GFP_NOTRACK) { + /* + * Allow notracked objects to be allocated from + * tracked caches. Note however that these objects + * will still get page faults on access, they just + * won't ever be flagged as uninitialized. If page + * faults are not acceptable, the slab cache itself + * should be marked NOTRACK. + */ + kmemcheck_mark_initialized(object, size); + } else if (!s->ctor) { + /* + * New objects should be marked uninitialized before + * they're returned to the called. + */ + kmemcheck_mark_uninitialized(object, size); + } +} + +void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size) +{ + /* TODO: RCU freeing is unsupported for now; hide false positives. 
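+	   Objects from SLAB_DESTROY_BY_RCU caches may legitimately be read
+	   after kmem_cache_free() until the RCU grace period has elapsed, so
+	   marking them freed here would trigger bogus reports.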
*/ + if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU)) + kmemcheck_mark_freed(object, size); +} -- cgit v1.2.3 From 1744a21d57d9c60136461adb6afa85e51b3e94d9 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Sat, 28 Feb 2009 08:29:44 +0100 Subject: trace: annotate bitfields in struct ring_buffer_event This gets rid of a heap of false-positive warnings from the tracer code due to the use of bitfields. [rebased for mainline inclusion] Signed-off-by: Vegard Nossum --- include/linux/ring_buffer.h | 4 ++++ kernel/trace/ring_buffer.c | 3 +++ 2 files changed, 7 insertions(+) (limited to 'kernel') diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 8670f1575fe1..29f8599e6bea 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -1,6 +1,7 @@ #ifndef _LINUX_RING_BUFFER_H #define _LINUX_RING_BUFFER_H +#include #include #include @@ -11,7 +12,10 @@ struct ring_buffer_iter; * Don't refer to this struct directly, use functions below. */ struct ring_buffer_event { + kmemcheck_bitfield_begin(bitfield); u32 type_len:5, time_delta:27; + kmemcheck_bitfield_end(bitfield); + u32 array[]; }; diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 2e642b2b7253..dc4dc70171ce 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -1270,6 +1271,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, if (tail < BUF_PAGE_SIZE) { /* Mark the rest of the page with padding */ event = __rb_page_index(tail_page, tail); + kmemcheck_annotate_bitfield(event, bitfield); rb_event_set_padding(event); } @@ -1327,6 +1329,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, return NULL; event = __rb_page_index(tail_page, tail); + kmemcheck_annotate_bitfield(event, bitfield); rb_update_event(event, type, length); /* The passed in type is zero for DATA */ -- cgit v1.2.3 From 7a0aeb14e18ad59394bd9bbc6e57fb345819e748 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Sat, 16 May 2009 11:28:33 +0200 Subject: signal: fix __send_signal() false positive kmemcheck warning This false positive is due to field padding in struct sigqueue. When this dynamically allocated structure is copied to the stack (in arch- specific delivery code), kmemcheck sees a read from the padding, which is, naturally, uninitialized. Hide the false positive using the __GFP_NOTRACK_FALSE_POSITIVE flag. Also made the rlimit override code a bit clearer by introducing a new variable. Cc: Oleg Nesterov Signed-off-by: Vegard Nossum --- kernel/signal.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/signal.c b/kernel/signal.c index 809a228019ad..d81f4952eebb 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -832,6 +832,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t, { struct sigpending *pending; struct sigqueue *q; + int override_rlimit; trace_sched_signal_send(sig, t); @@ -863,9 +864,13 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t, make sure at least one signal gets delivered and don't pass on the info struct. 
*/ - q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN && - (is_si_special(info) || - info->si_code >= 0))); + if (sig < SIGRTMIN) + override_rlimit = (is_si_special(info) || info->si_code >= 0); + else + override_rlimit = 0; + + q = __sigqueue_alloc(t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE, + override_rlimit); if (q) { list_add_tail(&q->list, &pending->list); switch ((unsigned long) info) { -- cgit v1.2.3 From 3959214f971417f4162926ac52ad4cd042958caa Mon Sep 17 00:00:00 2001 From: Kay Sievers Date: Tue, 24 Mar 2009 15:43:30 +0100 Subject: sched: delayed cleanup of user_struct During bootup performance tracing we see repeated occurrences of /sys/kernel/uid/* events for the same uid, leading to a, in this case, rather pointless userspace processing for the same uid over and over. This is usually caused by tools which change their uid to "nobody", to run without privileges to read data supplied by untrusted users. This change delays the execution of the (already existing) scheduled work, to cleanup the uid after one second, so the allocated and announced uid can possibly be re-used by another process. This is the current behavior, where almost every invocation of a binary, which changes the uid, creates two events: $ read START < /sys/kernel/uevent_seqnum; \ for i in `seq 100`; do su --shell=/bin/true bin; done; \ read END < /sys/kernel/uevent_seqnum; \ echo $(($END - $START)) 178 With the delayed cleanup, we get only two events, and userspace finishes a bit faster too: $ read START < /sys/kernel/uevent_seqnum; \ for i in `seq 100`; do su --shell=/bin/true bin; done; \ read END < /sys/kernel/uevent_seqnum; \ echo $(($END - $START)) 1 Acked-by: Dhaval Giani Signed-off-by: Kay Sievers Signed-off-by: Greg Kroah-Hartman --- include/linux/sched.h | 2 +- kernel/user.c | 67 ++++++++++++++++++++++++++++++--------------------- 2 files changed, 40 insertions(+), 29 deletions(-) (limited to 'kernel') diff --git a/include/linux/sched.h b/include/linux/sched.h index c900aa530070..7531b1c28201 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -674,7 +674,7 @@ struct user_struct { struct task_group *tg; #ifdef CONFIG_SYSFS struct kobject kobj; - struct work_struct work; + struct delayed_work work; #endif #endif diff --git a/kernel/user.c b/kernel/user.c index 850e0ba41c1e..2c000e7132ac 100644 --- a/kernel/user.c +++ b/kernel/user.c @@ -75,21 +75,6 @@ static void uid_hash_remove(struct user_struct *up) put_user_ns(up->user_ns); } -static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent) -{ - struct user_struct *user; - struct hlist_node *h; - - hlist_for_each_entry(user, h, hashent, uidhash_node) { - if (user->uid == uid) { - atomic_inc(&user->__count); - return user; - } - } - - return NULL; -} - #ifdef CONFIG_USER_SCHED static void sched_destroy_user(struct user_struct *up) @@ -119,6 +104,23 @@ static int sched_create_user(struct user_struct *up) { return 0; } #if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS) +static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent) +{ + struct user_struct *user; + struct hlist_node *h; + + hlist_for_each_entry(user, h, hashent, uidhash_node) { + if (user->uid == uid) { + /* possibly resurrect an "almost deleted" object */ + if (atomic_inc_return(&user->__count) == 1) + cancel_delayed_work(&user->work); + return user; + } + } + + return NULL; +} + static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */ static DEFINE_MUTEX(uids_mutex); @@ -283,12 +285,12 @@ int __init 
uids_sysfs_init(void) return uids_user_create(&root_user); } -/* work function to remove sysfs directory for a user and free up +/* delayed work function to remove sysfs directory for a user and free up * corresponding structures. */ static void cleanup_user_struct(struct work_struct *w) { - struct user_struct *up = container_of(w, struct user_struct, work); + struct user_struct *up = container_of(w, struct user_struct, work.work); unsigned long flags; int remove_user = 0; @@ -297,15 +299,12 @@ static void cleanup_user_struct(struct work_struct *w) */ uids_mutex_lock(); - local_irq_save(flags); - - if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) { + spin_lock_irqsave(&uidhash_lock, flags); + if (atomic_read(&up->__count) == 0) { uid_hash_remove(up); remove_user = 1; - spin_unlock_irqrestore(&uidhash_lock, flags); - } else { - local_irq_restore(flags); } + spin_unlock_irqrestore(&uidhash_lock, flags); if (!remove_user) goto done; @@ -331,16 +330,28 @@ done: */ static void free_user(struct user_struct *up, unsigned long flags) { - /* restore back the count */ - atomic_inc(&up->__count); spin_unlock_irqrestore(&uidhash_lock, flags); - - INIT_WORK(&up->work, cleanup_user_struct); - schedule_work(&up->work); + INIT_DELAYED_WORK(&up->work, cleanup_user_struct); + schedule_delayed_work(&up->work, msecs_to_jiffies(1000)); } #else /* CONFIG_USER_SCHED && CONFIG_SYSFS */ +static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent) +{ + struct user_struct *user; + struct hlist_node *h; + + hlist_for_each_entry(user, h, hashent, uidhash_node) { + if (user->uid == uid) { + atomic_inc(&user->__count); + return user; + } + } + + return NULL; +} + int uids_sysfs_init(void) { return 0; } static inline int uids_user_create(struct user_struct *up) { return 0; } static inline void uids_mutex_lock(void) { } -- cgit v1.2.3 From 156f5a7801195fa2ce44aeeb62d6cf8468f3332a Mon Sep 17 00:00:00 2001 From: GeunSik Lim Date: Tue, 2 Jun 2009 15:01:37 +0900 Subject: debugfs: Fix terminology inconsistency of dir name to mount debugfs filesystem. Many developers use "/debug/" or "/debugfs/" or "/sys/kernel/debug/" directory name to mount debugfs filesystem for ftrace according to ./Documentation/tracers/ftrace.txt file. And, three directory names(ex:/debug/, /debugfs/, /sys/kernel/debug/) is existed in kernel source like ftrace, DRM, Wireless, Documentation, Network[sky2]files to mount debugfs filesystem. debugfs means debug filesystem for debugging easy to use by greg kroah hartman. "/sys/kernel/debug/" name is suitable as directory name of debugfs filesystem. - debugfs related reference: http://lwn.net/Articles/334546/ Fix inconsistency of directory name to mount debugfs filesystem. * From Steven Rostedt - find_debugfs() and tracing_files() in this patch. 
Signed-off-by: GeunSik Lim Acked-by : Inaky Perez-Gonzalez Reviewed-by : Steven Rostedt Reviewed-by : James Smart CC: Jiri Kosina CC: David Airlie CC: Peter Osterlund CC: Ananth N Mavinakayanahalli CC: Anil S Keshavamurthy CC: Masami Hiramatsu Signed-off-by: Greg Kroah-Hartman --- Documentation/DocBook/debugobjects.tmpl | 2 +- Documentation/cdrom/packet-writing.txt | 2 +- Documentation/fault-injection/fault-injection.txt | 70 +++---- Documentation/kprobes.txt | 6 +- Documentation/trace/ftrace.txt | 233 +++++++++++++--------- Documentation/trace/mmiotrace.txt | 26 +-- drivers/block/pktcdvd.c | 2 +- drivers/gpu/drm/drm_debugfs.c | 12 +- drivers/gpu/drm/drm_drv.c | 2 +- drivers/gpu/drm/drm_stub.c | 2 +- drivers/net/Kconfig | 4 +- drivers/net/wimax/i2400m/i2400m.h | 2 +- drivers/net/wireless/ath/ath5k/Kconfig | 5 +- drivers/net/wireless/libertas/README | 12 +- drivers/scsi/lpfc/lpfc_debugfs.c | 3 +- include/linux/kernel.h | 2 +- include/linux/tracepoint.h | 4 +- kernel/trace/Kconfig | 10 +- kernel/trace/trace.c | 23 +-- scripts/tracing/draw_functrace.py | 7 +- 20 files changed, 238 insertions(+), 191 deletions(-) (limited to 'kernel') diff --git a/Documentation/DocBook/debugobjects.tmpl b/Documentation/DocBook/debugobjects.tmpl index 7f5f218015fe..08ff908aa7a2 100644 --- a/Documentation/DocBook/debugobjects.tmpl +++ b/Documentation/DocBook/debugobjects.tmpl @@ -106,7 +106,7 @@ number of errors are printk'ed including a full stack trace. - The statistics are available via debugfs/debug_objects/stats. + The statistics are available via /sys/kernel/debug/debug_objects/stats. They provide information about the number of warnings and the number of successful fixups along with information about the usage of the internal tracking objects and the state of the diff --git a/Documentation/cdrom/packet-writing.txt b/Documentation/cdrom/packet-writing.txt index cf1f8126991c..1c407778c8b2 100644 --- a/Documentation/cdrom/packet-writing.txt +++ b/Documentation/cdrom/packet-writing.txt @@ -117,7 +117,7 @@ Using the pktcdvd debugfs interface To read pktcdvd device infos in human readable form, do: - # cat /debug/pktcdvd/pktcdvd[0-7]/info + # cat /sys/kernel/debug/pktcdvd/pktcdvd[0-7]/info For a description of the debugfs interface look into the file: diff --git a/Documentation/fault-injection/fault-injection.txt b/Documentation/fault-injection/fault-injection.txt index 4bc374a14345..079305640790 100644 --- a/Documentation/fault-injection/fault-injection.txt +++ b/Documentation/fault-injection/fault-injection.txt @@ -29,16 +29,16 @@ o debugfs entries fault-inject-debugfs kernel module provides some debugfs entries for runtime configuration of fault-injection capabilities. -- /debug/fail*/probability: +- /sys/kernel/debug/fail*/probability: likelihood of failure injection, in percent. Format: Note that one-failure-per-hundred is a very high error rate for some testcases. Consider setting probability=100 and configure - /debug/fail*/interval for such testcases. + /sys/kernel/debug/fail*/interval for such testcases. -- /debug/fail*/interval: +- /sys/kernel/debug/fail*/interval: specifies the interval between failures, for calls to should_fail() that pass all the other tests. @@ -46,18 +46,18 @@ configuration of fault-injection capabilities. Note that if you enable this, by setting interval>1, you will probably want to set probability=100. -- /debug/fail*/times: +- /sys/kernel/debug/fail*/times: specifies how many times failures may happen at most. A value of -1 means "no limit". 
-- /debug/fail*/space: +- /sys/kernel/debug/fail*/space: specifies an initial resource "budget", decremented by "size" on each call to should_fail(,size). Failure injection is suppressed until "space" reaches zero. -- /debug/fail*/verbose +- /sys/kernel/debug/fail*/verbose Format: { 0 | 1 | 2 } specifies the verbosity of the messages when failure is @@ -65,17 +65,17 @@ configuration of fault-injection capabilities. log line per failure; '2' will print a call trace too -- useful to debug the problems revealed by fault injection. -- /debug/fail*/task-filter: +- /sys/kernel/debug/fail*/task-filter: Format: { 'Y' | 'N' } A value of 'N' disables filtering by process (default). Any positive value limits failures to only processes indicated by /proc//make-it-fail==1. -- /debug/fail*/require-start: -- /debug/fail*/require-end: -- /debug/fail*/reject-start: -- /debug/fail*/reject-end: +- /sys/kernel/debug/fail*/require-start: +- /sys/kernel/debug/fail*/require-end: +- /sys/kernel/debug/fail*/reject-start: +- /sys/kernel/debug/fail*/reject-end: specifies the range of virtual addresses tested during stacktrace walking. Failure is injected only if some caller @@ -84,26 +84,26 @@ configuration of fault-injection capabilities. Default required range is [0,ULONG_MAX) (whole of virtual address space). Default rejected range is [0,0). -- /debug/fail*/stacktrace-depth: +- /sys/kernel/debug/fail*/stacktrace-depth: specifies the maximum stacktrace depth walked during search for a caller within [require-start,require-end) OR [reject-start,reject-end). -- /debug/fail_page_alloc/ignore-gfp-highmem: +- /sys/kernel/debug/fail_page_alloc/ignore-gfp-highmem: Format: { 'Y' | 'N' } default is 'N', setting it to 'Y' won't inject failures into highmem/user allocations. -- /debug/failslab/ignore-gfp-wait: -- /debug/fail_page_alloc/ignore-gfp-wait: +- /sys/kernel/debug/failslab/ignore-gfp-wait: +- /sys/kernel/debug/fail_page_alloc/ignore-gfp-wait: Format: { 'Y' | 'N' } default is 'N', setting it to 'Y' will inject failures only into non-sleep allocations (GFP_ATOMIC allocations). -- /debug/fail_page_alloc/min-order: +- /sys/kernel/debug/fail_page_alloc/min-order: specifies the minimum page allocation order to be injected failures. 
@@ -166,13 +166,13 @@ o Inject slab allocation failures into module init/exit code #!/bin/bash FAILTYPE=failslab -echo Y > /debug/$FAILTYPE/task-filter -echo 10 > /debug/$FAILTYPE/probability -echo 100 > /debug/$FAILTYPE/interval -echo -1 > /debug/$FAILTYPE/times -echo 0 > /debug/$FAILTYPE/space -echo 2 > /debug/$FAILTYPE/verbose -echo 1 > /debug/$FAILTYPE/ignore-gfp-wait +echo Y > /sys/kernel/debug/$FAILTYPE/task-filter +echo 10 > /sys/kernel/debug/$FAILTYPE/probability +echo 100 > /sys/kernel/debug/$FAILTYPE/interval +echo -1 > /sys/kernel/debug/$FAILTYPE/times +echo 0 > /sys/kernel/debug/$FAILTYPE/space +echo 2 > /sys/kernel/debug/$FAILTYPE/verbose +echo 1 > /sys/kernel/debug/$FAILTYPE/ignore-gfp-wait faulty_system() { @@ -217,20 +217,20 @@ then exit 1 fi -cat /sys/module/$module/sections/.text > /debug/$FAILTYPE/require-start -cat /sys/module/$module/sections/.data > /debug/$FAILTYPE/require-end +cat /sys/module/$module/sections/.text > /sys/kernel/debug/$FAILTYPE/require-start +cat /sys/module/$module/sections/.data > /sys/kernel/debug/$FAILTYPE/require-end -echo N > /debug/$FAILTYPE/task-filter -echo 10 > /debug/$FAILTYPE/probability -echo 100 > /debug/$FAILTYPE/interval -echo -1 > /debug/$FAILTYPE/times -echo 0 > /debug/$FAILTYPE/space -echo 2 > /debug/$FAILTYPE/verbose -echo 1 > /debug/$FAILTYPE/ignore-gfp-wait -echo 1 > /debug/$FAILTYPE/ignore-gfp-highmem -echo 10 > /debug/$FAILTYPE/stacktrace-depth +echo N > /sys/kernel/debug/$FAILTYPE/task-filter +echo 10 > /sys/kernel/debug/$FAILTYPE/probability +echo 100 > /sys/kernel/debug/$FAILTYPE/interval +echo -1 > /sys/kernel/debug/$FAILTYPE/times +echo 0 > /sys/kernel/debug/$FAILTYPE/space +echo 2 > /sys/kernel/debug/$FAILTYPE/verbose +echo 1 > /sys/kernel/debug/$FAILTYPE/ignore-gfp-wait +echo 1 > /sys/kernel/debug/$FAILTYPE/ignore-gfp-highmem +echo 10 > /sys/kernel/debug/$FAILTYPE/stacktrace-depth -trap "echo 0 > /debug/$FAILTYPE/probability" SIGINT SIGTERM EXIT +trap "echo 0 > /sys/kernel/debug/$FAILTYPE/probability" SIGINT SIGTERM EXIT echo "Injecting errors into the module $module... (interrupt to stop)" sleep 1000000 diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt index 1e7a769a10f9..053037a1fe6d 100644 --- a/Documentation/kprobes.txt +++ b/Documentation/kprobes.txt @@ -507,9 +507,9 @@ http://www.linuxsymposium.org/2006/linuxsymposium_procv2.pdf (pages 101-115) Appendix A: The kprobes debugfs interface With recent kernels (> 2.6.20) the list of registered kprobes is visible -under the /debug/kprobes/ directory (assuming debugfs is mounted at /debug). +under the /sys/kernel/debug/kprobes/ directory (assuming debugfs is mounted at //sys/kernel/debug). -/debug/kprobes/list: Lists all registered probes on the system +/sys/kernel/debug/kprobes/list: Lists all registered probes on the system c015d71a k vfs_read+0x0 c011a316 j do_fork+0x0 @@ -525,7 +525,7 @@ virtual addresses that correspond to modules that've been unloaded), such probes are marked with [GONE]. If the probe is temporarily disabled, such probes are marked with [DISABLED]. -/debug/kprobes/enabled: Turn kprobes ON/OFF forcibly. +/sys/kernel/debug/kprobes/enabled: Turn kprobes ON/OFF forcibly. Provides a knob to globally and forcibly turn registered kprobes ON or OFF. By default, all kprobes are enabled. 
By echoing "0" to this file, all diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt index 7bd27f0e2880..a39b3c749de5 100644 --- a/Documentation/trace/ftrace.txt +++ b/Documentation/trace/ftrace.txt @@ -7,7 +7,6 @@ Copyright 2008 Red Hat Inc. (dual licensed under the GPL v2) Reviewers: Elias Oltmanns, Randy Dunlap, Andrew Morton, John Kacur, and David Teigland. - Written for: 2.6.28-rc2 Introduction @@ -33,13 +32,26 @@ The File System Ftrace uses the debugfs file system to hold the control files as well as the files to display output. -To mount the debugfs system: +When debugfs is configured into the kernel (which selecting any ftrace +option will do) the directory /sys/kernel/debug will be created. To mount +this directory, you can add to your /etc/fstab file: + + debugfs /sys/kernel/debug debugfs defaults 0 0 + +Or you can mount it at run time with: + + mount -t debugfs nodev /sys/kernel/debug - # mkdir /debug - # mount -t debugfs nodev /debug +For quicker access to that directory you may want to make a soft link to +it: -( Note: it is more common to mount at /sys/kernel/debug, but for - simplicity this document will use /debug) + ln -s /sys/kernel/debug /debug + +Any selected ftrace option will also create a directory called tracing +within the debugfs. The rest of the document will assume that you are in +the ftrace directory (cd /sys/kernel/debug/tracing) and will only concentrate +on the files within that directory and not distract from the content with +the extended "/sys/kernel/debug/tracing" path name. That's it! (assuming that you have ftrace configured into your kernel) @@ -389,18 +401,18 @@ trace_options The trace_options file is used to control what gets printed in the trace output. To see what is available, simply cat the file: - cat /debug/tracing/trace_options + cat trace_options print-parent nosym-offset nosym-addr noverbose noraw nohex nobin \ noblock nostacktrace nosched-tree nouserstacktrace nosym-userobj To disable one of the options, echo in the option prepended with "no". - echo noprint-parent > /debug/tracing/trace_options + echo noprint-parent > trace_options To enable an option, leave off the "no". - echo sym-offset > /debug/tracing/trace_options + echo sym-offset > trace_options Here are the available options: @@ -476,11 +488,11 @@ sched_switch This tracer simply records schedule switches. Here is an example of how to use it. - # echo sched_switch > /debug/tracing/current_tracer - # echo 1 > /debug/tracing/tracing_enabled + # echo sched_switch > current_tracer + # echo 1 > tracing_enabled # sleep 1 - # echo 0 > /debug/tracing/tracing_enabled - # cat /debug/tracing/trace + # echo 0 > tracing_enabled + # cat trace # tracer: sched_switch # @@ -583,13 +595,13 @@ new trace is saved. To reset the maximum, echo 0 into tracing_max_latency. Here is an example: - # echo irqsoff > /debug/tracing/current_tracer - # echo 0 > /debug/tracing/tracing_max_latency - # echo 1 > /debug/tracing/tracing_enabled + # echo irqsoff > current_tracer + # echo 0 > tracing_max_latency + # echo 1 > tracing_enabled # ls -ltr [...] - # echo 0 > /debug/tracing/tracing_enabled - # cat /debug/tracing/latency_trace + # echo 0 > tracing_enabled + # cat latency_trace # tracer: irqsoff # irqsoff latency trace v1.1.5 on 2.6.26 @@ -690,13 +702,13 @@ Like the irqsoff tracer, it records the maximum latency for which preemption was disabled. The control of preemptoff tracer is much like the irqsoff tracer. 
- # echo preemptoff > /debug/tracing/current_tracer - # echo 0 > /debug/tracing/tracing_max_latency - # echo 1 > /debug/tracing/tracing_enabled + # echo preemptoff > current_tracer + # echo 0 > tracing_max_latency + # echo 1 > tracing_enabled # ls -ltr [...] - # echo 0 > /debug/tracing/tracing_enabled - # cat /debug/tracing/latency_trace + # echo 0 > tracing_enabled + # cat latency_trace # tracer: preemptoff # preemptoff latency trace v1.1.5 on 2.6.26-rc8 @@ -837,13 +849,13 @@ tracer. Again, using this trace is much like the irqsoff and preemptoff tracers. - # echo preemptirqsoff > /debug/tracing/current_tracer - # echo 0 > /debug/tracing/tracing_max_latency - # echo 1 > /debug/tracing/tracing_enabled + # echo preemptirqsoff > current_tracer + # echo 0 > tracing_max_latency + # echo 1 > tracing_enabled # ls -ltr [...] - # echo 0 > /debug/tracing/tracing_enabled - # cat /debug/tracing/latency_trace + # echo 0 > tracing_enabled + # cat latency_trace # tracer: preemptirqsoff # preemptirqsoff latency trace v1.1.5 on 2.6.26-rc8 @@ -999,12 +1011,12 @@ slightly differently than we did with the previous tracers. Instead of performing an 'ls', we will run 'sleep 1' under 'chrt' which changes the priority of the task. - # echo wakeup > /debug/tracing/current_tracer - # echo 0 > /debug/tracing/tracing_max_latency - # echo 1 > /debug/tracing/tracing_enabled + # echo wakeup > current_tracer + # echo 0 > tracing_max_latency + # echo 1 > tracing_enabled # chrt -f 5 sleep 1 - # echo 0 > /debug/tracing/tracing_enabled - # cat /debug/tracing/latency_trace + # echo 0 > tracing_enabled + # cat latency_trace # tracer: wakeup # wakeup latency trace v1.1.5 on 2.6.26-rc8 @@ -1114,11 +1126,11 @@ can be done from the debug file system. Make sure the ftrace_enabled is set; otherwise this tracer is a nop. # sysctl kernel.ftrace_enabled=1 - # echo function > /debug/tracing/current_tracer - # echo 1 > /debug/tracing/tracing_enabled + # echo function > current_tracer + # echo 1 > tracing_enabled # usleep 1 - # echo 0 > /debug/tracing/tracing_enabled - # cat /debug/tracing/trace + # echo 0 > tracing_enabled + # cat trace # tracer: function # # TASK-PID CPU# TIMESTAMP FUNCTION @@ -1155,7 +1167,7 @@ int trace_fd; [...] int main(int argc, char *argv[]) { [...] - trace_fd = open("/debug/tracing/tracing_enabled", O_WRONLY); + trace_fd = open(tracing_file("tracing_enabled"), O_WRONLY); [...] if (condition_hit()) { write(trace_fd, "0", 1); @@ -1163,26 +1175,20 @@ int main(int argc, char *argv[]) { [...] } -Note: Here we hard coded the path name. The debugfs mount is not -guaranteed to be at /debug (and is more commonly at -/sys/kernel/debug). For simple one time traces, the above is -sufficent. For anything else, a search through /proc/mounts may -be needed to find where the debugfs file-system is mounted. - Single thread tracing --------------------- -By writing into /debug/tracing/set_ftrace_pid you can trace a +By writing into set_ftrace_pid you can trace a single thread. 
For example: -# cat /debug/tracing/set_ftrace_pid +# cat set_ftrace_pid no pid -# echo 3111 > /debug/tracing/set_ftrace_pid -# cat /debug/tracing/set_ftrace_pid +# echo 3111 > set_ftrace_pid +# cat set_ftrace_pid 3111 -# echo function > /debug/tracing/current_tracer -# cat /debug/tracing/trace | head +# echo function > current_tracer +# cat trace | head # tracer: function # # TASK-PID CPU# TIMESTAMP FUNCTION @@ -1193,8 +1199,8 @@ no pid yum-updatesd-3111 [003] 1637.254683: lock_hrtimer_base <-hrtimer_try_to_cancel yum-updatesd-3111 [003] 1637.254685: fget_light <-do_sys_poll yum-updatesd-3111 [003] 1637.254686: pipe_poll <-do_sys_poll -# echo -1 > /debug/tracing/set_ftrace_pid -# cat /debug/tracing/trace |head +# echo -1 > set_ftrace_pid +# cat trace |head # tracer: function # # TASK-PID CPU# TIMESTAMP FUNCTION @@ -1216,6 +1222,51 @@ something like this simple program: #include #include +#define _STR(x) #x +#define STR(x) _STR(x) +#define MAX_PATH 256 + +const char *find_debugfs(void) +{ + static char debugfs[MAX_PATH+1]; + static int debugfs_found; + char type[100]; + FILE *fp; + + if (debugfs_found) + return debugfs; + + if ((fp = fopen("/proc/mounts","r")) == NULL) { + perror("/proc/mounts"); + return NULL; + } + + while (fscanf(fp, "%*s %" + STR(MAX_PATH) + "s %99s %*s %*d %*d\n", + debugfs, type) == 2) { + if (strcmp(type, "debugfs") == 0) + break; + } + fclose(fp); + + if (strcmp(type, "debugfs") != 0) { + fprintf(stderr, "debugfs not mounted"); + return NULL; + } + + debugfs_found = 1; + + return debugfs; +} + +const char *tracing_file(const char *file_name) +{ + static char trace_file[MAX_PATH+1]; + snprintf(trace_file, MAX_PATH, "%s/%s", find_debugfs(), file_name); + return trace_file; +} + int main (int argc, char **argv) { if (argc < 1) @@ -1226,12 +1277,12 @@ int main (int argc, char **argv) char line[64]; int s; - ffd = open("/debug/tracing/current_tracer", O_WRONLY); + ffd = open(tracing_file("current_tracer"), O_WRONLY); if (ffd < 0) exit(-1); write(ffd, "nop", 3); - fd = open("/debug/tracing/set_ftrace_pid", O_WRONLY); + fd = open(tracing_file("set_ftrace_pid"), O_WRONLY); s = sprintf(line, "%d\n", getpid()); write(fd, line, s); @@ -1383,22 +1434,22 @@ want, depending on your needs. tracing_cpu_mask file) or you might sometimes see unordered function calls while cpu tracing switch. - hide: echo nofuncgraph-cpu > /debug/tracing/trace_options - show: echo funcgraph-cpu > /debug/tracing/trace_options + hide: echo nofuncgraph-cpu > trace_options + show: echo funcgraph-cpu > trace_options - The duration (function's time of execution) is displayed on the closing bracket line of a function or on the same line than the current function in case of a leaf one. It is default enabled. - hide: echo nofuncgraph-duration > /debug/tracing/trace_options - show: echo funcgraph-duration > /debug/tracing/trace_options + hide: echo nofuncgraph-duration > trace_options + show: echo funcgraph-duration > trace_options - The overhead field precedes the duration field in case of reached duration thresholds. - hide: echo nofuncgraph-overhead > /debug/tracing/trace_options - show: echo funcgraph-overhead > /debug/tracing/trace_options + hide: echo nofuncgraph-overhead > trace_options + show: echo funcgraph-overhead > trace_options depends on: funcgraph-duration ie: @@ -1427,8 +1478,8 @@ want, depending on your needs. - The task/pid field displays the thread cmdline and pid which executed the function. It is default disabled. 
- hide: echo nofuncgraph-proc > /debug/tracing/trace_options - show: echo funcgraph-proc > /debug/tracing/trace_options + hide: echo nofuncgraph-proc > trace_options + show: echo funcgraph-proc > trace_options ie: @@ -1451,8 +1502,8 @@ want, depending on your needs. system clock since it started. A snapshot of this time is given on each entry/exit of functions - hide: echo nofuncgraph-abstime > /debug/tracing/trace_options - show: echo funcgraph-abstime > /debug/tracing/trace_options + hide: echo nofuncgraph-abstime > trace_options + show: echo funcgraph-abstime > trace_options ie: @@ -1549,7 +1600,7 @@ listed in: available_filter_functions - # cat /debug/tracing/available_filter_functions + # cat available_filter_functions put_prev_task_idle kmem_cache_create pick_next_task_rt @@ -1561,12 +1612,12 @@ mutex_lock If I am only interested in sys_nanosleep and hrtimer_interrupt: # echo sys_nanosleep hrtimer_interrupt \ - > /debug/tracing/set_ftrace_filter - # echo ftrace > /debug/tracing/current_tracer - # echo 1 > /debug/tracing/tracing_enabled + > set_ftrace_filter + # echo ftrace > current_tracer + # echo 1 > tracing_enabled # usleep 1 - # echo 0 > /debug/tracing/tracing_enabled - # cat /debug/tracing/trace + # echo 0 > tracing_enabled + # cat trace # tracer: ftrace # # TASK-PID CPU# TIMESTAMP FUNCTION @@ -1577,7 +1628,7 @@ If I am only interested in sys_nanosleep and hrtimer_interrupt: To see which functions are being traced, you can cat the file: - # cat /debug/tracing/set_ftrace_filter + # cat set_ftrace_filter hrtimer_interrupt sys_nanosleep @@ -1597,7 +1648,7 @@ Note: It is better to use quotes to enclose the wild cards, otherwise the shell may expand the parameters into names of files in the local directory. - # echo 'hrtimer_*' > /debug/tracing/set_ftrace_filter + # echo 'hrtimer_*' > set_ftrace_filter Produces: @@ -1618,7 +1669,7 @@ Produces: Notice that we lost the sys_nanosleep. - # cat /debug/tracing/set_ftrace_filter + # cat set_ftrace_filter hrtimer_run_queues hrtimer_run_pending hrtimer_init @@ -1644,17 +1695,17 @@ To append to the filters, use '>>' To clear out a filter so that all functions will be recorded again: - # echo > /debug/tracing/set_ftrace_filter - # cat /debug/tracing/set_ftrace_filter + # echo > set_ftrace_filter + # cat set_ftrace_filter # Again, now we want to append. - # echo sys_nanosleep > /debug/tracing/set_ftrace_filter - # cat /debug/tracing/set_ftrace_filter + # echo sys_nanosleep > set_ftrace_filter + # cat set_ftrace_filter sys_nanosleep - # echo 'hrtimer_*' >> /debug/tracing/set_ftrace_filter - # cat /debug/tracing/set_ftrace_filter + # echo 'hrtimer_*' >> set_ftrace_filter + # cat set_ftrace_filter hrtimer_run_queues hrtimer_run_pending hrtimer_init @@ -1677,7 +1728,7 @@ hrtimer_init_sleeper The set_ftrace_notrace prevents those functions from being traced. - # echo '*preempt*' '*lock*' > /debug/tracing/set_ftrace_notrace + # echo '*preempt*' '*lock*' > set_ftrace_notrace Produces: @@ -1767,13 +1818,13 @@ the effect on the tracing is different. Every read from trace_pipe is consumed. This means that subsequent reads will be different. The trace is live. 
- # echo function > /debug/tracing/current_tracer - # cat /debug/tracing/trace_pipe > /tmp/trace.out & + # echo function > current_tracer + # cat trace_pipe > /tmp/trace.out & [1] 4153 - # echo 1 > /debug/tracing/tracing_enabled + # echo 1 > tracing_enabled # usleep 1 - # echo 0 > /debug/tracing/tracing_enabled - # cat /debug/tracing/trace + # echo 0 > tracing_enabled + # cat trace # tracer: function # # TASK-PID CPU# TIMESTAMP FUNCTION @@ -1809,7 +1860,7 @@ number listed is the number of entries that can be recorded per CPU. To know the full size, multiply the number of possible CPUS with the number of entries. - # cat /debug/tracing/buffer_size_kb + # cat buffer_size_kb 1408 (units kilobytes) Note, to modify this, you must have tracing completely disabled. @@ -1817,18 +1868,18 @@ To do that, echo "nop" into the current_tracer. If the current_tracer is not set to "nop", an EINVAL error will be returned. - # echo nop > /debug/tracing/current_tracer - # echo 10000 > /debug/tracing/buffer_size_kb - # cat /debug/tracing/buffer_size_kb + # echo nop > current_tracer + # echo 10000 > buffer_size_kb + # cat buffer_size_kb 10000 (units kilobytes) The number of pages which will be allocated is limited to a percentage of available memory. Allocating too much will produce an error. - # echo 1000000000000 > /debug/tracing/buffer_size_kb + # echo 1000000000000 > buffer_size_kb -bash: echo: write error: Cannot allocate memory - # cat /debug/tracing/buffer_size_kb + # cat buffer_size_kb 85 ----------- diff --git a/Documentation/trace/mmiotrace.txt b/Documentation/trace/mmiotrace.txt index 5731c67abc55..162effbfbdec 100644 --- a/Documentation/trace/mmiotrace.txt +++ b/Documentation/trace/mmiotrace.txt @@ -32,41 +32,41 @@ is no way to automatically detect if you are losing events due to CPUs racing. Usage Quick Reference --------------------- -$ mount -t debugfs debugfs /debug -$ echo mmiotrace > /debug/tracing/current_tracer -$ cat /debug/tracing/trace_pipe > mydump.txt & +$ mount -t debugfs debugfs /sys/kernel/debug +$ echo mmiotrace > /sys/kernel/debug/tracing/current_tracer +$ cat /sys/kernel/debug/tracing/trace_pipe > mydump.txt & Start X or whatever. -$ echo "X is up" > /debug/tracing/trace_marker -$ echo nop > /debug/tracing/current_tracer +$ echo "X is up" > /sys/kernel/debug/tracing/trace_marker +$ echo nop > /sys/kernel/debug/tracing/current_tracer Check for lost events. Usage ----- -Make sure debugfs is mounted to /debug. If not, (requires root privileges) -$ mount -t debugfs debugfs /debug +Make sure debugfs is mounted to /sys/kernel/debug. If not, (requires root privileges) +$ mount -t debugfs debugfs /sys/kernel/debug Check that the driver you are about to trace is not loaded. Activate mmiotrace (requires root privileges): -$ echo mmiotrace > /debug/tracing/current_tracer +$ echo mmiotrace > /sys/kernel/debug/tracing/current_tracer Start storing the trace: -$ cat /debug/tracing/trace_pipe > mydump.txt & +$ cat /sys/kernel/debug/tracing/trace_pipe > mydump.txt & The 'cat' process should stay running (sleeping) in the background. Load the driver you want to trace and use it. Mmiotrace will only catch MMIO accesses to areas that are ioremapped while mmiotrace is active. During tracing you can place comments (markers) into the trace by -$ echo "X is up" > /debug/tracing/trace_marker +$ echo "X is up" > /sys/kernel/debug/tracing/trace_marker This makes it easier to see which part of the (huge) trace corresponds to which action. It is recommended to place descriptive markers about what you do. 
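Markers can also be emitted from a test program rather than from the shell. A minimal userspace sketch (not part of this patch), assuming debugfs is mounted at the new canonical /sys/kernel/debug location; the write_marker() helper and the demo message are hypothetical names used only for illustration:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Append one comment line to the ftrace buffer via trace_marker. */
static int write_marker(const char *msg)
{
	int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);

	if (fd < 0) {
		perror("trace_marker");
		return -1;
	}
	if (write(fd, msg, strlen(msg)) < 0)
		perror("write");
	close(fd);
	return 0;
}

int main(void)
{
	return write_marker("X is up\n");
}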
Shut down mmiotrace (requires root privileges): -$ echo nop > /debug/tracing/current_tracer +$ echo nop > /sys/kernel/debug/tracing/current_tracer The 'cat' process exits. If it does not, kill it by issuing 'fg' command and pressing ctrl+c. @@ -78,10 +78,10 @@ to view your kernel log and look for "mmiotrace has lost events" warning. If events were lost, the trace is incomplete. You should enlarge the buffers and try again. Buffers are enlarged by first seeing how large the current buffers are: -$ cat /debug/tracing/buffer_size_kb +$ cat /sys/kernel/debug/tracing/buffer_size_kb gives you a number. Approximately double this number and write it back, for instance: -$ echo 128000 > /debug/tracing/buffer_size_kb +$ echo 128000 > /sys/kernel/debug/tracing/buffer_size_kb Then start again from the top. If you are doing a trace for a driver project, e.g. Nouveau, you should also diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 37e0f81cada2..83650e00632d 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -430,7 +430,7 @@ static void pkt_sysfs_cleanup(void) /******************************************************************** entries in debugfs - /debugfs/pktcdvd[0-7]/ + /sys/kernel/debug/pktcdvd[0-7]/ info *******************************************************************/ diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index c77c6c6d9d2c..6ce0e2667a85 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -105,7 +105,7 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count, ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO, root, tmp, &drm_debugfs_fops); if (!ent) { - DRM_ERROR("Cannot create /debugfs/dri/%s/%s\n", + DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/%s\n", name, files[i].name); drm_free(tmp, sizeof(struct drm_info_node), _DRM_DRIVER); @@ -133,9 +133,9 @@ EXPORT_SYMBOL(drm_debugfs_create_files); * \param minor device minor number * \param root DRI debugfs dir entry. * - * Create the DRI debugfs root entry "/debugfs/dri", the device debugfs root entry - * "/debugfs/dri/%minor%/", and each entry in debugfs_list as - * "/debugfs/dri/%minor%/%name%". + * Create the DRI debugfs root entry "/sys/kernel/debug/dri", the device debugfs root entry + * "/sys/kernel/debug/dri/%minor%/", and each entry in debugfs_list as + * "/sys/kernel/debug/dri/%minor%/%name%". 
*/ int drm_debugfs_init(struct drm_minor *minor, int minor_id, struct dentry *root) @@ -148,7 +148,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id, sprintf(name, "%d", minor_id); minor->debugfs_root = debugfs_create_dir(name, root); if (!minor->debugfs_root) { - DRM_ERROR("Cannot create /debugfs/dri/%s\n", name); + DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s\n", name); return -1; } @@ -165,7 +165,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id, ret = dev->driver->debugfs_init(minor); if (ret) { DRM_ERROR("DRM: Driver failed to initialize " - "/debugfs/dri.\n"); + "/sys/kernel/debug/dri.\n"); return ret; } } diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 019b7c578236..1bf7efd8d334 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -339,7 +339,7 @@ static int __init drm_core_init(void) drm_debugfs_root = debugfs_create_dir("dri", NULL); if (!drm_debugfs_root) { - DRM_ERROR("Cannot create /debugfs/dri\n"); + DRM_ERROR("Cannot create /sys/kernel/debug/dri\n"); ret = -1; goto err_p3; } diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index 89050684fe0d..387a8de1bc7e 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c @@ -343,7 +343,7 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t #if defined(CONFIG_DEBUG_FS) ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root); if (ret) { - DRM_ERROR("DRM: Failed to initialize /debugfs/dri.\n"); + DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n"); goto err_g2; } #endif diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 01f282cd0989..3b6383168c69 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -2206,7 +2206,7 @@ config SKGE_DEBUG depends on SKGE && DEBUG_FS help This option adds the ability to dump driver state for debugging. - The file debugfs/skge/ethX displays the state of the internal + The file /sys/kernel/debug/skge/ethX displays the state of the internal transmit and receive rings. If unsure, say N. @@ -2232,7 +2232,7 @@ config SKY2_DEBUG depends on SKY2 && DEBUG_FS help This option adds the ability to dump driver state for debugging. - The file debugfs/sky2/ethX displays the state of the internal + The file /sys/kernel/debug/sky2/ethX displays the state of the internal transmit and receive rings. If unsure, say N. diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h index 1fe5da4cf0a0..60330f313f27 100644 --- a/drivers/net/wimax/i2400m/i2400m.h +++ b/drivers/net/wimax/i2400m/i2400m.h @@ -432,7 +432,7 @@ struct i2400m { unsigned ready:1; /* all probing steps done */ unsigned rx_reorder:1; /* RX reorder is enabled */ u8 trace_msg_from_user; /* echo rx msgs to 'trace' pipe */ - /* typed u8 so debugfs/u8 can tweak */ + /* typed u8 so /sys/kernel/debug/u8 can tweak */ enum i2400m_system_state state; wait_queue_head_t state_wq; /* Woken up when on state updates */ diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig index 509b6f94f73b..daf0c83527d8 100644 --- a/drivers/net/wireless/ath/ath5k/Kconfig +++ b/drivers/net/wireless/ath/ath5k/Kconfig @@ -28,11 +28,10 @@ config ATH5K_DEBUG Say Y, if and you will get debug options for ath5k. 
To use this, you need to mount debugfs: - mkdir /debug/ - mount -t debugfs debug /debug/ + mount -t debugfs debug /sys/kernel/debug You will get access to files under: - /debug/ath5k/phy0/ + /sys/kernel/debug/ath5k/phy0/ To enable debug, pass the debug level to the debug module parameter. For example: diff --git a/drivers/net/wireless/libertas/README b/drivers/net/wireless/libertas/README index d860fc375752..ab6a2d518af0 100644 --- a/drivers/net/wireless/libertas/README +++ b/drivers/net/wireless/libertas/README @@ -72,7 +72,7 @@ rdrf location that is to be read. This parameter must be specified in hexadecimal (its possible to preceed preceding the number with a "0x"). - Path: /debugfs/libertas_wireless/ethX/registers/ + Path: /sys/kernel/debug/libertas_wireless/ethX/registers/ Usage: echo "0xa123" > rdmac ; cat rdmac @@ -95,7 +95,7 @@ wrrf sleepparams This command is used to set the sleepclock configurations - Path: /debugfs/libertas_wireless/ethX/ + Path: /sys/kernel/debug/libertas_wireless/ethX/ Usage: cat sleepparams: reads the current sleepclock configuration @@ -115,7 +115,7 @@ subscribed_events The subscribed_events directory contains the interface for the subscribed events API. - Path: /debugfs/libertas_wireless/ethX/subscribed_events/ + Path: /sys/kernel/debug/libertas_wireless/ethX/subscribed_events/ Each event is represented by a filename. Each filename consists of the following three fields: @@ -165,7 +165,7 @@ subscribed_events extscan This command is used to do a specific scan. - Path: /debugfs/libertas_wireless/ethX/ + Path: /sys/kernel/debug/libertas_wireless/ethX/ Usage: echo "SSID" > extscan @@ -179,7 +179,7 @@ getscantable Display the current contents of the driver scan table (ie. get the scan results). - Path: /debugfs/libertas_wireless/ethX/ + Path: /sys/kernel/debug/libertas_wireless/ethX/ Usage: cat getscantable @@ -188,7 +188,7 @@ setuserscan Initiate a customized scan and retrieve the results - Path: /debugfs/libertas_wireless/ethX/ + Path: /sys/kernel/debug/libertas_wireless/ethX/ Usage: echo "[ARGS]" > setuserscan diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 2b02b1fb39a0..8d0f0de76b63 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -53,8 +53,7 @@ * debugfs interface * * To access this interface the user should: - * # mkdir /debug - * # mount -t debugfs none /debug + * # mount -t debugfs none /sys/kernel/debug * * The lpfc debugfs directory hierarchy is: * lpfc/lpfcX/vportY diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 883cd44ff765..99b7aada28da 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -406,7 +406,7 @@ static inline char *pack_hex_byte(char *buf, u8 byte) * * Use tracing_on/tracing_off when you want to quickly turn on or off * tracing. It simply enables or disables the recording of the trace events. - * This also corresponds to the user space debugfs/tracing/tracing_on + * This also corresponds to the user space /sys/kernel/debug/tracing/tracing_on * file, which gives a means for the kernel and userspace to interact. * Place a tracing_off() in the kernel where you want tracing to end. 
* From user space, examine the trace, and then echo 1 > tracing_on diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 14df7e635d43..b9dc4ca0246f 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -198,7 +198,7 @@ static inline void tracepoint_synchronize_unregister(void) * * This is how the trace record is structured and will * * be saved into the ring buffer. These are the fields * * that will be exposed to user-space in - * * /debug/tracing/events/<*>/format. + * * /sys/kernel/debug/tracing/events/<*>/format. * * * * The declared 'local variable' is called '__entry' * * @@ -258,7 +258,7 @@ static inline void tracepoint_synchronize_unregister(void) * tracepoint callback (this is used by programmatic plugins and * can also by used by generic instrumentation like SystemTap), and * it is also used to expose a structured trace record in - * /debug/tracing/events/. + * /sys/kernel/debug/tracing/events/. */ #define TRACE_EVENT(name, proto, args, struct, assign, print) \ diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 4a13e5a01ce3..61071fecc82e 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -147,7 +147,7 @@ config IRQSOFF_TRACER disabled by default and can be runtime (re-)started via: - echo 0 > /debugfs/tracing/tracing_max_latency + echo 0 > /sys/kernel/debug/tracing/tracing_max_latency (Note that kernel size and overhead increases with this option enabled. This option and the preempt-off timing option can be @@ -168,7 +168,7 @@ config PREEMPT_TRACER disabled by default and can be runtime (re-)started via: - echo 0 > /debugfs/tracing/tracing_max_latency + echo 0 > /sys/kernel/debug/tracing/tracing_max_latency (Note that kernel size and overhead increases with this option enabled. This option and the irqs-off timing option can be @@ -261,7 +261,7 @@ config PROFILE_ANNOTATED_BRANCHES This tracer profiles all the the likely and unlikely macros in the kernel. It will display the results in: - /debugfs/tracing/profile_annotated_branch + /sys/kernel/debug/tracing/profile_annotated_branch Note: this will add a significant overhead, only turn this on if you need to profile the system's use of these macros. @@ -274,7 +274,7 @@ config PROFILE_ALL_BRANCHES taken in the kernel is recorded whether it hit or miss. The results will be displayed in: - /debugfs/tracing/profile_branch + /sys/kernel/debug/tracing/profile_branch This option also enables the likely/unlikely profiler. @@ -323,7 +323,7 @@ config STACK_TRACER select KALLSYMS help This special tracer records the maximum stack footprint of the - kernel and displays it in debugfs/tracing/stack_trace. + kernel and displays it in /sys/kernel/debug/tracing/stack_trace. This tracer works by hooking into every function call that the kernel executes, and keeping a maximum stack depth value and diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 8acd9b81a5d7..c1878bfb2e1e 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -344,7 +344,7 @@ static raw_spinlock_t ftrace_max_lock = /* * Copy the new maximum trace into the separate maximum-trace * structure. 
(this way the maximum trace is permanently saved, - * for later retrieval via /debugfs/tracing/latency_trace) + * for later retrieval via /sys/kernel/debug/tracing/latency_trace) */ static void __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) @@ -2414,21 +2414,20 @@ static const struct file_operations tracing_iter_fops = { static const char readme_msg[] = "tracing mini-HOWTO:\n\n" - "# mkdir /debug\n" - "# mount -t debugfs nodev /debug\n\n" - "# cat /debug/tracing/available_tracers\n" + "# mount -t debugfs nodev /sys/kernel/debug\n\n" + "# cat /sys/kernel/debug/tracing/available_tracers\n" "wakeup preemptirqsoff preemptoff irqsoff function sched_switch nop\n\n" - "# cat /debug/tracing/current_tracer\n" + "# cat /sys/kernel/debug/tracing/current_tracer\n" "nop\n" - "# echo sched_switch > /debug/tracing/current_tracer\n" - "# cat /debug/tracing/current_tracer\n" + "# echo sched_switch > /sys/kernel/debug/tracing/current_tracer\n" + "# cat /sys/kernel/debug/tracing/current_tracer\n" "sched_switch\n" - "# cat /debug/tracing/trace_options\n" + "# cat /sys/kernel/debug/tracing/trace_options\n" "noprint-parent nosym-offset nosym-addr noverbose\n" - "# echo print-parent > /debug/tracing/trace_options\n" - "# echo 1 > /debug/tracing/tracing_enabled\n" - "# cat /debug/tracing/trace > /tmp/trace.txt\n" - "# echo 0 > /debug/tracing/tracing_enabled\n" + "# echo print-parent > /sys/kernel/debug/tracing/trace_options\n" + "# echo 1 > /sys/kernel/debug/tracing/tracing_enabled\n" + "# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n" + "# echo 0 > /sys/kernel/debug/tracing/tracing_enabled\n" ; static ssize_t diff --git a/scripts/tracing/draw_functrace.py b/scripts/tracing/draw_functrace.py index 902f9a992620..db40fa04cd51 100644 --- a/scripts/tracing/draw_functrace.py +++ b/scripts/tracing/draw_functrace.py @@ -12,10 +12,9 @@ calls. Only the functions's names and the the call time are provided. Usage: Be sure that you have CONFIG_FUNCTION_TRACER - # mkdir /debugfs - # mount -t debug debug /debug - # echo function > /debug/tracing/current_tracer - $ cat /debug/tracing/trace_pipe > ~/raw_trace_func + # mount -t debugfs nodev /sys/kernel/debug + # echo function > /sys/kernel/debug/tracing/current_tracer + $ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func Wait some times but not too much, the script is a bit slow. Break the pipe (Ctrl + Z) $ scripts/draw_functrace.py < raw_trace_func > draw_functrace -- cgit v1.2.3 From 5fd29d6ccbc98884569d6f3105aeca70858b3e0f Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Tue, 16 Jun 2009 10:57:02 -0700 Subject: printk: clean up handling of log-levels and newlines It used to be that we would only look at the log-level in a printk() after explicit newlines, which can cause annoying problems when the previous printk() did not end with a '\n'. In that case, the log-level marker would be just printed out in the middle of the line, and be seen as just noise rather than change the logging level. This changes things to always look at the log-level in the first bytes of the printout. If a log level marker is found, it is always used as the log-level. Additionally, if no newline existed, one is added (unless the log-level is the explicit KERN_CONT marker, to explicitly show that it's a continuation of a previous line). 
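To illustrate the effect on callers, here is a minimal sketch (not part of the change itself; the loglevel_demo module name and messages are made up for the example). A message that begins with a log-level marker now always starts on a fresh line, while KERN_CONT explicitly continues the previous one:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

static int __init loglevel_demo_init(void)
{
	/* No trailing '\n': the line is deliberately left open. */
	printk(KERN_INFO "loglevel_demo: probing device");
	/* KERN_CONT appends to the still-open line. */
	printk(KERN_CONT " ... ok\n");
	/* A log-level marker forces a newline first if the previous line
	 * was left open, instead of appearing in the middle of it. */
	printk(KERN_WARNING "loglevel_demo: something looks odd\n");
	return 0;
}

static void __exit loglevel_demo_exit(void)
{
}

module_init(loglevel_demo_init);
module_exit(loglevel_demo_exit);
MODULE_LICENSE("GPL");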
Acked-by: Arjan van de Ven Signed-off-by: Linus Torvalds --- include/linux/kernel.h | 2 +- kernel/printk.c | 31 ++++++++++++++++++++++--------- 2 files changed, 23 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 883cd44ff765..066bb1eddfea 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -102,7 +102,7 @@ extern const char linux_proc_banner[]; * line that had no enclosing \n). Only to be used by core/arch code * during early bootup (a continued line is not SMP-safe otherwise). */ -#define KERN_CONT "" +#define KERN_CONT "" extern int console_printk[]; diff --git a/kernel/printk.c b/kernel/printk.c index 5052b5497c67..a87770ce73a2 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -687,20 +687,33 @@ asmlinkage int vprintk(const char *fmt, va_list args) sizeof(printk_buf) - printed_len, fmt, args); + p = printk_buf; + + /* Do we have a loglevel in the string? */ + if (p[0] == '<') { + unsigned char c = p[1]; + if (c && p[2] == '>') { + switch (c) { + case '0' ... '7': /* loglevel */ + current_log_level = c - '0'; + if (!new_text_line) { + emit_log_char('\n'); + new_text_line = 1; + } + /* Fallthrough - skip the loglevel */ + case 'c': /* KERN_CONT */ + p += 3; + break; + } + } + } + /* * Copy the output into log_buf. If the caller didn't provide * appropriate log level tags, we insert them here */ - for (p = printk_buf; *p; p++) { + for ( ; *p; p++) { if (new_text_line) { - /* If a token, set current_log_level and skip over */ - if (p[0] == '<' && p[1] >= '0' && p[1] <= '7' && - p[2] == '>') { - current_log_level = p[1] - '0'; - p += 3; - printed_len -= 3; - } - /* Always output the token */ emit_log_char('<'); emit_log_char(current_log_level + '0'); -- cgit v1.2.3 From e28d713704117bca0820c732210df6075b09f13b Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Tue, 16 Jun 2009 11:02:28 -0700 Subject: printk: Add KERN_DEFAULT printk log-level This adds a KERN_DEFAULT loglevel marker, for when you cannot decide which loglevel you want, and just want to keep an existing printk with the default loglevel. The difference between having KERN_DEFAULT and having no log-level marker at all is two-fold: - having the log-level marker will now force a new-line if the previous printout had not added one (perhaps because it forgot, but perhaps because it expected a continuation) - having a log-level marker is required if you are printing out a message that otherwise itself could perhaps otherwise be mistaken for a log-level. Signed-of-by: Linus Torvalds --- include/linux/kernel.h | 2 ++ kernel/printk.c | 2 ++ 2 files changed, 4 insertions(+) (limited to 'kernel') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 066bb1eddfea..1b2e1747df1a 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -97,6 +97,8 @@ extern const char linux_proc_banner[]; #define KERN_INFO "<6>" /* informational */ #define KERN_DEBUG "<7>" /* debug-level messages */ +/* Use the default kernel loglevel */ +#define KERN_DEFAULT "" /* * Annotation for a "continued" line of log printout (only done after a * line that had no enclosing \n). Only to be used by core/arch code diff --git a/kernel/printk.c b/kernel/printk.c index a87770ce73a2..b4d97b54c1ec 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -696,6 +696,8 @@ asmlinkage int vprintk(const char *fmt, va_list args) switch (c) { case '0' ... 
'7': /* loglevel */ current_log_level = c - '0'; + /* Fallthrough - make sure we're on a new line */ + case 'd': /* KERN_DEFAULT */ if (!new_text_line) { emit_log_char('\n'); new_text_line = 1; -- cgit v1.2.3 From b231125af7811a2f68c455d3bda95ac170ee4fa6 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Tue, 16 Jun 2009 11:07:14 -0700 Subject: printk: add KERN_DEFAULT loglevel to print_modules() Several WARN_ON() messages omit the '\n' at the end of the string, which is a simple (and understandable) error. The next line printed after that warning line is usually the current module list, and that printk does not have a log-level marker - resulting in one long mixed-up line. Adding this loglevel marker will now avoid this unreadable mess. Signed-off-by: Linus Torvalds --- kernel/module.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/module.c b/kernel/module.c index e4ab36ce7672..215aaab09e91 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -2899,7 +2899,7 @@ void print_modules(void) struct module *mod; char buf[8]; - printk("Modules linked in:"); + printk(KERN_DEFAULT "Modules linked in:"); /* Most callers should already have preempt disabled, but make sure */ preempt_disable(); list_for_each_entry_rcu(mod, &modules, list) -- cgit v1.2.3 From f3b39d47ebc51416fc3b690a32dfe030a2035e67 Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Tue, 16 Jun 2009 15:31:46 -0700 Subject: cpusets: restructure the function cpuset_update_task_memory_state() The kernel still allocates the page caches on old node after modifying its cpuset's mems when 'memory_spread_page' was set, or it didn't spread the page cache evenly over all the nodes that faulting task is allowed to usr after memory_spread_page was set. it is caused by the old mem_allowed and flags of the task, the current kernel doesn't updates them unless some function invokes cpuset_update_task_memory_state(), it is too late sometimes.We must update the mem_allowed and the flags of the tasks in time. Slab has the same problem. The following patches fix this bug by updating tasks' mem_allowed and spread flag after its cpuset's mems or spread flag is changed. This patch: Extract a function from cpuset_update_task_memory_state(). 
It will be used later for update tasks' page/slab spread flags after its cpuset's flag is set Signed-off-by: Miao Xie Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Christoph Lameter Cc: Paul Menage Cc: Nick Piggin Cc: Yasunori Goto Cc: Pekka Enberg Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/cpuset.c | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/cpuset.c b/kernel/cpuset.c index d5a7e17474ee..66b24d9b6638 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -331,6 +331,24 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask) BUG_ON(!nodes_intersects(*pmask, node_states[N_HIGH_MEMORY])); } +/* + * update task's spread flag if cpuset's page/slab spread flag is set + * + * Called with callback_mutex/cgroup_mutex held + */ +static void cpuset_update_task_spread_flag(struct cpuset *cs, + struct task_struct *tsk) +{ + if (is_spread_page(cs)) + tsk->flags |= PF_SPREAD_PAGE; + else + tsk->flags &= ~PF_SPREAD_PAGE; + if (is_spread_slab(cs)) + tsk->flags |= PF_SPREAD_SLAB; + else + tsk->flags &= ~PF_SPREAD_SLAB; +} + /** * cpuset_update_task_memory_state - update task memory placement * @@ -388,14 +406,7 @@ void cpuset_update_task_memory_state(void) cs = task_cs(tsk); /* Maybe changed when task not locked */ guarantee_online_mems(cs, &tsk->mems_allowed); tsk->cpuset_mems_generation = cs->mems_generation; - if (is_spread_page(cs)) - tsk->flags |= PF_SPREAD_PAGE; - else - tsk->flags &= ~PF_SPREAD_PAGE; - if (is_spread_slab(cs)) - tsk->flags |= PF_SPREAD_SLAB; - else - tsk->flags &= ~PF_SPREAD_SLAB; + cpuset_update_task_spread_flag(cs, tsk); task_unlock(tsk); mutex_unlock(&callback_mutex); mpol_rebind_task(tsk, &tsk->mems_allowed); -- cgit v1.2.3 From 950592f7b991f267d707d372b90f508bbe72acbc Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Tue, 16 Jun 2009 15:31:47 -0700 Subject: cpusets: update tasks' page/slab spread flags in time Fix the bug that the kernel didn't spread page cache/slab object evenly over all the allowed nodes when spread flags were set by updating tasks' page/slab spread flags in time. Signed-off-by: Miao Xie Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Christoph Lameter Cc: Paul Menage Cc: Nick Piggin Cc: Yasunori Goto Cc: Pekka Enberg Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/cpuset.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 55 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 66b24d9b6638..af5a83d52187 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -406,7 +406,6 @@ void cpuset_update_task_memory_state(void) cs = task_cs(tsk); /* Maybe changed when task not locked */ guarantee_online_mems(cs, &tsk->mems_allowed); tsk->cpuset_mems_generation = cs->mems_generation; - cpuset_update_task_spread_flag(cs, tsk); task_unlock(tsk); mutex_unlock(&callback_mutex); mpol_rebind_task(tsk, &tsk->mems_allowed); @@ -1203,6 +1202,46 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val) return 0; } +/* + * cpuset_change_flag - make a task's spread flags the same as its cpuset's + * @tsk: task to be updated + * @scan: struct cgroup_scanner containing the cgroup of the task + * + * Called by cgroup_scan_tasks() for each task in a cgroup. + * + * We don't need to re-check for the cgroup/cpuset membership, since we're + * holding cgroup_lock() at this point. 
+ */ +static void cpuset_change_flag(struct task_struct *tsk, + struct cgroup_scanner *scan) +{ + cpuset_update_task_spread_flag(cgroup_cs(scan->cg), tsk); +} + +/* + * update_tasks_flags - update the spread flags of tasks in the cpuset. + * @cs: the cpuset in which each task's spread flags needs to be changed + * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks() + * + * Called with cgroup_mutex held + * + * The cgroup_scan_tasks() function will scan all the tasks in a cgroup, + * calling callback functions for each. + * + * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0 + * if @heap != NULL. + */ +static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap) +{ + struct cgroup_scanner scan; + + scan.cg = cs->css.cgroup; + scan.test_task = NULL; + scan.process_task = cpuset_change_flag; + scan.heap = heap; + cgroup_scan_tasks(&scan); +} + /* * update_flag - read a 0 or a 1 in a file and update associated flag * bit: the bit to update (see cpuset_flagbits_t) @@ -1216,8 +1255,10 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, int turning_on) { struct cpuset *trialcs; - int err; int balance_flag_changed; + int spread_flag_changed; + struct ptr_heap heap; + int err; trialcs = alloc_trial_cpuset(cs); if (!trialcs) @@ -1232,9 +1273,16 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, if (err < 0) goto out; + err = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL); + if (err < 0) + goto out; + balance_flag_changed = (is_sched_load_balance(cs) != is_sched_load_balance(trialcs)); + spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) + || (is_spread_page(cs) != is_spread_page(trialcs))); + mutex_lock(&callback_mutex); cs->flags = trialcs->flags; mutex_unlock(&callback_mutex); @@ -1242,6 +1290,9 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) async_rebuild_sched_domains(); + if (spread_flag_changed) + update_tasks_flags(cs, &heap); + heap_free(&heap); out: free_trial_cpuset(trialcs); return err; @@ -1392,6 +1443,8 @@ static void cpuset_attach(struct cgroup_subsys *ss, if (err) return; + cpuset_update_task_spread_flag(cs, tsk); + from = oldcs->mems_allowed; to = cs->mems_allowed; mm = get_task_mm(tsk); @@ -1453,11 +1506,9 @@ static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val) break; case FILE_SPREAD_PAGE: retval = update_flag(CS_SPREAD_PAGE, cs, val); - cs->mems_generation = cpuset_mems_generation++; break; case FILE_SPREAD_SLAB: retval = update_flag(CS_SPREAD_SLAB, cs, val); - cs->mems_generation = cpuset_mems_generation++; break; default: retval = -EINVAL; -- cgit v1.2.3 From 58568d2a8215cb6f55caf2332017d7bdff954e1c Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Tue, 16 Jun 2009 15:31:49 -0700 Subject: cpuset,mm: update tasks' mems_allowed in time Fix allocating page cache/slab object on the unallowed node when memory spread is set by updating tasks' mems_allowed after its cpuset's mems is changed. In order to update tasks' mems_allowed in time, we must modify the code of memory policy. Because the memory policy is applied in the process's context originally. After applying this patch, one task directly manipulates anothers mems_allowed, and we use alloc_lock in the task_struct to protect mems_allowed and memory policy of the task. But in the fast path, we didn't use lock to protect them, because adding a lock may lead to performance regression. 
But if we don't add a lock,the task might see no nodes when changing cpuset's mems_allowed to some non-overlapping set. In order to avoid it, we set all new allowed nodes, then clear newly disallowed ones. [lee.schermerhorn@hp.com: The rework of mpol_new() to extract the adjusting of the node mask to apply cpuset and mpol flags "context" breaks set_mempolicy() and mbind() with MPOL_PREFERRED and a NULL nodemask--i.e., explicit local allocation. Fix this by adding the check for MPOL_PREFERRED and empty node mask to mpol_new_mpolicy(). Remove the now unneeded 'nodes = NULL' from mpol_new(). Note that mpol_new_mempolicy() is always called with a non-NULL 'nodes' parameter now that it has been removed from mpol_new(). Therefore, we don't need to test nodes for NULL before testing it for 'empty'. However, just to be extra paranoid, add a VM_BUG_ON() to verify this assumption.] [lee.schermerhorn@hp.com: I don't think the function name 'mpol_new_mempolicy' is descriptive enough to differentiate it from mpol_new(). This function applies cpuset set context, usually constraining nodes to those allowed by the cpuset. However, when the 'RELATIVE_NODES flag is set, it also translates the nodes. So I settled on 'mpol_set_nodemask()', because the comment block for mpol_new() mentions that we need to call this function to "set nodes". Some additional minor line length, whitespace and typo cleanup.] Signed-off-by: Miao Xie Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Christoph Lameter Cc: Paul Menage Cc: Nick Piggin Cc: Yasunori Goto Cc: Pekka Enberg Cc: David Rientjes Signed-off-by: Lee Schermerhorn Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/cpuset.h | 13 ++-- include/linux/sched.h | 8 +-- init/main.c | 6 +- kernel/cpuset.c | 184 +++++++++++++------------------------------------ kernel/kthread.c | 2 + mm/mempolicy.c | 143 +++++++++++++++++++++++++++----------- mm/page_alloc.c | 5 +- 7 files changed, 170 insertions(+), 191 deletions(-) (limited to 'kernel') diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 05ea1dd7d681..a5740fc4d04b 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -18,7 +18,6 @@ extern int number_of_cpusets; /* How many cpusets are defined in system? 
*/ -extern int cpuset_init_early(void); extern int cpuset_init(void); extern void cpuset_init_smp(void); extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask); @@ -27,7 +26,6 @@ extern void cpuset_cpus_allowed_locked(struct task_struct *p, extern nodemask_t cpuset_mems_allowed(struct task_struct *p); #define cpuset_current_mems_allowed (current->mems_allowed) void cpuset_init_current_mems_allowed(void); -void cpuset_update_task_memory_state(void); int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask); extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask); @@ -92,9 +90,13 @@ extern void rebuild_sched_domains(void); extern void cpuset_print_task_mems_allowed(struct task_struct *p); +static inline void set_mems_allowed(nodemask_t nodemask) +{ + current->mems_allowed = nodemask; +} + #else /* !CONFIG_CPUSETS */ -static inline int cpuset_init_early(void) { return 0; } static inline int cpuset_init(void) { return 0; } static inline void cpuset_init_smp(void) {} @@ -116,7 +118,6 @@ static inline nodemask_t cpuset_mems_allowed(struct task_struct *p) #define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY]) static inline void cpuset_init_current_mems_allowed(void) {} -static inline void cpuset_update_task_memory_state(void) {} static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) { @@ -188,6 +189,10 @@ static inline void cpuset_print_task_mems_allowed(struct task_struct *p) { } +static inline void set_mems_allowed(nodemask_t nodemask) +{ +} + #endif /* !CONFIG_CPUSETS */ #endif /* _LINUX_CPUSET_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index c900aa530070..1048bf50540a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1318,7 +1318,8 @@ struct task_struct { /* Thread group tracking */ u32 parent_exec_id; u32 self_exec_id; -/* Protection of (de-)allocation: mm, files, fs, tty, keyrings */ +/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, + * mempolicy */ spinlock_t alloc_lock; #ifdef CONFIG_GENERIC_HARDIRQS @@ -1386,8 +1387,7 @@ struct task_struct { cputime_t acct_timexpd; /* stime + utime since last update */ #endif #ifdef CONFIG_CPUSETS - nodemask_t mems_allowed; - int cpuset_mems_generation; + nodemask_t mems_allowed; /* Protected by alloc_lock */ int cpuset_mem_spread_rotor; #endif #ifdef CONFIG_CGROUPS @@ -1410,7 +1410,7 @@ struct task_struct { struct list_head perf_counter_list; #endif #ifdef CONFIG_NUMA - struct mempolicy *mempolicy; + struct mempolicy *mempolicy; /* Protected by alloc_lock */ short il_next; #endif atomic_t fs_excl; /* holding fs exclusive resources */ diff --git a/init/main.c b/init/main.c index f6204f712e7c..5e0d3f047eaf 100644 --- a/init/main.c +++ b/init/main.c @@ -670,7 +670,6 @@ asmlinkage void __init start_kernel(void) initrd_start = 0; } #endif - cpuset_init_early(); page_cgroup_init(); enable_debug_pagealloc(); cpu_hotplug_init(); @@ -867,6 +866,11 @@ static noinline int init_post(void) static int __init kernel_init(void * unused) { lock_kernel(); + + /* + * init can allocate pages on any node + */ + set_mems_allowed(node_possible_map); /* * init can run on any cpu. */ diff --git a/kernel/cpuset.c b/kernel/cpuset.c index af5a83d52187..7e75a41bd508 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -97,12 +97,6 @@ struct cpuset { struct cpuset *parent; /* my parent */ - /* - * Copy of global cpuset_mems_generation as of the most - * recent time this cpuset changed its mems_allowed. 
- */ - int mems_generation; - struct fmeter fmeter; /* memory_pressure filter */ /* partition number for rebuild_sched_domains() */ @@ -176,27 +170,6 @@ static inline int is_spread_slab(const struct cpuset *cs) return test_bit(CS_SPREAD_SLAB, &cs->flags); } -/* - * Increment this integer everytime any cpuset changes its - * mems_allowed value. Users of cpusets can track this generation - * number, and avoid having to lock and reload mems_allowed unless - * the cpuset they're using changes generation. - * - * A single, global generation is needed because cpuset_attach_task() could - * reattach a task to a different cpuset, which must not have its - * generation numbers aliased with those of that tasks previous cpuset. - * - * Generations are needed for mems_allowed because one task cannot - * modify another's memory placement. So we must enable every task, - * on every visit to __alloc_pages(), to efficiently check whether - * its current->cpuset->mems_allowed has changed, requiring an update - * of its current->mems_allowed. - * - * Since writes to cpuset_mems_generation are guarded by the cgroup lock - * there is no need to mark it atomic. - */ -static int cpuset_mems_generation; - static struct cpuset top_cpuset = { .flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)), }; @@ -228,8 +201,9 @@ static struct cpuset top_cpuset = { * If a task is only holding callback_mutex, then it has read-only * access to cpusets. * - * The task_struct fields mems_allowed and mems_generation may only - * be accessed in the context of that task, so require no locks. + * Now, the task_struct fields mems_allowed and mempolicy may be changed + * by other task, we use alloc_lock in the task_struct fields to protect + * them. * * The cpuset_common_file_read() handlers only hold callback_mutex across * small pieces of code, such as when reading out possibly multi-word @@ -349,69 +323,6 @@ static void cpuset_update_task_spread_flag(struct cpuset *cs, tsk->flags &= ~PF_SPREAD_SLAB; } -/** - * cpuset_update_task_memory_state - update task memory placement - * - * If the current tasks cpusets mems_allowed changed behind our - * backs, update current->mems_allowed, mems_generation and task NUMA - * mempolicy to the new value. - * - * Task mempolicy is updated by rebinding it relative to the - * current->cpuset if a task has its memory placement changed. - * Do not call this routine if in_interrupt(). - * - * Call without callback_mutex or task_lock() held. May be - * called with or without cgroup_mutex held. Thanks in part to - * 'the_top_cpuset_hack', the task's cpuset pointer will never - * be NULL. This routine also might acquire callback_mutex during - * call. - * - * Reading current->cpuset->mems_generation doesn't need task_lock - * to guard the current->cpuset derefence, because it is guarded - * from concurrent freeing of current->cpuset using RCU. - * - * The rcu_dereference() is technically probably not needed, - * as I don't actually mind if I see a new cpuset pointer but - * an old value of mems_generation. However this really only - * matters on alpha systems using cpusets heavily. If I dropped - * that rcu_dereference(), it would save them a memory barrier. - * For all other arch's, rcu_dereference is a no-op anyway, and for - * alpha systems not using cpusets, another planned optimization, - * avoiding the rcu critical section for tasks in the root cpuset - * which is statically allocated, so can't vanish, will make this - * irrelevant. 
Better to use RCU as intended, than to engage in - * some cute trick to save a memory barrier that is impossible to - * test, for alpha systems using cpusets heavily, which might not - * even exist. - * - * This routine is needed to update the per-task mems_allowed data, - * within the tasks context, when it is trying to allocate memory - * (in various mm/mempolicy.c routines) and notices that some other - * task has been modifying its cpuset. - */ - -void cpuset_update_task_memory_state(void) -{ - int my_cpusets_mem_gen; - struct task_struct *tsk = current; - struct cpuset *cs; - - rcu_read_lock(); - my_cpusets_mem_gen = task_cs(tsk)->mems_generation; - rcu_read_unlock(); - - if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) { - mutex_lock(&callback_mutex); - task_lock(tsk); - cs = task_cs(tsk); /* Maybe changed when task not locked */ - guarantee_online_mems(cs, &tsk->mems_allowed); - tsk->cpuset_mems_generation = cs->mems_generation; - task_unlock(tsk); - mutex_unlock(&callback_mutex); - mpol_rebind_task(tsk, &tsk->mems_allowed); - } -} - /* * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q? * @@ -1017,14 +928,6 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, * other task, the task_struct mems_allowed that we are hacking * is for our current task, which must allocate new pages for that * migrating memory region. - * - * We call cpuset_update_task_memory_state() before hacking - * our tasks mems_allowed, so that we are assured of being in - * sync with our tasks cpuset, and in particular, callbacks to - * cpuset_update_task_memory_state() from nested page allocations - * won't see any mismatch of our cpuset and task mems_generation - * values, so won't overwrite our hacked tasks mems_allowed - * nodemask. */ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, @@ -1032,22 +935,37 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, { struct task_struct *tsk = current; - cpuset_update_task_memory_state(); - - mutex_lock(&callback_mutex); tsk->mems_allowed = *to; - mutex_unlock(&callback_mutex); do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL); - mutex_lock(&callback_mutex); guarantee_online_mems(task_cs(tsk),&tsk->mems_allowed); - mutex_unlock(&callback_mutex); } /* - * Rebind task's vmas to cpuset's new mems_allowed, and migrate pages to new - * nodes if memory_migrate flag is set. Called with cgroup_mutex held. + * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy + * @tsk: the task to change + * @newmems: new nodes that the task will be set + * + * In order to avoid seeing no nodes if the old and new nodes are disjoint, + * we structure updates as setting all new allowed nodes, then clearing newly + * disallowed ones. + * + * Called with task's alloc_lock held + */ +static void cpuset_change_task_nodemask(struct task_struct *tsk, + nodemask_t *newmems) +{ + nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems); + mpol_rebind_task(tsk, &tsk->mems_allowed); + mpol_rebind_task(tsk, newmems); + tsk->mems_allowed = *newmems; +} + +/* + * Update task's mems_allowed and rebind its mempolicy and vmas' mempolicy + * of it to cpuset's new mems_allowed, and migrate pages to new nodes if + * memory_migrate flag is set. Called with cgroup_mutex held. 
*/ static void cpuset_change_nodemask(struct task_struct *p, struct cgroup_scanner *scan) @@ -1056,12 +974,19 @@ static void cpuset_change_nodemask(struct task_struct *p, struct cpuset *cs; int migrate; const nodemask_t *oldmem = scan->data; + nodemask_t newmems; + + cs = cgroup_cs(scan->cg); + guarantee_online_mems(cs, &newmems); + + task_lock(p); + cpuset_change_task_nodemask(p, &newmems); + task_unlock(p); mm = get_task_mm(p); if (!mm) return; - cs = cgroup_cs(scan->cg); migrate = is_memory_migrate(cs); mpol_rebind_mm(mm, &cs->mems_allowed); @@ -1114,10 +1039,10 @@ static void update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem, /* * Handle user request to change the 'mems' memory placement * of a cpuset. Needs to validate the request, update the - * cpusets mems_allowed and mems_generation, and for each - * task in the cpuset, rebind any vma mempolicies and if - * the cpuset is marked 'memory_migrate', migrate the tasks - * pages to the new memory. + * cpusets mems_allowed, and for each task in the cpuset, + * update mems_allowed and rebind task's mempolicy and any vma + * mempolicies and if the cpuset is marked 'memory_migrate', + * migrate the tasks pages to the new memory. * * Call with cgroup_mutex held. May take callback_mutex during call. * Will take tasklist_lock, scan tasklist for tasks in cpuset cs, @@ -1170,7 +1095,6 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, mutex_lock(&callback_mutex); cs->mems_allowed = trialcs->mems_allowed; - cs->mems_generation = cpuset_mems_generation++; mutex_unlock(&callback_mutex); update_tasks_nodemask(cs, &oldmem, &heap); @@ -1434,15 +1358,18 @@ static void cpuset_attach(struct cgroup_subsys *ss, if (cs == &top_cpuset) { cpumask_copy(cpus_attach, cpu_possible_mask); + to = node_possible_map; } else { - mutex_lock(&callback_mutex); guarantee_online_cpus(cs, cpus_attach); - mutex_unlock(&callback_mutex); + guarantee_online_mems(cs, &to); } err = set_cpus_allowed_ptr(tsk, cpus_attach); if (err) return; + task_lock(tsk); + cpuset_change_task_nodemask(tsk, &to); + task_unlock(tsk); cpuset_update_task_spread_flag(cs, tsk); from = oldcs->mems_allowed; @@ -1848,8 +1775,6 @@ static struct cgroup_subsys_state *cpuset_create( struct cpuset *parent; if (!cont->parent) { - /* This is early initialization for the top cgroup */ - top_cpuset.mems_generation = cpuset_mems_generation++; return &top_cpuset.css; } parent = cgroup_cs(cont->parent); @@ -1861,7 +1786,6 @@ static struct cgroup_subsys_state *cpuset_create( return ERR_PTR(-ENOMEM); } - cpuset_update_task_memory_state(); cs->flags = 0; if (is_spread_page(parent)) set_bit(CS_SPREAD_PAGE, &cs->flags); @@ -1870,7 +1794,6 @@ static struct cgroup_subsys_state *cpuset_create( set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); cpumask_clear(cs->cpus_allowed); nodes_clear(cs->mems_allowed); - cs->mems_generation = cpuset_mems_generation++; fmeter_init(&cs->fmeter); cs->relax_domain_level = -1; @@ -1889,8 +1812,6 @@ static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont) { struct cpuset *cs = cgroup_cs(cont); - cpuset_update_task_memory_state(); - if (is_sched_load_balance(cs)) update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); @@ -1911,21 +1832,6 @@ struct cgroup_subsys cpuset_subsys = { .early_init = 1, }; -/* - * cpuset_init_early - just enough so that the calls to - * cpuset_update_task_memory_state() in early init code - * are harmless. 
- */ - -int __init cpuset_init_early(void) -{ - alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_NOWAIT); - - top_cpuset.mems_generation = cpuset_mems_generation++; - return 0; -} - - /** * cpuset_init - initialize cpusets at system boot * @@ -1936,11 +1842,13 @@ int __init cpuset_init(void) { int err = 0; + if (!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL)) + BUG(); + cpumask_setall(top_cpuset.cpus_allowed); nodes_setall(top_cpuset.mems_allowed); fmeter_init(&top_cpuset.fmeter); - top_cpuset.mems_generation = cpuset_mems_generation++; set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags); top_cpuset.relax_domain_level = -1; diff --git a/kernel/kthread.c b/kernel/kthread.c index 41c88fe40500..7fa441333529 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -236,6 +237,7 @@ int kthreadd(void *unused) ignore_signals(tsk); set_user_nice(tsk, KTHREAD_NICE_LEVEL); set_cpus_allowed_ptr(tsk, cpu_all_mask); + set_mems_allowed(node_possible_map); current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG; diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 3eb4a6fdc043..46bdf9ddf2ba 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -182,13 +182,54 @@ static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes) return 0; } -/* Create a new policy */ +/* + * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if + * any, for the new policy. mpol_new() has already validated the nodes + * parameter with respect to the policy mode and flags. But, we need to + * handle an empty nodemask with MPOL_PREFERRED here. + * + * Must be called holding task's alloc_lock to protect task's mems_allowed + * and mempolicy. May also be called holding the mmap_semaphore for write. + */ +static int mpol_set_nodemask(struct mempolicy *pol, const nodemask_t *nodes) +{ + nodemask_t cpuset_context_nmask; + int ret; + + /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */ + if (pol == NULL) + return 0; + + VM_BUG_ON(!nodes); + if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes)) + nodes = NULL; /* explicit local allocation */ + else { + if (pol->flags & MPOL_F_RELATIVE_NODES) + mpol_relative_nodemask(&cpuset_context_nmask, nodes, + &cpuset_current_mems_allowed); + else + nodes_and(cpuset_context_nmask, *nodes, + cpuset_current_mems_allowed); + if (mpol_store_user_nodemask(pol)) + pol->w.user_nodemask = *nodes; + else + pol->w.cpuset_mems_allowed = + cpuset_current_mems_allowed; + } + + ret = mpol_ops[pol->mode].create(pol, + nodes ? &cpuset_context_nmask : NULL); + return ret; +} + +/* + * This function just creates a new policy, does some check and simple + * initialization. You must invoke mpol_set_nodemask() to set nodes. + */ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags, nodemask_t *nodes) { struct mempolicy *policy; - nodemask_t cpuset_context_nmask; - int ret; pr_debug("setting mode %d flags %d nodes[0] %lx\n", mode, flags, nodes ? 
nodes_addr(*nodes)[0] : -1); @@ -210,7 +251,6 @@ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags, if (((flags & MPOL_F_STATIC_NODES) || (flags & MPOL_F_RELATIVE_NODES))) return ERR_PTR(-EINVAL); - nodes = NULL; /* flag local alloc */ } } else if (nodes_empty(*nodes)) return ERR_PTR(-EINVAL); @@ -221,30 +261,6 @@ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags, policy->mode = mode; policy->flags = flags; - if (nodes) { - /* - * cpuset related setup doesn't apply to local allocation - */ - cpuset_update_task_memory_state(); - if (flags & MPOL_F_RELATIVE_NODES) - mpol_relative_nodemask(&cpuset_context_nmask, nodes, - &cpuset_current_mems_allowed); - else - nodes_and(cpuset_context_nmask, *nodes, - cpuset_current_mems_allowed); - if (mpol_store_user_nodemask(policy)) - policy->w.user_nodemask = *nodes; - else - policy->w.cpuset_mems_allowed = - cpuset_mems_allowed(current); - } - - ret = mpol_ops[mode].create(policy, - nodes ? &cpuset_context_nmask : NULL); - if (ret < 0) { - kmem_cache_free(policy_cache, policy); - return ERR_PTR(ret); - } return policy; } @@ -324,6 +340,8 @@ static void mpol_rebind_policy(struct mempolicy *pol, /* * Wrapper for mpol_rebind_policy() that just requires task * pointer, and updates task mempolicy. + * + * Called with task's alloc_lock held. */ void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new) @@ -600,8 +618,9 @@ static void mpol_set_task_struct_flag(void) static long do_set_mempolicy(unsigned short mode, unsigned short flags, nodemask_t *nodes) { - struct mempolicy *new; + struct mempolicy *new, *old; struct mm_struct *mm = current->mm; + int ret; new = mpol_new(mode, flags, nodes); if (IS_ERR(new)) @@ -615,20 +634,33 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags, */ if (mm) down_write(&mm->mmap_sem); - mpol_put(current->mempolicy); + task_lock(current); + ret = mpol_set_nodemask(new, nodes); + if (ret) { + task_unlock(current); + if (mm) + up_write(&mm->mmap_sem); + mpol_put(new); + return ret; + } + old = current->mempolicy; current->mempolicy = new; mpol_set_task_struct_flag(); if (new && new->mode == MPOL_INTERLEAVE && nodes_weight(new->v.nodes)) current->il_next = first_node(new->v.nodes); + task_unlock(current); if (mm) up_write(&mm->mmap_sem); + mpol_put(old); return 0; } /* * Return nodemask for policy for get_mempolicy() query + * + * Called with task's alloc_lock held */ static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes) { @@ -674,7 +706,6 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask, struct vm_area_struct *vma = NULL; struct mempolicy *pol = current->mempolicy; - cpuset_update_task_memory_state(); if (flags & ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED)) return -EINVAL; @@ -683,7 +714,9 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask, if (flags & (MPOL_F_NODE|MPOL_F_ADDR)) return -EINVAL; *policy = 0; /* just so it's initialized */ + task_lock(current); *nmask = cpuset_current_mems_allowed; + task_unlock(current); return 0; } @@ -738,8 +771,11 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask, } err = 0; - if (nmask) + if (nmask) { + task_lock(current); get_policy_nodemask(pol, nmask); + task_unlock(current); + } out: mpol_cond_put(pol); @@ -979,6 +1015,14 @@ static long do_mbind(unsigned long start, unsigned long len, return err; } down_write(&mm->mmap_sem); + task_lock(current); + err = mpol_set_nodemask(new, nmask); + task_unlock(current); + if (err) { + 
up_write(&mm->mmap_sem); + mpol_put(new); + return err; + } vma = check_range(mm, start, end, nmask, flags | MPOL_MF_INVERT, &pagelist); @@ -1545,8 +1589,6 @@ alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr) struct mempolicy *pol = get_vma_policy(current, vma, addr); struct zonelist *zl; - cpuset_update_task_memory_state(); - if (unlikely(pol->mode == MPOL_INTERLEAVE)) { unsigned nid; @@ -1593,8 +1635,6 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order) { struct mempolicy *pol = current->mempolicy; - if ((gfp & __GFP_WAIT) && !in_interrupt()) - cpuset_update_task_memory_state(); if (!pol || in_interrupt() || (gfp & __GFP_THISNODE)) pol = &default_policy; @@ -1854,6 +1894,8 @@ restart: */ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) { + int ret; + sp->root = RB_ROOT; /* empty tree == default mempolicy */ spin_lock_init(&sp->lock); @@ -1863,9 +1905,19 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) /* contextualize the tmpfs mount point mempolicy */ new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); - mpol_put(mpol); /* drop our ref on sb mpol */ - if (IS_ERR(new)) + if (IS_ERR(new)) { + mpol_put(mpol); /* drop our ref on sb mpol */ return; /* no valid nodemask intersection */ + } + + task_lock(current); + ret = mpol_set_nodemask(new, &mpol->w.user_nodemask); + task_unlock(current); + mpol_put(mpol); /* drop our ref on sb mpol */ + if (ret) { + mpol_put(new); + return; + } /* Create pseudo-vma that contains just the policy */ memset(&pvma, 0, sizeof(struct vm_area_struct)); @@ -2086,8 +2138,19 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context) new = mpol_new(mode, mode_flags, &nodes); if (IS_ERR(new)) err = 1; - else if (no_context) - new->w.user_nodemask = nodes; /* save for contextualization */ + else { + int ret; + + task_lock(current); + ret = mpol_set_nodemask(new, &nodes); + task_unlock(current); + if (ret) + err = 1; + else if (no_context) { + /* save for contextualization */ + new->w.user_nodemask = nodes; + } + } out: /* Restore string for error message */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 17d5f539a9aa..7cc3179e3591 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1569,10 +1569,7 @@ nofail_alloc: /* We now go into synchronous reclaim */ cpuset_memory_pressure_bump(); - /* - * The task's cpuset might have expanded its set of allowable nodes - */ - cpuset_update_task_memory_state(); + p->flags |= PF_MEMALLOC; lockdep_set_current_reclaim_state(gfp_mask); -- cgit v1.2.3 From 6484eb3e2a81807722c5f28efef94d8338b7b996 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Tue, 16 Jun 2009 15:31:54 -0700 Subject: page allocator: do not check NUMA node ID when the caller knows the node is valid Callers of alloc_pages_node() can optionally specify -1 as a node to mean "allocate from the current node". However, a number of the callers in fast paths know for a fact their node is valid. To avoid a comparison and branch, this patch adds alloc_pages_exact_node() that only checks the nid with VM_BUG_ON(). Callers that know their node is valid are then converted. 
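For illustration, a rough side-by-side sketch of the two helpers: the alloc_pages_exact_node() body matches the include/linux/gfp.h hunk below, while the alloc_pages_node() body is paraphrased from a contemporary tree and is not part of this patch:

/* Existing helper: tolerates nid == -1 ("use the current node"),
 * paying a compare-and-branch on every call.
 */
static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	if (nid < 0)
		nid = numa_node_id();

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}

/* New helper: the caller guarantees a valid nid, so the runtime check
 * is reduced to a debug-only assertion that compiles away without
 * CONFIG_DEBUG_VM.
 */
static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}
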
Signed-off-by: Mel Gorman Reviewed-by: Christoph Lameter Reviewed-by: KOSAKI Motohiro Reviewed-by: Pekka Enberg Acked-by: Paul Mundt [for the SLOB NUMA bits] Cc: Peter Zijlstra Cc: Nick Piggin Cc: Dave Hansen Cc: Lee Schermerhorn Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/hp/common/sba_iommu.c | 2 +- arch/ia64/kernel/mca.c | 3 +-- arch/ia64/kernel/uncached.c | 3 ++- arch/ia64/sn/pci/pci_dma.c | 3 ++- arch/powerpc/platforms/cell/ras.c | 4 ++-- arch/x86/kvm/vmx.c | 2 +- drivers/misc/sgi-gru/grufile.c | 2 +- drivers/misc/sgi-xp/xpc_uv.c | 2 +- include/linux/gfp.h | 9 +++++++++ include/linux/mm.h | 1 - kernel/profile.c | 8 ++++---- mm/filemap.c | 2 +- mm/hugetlb.c | 4 ++-- mm/mempolicy.c | 2 +- mm/migrate.c | 2 +- mm/slab.c | 4 ++-- mm/slob.c | 4 ++-- 17 files changed, 33 insertions(+), 24 deletions(-) (limited to 'kernel') diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index 56ceb68eb99d..fe63b2dc9d07 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -1131,7 +1131,7 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp #ifdef CONFIG_NUMA { struct page *page; - page = alloc_pages_node(ioc->node == MAX_NUMNODES ? + page = alloc_pages_exact_node(ioc->node == MAX_NUMNODES ? numa_node_id() : ioc->node, flags, get_order(size)); diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 8f33a8840422..5b17bd402275 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -1829,8 +1829,7 @@ ia64_mca_cpu_init(void *cpu_data) data = mca_bootmem(); first_time = 0; } else - data = page_address(alloc_pages_node(numa_node_id(), - GFP_KERNEL, get_order(sz))); + data = __get_free_pages(GFP_KERNEL, get_order(sz)); if (!data) panic("Could not allocate MCA memory for cpu %d\n", cpu); diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c index 8eff8c1d40a6..6ba72ab42fcc 100644 --- a/arch/ia64/kernel/uncached.c +++ b/arch/ia64/kernel/uncached.c @@ -98,7 +98,8 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid) /* attempt to allocate a granule's worth of cached memory pages */ - page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, + page = alloc_pages_exact_node(nid, + GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, IA64_GRANULE_SHIFT-PAGE_SHIFT); if (!page) { mutex_unlock(&uc_pool->add_chunk_mutex); diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c index d876423e4e75..98b684928e12 100644 --- a/arch/ia64/sn/pci/pci_dma.c +++ b/arch/ia64/sn/pci/pci_dma.c @@ -90,7 +90,8 @@ static void *sn_dma_alloc_coherent(struct device *dev, size_t size, */ node = pcibus_to_node(pdev->bus); if (likely(node >=0)) { - struct page *p = alloc_pages_node(node, flags, get_order(size)); + struct page *p = alloc_pages_exact_node(node, + flags, get_order(size)); if (likely(p)) cpuaddr = page_address(p); diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c index 296b5268754e..5e0a191764fc 100644 --- a/arch/powerpc/platforms/cell/ras.c +++ b/arch/powerpc/platforms/cell/ras.c @@ -122,8 +122,8 @@ static int __init cbe_ptcal_enable_on_node(int nid, int order) area->nid = nid; area->order = order; - area->pages = alloc_pages_node(area->nid, GFP_KERNEL | GFP_THISNODE, - area->order); + area->pages = alloc_pages_exact_node(area->nid, GFP_KERNEL|GFP_THISNODE, + area->order); if (!area->pages) { printk(KERN_WARNING "%s: no page on node %d\n", diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 
32d6ae8fb60e..e770bf349ec4 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1277,7 +1277,7 @@ static struct vmcs *alloc_vmcs_cpu(int cpu) struct page *pages; struct vmcs *vmcs; - pages = alloc_pages_node(node, GFP_KERNEL, vmcs_config.order); + pages = alloc_pages_exact_node(node, GFP_KERNEL, vmcs_config.order); if (!pages) return NULL; vmcs = page_address(pages); diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c index bbefe77c67a9..3ce2920e2bf3 100644 --- a/drivers/misc/sgi-gru/grufile.c +++ b/drivers/misc/sgi-gru/grufile.c @@ -302,7 +302,7 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr) pnode = uv_node_to_pnode(nid); if (bid < 0 || gru_base[bid]) continue; - page = alloc_pages_node(nid, GFP_KERNEL, order); + page = alloc_pages_exact_node(nid, GFP_KERNEL, order); if (!page) goto fail; gru_base[bid] = page_address(page); diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c index 9172fcdee4e2..c76677afda1b 100644 --- a/drivers/misc/sgi-xp/xpc_uv.c +++ b/drivers/misc/sgi-xp/xpc_uv.c @@ -232,7 +232,7 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name, mq->mmr_blade = uv_cpu_to_blade_id(cpu); nid = cpu_to_node(cpu); - page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, + page = alloc_pages_exact_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, pg_order); if (page == NULL) { dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d " diff --git a/include/linux/gfp.h b/include/linux/gfp.h index c2d3fe03b5d2..4efa33088a82 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -5,6 +5,7 @@ #include #include #include +#include struct vm_area_struct; @@ -192,6 +193,14 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask)); } +static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask, + unsigned int order) +{ + VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES); + + return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask)); +} + #ifdef CONFIG_NUMA extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order); diff --git a/include/linux/mm.h b/include/linux/mm.h index a880161a3854..7b548e7cfbd9 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -7,7 +7,6 @@ #include #include -#include #include #include #include diff --git a/kernel/profile.c b/kernel/profile.c index 28cf26ad2d24..69911b5745eb 100644 --- a/kernel/profile.c +++ b/kernel/profile.c @@ -365,7 +365,7 @@ static int __cpuinit profile_cpu_callback(struct notifier_block *info, node = cpu_to_node(cpu); per_cpu(cpu_profile_flip, cpu) = 0; if (!per_cpu(cpu_profile_hits, cpu)[1]) { - page = alloc_pages_node(node, + page = alloc_pages_exact_node(node, GFP_KERNEL | __GFP_ZERO, 0); if (!page) @@ -373,7 +373,7 @@ static int __cpuinit profile_cpu_callback(struct notifier_block *info, per_cpu(cpu_profile_hits, cpu)[1] = page_address(page); } if (!per_cpu(cpu_profile_hits, cpu)[0]) { - page = alloc_pages_node(node, + page = alloc_pages_exact_node(node, GFP_KERNEL | __GFP_ZERO, 0); if (!page) @@ -564,14 +564,14 @@ static int create_hash_tables(void) int node = cpu_to_node(cpu); struct page *page; - page = alloc_pages_node(node, + page = alloc_pages_exact_node(node, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, 0); if (!page) goto out_cleanup; per_cpu(cpu_profile_hits, cpu)[1] = (struct profile_hit *)page_address(page); - page = alloc_pages_node(node, + page = alloc_pages_exact_node(node, GFP_KERNEL | 
__GFP_ZERO | GFP_THISNODE, 0); if (!page) diff --git a/mm/filemap.c b/mm/filemap.c index 6846a902f5cf..22396713feb9 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -521,7 +521,7 @@ struct page *__page_cache_alloc(gfp_t gfp) { if (cpuset_do_page_mem_spread()) { int n = cpuset_mem_spread_node(); - return alloc_pages_node(n, gfp, 0); + return alloc_pages_exact_node(n, gfp, 0); } return alloc_pages(gfp, 0); } diff --git a/mm/hugetlb.c b/mm/hugetlb.c index e83ad2c9228c..2f8241f300f5 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -630,7 +630,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid) if (h->order >= MAX_ORDER) return NULL; - page = alloc_pages_node(nid, + page = alloc_pages_exact_node(nid, htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE| __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h)); @@ -649,7 +649,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid) * Use a helper variable to find the next node and then * copy it back to hugetlb_next_nid afterwards: * otherwise there's a window in which a racer might - * pass invalid nid MAX_NUMNODES to alloc_pages_node. + * pass invalid nid MAX_NUMNODES to alloc_pages_exact_node. * But we don't need to use a spin_lock here: it really * doesn't matter if occasionally a racer chooses the * same nid as we do. Move nid forward in the mask even diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 46bdf9ddf2ba..e08e2c4da63a 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -803,7 +803,7 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist, static struct page *new_node_page(struct page *page, unsigned long node, int **x) { - return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0); + return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0); } /* diff --git a/mm/migrate.c b/mm/migrate.c index 068655d8f883..5a24923e7fd7 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -802,7 +802,7 @@ static struct page *new_page_node(struct page *p, unsigned long private, *result = &pm->status; - return alloc_pages_node(pm->node, + return alloc_pages_exact_node(pm->node, GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0); } diff --git a/mm/slab.c b/mm/slab.c index 18e3164de09a..bb3254c95cd2 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1707,7 +1707,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid) if (cachep->flags & SLAB_RECLAIM_ACCOUNT) flags |= __GFP_RECLAIMABLE; - page = alloc_pages_node(nodeid, flags, cachep->gfporder); + page = alloc_pages_exact_node(nodeid, flags, cachep->gfporder); if (!page) return NULL; @@ -3261,7 +3261,7 @@ retry: if (local_flags & __GFP_WAIT) local_irq_enable(); kmem_flagcheck(cache, flags); - obj = kmem_getpages(cache, local_flags, -1); + obj = kmem_getpages(cache, local_flags, numa_node_id()); if (local_flags & __GFP_WAIT) local_irq_disable(); if (obj) { diff --git a/mm/slob.c b/mm/slob.c index 12f261499925..64f6db1943bf 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -46,7 +46,7 @@ * NUMA support in SLOB is fairly simplistic, pushing most of the real * logic down to the page allocator, and simply doing the node accounting * on the upper levels. In the event that a node id is explicitly - * provided, alloc_pages_node() with the specified node id is used + * provided, alloc_pages_exact_node() with the specified node id is used * instead. The common case (or when the node id isn't explicitly provided) * will default to the current node, as per numa_node_id(). 
* @@ -244,7 +244,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node) #ifdef CONFIG_NUMA if (node != -1) - page = alloc_pages_node(node, gfp, order); + page = alloc_pages_exact_node(node, gfp, order); else #endif page = alloc_pages(gfp, order); -- cgit v1.2.3 From 7f33d49a2ed546e01f7b1d0607661810f2421859 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 16 Jun 2009 15:32:41 -0700 Subject: mm, PM/Freezer: Disable OOM killer when tasks are frozen Currently, the following scenario appears to be possible in theory: * Tasks are frozen for hibernation or suspend. * Free pages are almost exhausted. * Certain piece of code in the suspend code path attempts to allocate some memory using GFP_KERNEL and allocation order less than or equal to PAGE_ALLOC_COSTLY_ORDER. * __alloc_pages_internal() cannot find a free page so it invokes the OOM killer. * The OOM killer attempts to kill a task, but the task is frozen, so it doesn't die immediately. * __alloc_pages_internal() jumps to 'restart', unsuccessfully tries to find a free page and invokes the OOM killer. * No progress can be made. Although it is now hard to trigger during hibernation due to the memory shrinking carried out by the hibernation code, it is theoretically possible to trigger during suspend after the memory shrinking has been removed from that code path. Moreover, since memory allocations are going to be used for the hibernation memory shrinking, it will be even more likely to happen during hibernation. To prevent it from happening, introduce the oom_killer_disabled switch that will cause __alloc_pages_internal() to fail in the situations in which the OOM killer would have been called and make the freezer set this switch after tasks have been successfully frozen. [akpm@linux-foundation.org: be nicer to the namespace] Signed-off-by: Rafael J. Wysocki Cc: Fengguang Wu Cc: David Rientjes Acked-by: Pavel Machek Cc: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/gfp.h | 12 ++++++++++++ kernel/power/process.c | 5 +++++ mm/page_alloc.c | 4 ++++ 3 files changed, 21 insertions(+) (limited to 'kernel') diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 4efa33088a82..06b7e8cc80ac 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -243,4 +243,16 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp); void drain_all_pages(void); void drain_local_pages(void *dummy); +extern bool oom_killer_disabled; + +static inline void oom_killer_disable(void) +{ + oom_killer_disabled = true; +} + +static inline void oom_killer_enable(void) +{ + oom_killer_disabled = false; +} + #endif /* __LINUX_GFP_H */ diff --git a/kernel/power/process.c b/kernel/power/process.c index ca634019497a..da2072d73811 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -117,9 +117,12 @@ int freeze_processes(void) if (error) goto Exit; printk("done."); + + oom_killer_disable(); Exit: BUG_ON(in_atomic()); printk("\n"); + return error; } @@ -145,6 +148,8 @@ static void thaw_tasks(bool nosig_only) void thaw_processes(void) { + oom_killer_enable(); + printk("Restarting tasks ... 
"); thaw_tasks(true); thaw_tasks(false); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 61290ea721c8..5b09488d0f55 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -178,6 +178,8 @@ static void set_pageblock_migratetype(struct page *page, int migratetype) PB_migrate, PB_migrate_end); } +bool oom_killer_disabled __read_mostly; + #ifdef CONFIG_DEBUG_VM static int page_outside_zone_boundaries(struct zone *zone, struct page *page) { @@ -1769,6 +1771,8 @@ rebalance: */ if (!did_some_progress) { if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) { + if (oom_killer_disabled) + goto nopage; page = __alloc_pages_may_oom(gfp_mask, order, zonelist, high_zoneidx, nodemask, preferred_zone, -- cgit v1.2.3 From 6837765963f1723e80ca97b1fae660f3a60d77df Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Tue, 16 Jun 2009 15:32:51 -0700 Subject: mm: remove CONFIG_UNEVICTABLE_LRU config option Currently, nobody wants to turn UNEVICTABLE_LRU off. Thus this configurability is unnecessary. Signed-off-by: KOSAKI Motohiro Cc: Johannes Weiner Cc: Andi Kleen Acked-by: Minchan Kim Cc: David Woodhouse Cc: Matt Mackall Cc: Rik van Riel Cc: Lee Schermerhorn Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/base/node.c | 4 ---- fs/proc/meminfo.c | 4 ---- fs/proc/page.c | 2 -- include/linux/mmzone.h | 13 ------------- include/linux/page-flags.h | 16 +--------------- include/linux/pagemap.h | 12 ------------ include/linux/rmap.h | 7 ------- include/linux/swap.h | 19 ------------------- include/linux/vmstat.h | 2 -- kernel/sysctl.c | 2 -- mm/Kconfig | 14 +------------- mm/internal.h | 6 ------ mm/mlock.c | 22 ---------------------- mm/page_alloc.c | 9 --------- mm/rmap.c | 3 +-- mm/vmscan.c | 17 ----------------- mm/vmstat.c | 4 ---- 17 files changed, 3 insertions(+), 153 deletions(-) (limited to 'kernel') diff --git a/drivers/base/node.c b/drivers/base/node.c index 40b809742a1c..91d4087b4039 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -72,10 +72,8 @@ static ssize_t node_read_meminfo(struct sys_device * dev, "Node %d Inactive(anon): %8lu kB\n" "Node %d Active(file): %8lu kB\n" "Node %d Inactive(file): %8lu kB\n" -#ifdef CONFIG_UNEVICTABLE_LRU "Node %d Unevictable: %8lu kB\n" "Node %d Mlocked: %8lu kB\n" -#endif #ifdef CONFIG_HIGHMEM "Node %d HighTotal: %8lu kB\n" "Node %d HighFree: %8lu kB\n" @@ -105,10 +103,8 @@ static ssize_t node_read_meminfo(struct sys_device * dev, nid, K(node_page_state(nid, NR_INACTIVE_ANON)), nid, K(node_page_state(nid, NR_ACTIVE_FILE)), nid, K(node_page_state(nid, NR_INACTIVE_FILE)), -#ifdef CONFIG_UNEVICTABLE_LRU nid, K(node_page_state(nid, NR_UNEVICTABLE)), nid, K(node_page_state(nid, NR_MLOCK)), -#endif #ifdef CONFIG_HIGHMEM nid, K(i.totalhigh), nid, K(i.freehigh), diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index c6b0302af4c4..d5c410d47fae 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c @@ -64,10 +64,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v) "Inactive(anon): %8lu kB\n" "Active(file): %8lu kB\n" "Inactive(file): %8lu kB\n" -#ifdef CONFIG_UNEVICTABLE_LRU "Unevictable: %8lu kB\n" "Mlocked: %8lu kB\n" -#endif #ifdef CONFIG_HIGHMEM "HighTotal: %8lu kB\n" "HighFree: %8lu kB\n" @@ -109,10 +107,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v) K(pages[LRU_INACTIVE_ANON]), K(pages[LRU_ACTIVE_FILE]), K(pages[LRU_INACTIVE_FILE]), -#ifdef CONFIG_UNEVICTABLE_LRU K(pages[LRU_UNEVICTABLE]), K(global_page_state(NR_MLOCK)), -#endif #ifdef CONFIG_HIGHMEM K(i.totalhigh), K(i.freehigh), diff --git 
a/fs/proc/page.c b/fs/proc/page.c index 9d926bd279a4..2707c6c7a20f 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c @@ -172,10 +172,8 @@ static u64 get_uflags(struct page *page) u |= kpf_copy_bit(k, KPF_SWAPCACHE, PG_swapcache); u |= kpf_copy_bit(k, KPF_SWAPBACKED, PG_swapbacked); -#ifdef CONFIG_UNEVICTABLE_LRU u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable); u |= kpf_copy_bit(k, KPF_MLOCKED, PG_mlocked); -#endif #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached); diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index db976b9f8791..889598537370 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -83,13 +83,8 @@ enum zone_stat_item { NR_ACTIVE_ANON, /* " " " " " */ NR_INACTIVE_FILE, /* " " " " " */ NR_ACTIVE_FILE, /* " " " " " */ -#ifdef CONFIG_UNEVICTABLE_LRU NR_UNEVICTABLE, /* " " " " " */ NR_MLOCK, /* mlock()ed pages found and moved off LRU */ -#else - NR_UNEVICTABLE = NR_ACTIVE_FILE, /* avoid compiler errors in dead code */ - NR_MLOCK = NR_ACTIVE_FILE, -#endif NR_ANON_PAGES, /* Mapped anonymous pages */ NR_FILE_MAPPED, /* pagecache pages mapped into pagetables. only modified from process context */ @@ -132,11 +127,7 @@ enum lru_list { LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE, LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE, LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE, -#ifdef CONFIG_UNEVICTABLE_LRU LRU_UNEVICTABLE, -#else - LRU_UNEVICTABLE = LRU_ACTIVE_FILE, /* avoid compiler errors in dead code */ -#endif NR_LRU_LISTS }; @@ -156,11 +147,7 @@ static inline int is_active_lru(enum lru_list l) static inline int is_unevictable_lru(enum lru_list l) { -#ifdef CONFIG_UNEVICTABLE_LRU return (l == LRU_UNEVICTABLE); -#else - return 0; -#endif } enum zone_watermarks { diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 62214c7d2d93..d6792f88a176 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -95,9 +95,7 @@ enum pageflags { PG_reclaim, /* To be reclaimed asap */ PG_buddy, /* Page is free, on buddy lists */ PG_swapbacked, /* Page is backed by RAM/swap */ -#ifdef CONFIG_UNEVICTABLE_LRU PG_unevictable, /* Page is "unevictable" */ -#endif #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT PG_mlocked, /* Page is vma mlocked */ #endif @@ -248,14 +246,8 @@ PAGEFLAG_FALSE(SwapCache) SETPAGEFLAG_NOOP(SwapCache) CLEARPAGEFLAG_NOOP(SwapCache) #endif -#ifdef CONFIG_UNEVICTABLE_LRU PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable) TESTCLEARFLAG(Unevictable, unevictable) -#else -PAGEFLAG_FALSE(Unevictable) TESTCLEARFLAG_FALSE(Unevictable) - SETPAGEFLAG_NOOP(Unevictable) CLEARPAGEFLAG_NOOP(Unevictable) - __CLEARPAGEFLAG_NOOP(Unevictable) -#endif #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT #define MLOCK_PAGES 1 @@ -382,12 +374,6 @@ static inline void __ClearPageTail(struct page *page) #endif /* !PAGEFLAGS_EXTENDED */ -#ifdef CONFIG_UNEVICTABLE_LRU -#define __PG_UNEVICTABLE (1 << PG_unevictable) -#else -#define __PG_UNEVICTABLE 0 -#endif - #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT #define __PG_MLOCKED (1 << PG_mlocked) #else @@ -403,7 +389,7 @@ static inline void __ClearPageTail(struct page *page) 1 << PG_private | 1 << PG_private_2 | \ 1 << PG_buddy | 1 << PG_writeback | 1 << PG_reserved | \ 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ - __PG_UNEVICTABLE | __PG_MLOCKED) + 1 << PG_unevictable | __PG_MLOCKED) /* * Flags checked when a page is prepped for return by the page allocator. 
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 34da5230faab..aec3252afcf5 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -22,9 +22,7 @@ enum mapping_flags { AS_EIO = __GFP_BITS_SHIFT + 0, /* IO error on async write */ AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */ AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */ -#ifdef CONFIG_UNEVICTABLE_LRU AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */ -#endif }; static inline void mapping_set_error(struct address_space *mapping, int error) @@ -37,8 +35,6 @@ static inline void mapping_set_error(struct address_space *mapping, int error) } } -#ifdef CONFIG_UNEVICTABLE_LRU - static inline void mapping_set_unevictable(struct address_space *mapping) { set_bit(AS_UNEVICTABLE, &mapping->flags); @@ -55,14 +51,6 @@ static inline int mapping_unevictable(struct address_space *mapping) return test_bit(AS_UNEVICTABLE, &mapping->flags); return !!mapping; } -#else -static inline void mapping_set_unevictable(struct address_space *mapping) { } -static inline void mapping_clear_unevictable(struct address_space *mapping) { } -static inline int mapping_unevictable(struct address_space *mapping) -{ - return 0; -} -#endif static inline gfp_t mapping_gfp_mask(struct address_space * mapping) { diff --git a/include/linux/rmap.h b/include/linux/rmap.h index b35bc0e19cd9..619379a1dd98 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -105,18 +105,11 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *); */ int page_mkclean(struct page *); -#ifdef CONFIG_UNEVICTABLE_LRU /* * called in munlock()/munmap() path to check for other vmas holding * the page mlocked. */ int try_to_munlock(struct page *); -#else -static inline int try_to_munlock(struct page *page) -{ - return 0; /* a.k.a. 
SWAP_SUCCESS */ -} -#endif #else /* !CONFIG_MMU */ diff --git a/include/linux/swap.h b/include/linux/swap.h index d476aad3ff57..f30c06908f09 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -235,7 +235,6 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order) } #endif -#ifdef CONFIG_UNEVICTABLE_LRU extern int page_evictable(struct page *page, struct vm_area_struct *vma); extern void scan_mapping_unevictable_pages(struct address_space *); @@ -244,24 +243,6 @@ extern int scan_unevictable_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); extern int scan_unevictable_register_node(struct node *node); extern void scan_unevictable_unregister_node(struct node *node); -#else -static inline int page_evictable(struct page *page, - struct vm_area_struct *vma) -{ - return 1; -} - -static inline void scan_mapping_unevictable_pages(struct address_space *mapping) -{ -} - -static inline int scan_unevictable_register_node(struct node *node) -{ - return 0; -} - -static inline void scan_unevictable_unregister_node(struct node *node) { } -#endif extern int kswapd_run(int nid); diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 524cd1b28ecb..ff4696c6dce3 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -41,7 +41,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, #ifdef CONFIG_HUGETLB_PAGE HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL, #endif -#ifdef CONFIG_UNEVICTABLE_LRU UNEVICTABLE_PGCULLED, /* culled to noreclaim list */ UNEVICTABLE_PGSCANNED, /* scanned for reclaimability */ UNEVICTABLE_PGRESCUED, /* rescued from noreclaim list */ @@ -50,7 +49,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, UNEVICTABLE_PGCLEARED, /* on COW, page truncate */ UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */ UNEVICTABLE_MLOCKFREED, -#endif NR_VM_EVENT_ITEMS }; diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 0e51a35a4486..2ccee08f92f1 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -1325,7 +1325,6 @@ static struct ctl_table vm_table[] = { .extra2 = &one, }, #endif -#ifdef CONFIG_UNEVICTABLE_LRU { .ctl_name = CTL_UNNUMBERED, .procname = "scan_unevictable_pages", @@ -1334,7 +1333,6 @@ static struct ctl_table vm_table[] = { .mode = 0644, .proc_handler = &scan_unevictable_handler, }, -#endif /* * NOTE: do not add new entries to this table unless you have read * Documentation/sysctl/ctl_unnumbered.txt diff --git a/mm/Kconfig b/mm/Kconfig index 71830ba7b986..97d2c88b745e 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -203,25 +203,13 @@ config VIRT_TO_BUS def_bool y depends on !ARCH_NO_VIRT_TO_BUS -config UNEVICTABLE_LRU - bool "Add LRU list to track non-evictable pages" - default y - help - Keeps unevictable pages off of the active and inactive pageout - lists, so kswapd will not waste CPU time or have its balancing - algorithms thrown off by scanning these pages. Selecting this - will use one page flag and increase the code size a little, - say Y unless you know what you are doing. - - See Documentation/vm/unevictable-lru.txt for more information. 
- config HAVE_MLOCK bool default y if MMU=y config HAVE_MLOCKED_PAGE_BIT bool - default y if HAVE_MLOCK=y && UNEVICTABLE_LRU=y + default y if HAVE_MLOCK=y config MMU_NOTIFIER bool diff --git a/mm/internal.h b/mm/internal.h index b4ac332e8072..f02c7508068d 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -73,7 +73,6 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma) } #endif -#ifdef CONFIG_UNEVICTABLE_LRU /* * unevictable_migrate_page() called only from migrate_page_copy() to * migrate unevictable flag to new page. @@ -85,11 +84,6 @@ static inline void unevictable_migrate_page(struct page *new, struct page *old) if (TestClearPageUnevictable(old)) SetPageUnevictable(new); } -#else -static inline void unevictable_migrate_page(struct page *new, struct page *old) -{ -} -#endif #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT /* diff --git a/mm/mlock.c b/mm/mlock.c index ac130433c7d3..45eb650b9654 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -31,7 +31,6 @@ int can_do_mlock(void) } EXPORT_SYMBOL(can_do_mlock); -#ifdef CONFIG_UNEVICTABLE_LRU /* * Mlocked pages are marked with PageMlocked() flag for efficient testing * in vmscan and, possibly, the fault path; and to support semi-accurate @@ -261,27 +260,6 @@ static int __mlock_posix_error_return(long retval) return retval; } -#else /* CONFIG_UNEVICTABLE_LRU */ - -/* - * Just make pages present if VM_LOCKED. No-op if unlocking. - */ -static long __mlock_vma_pages_range(struct vm_area_struct *vma, - unsigned long start, unsigned long end, - int mlock) -{ - if (mlock && (vma->vm_flags & VM_LOCKED)) - return make_pages_present(start, end); - return 0; -} - -static inline int __mlock_posix_error_return(long retval) -{ - return 0; -} - -#endif /* CONFIG_UNEVICTABLE_LRU */ - /** * mlock_vma_pages_range() - mlock pages in specified vma range. * @vma - the vma containing the specfied address range diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 00e293734fc9..c95a77cd581b 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2077,19 +2077,14 @@ void show_free_areas(void) printk("Active_anon:%lu active_file:%lu inactive_anon:%lu\n" " inactive_file:%lu" -//TODO: check/adjust line lengths -#ifdef CONFIG_UNEVICTABLE_LRU " unevictable:%lu" -#endif " dirty:%lu writeback:%lu unstable:%lu\n" " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n", global_page_state(NR_ACTIVE_ANON), global_page_state(NR_ACTIVE_FILE), global_page_state(NR_INACTIVE_ANON), global_page_state(NR_INACTIVE_FILE), -#ifdef CONFIG_UNEVICTABLE_LRU global_page_state(NR_UNEVICTABLE), -#endif global_page_state(NR_FILE_DIRTY), global_page_state(NR_WRITEBACK), global_page_state(NR_UNSTABLE_NFS), @@ -2113,9 +2108,7 @@ void show_free_areas(void) " inactive_anon:%lukB" " active_file:%lukB" " inactive_file:%lukB" -#ifdef CONFIG_UNEVICTABLE_LRU " unevictable:%lukB" -#endif " present:%lukB" " pages_scanned:%lu" " all_unreclaimable? %s" @@ -2129,9 +2122,7 @@ void show_free_areas(void) K(zone_page_state(zone, NR_INACTIVE_ANON)), K(zone_page_state(zone, NR_ACTIVE_FILE)), K(zone_page_state(zone, NR_INACTIVE_FILE)), -#ifdef CONFIG_UNEVICTABLE_LRU K(zone_page_state(zone, NR_UNEVICTABLE)), -#endif K(zone->present_pages), zone->pages_scanned, (zone_is_all_unreclaimable(zone) ? 
"yes" : "no") diff --git a/mm/rmap.c b/mm/rmap.c index 23122af32611..316c9d6930ad 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1202,7 +1202,6 @@ int try_to_unmap(struct page *page, int migration) return ret; } -#ifdef CONFIG_UNEVICTABLE_LRU /** * try_to_munlock - try to munlock a page * @page: the page to be munlocked @@ -1226,4 +1225,4 @@ int try_to_munlock(struct page *page) else return try_to_unmap_file(page, 1, 0); } -#endif + diff --git a/mm/vmscan.c b/mm/vmscan.c index 879d034930c4..2c4b945b011f 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -514,7 +514,6 @@ int remove_mapping(struct address_space *mapping, struct page *page) * * lru_lock must not be held, interrupts must be enabled. */ -#ifdef CONFIG_UNEVICTABLE_LRU void putback_lru_page(struct page *page) { int lru; @@ -568,20 +567,6 @@ redo: put_page(page); /* drop ref from isolate */ } -#else /* CONFIG_UNEVICTABLE_LRU */ - -void putback_lru_page(struct page *page) -{ - int lru; - VM_BUG_ON(PageLRU(page)); - - lru = !!TestClearPageActive(page) + page_is_file_cache(page); - lru_cache_add_lru(page, lru); - put_page(page); -} -#endif /* CONFIG_UNEVICTABLE_LRU */ - - /* * shrink_page_list() returns the number of reclaimed pages */ @@ -2470,7 +2455,6 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) } #endif -#ifdef CONFIG_UNEVICTABLE_LRU /* * page_evictable - test whether a page is evictable * @page: the page to test @@ -2717,4 +2701,3 @@ void scan_unevictable_unregister_node(struct node *node) sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages); } -#endif diff --git a/mm/vmstat.c b/mm/vmstat.c index 1e151cf6bf86..1e3aa8139f22 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -629,10 +629,8 @@ static const char * const vmstat_text[] = { "nr_active_anon", "nr_inactive_file", "nr_active_file", -#ifdef CONFIG_UNEVICTABLE_LRU "nr_unevictable", "nr_mlock", -#endif "nr_anon_pages", "nr_mapped", "nr_file_pages", @@ -687,7 +685,6 @@ static const char * const vmstat_text[] = { "htlb_buddy_alloc_success", "htlb_buddy_alloc_fail", #endif -#ifdef CONFIG_UNEVICTABLE_LRU "unevictable_pgs_culled", "unevictable_pgs_scanned", "unevictable_pgs_rescued", @@ -697,7 +694,6 @@ static const char * const vmstat_text[] = { "unevictable_pgs_stranded", "unevictable_pgs_mlockfreed", #endif -#endif }; static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, -- cgit v1.2.3 From b33112d1cc25e658c334125d127a6ae15d5a0ad6 Mon Sep 17 00:00:00 2001 From: "Robert P. J. Day" Date: Tue, 16 Jun 2009 15:33:34 -0700 Subject: kernel/kfifo.c: replace conditional test with is_power_of_2() Signed-off-by: Robert P. J. Day Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kfifo.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/kfifo.c b/kernel/kfifo.c index bc41ad0f24f8..26539e3228e5 100644 --- a/kernel/kfifo.c +++ b/kernel/kfifo.c @@ -72,9 +72,9 @@ struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, spinlock_t *lock) /* * round up to the next power of 2, since our 'let the indices - * wrap' tachnique works only in this case. + * wrap' technique works only in this case. */ - if (size & (size - 1)) { + if (!is_power_of_2(size)) { BUG_ON(size > 0x80000000); size = roundup_pow_of_two(size); } -- cgit v1.2.3 From 30639b6af85a92491b22dd14c17b14ca11da60e6 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Tue, 16 Jun 2009 15:33:40 -0700 Subject: groups: move code to kernel/groups.c Move supplementary groups implementation to kernel/groups.c . 
kernel/sys.c already accumulated quite a few random stuff. Do strictly copy/paste + add required headers to compile. Compile-tested on many configs and archs. Signed-off-by: Alexey Dobriyan Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/Makefile | 1 + kernel/groups.c | 288 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ kernel/sys.c | 283 ------------------------------------------------------- 3 files changed, 289 insertions(+), 283 deletions(-) create mode 100644 kernel/groups.c (limited to 'kernel') diff --git a/kernel/Makefile b/kernel/Makefile index 90b53f6dc226..9df4501cb921 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -11,6 +11,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \ hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \ async.o +obj-y += groups.o ifdef CONFIG_FUNCTION_TRACER # Do not trace debug files and internal ftrace files diff --git a/kernel/groups.c b/kernel/groups.c new file mode 100644 index 000000000000..2b45b2ee3964 --- /dev/null +++ b/kernel/groups.c @@ -0,0 +1,288 @@ +/* + * Supplementary group IDs + */ +#include +#include +#include +#include +#include +#include + +/* init to 2 - one for init_task, one to ensure it is never freed */ +struct group_info init_groups = { .usage = ATOMIC_INIT(2) }; + +struct group_info *groups_alloc(int gidsetsize) +{ + struct group_info *group_info; + int nblocks; + int i; + + nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK; + /* Make sure we always allocate at least one indirect block pointer */ + nblocks = nblocks ? : 1; + group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER); + if (!group_info) + return NULL; + group_info->ngroups = gidsetsize; + group_info->nblocks = nblocks; + atomic_set(&group_info->usage, 1); + + if (gidsetsize <= NGROUPS_SMALL) + group_info->blocks[0] = group_info->small_block; + else { + for (i = 0; i < nblocks; i++) { + gid_t *b; + b = (void *)__get_free_page(GFP_USER); + if (!b) + goto out_undo_partial_alloc; + group_info->blocks[i] = b; + } + } + return group_info; + +out_undo_partial_alloc: + while (--i >= 0) { + free_page((unsigned long)group_info->blocks[i]); + } + kfree(group_info); + return NULL; +} + +EXPORT_SYMBOL(groups_alloc); + +void groups_free(struct group_info *group_info) +{ + if (group_info->blocks[0] != group_info->small_block) { + int i; + for (i = 0; i < group_info->nblocks; i++) + free_page((unsigned long)group_info->blocks[i]); + } + kfree(group_info); +} + +EXPORT_SYMBOL(groups_free); + +/* export the group_info to a user-space array */ +static int groups_to_user(gid_t __user *grouplist, + const struct group_info *group_info) +{ + int i; + unsigned int count = group_info->ngroups; + + for (i = 0; i < group_info->nblocks; i++) { + unsigned int cp_count = min(NGROUPS_PER_BLOCK, count); + unsigned int len = cp_count * sizeof(*grouplist); + + if (copy_to_user(grouplist, group_info->blocks[i], len)) + return -EFAULT; + + grouplist += NGROUPS_PER_BLOCK; + count -= cp_count; + } + return 0; +} + +/* fill a group_info from a user-space array - it must be allocated already */ +static int groups_from_user(struct group_info *group_info, + gid_t __user *grouplist) +{ + int i; + unsigned int count = group_info->ngroups; + + for (i = 0; i < group_info->nblocks; i++) { + unsigned int cp_count = min(NGROUPS_PER_BLOCK, count); + unsigned int len = cp_count * sizeof(*grouplist); + + if (copy_from_user(group_info->blocks[i], grouplist, 
len)) + return -EFAULT; + + grouplist += NGROUPS_PER_BLOCK; + count -= cp_count; + } + return 0; +} + +/* a simple Shell sort */ +static void groups_sort(struct group_info *group_info) +{ + int base, max, stride; + int gidsetsize = group_info->ngroups; + + for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1) + ; /* nothing */ + stride /= 3; + + while (stride) { + max = gidsetsize - stride; + for (base = 0; base < max; base++) { + int left = base; + int right = left + stride; + gid_t tmp = GROUP_AT(group_info, right); + + while (left >= 0 && GROUP_AT(group_info, left) > tmp) { + GROUP_AT(group_info, right) = + GROUP_AT(group_info, left); + right = left; + left -= stride; + } + GROUP_AT(group_info, right) = tmp; + } + stride /= 3; + } +} + +/* a simple bsearch */ +int groups_search(const struct group_info *group_info, gid_t grp) +{ + unsigned int left, right; + + if (!group_info) + return 0; + + left = 0; + right = group_info->ngroups; + while (left < right) { + unsigned int mid = (left+right)/2; + int cmp = grp - GROUP_AT(group_info, mid); + if (cmp > 0) + left = mid + 1; + else if (cmp < 0) + right = mid; + else + return 1; + } + return 0; +} + +/** + * set_groups - Change a group subscription in a set of credentials + * @new: The newly prepared set of credentials to alter + * @group_info: The group list to install + * + * Validate a group subscription and, if valid, insert it into a set + * of credentials. + */ +int set_groups(struct cred *new, struct group_info *group_info) +{ + int retval; + + retval = security_task_setgroups(group_info); + if (retval) + return retval; + + put_group_info(new->group_info); + groups_sort(group_info); + get_group_info(group_info); + new->group_info = group_info; + return 0; +} + +EXPORT_SYMBOL(set_groups); + +/** + * set_current_groups - Change current's group subscription + * @group_info: The group list to impose + * + * Validate a group subscription and, if valid, impose it upon current's task + * security record. + */ +int set_current_groups(struct group_info *group_info) +{ + struct cred *new; + int ret; + + new = prepare_creds(); + if (!new) + return -ENOMEM; + + ret = set_groups(new, group_info); + if (ret < 0) { + abort_creds(new); + return ret; + } + + return commit_creds(new); +} + +EXPORT_SYMBOL(set_current_groups); + +SYSCALL_DEFINE2(getgroups, int, gidsetsize, gid_t __user *, grouplist) +{ + const struct cred *cred = current_cred(); + int i; + + if (gidsetsize < 0) + return -EINVAL; + + /* no need to grab task_lock here; it cannot change */ + i = cred->group_info->ngroups; + if (gidsetsize) { + if (i > gidsetsize) { + i = -EINVAL; + goto out; + } + if (groups_to_user(grouplist, cred->group_info)) { + i = -EFAULT; + goto out; + } + } +out: + return i; +} + +/* + * SMP: Our groups are copy-on-write. We can set them safely + * without another task interfering. + */ + +SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist) +{ + struct group_info *group_info; + int retval; + + if (!capable(CAP_SETGID)) + return -EPERM; + if ((unsigned)gidsetsize > NGROUPS_MAX) + return -EINVAL; + + group_info = groups_alloc(gidsetsize); + if (!group_info) + return -ENOMEM; + retval = groups_from_user(group_info, grouplist); + if (retval) { + put_group_info(group_info); + return retval; + } + + retval = set_current_groups(group_info); + put_group_info(group_info); + + return retval; +} + +/* + * Check whether we're fsgid/egid or in the supplemental group.. 
+ */ +int in_group_p(gid_t grp) +{ + const struct cred *cred = current_cred(); + int retval = 1; + + if (grp != cred->fsgid) + retval = groups_search(cred->group_info, grp); + return retval; +} + +EXPORT_SYMBOL(in_group_p); + +int in_egroup_p(gid_t grp) +{ + const struct cred *cred = current_cred(); + int retval = 1; + + if (grp != cred->egid) + retval = groups_search(cred->group_info, grp); + return retval; +} + +EXPORT_SYMBOL(in_egroup_p); diff --git a/kernel/sys.c b/kernel/sys.c index 438d99a38c87..b3f1097c76fa 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1113,289 +1113,6 @@ out: return err; } -/* - * Supplementary group IDs - */ - -/* init to 2 - one for init_task, one to ensure it is never freed */ -struct group_info init_groups = { .usage = ATOMIC_INIT(2) }; - -struct group_info *groups_alloc(int gidsetsize) -{ - struct group_info *group_info; - int nblocks; - int i; - - nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK; - /* Make sure we always allocate at least one indirect block pointer */ - nblocks = nblocks ? : 1; - group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER); - if (!group_info) - return NULL; - group_info->ngroups = gidsetsize; - group_info->nblocks = nblocks; - atomic_set(&group_info->usage, 1); - - if (gidsetsize <= NGROUPS_SMALL) - group_info->blocks[0] = group_info->small_block; - else { - for (i = 0; i < nblocks; i++) { - gid_t *b; - b = (void *)__get_free_page(GFP_USER); - if (!b) - goto out_undo_partial_alloc; - group_info->blocks[i] = b; - } - } - return group_info; - -out_undo_partial_alloc: - while (--i >= 0) { - free_page((unsigned long)group_info->blocks[i]); - } - kfree(group_info); - return NULL; -} - -EXPORT_SYMBOL(groups_alloc); - -void groups_free(struct group_info *group_info) -{ - if (group_info->blocks[0] != group_info->small_block) { - int i; - for (i = 0; i < group_info->nblocks; i++) - free_page((unsigned long)group_info->blocks[i]); - } - kfree(group_info); -} - -EXPORT_SYMBOL(groups_free); - -/* export the group_info to a user-space array */ -static int groups_to_user(gid_t __user *grouplist, - const struct group_info *group_info) -{ - int i; - unsigned int count = group_info->ngroups; - - for (i = 0; i < group_info->nblocks; i++) { - unsigned int cp_count = min(NGROUPS_PER_BLOCK, count); - unsigned int len = cp_count * sizeof(*grouplist); - - if (copy_to_user(grouplist, group_info->blocks[i], len)) - return -EFAULT; - - grouplist += NGROUPS_PER_BLOCK; - count -= cp_count; - } - return 0; -} - -/* fill a group_info from a user-space array - it must be allocated already */ -static int groups_from_user(struct group_info *group_info, - gid_t __user *grouplist) -{ - int i; - unsigned int count = group_info->ngroups; - - for (i = 0; i < group_info->nblocks; i++) { - unsigned int cp_count = min(NGROUPS_PER_BLOCK, count); - unsigned int len = cp_count * sizeof(*grouplist); - - if (copy_from_user(group_info->blocks[i], grouplist, len)) - return -EFAULT; - - grouplist += NGROUPS_PER_BLOCK; - count -= cp_count; - } - return 0; -} - -/* a simple Shell sort */ -static void groups_sort(struct group_info *group_info) -{ - int base, max, stride; - int gidsetsize = group_info->ngroups; - - for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1) - ; /* nothing */ - stride /= 3; - - while (stride) { - max = gidsetsize - stride; - for (base = 0; base < max; base++) { - int left = base; - int right = left + stride; - gid_t tmp = GROUP_AT(group_info, right); - - while (left >= 0 && GROUP_AT(group_info, left) > 
tmp) { - GROUP_AT(group_info, right) = - GROUP_AT(group_info, left); - right = left; - left -= stride; - } - GROUP_AT(group_info, right) = tmp; - } - stride /= 3; - } -} - -/* a simple bsearch */ -int groups_search(const struct group_info *group_info, gid_t grp) -{ - unsigned int left, right; - - if (!group_info) - return 0; - - left = 0; - right = group_info->ngroups; - while (left < right) { - unsigned int mid = (left+right)/2; - int cmp = grp - GROUP_AT(group_info, mid); - if (cmp > 0) - left = mid + 1; - else if (cmp < 0) - right = mid; - else - return 1; - } - return 0; -} - -/** - * set_groups - Change a group subscription in a set of credentials - * @new: The newly prepared set of credentials to alter - * @group_info: The group list to install - * - * Validate a group subscription and, if valid, insert it into a set - * of credentials. - */ -int set_groups(struct cred *new, struct group_info *group_info) -{ - int retval; - - retval = security_task_setgroups(group_info); - if (retval) - return retval; - - put_group_info(new->group_info); - groups_sort(group_info); - get_group_info(group_info); - new->group_info = group_info; - return 0; -} - -EXPORT_SYMBOL(set_groups); - -/** - * set_current_groups - Change current's group subscription - * @group_info: The group list to impose - * - * Validate a group subscription and, if valid, impose it upon current's task - * security record. - */ -int set_current_groups(struct group_info *group_info) -{ - struct cred *new; - int ret; - - new = prepare_creds(); - if (!new) - return -ENOMEM; - - ret = set_groups(new, group_info); - if (ret < 0) { - abort_creds(new); - return ret; - } - - return commit_creds(new); -} - -EXPORT_SYMBOL(set_current_groups); - -SYSCALL_DEFINE2(getgroups, int, gidsetsize, gid_t __user *, grouplist) -{ - const struct cred *cred = current_cred(); - int i; - - if (gidsetsize < 0) - return -EINVAL; - - /* no need to grab task_lock here; it cannot change */ - i = cred->group_info->ngroups; - if (gidsetsize) { - if (i > gidsetsize) { - i = -EINVAL; - goto out; - } - if (groups_to_user(grouplist, cred->group_info)) { - i = -EFAULT; - goto out; - } - } -out: - return i; -} - -/* - * SMP: Our groups are copy-on-write. We can set them safely - * without another task interfering. - */ - -SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist) -{ - struct group_info *group_info; - int retval; - - if (!capable(CAP_SETGID)) - return -EPERM; - if ((unsigned)gidsetsize > NGROUPS_MAX) - return -EINVAL; - - group_info = groups_alloc(gidsetsize); - if (!group_info) - return -ENOMEM; - retval = groups_from_user(group_info, grouplist); - if (retval) { - put_group_info(group_info); - return retval; - } - - retval = set_current_groups(group_info); - put_group_info(group_info); - - return retval; -} - -/* - * Check whether we're fsgid/egid or in the supplemental group.. 
- */ -int in_group_p(gid_t grp) -{ - const struct cred *cred = current_cred(); - int retval = 1; - - if (grp != cred->fsgid) - retval = groups_search(cred->group_info, grp); - return retval; -} - -EXPORT_SYMBOL(in_group_p); - -int in_egroup_p(gid_t grp) -{ - const struct cred *cred = current_cred(); - int retval = 1; - - if (grp != cred->egid) - retval = groups_search(cred->group_info, grp); - return retval; -} - -EXPORT_SYMBOL(in_egroup_p); - DECLARE_RWSEM(uts_sem); SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name) -- cgit v1.2.3 From 009789f040b71699278e70a6664701c10065e430 Mon Sep 17 00:00:00 2001 From: Chris Peterson Date: Tue, 16 Jun 2009 15:33:43 -0700 Subject: slow-work: use round_jiffies() for thread pool's cull and OOM timers Round the slow work queue's cull and OOM timeouts to whole second boundary with round_jiffies(). The slow work queue uses a pair of timers to cull idle threads and, after OOM, to delay new thread creation. This patch also extracts the mod_timer() logic for the cull timer into a separate helper function. By rounding non-time-critical timers such as these to whole seconds, they will be batched up to fire at the same time rather than being spread out. This allows the CPU wake up less, which saves power. Signed-off-by: Chris Peterson Signed-off-by: David Howells Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/slow-work.c | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/slow-work.c b/kernel/slow-work.c index 521ed2004d63..09d7519557d3 100644 --- a/kernel/slow-work.c +++ b/kernel/slow-work.c @@ -318,6 +318,15 @@ cant_get_ref: } EXPORT_SYMBOL(slow_work_enqueue); +/* + * Schedule a cull of the thread pool at some time in the near future + */ +static void slow_work_schedule_cull(void) +{ + mod_timer(&slow_work_cull_timer, + round_jiffies(jiffies + SLOW_WORK_CULL_TIMEOUT)); +} + /* * Worker thread culling algorithm */ @@ -335,8 +344,7 @@ static bool slow_work_cull_thread(void) list_empty(&vslow_work_queue) && atomic_read(&slow_work_thread_count) > slow_work_min_threads) { - mod_timer(&slow_work_cull_timer, - jiffies + SLOW_WORK_CULL_TIMEOUT); + slow_work_schedule_cull(); do_cull = true; } } @@ -393,8 +401,7 @@ static int slow_work_thread(void *_data) list_empty(&vslow_work_queue) && atomic_read(&slow_work_thread_count) > slow_work_min_threads) - mod_timer(&slow_work_cull_timer, - jiffies + SLOW_WORK_CULL_TIMEOUT); + slow_work_schedule_cull(); continue; } @@ -458,7 +465,7 @@ static void slow_work_new_thread_execute(struct slow_work *work) if (atomic_dec_and_test(&slow_work_thread_count)) BUG(); /* we're running on a slow work thread... 
*/ mod_timer(&slow_work_oom_timer, - jiffies + SLOW_WORK_OOM_TIMEOUT); + round_jiffies(jiffies + SLOW_WORK_OOM_TIMEOUT)); } else { /* ratelimit the starting of new threads */ mod_timer(&slow_work_oom_timer, jiffies + 1); @@ -502,8 +509,7 @@ static int slow_work_min_threads_sysctl(struct ctl_table *table, int write, if (n < 0 && !slow_work_may_not_start_new_thread) slow_work_enqueue(&slow_work_new_thread); else if (n > 0) - mod_timer(&slow_work_cull_timer, - jiffies + SLOW_WORK_CULL_TIMEOUT); + slow_work_schedule_cull(); } mutex_unlock(&slow_work_user_lock); } @@ -529,8 +535,7 @@ static int slow_work_max_threads_sysctl(struct ctl_table *table, int write, atomic_read(&slow_work_thread_count); if (n < 0) - mod_timer(&slow_work_cull_timer, - jiffies + SLOW_WORK_CULL_TIMEOUT); + slow_work_schedule_cull(); } mutex_unlock(&slow_work_user_lock); } -- cgit v1.2.3
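As a footnote to the slow-work patch above, the batching effect of round_jiffies() is easy to see in isolation. A minimal sketch with a hypothetical housekeeping timer (not taken from the patch; the names are made up for illustration and the timer is assumed to have been initialized elsewhere):

/* Hypothetical driver timer, set up with setup_timer() elsewhere;
 * shown only to illustrate the pattern used by the patch above.
 */
static struct timer_list housekeeping_timer;

static void schedule_housekeeping(void)
{
	/*
	 * round_jiffies() pushes the expiry out to the next whole-second
	 * boundary, so non-time-critical timers armed by unrelated
	 * subsystems tend to expire together and the CPU wakes less often.
	 */
	mod_timer(&housekeeping_timer, round_jiffies(jiffies + 30 * HZ));
}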