summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
-rw-r--r--  kernel/sched/cputime.c  52
1 file changed, 22 insertions(+), 30 deletions(-)
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index d28e9c53727c..f3a56bfa745f 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -75,14 +75,13 @@ void irqtime_account_irq(struct task_struct *curr)
}
EXPORT_SYMBOL_GPL(irqtime_account_irq);
-static cputime_t irqtime_tick_accounted(cputime_t maxtime)
+static u64 irqtime_tick_accounted(u64 maxtime)
{
struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
- cputime_t delta;
+ u64 delta;
- delta = nsecs_to_cputime(irqtime->tick_delta);
- delta = min(delta, maxtime);
- irqtime->tick_delta -= cputime_to_nsecs(delta);
+ delta = min(irqtime->tick_delta, maxtime);
+ irqtime->tick_delta -= delta;
return delta;
}
@@ -91,7 +90,7 @@ static cputime_t irqtime_tick_accounted(cputime_t maxtime)
#define sched_clock_irqtime (0)
-static cputime_t irqtime_tick_accounted(cputime_t dummy)
+static u64 irqtime_tick_accounted(u64 dummy)
{
return 0;
}
@@ -234,22 +233,19 @@ void account_idle_time(u64 cputime)
* ticks are not redelivered later. Due to that, this function may on
* occasion account more time than the calling functions think elapsed.
*/
-static __always_inline cputime_t steal_account_process_time(cputime_t maxtime)
+static __always_inline u64 steal_account_process_time(u64 maxtime)
{
#ifdef CONFIG_PARAVIRT
if (static_key_false(&paravirt_steal_enabled)) {
- cputime_t steal_cputime;
- u64 steal, rounded;
+ u64 steal;
steal = paravirt_steal_clock(smp_processor_id());
steal -= this_rq()->prev_steal_time;
+ steal = min(steal, maxtime);
+ account_steal_time(steal);
+ this_rq()->prev_steal_time += steal;
- steal_cputime = min(nsecs_to_cputime(steal), maxtime);
- rounded = cputime_to_nsecs(steal_cputime);
- account_steal_time(rounded);
- this_rq()->prev_steal_time += rounded;
-
- return steal_cputime;
+ return steal;
}
#endif
return 0;
@@ -258,9 +254,9 @@ static __always_inline cputime_t steal_account_process_time(cputime_t maxtime)
/*
* Account how much elapsed time was spent in steal, irq, or softirq time.
*/
-static inline cputime_t account_other_time(cputime_t max)
+static inline u64 account_other_time(u64 max)
{
- cputime_t accounted;
+ u64 accounted;
/* Shall be converted to a lockdep-enabled lightweight check */
WARN_ON_ONCE(!irqs_disabled());
@@ -364,9 +360,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
struct rq *rq, int ticks)
{
- u64 old_cputime = (__force u64) cputime_one_jiffy * ticks;
- cputime_t other;
- u64 cputime;
+ u64 other, cputime = TICK_NSEC * ticks;
/*
* When returning from idle, many ticks can get accounted at
@@ -376,11 +370,10 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
* other time can exceed ticks occasionally.
*/
other = account_other_time(ULONG_MAX);
- if (other >= old_cputime)
+ if (other >= cputime)
return;
- old_cputime -= other;
- cputime = cputime_to_nsecs(old_cputime);
+ cputime -= other;
if (this_cpu_ksoftirqd() == p) {
/*
@@ -477,8 +470,7 @@ void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
*/
void account_process_tick(struct task_struct *p, int user_tick)
{
- cputime_t old_cputime, steal;
- u64 cputime;
+ u64 cputime, steal;
struct rq *rq = this_rq();
if (vtime_accounting_cpu_enabled())
@@ -489,14 +481,13 @@ void account_process_tick(struct task_struct *p, int user_tick)
return;
}
- old_cputime = cputime_one_jiffy;
+ cputime = TICK_NSEC;
steal = steal_account_process_time(ULONG_MAX);
- if (steal >= old_cputime)
+ if (steal >= cputime)
return;
- old_cputime -= steal;
- cputime = cputime_to_nsecs(old_cputime);
+ cputime -= steal;
if (user_tick)
account_user_time(p, cputime);
@@ -520,7 +511,7 @@ void account_idle_ticks(unsigned long ticks)
}
cputime = ticks * TICK_NSEC;
- steal = cputime_to_nsecs(steal_account_process_time(ULONG_MAX));
+ steal = steal_account_process_time(ULONG_MAX);
if (steal >= cputime)
return;
@@ -741,6 +732,7 @@ void vtime_account_user(struct task_struct *tsk)
write_seqcount_begin(&tsk->vtime_seqcount);
tsk->vtime_snap_whence = VTIME_SYS;
if (vtime_delta(tsk)) {
+ u64 nsecs;
delta_cpu = get_vtime_delta(tsk);
account_user_time(tsk, cputime_to_nsecs(delta_cpu));
}